changeset 14422:2b8e28fdf503

Merge
author kvn
date Tue, 05 Nov 2013 17:38:04 -0800
parents 3068270ba476 (current diff) 9ebaac78a8a0 (diff)
children bd29f2c96a5f
files make/Makefile make/defs.make make/linux/makefiles/gcc.make src/cpu/sparc/vm/sharedRuntime_sparc.cpp src/cpu/sparc/vm/sparc.ad src/cpu/x86/vm/x86_32.ad src/cpu/x86/vm/x86_64.ad src/cpu/zero/vm/cppInterpreter_zero.cpp src/cpu/zero/vm/globals_zero.hpp src/os/bsd/vm/os_bsd.cpp src/os/linux/vm/os_linux.cpp src/os/posix/vm/os_posix.cpp src/os/solaris/vm/os_solaris.cpp src/share/vm/adlc/main.cpp src/share/vm/adlc/output_h.cpp src/share/vm/c1/c1_globals.hpp src/share/vm/classfile/classLoader.cpp src/share/vm/classfile/genericSignatures.cpp src/share/vm/classfile/genericSignatures.hpp src/share/vm/code/nmethod.cpp src/share/vm/code/relocInfo.hpp src/share/vm/compiler/compileBroker.cpp src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp src/share/vm/interpreter/abstractInterpreter.hpp src/share/vm/interpreter/interpreterRuntime.cpp src/share/vm/interpreter/templateTable.hpp src/share/vm/libadt/port.hpp src/share/vm/memory/allocation.cpp src/share/vm/memory/allocation.hpp src/share/vm/memory/metablock.cpp src/share/vm/memory/metablock.hpp src/share/vm/memory/universe.cpp src/share/vm/oops/methodData.cpp src/share/vm/oops/methodData.hpp src/share/vm/opto/c2_globals.hpp src/share/vm/opto/c2compiler.cpp src/share/vm/opto/compile.cpp src/share/vm/opto/gcm.cpp src/share/vm/opto/generateOptoStub.cpp src/share/vm/opto/lcm.cpp src/share/vm/opto/loopTransform.cpp src/share/vm/opto/matcher.cpp src/share/vm/opto/output.cpp src/share/vm/opto/runtime.cpp src/share/vm/prims/jvm.cpp src/share/vm/prims/jvm.h src/share/vm/prims/nativeLookup.cpp src/share/vm/runtime/arguments.cpp src/share/vm/runtime/deoptimization.cpp src/share/vm/runtime/frame.cpp src/share/vm/runtime/frame.hpp src/share/vm/runtime/globals.hpp src/share/vm/runtime/interfaceSupport.hpp src/share/vm/runtime/mutexLocker.hpp src/share/vm/runtime/os.cpp src/share/vm/runtime/os.hpp src/share/vm/runtime/sharedRuntime.cpp src/share/vm/runtime/synchronizer.cpp src/share/vm/runtime/thread.cpp src/share/vm/runtime/thread.hpp src/share/vm/runtime/virtualspace.cpp src/share/vm/runtime/vmStructs.cpp src/share/vm/utilities/decoder.cpp src/share/vm/utilities/globalDefinitions.hpp src/share/vm/utilities/ostream.cpp src/share/vm/utilities/taskqueue.hpp test/compiler/8013496/Test8013496.sh test/gc/7168848/HumongousAlloc.java test/gc/metaspace/ClassMetaspaceSizeInJmapHeap.java test/runtime/6878713/Test6878713.sh test/runtime/6878713/testcase.jar test/runtime/7020373/Test7020373.sh test/runtime/7020373/testcase.jar test/testlibrary/AssertsTest.java test/testlibrary/OutputAnalyzerReportingTest.java test/testlibrary/OutputAnalyzerTest.java
diffstat 681 files changed, 27798 insertions(+), 12089 deletions(-)
--- a/.hgtags	Wed Oct 16 10:52:41 2013 +0200
+++ b/.hgtags	Tue Nov 05 17:38:04 2013 -0800
@@ -374,3 +374,20 @@
 acac3bde66b2c22791c257a8d99611d6d08c6713 jdk8-b105
 18b4798adbc42c6fa16f5ecb7d5cd3ca130754bf hs25-b48
 aed585cafc0d9655726af6d1e1081d1c94cb3b5c jdk8-b106
+50794d8ac11c9579b41dec4de23b808fef9f34a1 hs25-b49
+5b7f90aab3ad25a25b75b7b2bb18d5ae23d8231c jdk8-b107
+a09fe9d1e016c285307507a5793bc4fa6215e9c9 hs25-b50
+85072013aad46050a362d10ab78e963121c8014c jdk8-b108
+566db1b0e6efca31f181456e54c8911d0192410d hs25-b51
+c81dd5393a5e333df7cb1f6621f5897ada6522b5 jdk8-b109
+58043478c26d4e8bf48700acea5f97aba8b417d4 hs25-b52
+6209b0ed51c086d4127bac0e086c8f326d1764d7 jdk8-b110
+562a3d356de67670b4172b82aca2d30743449e04 hs25-b53
+f6962730bbde82f279a0ae3a1c14bc5e58096c6e jdk8-b111
+4a845c7a463844cead9e1e1641d6bcfb8a77f1c7 hs25-b54
+0ed9a90f45e1b392c671005f9ee22ce1acf02984 jdk8-b112
+23b8db5ea31d3079f1326afde4cd5c67b1dac49c hs25-b55
+4589b398ab03aba6a5da8c06ff53603488d1b8f4 jdk8-b113
+82a9cdbf683e374a76f2009352de53e16bed5a91 hs25-b56
+7fd913010dbbf75260688fd2fa8964763fa49a09 jdk8-b114
+3b32d287da89a47a45d16f6d9ba5bd3cd9bf4b3e hs25-b57
--- a/agent/src/os/bsd/ps_core.c	Wed Oct 16 10:52:41 2013 +0200
+++ b/agent/src/os/bsd/ps_core.c	Tue Nov 05 17:38:04 2013 -0800
@@ -44,6 +44,7 @@
 // close all file descriptors
 static void close_files(struct ps_prochandle* ph) {
   lib_info* lib = NULL;
+
   // close core file descriptor
   if (ph->core->core_fd >= 0)
     close(ph->core->core_fd);
@@ -149,8 +150,7 @@
 
 // Return the map_info for the given virtual address.  We keep a sorted
 // array of pointers in ph->map_array, so we can binary search.
-static map_info* core_lookup(struct ps_prochandle *ph, uintptr_t addr)
-{
+static map_info* core_lookup(struct ps_prochandle *ph, uintptr_t addr) {
   int mid, lo = 0, hi = ph->core->num_maps - 1;
   map_info *mp;
 
@@ -230,9 +230,9 @@
     size_t _used;            // for setting space top on read
 
     // 4991491 NOTICE These are C++ bool's in filemap.hpp and must match up with
-    // the C type matching the C++ bool type on any given platform. For
-    // Hotspot on BSD we assume the corresponding C type is char but
-    // licensees on BSD versions may need to adjust the type of these fields.
+    // the C type matching the C++ bool type on any given platform.
+    // We assume the corresponding C type is char but licensees
+    // may need to adjust the type of these fields.
     char   _read_only;       // read only space?
     char   _allow_exec;      // executable code in space?
 
@@ -286,10 +286,12 @@
 #define USE_SHARED_SPACES_SYM "_UseSharedSpaces"
 // mangled name of Arguments::SharedArchivePath
 #define SHARED_ARCHIVE_PATH_SYM "_ZN9Arguments17SharedArchivePathE"
+#define LIBJVM_NAME "/libjvm.dylib"
 #else
 #define USE_SHARED_SPACES_SYM "UseSharedSpaces"
 // mangled name of Arguments::SharedArchivePath
 #define SHARED_ARCHIVE_PATH_SYM "__ZN9Arguments17SharedArchivePathE"
+#define LIBJVM_NAME "/libjvm.so"
 #endif // __APPLE_
 
 static bool init_classsharing_workaround(struct ps_prochandle* ph) {
@@ -300,12 +302,7 @@
     // we are iterating over shared objects from the core dump. look for
     // libjvm.so.
     const char *jvm_name = 0;
-#ifdef __APPLE__
-    if ((jvm_name = strstr(lib->name, "/libjvm.dylib")) != 0)
-#else
-    if ((jvm_name = strstr(lib->name, "/libjvm.so")) != 0)
-#endif // __APPLE__
-    {
+    if ((jvm_name = strstr(lib->name, LIBJVM_NAME)) != 0) {
       char classes_jsa[PATH_MAX];
       struct FileMapHeader header;
       int fd = -1;
@@ -399,8 +396,8 @@
         }
       }
       return true;
-    }
-    lib = lib->next;
+   }
+   lib = lib->next;
   }
   return true;
 }
@@ -432,8 +429,8 @@
   // allocate map_array
   map_info** array;
   if ( (array = (map_info**) malloc(sizeof(map_info*) * num_maps)) == NULL) {
-     print_debug("can't allocate memory for map array\n");
-     return false;
+    print_debug("can't allocate memory for map array\n");
+    return false;
   }
 
   // add maps to array
@@ -450,7 +447,7 @@
   ph->core->map_array = array;
   // sort the map_info array by base virtual address.
   qsort(ph->core->map_array, ph->core->num_maps, sizeof (map_info*),
-           core_cmp_mapping);
+        core_cmp_mapping);
 
   // print map
   if (is_debug()) {
@@ -458,7 +455,7 @@
     print_debug("---- sorted virtual address map ----\n");
     for (j = 0; j < ph->core->num_maps; j++) {
       print_debug("base = 0x%lx\tsize = %d\n", ph->core->map_array[j]->vaddr,
-                                       ph->core->map_array[j]->memsz);
+                  ph->core->map_array[j]->memsz);
     }
   }
 
@@ -1091,9 +1088,9 @@
                                    notep->n_type, notep->n_descsz);
 
       if (notep->n_type == NT_PRSTATUS) {
-         if (core_handle_prstatus(ph, descdata, notep->n_descsz) != true) {
-            return false;
-         }
+        if (core_handle_prstatus(ph, descdata, notep->n_descsz) != true) {
+          return false;
+        }
       }
       p = descdata + ROUNDUP(notep->n_descsz, 4);
    }
@@ -1121,7 +1118,7 @@
     * contains a set of saved /proc structures), and PT_LOAD (which
     * represents a memory mapping from the process's address space).
     *
-    * Difference b/w Solaris PT_NOTE and BSD PT_NOTE:
+    * Difference b/w Solaris PT_NOTE and Linux/BSD PT_NOTE:
     *
     *     In Solaris there are two PT_NOTE segments the first PT_NOTE (if present)
     *     contains /proc structs in the pre-2.6 unstructured /proc format. the last
@@ -1167,32 +1164,61 @@
 
 // read segments of a shared object
 static bool read_lib_segments(struct ps_prochandle* ph, int lib_fd, ELF_EHDR* lib_ehdr, uintptr_t lib_base) {
-   int i = 0;
-   ELF_PHDR* phbuf;
-   ELF_PHDR* lib_php = NULL;
+  int i = 0;
+  ELF_PHDR* phbuf;
+  ELF_PHDR* lib_php = NULL;
+
+  int page_size=sysconf(_SC_PAGE_SIZE);
 
-   if ((phbuf = read_program_header_table(lib_fd, lib_ehdr)) == NULL)
-      return false;
+  if ((phbuf = read_program_header_table(lib_fd, lib_ehdr)) == NULL) {
+    return false;
+  }
+
+  // we want to process only PT_LOAD segments that are not writable.
+  // i.e., text segments. The read/write/exec (data) segments would
+  // have been already added from core file segments.
+  for (lib_php = phbuf, i = 0; i < lib_ehdr->e_phnum; i++) {
+    if ((lib_php->p_type == PT_LOAD) && !(lib_php->p_flags & PF_W) && (lib_php->p_filesz != 0)) {
+
+      uintptr_t target_vaddr = lib_php->p_vaddr + lib_base;
+      map_info *existing_map = core_lookup(ph, target_vaddr);
 
-   // we want to process only PT_LOAD segments that are not writable.
-   // i.e., text segments. The read/write/exec (data) segments would
-   // have been already added from core file segments.
-   for (lib_php = phbuf, i = 0; i < lib_ehdr->e_phnum; i++) {
-      if ((lib_php->p_type == PT_LOAD) && !(lib_php->p_flags & PF_W) && (lib_php->p_filesz != 0)) {
-         if (add_map_info(ph, lib_fd, lib_php->p_offset, lib_php->p_vaddr + lib_base, lib_php->p_filesz) == NULL)
-            goto err;
+      if (existing_map == NULL){
+        if (add_map_info(ph, lib_fd, lib_php->p_offset,
+                          target_vaddr, lib_php->p_filesz) == NULL) {
+          goto err;
+        }
+      } else {
+        if ((existing_map->memsz != page_size) &&
+            (existing_map->fd != lib_fd) &&
+            (existing_map->memsz != lib_php->p_filesz)){
+
+          print_debug("address conflict @ 0x%lx (size = %ld, flags = %d\n)",
+                        target_vaddr, lib_php->p_filesz, lib_php->p_flags);
+          goto err;
+        }
+
+        /* replace PT_LOAD segment with library segment */
+        print_debug("overwrote with new address mapping (memsz %ld -> %ld)\n",
+                     existing_map->memsz, lib_php->p_filesz);
+
+        existing_map->fd = lib_fd;
+        existing_map->offset = lib_php->p_offset;
+        existing_map->memsz = lib_php->p_filesz;
       }
-      lib_php++;
-   }
+    }
+
+    lib_php++;
+  }
 
-   free(phbuf);
-   return true;
+  free(phbuf);
+  return true;
 err:
-   free(phbuf);
-   return false;
+  free(phbuf);
+  return false;
 }
 
-// process segments from interpreter (ld-elf.so.1)
+// process segments from interpreter (ld.so or ld-linux.so or ld-elf.so)
 static bool read_interp_segments(struct ps_prochandle* ph) {
    ELF_EHDR interp_ehdr;
 
@@ -1303,32 +1329,34 @@
   debug_base = dyn.d_un.d_ptr;
   // at debug_base we have struct r_debug. This has first link map in r_map field
   if (ps_pread(ph, (psaddr_t) debug_base + FIRST_LINK_MAP_OFFSET,
-                  &first_link_map_addr, sizeof(uintptr_t)) != PS_OK) {
+                 &first_link_map_addr, sizeof(uintptr_t)) != PS_OK) {
     print_debug("can't read first link map address\n");
     return false;
   }
 
   // read ld_base address from struct r_debug
-  // XXX: There is no r_ldbase member on BSD
-  /*
+#if 0  // There is no r_ldbase member on BSD
   if (ps_pread(ph, (psaddr_t) debug_base + LD_BASE_OFFSET, &ld_base_addr,
                   sizeof(uintptr_t)) != PS_OK) {
     print_debug("can't read ld base address\n");
     return false;
   }
   ph->core->ld_base_addr = ld_base_addr;
-  */
+#else
   ph->core->ld_base_addr = 0;
+#endif
 
   print_debug("interpreter base address is 0x%lx\n", ld_base_addr);
 
-  // now read segments from interp (i.e ld-elf.so.1)
-  if (read_interp_segments(ph) != true)
+  // now read segments from interp (i.e ld.so or ld-linux.so or ld-elf.so)
+  if (read_interp_segments(ph) != true) {
     return false;
+  }
 
   // after adding interpreter (ld.so) mappings sort again
-  if (sort_map_array(ph) != true)
+  if (sort_map_array(ph) != true) {
     return false;
+  }
 
   print_debug("first link map is at 0x%lx\n", first_link_map_addr);
 
@@ -1380,8 +1408,9 @@
           add_lib_info_fd(ph, lib_name, lib_fd, lib_base);
           // Map info is added for the library (lib_name) so
           // we need to re-sort it before calling the p_pdread.
-          if (sort_map_array(ph) != true)
+          if (sort_map_array(ph) != true) {
             return false;
+          }
         } else {
           print_debug("can't read ELF header for shared object %s\n", lib_name);
           close(lib_fd);
@@ -1392,7 +1421,7 @@
 
     // read next link_map address
     if (ps_pread(ph, (psaddr_t) link_map_addr + LINK_MAP_NEXT_OFFSET,
-                 &link_map_addr, sizeof(uintptr_t)) != PS_OK) {
+                  &link_map_addr, sizeof(uintptr_t)) != PS_OK) {
       print_debug("can't read next link in link_map\n");
       return false;
     }
@@ -1408,7 +1437,7 @@
 
   struct ps_prochandle* ph = (struct ps_prochandle*) calloc(1, sizeof(struct ps_prochandle));
   if (ph == NULL) {
-    print_debug("cant allocate ps_prochandle\n");
+    print_debug("can't allocate ps_prochandle\n");
     return NULL;
   }
 
@@ -1444,38 +1473,45 @@
   }
 
   if (read_elf_header(ph->core->exec_fd, &exec_ehdr) != true || exec_ehdr.e_type != ET_EXEC) {
-     print_debug("executable file is not a valid ELF ET_EXEC file\n");
-     goto err;
+    print_debug("executable file is not a valid ELF ET_EXEC file\n");
+    goto err;
   }
 
   // process core file segments
-  if (read_core_segments(ph, &core_ehdr) != true)
-     goto err;
+  if (read_core_segments(ph, &core_ehdr) != true) {
+    goto err;
+  }
 
   // process exec file segments
-  if (read_exec_segments(ph, &exec_ehdr) != true)
-     goto err;
+  if (read_exec_segments(ph, &exec_ehdr) != true) {
+    goto err;
+  }
 
   // exec file is also treated like a shared object for symbol search
   if (add_lib_info_fd(ph, exec_file, ph->core->exec_fd,
-                      (uintptr_t)0 + find_base_address(ph->core->exec_fd, &exec_ehdr)) == NULL)
-     goto err;
+                      (uintptr_t)0 + find_base_address(ph->core->exec_fd, &exec_ehdr)) == NULL) {
+    goto err;
+  }
 
   // allocate and sort maps into map_array, we need to do this
   // here because read_shared_lib_info needs to read from debuggee
   // address space
-  if (sort_map_array(ph) != true)
+  if (sort_map_array(ph) != true) {
     goto err;
+  }
 
-  if (read_shared_lib_info(ph) != true)
+  if (read_shared_lib_info(ph) != true) {
     goto err;
+  }
 
   // sort again because we have added more mappings from shared objects
-  if (sort_map_array(ph) != true)
+  if (sort_map_array(ph) != true) {
     goto err;
+  }
 
-  if (init_classsharing_workaround(ph) != true)
+  if (init_classsharing_workaround(ph) != true) {
     goto err;
+  }
 
   print_debug("Leave Pgrab_core\n");
   return ph;
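For context on the read_lib_segments rework above: instead of adding every non-writable PT_LOAD segment unconditionally, the new code first asks core_lookup whether the core file already mapped that address, then either rejects a genuine size conflict or overwrites the stale core mapping with the library's text segment. A minimal standalone sketch of that policy follows; map_entry and replace_or_reject are illustrative stand-ins, not names from the HotSpot sources.

#include <stdbool.h>
#include <stddef.h>      /* size_t */
#include <sys/types.h>   /* off_t  */

typedef struct {
  int    fd;       /* file descriptor backing the mapping */
  off_t  offset;   /* file offset of the mapping          */
  size_t memsz;    /* size of the mapping                 */
} map_entry;

/* Mirrors the overlap policy in the hunk above: a library text
 * segment may replace an existing core-file mapping unless the
 * two mappings genuinely disagree in size. */
static bool replace_or_reject(map_entry* existing, int lib_fd,
                              off_t lib_offset, size_t lib_filesz,
                              size_t page_size) {
  if (existing->memsz != page_size &&
      existing->fd != lib_fd &&
      existing->memsz != lib_filesz) {
    return false;             /* address conflict: bail out */
  }
  existing->fd     = lib_fd;  /* replace PT_LOAD segment with */
  existing->offset = lib_offset;  /* the library segment      */
  existing->memsz  = lib_filesz;
  return true;
}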
--- a/agent/src/os/bsd/ps_proc.c	Wed Oct 16 10:52:41 2013 +0200
+++ b/agent/src/os/bsd/ps_proc.c	Tue Nov 05 17:38:04 2013 -0800
@@ -131,7 +131,7 @@
 
 static bool ptrace_continue(pid_t pid, int signal) {
   // pass the signal to the process so we don't swallow it
-  if (ptrace(PTRACE_CONT, pid, NULL, signal) < 0) {
+  if (ptrace(PT_CONTINUE, pid, NULL, signal) < 0) {
     print_debug("ptrace(PTRACE_CONT, ..) failed for %d\n", pid);
     return false;
   }
@@ -434,7 +434,6 @@
 // attach to the process. One and only one exposed stuff
 struct ps_prochandle* Pgrab(pid_t pid) {
   struct ps_prochandle* ph = NULL;
-  thread_info* thr = NULL;
 
   if ( (ph = (struct ps_prochandle*) calloc(1, sizeof(struct ps_prochandle))) == NULL) {
      print_debug("can't allocate memory for ps_prochandle\n");
--- a/agent/src/os/linux/LinuxDebuggerLocal.c	Wed Oct 16 10:52:41 2013 +0200
+++ b/agent/src/os/linux/LinuxDebuggerLocal.c	Tue Nov 05 17:38:04 2013 -0800
@@ -29,6 +29,7 @@
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <fcntl.h>
+#include <stdlib.h>
 #include <string.h>
 #include <limits.h>
 
@@ -80,7 +81,7 @@
   (JNIEnv *env, jclass cls) {
   jclass listClass;
 
-  if (init_libproc(getenv("LIBSAPROC_DEBUG")) != true) {
+  if (init_libproc(getenv("LIBSAPROC_DEBUG") != NULL) != true) {
      THROW_NEW_DEBUGGER_EXCEPTION("can't initialize libproc");
   }
 
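The one-line fix above converts getenv()'s char* result into the boolean that init_libproc() expects; previously the raw pointer was passed where a flag was wanted. A minimal illustration, with init_libproc reduced to a stand-in:

#include <stdbool.h>
#include <stdlib.h>

static bool init_libproc(bool debug) {
  (void)debug;   /* stand-in for the real libproc initializer */
  return true;
}

int main(void) {
  /* getenv returns NULL when the variable is unset, so comparing
   * against NULL yields the intended "debug requested?" flag. */
  bool debug = getenv("LIBSAPROC_DEBUG") != NULL;
  return init_libproc(debug) ? 0 : 1;
}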
--- a/agent/src/os/linux/ps_core.c	Wed Oct 16 10:52:41 2013 +0200
+++ b/agent/src/os/linux/ps_core.c	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -41,155 +41,158 @@
 // ps_prochandle cleanup helper functions
 
 // close all file descriptors
-static void close_elf_files(struct ps_prochandle* ph) {
-   lib_info* lib = NULL;
+static void close_files(struct ps_prochandle* ph) {
+  lib_info* lib = NULL;
 
-   // close core file descriptor
-   if (ph->core->core_fd >= 0)
-     close(ph->core->core_fd);
+  // close core file descriptor
+  if (ph->core->core_fd >= 0)
+    close(ph->core->core_fd);
 
-   // close exec file descriptor
-   if (ph->core->exec_fd >= 0)
-     close(ph->core->exec_fd);
+  // close exec file descriptor
+  if (ph->core->exec_fd >= 0)
+    close(ph->core->exec_fd);
 
-   // close interp file descriptor
-   if (ph->core->interp_fd >= 0)
-     close(ph->core->interp_fd);
+  // close interp file descriptor
+  if (ph->core->interp_fd >= 0)
+    close(ph->core->interp_fd);
 
-   // close class share archive file
-   if (ph->core->classes_jsa_fd >= 0)
-     close(ph->core->classes_jsa_fd);
+  // close class share archive file
+  if (ph->core->classes_jsa_fd >= 0)
+    close(ph->core->classes_jsa_fd);
 
-   // close all library file descriptors
-   lib = ph->libs;
-   while (lib) {
-      int fd = lib->fd;
-      if (fd >= 0 && fd != ph->core->exec_fd) close(fd);
-      lib = lib->next;
-   }
+  // close all library file descriptors
+  lib = ph->libs;
+  while (lib) {
+    int fd = lib->fd;
+    if (fd >= 0 && fd != ph->core->exec_fd) {
+      close(fd);
+    }
+    lib = lib->next;
+  }
 }
 
 // clean all map_info stuff
 static void destroy_map_info(struct ps_prochandle* ph) {
   map_info* map = ph->core->maps;
   while (map) {
-     map_info* next = map->next;
-     free(map);
-     map = next;
+    map_info* next = map->next;
+    free(map);
+    map = next;
   }
 
   if (ph->core->map_array) {
-     free(ph->core->map_array);
+    free(ph->core->map_array);
   }
 
   // Part of the class sharing workaround
   map = ph->core->class_share_maps;
   while (map) {
-     map_info* next = map->next;
-     free(map);
-     map = next;
+    map_info* next = map->next;
+    free(map);
+    map = next;
   }
 }
 
 // ps_prochandle operations
 static void core_release(struct ps_prochandle* ph) {
-   if (ph->core) {
-      close_elf_files(ph);
-      destroy_map_info(ph);
-      free(ph->core);
-   }
+  if (ph->core) {
+    close_files(ph);
+    destroy_map_info(ph);
+    free(ph->core);
+  }
 }
 
 static map_info* allocate_init_map(int fd, off_t offset, uintptr_t vaddr, size_t memsz) {
-   map_info* map;
-   if ( (map = (map_info*) calloc(1, sizeof(map_info))) == NULL) {
-      print_debug("can't allocate memory for map_info\n");
-      return NULL;
-   }
+  map_info* map;
+  if ( (map = (map_info*) calloc(1, sizeof(map_info))) == NULL) {
+    print_debug("can't allocate memory for map_info\n");
+    return NULL;
+  }
 
-   // initialize map
-   map->fd     = fd;
-   map->offset = offset;
-   map->vaddr  = vaddr;
-   map->memsz  = memsz;
-   return map;
+  // initialize map
+  map->fd     = fd;
+  map->offset = offset;
+  map->vaddr  = vaddr;
+  map->memsz  = memsz;
+  return map;
 }
 
 // add map info with given fd, offset, vaddr and memsz
 static map_info* add_map_info(struct ps_prochandle* ph, int fd, off_t offset,
                              uintptr_t vaddr, size_t memsz) {
-   map_info* map;
-   if ((map = allocate_init_map(fd, offset, vaddr, memsz)) == NULL) {
-      return NULL;
-   }
+  map_info* map;
+  if ((map = allocate_init_map(fd, offset, vaddr, memsz)) == NULL) {
+    return NULL;
+  }
 
-   // add this to map list
-   map->next  = ph->core->maps;
-   ph->core->maps   = map;
-   ph->core->num_maps++;
+  // add this to map list
+  map->next  = ph->core->maps;
+  ph->core->maps   = map;
+  ph->core->num_maps++;
 
-   return map;
+  return map;
 }
 
 // Part of the class sharing workaround
-static void add_class_share_map_info(struct ps_prochandle* ph, off_t offset,
+static map_info* add_class_share_map_info(struct ps_prochandle* ph, off_t offset,
                              uintptr_t vaddr, size_t memsz) {
-   map_info* map;
-   if ((map = allocate_init_map(ph->core->classes_jsa_fd,
-                                offset, vaddr, memsz)) == NULL) {
-      return;
-   }
+  map_info* map;
+  if ((map = allocate_init_map(ph->core->classes_jsa_fd,
+                               offset, vaddr, memsz)) == NULL) {
+    return NULL;
+  }
 
-   map->next = ph->core->class_share_maps;
-   ph->core->class_share_maps = map;
+  map->next = ph->core->class_share_maps;
+  ph->core->class_share_maps = map;
+  return map;
 }
 
 // Return the map_info for the given virtual address.  We keep a sorted
 // array of pointers in ph->map_array, so we can binary search.
-static map_info* core_lookup(struct ps_prochandle *ph, uintptr_t addr)
-{
-   int mid, lo = 0, hi = ph->core->num_maps - 1;
-   map_info *mp;
+static map_info* core_lookup(struct ps_prochandle *ph, uintptr_t addr) {
+  int mid, lo = 0, hi = ph->core->num_maps - 1;
+  map_info *mp;
 
-   while (hi - lo > 1) {
-     mid = (lo + hi) / 2;
-      if (addr >= ph->core->map_array[mid]->vaddr)
-         lo = mid;
-      else
-         hi = mid;
-   }
+  while (hi - lo > 1) {
+    mid = (lo + hi) / 2;
+    if (addr >= ph->core->map_array[mid]->vaddr) {
+      lo = mid;
+    } else {
+      hi = mid;
+    }
+  }
 
-   if (addr < ph->core->map_array[hi]->vaddr)
-      mp = ph->core->map_array[lo];
-   else
-      mp = ph->core->map_array[hi];
+  if (addr < ph->core->map_array[hi]->vaddr) {
+    mp = ph->core->map_array[lo];
+  } else {
+    mp = ph->core->map_array[hi];
+  }
 
-   if (addr >= mp->vaddr && addr < mp->vaddr + mp->memsz)
-      return (mp);
+  if (addr >= mp->vaddr && addr < mp->vaddr + mp->memsz) {
+    return (mp);
+  }
 
 
-   // Part of the class sharing workaround
-   // Unfortunately, we have no way of detecting -Xshare state.
-   // Check out the share maps atlast, if we don't find anywhere.
-   // This is done this way so to avoid reading share pages
-   // ahead of other normal maps. For eg. with -Xshare:off we don't
-   // want to prefer class sharing data to data from core.
-   mp = ph->core->class_share_maps;
-   if (mp) {
-      print_debug("can't locate map_info at 0x%lx, trying class share maps\n",
-             addr);
-   }
-   while (mp) {
-      if (addr >= mp->vaddr && addr < mp->vaddr + mp->memsz) {
-         print_debug("located map_info at 0x%lx from class share maps\n",
-                  addr);
-         return (mp);
-      }
-      mp = mp->next;
-   }
+  // Part of the class sharing workaround
+  // Unfortunately, we have no way of detecting -Xshare state.
+  // Check out the share maps atlast, if we don't find anywhere.
+  // This is done this way so to avoid reading share pages
+  // ahead of other normal maps. For eg. with -Xshare:off we don't
+  // want to prefer class sharing data to data from core.
+  mp = ph->core->class_share_maps;
+  if (mp) {
+    print_debug("can't locate map_info at 0x%lx, trying class share maps\n", addr);
+  }
+  while (mp) {
+    if (addr >= mp->vaddr && addr < mp->vaddr + mp->memsz) {
+      print_debug("located map_info at 0x%lx from class share maps\n", addr);
+      return (mp);
+    }
+    mp = mp->next;
+  }
 
-   print_debug("can't locate map_info at 0x%lx\n", addr);
-   return (NULL);
+  print_debug("can't locate map_info at 0x%lx\n", addr);
+  return (NULL);
 }
 
 //---------------------------------------------------------------
@@ -226,9 +229,9 @@
     size_t _used;            // for setting space top on read
 
     // 4991491 NOTICE These are C++ bool's in filemap.hpp and must match up with
-    // the C type matching the C++ bool type on any given platform. For
-    // Hotspot on Linux we assume the corresponding C type is char but
-    // licensees on Linux versions may need to adjust the type of these fields.
+    // the C type matching the C++ bool type on any given platform.
+    // We assume the corresponding C type is char but licensees
+    // may need to adjust the type of these fields.
     char   _read_only;       // read only space?
     char   _allow_exec;      // executable code in space?
 
@@ -238,154 +241,159 @@
 };
 
 static bool read_jboolean(struct ps_prochandle* ph, uintptr_t addr, jboolean* pvalue) {
-   jboolean i;
-   if (ps_pdread(ph, (psaddr_t) addr, &i, sizeof(i)) == PS_OK) {
-      *pvalue = i;
-      return true;
-   } else {
-      return false;
-   }
+  jboolean i;
+  if (ps_pdread(ph, (psaddr_t) addr, &i, sizeof(i)) == PS_OK) {
+    *pvalue = i;
+    return true;
+  } else {
+    return false;
+  }
 }
 
 static bool read_pointer(struct ps_prochandle* ph, uintptr_t addr, uintptr_t* pvalue) {
-   uintptr_t uip;
-   if (ps_pdread(ph, (psaddr_t) addr, &uip, sizeof(uip)) == PS_OK) {
-      *pvalue = uip;
-      return true;
-   } else {
-      return false;
-   }
+  uintptr_t uip;
+  if (ps_pdread(ph, (psaddr_t) addr, (char *)&uip, sizeof(uip)) == PS_OK) {
+    *pvalue = uip;
+    return true;
+  } else {
+    return false;
+  }
 }
 
 // used to read strings from debuggee
 static bool read_string(struct ps_prochandle* ph, uintptr_t addr, char* buf, size_t size) {
-   size_t i = 0;
-   char  c = ' ';
+  size_t i = 0;
+  char  c = ' ';
 
-   while (c != '\0') {
-     if (ps_pdread(ph, (psaddr_t) addr, &c, sizeof(char)) != PS_OK)
-         return false;
-      if (i < size - 1)
-         buf[i] = c;
-      else // smaller buffer
-         return false;
-      i++; addr++;
-   }
+  while (c != '\0') {
+    if (ps_pdread(ph, (psaddr_t) addr, &c, sizeof(char)) != PS_OK) {
+      return false;
+    }
+    if (i < size - 1) {
+      buf[i] = c;
+    } else {
+      // smaller buffer
+      return false;
+    }
+    i++; addr++;
+  }
 
-   buf[i] = '\0';
-   return true;
+  buf[i] = '\0';
+  return true;
 }
 
 #define USE_SHARED_SPACES_SYM "UseSharedSpaces"
 // mangled name of Arguments::SharedArchivePath
 #define SHARED_ARCHIVE_PATH_SYM "_ZN9Arguments17SharedArchivePathE"
+#define LIBJVM_NAME "/libjvm.so"
 
 static bool init_classsharing_workaround(struct ps_prochandle* ph) {
-   lib_info* lib = ph->libs;
-   while (lib != NULL) {
-      // we are iterating over shared objects from the core dump. look for
-      // libjvm.so.
-      const char *jvm_name = 0;
-      if ((jvm_name = strstr(lib->name, "/libjvm.so")) != 0) {
-         char classes_jsa[PATH_MAX];
-         struct FileMapHeader header;
-         size_t n = 0;
-         int fd = -1, m = 0;
-         uintptr_t base = 0, useSharedSpacesAddr = 0;
-         uintptr_t sharedArchivePathAddrAddr = 0, sharedArchivePathAddr = 0;
-         jboolean useSharedSpaces = 0;
-         map_info* mi = 0;
+  lib_info* lib = ph->libs;
+  while (lib != NULL) {
+    // we are iterating over shared objects from the core dump. look for
+    // libjvm.so.
+    const char *jvm_name = 0;
+    if ((jvm_name = strstr(lib->name, LIBJVM_NAME)) != 0) {
+      char classes_jsa[PATH_MAX];
+      struct FileMapHeader header;
+      int fd = -1;
+      int m = 0;
+      size_t n = 0;
+      uintptr_t base = 0, useSharedSpacesAddr = 0;
+      uintptr_t sharedArchivePathAddrAddr = 0, sharedArchivePathAddr = 0;
+      jboolean useSharedSpaces = 0;
+      map_info* mi = 0;
 
-         memset(classes_jsa, 0, sizeof(classes_jsa));
-         jvm_name = lib->name;
-         useSharedSpacesAddr = lookup_symbol(ph, jvm_name, USE_SHARED_SPACES_SYM);
-         if (useSharedSpacesAddr == 0) {
-            print_debug("can't lookup 'UseSharedSpaces' flag\n");
-            return false;
-         }
+      memset(classes_jsa, 0, sizeof(classes_jsa));
+      jvm_name = lib->name;
+      useSharedSpacesAddr = lookup_symbol(ph, jvm_name, USE_SHARED_SPACES_SYM);
+      if (useSharedSpacesAddr == 0) {
+        print_debug("can't lookup 'UseSharedSpaces' flag\n");
+        return false;
+      }
 
-         // Hotspot vm types are not exported to build this library. So
-         // using equivalent type jboolean to read the value of
-         // UseSharedSpaces which is same as hotspot type "bool".
-         if (read_jboolean(ph, useSharedSpacesAddr, &useSharedSpaces) != true) {
-            print_debug("can't read the value of 'UseSharedSpaces' flag\n");
-            return false;
-         }
+      // Hotspot vm types are not exported to build this library. So
+      // using equivalent type jboolean to read the value of
+      // UseSharedSpaces which is same as hotspot type "bool".
+      if (read_jboolean(ph, useSharedSpacesAddr, &useSharedSpaces) != true) {
+        print_debug("can't read the value of 'UseSharedSpaces' flag\n");
+        return false;
+      }
 
-         if ((int)useSharedSpaces == 0) {
-            print_debug("UseSharedSpaces is false, assuming -Xshare:off!\n");
-            return true;
-         }
+      if ((int)useSharedSpaces == 0) {
+        print_debug("UseSharedSpaces is false, assuming -Xshare:off!\n");
+        return true;
+      }
 
-         sharedArchivePathAddrAddr = lookup_symbol(ph, jvm_name, SHARED_ARCHIVE_PATH_SYM);
-         if (sharedArchivePathAddrAddr == 0) {
-            print_debug("can't lookup shared archive path symbol\n");
-            return false;
-         }
+      sharedArchivePathAddrAddr = lookup_symbol(ph, jvm_name, SHARED_ARCHIVE_PATH_SYM);
+      if (sharedArchivePathAddrAddr == 0) {
+        print_debug("can't lookup shared archive path symbol\n");
+        return false;
+      }
 
-         if (read_pointer(ph, sharedArchivePathAddrAddr, &sharedArchivePathAddr) != true) {
-            print_debug("can't read shared archive path pointer\n");
-            return false;
-         }
+      if (read_pointer(ph, sharedArchivePathAddrAddr, &sharedArchivePathAddr) != true) {
+        print_debug("can't read shared archive path pointer\n");
+        return false;
+      }
 
-         if (read_string(ph, sharedArchivePathAddr, classes_jsa, sizeof(classes_jsa)) != true) {
-            print_debug("can't read shared archive path value\n");
-            return false;
-         }
+      if (read_string(ph, sharedArchivePathAddr, classes_jsa, sizeof(classes_jsa)) != true) {
+        print_debug("can't read shared archive path value\n");
+        return false;
+      }
 
-         print_debug("looking for %s\n", classes_jsa);
-         // open the class sharing archive file
-         fd = pathmap_open(classes_jsa);
-         if (fd < 0) {
-            print_debug("can't open %s!\n", classes_jsa);
-            ph->core->classes_jsa_fd = -1;
-            return false;
-         } else {
-            print_debug("opened %s\n", classes_jsa);
-         }
+      print_debug("looking for %s\n", classes_jsa);
+      // open the class sharing archive file
+      fd = pathmap_open(classes_jsa);
+      if (fd < 0) {
+        print_debug("can't open %s!\n", classes_jsa);
+        ph->core->classes_jsa_fd = -1;
+        return false;
+      } else {
+        print_debug("opened %s\n", classes_jsa);
+      }
 
-         // read FileMapHeader from the file
-         memset(&header, 0, sizeof(struct FileMapHeader));
-         if ((n = read(fd, &header, sizeof(struct FileMapHeader)))
-              != sizeof(struct FileMapHeader)) {
-            print_debug("can't read shared archive file map header from %s\n", classes_jsa);
-            close(fd);
-            return false;
-         }
+      // read FileMapHeader from the file
+      memset(&header, 0, sizeof(struct FileMapHeader));
+      if ((n = read(fd, &header, sizeof(struct FileMapHeader)))
+           != sizeof(struct FileMapHeader)) {
+        print_debug("can't read shared archive file map header from %s\n", classes_jsa);
+        close(fd);
+        return false;
+      }
 
-         // check file magic
-         if (header._magic != 0xf00baba2) {
-            print_debug("%s has bad shared archive file magic number 0x%x, expecing 0xf00baba2\n",
-                        classes_jsa, header._magic);
-            close(fd);
-            return false;
-         }
+      // check file magic
+      if (header._magic != 0xf00baba2) {
+        print_debug("%s has bad shared archive file magic number 0x%x, expecing 0xf00baba2\n",
+                     classes_jsa, header._magic);
+        close(fd);
+        return false;
+      }
 
-         // check version
-         if (header._version != CURRENT_ARCHIVE_VERSION) {
-            print_debug("%s has wrong shared archive file version %d, expecting %d\n",
-                        classes_jsa, header._version, CURRENT_ARCHIVE_VERSION);
-            close(fd);
-            return false;
-         }
+      // check version
+      if (header._version != CURRENT_ARCHIVE_VERSION) {
+        print_debug("%s has wrong shared archive file version %d, expecting %d\n",
+                     classes_jsa, header._version, CURRENT_ARCHIVE_VERSION);
+        close(fd);
+        return false;
+      }
 
-         ph->core->classes_jsa_fd = fd;
-         // add read-only maps from classes.jsa to the list of maps
-         for (m = 0; m < NUM_SHARED_MAPS; m++) {
-            if (header._space[m]._read_only) {
-               base = (uintptr_t) header._space[m]._base;
-               // no need to worry about the fractional pages at-the-end.
-               // possible fractional pages are handled by core_read_data.
-               add_class_share_map_info(ph, (off_t) header._space[m]._file_offset,
-                         base, (size_t) header._space[m]._used);
-               print_debug("added a share archive map at 0x%lx\n", base);
-            }
-         }
-         return true;
+      ph->core->classes_jsa_fd = fd;
+      // add read-only maps from classes.jsa to the list of maps
+      for (m = 0; m < NUM_SHARED_MAPS; m++) {
+        if (header._space[m]._read_only) {
+          base = (uintptr_t) header._space[m]._base;
+          // no need to worry about the fractional pages at-the-end.
+          // possible fractional pages are handled by core_read_data.
+          add_class_share_map_info(ph, (off_t) header._space[m]._file_offset,
+                                   base, (size_t) header._space[m]._used);
+          print_debug("added a share archive map at 0x%lx\n", base);
+        }
       }
-      lib = lib->next;
+      return true;
    }
-   return true;
+   lib = lib->next;
+  }
+  return true;
 }
 
 
@@ -396,54 +404,58 @@
 // callback for sorting the array of map_info pointers.
 static int core_cmp_mapping(const void *lhsp, const void *rhsp)
 {
-   const map_info *lhs = *((const map_info **)lhsp);
-   const map_info *rhs = *((const map_info **)rhsp);
+  const map_info *lhs = *((const map_info **)lhsp);
+  const map_info *rhs = *((const map_info **)rhsp);
 
-   if (lhs->vaddr == rhs->vaddr)
-      return (0);
+  if (lhs->vaddr == rhs->vaddr) {
+    return (0);
+  }
 
-   return (lhs->vaddr < rhs->vaddr ? -1 : 1);
+  return (lhs->vaddr < rhs->vaddr ? -1 : 1);
 }
 
 // we sort map_info by starting virtual address so that we can do
 // binary search to read from an address.
 static bool sort_map_array(struct ps_prochandle* ph) {
-   size_t num_maps = ph->core->num_maps;
-   map_info* map = ph->core->maps;
-   int i = 0;
+  size_t num_maps = ph->core->num_maps;
+  map_info* map = ph->core->maps;
+  int i = 0;
 
-   // allocate map_array
-   map_info** array;
-   if ( (array = (map_info**) malloc(sizeof(map_info*) * num_maps)) == NULL) {
-      print_debug("can't allocate memory for map array\n");
-      return false;
-   }
+  // allocate map_array
+  map_info** array;
+  if ( (array = (map_info**) malloc(sizeof(map_info*) * num_maps)) == NULL) {
+    print_debug("can't allocate memory for map array\n");
+    return false;
+  }
 
-   // add maps to array
-   while (map) {
-      array[i] = map;
-      i++;
-      map = map->next;
-   }
+  // add maps to array
+  while (map) {
+    array[i] = map;
+    i++;
+    map = map->next;
+  }
 
-   // sort is called twice. If this is second time, clear map array
-   if (ph->core->map_array) free(ph->core->map_array);
-   ph->core->map_array = array;
-   // sort the map_info array by base virtual address.
-   qsort(ph->core->map_array, ph->core->num_maps, sizeof (map_info*),
-            core_cmp_mapping);
+  // sort is called twice. If this is second time, clear map array
+  if (ph->core->map_array) {
+    free(ph->core->map_array);
+  }
+
+  ph->core->map_array = array;
+  // sort the map_info array by base virtual address.
+  qsort(ph->core->map_array, ph->core->num_maps, sizeof (map_info*),
+        core_cmp_mapping);
 
-   // print map
-   if (is_debug()) {
-      int j = 0;
-      print_debug("---- sorted virtual address map ----\n");
-      for (j = 0; j < ph->core->num_maps; j++) {
-        print_debug("base = 0x%lx\tsize = %zu\n", ph->core->map_array[j]->vaddr,
-                                         ph->core->map_array[j]->memsz);
-      }
-   }
+  // print map
+  if (is_debug()) {
+    int j = 0;
+    print_debug("---- sorted virtual address map ----\n");
+    for (j = 0; j < ph->core->num_maps; j++) {
+      print_debug("base = 0x%lx\tsize = %zu\n", ph->core->map_array[j]->vaddr,
+                  ph->core->map_array[j]->memsz);
+    }
+  }
 
-   return true;
+  return true;
 }
 
 #ifndef MIN
@@ -460,16 +472,18 @@
       off_t off;
       int fd;
 
-      if (mp == NULL)
+      if (mp == NULL) {
          break;  /* No mapping for this address */
+      }
 
       fd = mp->fd;
       mapoff = addr - mp->vaddr;
       len = MIN(resid, mp->memsz - mapoff);
       off = mp->offset + mapoff;
 
-      if ((len = pread(fd, buf, len, off)) <= 0)
+      if ((len = pread(fd, buf, len, off)) <= 0) {
          break;
+      }
 
       resid -= len;
       addr += len;
@@ -625,8 +639,9 @@
                                    notep->n_type, notep->n_descsz);
 
       if (notep->n_type == NT_PRSTATUS) {
-         if (core_handle_prstatus(ph, descdata, notep->n_descsz) != true)
-            return false;
+        if (core_handle_prstatus(ph, descdata, notep->n_descsz) != true) {
+          return false;
+        }
       }
       p = descdata + ROUNDUP(notep->n_descsz, 4);
    }
@@ -654,7 +669,7 @@
     * contains a set of saved /proc structures), and PT_LOAD (which
     * represents a memory mapping from the process's address space).
     *
-    * Difference b/w Solaris PT_NOTE and Linux PT_NOTE:
+    * Difference b/w Solaris PT_NOTE and Linux/BSD PT_NOTE:
     *
     *     In Solaris there are two PT_NOTE segments the first PT_NOTE (if present)
     *     contains /proc structs in the pre-2.6 unstructured /proc format. the last
@@ -674,7 +689,9 @@
     for (core_php = phbuf, i = 0; i < core_ehdr->e_phnum; i++) {
       switch (core_php->p_type) {
          case PT_NOTE:
-            if (core_handle_note(ph, core_php) != true) goto err;
+            if (core_handle_note(ph, core_php) != true) {
+              goto err;
+            }
             break;
 
          case PT_LOAD: {
@@ -698,29 +715,61 @@
 
 // read segments of a shared object
 static bool read_lib_segments(struct ps_prochandle* ph, int lib_fd, ELF_EHDR* lib_ehdr, uintptr_t lib_base) {
-   int i = 0;
-   ELF_PHDR* phbuf;
-   ELF_PHDR* lib_php = NULL;
+  int i = 0;
+  ELF_PHDR* phbuf;
+  ELF_PHDR* lib_php = NULL;
+
+  int page_size = sysconf(_SC_PAGE_SIZE);
+
+  if ((phbuf = read_program_header_table(lib_fd, lib_ehdr)) == NULL) {
+    return false;
+  }
 
-   if ((phbuf = read_program_header_table(lib_fd, lib_ehdr)) == NULL)
-      return false;
+  // we want to process only PT_LOAD segments that are not writable.
+  // i.e., text segments. The read/write/exec (data) segments would
+  // have been already added from core file segments.
+  for (lib_php = phbuf, i = 0; i < lib_ehdr->e_phnum; i++) {
+    if ((lib_php->p_type == PT_LOAD) && !(lib_php->p_flags & PF_W) && (lib_php->p_filesz != 0)) {
+
+      uintptr_t target_vaddr = lib_php->p_vaddr + lib_base;
+      map_info *existing_map = core_lookup(ph, target_vaddr);
+
+      if (existing_map == NULL){
+        if (add_map_info(ph, lib_fd, lib_php->p_offset,
+                          target_vaddr, lib_php->p_memsz) == NULL) {
+          goto err;
+        }
+      } else {
+        // Coredump stores value of p_memsz elf field
+        // rounded up to page boundary.
 
-   // we want to process only PT_LOAD segments that are not writable.
-   // i.e., text segments. The read/write/exec (data) segments would
-   // have been already added from core file segments.
-   for (lib_php = phbuf, i = 0; i < lib_ehdr->e_phnum; i++) {
-      if ((lib_php->p_type == PT_LOAD) && !(lib_php->p_flags & PF_W) && (lib_php->p_filesz != 0)) {
-         if (add_map_info(ph, lib_fd, lib_php->p_offset, lib_php->p_vaddr + lib_base, lib_php->p_filesz) == NULL)
-            goto err;
+        if ((existing_map->memsz != page_size) &&
+            (existing_map->fd != lib_fd) &&
+            (ROUNDUP(existing_map->memsz, page_size) != ROUNDUP(lib_php->p_memsz, page_size))) {
+
+          print_debug("address conflict @ 0x%lx (existing map size = %ld, size = %ld, flags = %d)\n",
+                        target_vaddr, existing_map->memsz, lib_php->p_memsz, lib_php->p_flags);
+          goto err;
+        }
+
+        /* replace PT_LOAD segment with library segment */
+        print_debug("overwrote with new address mapping (memsz %ld -> %ld)\n",
+                     existing_map->memsz, ROUNDUP(lib_php->p_memsz, page_size));
+
+        existing_map->fd = lib_fd;
+        existing_map->offset = lib_php->p_offset;
+        existing_map->memsz = ROUNDUP(lib_php->p_memsz, page_size);
       }
-      lib_php++;
-   }
+    }
+
+    lib_php++;
+  }
 
-   free(phbuf);
-   return true;
+  free(phbuf);
+  return true;
 err:
-   free(phbuf);
-   return false;
+  free(phbuf);
+  return false;
 }
 
 // process segments from interpreter (ld.so or ld-linux.so)
@@ -803,60 +852,62 @@
 // read shared library info from runtime linker's data structures.
 // This work is done by librtlb_db in Solaris
 static bool read_shared_lib_info(struct ps_prochandle* ph) {
-   uintptr_t addr = ph->core->dynamic_addr;
-   uintptr_t debug_base;
-   uintptr_t first_link_map_addr;
-   uintptr_t ld_base_addr;
-   uintptr_t link_map_addr;
-   uintptr_t lib_base_diff;
-   uintptr_t lib_base;
-   uintptr_t lib_name_addr;
-   char lib_name[BUF_SIZE];
-   ELF_DYN dyn;
-   ELF_EHDR elf_ehdr;
-   int lib_fd;
+  uintptr_t addr = ph->core->dynamic_addr;
+  uintptr_t debug_base;
+  uintptr_t first_link_map_addr;
+  uintptr_t ld_base_addr;
+  uintptr_t link_map_addr;
+  uintptr_t lib_base_diff;
+  uintptr_t lib_base;
+  uintptr_t lib_name_addr;
+  char lib_name[BUF_SIZE];
+  ELF_DYN dyn;
+  ELF_EHDR elf_ehdr;
+  int lib_fd;
 
-   // _DYNAMIC has information of the form
-   //         [tag] [data] [tag] [data] .....
-   // Both tag and data are pointer sized.
-   // We look for dynamic info with DT_DEBUG. This has shared object info.
-   // refer to struct r_debug in link.h
+  // _DYNAMIC has information of the form
+  //         [tag] [data] [tag] [data] .....
+  // Both tag and data are pointer sized.
+  // We look for dynamic info with DT_DEBUG. This has shared object info.
+  // refer to struct r_debug in link.h
+
+  dyn.d_tag = DT_NULL;
+  while (dyn.d_tag != DT_DEBUG) {
+    if (ps_pdread(ph, (psaddr_t) addr, &dyn, sizeof(ELF_DYN)) != PS_OK) {
+      print_debug("can't read debug info from _DYNAMIC\n");
+      return false;
+    }
+    addr += sizeof(ELF_DYN);
+  }
 
-   dyn.d_tag = DT_NULL;
-   while (dyn.d_tag != DT_DEBUG) {
-      if (ps_pdread(ph, (psaddr_t) addr, &dyn, sizeof(ELF_DYN)) != PS_OK) {
-         print_debug("can't read debug info from _DYNAMIC\n");
-         return false;
-      }
-      addr += sizeof(ELF_DYN);
-   }
-
-   // we have got Dyn entry with DT_DEBUG
-   debug_base = dyn.d_un.d_ptr;
-   // at debug_base we have struct r_debug. This has first link map in r_map field
-   if (ps_pdread(ph, (psaddr_t) debug_base + FIRST_LINK_MAP_OFFSET,
+  // we have got Dyn entry with DT_DEBUG
+  debug_base = dyn.d_un.d_ptr;
+  // at debug_base we have struct r_debug. This has first link map in r_map field
+  if (ps_pdread(ph, (psaddr_t) debug_base + FIRST_LINK_MAP_OFFSET,
                  &first_link_map_addr, sizeof(uintptr_t)) != PS_OK) {
-      print_debug("can't read first link map address\n");
-      return false;
-   }
+    print_debug("can't read first link map address\n");
+    return false;
+  }
 
-   // read ld_base address from struct r_debug
-   if (ps_pdread(ph, (psaddr_t) debug_base + LD_BASE_OFFSET, &ld_base_addr,
+  // read ld_base address from struct r_debug
+  if (ps_pdread(ph, (psaddr_t) debug_base + LD_BASE_OFFSET, &ld_base_addr,
                  sizeof(uintptr_t)) != PS_OK) {
-      print_debug("can't read ld base address\n");
-      return false;
-   }
-   ph->core->ld_base_addr = ld_base_addr;
+    print_debug("can't read ld base address\n");
+    return false;
+  }
+  ph->core->ld_base_addr = ld_base_addr;
+
+  print_debug("interpreter base address is 0x%lx\n", ld_base_addr);
 
-   print_debug("interpreter base address is 0x%lx\n", ld_base_addr);
-
-   // now read segments from interp (i.e ld.so or ld-linux.so)
-   if (read_interp_segments(ph) != true)
+  // now read segments from interp (i.e ld.so or ld-linux.so or ld-elf.so)
+  if (read_interp_segments(ph) != true) {
       return false;
+  }
 
-   // after adding interpreter (ld.so) mappings sort again
-   if (sort_map_array(ph) != true)
-      return false;
+  // after adding interpreter (ld.so) mappings sort again
+  if (sort_map_array(ph) != true) {
+    return false;
+  }
 
    print_debug("first link map is at 0x%lx\n", first_link_map_addr);
 
@@ -921,95 +972,102 @@
          }
       }
 
-      // read next link_map address
-      if (ps_pdread(ph, (psaddr_t) link_map_addr + LINK_MAP_NEXT_OFFSET,
-                        &link_map_addr, sizeof(uintptr_t)) != PS_OK) {
-         print_debug("can't read next link in link_map\n");
-         return false;
-      }
-   }
+    // read next link_map address
+    if (ps_pdread(ph, (psaddr_t) link_map_addr + LINK_MAP_NEXT_OFFSET,
+                   &link_map_addr, sizeof(uintptr_t)) != PS_OK) {
+      print_debug("can't read next link in link_map\n");
+      return false;
+    }
+  }
 
-   return true;
+  return true;
 }
 
 // the one and only one exposed stuff from this file
 struct ps_prochandle* Pgrab_core(const char* exec_file, const char* core_file) {
-   ELF_EHDR core_ehdr;
-   ELF_EHDR exec_ehdr;
-   ELF_EHDR lib_ehdr;
+  ELF_EHDR core_ehdr;
+  ELF_EHDR exec_ehdr;
+  ELF_EHDR lib_ehdr;
 
-   struct ps_prochandle* ph = (struct ps_prochandle*) calloc(1, sizeof(struct ps_prochandle));
-   if (ph == NULL) {
-      print_debug("can't allocate ps_prochandle\n");
-      return NULL;
-   }
+  struct ps_prochandle* ph = (struct ps_prochandle*) calloc(1, sizeof(struct ps_prochandle));
+  if (ph == NULL) {
+    print_debug("can't allocate ps_prochandle\n");
+    return NULL;
+  }
 
-   if ((ph->core = (struct core_data*) calloc(1, sizeof(struct core_data))) == NULL) {
-      free(ph);
-      print_debug("can't allocate ps_prochandle\n");
-      return NULL;
-   }
+  if ((ph->core = (struct core_data*) calloc(1, sizeof(struct core_data))) == NULL) {
+    free(ph);
+    print_debug("can't allocate ps_prochandle\n");
+    return NULL;
+  }
 
-   // initialize ph
-   ph->ops = &core_ops;
-   ph->core->core_fd   = -1;
-   ph->core->exec_fd   = -1;
-   ph->core->interp_fd = -1;
+  // initialize ph
+  ph->ops = &core_ops;
+  ph->core->core_fd   = -1;
+  ph->core->exec_fd   = -1;
+  ph->core->interp_fd = -1;
 
-   // open the core file
-   if ((ph->core->core_fd = open(core_file, O_RDONLY)) < 0) {
-      print_debug("can't open core file\n");
-      goto err;
-   }
+  // open the core file
+  if ((ph->core->core_fd = open(core_file, O_RDONLY)) < 0) {
+    print_debug("can't open core file\n");
+    goto err;
+  }
 
-   // read core file ELF header
-   if (read_elf_header(ph->core->core_fd, &core_ehdr) != true || core_ehdr.e_type != ET_CORE) {
-      print_debug("core file is not a valid ELF ET_CORE file\n");
-      goto err;
-   }
+  // read core file ELF header
+  if (read_elf_header(ph->core->core_fd, &core_ehdr) != true || core_ehdr.e_type != ET_CORE) {
+    print_debug("core file is not a valid ELF ET_CORE file\n");
+    goto err;
+  }
+
+  if ((ph->core->exec_fd = open(exec_file, O_RDONLY)) < 0) {
+    print_debug("can't open executable file\n");
+    goto err;
+  }
 
-   if ((ph->core->exec_fd = open(exec_file, O_RDONLY)) < 0) {
-      print_debug("can't open executable file\n");
-      goto err;
-   }
+  if (read_elf_header(ph->core->exec_fd, &exec_ehdr) != true || exec_ehdr.e_type != ET_EXEC) {
+    print_debug("executable file is not a valid ELF ET_EXEC file\n");
+    goto err;
+  }
+
+  // process core file segments
+  if (read_core_segments(ph, &core_ehdr) != true) {
+    goto err;
+  }
 
-   if (read_elf_header(ph->core->exec_fd, &exec_ehdr) != true || exec_ehdr.e_type != ET_EXEC) {
-      print_debug("executable file is not a valid ELF ET_EXEC file\n");
-      goto err;
-   }
+  // process exec file segments
+  if (read_exec_segments(ph, &exec_ehdr) != true) {
+    goto err;
+  }
 
-   // process core file segments
-   if (read_core_segments(ph, &core_ehdr) != true)
-      goto err;
-
-   // process exec file segments
-   if (read_exec_segments(ph, &exec_ehdr) != true)
-      goto err;
+  // exec file is also treated like a shared object for symbol search
+  if (add_lib_info_fd(ph, exec_file, ph->core->exec_fd,
+                      (uintptr_t)0 + find_base_address(ph->core->exec_fd, &exec_ehdr)) == NULL) {
+    goto err;
+  }
 
-   // exec file is also treated like a shared object for symbol search
-   if (add_lib_info_fd(ph, exec_file, ph->core->exec_fd,
-                       (uintptr_t)0 + find_base_address(ph->core->exec_fd, &exec_ehdr)) == NULL)
-      goto err;
+  // allocate and sort maps into map_array, we need to do this
+  // here because read_shared_lib_info needs to read from debuggee
+  // address space
+  if (sort_map_array(ph) != true) {
+    goto err;
+  }
 
-   // allocate and sort maps into map_array, we need to do this
-   // here because read_shared_lib_info needs to read from debuggee
-   // address space
-   if (sort_map_array(ph) != true)
-      goto err;
+  if (read_shared_lib_info(ph) != true) {
+    goto err;
+  }
 
-   if (read_shared_lib_info(ph) != true)
-      goto err;
+  // sort again because we have added more mappings from shared objects
+  if (sort_map_array(ph) != true) {
+    goto err;
+  }
 
-   // sort again because we have added more mappings from shared objects
-   if (sort_map_array(ph) != true)
-      goto err;
+  if (init_classsharing_workaround(ph) != true) {
+    goto err;
+  }
 
-   if (init_classsharing_workaround(ph) != true)
-      goto err;
-
-   return ph;
+  return ph;
 
 err:
-   Prelease(ph);
-   return NULL;
+  Prelease(ph);
+  return NULL;
 }
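The reindented core_lookup in this file performs a binary search over map_array, which sort_map_array keeps ordered by base virtual address. A self-contained sketch of that search, with map_entry as an illustrative stand-in for the real map_info:

#include <stddef.h>
#include <stdint.h>

typedef struct {
  uintptr_t vaddr;   /* base virtual address */
  size_t    memsz;   /* mapping size         */
} map_entry;

/* Returns the entry whose [vaddr, vaddr + memsz) range covers addr,
 * or NULL. `maps` must be sorted by vaddr, as sort_map_array
 * guarantees for ph->core->map_array; the class-share fallback is
 * omitted here. */
static map_entry* lookup(map_entry** maps, int num, uintptr_t addr) {
  int lo = 0, hi = num - 1, mid;
  map_entry* mp;

  if (num <= 0) return NULL;
  while (hi - lo > 1) {
    mid = (lo + hi) / 2;
    if (addr >= maps[mid]->vaddr) {
      lo = mid;
    } else {
      hi = mid;
    }
  }
  mp = (addr < maps[hi]->vaddr) ? maps[lo] : maps[hi];
  if (addr >= mp->vaddr && addr < mp->vaddr + mp->memsz) {
    return mp;
  }
  return NULL;
}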
--- a/agent/src/os/linux/ps_proc.c	Wed Oct 16 10:52:41 2013 +0200
+++ b/agent/src/os/linux/ps_proc.c	Tue Nov 05 17:38:04 2013 -0800
@@ -27,6 +27,8 @@
 #include <string.h>
 #include <signal.h>
 #include <errno.h>
+#include <sys/types.h>
+#include <sys/wait.h>
 #include <sys/ptrace.h>
 #include "libproc_impl.h"
 
--- a/agent/src/os/linux/salibelf.c	Wed Oct 16 10:52:41 2013 +0200
+++ b/agent/src/os/linux/salibelf.c	Tue Nov 05 17:38:04 2013 -0800
@@ -25,6 +25,7 @@
 #include "salibelf.h"
 #include <stdlib.h>
 #include <unistd.h>
+#include <string.h>
 
 extern void print_debug(const char*,...);
 
--- a/agent/src/os/linux/symtab.c	Wed Oct 16 10:52:41 2013 +0200
+++ b/agent/src/os/linux/symtab.c	Tue Nov 05 17:38:04 2013 -0800
@@ -305,7 +305,7 @@
 
   unsigned char *bytes
     = (unsigned char*)(note+1) + note->n_namesz;
-  unsigned char *filename
+  char *filename
     = (build_id_to_debug_filename (note->n_descsz, bytes));
 
   fd = pathmap_open(filename);
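The symtab.c hunk above fixes a pointer-signedness mismatch: build_id_to_debug_filename evidently yields a char* path and pathmap_open consumes one, so declaring the intermediate as unsigned char* forced an incompatible-pointer-type conversion. A hypothetical reduction of that class of error (both function names below are stand-ins):

#include <stddef.h>

static char* make_debug_name(void) { return "libjvm.debug"; }
static int open_by_path(char* path) { return path != NULL; }

int main(void) {
  /* unsigned char* name = make_debug_name();
   *   -> compilers reject/warn: char* and unsigned char* are
   *      incompatible pointer types in C. */
  char* name = make_debug_name();
  return open_by_path(name) ? 0 : 1;
}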
--- a/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java	Tue Nov 05 17:38:04 2013 -0800
@@ -1213,6 +1213,7 @@
                 }
                 HotSpotTypeDataBase db = (HotSpotTypeDataBase)agent.getTypeDataBase();
                 if (t.countTokens() == 1) {
+                    String name = t.nextToken();
                     out.println("intConstant " + name + " " + db.lookupIntConstant(name));
                 } else if (t.countTokens() == 0) {
                     Iterator i = db.getIntConstants();
@@ -1235,6 +1236,7 @@
                 }
                 HotSpotTypeDataBase db = (HotSpotTypeDataBase)agent.getTypeDataBase();
                 if (t.countTokens() == 1) {
+                    String name = t.nextToken();
                     out.println("longConstant " + name + " " + db.lookupLongConstant(name));
                 } else if (t.countTokens() == 0) {
                     Iterator i = db.getLongConstants();
--- a/agent/src/share/classes/sun/jvm/hotspot/asm/Disassembler.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/agent/src/share/classes/sun/jvm/hotspot/asm/Disassembler.java	Tue Nov 05 17:38:04 2013 -0800
@@ -67,6 +67,13 @@
          String libname = "hsdis";
          String arch = System.getProperty("os.arch");
          if (os.lastIndexOf("Windows", 0) != -1) {
+            if (arch.equals("x86")) {
+               libname +=  "-i386";
+            } else if (arch.equals("amd64")) {
+               libname +=  "-amd64";
+            } else {
+               libname +=  "-" + arch;
+            }
             path.append(sep + "bin" + sep);
             libname += ".dll";
          } else if (os.lastIndexOf("SunOS", 0) != -1) {
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/bsd/BsdAddress.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/bsd/BsdAddress.java	Tue Nov 05 17:38:04 2013 -0800
@@ -81,7 +81,7 @@
 
     public Address getCompKlassAddressAt(long offset)
             throws UnalignedAddressException, UnmappedAddressException {
-        return debugger.readCompOopAddress(addr + offset);
+        return debugger.readCompKlassAddress(addr + offset);
     }
 
     //
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1HeapRegionTable.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc_implementation.g1;
+
+import java.util.Iterator;
+import java.util.Observable;
+import java.util.Observer;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.runtime.VMObject;
+import sun.jvm.hotspot.runtime.VMObjectFactory;
+import sun.jvm.hotspot.types.AddressField;
+import sun.jvm.hotspot.types.CIntegerField;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+
+// Mirror class for G1HeapRegionTable. It's essentially an index -> HeapRegion map.
+
+public class G1HeapRegionTable extends VMObject {
+    // HeapRegion** _base;
+    static private AddressField baseField;
+    // uint _length;
+    static private CIntegerField lengthField;
+    // HeapRegion** _biased_base
+    static private AddressField biasedBaseField;
+    // size_t _bias
+    static private CIntegerField biasField;
+    // uint _shift_by
+    static private CIntegerField shiftByField;
+
+    static {
+        VM.registerVMInitializedObserver(new Observer() {
+                public void update(Observable o, Object data) {
+                    initialize(VM.getVM().getTypeDataBase());
+                }
+            });
+    }
+
+    static private synchronized void initialize(TypeDataBase db) {
+        Type type = db.lookupType("G1HeapRegionTable");
+
+        baseField = type.getAddressField("_base");
+        lengthField = type.getCIntegerField("_length");
+        biasedBaseField = type.getAddressField("_biased_base");
+        biasField = type.getCIntegerField("_bias");
+        shiftByField = type.getCIntegerField("_shift_by");
+    }
+
+    private HeapRegion at(long index) {
+        Address arrayAddr = baseField.getValue(addr);
+        // Offset of &_base[index]
+        long offset = index * VM.getVM().getAddressSize();
+        Address regionAddr = arrayAddr.getAddressAt(offset);
+        return (HeapRegion) VMObjectFactory.newObject(HeapRegion.class,
+                                                      regionAddr);
+    }
+
+    public long length() {
+        return lengthField.getValue(addr);
+    }
+
+    public long bias() {
+        return biasField.getValue(addr);
+    }
+
+    public long shiftBy() {
+        return shiftByField.getValue(addr);
+    }
+
+    private class HeapRegionIterator implements Iterator<HeapRegion> {
+        private long index;
+        private long length;
+
+        @Override
+        public boolean hasNext() { return index < length; }
+
+        @Override
+        public HeapRegion next() { return at(index++);    }
+
+        @Override
+        public void remove()     { /* not supported */    }
+
+        HeapRegionIterator(Address addr) {
+            index = 0;
+            length = length();
+        }
+    }
+
+    public Iterator<HeapRegion> heapRegionIterator() {
+        return new HeapRegionIterator(addr);
+    }
+
+    public G1HeapRegionTable(Address addr) {
+        super(addr);
+    }
+}
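
A hypothetical usage sketch for the new mirror class, assuming an already-attached SA session (so VM.getVM() resolves) and a G1HeapRegionTable obtained from the reworked HeapRegionSeq mirror below:

    // table: hypothetical G1HeapRegionTable from HeapRegionSeq.regions()
    Iterator<HeapRegion> it = table.heapRegionIterator();
    long visited = 0;
    while (it.hasNext()) {
        HeapRegion r = it.next();   // index -> HeapRegion via _base[index]
        visited++;
    }
    System.out.println(visited + " of " + table.length() + " regions visited");
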
--- a/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSeq.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSeq.java	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,13 +37,11 @@
 import sun.jvm.hotspot.types.Type;
 import sun.jvm.hotspot.types.TypeDataBase;
 
-// Mirror class for HeapRegionSeq. It's essentially an index -> HeapRegion map.
+// Mirror class for HeapRegionSeq. It essentially encapsulates the G1HeapRegionTable.
 
 public class HeapRegionSeq extends VMObject {
-    // HeapRegion** _regions;
-    static private AddressField regionsField;
-    // uint _length;
-    static private CIntegerField lengthField;
+    // G1HeapRegionTable _regions
+    static private long regionsFieldOffset;
 
     static {
         VM.registerVMInitializedObserver(new Observer() {
@@ -56,44 +54,21 @@
     static private synchronized void initialize(TypeDataBase db) {
         Type type = db.lookupType("HeapRegionSeq");
 
-        regionsField = type.getAddressField("_regions");
-        lengthField = type.getCIntegerField("_length");
+        regionsFieldOffset = type.getField("_regions").getOffset();
     }
 
-    private HeapRegion at(long index) {
-        Address arrayAddr = regionsField.getValue(addr);
-        // Offset of &_region[index]
-        long offset = index * VM.getVM().getAddressSize();
-        Address regionAddr = arrayAddr.getAddressAt(offset);
-        return (HeapRegion) VMObjectFactory.newObject(HeapRegion.class,
-                                                      regionAddr);
+    private G1HeapRegionTable regions() {
+        Address regionsAddr = addr.addOffsetTo(regionsFieldOffset);
+        return (G1HeapRegionTable) VMObjectFactory.newObject(G1HeapRegionTable.class,
+                                                             regionsAddr);
     }
 
     public long length() {
-        return lengthField.getValue(addr);
-    }
-
-    private class HeapRegionIterator implements Iterator<HeapRegion> {
-        private long index;
-        private long length;
-
-        @Override
-        public boolean hasNext() { return index < length; }
-
-        @Override
-        public HeapRegion next() { return at(index++);    }
-
-        @Override
-        public void remove()     { /* not supported */    }
-
-        HeapRegionIterator(Address addr) {
-            index = 0;
-            length = length();
-        }
+        return regions().length();
     }
 
     public Iterator<HeapRegion> heapRegionIterator() {
-        return new HeapRegionIterator(addr);
+        return regions().heapRegionIterator();
     }
 
     public HeapRegionSeq(Address addr) {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/memory/ProtectionDomainCacheEntry.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.memory;
+
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.types.*;
+
+public class ProtectionDomainCacheEntry extends VMObject {
+  private static sun.jvm.hotspot.types.OopField protectionDomainField;
+
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) {
+    Type type = db.lookupType("ProtectionDomainCacheEntry");
+    protectionDomainField = type.getOopField("_literal");
+  }
+
+  public ProtectionDomainCacheEntry(Address addr) {
+    super(addr);
+  }
+
+  public Oop protectionDomain() {
+    return VM.getVM().getObjectHeap().newOop(protectionDomainField.getValue(addr));
+  }
+}
--- a/agent/src/share/classes/sun/jvm/hotspot/memory/ProtectionDomainEntry.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/agent/src/share/classes/sun/jvm/hotspot/memory/ProtectionDomainEntry.java	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,7 +32,7 @@
 
 public class ProtectionDomainEntry extends VMObject {
   private static AddressField nextField;
-  private static sun.jvm.hotspot.types.OopField protectionDomainField;
+  private static AddressField pdCacheField;
 
   static {
     VM.registerVMInitializedObserver(new Observer() {
@@ -46,7 +46,7 @@
     Type type = db.lookupType("ProtectionDomainEntry");
 
     nextField = type.getAddressField("_next");
-    protectionDomainField = type.getOopField("_protection_domain");
+    pdCacheField = type.getAddressField("_pd_cache");
   }
 
   public ProtectionDomainEntry(Address addr) {
@@ -54,10 +54,12 @@
   }
 
   public ProtectionDomainEntry next() {
-    return (ProtectionDomainEntry) VMObjectFactory.newObject(ProtectionDomainEntry.class, addr);
+    return (ProtectionDomainEntry) VMObjectFactory.newObject(ProtectionDomainEntry.class, nextField.getValue(addr));
   }
 
   public Oop protectionDomain() {
-    return VM.getVM().getObjectHeap().newOop(protectionDomainField.getValue(addr));
+    ProtectionDomainCacheEntry pd_cache = (ProtectionDomainCacheEntry)
+      VMObjectFactory.newObject(ProtectionDomainCacheEntry.class, pdCacheField.getValue(addr));
+    return pd_cache.protectionDomain();
   }
 }
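
Note that the second hunk also repairs next(): it previously rewrapped the entry's own address instead of dereferencing _next, so a list walk would spin on the first element forever. A hedged walking sketch (head is a hypothetical first entry; this assumes SA's VMObjectFactory yields null for a null _next, which terminates the loop):

    for (ProtectionDomainEntry e = head; e != null; e = e.next()) {
        Oop pd = e.protectionDomain();   // now resolved through _pd_cache
        System.out.println(pd);
    }
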
--- a/agent/src/share/classes/sun/jvm/hotspot/memory/SymbolTable.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/agent/src/share/classes/sun/jvm/hotspot/memory/SymbolTable.java	Tue Nov 05 17:38:04 2013 -0800
@@ -44,12 +44,10 @@
   private static synchronized void initialize(TypeDataBase db) {
     Type type = db.lookupType("SymbolTable");
     theTableField  = type.getAddressField("_the_table");
-    symbolTableSize = db.lookupIntConstant("SymbolTable::symbol_table_size").intValue();
   }
 
   // Fields
   private static AddressField theTableField;
-  private static int symbolTableSize;
 
   // Accessors
   public static SymbolTable getTheTable() {
@@ -57,10 +55,6 @@
     return (SymbolTable) VMObjectFactory.newObject(SymbolTable.class, tmp);
   }
 
-  public static int getSymbolTableSize() {
-    return symbolTableSize;
-  }
-
   public SymbolTable(Address addr) {
     super(addr);
   }
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/ConstMethod.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/ConstMethod.java	Tue Nov 05 17:38:04 2013 -0800
@@ -51,6 +51,7 @@
   private static int HAS_GENERIC_SIGNATURE;
   private static int HAS_METHOD_ANNOTATIONS;
   private static int HAS_PARAMETER_ANNOTATIONS;
+  private static int HAS_METHOD_PARAMETERS;
   private static int HAS_DEFAULT_ANNOTATIONS;
   private static int HAS_TYPE_ANNOTATIONS;
 
@@ -70,6 +71,7 @@
     HAS_GENERIC_SIGNATURE     = db.lookupIntConstant("ConstMethod::_has_generic_signature").intValue();
     HAS_METHOD_ANNOTATIONS    = db.lookupIntConstant("ConstMethod::_has_method_annotations").intValue();
     HAS_PARAMETER_ANNOTATIONS = db.lookupIntConstant("ConstMethod::_has_parameter_annotations").intValue();
+    HAS_METHOD_PARAMETERS     = db.lookupIntConstant("ConstMethod::_has_method_parameters").intValue();
     HAS_DEFAULT_ANNOTATIONS   = db.lookupIntConstant("ConstMethod::_has_default_annotations").intValue();
     HAS_TYPE_ANNOTATIONS      = db.lookupIntConstant("ConstMethod::_has_type_annotations").intValue();
 
@@ -85,6 +87,9 @@
     // start of byte code
     bytecodeOffset = type.getSize();
 
+    type                       = db.lookupType("MethodParametersElement");
+    methodParametersElementSize = type.getSize();
+
     type                       = db.lookupType("CheckedExceptionElement");
     checkedExceptionElementSize = type.getSize();
 
@@ -113,7 +118,7 @@
 
   // start of bytecode
   private static long bytecodeOffset;
-
+  private static long methodParametersElementSize;
   private static long checkedExceptionElementSize;
   private static long localVariableTableElementSize;
   private static long exceptionTableElementSize;
@@ -387,6 +392,10 @@
     return ret;
   }
 
+  private boolean hasMethodParameters() {
+    return (getFlags() & HAS_METHOD_PARAMETERS) != 0;
+  }
+
   private boolean hasGenericSignature() {
     return (getFlags() & HAS_GENERIC_SIGNATURE) != 0;
   }
@@ -442,11 +451,41 @@
     return offsetOfLastU2Element();
   }
 
-  private long offsetOfCheckedExceptionsLength() {
+  private long offsetOfMethodParametersLength() {
+    if (Assert.ASSERTS_ENABLED) {
+      Assert.that(hasMethodParameters(), "should only be called if table is present");
+    }
     return hasGenericSignature() ? offsetOfLastU2Element() - sizeofShort :
                                    offsetOfLastU2Element();
   }
 
+  private int getMethodParametersLength() {
+    if (hasMethodParameters())
+      return (int) getAddress().getCIntegerAt(offsetOfMethodParametersLength(), 2, true);
+    else
+      return 0;
+  }
+
+  // Offset of start of method parameters
+  private long offsetOfMethodParameters() {
+    long offset = offsetOfMethodParametersLength();
+    long length = getMethodParametersLength();
+    if (Assert.ASSERTS_ENABLED) {
+      Assert.that(length > 0, "should only be called if method parameter information is present");
+    }
+    offset -= length * methodParametersElementSize;
+    return offset;
+  }
+
+  private long offsetOfCheckedExceptionsLength() {
+    if (hasMethodParameters())
+      return offsetOfMethodParameters() - sizeofShort;
+    else {
+      return hasGenericSignature() ? offsetOfLastU2Element() - sizeofShort :
+                                     offsetOfLastU2Element();
+    }
+  }
+
   private int getCheckedExceptionsLength() {
     if (hasCheckedExceptions()) {
       return (int) getAddress().getCIntegerAt(offsetOfCheckedExceptionsLength(), 2, true);
@@ -496,6 +535,8 @@
       return offsetOfExceptionTable() - sizeofShort;
     } else if (hasCheckedExceptions()) {
       return offsetOfCheckedExceptions() - sizeofShort;
+    } else if (hasMethodParameters()) {
+      return offsetOfMethodParameters() - sizeofShort;
     } else {
       return hasGenericSignature() ? offsetOfLastU2Element() - sizeofShort :
                                      offsetOfLastU2Element();
@@ -526,6 +567,8 @@
     }
     if (hasCheckedExceptions()) {
       return offsetOfCheckedExceptions() - sizeofShort;
+    } else if (hasMethodParameters()) {
+      return offsetOfMethodParameters() - sizeofShort;
     } else {
       return hasGenericSignature() ? offsetOfLastU2Element() - sizeofShort :
                                      offsetOfLastU2Element();
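
The optional tables of a ConstMethod sit at its end and are located by walking backwards from the last u2 element, so each offsetOf...() above has to step over every table that follows it. A worked example with hypothetical numbers (all sizes and offsets assumed purely for illustration):

    long offsetOfLastU2Element = 100;      // assumed end-of-object u2 slot
    long sizeofShort = 2;
    long methodParametersElementSize = 4;  // assumed element size
    long mpLength = 3;                     // three MethodParameters entries

    // generic signature index occupies the last u2 slot at offset 100
    long mpLengthOffset = offsetOfLastU2Element - sizeofShort;                // 98
    long mpOffset = mpLengthOffset - mpLength * methodParametersElementSize;  // 86
    long checkedExceptionsLengthOffset = mpOffset - sizeofShort;              // 84
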
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java	Tue Nov 05 17:38:04 2013 -0800
@@ -354,9 +354,16 @@
   public boolean   getIsMarkedDependent()   { return                isMarkedDependent.getValue(this) != 0; }
   public long      getVtableLen()           { return                vtableLen.getValue(this); }
   public long      getItableLen()           { return                itableLen.getValue(this); }
-  public Symbol    getGenericSignature()    { return                getConstants().getSymbolAt(genericSignatureIndex.getValue(this)); }
   public long      majorVersion()           { return                majorVersion.getValue(this); }
   public long      minorVersion()           { return                minorVersion.getValue(this); }
+  public Symbol    getGenericSignature()    {
+    long index = genericSignatureIndex.getValue(this);
+    if (index != 0) {
+      return getConstants().getSymbolAt(index);
+    } else {
+      return null;
+    }
+  }
 
   // "size helper" == instance size in words
   public long getSizeHelper() {
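
getGenericSignature() now returns null when the generic signature index is 0 instead of performing a bogus constant-pool lookup at index 0, so callers need a null check. A hedged usage sketch (ik is a hypothetical InstanceKlass):

    Symbol sig = ik.getGenericSignature();
    System.out.println(sig != null ? sig.asString() : "<no generic signature>");
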
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java	Tue Nov 05 17:38:04 2013 -0800
@@ -134,15 +134,13 @@
      private String type;
      private String name;
      private Address addr;
-     private String kind;
-     private int origin;
+     private int flags;
 
-     private Flag(String type, String name, Address addr, String kind, int origin) {
+     private Flag(String type, String name, Address addr, int flags) {
         this.type = type;
         this.name = name;
         this.addr = addr;
-        this.kind = kind;
-        this.origin = origin;
+        this.flags = flags;
      }
 
      public String getType() {
@@ -157,12 +155,8 @@
         return addr;
      }
 
-     public String getKind() {
-        return kind;
-     }
-
      public int getOrigin() {
-        return origin;
+        return flags & 0xF;  // XXX can we get the mask bits from somewhere?
      }
 
      public boolean isBool() {
@@ -173,8 +167,7 @@
         if (Assert.ASSERTS_ENABLED) {
            Assert.that(isBool(), "not a bool flag!");
         }
-        return addr.getCIntegerAt(0, boolType.getSize(), boolType.isUnsigned())
-               != 0;
+        return addr.getCIntegerAt(0, boolType.getSize(), boolType.isUnsigned()) != 0;
      }
 
      public boolean isIntx() {
@@ -792,7 +785,7 @@
 
   public boolean isCompressedKlassPointersEnabled() {
     if (compressedKlassPointersEnabled == null) {
-        Flag flag = getCommandLineFlag("UseCompressedKlassPointers");
+        Flag flag = getCommandLineFlag("UseCompressedClassPointers");
         compressedKlassPointersEnabled = (flag == null) ? Boolean.FALSE:
              (flag.getBool()? Boolean.TRUE: Boolean.FALSE);
     }
@@ -843,11 +836,10 @@
 
     Address flagAddr = flagType.getAddressField("flags").getValue();
 
-    AddressField typeFld = flagType.getAddressField("type");
-    AddressField nameFld = flagType.getAddressField("name");
-    AddressField addrFld = flagType.getAddressField("addr");
-    AddressField kindFld = flagType.getAddressField("kind");
-    CIntField originFld = new CIntField(flagType.getCIntegerField("origin"), 0);
+    AddressField typeFld = flagType.getAddressField("_type");
+    AddressField nameFld = flagType.getAddressField("_name");
+    AddressField addrFld = flagType.getAddressField("_addr");
+    CIntField flagsFld = new CIntField(flagType.getCIntegerField("_flags"), 0);
 
     long flagSize = flagType.getSize(); // sizeof(Flag)
 
@@ -856,9 +848,8 @@
       String type = CStringUtilities.getString(typeFld.getValue(flagAddr));
       String name = CStringUtilities.getString(nameFld.getValue(flagAddr));
       Address addr = addrFld.getValue(flagAddr);
-      String kind = CStringUtilities.getString(kindFld.getValue(flagAddr));
-      int origin = (int)originFld.getValue(flagAddr);
-      commandLineFlags[f] = new Flag(type, name, addr, kind, origin);
+      int flags = (int)flagsFld.getValue(flagAddr);
+      commandLineFlags[f] = new Flag(type, name, addr, flags);
       flagAddr = flagAddr.addOffsetTo(flagSize);
     }
 
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/ClassLoaderStats.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/ClassLoaderStats.java	Tue Nov 05 17:38:04 2013 -0800
@@ -51,8 +51,7 @@
 
    public static void main(String[] args) {
       ClassLoaderStats cls = new ClassLoaderStats();
-      cls.start(args);
-      cls.stop();
+      cls.execute(args);
    }
 
    private static class ClassData {
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/FinalizerInfo.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/FinalizerInfo.java	Tue Nov 05 17:38:04 2013 -0800
@@ -54,8 +54,7 @@
 
     public static void main(String[] args) {
         FinalizerInfo finfo = new FinalizerInfo();
-        finfo.start(args);
-        finfo.stop();
+        finfo.execute(args);
     }
 
     public void run() {
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/FlagDumper.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/FlagDumper.java	Tue Nov 05 17:38:04 2013 -0800
@@ -54,7 +54,6 @@
 
    public static void main(String[] args) {
       FlagDumper fd = new FlagDumper();
-      fd.start(args);
-      fd.stop();
+      fd.execute(args);
    }
 }
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/HeapDumper.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/HeapDumper.java	Tue Nov 05 17:38:04 2013 -0800
@@ -80,8 +80,7 @@
         }
 
         HeapDumper dumper = new HeapDumper(file);
-        dumper.start(args);
-        dumper.stop();
+        dumper.execute(args);
     }
 
 }
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java	Tue Nov 05 17:38:04 2013 -0800
@@ -46,8 +46,7 @@
 
    public static void main(String[] args) {
       HeapSummary hs = new HeapSummary();
-      hs.start(args);
-      hs.stop();
+      hs.execute(args);
    }
 
    public void run() {
@@ -66,18 +65,18 @@
       printGCAlgorithm(flagMap);
       System.out.println();
       System.out.println("Heap Configuration:");
-      printValue("MinHeapFreeRatio   = ", getFlagValue("MinHeapFreeRatio", flagMap));
-      printValue("MaxHeapFreeRatio   = ", getFlagValue("MaxHeapFreeRatio", flagMap));
-      printValMB("MaxHeapSize        = ", getFlagValue("MaxHeapSize", flagMap));
-      printValMB("NewSize            = ", getFlagValue("NewSize", flagMap));
-      printValMB("MaxNewSize         = ", getFlagValue("MaxNewSize", flagMap));
-      printValMB("OldSize            = ", getFlagValue("OldSize", flagMap));
-      printValue("NewRatio           = ", getFlagValue("NewRatio", flagMap));
-      printValue("SurvivorRatio      = ", getFlagValue("SurvivorRatio", flagMap));
-      printValMB("MetaspaceSize      = ", getFlagValue("MetaspaceSize", flagMap));
-      printValMB("ClassMetaspaceSize = ", getFlagValue("ClassMetaspaceSize", flagMap));
-      printValMB("MaxMetaspaceSize   = ", getFlagValue("MaxMetaspaceSize", flagMap));
-      printValMB("G1HeapRegionSize   = ", HeapRegion.grainBytes());
+      printValue("MinHeapFreeRatio         = ", getFlagValue("MinHeapFreeRatio", flagMap));
+      printValue("MaxHeapFreeRatio         = ", getFlagValue("MaxHeapFreeRatio", flagMap));
+      printValMB("MaxHeapSize              = ", getFlagValue("MaxHeapSize", flagMap));
+      printValMB("NewSize                  = ", getFlagValue("NewSize", flagMap));
+      printValMB("MaxNewSize               = ", getFlagValue("MaxNewSize", flagMap));
+      printValMB("OldSize                  = ", getFlagValue("OldSize", flagMap));
+      printValue("NewRatio                 = ", getFlagValue("NewRatio", flagMap));
+      printValue("SurvivorRatio            = ", getFlagValue("SurvivorRatio", flagMap));
+      printValMB("MetaspaceSize            = ", getFlagValue("MetaspaceSize", flagMap));
+      printValMB("CompressedClassSpaceSize = ", getFlagValue("CompressedClassSpaceSize", flagMap));
+      printValMB("MaxMetaspaceSize         = ", getFlagValue("MaxMetaspaceSize", flagMap));
+      printValMB("G1HeapRegionSize         = ", HeapRegion.grainBytes());
 
       System.out.println();
       System.out.println("Heap Usage:");
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/JInfo.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/JInfo.java	Tue Nov 05 17:38:04 2013 -0800
@@ -134,8 +134,7 @@
         }
 
         JInfo jinfo = new JInfo(mode);
-        jinfo.start(args);
-        jinfo.stop();
+        jinfo.execute(args);
     }
 
     private void printVMFlags() {
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/JMap.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/JMap.java	Tue Nov 05 17:38:04 2013 -0800
@@ -136,7 +136,9 @@
                         mode = MODE_HEAP_GRAPH_GXL;
                     } else {
                         System.err.println("unknown heap format:" + format);
-                        return;
+
+                        // Exit with error status
+                        System.exit(1);
                     }
                 } else {
                     copyArgs = false;
@@ -153,8 +155,7 @@
         }
 
         JMap jmap = new JMap(mode);
-        jmap.start(args);
-        jmap.stop();
+        jmap.execute(args);
     }
 
     public boolean writeHeapHprofBin(String fileName) {
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/JSnap.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/JSnap.java	Tue Nov 05 17:38:04 2013 -0800
@@ -64,7 +64,6 @@
 
     public static void main(String[] args) {
         JSnap js = new JSnap();
-        js.start(args);
-        js.stop();
+        js.execute(args);
     }
 }
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/JStack.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/JStack.java	Tue Nov 05 17:38:04 2013 -0800
@@ -89,8 +89,7 @@
         }
 
         JStack jstack = new JStack(mixedMode, concurrentLocks);
-        jstack.start(args);
-        jstack.stop();
+        jstack.execute(args);
     }
 
     private boolean mixedMode;
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/ObjectHistogram.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/ObjectHistogram.java	Tue Nov 05 17:38:04 2013 -0800
@@ -61,7 +61,6 @@
 
    public static void main(String[] args) {
       ObjectHistogram oh = new ObjectHistogram();
-      oh.start(args);
-      oh.stop();
+      oh.execute(args);
    }
 }
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/PMap.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/PMap.java	Tue Nov 05 17:38:04 2013 -0800
@@ -69,7 +69,6 @@
 
    public static void main(String[] args) throws Exception {
       PMap t = new PMap();
-      t.start(args);
-      t.stop();
+      t.execute(args);
    }
 }
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/PStack.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/PStack.java	Tue Nov 05 17:38:04 2013 -0800
@@ -182,8 +182,7 @@
 
    public static void main(String[] args) throws Exception {
       PStack t = new PStack();
-      t.start(args);
-      t.stop();
+      t.execute(args);
    }
 
    // -- Internals only below this point
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/StackTrace.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/StackTrace.java	Tue Nov 05 17:38:04 2013 -0800
@@ -137,8 +137,7 @@
 
    public static void main(String[] args) {
       StackTrace st = new StackTrace();
-      st.start(args);
-      st.stop();
+      st.execute(args);
    }
 
    private boolean verbose;
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/SysPropsDumper.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/SysPropsDumper.java	Tue Nov 05 17:38:04 2013 -0800
@@ -58,7 +58,6 @@
 
    public static void main(String[] args) {
       SysPropsDumper pd = new SysPropsDumper();
-      pd.start(args);
-      pd.stop();
+      pd.execute(args);
    }
 }
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/Tool.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/Tool.java	Tue Nov 05 17:38:04 2013 -0800
@@ -26,6 +26,7 @@
 
 import java.io.PrintStream;
 import java.util.Hashtable;
+
 import sun.jvm.hotspot.*;
 import sun.jvm.hotspot.runtime.*;
 import sun.jvm.hotspot.debugger.*;
@@ -105,26 +106,44 @@
 
       public static void main(String[] args) {
          <derived class> obj = new <derived class>;
-         obj.start(args);
+         obj.execute(args);
       }
 
    */
 
-   protected void stop() {
+   protected void execute(String[] args) {
+       int returnStatus = 1;
+
+       try {
+           returnStatus = start(args);
+       } finally {
+           stop();
+       }
+
+       // Exit with 0 or 1
+       System.exit(returnStatus);
+   }
+
+   public void stop() {
       if (agent != null) {
          agent.detach();
       }
    }
 
-   protected void start(String[] args) {
+   private int start(String[] args) {
+
       if ((args.length < 1) || (args.length > 2)) {
          usage();
-         return;
+         return 1;
       }
 
       // Attempt to handle -h or -help or some invalid flag
-      if (args[0].startsWith("-")) {
+      if (args[0].startsWith("-h")) {
           usage();
+          return 0;
+      } else if (args[0].startsWith("-")) {
+          usage();
+          return 1;
       }
 
       PrintStream err = System.err;
@@ -154,6 +173,7 @@
 
         default:
            usage();
+           return 1;
       }
 
       agent = new HotSpotAgent();
@@ -191,15 +211,16 @@
              break;
         }
         if (e.getMessage() != null) {
-          err.print(e.getMessage());
+          err.println(e.getMessage());
           e.printStackTrace();
         }
         err.println();
-        return;
+        return 1;
       }
 
       err.println("Debugger attached successfully.");
       startInternal();
+      return 0;
    }
 
    // When using an existing JVMDebugger.
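
With start()/stop() folded into execute(), a subclass main() becomes a single call and the process exit status reflects whether the attach and run succeeded. A hedged minimal subclass sketch (HelloTool is hypothetical; run() is the hook Tool subclasses already implement):

    import sun.jvm.hotspot.tools.Tool;

    public class HelloTool extends Tool {
        public void run() {
            System.out.println("attached to target VM");
        }

        public static void main(String[] args) {
            // attaches, calls run(), detaches, then exits with 0 or 1
            new HelloTool().execute(args);
        }
    }
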
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassDump.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassDump.java	Tue Nov 05 17:38:04 2013 -0800
@@ -177,7 +177,6 @@
     public static void main(String[] args) {
 
         ClassDump cd = new ClassDump();
-        cd.start(args);
-        cd.stop();
+        cd.execute(args);
     }
 }
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/soql/JSDB.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/soql/JSDB.java	Tue Nov 05 17:38:04 2013 -0800
@@ -42,8 +42,7 @@
 
     public static void main(String[] args) {
         JSDB jsdb = new JSDB();
-        jsdb.start(args);
-        jsdb.stop();
+        jsdb.execute(args);
     }
 
     public void run() {
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/soql/SOQL.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/soql/SOQL.java	Tue Nov 05 17:38:04 2013 -0800
@@ -40,8 +40,7 @@
 public class SOQL extends Tool {
    public static void main(String[] args) {
       SOQL soql = new SOQL();
-      soql.start(args);
-      soql.stop();
+      soql.execute(args);
    }
 
    public SOQL() {
--- a/agent/src/share/classes/sun/jvm/hotspot/utilities/AbstractHeapGraphWriter.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/AbstractHeapGraphWriter.java	Tue Nov 05 17:38:04 2013 -0800
@@ -59,6 +59,7 @@
 
                     public boolean doObj(Oop oop) {
                         try {
+                            writeHeapRecordPrologue();
                             if (oop instanceof TypeArray) {
                                 writePrimitiveArray((TypeArray)oop);
                             } else if (oop instanceof ObjArray) {
@@ -97,6 +98,7 @@
                                 // not-a-Java-visible oop
                                 writeInternalObject(oop);
                             }
+                            writeHeapRecordEpilogue();
                         } catch (IOException exp) {
                             throw new RuntimeException(exp);
                         }
@@ -416,6 +418,12 @@
     protected void writeHeapFooter() throws IOException {
     }
 
+    protected void writeHeapRecordPrologue() throws IOException {
+    }
+
+    protected void writeHeapRecordEpilogue() throws IOException {
+    }
+
     // HeapVisitor, OopVisitor methods can't throw any non-runtime
     // exception. But, derived class write methods (which are called
     // from visitor callbacks) may throw IOException. Hence, we throw
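
The two empty hooks turn the object walk into a template method: the base class brackets every record with an overridable prologue/epilogue pair, which is how the hprof writer below opens and closes heap dump segments. A minimal sketch of that shape (every name other than the two hooks is hypothetical):

    import java.io.IOException;

    abstract class RecordWriter {
        protected void writeHeapRecordPrologue() throws IOException { } // no-op default
        protected void writeHeapRecordEpilogue() throws IOException { } // no-op default

        final void emit(Object obj) throws IOException {
            writeHeapRecordPrologue();   // subclass may start a new segment here
            writeBody(obj);
            writeHeapRecordEpilogue();   // subclass may close a filled segment here
        }

        abstract void writeBody(Object obj) throws IOException;
    }
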
--- a/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapHprofBinWriter.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapHprofBinWriter.java	Tue Nov 05 17:38:04 2013 -0800
@@ -44,7 +44,7 @@
  * WARNING: This format is still under development, and is subject to
  * change without notice.
  *
- * header    "JAVA PROFILE 1.0.1" (0-terminated)
+ * header    "JAVA PROFILE 1.0.1" or "JAVA PROFILE 1.0.2" (0-terminated)
  * u4        size of identifiers. Identifiers are used to represent
  *            UTF8 strings, objects, stack traces, etc. They usually
  *            have the same size as host pointers. For example, on
@@ -292,11 +292,34 @@
  *                          0x00000002: cpu sampling on/off
  *                u2        stack trace depth
  *
+ *
+ * When the header is "JAVA PROFILE 1.0.2", a heap dump can optionally
+ * be generated as a sequence of heap dump segments. This sequence is
+ * terminated by an end record. The additional tags allowed by format
+ * "JAVA PROFILE 1.0.2" are:
+ *
+ * HPROF_HEAP_DUMP_SEGMENT  denotes a heap dump segment
+ *
+ *               [heap dump sub-records]*
+ *               The same sub-record types allowed by HPROF_HEAP_DUMP
+ *
+ * HPROF_HEAP_DUMP_END      denotes the end of a heap dump
+ *
  */
 
 public class HeapHprofBinWriter extends AbstractHeapGraphWriter {
+
+    // The heap size threshold used to determine if segmented format
+    // ("JAVA PROFILE 1.0.2") should be used.
+    private static final long HPROF_SEGMENTED_HEAP_DUMP_THRESHOLD = 2L * 0x40000000;
+
+    // The approximate size of a heap segment. Used to calculate when to create
+    // a new segment.
+    private static final long HPROF_SEGMENTED_HEAP_DUMP_SEGMENT_SIZE = 1L * 0x40000000;
+
     // hprof binary file header
-    private static final String HPROF_HEADER = "JAVA PROFILE 1.0.1";
+    private static final String HPROF_HEADER_1_0_1 = "JAVA PROFILE 1.0.1";
+    private static final String HPROF_HEADER_1_0_2 = "JAVA PROFILE 1.0.2";
 
     // constants in enum HprofTag
     private static final int HPROF_UTF8             = 0x01;
@@ -312,6 +335,10 @@
     private static final int HPROF_CPU_SAMPLES      = 0x0D;
     private static final int HPROF_CONTROL_SETTINGS = 0x0E;
 
+    // 1.0.2 record types
+    private static final int HPROF_HEAP_DUMP_SEGMENT = 0x1C;
+    private static final int HPROF_HEAP_DUMP_END     = 0x2C;
+
     // Heap dump constants
     // constants in enum HprofGcTag
     private static final int HPROF_GC_ROOT_UNKNOWN       = 0xFF;
@@ -352,11 +379,9 @@
     private static final int JVM_SIGNATURE_ARRAY   = '[';
     private static final int JVM_SIGNATURE_CLASS   = 'L';
 
-
     public synchronized void write(String fileName) throws IOException {
         // open file stream and create buffered data output stream
-        FileOutputStream fos = new FileOutputStream(fileName);
-        FileChannel chn = fos.getChannel();
+        fos = new FileOutputStream(fileName);
         out = new DataOutputStream(new BufferedOutputStream(fos));
 
         VM vm = VM.getVM();
@@ -385,6 +410,9 @@
         FLOAT_SIZE = objectHeap.getFloatSize();
         DOUBLE_SIZE = objectHeap.getDoubleSize();
 
+        // Check whether we should dump the heap as segments
+        useSegmentedHeapDump = vm.getUniverse().heap().used() > HPROF_SEGMENTED_HEAP_DUMP_THRESHOLD;
+
         // hprof bin format header
         writeFileHeader();
 
@@ -394,38 +422,87 @@
 
         // hprof UTF-8 symbols section
         writeSymbols();
+
         // HPROF_LOAD_CLASS records for all classes
         writeClasses();
 
-        // write heap data now
-        out.writeByte((byte)HPROF_HEAP_DUMP);
-        out.writeInt(0); // relative timestamp
-
-        // remember position of dump length, we will fixup
-        // length later - hprof format requires length.
-        out.flush();
-        long dumpStart = chn.position();
-
-        // write dummy length of 0 and we'll fix it later.
-        out.writeInt(0);
-
         // write CLASS_DUMP records
         writeClassDumpRecords();
 
         // this will write heap data into the buffer stream
         super.write();
 
+        // flush buffer stream.
+        out.flush();
+
+        // Fill in final length
+        fillInHeapRecordLength();
+
+        if (useSegmentedHeapDump) {
+            // Write heap segment-end record
+            out.writeByte((byte) HPROF_HEAP_DUMP_END);
+            out.writeInt(0);
+            out.writeInt(0);
+        }
+
         // flush buffer stream and throw it.
         out.flush();
         out = null;
 
+        // close the file stream
+        fos.close();
+    }
+
+    @Override
+    protected void writeHeapRecordPrologue() throws IOException {
+        if (currentSegmentStart == 0) {
+            // write the heap data header, using the segmented format
+            // if the heap size warrants it
+            out.writeByte((byte) (useSegmentedHeapDump ? HPROF_HEAP_DUMP_SEGMENT
+                    : HPROF_HEAP_DUMP));
+            out.writeInt(0);
+
+            // remember position of dump length, we will fixup
+            // length later - hprof format requires length.
+            out.flush();
+            currentSegmentStart = fos.getChannel().position();
+
+            // write dummy length of 0 and we'll fix it later.
+            out.writeInt(0);
+        }
+    }
+
+    @Override
+    protected void writeHeapRecordEpilogue() throws IOException {
+        if (useSegmentedHeapDump) {
+            out.flush();
+            if ((fos.getChannel().position() - currentSegmentStart - 4) >= HPROF_SEGMENTED_HEAP_DUMP_SEGMENT_SIZE) {
+                fillInHeapRecordLength();
+                currentSegmentStart = 0;
+            }
+        }
+    }
+
+    private void fillInHeapRecordLength() throws IOException {
+
         // now get current position to calculate length
-        long dumpEnd = chn.position();
+        long dumpEnd = fos.getChannel().position();
+
         // calculate length of heap data
-        int dumpLen = (int) (dumpEnd - dumpStart - 4);
+        long dumpLenLong = (dumpEnd - currentSegmentStart - 4L);
+
+        // Check the length boundary; overflow could happen but is _very_ unlikely
+        if (dumpLenLong >= (4L * 0x40000000)) {
+            throw new RuntimeException("Heap segment size overflow.");
+        }
+
+        // Save the current position
+        long currentPosition = fos.getChannel().position();
 
         // seek the position to write length
-        chn.position(dumpStart);
+        fos.getChannel().position(currentSegmentStart);
+
+        int dumpLen = (int) dumpLenLong;
 
         // write length as integer
         fos.write((dumpLen >>> 24) & 0xFF);
@@ -433,8 +510,8 @@
         fos.write((dumpLen >>> 8) & 0xFF);
         fos.write((dumpLen >>> 0) & 0xFF);
 
-        // close the file stream
-        fos.close();
+        // Reset to the previously saved position
+        fos.getChannel().position(currentPosition);
     }
 
     private void writeClassDumpRecords() throws IOException {
@@ -443,7 +520,9 @@
             sysDict.allClassesDo(new SystemDictionary.ClassVisitor() {
                             public void visit(Klass k) {
                                 try {
+                                    writeHeapRecordPrologue();
                                     writeClassDumpRecord(k);
+                                    writeHeapRecordEpilogue();
                                 } catch (IOException e) {
                                     throw new RuntimeException(e);
                                 }
@@ -884,7 +963,12 @@
     // writes hprof binary file header
     private void writeFileHeader() throws IOException {
         // version string
-        out.writeBytes(HPROF_HEADER);
+        if (useSegmentedHeapDump) {
+            out.writeBytes(HPROF_HEADER_1_0_2);
+        }
+        else {
+            out.writeBytes(HPROF_HEADER_1_0_1);
+        }
         out.writeByte((byte)'\0');
 
         // write identifier size. we use pointers as identifiers.
@@ -976,6 +1060,7 @@
     private static final int EMPTY_FRAME_DEPTH = -1;
 
     private DataOutputStream out;
+    private FileOutputStream fos;
     private Debugger dbg;
     private ObjectHeap objectHeap;
     private SymbolTable symTbl;
@@ -983,6 +1068,10 @@
     // oopSize of the debuggee
     private int OBJ_ID_SIZE;
 
+    // Added for hprof file format 1.0.2 support
+    private boolean useSegmentedHeapDump;
+    private long currentSegmentStart;
+
     private long BOOLEAN_BASE_OFFSET;
     private long BYTE_BASE_OFFSET;
     private long CHAR_BASE_OFFSET;
@@ -1005,6 +1094,7 @@
     private static class ClassData {
         int instSize;
         List fields;
+
         ClassData(int instSize, List fields) {
             this.instSize = instSize;
             this.fields = fields;
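
The core mechanic above is the deferred length fixup: each HPROF_HEAP_DUMP or HPROF_HEAP_DUMP_SEGMENT record is written with a dummy u4 length that is patched once the body size is known. A hedged standalone sketch of that mechanic (file name and record body are illustrative):

    import java.io.RandomAccessFile;

    public class LengthFixupDemo {
        public static void main(String[] args) throws Exception {
            try (RandomAccessFile f = new RandomAccessFile("demo.hprof", "rw")) {
                f.writeByte(0x1C);               // HPROF_HEAP_DUMP_SEGMENT tag
                f.writeInt(0);                   // relative timestamp
                long lengthPos = f.getFilePointer();
                f.writeInt(0);                   // dummy length, patched below
                long bodyStart = f.getFilePointer();
                f.write(new byte[64]);           // stand-in for dump sub-records
                long bodyEnd = f.getFilePointer();
                f.seek(lengthPos);               // seek back to the length field
                f.writeInt((int) (bodyEnd - bodyStart));
                f.seek(bodyEnd);                 // restore position for next record
            }
        }
    }
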
--- a/make/Makefile	Wed Oct 16 10:52:41 2013 +0200
+++ b/make/Makefile	Tue Nov 05 17:38:04 2013 -0800
@@ -361,6 +361,11 @@
 	$(install-file)
 $(EXPORT_SERVER_DIR)/64/%.diz:    		$(C2_BUILD_DIR)/%.diz
 	$(install-file)
+# MacOS X
+$(EXPORT_JRE_LIB_ARCH_DIR)/%.dSYM: 		$(C2_BUILD_DIR)/%.dSYM
+	$(install-dir)
+$(EXPORT_SERVER_DIR)/%.dSYM:       		$(C2_BUILD_DIR)/%.dSYM
+	$(install-dir)
 endif
 
 # Client (C1)
@@ -406,6 +411,11 @@
 	$(install-file)
 $(EXPORT_CLIENT_DIR)/64/%.diz:    		$(C1_BUILD_DIR)/%.diz
 	$(install-file)
+# MacOS X
+$(EXPORT_JRE_LIB_ARCH_DIR)/%.dSYM: 		$(C1_BUILD_DIR)/%.dSYM
+	$(install-dir)
+$(EXPORT_CLIENT_DIR)/%.dSYM:       		$(C1_BUILD_DIR)/%.dSYM
+	$(install-dir)
 endif
 
 # Minimal1
@@ -451,6 +461,7 @@
 	$(install-file)
 $(EXPORT_MINIMAL_DIR)/64/%.diz:			$(MINIMAL1_BUILD_DIR)/%.diz
 	$(install-file)
+# MacOS X does not support Minimal1 config
 endif
 
 # Zero
@@ -473,6 +484,11 @@
 	$(install-file)
 $(EXPORT_SERVER_DIR)/%.diz:			$(ZERO_BUILD_DIR)/%.diz
 	$(install-file)
+# MacOS X
+$(EXPORT_JRE_LIB_ARCH_DIR)/%.dSYM: 		$(ZERO_BUILD_DIR)/%.dSYM
+	$(install-dir)
+$(EXPORT_SERVER_DIR)/%.dSYM:			$(ZERO_BUILD_DIR)/%.dSYM
+	$(install-dir)
 endif
 
 # Core
@@ -517,6 +533,11 @@
 	$(install-file)
 $(EXPORT_SERVER_DIR)/%.diz:			$(SHARK_BUILD_DIR)/%.diz
 	$(install-file)
+# MacOS X
+$(EXPORT_JRE_LIB_ARCH_DIR)/%.dSYM: 		$(SHARK_BUILD_DIR)/%.dSYM
+	$(install-dir)
+$(EXPORT_SERVER_DIR)/%.dSYM:			$(SHARK_BUILD_DIR)/%.dSYM
+	$(install-dir)
 endif
 
 $(EXPORT_INCLUDE_DIR)/%: $(HS_SRC_DIR)/share/vm/code/%
--- a/make/bsd/Makefile	Wed Oct 16 10:52:41 2013 +0200
+++ b/make/bsd/Makefile	Tue Nov 05 17:38:04 2013 -0800
@@ -204,6 +204,7 @@
 BUILDTREE_MAKE    = $(GAMMADIR)/make/$(OSNAME)/makefiles/buildtree.make
 BUILDTREE_VARS    = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OSNAME) SRCARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) LIBRARY_SUFFIX=$(LIBRARY_SUFFIX)
 BUILDTREE_VARS   += HOTSPOT_RELEASE_VERSION=$(HOTSPOT_RELEASE_VERSION) HOTSPOT_BUILD_VERSION=$(HOTSPOT_BUILD_VERSION) JRE_RELEASE_VERSION=$(JRE_RELEASE_VERSION)
+BUILDTREE_VARS   += ENABLE_FULL_DEBUG_SYMBOLS=$(ENABLE_FULL_DEBUG_SYMBOLS) OBJCOPY=$(OBJCOPY) STRIP_POLICY=$(STRIP_POLICY) ZIP_DEBUGINFO_FILES=$(ZIP_DEBUGINFO_FILES) ZIPEXE=$(ZIPEXE)
 
 BUILDTREE         = $(MAKE) -f $(BUILDTREE_MAKE) $(BUILDTREE_VARS)
 
@@ -337,9 +338,11 @@
 
 # Doc target.  This is the same for all build options.
 #     Hence create a docs directory beside ...$(ARCH)_[...]
+# We specify 'BUILD_FLAVOR=product' so that the proper
+# ENABLE_FULL_DEBUG_SYMBOLS value is used.
 docs: checks
 	$(QUIETLY) mkdir -p $(SUBDIR_DOCS)
-	$(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/makefiles/jvmti.make $(MFLAGS) $(BUILDTREE_VARS) JvmtiOutDir=$(SUBDIR_DOCS) jvmtidocs
+	$(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/makefiles/jvmti.make $(MFLAGS) $(BUILDTREE_VARS) JvmtiOutDir=$(SUBDIR_DOCS) BUILD_FLAVOR=product jvmtidocs
 
 # Synonyms for win32-like targets.
 compiler2:  debug product
--- a/make/bsd/makefiles/buildtree.make	Wed Oct 16 10:52:41 2013 +0200
+++ b/make/bsd/makefiles/buildtree.make	Tue Nov 05 17:38:04 2013 -0800
@@ -261,6 +261,16 @@
 	echo "$(call gamma-path,commonsrc,os/posix/vm)"; \
 	[ -n "$(CFLAGS_BROWSE)" ] && \
 	    echo && echo "CFLAGS_BROWSE = $(CFLAGS_BROWSE)"; \
+	[ -n "$(ENABLE_FULL_DEBUG_SYMBOLS)" ] && \
+	    echo && echo "ENABLE_FULL_DEBUG_SYMBOLS = $(ENABLE_FULL_DEBUG_SYMBOLS)"; \
+	[ -n "$(OBJCOPY)" ] && \
+	    echo && echo "OBJCOPY = $(OBJCOPY)"; \
+	[ -n "$(STRIP_POLICY)" ] && \
+	    echo && echo "STRIP_POLICY = $(STRIP_POLICY)"; \
+	[ -n "$(ZIP_DEBUGINFO_FILES)" ] && \
+	    echo && echo "ZIP_DEBUGINFO_FILES = $(ZIP_DEBUGINFO_FILES)"; \
+	[ -n "$(ZIPEXE)" ] && \
+	    echo && echo "ZIPEXE = $(ZIPEXE)"; \
 	[ -n "$(HOTSPOT_EXTRA_SYSDEFS)" ] && \
 	    echo && \
 	    echo "HOTSPOT_EXTRA_SYSDEFS\$$(HOTSPOT_EXTRA_SYSDEFS) = $(HOTSPOT_EXTRA_SYSDEFS)" && \
--- a/make/bsd/makefiles/defs.make	Wed Oct 16 10:52:41 2013 +0200
+++ b/make/bsd/makefiles/defs.make	Tue Nov 05 17:38:04 2013 -0800
@@ -136,10 +136,127 @@
   endif
 endif
 
+OS_VENDOR:=$(shell uname -s)
+
+# determine if HotSpot is being built in JDK6 or earlier version
+JDK6_OR_EARLIER=0
+ifeq "$(shell expr \( '$(JDK_MAJOR_VERSION)' != '' \& '$(JDK_MINOR_VERSION)' != '' \& '$(JDK_MICRO_VERSION)' != '' \))" "1"
+  # if the longer variable names (newer build style) are set, then check those
+  ifeq "$(shell expr \( $(JDK_MAJOR_VERSION) = 1 \& $(JDK_MINOR_VERSION) \< 7 \))" "1"
+    JDK6_OR_EARLIER=1
+  endif
+else
+  # the longer variables aren't set so check the shorter variable names
+  ifeq "$(shell expr \( '$(JDK_MAJOR_VER)' = 1 \& '$(JDK_MINOR_VER)' \< 7 \))" "1"
+    JDK6_OR_EARLIER=1
+  endif
+endif
+
+ifeq ($(JDK6_OR_EARLIER),0)
+  # Full Debug Symbols is supported on JDK7 or newer.
+  # The Full Debug Symbols (FDS) default for BUILD_FLAVOR == product
+  # builds is enabled with debug info files ZIP'ed to save space. For
+  # BUILD_FLAVOR != product builds, FDS is always enabled; after all, a
+  # debug build without debug info isn't very useful.
+  # The ZIP_DEBUGINFO_FILES option only has meaning when FDS is enabled.
+  #
+  # If you invoke a build with FULL_DEBUG_SYMBOLS=0, then FDS will be
+  # disabled for a BUILD_FLAVOR == product build.
+  #
+  # Note: Use of a different variable name for the FDS override option
+  # versus the FDS enabled check is intentional (FULL_DEBUG_SYMBOLS
+  # versus ENABLE_FULL_DEBUG_SYMBOLS). For auto build systems that pass
+  # in options via environment variables, use of distinct variables
+  # prevents strange behaviours. For example, in a BUILD_FLAVOR !=
+  # product build, the FULL_DEBUG_SYMBOLS environment variable will be
+  # 0, but the ENABLE_FULL_DEBUG_SYMBOLS make variable will be 1. If
+  # the same variable name is used, then different values can be picked
+  # up by different parts of the build. Just to be clear, we only need
+  # two variable names because the incoming option value can be
+  # overridden in some situations, e.g., a BUILD_FLAVOR != product
+  # build.
+
+  # Due to the multiple sub-make processes that occur this logic gets
+  # executed multiple times. We reduce the noise by at least checking that
+  # BUILD_FLAVOR has been set.
+  ifneq ($(BUILD_FLAVOR),)
+    ifeq ($(BUILD_FLAVOR), product)
+      FULL_DEBUG_SYMBOLS ?= 1
+      ENABLE_FULL_DEBUG_SYMBOLS = $(FULL_DEBUG_SYMBOLS)
+    else
+      # debug variants always get Full Debug Symbols (if available)
+      ENABLE_FULL_DEBUG_SYMBOLS = 1
+    endif
+    _JUNK_ := $(shell \
+      echo >&2 "INFO: ENABLE_FULL_DEBUG_SYMBOLS=$(ENABLE_FULL_DEBUG_SYMBOLS)")
+    # since objcopy is optional, we set ZIP_DEBUGINFO_FILES later
+
+    ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+      ifeq ($(OS_VENDOR), Darwin)
+          # MacOS X doesn't use OBJCOPY or STRIP_POLICY
+          OBJCOPY=
+          STRIP_POLICY=
+          ZIP_DEBUGINFO_FILES ?= 1
+      else
+        # Default OBJCOPY comes from GNU Binutils on BSD
+        ifeq ($(CROSS_COMPILE_ARCH),)
+          DEF_OBJCOPY=/usr/bin/objcopy
+        else
+          # Assume objcopy is part of the cross-compilation toolset
+          ifneq ($(ALT_COMPILER_PATH),)
+            DEF_OBJCOPY=$(ALT_COMPILER_PATH)/objcopy
+          endif
+        endif
+        OBJCOPY=$(shell test -x $(DEF_OBJCOPY) && echo $(DEF_OBJCOPY))
+        ifneq ($(ALT_OBJCOPY),)
+          _JUNK_ := $(shell echo >&2 "INFO: ALT_OBJCOPY=$(ALT_OBJCOPY)")
+          OBJCOPY=$(shell test -x $(ALT_OBJCOPY) && echo $(ALT_OBJCOPY))
+        endif
+
+        ifeq ($(OBJCOPY),)
+          _JUNK_ := $(shell \
+            echo >&2 "INFO: no objcopy cmd found so cannot create .debuginfo" \
+              "files. You may need to set ALT_OBJCOPY.")
+          ENABLE_FULL_DEBUG_SYMBOLS=0
+          _JUNK_ := $(shell \
+            echo >&2 "INFO:" \
+              "ENABLE_FULL_DEBUG_SYMBOLS=$(ENABLE_FULL_DEBUG_SYMBOLS)")
+        else
+          _JUNK_ := $(shell \
+            echo >&2 "INFO: $(OBJCOPY) cmd found so will create .debuginfo" \
+              "files.")
+
+          # Library stripping policies for .debuginfo configs:
+          #   all_strip - strips everything from the library
+          #   min_strip - strips most stuff from the library; leaves
+          #               minimum symbols
+          #   no_strip  - does not strip the library at all
+          #
+          # Oracle security policy requires "all_strip". A waiver was
+          # granted on 2011.09.01 that permits using "min_strip" in the
+          # Java JDK and Java JRE.
+          #
+          # Currently, STRIP_POLICY is only used when Full Debug Symbols
+          # is enabled.
+          #
+          STRIP_POLICY ?= min_strip
+
+          _JUNK_ := $(shell \
+            echo >&2 "INFO: STRIP_POLICY=$(STRIP_POLICY)")
+
+          ZIP_DEBUGINFO_FILES ?= 1
+        endif
+
+        _JUNK_ := $(shell \
+          echo >&2 "INFO: ZIP_DEBUGINFO_FILES=$(ZIP_DEBUGINFO_FILES)")
+      endif
+    endif # ENABLE_FULL_DEBUG_SYMBOLS=1
+  endif # BUILD_FLAVOR
+endif # JDK6_OR_EARLIER
+
 JDK_INCLUDE_SUBDIR=bsd
 
 # Library suffix
-OS_VENDOR:=$(shell uname -s)
 ifeq ($(OS_VENDOR),Darwin)
   LIBRARY_SUFFIX=dylib
 else
@@ -150,6 +267,19 @@
 
 # client and server subdirectories have symbolic links to ../libjsig.so
 EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.$(LIBRARY_SUFFIX)
+
+ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+  ifeq ($(ZIP_DEBUGINFO_FILES),1)
+      EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.diz
+  else
+    ifeq ($(OS_VENDOR), Darwin)
+        EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.$(LIBRARY_SUFFIX).dSYM
+    else
+        EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.debuginfo
+    endif
+  endif
+endif
+
 EXPORT_SERVER_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/server
 EXPORT_CLIENT_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/client
 EXPORT_MINIMAL_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/minimal
@@ -157,34 +287,76 @@
 ifeq ($(findstring true, $(JVM_VARIANT_SERVER) $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK)), true)
   EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt
   EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.$(LIBRARY_SUFFIX)
+
+  ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+    ifeq ($(ZIP_DEBUGINFO_FILES),1)
+        EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.diz
+    else
+      ifeq ($(OS_VENDOR), Darwin)
+          EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.$(LIBRARY_SUFFIX).dSYM
+      else
+          EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.debuginfo
+      endif
+    endif
+  endif
 endif
 
 ifeq ($(JVM_VARIANT_CLIENT),true)
   EXPORT_LIST += $(EXPORT_CLIENT_DIR)/Xusage.txt
   EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.$(LIBRARY_SUFFIX)
+
+  ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+    ifeq ($(ZIP_DEBUGINFO_FILES),1)
+        EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.diz
+    else
+      ifeq ($(OS_VENDOR), Darwin)
+          EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.$(LIBRARY_SUFFIX).dSYM
+      else
+          EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.debuginfo
+      endif
+    endif
+  endif
 endif
 
 ifeq ($(JVM_VARIANT_MINIMAL1),true)
   EXPORT_LIST += $(EXPORT_MINIMAL_DIR)/Xusage.txt
   EXPORT_LIST += $(EXPORT_MINIMAL_DIR)/libjvm.$(LIBRARY_SUFFIX)
-
-  ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
-    ifeq ($(ZIP_DEBUGINFO_FILES),1)
-	EXPORT_LIST += $(EXPORT_MINIMAL_DIR)/libjvm.diz
-    else
-	EXPORT_LIST += $(EXPORT_MINIMAL_DIR)/libjvm.debuginfo
-    endif
-  endif
 endif
 
 # Serviceability Binaries
 # No SA Support for PPC, IA64, ARM or zero
 ADD_SA_BINARIES/x86   = $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX) \
                         $(EXPORT_LIB_DIR)/sa-jdi.jar
+
+ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+  ifeq ($(ZIP_DEBUGINFO_FILES),1)
+      ADD_SA_BINARIES/x86 += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.diz
+  else
+    ifeq ($(OS_VENDOR), Darwin)
+        ADD_SA_BINARIES/x86 += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX).dSYM
+    else
+        ADD_SA_BINARIES/x86 += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.debuginfo
+    endif
+  endif
+endif
+
 ADD_SA_BINARIES/sparc = $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX) \
                         $(EXPORT_LIB_DIR)/sa-jdi.jar
 ADD_SA_BINARIES/universal = $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX) \
                             $(EXPORT_LIB_DIR)/sa-jdi.jar
+
+ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+  ifeq ($(ZIP_DEBUGINFO_FILES),1)
+      ADD_SA_BINARIES/universal += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.diz
+  else
+    ifeq ($(OS_VENDOR), Darwin)
+        ADD_SA_BINARIES/universal += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX).dSYM
+    else
+        ADD_SA_BINARIES/universal += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.debuginfo
+    endif
+  endif
+endif
+
 ADD_SA_BINARIES/ppc   =
 ADD_SA_BINARIES/ia64  =
 ADD_SA_BINARIES/arm   =
@@ -225,6 +397,19 @@
     # Files to simply copy in place
     UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/server/Xusage.txt
     UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/client/Xusage.txt
+    ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+      ifeq ($(ZIP_DEBUGINFO_FILES),1)
+          UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/server/libjvm.diz
+          UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/client/libjvm.diz
+          UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/libjsig.diz
+          UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/libsaproc.diz
+      else
+          UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/server/libjvm.$(LIBRARY_SUFFIX).dSYM
+          UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/client/libjvm.$(LIBRARY_SUFFIX).dSYM
+          UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/libjsig.$(LIBRARY_SUFFIX).dSYM
+          UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/libsaproc.$(LIBRARY_SUFFIX).dSYM
+      endif
+    endif
 
   endif
 endif
--- a/make/bsd/makefiles/dtrace.make	Wed Oct 16 10:52:41 2013 +0200
+++ b/make/bsd/makefiles/dtrace.make	Tue Nov 05 17:38:04 2013 -0800
@@ -39,9 +39,15 @@
 JVM_DB = libjvm_db
 LIBJVM_DB = libjvm_db.dylib
 
+LIBJVM_DB_DEBUGINFO   = libjvm_db.dylib.dSYM
+LIBJVM_DB_DIZ         = libjvm_db.diz
+
 JVM_DTRACE = jvm_dtrace
 LIBJVM_DTRACE = libjvm_dtrace.dylib
 
+LIBJVM_DTRACE_DEBUGINFO   = libjvm_dtrace.dylib.dSYM
+LIBJVM_DTRACE_DIZ         = libjvm_dtrace.diz
+
 JVMOFFS = JvmOffsets
 JVMOFFS.o = $(JVMOFFS).o
 GENOFFS = generate$(JVMOFFS)
@@ -76,21 +82,87 @@
 # Making 64/libjvm_db.so: 64-bit version of libjvm_db.so which handles 32-bit libjvm.so
 ifneq ("${ISA}","${BUILDARCH}")
 
-XLIBJVM_DB = 64/$(LIBJVM_DB)
-XLIBJVM_DTRACE = 64/$(LIBJVM_DTRACE)
+XLIBJVM_DIR = 64
+XLIBJVM_DB = $(XLIBJVM_DIR)/$(LIBJVM_DB)
+XLIBJVM_DTRACE = $(XLIBJVM_DIR)/$(LIBJVM_DTRACE)
 XARCH = $(subst sparcv9,v9,$(shell echo $(ISA)))
 
+XLIBJVM_DB_DEBUGINFO       = $(XLIBJVM_DIR)/$(LIBJVM_DB_DEBUGINFO)
+XLIBJVM_DB_DIZ             = $(XLIBJVM_DIR)/$(LIBJVM_DB_DIZ)
+XLIBJVM_DTRACE_DEBUGINFO   = $(XLIBJVM_DIR)/$(LIBJVM_DTRACE_DEBUGINFO)
+XLIBJVM_DTRACE_DIZ         = $(XLIBJVM_DIR)/$(LIBJVM_DTRACE_DIZ)
+
 $(XLIBJVM_DB): $(DTRACE_SRCDIR)/$(JVM_DB).c $(JVMOFFS).h $(LIBJVM_DB_MAPFILE)
 	@echo Making $@
-	$(QUIETLY) mkdir -p 64/ ; \
+	$(QUIETLY) mkdir -p $(XLIBJVM_DIR) ; \
 	$(CC) $(SYMFLAG) -xarch=$(XARCH) -D$(TYPE) -I. -I$(GENERATED) \
 		$(SHARED_FLAG) $(LFLAGS_JVM_DB) -o $@ $(DTRACE_SRCDIR)/$(JVM_DB).c #-lc
+ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+  ifeq ($(OS_VENDOR), Darwin)
+	$(DSYMUTIL) $@
+    ifeq ($(ZIP_DEBUGINFO_FILES),1)
+        # Do this part in the $(XLIBJVM_DIR) subdir so $(XLIBJVM_DIR)
+        # is not in the archived name:
+	( cd $(XLIBJVM_DIR) && $(ZIPEXE) -q -r -y $(LIBJVM_DB_DIZ) $(LIBJVM_DB_DEBUGINFO) )
+	$(RM) -r $(XLIBJVM_DB_DEBUGINFO)
+    endif
+  else
+	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(XLIBJVM_DB_DEBUGINFO)
+        # Do this part in the $(XLIBJVM_DIR) subdir so $(XLIBJVM_DIR)
+        # is not in the link name:
+        $(QUIETLY) ( cd $(XLIBJVM_DIR) && $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DB_DEBUGINFO) $(LIBJVM_DB) )
+    ifeq ($(STRIP_POLICY),all_strip)
+	$(QUIETLY) $(STRIP) $@
+    else
+      ifeq ($(STRIP_POLICY),min_strip)
+	$(QUIETLY) $(STRIP) -x $@
+      # implied else here is no stripping at all
+      endif
+    endif
+    ifeq ($(ZIP_DEBUGINFO_FILES),1)
+        # Do this part in the $(XLIBJVM_DIR) subdir so $(XLIBJVM_DIR)
+        # is not in the archived name:
+	( cd $(XLIBJVM_DIR) && $(ZIPEXE) -q -y $(LIBJVM_DB_DIZ) $(LIBJVM_DB_DEBUGINFO) )
+	$(RM) $(XLIBJVM_DB_DEBUGINFO)
+    endif
+  endif
+endif
 
 $(XLIBJVM_DTRACE): $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE)
 	@echo Making $@
-	$(QUIETLY) mkdir -p 64/ ; \
+	$(QUIETLY) mkdir -p $(XLIBJVM_DIR) ; \
 	$(CC) $(SYMFLAG) -xarch=$(XARCH) -D$(TYPE) -I. \
 		$(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c #-lc -lthread -ldoor
+ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+  ifeq ($(OS_VENDOR), Darwin)
+	$(DSYMUTIL) $@
+    ifeq ($(ZIP_DEBUGINFO_FILES),1)
+        # Do this part in the $(XLIBJVM_DIR) subdir so $(XLIBJVM_DIR)
+        # is not in the archived name:
+	( cd $(XLIBJVM_DIR) && $(ZIPEXE) -q -r -y $(LIBJVM_DTRACE_DIZ) $(LIBJVM_DTRACE_DEBUGINFO) )
+	$(RM) -r $(XLIBJVM_DTRACE_DEBUGINFO)
+    endif
+  else
+	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(XLIBJVM_DTRACE_DEBUGINFO)
+        # Do this part in the $(XLIBJVM_DIR) subdir so $(XLIBJVM_DIR)
+        # is not in the link name:
+	( cd $(XLIBJVM_DIR) && $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DTRACE_DEBUGINFO) $(LIBJVM_DTRACE) )
+    ifeq ($(STRIP_POLICY),all_strip)
+	$(QUIETLY) $(STRIP) $@
+    else
+      ifeq ($(STRIP_POLICY),min_strip)
+	$(QUIETLY) $(STRIP) -x $@
+      # implied else here is no stripping at all
+      endif
+    endif
+    ifeq ($(ZIP_DEBUGINFO_FILES),1)
+        # Do this part in the $(XLIBJVM_DIR) subdir so $(XLIBJVM_DIR)
+        # is not in the archived name:
+	( cd $(XLIBJVM_DIR) && $(ZIPEXE) -q -y $(LIBJVM_DTRACE_DIZ) $(LIBJVM_DTRACE_DEBUGINFO) )
+	$(RM) $(XLIBJVM_DTRACE_DEBUGINFO)
+    endif
+  endif
+endif
 
 endif # ifneq ("${ISA}","${BUILDARCH}")
 
@@ -134,11 +206,59 @@
 	@echo Making $@
 	$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) -D$(TYPE) -I. -I$(GENERATED) \
 		$(SHARED_FLAG) $(LFLAGS_JVM_DB) -o $@ $(DTRACE_SRCDIR)/$(JVM_DB).c -Wall # -lc
+ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+  ifeq ($(OS_VENDOR), Darwin)
+	$(DSYMUTIL) $@
+    ifeq ($(ZIP_DEBUGINFO_FILES),1)
+	$(ZIPEXE) -q -r -y $(LIBJVM_DB_DIZ) $(LIBJVM_DB_DEBUGINFO)
+	$(RM) -r $(LIBJVM_DB_DEBUGINFO)
+    endif
+  else
+	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJVM_DB_DEBUGINFO)
+	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DB_DEBUGINFO) $@
+    ifeq ($(STRIP_POLICY),all_strip)
+	$(QUIETLY) $(STRIP) $@
+    else
+      ifeq ($(STRIP_POLICY),min_strip)
+	$(QUIETLY) $(STRIP) -x $@
+      # implied else here is no stripping at all
+      endif
+    endif
+    ifeq ($(ZIP_DEBUGINFO_FILES),1)
+	$(ZIPEXE) -q -y $(LIBJVM_DB_DIZ) $(LIBJVM_DB_DEBUGINFO)
+	$(RM) $(LIBJVM_DB_DEBUGINFO)
+    endif
+  endif
+endif
 
 $(LIBJVM_DTRACE): $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(XLIBJVM_DTRACE) $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE)
 	@echo Making $@
 	$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) -D$(TYPE) -I.  \
 		$(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c #-lc -lthread -ldoor
+ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+  ifeq ($(OS_VENDOR), Darwin)
+	$(DSYMUTIL) $@
+    ifeq ($(ZIP_DEBUGINFO_FILES),1)
+	$(ZIPEXE) -q -r -y $(LIBJVM_DTRACE_DIZ) $(LIBJVM_DTRACE_DEBUGINFO) 
+	$(RM) -r $(LIBJVM_DTRACE_DEBUGINFO)
+    endif
+  else
+	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJVM_DTRACE_DEBUGINFO)
+	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DTRACE_DEBUGINFO) $@
+    ifeq ($(STRIP_POLICY),all_strip)
+	$(QUIETLY) $(STRIP) $@
+    else
+      ifeq ($(STRIP_POLICY),min_strip)
+	$(QUIETLY) $(STRIP) -x $@
+      # implied else here is no stripping at all
+      endif
+    endif
+    ifeq ($(ZIP_DEBUGINFO_FILES),1)
+	$(ZIPEXE) -q -y $(LIBJVM_DTRACE_DIZ) $(LIBJVM_DTRACE_DEBUGINFO) 
+	$(RM) $(LIBJVM_DTRACE_DEBUGINFO)
+    endif
+  endif
+endif
 
 #$(DTRACE).d: $(DTRACE_SRCDIR)/hotspot.d $(DTRACE_SRCDIR)/hotspot_jni.d \
 #             $(DTRACE_SRCDIR)/hs_private.d $(DTRACE_SRCDIR)/jhelper.d
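
Every non-Darwin branch in the recipes above repeats the same four-step idiom; pulled out of the surrounding conditionals it is easier to follow. A sketch assuming GNU binutils objcopy/strip and Info-ZIP's zip; the libexample.so name and the split-debug target are illustrative (note the dtrace recipes above use strip -x for min_strip, while the jsig/saproc/vm recipes later use strip -g):

# Sketch only: the ELF split-debug sequence used throughout dtrace.make.
LIB = libexample.so

split-debug: $(LIB)
	# 1. Copy just the debug sections into a companion file.
	objcopy --only-keep-debug $(LIB) $(LIB).debuginfo
	# 2. Record a .gnu_debuglink section so debuggers find the companion.
	objcopy --add-gnu-debuglink=$(LIB).debuginfo $(LIB)
	# 3. Strip according to policy (illustrated here with -g).
	strip -g $(LIB)
	# 4. Optionally archive the companion; -y stores symlinks as such.
	zip -q -y $(LIB).diz $(LIB).debuginfo
	rm $(LIB).debuginfo
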
--- a/make/bsd/makefiles/fastdebug.make	Wed Oct 16 10:52:41 2013 +0200
+++ b/make/bsd/makefiles/fastdebug.make	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -59,5 +59,5 @@
 MAPFILE = $(GAMMADIR)/make/bsd/makefiles/mapfile-vers-debug
 
 VERSION = fastdebug
-SYSDEFS += -DASSERT
+SYSDEFS += -DASSERT -DCHECK_UNHANDLED_OOPS
 PICFLAGS = DEFAULT
--- a/make/bsd/makefiles/gcc.make	Wed Oct 16 10:52:41 2013 +0200
+++ b/make/bsd/makefiles/gcc.make	Tue Nov 05 17:38:04 2013 -0800
@@ -80,9 +80,14 @@
     HOSTCC  = $(CC)
   endif
 
-  AS   = $(CC) -c -x assembler-with-cpp
+  AS   = $(CC) -c 
 endif
 
+ifeq ($(OS_VENDOR), Darwin)
+  ifeq ($(DSYMUTIL),)
+    DSYMUTIL=dsymutil
+  endif
+endif
 
 ifeq ($(USE_CLANG), true)
   CC_VER_MAJOR := $(shell $(CC) -v 2>&1 | grep version | sed "s/.*version \([0-9]*\.[0-9]*\).*/\1/" | cut -d'.' -f1)
@@ -129,16 +134,21 @@
   
     # We only use precompiled headers for the JVM build
     CFLAGS += $(VM_PCH_FLAG)
-  
-    # There are some files which don't like precompiled headers
-    # The following files are build with 'OPT_CFLAGS/NOOPT' (-O0) in the opt build.
-    # But Clang doesn't support a precompiled header which was compiled with -O3
-    # to be used in a compilation unit which uses '-O0'. We could also prepare an
-    # extra '-O0' PCH file for the opt build and use it here, but it's probably
-    # not worth the effort as long as only two files need this special handling.
+ 
+    # The following files are compiled at various optimization
+    # levels due to optimization issues encountered at the
+    # 'OPT_CFLAGS_DEFAULT' level. The Clang compiler issues a
+    # compile-time error if there is an optimization level
+    # specification skew between the PCH file and the C++ file,
+    # especially if the PCH is compiled at a higher optimization
+    # level than the C++ file. One solution might be to prepare
+    # extra optimization-level-specific PCH files for the opt
+    # build and use them here, but it's probably not worth the
+    # effort as long as only a few files need this special handling.
     PCH_FLAG/loopTransform.o = $(PCH_FLAG/NO_PCH)
     PCH_FLAG/sharedRuntimeTrig.o = $(PCH_FLAG/NO_PCH)
     PCH_FLAG/sharedRuntimeTrans.o = $(PCH_FLAG/NO_PCH)
+    PCH_FLAG/unsafe.o = $(PCH_FLAG/NO_PCH)
   
   endif
 else # ($(USE_CLANG), true)
@@ -242,7 +252,7 @@
 
 ifeq ($(USE_CLANG), true)
   # However we need to clean the code up before we can unrestrictedly enable this option with Clang
-  WARNINGS_ARE_ERRORS += -Wno-unused-value -Wno-logical-op-parentheses -Wno-parentheses-equality -Wno-parentheses
+  WARNINGS_ARE_ERRORS += -Wno-logical-op-parentheses -Wno-parentheses-equality -Wno-parentheses
   WARNINGS_ARE_ERRORS += -Wno-switch -Wno-tautological-compare
 # Not yet supported by clang in Xcode 4.6.2
 #  WARNINGS_ARE_ERRORS += -Wno-tautological-constant-out-of-range-compare
@@ -257,7 +267,7 @@
   # conversions which might affect the values. Only enable it in earlier versions.
   WARNING_FLAGS = -Wunused-function
   ifeq ($(USE_CLANG),)
-    WARNINGS_FLAGS += -Wconversion
+    WARNING_FLAGS += -Wconversion
   endif
 endif
 
@@ -306,6 +316,7 @@
 ifeq ($(USE_CLANG), true)
   ifeq ($(shell expr $(CC_VER_MAJOR) = 4 \& $(CC_VER_MINOR) = 2), 1)
     OPT_CFLAGS/loopTransform.o += $(OPT_CFLAGS/NOOPT)
+    OPT_CFLAGS/unsafe.o += -O1
   endif
 else
   # 6835796. Problem in GCC 4.3.0 with mulnode.o optimized compilation.
@@ -341,6 +352,13 @@
   LDFLAGS += -mmacosx-version-min=$(MACOSX_VERSION_MIN)
 endif
 
+
+#------------------------------------------------------------------------
+# Assembler flags
+
+# Enforce preprocessing of .s files
+ASFLAGS += -x assembler-with-cpp
+
 #------------------------------------------------------------------------
 # Linker flags
 
@@ -421,6 +439,36 @@
   ifeq ($(DEBUG_CFLAGS/$(BUILDARCH)),)
   DEBUG_CFLAGS += -gstabs
   endif
+  
+  ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+    FASTDEBUG_CFLAGS/ia64  = -g
+    FASTDEBUG_CFLAGS/amd64 = -g
+    FASTDEBUG_CFLAGS/arm   = -g
+    FASTDEBUG_CFLAGS/ppc   = -g
+    FASTDEBUG_CFLAGS += $(FASTDEBUG_CFLAGS/$(BUILDARCH))
+    ifeq ($(FASTDEBUG_CFLAGS/$(BUILDARCH)),)
+      ifeq ($(USE_CLANG), true)
+        # Clang doesn't understand -gstabs
+        FASTDEBUG_CFLAGS += -g
+      else
+        FASTDEBUG_CFLAGS += -gstabs
+      endif
+    endif
+  
+    OPT_CFLAGS/ia64  = -g
+    OPT_CFLAGS/amd64 = -g
+    OPT_CFLAGS/arm   = -g
+    OPT_CFLAGS/ppc   = -g
+    OPT_CFLAGS += $(OPT_CFLAGS/$(BUILDARCH))
+    ifeq ($(OPT_CFLAGS/$(BUILDARCH)),)
+      ifeq ($(USE_CLANG), true)
+        # Clang doesn't understand -gstabs
+        OPT_CFLAGS += -g
+      else
+        OPT_CFLAGS += -gstabs
+      endif
+    endif
+  endif
 endif
 
 # If we are building HEADLESS, pass on to VM
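
STRIP_POLICY, tested in every recipe in this patch, has three effective values: all_strip runs a bare strip, min_strip runs strip -g (or strip -x in the dtrace recipes), and any other value falls through to the "implied else" of no stripping at all. A flattened sketch of the same decision; STRIP_CMD is an illustrative name, not a variable this patch defines:

# Sketch only: the three-way STRIP_POLICY decision as one variable.
ifeq ($(STRIP_POLICY),all_strip)
  # discard the whole symbol table
  STRIP_CMD = $(STRIP) $@
else ifeq ($(STRIP_POLICY),min_strip)
  # drop debugging symbols only
  STRIP_CMD = $(STRIP) -g $@
else
  # implied else: no stripping at all
  STRIP_CMD = true
endif
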
--- a/make/bsd/makefiles/jsig.make	Wed Oct 16 10:52:41 2013 +0200
+++ b/make/bsd/makefiles/jsig.make	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -29,13 +29,21 @@
 
 ifeq ($(OS_VENDOR), Darwin)
   LIBJSIG   = lib$(JSIG).dylib
+
+  LIBJSIG_DEBUGINFO   = lib$(JSIG).dylib.dSYM
+  LIBJSIG_DIZ         = lib$(JSIG).diz
 else
   LIBJSIG   = lib$(JSIG).so
+
+  LIBJSIG_DEBUGINFO   = lib$(JSIG).debuginfo
+  LIBJSIG_DIZ         = lib$(JSIG).diz
 endif
 
 JSIGSRCDIR = $(GAMMADIR)/src/os/$(Platform_os_family)/vm
 
-DEST_JSIG  = $(JDK_LIBDIR)/$(LIBJSIG)
+DEST_JSIG           = $(JDK_LIBDIR)/$(LIBJSIG)
+DEST_JSIG_DEBUGINFO = $(JDK_LIBDIR)/$(LIBJSIG_DEBUGINFO)
+DEST_JSIG_DIZ       = $(JDK_LIBDIR)/$(LIBJSIG_DIZ)
 
 LIBJSIG_MAPFILE = $(MAKEFILES_DIR)/mapfile-vers-jsig
 
@@ -55,9 +63,42 @@
 	@echo Making signal interposition lib...
 	$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \
                          $(LFLAGS_JSIG) $(JSIG_DEBUG_CFLAGS) -o $@ $<
+ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+  ifeq ($(OS_VENDOR), Darwin)
+	$(DSYMUTIL) $@
+    ifeq ($(ZIP_DEBUGINFO_FILES),1)
+	$(ZIPEXE) -q -r -y $(LIBJSIG_DIZ) $(LIBJSIG_DEBUGINFO)
+	$(RM) -r $(LIBJSIG_DEBUGINFO)
+    endif
+  else
+	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJSIG_DEBUGINFO)
+	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJSIG_DEBUGINFO) $@
+    ifeq ($(STRIP_POLICY),all_strip)
+	$(QUIETLY) $(STRIP) $@
+    else
+      ifeq ($(STRIP_POLICY),min_strip)
+	$(QUIETLY) $(STRIP) -g $@
+      # implied else here is no stripping at all
+      endif
+    endif
+    ifeq ($(ZIP_DEBUGINFO_FILES),1)
+	$(ZIPEXE) -q -y $(LIBJSIG_DIZ) $(LIBJSIG_DEBUGINFO)
+	$(RM) $(LIBJSIG_DEBUGINFO)
+    endif
+  endif
+endif
 
 install_jsig: $(LIBJSIG)
 	@echo "Copying $(LIBJSIG) to $(DEST_JSIG)"
+ifeq ($(OS_VENDOR), Darwin)
+	$(QUIETLY) test -d $(LIBJSIG_DEBUGINFO) && \
+	    cp -f -r $(LIBJSIG_DEBUGINFO) $(DEST_JSIG_DEBUGINFO)
+else
+	$(QUIETLY) test -f $(LIBJSIG_DEBUGINFO) && \
+	    cp -f $(LIBJSIG_DEBUGINFO) $(DEST_JSIG_DEBUGINFO)
+endif
+	$(QUIETLY) test -f $(LIBJSIG_DIZ) && \
+	    cp -f $(LIBJSIG_DIZ) $(DEST_JSIG_DIZ)
 	$(QUIETLY) cp -f $(LIBJSIG) $(DEST_JSIG) && echo "Done"
 
 .PHONY: install_jsig
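
One subtlety in the install_jsig recipe just above (and in the matching install_saproc and install_jvm recipes later in this patch): each recipe line runs in its own shell, so when the optional artifact is missing, `test -f ... && cp ...` exits nonzero and make reports the recipe as failed. A sketch of a tolerant variant, shown only to illustrate the pitfall, not what this patch does; SRC_FILE and DEST_FILE are illustrative names:

# Sketch only: copy an optional artifact without failing the build
# when it is absent.
install_optional:
	$(QUIETLY) if [ -f $(SRC_FILE) ]; then \
	    cp -f $(SRC_FILE) $(DEST_FILE); \
	fi
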
--- a/make/bsd/makefiles/product.make	Wed Oct 16 10:52:41 2013 +0200
+++ b/make/bsd/makefiles/product.make	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -43,15 +43,17 @@
 SYSDEFS += -DPRODUCT
 VERSION = optimized
 
-# use -g to strip library as -x will discard its symbol table; -x is fine for
-# executables.
-ifdef CROSS_COMPILE_ARCH
-  STRIP = $(ALT_COMPILER_PATH)/strip
-else
-  STRIP = strip
+ifneq ($(OS_VENDOR), Darwin)
+  # use -g to strip library as -x will discard its symbol table; -x is fine for
+  # executables.
+  ifdef CROSS_COMPILE_ARCH
+    STRIP = $(ALT_COMPILER_PATH)/strip
+  else
+    STRIP = strip
+  endif
+  STRIP_LIBJVM = $(STRIP) -g $@ || exit 1;
+  STRIP_AOUT   = $(STRIP) -x $@ || exit 1;
+
+  # Don't strip in VM build; JDK build will strip libraries later
+  # LINK_LIB.CXX/POST_HOOK += $(STRIP_$(LINK_INTO))
 endif
-STRIP_LIBJVM = $(STRIP) -g $@ || exit 1;
-STRIP_AOUT   = $(STRIP) -x $@ || exit 1;
-
-# Don't strip in VM build; JDK build will strip libraries later
-# LINK_LIB.CXX/POST_HOOK += $(STRIP_$(LINK_INTO))
--- a/make/bsd/makefiles/saproc.make	Wed Oct 16 10:52:41 2013 +0200
+++ b/make/bsd/makefiles/saproc.make	Tue Nov 05 17:38:04 2013 -0800
@@ -28,9 +28,15 @@
 SAPROC   = saproc
 
 ifeq ($(OS_VENDOR), Darwin)
-  LIBSAPROC   = lib$(SAPROC).dylib
+  LIBSAPROC           = lib$(SAPROC).dylib
+
+  LIBSAPROC_DEBUGINFO = lib$(SAPROC).dylib.dSYM
+  LIBSAPROC_DIZ       = lib$(SAPROC).diz
 else
-  LIBSAPROC   = lib$(SAPROC).so
+  LIBSAPROC           = lib$(SAPROC).so
+
+  LIBSAPROC_DEBUGINFO = lib$(SAPROC).debuginfo
+  LIBSAPROC_DIZ       = lib$(SAPROC).diz
 endif
 
 AGENT_DIR = $(GAMMADIR)/agent
@@ -70,7 +76,9 @@
 
 SAMAPFILE = $(SASRCDIR)/mapfile
 
-DEST_SAPROC = $(JDK_LIBDIR)/$(LIBSAPROC)
+DEST_SAPROC           = $(JDK_LIBDIR)/$(LIBSAPROC)
+DEST_SAPROC_DEBUGINFO = $(JDK_LIBDIR)/$(LIBSAPROC_DEBUGINFO)
+DEST_SAPROC_DIZ       = $(JDK_LIBDIR)/$(LIBSAPROC_DIZ)
 
 # DEBUG_BINARIES overrides everything, use full -g debug information
 ifeq ($(DEBUG_BINARIES), true)
@@ -117,11 +125,42 @@
 	           $(SA_DEBUG_CFLAGS)                                   \
 	           -o $@                                                \
 	           $(SALIBS)
+ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+  ifeq ($(OS_VENDOR), Darwin)
+	$(DSYMUTIL) $@
+    ifeq ($(ZIP_DEBUGINFO_FILES),1)
+	$(ZIPEXE) -q -r -y $(LIBSAPROC_DIZ) $(LIBSAPROC_DEBUGINFO)
+	$(RM) -r $(LIBSAPROC_DEBUGINFO)
+    endif
+  else
+	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBSAPROC_DEBUGINFO)
+	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBSAPROC_DEBUGINFO) $@
+    ifeq ($(STRIP_POLICY),all_strip)
+	$(QUIETLY) $(STRIP) $@
+    else
+      ifeq ($(STRIP_POLICY),min_strip)
+	$(QUIETLY) $(STRIP) -g $@
+      # implied else here is no stripping at all
+      endif
+    endif
+    ifeq ($(ZIP_DEBUGINFO_FILES),1)
+	$(ZIPEXE) -q -y $(LIBSAPROC_DIZ) $(LIBSAPROC_DEBUGINFO)
+	$(RM) $(LIBSAPROC_DEBUGINFO)
+    endif
+  endif
+endif
 
 install_saproc: $(BUILDLIBSAPROC)
-	$(QUIETLY) if [ -e $(LIBSAPROC) ] ; then             \
-	  echo "Copying $(LIBSAPROC) to $(DEST_SAPROC)";     \
-	  cp -f $(LIBSAPROC) $(DEST_SAPROC) && echo "Done";  \
-	fi
+	@echo "Copying $(LIBSAPROC) to $(DEST_SAPROC)"
+ifeq ($(OS_VENDOR), Darwin)
+	$(QUIETLY) test -d $(LIBSAPROC_DEBUGINFO) && \
+	    cp -f -r $(LIBSAPROC_DEBUGINFO) $(DEST_SAPROC_DEBUGINFO)
+else
+	$(QUIETLY) test -f $(LIBSAPROC_DEBUGINFO) && \
+	    cp -f $(LIBSAPROC_DEBUGINFO) $(DEST_SAPROC_DEBUGINFO)
+endif
+	$(QUIETLY) test -f $(LIBSAPROC_DIZ) && \
+	    cp -f $(LIBSAPROC_DIZ) $(DEST_SAPROC_DIZ)
+	$(QUIETLY) cp -f $(LIBSAPROC) $(DEST_SAPROC) && echo "Done"
 
 .PHONY: install_saproc
--- a/make/bsd/makefiles/universal.gmk	Wed Oct 16 10:52:41 2013 +0200
+++ b/make/bsd/makefiles/universal.gmk	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -19,7 +19,7 @@
 # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 # or visit www.oracle.com if you need additional information or have any
 # questions.
-#  
+#
 #
 
 # macosx universal builds
@@ -35,15 +35,15 @@
 all_product_universal:
 #	$(QUIETLY) $(MAKE) ARCH_DATA_MODEL=32 $(COMMON_VM_PRODUCT_TARGETS)
 	$(QUIETLY) $(MAKE) ARCH_DATA_MODEL=64 $(COMMON_VM_PRODUCT_TARGETS)
-	$(QUIETLY) $(MAKE) EXPORT_SUBDIR= universalize
+	$(QUIETLY) $(MAKE) BUILD_FLAVOR=product EXPORT_SUBDIR= universalize
 all_fastdebug_universal:
 #	$(QUIETLY) $(MAKE) ARCH_DATA_MODEL=32 $(COMMON_VM_FASTDEBUG_TARGETS)
 	$(QUIETLY) $(MAKE) ARCH_DATA_MODEL=64 $(COMMON_VM_FASTDEBUG_TARGETS)
-	$(QUIETLY) $(MAKE) EXPORT_SUBDIR=/fastdebug universalize
+	$(QUIETLY) $(MAKE) BUILD_FLAVOR=fastdebug EXPORT_SUBDIR=/fastdebug universalize
 all_debug_universal:
 #	$(QUIETLY) $(MAKE) ARCH_DATA_MODEL=32 $(COMMON_VM_DEBUG_TARGETS)
 	$(QUIETLY) $(MAKE) ARCH_DATA_MODEL=64 $(COMMON_VM_DEBUG_TARGETS)
-	$(QUIETLY) $(MAKE) EXPORT_SUBDIR=/debug universalize
+	$(QUIETLY) $(MAKE) BUILD_FLAVOR=debug EXPORT_SUBDIR=/debug universalize
 
 
 # Consolidate architecture builds into a single Universal binary
@@ -57,18 +57,18 @@
 	if [ -n "$${BUILT_LIPO_FILES}" ]; then \
 	  $(MKDIR) -p $(shell dirname $@); \
 	  lipo -create -output $@ $${BUILT_LIPO_FILES}; \
-	fi	
+	fi
 
 
 # Copy built non-universal binaries in place
+# - copies directories, including empty dirs
+# - copies files, symlinks, and other non-directory files
 $(UNIVERSAL_COPY_LIST):
-	BUILT_COPY_FILES="`find $(EXPORT_JRE_LIB_DIR)/{i386,amd64}/$(subst $(EXPORT_JRE_LIB_DIR)/,,$@) 2>/dev/null`"; \
+	BUILT_COPY_FILES="`find $(EXPORT_JRE_LIB_DIR)/{i386,amd64}/$(subst $(EXPORT_JRE_LIB_DIR)/,,$@) -prune 2>/dev/null`"; \
 	if [ -n "$${BUILT_COPY_FILES}" ]; then \
 	  for i in $${BUILT_COPY_FILES}; do \
-	    if [ -f $${i} ]; then \
-	      $(MKDIR) -p $(shell dirname $@); \
-	      $(CP) $${i} $@; \
-	    fi; \
+	    $(MKDIR) -p $(shell dirname $@); \
+	    $(CP) -R $${i} $@; \
 	  done; \
 	fi
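
The -prune added to the find call above is what makes directory artifacts copy correctly: a .dSYM bundle is a directory, and without -prune find would also list every file inside it, so the per-item `$(CP) -R` would scatter the bundle's contents. With -prune, find prints only the matched path and one recursive copy moves the bundle whole. A small sketch of the difference, assuming a shell with brace expansion (as the recipe above already does); the libfoo paths are illustrative:

# Sketch only: find with and without -prune on a directory match.
demo:
	mkdir -p lib/amd64/libfoo.dylib.dSYM/Contents/Resources
	# lists the bundle and every path beneath it:
	find lib/{i386,amd64}/libfoo.dylib.dSYM 2>/dev/null
	# lists only the matched bundle path(s):
	find lib/{i386,amd64}/libfoo.dylib.dSYM -prune 2>/dev/null
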
 
--- a/make/bsd/makefiles/vm.make	Wed Oct 16 10:52:41 2013 +0200
+++ b/make/bsd/makefiles/vm.make	Tue Nov 05 17:38:04 2013 -0800
@@ -60,10 +60,16 @@
 # The order is important for the precompiled headers to work.
 INCLUDES += $(PRECOMPILED_HEADER_DIR:%=-I%) $(Src_Dirs_I:%=-I%)
 
-ifeq (${VERSION}, debug)
+# SYMFLAG is used by {jsig,saproc}.make
+ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+  # always build with debug info when we can create .dSYM/.debuginfo files
   SYMFLAG = -g
 else
-  SYMFLAG =
+  ifeq (${VERSION}, debug)
+    SYMFLAG = -g
+  else
+    SYMFLAG =
+  endif
 endif
 
 # HOTSPOT_RELEASE_VERSION and HOTSPOT_BUILD_VERSION are defined
@@ -147,8 +153,14 @@
   ifeq (${VERSION}, $(filter ${VERSION}, debug fastdebug))
     CFLAGS += -DALLOW_OPERATOR_NEW_USAGE
   endif
+
+  LIBJVM_DEBUGINFO   = lib$(JVM).dylib.dSYM
+  LIBJVM_DIZ         = lib$(JVM).diz
 else
   LIBJVM   = lib$(JVM).so
+
+  LIBJVM_DEBUGINFO   = lib$(JVM).debuginfo
+  LIBJVM_DIZ         = lib$(JVM).diz
 endif
 
 SPECIAL_PATHS:=adlc c1 gc_implementation opto shark libadt
@@ -322,10 +334,47 @@
 	    rm -f $@.1; ln -s $@ $@.1;                                  \
 	}
 
-DEST_JVM = $(JDK_LIBDIR)/$(VM_SUBDIR)/$(LIBJVM)
+ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+  ifeq ($(OS_VENDOR), Darwin)
+	$(DSYMUTIL) $@
+    ifeq ($(ZIP_DEBUGINFO_FILES),1)
+	$(ZIPEXE) -q -r -y $(LIBJVM_DIZ) $(LIBJVM_DEBUGINFO)
+	$(RM) -r $(LIBJVM_DEBUGINFO)
+    endif
+  else
+	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJVM_DEBUGINFO)
+	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DEBUGINFO) $@
+    ifeq ($(STRIP_POLICY),all_strip)
+	$(QUIETLY) $(STRIP) $@
+    else
+      ifeq ($(STRIP_POLICY),min_strip)
+	$(QUIETLY) $(STRIP) -g $@
+      # implied else here is no stripping at all
+      endif
+    endif
+    ifeq ($(ZIP_DEBUGINFO_FILES),1)
+	$(ZIPEXE) -q -y $(LIBJVM_DIZ) $(LIBJVM_DEBUGINFO)
+	$(RM) $(LIBJVM_DEBUGINFO)
+    endif
+  endif
+endif
+
+DEST_SUBDIR        = $(JDK_LIBDIR)/$(VM_SUBDIR)
+DEST_JVM           = $(DEST_SUBDIR)/$(LIBJVM)
+DEST_JVM_DEBUGINFO = $(DEST_SUBDIR)/$(LIBJVM_DEBUGINFO)
+DEST_JVM_DIZ       = $(DEST_SUBDIR)/$(LIBJVM_DIZ)
 
 install_jvm: $(LIBJVM)
 	@echo "Copying $(LIBJVM) to $(DEST_JVM)"
+ifeq ($(OS_VENDOR), Darwin)
+	$(QUIETLY) test -d $(LIBJVM_DEBUGINFO) && \
+	    cp -f -r $(LIBJVM_DEBUGINFO) $(DEST_JVM_DEBUGINFO)
+else
+	$(QUIETLY) test -f $(LIBJVM_DEBUGINFO) && \
+	    cp -f $(LIBJVM_DEBUGINFO) $(DEST_JVM_DEBUGINFO)
+endif
+	$(QUIETLY) test -f $(LIBJVM_DIZ) && \
+	    cp -f $(LIBJVM_DIZ) $(DEST_JVM_DIZ)
 	$(QUIETLY) cp -f $(LIBJVM) $(DEST_JVM) && echo "Done"
 
 #----------------------------------------------------------------------
@@ -340,11 +389,8 @@
 #----------------------------------------------------------------------
 
 ifeq ($(OS_VENDOR), Darwin)
-$(LIBJVM).dSYM: $(LIBJVM)
-	dsymutil $(LIBJVM)
-
 # no libjvm_db for macosx
-build: $(LIBJVM) $(LAUNCHER) $(LIBJSIG) $(BUILDLIBSAPROC) dtraceCheck $(LIBJVM).dSYM
+build: $(LIBJVM) $(LAUNCHER) $(LIBJSIG) $(BUILDLIBSAPROC) dtraceCheck
 	echo "Doing vm.make build:"
 else
 build: $(LIBJVM) $(LAUNCHER) $(LIBJSIG) $(LIBJVM_DB) $(BUILDLIBSAPROC)
--- a/make/defs.make	Wed Oct 16 10:52:41 2013 +0200
+++ b/make/defs.make	Tue Nov 05 17:38:04 2013 -0800
@@ -77,6 +77,16 @@
 @$(RM) $@
 $(CP) $< $@
 endef
+
+# Mac OS X strongly discourages 'cp -r' and provides 'cp -R' instead.
+# May need to have a Mac OS X-specific definition of install-dir
+# sometime in the future.
+define install-dir
+@$(MKDIR) -p $(@D)
+@$(RM) -r $@
+$(CP) -r $< $@
+endef
+
 define prep-target
 @$(MKDIR) -p $(@D)
 @$(RM) $@
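
The new install-dir macro complements the existing install-file canned recipe for artifacts that are directories, such as .dSYM bundles: the stale destination is removed recursively before the copy, since cp would otherwise merge into it. A hypothetical usage sketch; the paths are illustrative and not taken from this patch:

# Sketch only: install-dir as the canned recipe of a directory rule.
$(EXPORT_JRE_LIB_DIR)/server/libjvm.dylib.dSYM: $(BUILD_DIR)/libjvm.dylib.dSYM
	$(install-dir)
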
--- a/make/excludeSrc.make	Wed Oct 16 10:52:41 2013 +0200
+++ b/make/excludeSrc.make	Tue Nov 05 17:38:04 2013 -0800
@@ -88,7 +88,7 @@
 	g1ErgoVerbose.cpp g1GCPhaseTimes.cpp g1HRPrinter.cpp g1HotCardCache.cpp g1Log.cpp \
 	g1MMUTracker.cpp g1MarkSweep.cpp g1MemoryPool.cpp g1MonitoringSupport.cpp \
 	g1RemSet.cpp g1RemSetSummary.cpp g1SATBCardTableModRefBS.cpp g1_globals.cpp heapRegion.cpp \
-	heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp \
+	g1BiasedArray.cpp heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp \
 	ptrQueue.cpp satbQueue.cpp sparsePRT.cpp survRateGroup.cpp vm_operations_g1.cpp \
 	adjoiningGenerations.cpp adjoiningVirtualSpaces.cpp asPSOldGen.cpp asPSYoungGen.cpp \
 	cardTableExtension.cpp gcTaskManager.cpp gcTaskThread.cpp objectStartArray.cpp \
@@ -99,7 +99,7 @@
 	psTasks.cpp psVirtualspace.cpp psYoungGen.cpp vmPSOperations.cpp asParNewGeneration.cpp \
 	parCardTableModRefBS.cpp parGCAllocBuffer.cpp parNewGeneration.cpp mutableSpace.cpp \
 	gSpaceCounters.cpp allocationStats.cpp spaceCounters.cpp gcAdaptivePolicyCounters.cpp \
-	mutableNUMASpace.cpp immutableSpace.cpp yieldingWorkGroup.cpp
+	mutableNUMASpace.cpp immutableSpace.cpp yieldingWorkGroup.cpp hSpaceCounters.cpp
 endif
 
 ifeq ($(INCLUDE_NMT), false)
--- a/make/hotspot_version	Wed Oct 16 10:52:41 2013 +0200
+++ b/make/hotspot_version	Tue Nov 05 17:38:04 2013 -0800
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=25
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=48
+HS_BUILD_NUMBER=57
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
--- a/make/jprt.properties	Wed Oct 16 10:52:41 2013 +0200
+++ b/make/jprt.properties	Tue Nov 05 17:38:04 2013 -0800
@@ -24,12 +24,7 @@
 
 # Properties for jprt
 
-# All build result bundles are full jdks, so the 64bit testing does not
-#    need the 32bit sibling bundle installed.
-#    Note: If the hotspot/make/Makefile changed to only bundle the 64bit files
-#          when bundling 64bit, and stripped out the 64bit files from any 32bit
-#          bundles, then this setting would be need to be "true".
-
+# All build result bundles are full jdks.
 jprt.need.sibling.build=false
 
 # At submit time, the release supplied will be in jprt.submit.release
@@ -52,21 +47,11 @@
 #       sparc etc.
 
 # Define the Solaris platforms we want for the various releases
-jprt.my.solaris.sparc.jdk8=solaris_sparc_5.10
-jprt.my.solaris.sparc.jdk7=solaris_sparc_5.10
-jprt.my.solaris.sparc.jdk7u8=${jprt.my.solaris.sparc.jdk7}
-jprt.my.solaris.sparc=${jprt.my.solaris.sparc.${jprt.tools.default.release}}
-
 jprt.my.solaris.sparcv9.jdk8=solaris_sparcv9_5.10
 jprt.my.solaris.sparcv9.jdk7=solaris_sparcv9_5.10
 jprt.my.solaris.sparcv9.jdk7u8=${jprt.my.solaris.sparcv9.jdk7}
 jprt.my.solaris.sparcv9=${jprt.my.solaris.sparcv9.${jprt.tools.default.release}}
 
-jprt.my.solaris.i586.jdk8=solaris_i586_5.10
-jprt.my.solaris.i586.jdk7=solaris_i586_5.10
-jprt.my.solaris.i586.jdk7u8=${jprt.my.solaris.i586.jdk7}
-jprt.my.solaris.i586=${jprt.my.solaris.i586.${jprt.tools.default.release}}
-
 jprt.my.solaris.x64.jdk8=solaris_x64_5.10
 jprt.my.solaris.x64.jdk7=solaris_x64_5.10
 jprt.my.solaris.x64.jdk7u8=${jprt.my.solaris.x64.jdk7}
@@ -120,22 +105,20 @@
 jprt.my.macosx.x64.jdk7u8=${jprt.my.macosx.x64.jdk7}
 jprt.my.macosx.x64=${jprt.my.macosx.x64.${jprt.tools.default.release}}
 
-jprt.my.windows.i586.jdk8=windows_i586_5.1
-jprt.my.windows.i586.jdk7=windows_i586_5.1
+jprt.my.windows.i586.jdk8=windows_i586_6.1
+jprt.my.windows.i586.jdk7=windows_i586_6.1
 jprt.my.windows.i586.jdk7u8=${jprt.my.windows.i586.jdk7}
 jprt.my.windows.i586=${jprt.my.windows.i586.${jprt.tools.default.release}}
 
-jprt.my.windows.x64.jdk8=windows_x64_5.2
-jprt.my.windows.x64.jdk7=windows_x64_5.2
+jprt.my.windows.x64.jdk8=windows_x64_6.1
+jprt.my.windows.x64.jdk7=windows_x64_6.1
 jprt.my.windows.x64.jdk7u8=${jprt.my.windows.x64.jdk7}
 jprt.my.windows.x64=${jprt.my.windows.x64.${jprt.tools.default.release}}
 
 # Standard list of jprt build targets for this source tree
 
 jprt.build.targets.standard= \
-    ${jprt.my.solaris.sparc}-{product|fastdebug}, \
     ${jprt.my.solaris.sparcv9}-{product|fastdebug|optimized}, \
-    ${jprt.my.solaris.i586}-{product|fastdebug}, \
     ${jprt.my.solaris.x64}-{product|fastdebug}, \
     ${jprt.my.linux.i586}-{product|fastdebug}, \
     ${jprt.my.linux.x64}-{product|fastdebug|optimized}, \
@@ -145,7 +128,6 @@
     ${jprt.my.linux.armvh}-{product|fastdebug}
 
 jprt.build.targets.open= \
-    ${jprt.my.solaris.i586}-{productOpen}, \
     ${jprt.my.solaris.x64}-{debugOpen}, \
     ${jprt.my.linux.x64}-{productOpen}
 
@@ -168,31 +150,6 @@
 
 # Subset lists of test targets for this source tree
 
-jprt.my.solaris.sparc.test.targets= \
-    ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jvm98, \
-    ${jprt.my.solaris.sparc}-{product|fastdebug}-c2-jvm98_nontiered, \
-    ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-scimark, \
-    ${jprt.my.solaris.sparc}-product-{c1|c2}-runThese, \
-    ${jprt.my.solaris.sparc}-fastdebug-c1-runThese_Xshare, \
-    ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_SerialGC, \
-    ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_ParallelGC, \
-    ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_ParNewGC, \
-    ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_CMS, \
-    ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_G1, \
-    ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_ParOldGC, \
-    ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_SerialGC, \
-    ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_ParallelGC, \
-    ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_ParNewGC, \
-    ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_CMS, \
-    ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_G1, \
-    ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_ParOldGC, \
-    ${jprt.my.solaris.sparc}-{product|fastdebug}-c2-jbb_default_nontiered, \
-    ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jbb_SerialGC, \
-    ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jbb_ParallelGC, \
-    ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jbb_CMS, \
-    ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jbb_G1, \
-    ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jbb_ParOldGC
-
 jprt.my.solaris.sparcv9.test.targets= \
     ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jvm98, \
     ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jvm98_nontiered, \
@@ -242,37 +199,6 @@
     ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_G1, \
     ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_ParOldGC
 
-jprt.my.solaris.i586.test.targets= \
-    ${jprt.my.solaris.i586}-{product|fastdebug}-{c1|c2}-jvm98, \
-    ${jprt.my.solaris.i586}-{product|fastdebug}-c2-jvm98_nontiered, \
-    ${jprt.my.solaris.i586}-{product|fastdebug}-{c1|c2}-scimark, \
-    ${jprt.my.solaris.i586}-product-{c1|c2}-runThese_Xcomp, \
-    ${jprt.my.solaris.i586}-fastdebug-c1-runThese_Xcomp, \
-    ${jprt.my.solaris.i586}-fastdebug-c1-runThese_Xshare, \
-    ${jprt.my.solaris.i586}-product-c1-GCBasher_SerialGC, \
-    ${jprt.my.solaris.i586}-product-c1-GCBasher_ParallelGC, \
-    ${jprt.my.solaris.i586}-product-c1-GCBasher_ParNewGC, \
-    ${jprt.my.solaris.i586}-product-c1-GCBasher_CMS, \
-    ${jprt.my.solaris.i586}-product-c1-GCBasher_G1, \
-    ${jprt.my.solaris.i586}-product-c1-GCBasher_ParOldGC, \
-    ${jprt.my.solaris.i586}-fastdebug-c2-GCBasher_SerialGC, \
-    ${jprt.my.solaris.i586}-fastdebug-c2-GCBasher_ParallelGC, \
-    ${jprt.my.solaris.i586}-fastdebug-c2-GCBasher_ParNewGC, \
-    ${jprt.my.solaris.i586}-fastdebug-c2-GCBasher_CMS, \
-    ${jprt.my.solaris.i586}-fastdebug-c2-GCBasher_G1, \
-    ${jprt.my.solaris.i586}-fastdebug-c2-GCBasher_ParOldGC, \
-    ${jprt.my.solaris.i586}-product-c1-GCOld_SerialGC, \
-    ${jprt.my.solaris.i586}-product-c1-GCOld_ParallelGC, \
-    ${jprt.my.solaris.i586}-product-c1-GCOld_ParNewGC, \
-    ${jprt.my.solaris.i586}-product-c1-GCOld_CMS, \
-    ${jprt.my.solaris.i586}-product-c1-GCOld_G1, \
-    ${jprt.my.solaris.i586}-product-c1-GCOld_ParOldGC, \
-    ${jprt.my.solaris.i586}-fastdebug-c2-jbb_default_nontiered, \
-    ${jprt.my.solaris.i586}-fastdebug-c2-jbb_ParallelGC, \
-    ${jprt.my.solaris.i586}-fastdebug-c2-jbb_CMS, \
-    ${jprt.my.solaris.i586}-fastdebug-c2-jbb_G1, \
-    ${jprt.my.solaris.i586}-fastdebug-c2-jbb_ParOldGC
-
 jprt.my.linux.i586.test.targets = \
     ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-jvm98, \
     ${jprt.my.linux.i586}-{product|fastdebug}-c2-jvm98_nontiered, \
@@ -395,7 +321,6 @@
 # Some basic "smoke" tests for OpenJDK builds
 jprt.test.targets.open = \
     ${jprt.my.solaris.x64}-{productOpen|fastdebugOpen}-c2-jvm98, \
-    ${jprt.my.solaris.i586}-{productOpen|fastdebugOpen}-c2-jvm98, \
     ${jprt.my.linux.x64}-{productOpen|fastdebugOpen}-c2-jvm98
 
 # Testing for actual embedded builds is different to standard
@@ -407,9 +332,7 @@
 
 jprt.test.targets.standard = \
   ${jprt.my.linux.i586.test.targets.embedded}, \
-  ${jprt.my.solaris.sparc.test.targets}, \
   ${jprt.my.solaris.sparcv9.test.targets}, \
-  ${jprt.my.solaris.i586.test.targets}, \
   ${jprt.my.solaris.x64.test.targets}, \
   ${jprt.my.linux.i586.test.targets}, \
   ${jprt.my.linux.x64.test.targets}, \
@@ -420,15 +343,12 @@
 
 jprt.test.targets.embedded= 		\
   ${jprt.my.linux.i586.test.targets.embedded}, \
-  ${jprt.my.solaris.sparc.test.targets}, \
   ${jprt.my.solaris.sparcv9.test.targets}, \
-  ${jprt.my.solaris.i586.test.targets}, \
   ${jprt.my.solaris.x64.test.targets}, \
   ${jprt.my.linux.x64.test.targets}, \
   ${jprt.my.windows.i586.test.targets}, \
   ${jprt.my.windows.x64.test.targets}
 
-
 jprt.test.targets.jdk8=${jprt.test.targets.standard}
 jprt.test.targets.jdk7=${jprt.test.targets.standard}
 jprt.test.targets.jdk7u8=${jprt.test.targets.jdk7}
@@ -439,15 +359,11 @@
 #jprt.make.rule.test.targets=*-product-*-packtest
 
 jprt.make.rule.test.targets.standard.client = \
-  ${jprt.my.solaris.sparc}-*-c1-clienttest, \
-  ${jprt.my.solaris.i586}-*-c1-clienttest, \
   ${jprt.my.linux.i586}-*-c1-clienttest, \
   ${jprt.my.windows.i586}-*-c1-clienttest
 
 jprt.make.rule.test.targets.standard.server = \
-  ${jprt.my.solaris.sparc}-*-c2-servertest, \
   ${jprt.my.solaris.sparcv9}-*-c2-servertest, \
-  ${jprt.my.solaris.i586}-*-c2-servertest, \
   ${jprt.my.solaris.x64}-*-c2-servertest, \
   ${jprt.my.linux.i586}-*-c2-servertest, \
   ${jprt.my.linux.x64}-*-c2-servertest, \
@@ -456,9 +372,7 @@
   ${jprt.my.windows.x64}-*-c2-servertest
 
 jprt.make.rule.test.targets.standard.internalvmtests = \
-  ${jprt.my.solaris.sparc}-fastdebug-c2-internalvmtests, \
   ${jprt.my.solaris.sparcv9}-fastdebug-c2-internalvmtests, \
-  ${jprt.my.solaris.i586}-fastdebug-c2-internalvmtests, \
   ${jprt.my.solaris.x64}-fastdebug-c2-internalvmtests, \
   ${jprt.my.linux.i586}-fastdebug-c2-internalvmtests, \
   ${jprt.my.linux.x64}-fastdebug-c2-internalvmtests, \
@@ -467,16 +381,12 @@
   ${jprt.my.windows.x64}-fastdebug-c2-internalvmtests
 
 jprt.make.rule.test.targets.standard.wbapi = \
-  ${jprt.my.solaris.sparc}-{product|fastdebug}-c2-wbapitest, \
   ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-wbapitest, \
-  ${jprt.my.solaris.i586}-{product|fastdebug}-c2-wbapitest, \
   ${jprt.my.solaris.x64}-{product|fastdebug}-c2-wbapitest, \
   ${jprt.my.linux.i586}-{product|fastdebug}-c2-wbapitest, \
   ${jprt.my.linux.x64}-{product|fastdebug}-c2-wbapitest, \
   ${jprt.my.windows.i586}-{product|fastdebug}-c2-wbapitest, \
   ${jprt.my.windows.x64}-{product|fastdebug}-c2-wbapitest, \
-  ${jprt.my.solaris.sparc}-{product|fastdebug}-c1-wbapitest, \
-  ${jprt.my.solaris.i586}-{product|fastdebug}-c1-wbapitest, \
   ${jprt.my.linux.i586}-{product|fastdebug}-c1-wbapitest, \
   ${jprt.my.windows.i586}-{product|fastdebug}-c1-wbapitest
 
--- a/make/linux/makefiles/fastdebug.make	Wed Oct 16 10:52:41 2013 +0200
+++ b/make/linux/makefiles/fastdebug.make	Tue Nov 05 17:38:04 2013 -0800
@@ -59,5 +59,5 @@
 MAPFILE = $(GAMMADIR)/make/linux/makefiles/mapfile-vers-debug
 
 VERSION = optimized
-SYSDEFS += -DASSERT
+SYSDEFS += -DASSERT -DCHECK_UNHANDLED_OOPS
 PICFLAGS = DEFAULT
--- a/make/linux/makefiles/gcc.make	Wed Oct 16 10:52:41 2013 +0200
+++ b/make/linux/makefiles/gcc.make	Tue Nov 05 17:38:04 2013 -0800
@@ -209,7 +209,7 @@
 
 ifeq ($(USE_CLANG), true)
   # However we need to clean the code up before we can unrestrictedly enable this option with Clang
-  WARNINGS_ARE_ERRORS += -Wno-unused-value -Wno-logical-op-parentheses -Wno-parentheses-equality -Wno-parentheses
+  WARNINGS_ARE_ERRORS += -Wno-logical-op-parentheses -Wno-parentheses-equality -Wno-parentheses
   WARNINGS_ARE_ERRORS += -Wno-switch -Wno-tautological-constant-out-of-range-compare -Wno-tautological-compare
   WARNINGS_ARE_ERRORS += -Wno-delete-non-virtual-dtor -Wno-deprecated -Wno-format -Wno-dynamic-class-memaccess
   WARNINGS_ARE_ERRORS += -Wno-return-type -Wno-empty-body
--- a/make/windows/create.bat	Wed Oct 16 10:52:41 2013 +0200
+++ b/make/windows/create.bat	Tue Nov 05 17:38:04 2013 -0800
@@ -82,6 +82,7 @@
 
 echo **************************************************************
 set ProjectFile=%HotSpotBuildSpace%\jvm.vcproj
+echo MSC_VER = "%MSC_VER%" 
 if "%MSC_VER%" == "1200" (
 set ProjectFile=%HotSpotBuildSpace%\jvm.dsp
 echo Will generate VC6 project {unsupported}
@@ -96,11 +97,17 @@
 echo Will generate VC10 {Visual Studio 2010}
 set ProjectFile=%HotSpotBuildSpace%\jvm.vcxproj
 ) else (
+if "%MSC_VER%" == "1700" (
+echo Will generate VC10 {compatible with Visual Studio 2012}
+echo After opening in VS 2012, click "Update" when prompted.
+set ProjectFile=%HotSpotBuildSpace%\jvm.vcxproj
+) else (
 echo Will generate VC7 project {Visual Studio 2003 .NET}
 )
 )
 )
 )
+)
 echo %ProjectFile%
 echo **************************************************************
 
--- a/make/windows/makefiles/compile.make	Wed Oct 16 10:52:41 2013 +0200
+++ b/make/windows/makefiles/compile.make	Tue Nov 05 17:38:04 2013 -0800
@@ -44,6 +44,7 @@
 #   /GS       Inserts security stack checks in some functions (VS2005 default)
 #   /Oi       Use intrinsics (in /O2)
 #   /Od       Disable all optimizations
+#   /MP       Use multiple cores for compilation
 #
 # NOTE: Normally following any of the above with a '-' will turn off that flag
 #
@@ -180,6 +181,7 @@
 PRODUCT_OPT_OPTION   = /O2 /Oy-
 FASTDEBUG_OPT_OPTION = /O2 /Oy-
 DEBUG_OPT_OPTION     = /Od
+SAFESEH_FLAG = /SAFESEH
 !endif
 
 !if "$(COMPILER_NAME)" == "VS2005"
@@ -198,6 +200,7 @@
 !if "x$(MT)" == "x"
 MT=mt.exe
 !endif
+SAFESEH_FLAG = /SAFESEH
 !endif
 
 !if "$(COMPILER_NAME)" == "VS2008"
@@ -206,11 +209,13 @@
 DEBUG_OPT_OPTION     = /Od
 GX_OPTION = /EHsc
 LD_FLAGS = /manifest $(LD_FLAGS)
+MP_FLAG = /MP
 # Manifest Tool - used in VS2005 and later to adjust manifests stored
 # as resources inside build artifacts.
 !if "x$(MT)" == "x"
 MT=mt.exe
 !endif
+SAFESEH_FLAG = /SAFESEH
 !endif
 
 !if "$(COMPILER_NAME)" == "VS2010"
@@ -219,6 +224,7 @@
 DEBUG_OPT_OPTION     = /Od
 GX_OPTION = /EHsc
 LD_FLAGS = /manifest $(LD_FLAGS)
+MP_FLAG = /MP
 # Manifest Tool - used in VS2005 and later to adjust manifests stored
 # as resources inside build artifacts.
 !if "x$(MT)" == "x"
@@ -235,15 +241,20 @@
 DEBUG_OPT_OPTION     = /Od
 GX_OPTION = /EHsc
 LD_FLAGS = /manifest $(LD_FLAGS)
+MP_FLAG = /MP
 # Manifest Tool - used in VS2005 and later to adjust manifests stored
 # as resources inside build artifacts.
 !if "x$(MT)" == "x"
 MT=mt.exe
 !endif
+SAFESEH_FLAG = /SAFESEH
+!endif
+
 !if "$(BUILDARCH)" == "i486"
-LD_FLAGS = /SAFESEH $(LD_FLAGS)
+LD_FLAGS = $(SAFESEH_FLAG) $(LD_FLAGS)
 !endif
-!endif
+
+CXX_FLAGS = $(CXX_FLAGS) $(MP_FLAG)
 
 # If NO_OPTIMIZATIONS is defined in the environment, turn everything off
 !ifdef NO_OPTIMIZATIONS
--- a/make/windows/makefiles/fastdebug.make	Wed Oct 16 10:52:41 2013 +0200
+++ b/make/windows/makefiles/fastdebug.make	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
--- a/make/windows/makefiles/rules.make	Wed Oct 16 10:52:41 2013 +0200
+++ b/make/windows/makefiles/rules.make	Tue Nov 05 17:38:04 2013 -0800
@@ -69,6 +69,13 @@
 VcVersion=VC10
 ProjectFile=jvm.vcxproj
 
+!elseif "$(MSC_VER)" == "1700"
+# This is VS2012, but it loads VS10 projects just fine (and will
+# upgrade them automatically to VS2012 format).
+
+VcVersion=VC10
+ProjectFile=jvm.vcxproj
+
 !else
 
 VcVersion=VC7
--- a/make/windows/makefiles/sa.make	Wed Oct 16 10:52:41 2013 +0200
+++ b/make/windows/makefiles/sa.make	Tue Nov 05 17:38:04 2013 -0800
@@ -102,11 +102,19 @@
 !if "$(MT)" != ""
 SA_LD_FLAGS = -manifest $(SA_LD_FLAGS)
 !endif
-SASRCFILE = $(AGENT_DIR)/src/os/win32/windbg/sawindbg.cpp
+
+SASRCFILES = $(AGENT_DIR)/src/os/win32/windbg/sawindbg.cpp \
+		$(AGENT_DIR)/src/share/native/sadis.c
+		            
 SA_LFLAGS = $(SA_LD_FLAGS) -nologo -subsystem:console -machine:$(MACHINE)
 !if "$(ENABLE_FULL_DEBUG_SYMBOLS)" == "1"
 SA_LFLAGS = $(SA_LFLAGS) -map -debug
 !endif
+!if "$(BUILDARCH)" == "i486"
+SA_LFLAGS = $(SAFESEH_FLAG) $(SA_LFLAGS)
+!endif
+
+SA_CFLAGS = $(SA_CFLAGS) $(MP_FLAG)
 
# Note that we do not keep sawindbg.obj around as it would then
 # get included in the dumpbin command in build_vm_def.sh
@@ -114,16 +122,16 @@
 # In VS2005 or VS2008 the link command creates a .manifest file that we want
 # to insert into the linked artifact so we do not need to track it separately.
 # Use ";#2" for .dll and ";#1" for .exe in the MT command below:
-$(SAWINDBG): $(SASRCFILE)
+$(SAWINDBG): $(SASRCFILES)
 	set INCLUDE=$(SA_INCLUDE)$(INCLUDE)
 	$(CXX) @<<
 	  -I"$(BootStrapDir)/include" -I"$(BootStrapDir)/include/win32" 
 	  -I"$(GENERATED)" $(SA_CFLAGS)
-	  $(SASRCFILE)
+	  $(SASRCFILES)
 	  -out:$*.obj
 <<
 	set LIB=$(SA_LIB)$(LIB)
-	$(LD) -out:$@ -DLL $*.obj dbgeng.lib $(SA_LFLAGS)
+	$(LD) -out:$@ -DLL sawindbg.obj sadis.obj dbgeng.lib $(SA_LFLAGS)
 !if "$(MT)" != ""
 	$(MT) -manifest $(@F).manifest -outputresource:$(@F);#2
 !endif
--- a/make/windows/makefiles/trace.make	Wed Oct 16 10:52:41 2013 +0200
+++ b/make/windows/makefiles/trace.make	Tue Nov 05 17:38:04 2013 -0800
@@ -40,8 +40,7 @@
     traceEventIds.hpp     \
     traceTypes.hpp
 
-
-!if "$(OPENJDK)" != "true"
+!if EXISTS($(TraceAltSrcDir))
 TraceGeneratedNames = $(TraceGeneratedNames) \
     traceRequestables.hpp \
     traceEventControl.hpp \
@@ -56,7 +55,7 @@
 	$(TraceOutDir)/traceEventIds.hpp     \
 	$(TraceOutDir)/traceTypes.hpp
 
-!if "$(OPENJDK)" != "true"
+!if EXISTS($(TraceAltSrcDir))
 TraceGeneratedFiles = $(TraceGeneratedFiles) \
 	$(TraceOutDir)/traceRequestables.hpp \
     $(TraceOutDir)/traceEventControl.hpp \
@@ -68,7 +67,7 @@
 XML_DEPS = $(TraceSrcDir)/trace.xml $(TraceSrcDir)/tracetypes.xml \
     $(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod
 
-!if "$(OPENJDK)" != "true"
+!if EXISTS($(TraceAltSrcDir))
 XML_DEPS = $(XML_DEPS) $(TraceAltSrcDir)/traceevents.xml
 !endif
 
@@ -87,7 +86,7 @@
 	@echo Generating $@
 	@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceSrcDir)/traceTypes.xsl -OUT $(TraceOutDir)/traceTypes.hpp
 
-!if "$(OPENJDK)" == "true"
+!if !EXISTS($(TraceAltSrcDir))
 
 $(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
 	@echo Generating OpenJDK $@
--- a/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -307,7 +307,7 @@
       assert(a_byte == *start++, "should be the same code");
     }
 #endif
-  } else if (_id == load_mirror_id) {
+  } else if (_id == load_mirror_id || _id == load_appendix_id) {
     // produce a copy of the load mirror instruction for use by the being initialized case
 #ifdef ASSERT
     address start = __ pc();
@@ -384,6 +384,7 @@
     case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
     case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
     case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
+    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
     default: ShouldNotReachHere();
   }
   __ bind(call_patch);
@@ -397,7 +398,7 @@
   ce->add_call_info_here(_info);
   __ br(Assembler::always, false, Assembler::pt, _patch_site_entry);
   __ delayed()->nop();
-  if (_id == load_klass_id || _id == load_mirror_id) {
+  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
     CodeSection* cs = __ code_section();
     address pc = (address)_pc_start;
     RelocIterator iter(cs, pc, pc + 1);
--- a/src/cpu/sparc/vm/c1_FrameMap_sparc.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/sparc/vm/c1_FrameMap_sparc.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -53,6 +53,8 @@
       opr = as_long_opr(reg);
     } else if (type == T_OBJECT || type == T_ARRAY) {
       opr = as_oop_opr(reg);
+    } else if (type == T_METADATA) {
+      opr = as_metadata_opr(reg);
     } else {
       opr = as_opr(reg);
     }
--- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -105,7 +105,7 @@
         if (src->is_address() && !src->is_stack() && (src->type() == T_OBJECT || src->type() == T_ARRAY)) return false;
       }
 
-      if (UseCompressedKlassPointers) {
+      if (UseCompressedClassPointers) {
         if (src->is_address() && !src->is_stack() && src->type() == T_ADDRESS &&
             src->as_address_ptr()->disp() == oopDesc::klass_offset_in_bytes()) return false;
       }
@@ -520,7 +520,7 @@
 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
   // Allocate a new index in table to hold the object once it's been patched
   int oop_index = __ oop_recorder()->allocate_oop_index(NULL);
-  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_mirror_id, oop_index);
+  PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);
 
   AddressLiteral addrlit(NULL, oop_Relocation::spec(oop_index));
   assert(addrlit.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
@@ -963,7 +963,7 @@
       case T_METADATA:  __ ld_ptr(base, offset, to_reg->as_register()); break;
       case T_ADDRESS:
 #ifdef _LP64
-        if (offset == oopDesc::klass_offset_in_bytes() && UseCompressedKlassPointers) {
+        if (offset == oopDesc::klass_offset_in_bytes() && UseCompressedClassPointers) {
           __ lduw(base, offset, to_reg->as_register());
           __ decode_klass_not_null(to_reg->as_register());
         } else
@@ -2208,7 +2208,7 @@
     // We don't know the array types are compatible
     if (basic_type != T_OBJECT) {
       // Simple test for basic type arrays
-      if (UseCompressedKlassPointers) {
+      if (UseCompressedClassPointers) {
         // We don't need decode because we just need to compare
         __ lduw(src, oopDesc::klass_offset_in_bytes(), tmp);
         __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
@@ -2342,7 +2342,7 @@
     // but not necessarily exactly of type default_type.
     Label known_ok, halt;
     metadata2reg(op->expected_type()->constant_encoding(), tmp);
-    if (UseCompressedKlassPointers) {
+    if (UseCompressedClassPointers) {
       // tmp holds the default type. It currently comes uncompressed after the
       // load of a constant, so encode it.
       __ encode_klass_not_null(tmp);
@@ -2565,7 +2565,7 @@
     Address receiver_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
                           mdo_offset_bias);
     __ ld_ptr(receiver_addr, tmp1);
-    __ verify_oop(tmp1);
+    __ verify_klass_ptr(tmp1);
     __ cmp_and_brx_short(recv, tmp1, Assembler::notEqual, Assembler::pt, next_test);
     Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
                       mdo_offset_bias);
@@ -3100,6 +3100,10 @@
   }
 }
 
+void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
+  fatal("Type profiling not implemented on this platform");
+}
+
 void LIR_Assembler::align_backward_branch_target() {
   __ align(OptoLoopAlignment);
 }
--- a/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -186,7 +186,7 @@
     set((intx)markOopDesc::prototype(), t1);
   }
   st_ptr(t1, obj, oopDesc::mark_offset_in_bytes());
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     // Save klass
     mov(klass, t1);
     encode_klass_not_null(t1);
@@ -196,7 +196,7 @@
   }
   if (len->is_valid()) {
     st(len, obj, arrayOopDesc::length_offset_in_bytes());
-  } else if (UseCompressedKlassPointers) {
+  } else if (UseCompressedClassPointers) {
     // otherwise length is in the class gap
     store_klass_gap(G0, obj);
   }
--- a/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -37,6 +37,9 @@
 #include "runtime/vframeArray.hpp"
 #include "utilities/macros.hpp"
 #include "vmreg_sparc.inline.hpp"
+#if INCLUDE_ALL_GCS
+#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
+#endif
 
 // Implementation of StubAssembler
 
@@ -401,7 +404,9 @@
           if (id == fast_new_instance_init_check_id) {
             // make sure the klass is initialized
             __ ldub(G5_klass, in_bytes(InstanceKlass::init_state_offset()), G3_t1);
-            __ cmp_and_br_short(G3_t1, InstanceKlass::fully_initialized, Assembler::notEqual, Assembler::pn, slow_path);
+            __ cmp(G3_t1, InstanceKlass::fully_initialized);
+            __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
+            __ delayed()->nop();
           }
 #ifdef ASSERT
           // assert object can be fast path allocated
@@ -512,7 +517,9 @@
 
           // check that array length is small enough for fast path
           __ set(C1_MacroAssembler::max_array_allocation_length, G3_t1);
-          __ cmp_and_br_short(G4_length, G3_t1, Assembler::greaterUnsigned, Assembler::pn, slow_path);
+          __ cmp(G4_length, G3_t1);
+          __ br(Assembler::greaterUnsigned, false, Assembler::pn, slow_path);
+          __ delayed()->nop();
 
           // if we got here then the TLAB allocation failed, so try
           // refilling the TLAB or allocating directly from eden.
@@ -804,6 +811,12 @@
       }
       break;
 
+    case load_appendix_patching_id:
+      { __ set_info("load_appendix_patching", dont_gc_arguments);
+        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
+      }
+      break;
+
     case dtrace_object_alloc_id:
       { // O0: object
         __ set_info("dtrace_object_alloc", dont_gc_arguments);
@@ -906,7 +919,7 @@
         Register tmp2 = G3_scratch;
         jbyte* byte_map_base = ((CardTableModRefBS*)bs)->byte_map_base;
 
-        Label not_already_dirty, restart, refill;
+        Label not_already_dirty, restart, refill, young_card;
 
 #ifdef _LP64
         __ srlx(addr, CardTableModRefBS::card_shift, addr);
@@ -918,9 +931,15 @@
         __ set(rs, cardtable);         // cardtable := <card table base>
         __ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable]
 
+        __ cmp_and_br_short(tmp, G1SATBCardTableModRefBS::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card);
+
+        __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
+        __ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable]
+
         assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code");
         __ cmp_and_br_short(tmp, G0, Assembler::notEqual, Assembler::pt, not_already_dirty);
 
+        __ bind(young_card);
         // We didn't take the branch, so we're already dirty: return.
         // Use return-from-leaf
         __ retl();
@@ -1061,6 +1080,25 @@
 
   __ verify_not_null_oop(Oexception);
 
+#ifdef ASSERT
+  // check that fields in JavaThread for exception oop and issuing pc are
+  // empty before writing to them
+  Label oop_empty;
+  Register scratch = I7;  // We can use I7 here because it's overwritten later anyway.
+  __ ld_ptr(Address(G2_thread, JavaThread::exception_oop_offset()), scratch);
+  __ br_null(scratch, false, Assembler::pt, oop_empty);
+  __ delayed()->nop();
+  __ stop("exception oop already set");
+  __ bind(oop_empty);
+
+  Label pc_empty;
+  __ ld_ptr(Address(G2_thread, JavaThread::exception_pc_offset()), scratch);
+  __ br_null(scratch, false, Assembler::pt, pc_empty);
+  __ delayed()->nop();
+  __ stop("exception pc already set");
+  __ bind(pc_empty);
+#endif
+
   // save the exception and issuing pc in the thread
   __ st_ptr(Oexception,  G2_thread, in_bytes(JavaThread::exception_oop_offset()));
   __ st_ptr(Oissuing_pc, G2_thread, in_bytes(JavaThread::exception_pc_offset()));
--- a/src/cpu/sparc/vm/c2_globals_sparc.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/sparc/vm/c2_globals_sparc.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -57,6 +57,7 @@
 define_pd_global(bool, UseTLAB,                      true);
 define_pd_global(bool, ResizeTLAB,                   true);
 define_pd_global(intx, LoopUnrollLimit,              60); // Design center runs on 1.3.1
+define_pd_global(intx, MinJumpTableSize,             5);
 
 // Peephole and CISC spilling both break the graph, and so makes the
 // scheduler sick.
--- a/src/cpu/sparc/vm/cppInterpreter_sparc.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/sparc/vm/cppInterpreter_sparc.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -365,7 +365,7 @@
   return entry;
 }
 
-address CppInterpreter::return_entry(TosState state, int length) {
+address CppInterpreter::return_entry(TosState state, int length, Bytecodes::Code code) {
   // make it look good in the debugger
   return CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation) + frame::pc_return_offset;
 }
--- a/src/cpu/sparc/vm/frame_sparc.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/sparc/vm/frame_sparc.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -764,7 +764,7 @@
 #ifdef CC_INTERP
         *oop_result = istate->_oop_temp;
 #else
-        oop obj = (oop) at(interpreter_frame_oop_temp_offset);
+        oop obj = cast_to_oop(at(interpreter_frame_oop_temp_offset));
         assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check");
         *oop_result = obj;
 #endif // CC_INTERP
@@ -788,7 +788,7 @@
     switch(type) {
       case T_OBJECT:
       case T_ARRAY: {
-        oop obj = (oop)*tos_addr;
+        oop obj = cast_to_oop(*tos_addr);
         assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check");
         *oop_result = obj;
         break;
--- a/src/cpu/sparc/vm/globals_sparc.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/sparc/vm/globals_sparc.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -76,6 +76,8 @@
 // GC Ergo Flags
 define_pd_global(uintx, CMSYoungGenPerWorker, 16*M);  // default max size of CMS young gen, per GC worker thread
 
+define_pd_global(uintx, TypeProfileLevel, 0);
+
 #define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \
                                                                             \
   product(intx, UseVIS, 99,                                                 \
--- a/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -3333,7 +3333,8 @@
 
   if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
     // No allocation in the shared eden.
-    ba_short(slow_case);
+    ba(slow_case);
+    delayed()->nop();
   }
 
   ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), top);
@@ -3358,7 +3359,8 @@
     add(t2, 1, t2);
     stw(t2, G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()));
   }
-  ba_short(try_eden);
+  ba(try_eden);
+  delayed()->nop();
 
   bind(discard_tlab);
   if (TLABStats) {
@@ -3420,7 +3422,8 @@
   sub(top, ThreadLocalAllocBuffer::alignment_reserve_in_bytes(), top);
   st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_end_offset()));
   verify_tlab();
-  ba_short(retry);
+  ba(retry);
+  delayed()->nop();
 }
 
 void MacroAssembler::incr_allocated_bytes(RegisterOrConstant size_in_bytes,
@@ -3752,7 +3755,7 @@
 #define __ masm.
   address start = __ pc();
 
-  Label not_already_dirty, restart, refill;
+  Label not_already_dirty, restart, refill, young_card;
 
 #ifdef _LP64
   __ srlx(O0, CardTableModRefBS::card_shift, O0);
@@ -3763,9 +3766,15 @@
   __ set(addrlit, O1); // O1 := <card table base>
   __ ldub(O0, O1, O2); // O2 := [O0 + O1]
 
+  __ cmp_and_br_short(O2, G1SATBCardTableModRefBS::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card);
+
+  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
+  __ ldub(O0, O1, O2); // O2 := [O0 + O1]
+
   assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code");
   __ cmp_and_br_short(O2, G0, Assembler::notEqual, Assembler::pt, not_already_dirty);
 
+  __ bind(young_card);
   // We didn't take the branch, so we're already dirty: return.
   // Use return-from-leaf
   __ retl();
@@ -3911,7 +3920,7 @@
   // The number of bytes in this code is used by
   // MachCallDynamicJavaNode::ret_addr_offset()
   // if this changes, change that.
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     lduw(src_oop, oopDesc::klass_offset_in_bytes(), klass);
     decode_klass_not_null(klass);
   } else {
@@ -3920,7 +3929,7 @@
 }
 
 void MacroAssembler::store_klass(Register klass, Register dst_oop) {
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     assert(dst_oop != klass, "not enough registers");
     encode_klass_not_null(klass);
     st(klass, dst_oop, oopDesc::klass_offset_in_bytes());
@@ -3930,7 +3939,7 @@
 }
 
 void MacroAssembler::store_klass_gap(Register s, Register d) {
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     assert(s != d, "not enough registers");
     st(s, d, oopDesc::klass_gap_offset_in_bytes());
   }
@@ -4089,28 +4098,37 @@
 }
 
 void MacroAssembler::encode_klass_not_null(Register r) {
-  assert (UseCompressedKlassPointers, "must be compressed");
-  assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
-  assert(r != G6_heapbase, "bad register choice");
-  set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
-  sub(r, G6_heapbase, r);
-  if (Universe::narrow_klass_shift() != 0) {
-    assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
-    srlx(r, LogKlassAlignmentInBytes, r);
+  assert (UseCompressedClassPointers, "must be compressed");
+  if (Universe::narrow_klass_base() != NULL) {
+    assert(r != G6_heapbase, "bad register choice");
+    set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
+    sub(r, G6_heapbase, r);
+    if (Universe::narrow_klass_shift() != 0) {
+      assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
+      srlx(r, LogKlassAlignmentInBytes, r);
+    }
+    reinit_heapbase();
+  } else {
+    assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong");
+    srlx(r, Universe::narrow_klass_shift(), r);
   }
-  reinit_heapbase();
 }
 
 void MacroAssembler::encode_klass_not_null(Register src, Register dst) {
   if (src == dst) {
     encode_klass_not_null(src);
   } else {
-    assert (UseCompressedKlassPointers, "must be compressed");
-    assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
-    set((intptr_t)Universe::narrow_klass_base(), dst);
-    sub(src, dst, dst);
-    if (Universe::narrow_klass_shift() != 0) {
-      srlx(dst, LogKlassAlignmentInBytes, dst);
+    assert (UseCompressedClassPointers, "must be compressed");
+    if (Universe::narrow_klass_base() != NULL) {
+      set((intptr_t)Universe::narrow_klass_base(), dst);
+      sub(src, dst, dst);
+      if (Universe::narrow_klass_shift() != 0) {
+        srlx(dst, LogKlassAlignmentInBytes, dst);
+      }
+    } else {
+      // shift src into dst
+      assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong");
+      srlx(src, Universe::narrow_klass_shift(), dst);
     }
   }
 }
@@ -4119,15 +4137,17 @@
 // generated by decode_klass_not_null() and reinit_heapbase().  Hence, if
 // the instructions they generate change, then this method needs to be updated.
 int MacroAssembler::instr_size_for_decode_klass_not_null() {
-  assert (UseCompressedKlassPointers, "only for compressed klass ptrs");
-  // set + add + set
-  int num_instrs = insts_for_internal_set((intptr_t)Universe::narrow_klass_base()) + 1 +
-    insts_for_internal_set((intptr_t)Universe::narrow_ptrs_base());
-  if (Universe::narrow_klass_shift() == 0) {
-    return num_instrs * BytesPerInstWord;
-  } else { // sllx
-    return (num_instrs + 1) * BytesPerInstWord;
+  assert (UseCompressedClassPointers, "only for compressed klass ptrs");
+  int num_instrs = 1;  // shift src,dst or add
+  if (Universe::narrow_klass_base() != NULL) {
+    // set + add + set
+    num_instrs += insts_for_internal_set((intptr_t)Universe::narrow_klass_base()) +
+                  insts_for_internal_set((intptr_t)Universe::narrow_ptrs_base());
+    if (Universe::narrow_klass_shift() != 0) {
+      num_instrs += 1;  // sllx
+    }
   }
+  return num_instrs * BytesPerInstWord;
 }
 
 // !!! If the instructions that get generated here change then function
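
The size computation above, restated as plain arithmetic. The helper insts_for_set() stands in for insts_for_internal_set(), whose count depends on the constant being materialized; the values here are illustrative:

    #include <cstdint>
    #include <cstdio>

    const int BytesPerInstWord = 4;

    int insts_for_set(intptr_t v) { return (v == 0) ? 1 : 2; } // assumption

    int decode_klass_size(intptr_t klass_base, intptr_t ptrs_base, int shift) {
      int num_instrs = 1;                        // the shift (or the add)
      if (klass_base != 0) {
        num_instrs += insts_for_set(klass_base)  // set narrow_klass_base
                    + insts_for_set(ptrs_base);  // reinit_heapbase()'s set
        if (shift != 0) num_instrs += 1;         // the sllx
      }
      return num_instrs * BytesPerInstWord;
    }

    int main() {
      printf("zero-based, shift 3: %d bytes\n", decode_klass_size(0, 0, 3));
      return 0;
    }
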
@@ -4135,14 +4155,18 @@
 void  MacroAssembler::decode_klass_not_null(Register r) {
   // Do not add assert code to this unless you change vtableStubs_sparc.cpp
   // pd_code_size_limit.
-  assert (UseCompressedKlassPointers, "must be compressed");
-  assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
-  assert(r != G6_heapbase, "bad register choice");
-  set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
-  if (Universe::narrow_klass_shift() != 0)
-    sllx(r, LogKlassAlignmentInBytes, r);
-  add(r, G6_heapbase, r);
-  reinit_heapbase();
+  assert (UseCompressedClassPointers, "must be compressed");
+  if (Universe::narrow_klass_base() != NULL) {
+    assert(r != G6_heapbase, "bad register choice");
+    set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
+    if (Universe::narrow_klass_shift() != 0)
+      sllx(r, LogKlassAlignmentInBytes, r);
+    add(r, G6_heapbase, r);
+    reinit_heapbase();
+  } else {
+    assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong");
+    sllx(r, Universe::narrow_klass_shift(), r);
+  }
 }
 
 void  MacroAssembler::decode_klass_not_null(Register src, Register dst) {
@@ -4151,23 +4175,28 @@
   } else {
     // Do not add assert code to this unless you change vtableStubs_sparc.cpp
     // pd_code_size_limit.
-    assert (UseCompressedKlassPointers, "must be compressed");
-    assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
-    if (Universe::narrow_klass_shift() != 0) {
-      assert((src != G6_heapbase) && (dst != G6_heapbase), "bad register choice");
-      set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
-      sllx(src, LogKlassAlignmentInBytes, dst);
-      add(dst, G6_heapbase, dst);
-      reinit_heapbase();
+    assert (UseCompressedClassPointers, "must be compressed");
+    if (Universe::narrow_klass_base() != NULL) {
+      if (Universe::narrow_klass_shift() != 0) {
+        assert((src != G6_heapbase) && (dst != G6_heapbase), "bad register choice");
+        set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
+        sllx(src, LogKlassAlignmentInBytes, dst);
+        add(dst, G6_heapbase, dst);
+        reinit_heapbase();
+      } else {
+        set((intptr_t)Universe::narrow_klass_base(), dst);
+        add(src, dst, dst);
+      }
     } else {
-      set((intptr_t)Universe::narrow_klass_base(), dst);
-      add(src, dst, dst);
+      // shift/mov src into dst.
+      assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong");
+      sllx(src, Universe::narrow_klass_shift(), dst);
     }
   }
 }
 
 void MacroAssembler::reinit_heapbase() {
-  if (UseCompressedOops || UseCompressedKlassPointers) {
+  if (UseCompressedOops || UseCompressedClassPointers) {
     if (Universe::heap() != NULL) {
       set((intptr_t)Universe::narrow_ptrs_base(), G6_heapbase);
     } else {
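
All of the encode/decode changes above add a second path for zero-based compressed class pointers: with a non-null base, narrow = (addr - base) >> shift and decoding needs the G6_heapbase scratch register; with a null base the transform collapses to a bare shift. The arithmetic, as a self-checking sketch:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    uint32_t encode(uintptr_t addr, uintptr_t base, int shift) {
      return (uint32_t)((addr - base) >> shift);
    }
    uintptr_t decode(uint32_t narrow, uintptr_t base, int shift) {
      return base + ((uintptr_t)narrow << shift);
    }

    int main() {
      const uintptr_t base = (uintptr_t)0x800000000ULL; // illustrative base
      const int shift = 3;                              // LogKlassAlignmentInBytes
      uintptr_t k = base + 0x1230;
      assert(decode(encode(k, base, shift), base, shift) == k);
      assert(decode(encode(k, 0, shift), 0, shift) == k); // zero-based path
      printf("round trips ok\n");
      return 0;
    }
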
--- a/src/cpu/sparc/vm/methodHandles_sparc.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/sparc/vm/methodHandles_sparc.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -121,6 +121,7 @@
 
 void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register target, Register temp,
                                             bool for_compiler_entry) {
+  Label L_no_such_method;
   assert(method == G5_method, "interpreter calling convention");
   assert_different_registers(method, target, temp);
 
@@ -133,6 +134,9 @@
     const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
     __ ld(interp_only, temp);
     __ cmp_and_br_short(temp, 0, Assembler::zero, Assembler::pt, run_compiled_code);
+    // The null-method test is replicated below for the compiled case;
+    // it might be possible to share one test across the verify_thread().
+    __ br_null_short(G5_method, Assembler::pn, L_no_such_method);
     __ ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), target);
     __ jmp(target, 0);
     __ delayed()->nop();
@@ -141,11 +145,19 @@
     // it doesn't matter, since this is interpreter code.
   }
 
+  // Compiled case, either static or fall-through from runtime conditional
+  __ br_null_short(G5_method, Assembler::pn, L_no_such_method);
+
   const ByteSize entry_offset = for_compiler_entry ? Method::from_compiled_offset() :
                                                      Method::from_interpreted_offset();
   __ ld_ptr(G5_method, in_bytes(entry_offset), target);
   __ jmp(target, 0);
   __ delayed()->nop();
+
+  __ bind(L_no_such_method);
+  AddressLiteral ame(StubRoutines::throw_AbstractMethodError_entry());
+  __ jump_to(ame, temp);
+  __ delayed()->nop();
 }
 
 void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
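
The shape of the control flow added above, in pseudocode: both the interpreted and compiled entry paths now null-check the Method* and branch to a shared throw stub rather than jumping through a null entry point. The types here are stand-ins:

    #include <cstdio>

    struct Method { void (*entry)(); };

    void throw_AbstractMethodError() { printf("AbstractMethodError\n"); }

    void jump_from_method_handle(Method* m) {
      if (m == nullptr) {           // L_no_such_method
        throw_AbstractMethodError();
        return;
      }
      m->entry();                   // jmp through the resolved entry
    }

    int main() { jump_from_method_handle(nullptr); return 0; }
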
--- a/src/cpu/sparc/vm/nativeInst_sparc.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/sparc/vm/nativeInst_sparc.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -358,7 +358,7 @@
         oop_Relocation *r = iter.oop_reloc();
         if (oop_addr == NULL) {
           oop_addr = r->oop_addr();
-          *oop_addr = (oop)x;
+          *oop_addr = cast_to_oop(x);
         } else {
           assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
         }
@@ -478,7 +478,7 @@
         oop_Relocation *r = iter.oop_reloc();
         if (oop_addr == NULL) {
           oop_addr = r->oop_addr();
-          *oop_addr = (oop)x;
+          *oop_addr = cast_to_oop(x);
         } else {
           assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
         }
--- a/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -3583,6 +3583,7 @@
   // the pending exception will be picked up by the interpreter.
   __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception);
   __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
+  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_pc_offset()));
   __ bind(noException);
 
   // deallocate the deoptimization frame taking care to preserve the return values
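
The one-line addition above keeps exception_oop and exception_pc in sync: when the pending exception is taken out of the thread, the saved pc is cleared with it, so a stale pc can never pair with a later exception. The invariant, with an illustrative struct:

    #include <cstdio>

    struct Thread {            // stand-in for JavaThread's two fields
      void* exception_oop;
      void* exception_pc;
    };

    void* take_pending_exception(Thread* t) {
      void* ex = t->exception_oop;
      t->exception_oop = nullptr;  // st_ptr(G0, ..., exception_oop_offset())
      t->exception_pc  = nullptr;  // the store this hunk adds
      return ex;
    }

    int main() {
      Thread t{&t, &t};
      take_pending_exception(&t);
      printf("oop=%p pc=%p\n", t.exception_oop, t.exception_pc);
      return 0;
    }
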
--- a/src/cpu/sparc/vm/sparc.ad	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/sparc/vm/sparc.ad	Tue Nov 05 17:38:04 2013 -0800
@@ -557,7 +557,7 @@
     int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
     int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
     int klass_load_size;
-    if (UseCompressedKlassPointers) {
+    if (UseCompressedClassPointers) {
       assert(Universe::heap() != NULL, "java heap should be initialized");
       klass_load_size = MacroAssembler::instr_size_for_decode_klass_not_null() + 1*BytesPerInstWord;
     } else {
@@ -1657,15 +1657,19 @@
 void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
   st->print_cr("\nUEP:");
 #ifdef    _LP64
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     assert(Universe::heap() != NULL, "java heap should be initialized");
     st->print_cr("\tLDUW   [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass");
-    st->print_cr("\tSET    Universe::narrow_klass_base,R_G6_heap_base");
-    if (Universe::narrow_klass_shift() != 0) {
-      st->print_cr("\tSLL    R_G5,3,R_G5");
+    if (Universe::narrow_klass_base() != 0) {
+      st->print_cr("\tSET    Universe::narrow_klass_base,R_G6_heap_base");
+      if (Universe::narrow_klass_shift() != 0) {
+        st->print_cr("\tSLL    R_G5,Universe::narrow_klass_shift,R_G5");
+      }
+      st->print_cr("\tADD    R_G5,R_G6_heap_base,R_G5");
+      st->print_cr("\tSET    Universe::narrow_ptrs_base,R_G6_heap_base");
+    } else {
+      st->print_cr("\tSLL    R_G5,Universe::narrow_klass_shift,R_G5");
     }
-    st->print_cr("\tADD    R_G5,R_G6_heap_base,R_G5");
-    st->print_cr("\tSET    Universe::narrow_ptrs_base,R_G6_heap_base");
   } else {
     st->print_cr("\tLDX    [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
   }
@@ -1897,7 +1901,7 @@
 
 bool Matcher::narrow_klass_use_complex_address() {
   NOT_LP64(ShouldNotCallThis());
-  assert(UseCompressedKlassPointers, "only for compressed klass code");
+  assert(UseCompressedClassPointers, "only for compressed klass code");
   return false;
 }
 
@@ -2018,6 +2022,19 @@
   return L7_REGP_mask();
 }
 
+const RegMask Matcher::mathExactI_result_proj_mask() {
+  return G1_REGI_mask();
+}
+
+const RegMask Matcher::mathExactL_result_proj_mask() {
+  return G1_REGL_mask();
+}
+
+const RegMask Matcher::mathExactI_flags_proj_mask() {
+  return INT_FLAGS_mask();
+}
+
+
 %}
 
 
@@ -2561,7 +2578,7 @@
       int off = __ offset();
       __ load_klass(O0, G3_scratch);
       int klass_load_size;
-      if (UseCompressedKlassPointers) {
+      if (UseCompressedClassPointers) {
         assert(Universe::heap() != NULL, "java heap should be initialized");
         klass_load_size = MacroAssembler::instr_size_for_decode_klass_not_null() + 1*BytesPerInstWord;
       } else {
@@ -4245,12 +4262,16 @@
     greater_equal(0xB);
     less_equal(0x2);
     greater(0xA);
+    overflow(0x7);
+    no_overflow(0xF);
   %}
 %}
 
 // Comparison Op, unsigned
 operand cmpOpU() %{
   match(Bool);
+  predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
+            n->as_Bool()->_test._test != BoolTest::no_overflow);
 
   format %{ "u" %}
   interface(COND_INTER) %{
@@ -4260,12 +4281,16 @@
     greater_equal(0xD);
     less_equal(0x4);
     greater(0xC);
+    overflow(0x7);
+    no_overflow(0xF);
   %}
 %}
 
 // Comparison Op, pointer (same as unsigned)
 operand cmpOpP() %{
   match(Bool);
+  predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
+            n->as_Bool()->_test._test != BoolTest::no_overflow);
 
   format %{ "p" %}
   interface(COND_INTER) %{
@@ -4275,12 +4300,16 @@
     greater_equal(0xD);
     less_equal(0x4);
     greater(0xC);
+    overflow(0x7);
+    no_overflow(0xF);
   %}
 %}
 
 // Comparison Op, branch-register encoding
 operand cmpOp_reg() %{
   match(Bool);
+  predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
+            n->as_Bool()->_test._test != BoolTest::no_overflow);
 
   format %{ "" %}
   interface(COND_INTER) %{
@@ -4290,12 +4319,16 @@
     greater_equal(0x7);
     less_equal   (0x2);
     greater      (0x6);
+    overflow(0x7); // not supported
+    no_overflow(0xF); // not supported
   %}
 %}
 
 // Comparison Code, floating, unordered same as less
 operand cmpOpF() %{
   match(Bool);
+  predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
+            n->as_Bool()->_test._test != BoolTest::no_overflow);
 
   format %{ "fl" %}
   interface(COND_INTER) %{
@@ -4305,12 +4338,17 @@
     greater_equal(0xB);
     less_equal(0xE);
     greater(0x6);
+
+    overflow(0x7); // not supported
+    no_overflow(0xF); // not supported
   %}
 %}
 
 // Used by long compare
 operand cmpOp_commute() %{
   match(Bool);
+  predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
+            n->as_Bool()->_test._test != BoolTest::no_overflow);
 
   format %{ "" %}
   interface(COND_INTER) %{
@@ -4320,6 +4358,8 @@
     greater_equal(0x2);
     less_equal(0xB);
     greater(0x3);
+    overflow(0x7);
+    no_overflow(0xF);
   %}
 %}
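
The overflow/no_overflow encodings added to every cmpOp operand (0x7 = bvs, 0xF = bvc on the SPARC integer condition codes) back the new mathExact projections earlier in this file, and the predicates keep overflow Bools away from operands that cannot encode them. What the condition actually tests, in the portable form the Math.*Exact intrinsics rely on: signed addition overflows iff both inputs share a sign that the result does not.

    #include <cstdint>
    #include <cstdio>

    bool add_overflows(int32_t a, int32_t b) {
      int32_t r = (int32_t)((uint32_t)a + (uint32_t)b); // wrapping add
      return ((a ^ r) & (b ^ r)) < 0;  // result sign differs from both inputs
    }

    int main() {
      printf("%d\n", add_overflows(INT32_MAX, 1)); // 1: the bvs (0x7) case
      printf("%d\n", add_overflows(1, 2));         // 0: the bvc (0xF) case
      return 0;
    }
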
 
--- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -2945,7 +2945,7 @@
 
     BLOCK_COMMENT("arraycopy argument klass checks");
     //  get src->klass()
-    if (UseCompressedKlassPointers) {
+    if (UseCompressedClassPointers) {
       __ delayed()->nop(); // ??? not good
       __ load_klass(src, G3_src_klass);
     } else {
@@ -2980,7 +2980,7 @@
     // Load 32-bits signed value. Use br() instruction with it to check icc.
     __ lduw(G3_src_klass, lh_offset, G5_lh);
 
-    if (UseCompressedKlassPointers) {
+    if (UseCompressedClassPointers) {
       __ load_klass(dst, G4_dst_klass);
     }
     // Handle objArrays completely differently...
@@ -2988,7 +2988,7 @@
     __ set(objArray_lh, O5_temp);
     __ cmp(G5_lh,       O5_temp);
     __ br(Assembler::equal, false, Assembler::pt, L_objArray);
-    if (UseCompressedKlassPointers) {
+    if (UseCompressedClassPointers) {
       __ delayed()->nop();
     } else {
       __ delayed()->ld_ptr(dst, oopDesc::klass_offset_in_bytes(), G4_dst_klass);
--- a/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -153,13 +153,9 @@
 }
 
 
-address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
-  TosState incoming_state = state;
+address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
+  address entry = __ pc();
 
-  Label cont;
-  address compiled_entry = __ pc();
-
-  address entry = __ pc();
 #if !defined(_LP64) && defined(COMPILER2)
   // All return values are where we want them, except for Longs.  C2 returns
   // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
@@ -170,14 +166,12 @@
   // do this here. Unfortunately, if we did a rethrow we'd see a machepilog node
   // first, which would move g1 -> O0/O1 and destroy the exception we were throwing.
 
-  if (incoming_state == ltos) {
+  if (state == ltos) {
     __ srl (G1,  0, O1);
     __ srlx(G1, 32, O0);
   }
 #endif // !_LP64 && COMPILER2
 
-  __ bind(cont);
-
   // The callee returns with the stack possibly adjusted by adapter transition
   // We remove that possible adjustment here.
   // All interpreter local registers are untouched. Any result is passed back
@@ -186,29 +180,18 @@
 
   __ mov(Llast_SP, SP);   // Remove any adapter added stack space.
 
-  Label L_got_cache, L_giant_index;
   const Register cache = G3_scratch;
-  const Register size  = G1_scratch;
-  if (EnableInvokeDynamic) {
-    __ ldub(Address(Lbcp, 0), G1_scratch);  // Load current bytecode.
-    __ cmp_and_br_short(G1_scratch, Bytecodes::_invokedynamic, Assembler::equal, Assembler::pn, L_giant_index);
-  }
-  __ get_cache_and_index_at_bcp(cache, G1_scratch, 1);
-  __ bind(L_got_cache);
-  __ ld_ptr(cache, ConstantPoolCache::base_offset() +
-                   ConstantPoolCacheEntry::flags_offset(), size);
-  __ and3(size, 0xFF, size);                   // argument size in words
-  __ sll(size, Interpreter::logStackElementSize, size); // each argument size in bytes
-  __ add(Lesp, size, Lesp);                    // pop arguments
+  const Register index  = G1_scratch;
+  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);
+
+  const Register flags = cache;
+  __ ld_ptr(cache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset(), flags);
+  const Register parameter_size = flags;
+  __ and3(flags, ConstantPoolCacheEntry::parameter_size_mask, parameter_size);  // argument size in words
+  __ sll(parameter_size, Interpreter::logStackElementSize, parameter_size);     // each argument size in bytes
+  __ add(Lesp, parameter_size, Lesp);                                           // pop arguments
   __ dispatch_next(state, step);
 
-  // out of the main line of code...
-  if (EnableInvokeDynamic) {
-    __ bind(L_giant_index);
-    __ get_cache_and_index_at_bcp(cache, G1_scratch, 1, sizeof(u4));
-    __ ba_short(L_got_cache);
-  }
-
   return entry;
 }
 
--- a/src/cpu/sparc/vm/templateTable_sparc.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/sparc/vm/templateTable_sparc.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -2932,9 +2932,7 @@
   ConstantPoolCacheEntry::verify_tos_state_shift();
   // load return address
   {
-    const address table_addr = (is_invokeinterface || is_invokedynamic) ?
-        (address)Interpreter::return_5_addrs_by_index_table() :
-        (address)Interpreter::return_3_addrs_by_index_table();
+    const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
     AddressLiteral table(table_addr);
     __ set(table, temp);
     __ sll(ra, LogBytesPerWord, ra);
@@ -2984,7 +2982,7 @@
   __ verify_oop(O0_recv);
 
   // get return address
-  AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
+  AddressLiteral table(Interpreter::invoke_return_entry_table());
   __ set(table, Rtemp);
   __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret);          // get return type
   // Make sure we don't need to mask Rret after the above shift
@@ -3026,7 +3024,7 @@
   __ profile_final_call(O4);
 
   // get return address
-  AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
+  AddressLiteral table(Interpreter::invoke_return_entry_table());
   __ set(table, Rtemp);
   __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret);          // get return type
   // Make sure we don't need to mask Rret after the above shift
--- a/src/cpu/sparc/vm/vtableStubs_sparc.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/sparc/vm/vtableStubs_sparc.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -52,6 +52,11 @@
 VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
   const int sparc_code_length = VtableStub::pd_code_size_limit(true);
   VtableStub* s = new(sparc_code_length) VtableStub(true, vtable_index);
+  // Can be NULL if there is no free space in the code cache.
+  if (s == NULL) {
+    return NULL;
+  }
+
   ResourceMark rm;
   CodeBuffer cb(s->entry_point(), sparc_code_length);
   MacroAssembler* masm = new MacroAssembler(&cb);
@@ -125,6 +130,11 @@
 VtableStub* VtableStubs::create_itable_stub(int itable_index) {
   const int sparc_code_length = VtableStub::pd_code_size_limit(false);
   VtableStub* s = new(sparc_code_length) VtableStub(false, itable_index);
+  // Can be NULL if there is no free space in the code cache.
+  if (s == NULL) {
+    return NULL;
+  }
+
   ResourceMark rm;
   CodeBuffer cb(s->entry_point(), sparc_code_length);
   MacroAssembler* masm = new MacroAssembler(&cb);
@@ -218,13 +228,13 @@
       // ld;ld;ld,jmp,nop
       const int basic = 5*BytesPerInstWord +
                         // shift;add for load_klass (only shift with zero heap based)
-                        (UseCompressedKlassPointers ?
+                        (UseCompressedClassPointers ?
                           MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
       return basic + slop;
     } else {
       const int basic = (28 LP64_ONLY(+ 6)) * BytesPerInstWord +
                         // shift;add for load_klass (only shift with zero heap based)
-                        (UseCompressedKlassPointers ?
+                        (UseCompressedClassPointers ?
                           MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
       return (basic + slop);
     }
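
Both stub creators above gain the same guard: an allocation that comes out of the code cache can fail, so the result of the placement-style new must be checked before use. The C++ pattern, with stand-in types (a non-throwing allocation function makes the new-expression itself yield NULL):

    #include <cstddef>
    #include <cstdio>

    struct Stub {
      // non-throwing: the new-expression returns nullptr, ctor is skipped
      static void* operator new(size_t, int /*code_len*/) noexcept {
        return nullptr;  // stand-in for "code cache is full"
      }
    };

    Stub* create_stub(int code_len) {
      Stub* s = new (code_len) Stub();
      if (s == nullptr) {   // the added guard
        return nullptr;     // let the caller handle the failure
      }
      return s;
    }

    int main() { printf("%p\n", (void*)create_stub(128)); return 0; }
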
--- a/src/cpu/x86/vm/assembler_x86.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/x86/vm/assembler_x86.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1405,6 +1405,15 @@
   }
 }
 
+void Assembler::imull(Register dst, Address src) {
+  InstructionMark im(this);
+  prefix(src, dst);
+  emit_int8(0x0F);
+  emit_int8((unsigned char) 0xAF);
+  emit_operand(dst, src);
+}
+
+
 void Assembler::incl(Address dst) {
   // Don't use it directly. Use MacroAssembler::increment() instead.
   InstructionMark im(this);
@@ -4769,7 +4778,7 @@
 }
 
 void Assembler::adcq(Register dst, Register src) {
-  (int) prefixq_and_encode(dst->encoding(), src->encoding());
+  (void) prefixq_and_encode(dst->encoding(), src->encoding());
   emit_arith(0x13, 0xC0, dst, src);
 }
 
@@ -4824,7 +4833,7 @@
 }
 
 void Assembler::andq(Register dst, Register src) {
-  (int) prefixq_and_encode(dst->encoding(), src->encoding());
+  (void) prefixq_and_encode(dst->encoding(), src->encoding());
   emit_arith(0x23, 0xC0, dst, src);
 }
 
@@ -5024,6 +5033,14 @@
   }
 }
 
+void Assembler::imulq(Register dst, Address src) {
+  InstructionMark im(this);
+  prefixq(src, dst);
+  emit_int8(0x0F);
+  emit_int8((unsigned char) 0xAF);
+  emit_operand(dst, src);
+}
+
 void Assembler::incl(Register dst) {
   // Don't use it directly. Use MacroAssembler::incrementl() instead.
   // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
--- a/src/cpu/x86/vm/assembler_x86.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/x86/vm/assembler_x86.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1162,9 +1162,13 @@
 
   void imull(Register dst, Register src);
   void imull(Register dst, Register src, int value);
+  void imull(Register dst, Address src);
 
   void imulq(Register dst, Register src);
   void imulq(Register dst, Register src, int value);
+#ifdef _LP64
+  void imulq(Register dst, Address src);
+#endif
 
 
   // jcc is the generic conditional branch generator to run-
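
The new imull/imulq overloads take a memory source, emitting the two-byte opcode 0F AF with a ModRM-encoded operand (plus a REX.W prefix for the 64-bit form). The byte sequence, reproduced by a tiny stand-alone encoder for the simple [base + disp8] case:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // imul r32, [base + disp8]; reg/base are 3-bit encodings (no REX/SIB cases)
    std::vector<uint8_t> imull_reg_mem(int reg, int base, int8_t disp8) {
      std::vector<uint8_t> out;
      out.push_back(0x0F);
      out.push_back(0xAF);                     // imul r32, r/m32
      out.push_back(0x40 | (reg << 3) | base); // ModRM, mod=01: [base + disp8]
      out.push_back((uint8_t)disp8);
      return out;
    }

    int main() {
      for (uint8_t b : imull_reg_mem(/*eax*/0, /*rbx*/3, 16)) printf("%02X ", b);
      printf(" ; imul eax, [rbx+16]\n");
      return 0;
    }
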
--- a/src/cpu/x86/vm/bytecodeInterpreter_x86.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/x86/vm/bytecodeInterpreter_x86.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -40,11 +40,8 @@
 #include "runtime/synchronizer.hpp"
 #include "runtime/vframeArray.hpp"
 #include "utilities/debug.hpp"
-#ifdef TARGET_ARCH_MODEL_x86_32
-# include "interp_masm_x86_32.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_x86_64
-# include "interp_masm_x86_64.hpp"
+#ifdef TARGET_ARCH_x86
+# include "interp_masm_x86.hpp"
 #endif
 
 #ifdef CC_INTERP
--- a/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -402,6 +402,7 @@
     case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
     case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
     case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
+    case load_appendix_id:      target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
     default: ShouldNotReachHere();
   }
   __ bind(call_patch);
@@ -419,7 +420,7 @@
   for (int j = __ offset() ; j < jmp_off + 5 ; j++ ) {
     __ nop();
   }
-  if (_id == load_klass_id || _id == load_mirror_id) {
+  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
     CodeSection* cs = __ code_section();
     RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1));
     relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, reloc_type, relocInfo::none);
--- a/src/cpu/x86/vm/c1_FrameMap_x86.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/x86/vm/c1_FrameMap_x86.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -52,6 +52,8 @@
 #endif // _LP64
     } else if (type == T_OBJECT || type == T_ARRAY) {
       opr = as_oop_opr(reg);
+    } else if (type == T_METADATA) {
+      opr = as_metadata_opr(reg);
     } else {
       opr = as_opr(reg);
     }
--- a/src/cpu/x86/vm/c1_FrameMap_x86.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/x86/vm/c1_FrameMap_x86.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -148,7 +148,7 @@
 
   static int adjust_reg_range(int range) {
     // Reduce the number of available regs (to free r12) in case of compressed oops
-    if (UseCompressedOops || UseCompressedKlassPointers) return range - 1;
+    if (UseCompressedOops || UseCompressedClassPointers) return range - 1;
     return range;
   }
 
--- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -341,7 +341,7 @@
   Register receiver = FrameMap::receiver_opr->as_register();
   Register ic_klass = IC_Klass;
   const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
-  const bool do_post_padding = VerifyOops || UseCompressedKlassPointers;
+  const bool do_post_padding = VerifyOops || UseCompressedClassPointers;
   if (!do_post_padding) {
     // insert some nops so that the verified entry point is aligned on CodeEntryAlignment
     while ((__ offset() + ic_cmp_size) % CodeEntryAlignment != 0) {
@@ -362,7 +362,7 @@
 
 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
   jobject o = NULL;
-  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_mirror_id);
+  PatchingStub* patch = new PatchingStub(_masm, patching_id(info));
   __ movoop(reg, o);
   patching_epilog(patch, lir_patch_normal, reg, info);
 }
@@ -432,15 +432,16 @@
   int offset = code_offset();
 
   // Fetch the exception from TLS and clear out exception related thread state
-  __ get_thread(rsi);
-  __ movptr(rax, Address(rsi, JavaThread::exception_oop_offset()));
-  __ movptr(Address(rsi, JavaThread::exception_oop_offset()), (intptr_t)NULL_WORD);
-  __ movptr(Address(rsi, JavaThread::exception_pc_offset()), (intptr_t)NULL_WORD);
+  Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
+  NOT_LP64(__ get_thread(rsi));
+  __ movptr(rax, Address(thread, JavaThread::exception_oop_offset()));
+  __ movptr(Address(thread, JavaThread::exception_oop_offset()), (intptr_t)NULL_WORD);
+  __ movptr(Address(thread, JavaThread::exception_pc_offset()), (intptr_t)NULL_WORD);
 
   __ bind(_unwind_handler_entry);
   __ verify_not_null_oop(rax);
   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
-    __ mov(rsi, rax);  // Preserve the exception
+    __ mov(rbx, rax);  // Preserve the exception (rbx is always callee-saved)
   }
 
   // Perform needed unlocking
@@ -448,19 +449,24 @@
   if (method()->is_synchronized()) {
     monitor_address(0, FrameMap::rax_opr);
     stub = new MonitorExitStub(FrameMap::rax_opr, true, 0);
-    __ unlock_object(rdi, rbx, rax, *stub->entry());
+    __ unlock_object(rdi, rsi, rax, *stub->entry());
     __ bind(*stub->continuation());
   }
 
   if (compilation()->env()->dtrace_method_probes()) {
+#ifdef _LP64
+    __ mov(rdi, r15_thread);
+    __ mov_metadata(rsi, method()->constant_encoding());
+#else
     __ get_thread(rax);
     __ movptr(Address(rsp, 0), rax);
     __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding());
+#endif
     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
   }
 
   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
-    __ mov(rax, rsi);  // Restore the exception
+    __ mov(rax, rbx);  // Restore the exception
   }
 
   // remove the activation and dispatch to the unwind handler
@@ -1206,6 +1212,10 @@
   LIR_Address* addr = src->as_address_ptr();
   Address from_addr = as_Address(addr);
 
+  if (addr->base()->type() == T_OBJECT) {
+    __ verify_oop(addr->base()->as_pointer_register());
+  }
+
   switch (type) {
     case T_BOOLEAN: // fall through
     case T_BYTE:    // fall through
@@ -1263,7 +1273,7 @@
       break;
 
     case T_ADDRESS:
-      if (UseCompressedKlassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
+      if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
         __ movl(dest->as_register(), from_addr);
       } else {
         __ movptr(dest->as_register(), from_addr);
@@ -1371,7 +1381,7 @@
     __ verify_oop(dest->as_register());
   } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
 #ifdef _LP64
-    if (UseCompressedKlassPointers) {
+    if (UseCompressedClassPointers) {
       __ decode_klass_not_null(dest->as_register());
     }
 #endif
@@ -1716,7 +1726,7 @@
   } else if (obj == klass_RInfo) {
     klass_RInfo = dst;
   }
-  if (k->is_loaded() && !UseCompressedKlassPointers) {
+  if (k->is_loaded() && !UseCompressedClassPointers) {
     select_different_registers(obj, dst, k_RInfo, klass_RInfo);
   } else {
     Rtmp1 = op->tmp3()->as_register();
@@ -1724,14 +1734,6 @@
   }
 
   assert_different_registers(obj, k_RInfo, klass_RInfo);
-  if (!k->is_loaded()) {
-    klass2reg_with_patching(k_RInfo, op->info_for_patch());
-  } else {
-#ifdef _LP64
-    __ mov_metadata(k_RInfo, k->constant_encoding());
-#endif // _LP64
-  }
-  assert(obj != k_RInfo, "must be different");
 
   __ cmpptr(obj, (int32_t)NULL_WORD);
   if (op->should_profile()) {
@@ -1748,13 +1750,21 @@
   } else {
     __ jcc(Assembler::equal, *obj_is_null);
   }
+
+  if (!k->is_loaded()) {
+    klass2reg_with_patching(k_RInfo, op->info_for_patch());
+  } else {
+#ifdef _LP64
+    __ mov_metadata(k_RInfo, k->constant_encoding());
+#endif // _LP64
+  }
   __ verify_oop(obj);
 
   if (op->fast_check()) {
     // get object class
     // not a safepoint as obj null check happens earlier
 #ifdef _LP64
-    if (UseCompressedKlassPointers) {
+    if (UseCompressedClassPointers) {
       __ load_klass(Rtmp1, obj);
       __ cmpptr(k_RInfo, Rtmp1);
     } else {
@@ -3294,7 +3304,7 @@
     // We don't know the array types are compatible
     if (basic_type != T_OBJECT) {
       // Simple test for basic type arrays
-      if (UseCompressedKlassPointers) {
+      if (UseCompressedClassPointers) {
         __ movl(tmp, src_klass_addr);
         __ cmpl(tmp, dst_klass_addr);
       } else {
@@ -3456,21 +3466,21 @@
     Label known_ok, halt;
     __ mov_metadata(tmp, default_type->constant_encoding());
 #ifdef _LP64
-    if (UseCompressedKlassPointers) {
+    if (UseCompressedClassPointers) {
       __ encode_klass_not_null(tmp);
     }
 #endif
 
     if (basic_type != T_OBJECT) {
 
-      if (UseCompressedKlassPointers)          __ cmpl(tmp, dst_klass_addr);
+      if (UseCompressedClassPointers)          __ cmpl(tmp, dst_klass_addr);
       else                   __ cmpptr(tmp, dst_klass_addr);
       __ jcc(Assembler::notEqual, halt);
-      if (UseCompressedKlassPointers)          __ cmpl(tmp, src_klass_addr);
+      if (UseCompressedClassPointers)          __ cmpl(tmp, src_klass_addr);
       else                   __ cmpptr(tmp, src_klass_addr);
       __ jcc(Assembler::equal, known_ok);
     } else {
-      if (UseCompressedKlassPointers)          __ cmpl(tmp, dst_klass_addr);
+      if (UseCompressedClassPointers)          __ cmpl(tmp, dst_klass_addr);
       else                   __ cmpptr(tmp, dst_klass_addr);
       __ jcc(Assembler::equal, known_ok);
       __ cmpptr(src, dst);
@@ -3632,6 +3642,161 @@
   }
 }
 
+void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
+  Register obj = op->obj()->as_register();
+  Register tmp = op->tmp()->as_pointer_register();
+  Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
+  ciKlass* exact_klass = op->exact_klass();
+  intptr_t current_klass = op->current_klass();
+  bool not_null = op->not_null();
+  bool no_conflict = op->no_conflict();
+
+  Label update, next, none;
+
+  bool do_null = !not_null;
+  bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
+  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;
+
+  assert(do_null || do_update, "why are we here?");
+  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");
+
+  __ verify_oop(obj);
+
+  if (tmp != obj) {
+    __ mov(tmp, obj);
+  }
+  if (do_null) {
+    __ testptr(tmp, tmp);
+    __ jccb(Assembler::notZero, update);
+    if (!TypeEntries::was_null_seen(current_klass)) {
+      __ orptr(mdo_addr, TypeEntries::null_seen);
+    }
+    if (do_update) {
+#ifndef ASSERT
+      __ jmpb(next);
+    }
+#else
+      __ jmp(next);
+    }
+  } else {
+    __ testptr(tmp, tmp);
+    __ jccb(Assembler::notZero, update);
+    __ stop("unexpect null obj");
+#endif
+  }
+
+  __ bind(update);
+
+  if (do_update) {
+#ifdef ASSERT
+    if (exact_klass != NULL) {
+      Label ok;
+      __ load_klass(tmp, tmp);
+      __ push(tmp);
+      __ mov_metadata(tmp, exact_klass->constant_encoding());
+      __ cmpptr(tmp, Address(rsp, 0));
+      __ jccb(Assembler::equal, ok);
+      __ stop("exact klass and actual klass differ");
+      __ bind(ok);
+      __ pop(tmp);
+    }
+#endif
+    if (!no_conflict) {
+      if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
+        if (exact_klass != NULL) {
+          __ mov_metadata(tmp, exact_klass->constant_encoding());
+        } else {
+          __ load_klass(tmp, tmp);
+        }
+
+        __ xorptr(tmp, mdo_addr);
+        __ testptr(tmp, TypeEntries::type_klass_mask);
+        // klass seen before, nothing to do. The unknown bit may have been
+        // set already but no need to check.
+        __ jccb(Assembler::zero, next);
+
+        __ testptr(tmp, TypeEntries::type_unknown);
+        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
+
+        if (TypeEntries::is_type_none(current_klass)) {
+          __ cmpptr(mdo_addr, 0);
+          __ jccb(Assembler::equal, none);
+          __ cmpptr(mdo_addr, TypeEntries::null_seen);
+          __ jccb(Assembler::equal, none);
+          // There is a chance that the checks above (re-reading profiling
+          // data from memory) fail if another thread has just set the
+          // profiled type to this obj's klass
+          __ xorptr(tmp, mdo_addr);
+          __ testptr(tmp, TypeEntries::type_klass_mask);
+          __ jccb(Assembler::zero, next);
+        }
+      } else {
+        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
+               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");
+
+        __ movptr(tmp, mdo_addr);
+        __ testptr(tmp, TypeEntries::type_unknown);
+        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
+      }
+
+      // different than before. Cannot keep accurate profile.
+      __ orptr(mdo_addr, TypeEntries::type_unknown);
+
+      if (TypeEntries::is_type_none(current_klass)) {
+        __ jmpb(next);
+
+        __ bind(none);
+        // first time here. Set profile type.
+        __ movptr(mdo_addr, tmp);
+      }
+    } else {
+      // There's a single possible klass at this profile point
+      assert(exact_klass != NULL, "should be");
+      if (TypeEntries::is_type_none(current_klass)) {
+        __ mov_metadata(tmp, exact_klass->constant_encoding());
+        __ xorptr(tmp, mdo_addr);
+        __ testptr(tmp, TypeEntries::type_klass_mask);
+#ifdef ASSERT
+        __ jcc(Assembler::zero, next);
+
+        {
+          Label ok;
+          __ push(tmp);
+          __ cmpptr(mdo_addr, 0);
+          __ jcc(Assembler::equal, ok);
+          __ cmpptr(mdo_addr, TypeEntries::null_seen);
+          __ jcc(Assembler::equal, ok);
+          // may have been set by another thread
+          __ mov_metadata(tmp, exact_klass->constant_encoding());
+          __ xorptr(tmp, mdo_addr);
+          __ testptr(tmp, TypeEntries::type_mask);
+          __ jcc(Assembler::zero, ok);
+
+          __ stop("unexpected profiling mismatch");
+          __ bind(ok);
+          __ pop(tmp);
+        }
+#else
+        __ jccb(Assembler::zero, next);
+#endif
+        // first time here. Set profile type.
+        __ movptr(mdo_addr, tmp);
+      } else {
+        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
+               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
+
+        __ movptr(tmp, mdo_addr);
+        __ testptr(tmp, TypeEntries::type_unknown);
+        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
+
+        __ orptr(mdo_addr, TypeEntries::type_unknown);
+      }
+    }
+
+    __ bind(next);
+  }
+}
+
 void LIR_Assembler::emit_delay(LIR_OpDelay*) {
   Unimplemented();
 }
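
emit_profile_type above updates one pointer-sized MDO cell per profiled value: the aligned Klass* occupies the high bits and the two low bits carry status (null_seen = 1, type_unknown = 2, as in methodData.hpp; the exact mask definitions live there). The state machine the assembly implements, paraphrased in C++:

    #include <cstdint>
    #include <cstdio>

    const intptr_t null_seen    = 1;
    const intptr_t type_unknown = 2;
    const intptr_t status_bits  = null_seen | type_unknown;
    const intptr_t klass_mask   = ~status_bits;

    // klass is an aligned Klass* value, or 0 for a null reference
    void update(intptr_t* cell, intptr_t klass) {
      if (klass == 0) { *cell |= null_seen; return; }
      if (((*cell ^ klass) & klass_mask) == 0) return; // same klass: done
      if (*cell & type_unknown) return;                // already polluted
      if ((*cell & klass_mask) == 0) {                 // first klass seen
        *cell = klass | (*cell & status_bits);
      } else {
        *cell |= type_unknown;    // conflict: give up on a precise type
      }
    }

    int main() {
      intptr_t cell = 0;
      update(&cell, 0); update(&cell, 0x1000); update(&cell, 0x2000);
      printf("cell=%#lx unknown=%ld\n", (long)cell, (long)(cell & type_unknown));
      return 0;
    }
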
--- a/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1239,7 +1239,7 @@
   }
   LIR_Opr reg = rlock_result(x);
   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
-  if (!x->klass()->is_loaded() || UseCompressedKlassPointers) {
+  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
     tmp3 = new_register(objectType);
   }
   __ checkcast(reg, obj.result(), x->klass(),
@@ -1261,7 +1261,7 @@
   }
   obj.load_item();
   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
-  if (!x->klass()->is_loaded() || UseCompressedKlassPointers) {
+  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
     tmp3 = new_register(objectType);
   }
   __ instanceof(reg, obj.result(), x->klass(),
@@ -1468,19 +1468,18 @@
     addr = new LIR_Address(src.result(), offset, type);
   }
 
-  if (data != dst) {
-    __ move(data, dst);
-    data = dst;
-  }
+  // Because we want a 2-arg form of xchg and xadd
+  __ move(data, dst);
+
   if (x->is_add()) {
-    __ xadd(LIR_OprFact::address(addr), data, dst, LIR_OprFact::illegalOpr);
+    __ xadd(LIR_OprFact::address(addr), dst, dst, LIR_OprFact::illegalOpr);
   } else {
     if (is_obj) {
       // Do the pre-write barrier, if any.
       pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
                   true /* do_load */, false /* patch */, NULL);
     }
-    __ xchg(LIR_OprFact::address(addr), data, dst, LIR_OprFact::illegalOpr);
+    __ xchg(LIR_OprFact::address(addr), dst, dst, LIR_OprFact::illegalOpr);
     if (is_obj) {
       // Seems to be a precise address
       post_barrier(LIR_OprFact::address(addr), data);
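
The rationale for the unconditional move above: x86 XCHG and XADD are destructive two-operand instructions, so the value operand and the result operand must be the same register; copying data into dst first makes that explicit in the LIR. The semantics, modeled with std::atomic:

    #include <atomic>
    #include <cstdio>

    int main() {
      std::atomic<int> mem(40);
      int dst = 2;                 // __ move(data, dst)
      dst = mem.fetch_add(dst);    // xadd [addr], dst: old value lands in dst
      printf("mem=%d old=%d\n", mem.load(), dst); // mem=42 old=40
      return 0;
    }
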
--- a/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -157,7 +157,7 @@
     movptr(Address(obj, oopDesc::mark_offset_in_bytes ()), (int32_t)(intptr_t)markOopDesc::prototype());
   }
 #ifdef _LP64
-  if (UseCompressedKlassPointers) { // Take care not to kill klass
+  if (UseCompressedClassPointers) { // Take care not to kill klass
     movptr(t1, klass);
     encode_klass_not_null(t1);
     movl(Address(obj, oopDesc::klass_offset_in_bytes()), t1);
@@ -171,7 +171,7 @@
     movl(Address(obj, arrayOopDesc::length_offset_in_bytes()), len);
   }
 #ifdef _LP64
-  else if (UseCompressedKlassPointers) {
+  else if (UseCompressedClassPointers) {
     xorptr(t1, t1);
     store_klass_gap(obj, t1);
   }
@@ -334,7 +334,7 @@
   assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
   int start_offset = offset();
 
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     load_klass(rscratch1, receiver);
     cmpptr(rscratch1, iCache);
   } else {
@@ -345,7 +345,7 @@
   jump_cc(Assembler::notEqual,
           RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
   const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
-  assert(UseCompressedKlassPointers || offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry");
+  assert(UseCompressedClassPointers || offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry");
 }
 
 
--- a/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -38,6 +38,9 @@
 #include "runtime/vframeArray.hpp"
 #include "utilities/macros.hpp"
 #include "vmreg_x86.inline.hpp"
+#if INCLUDE_ALL_GCS
+#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
+#endif
 
 
 // Implementation of StubAssembler
@@ -1499,6 +1502,13 @@
       }
       break;
 
+    case load_appendix_patching_id:
+      { StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments);
+        // we should set up register map
+        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
+      }
+      break;
+
     case dtrace_object_alloc_id:
       { // rax,: object
         StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
@@ -1746,13 +1756,17 @@
         __ leal(card_addr, __ as_Address(ArrayAddress(cardtable, index)));
 #endif
 
-        __ cmpb(Address(card_addr, 0), 0);
+        __ cmpb(Address(card_addr, 0), (int)G1SATBCardTableModRefBS::g1_young_card_val());
+        __ jcc(Assembler::equal, done);
+
+        __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
+        __ cmpb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
         __ jcc(Assembler::equal, done);
 
         // storing region crossing non-NULL, card is clean.
         // dirty card and log.
 
-        __ movb(Address(card_addr, 0), 0);
+        __ movb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
 
         __ cmpl(queue_index, 0);
         __ jcc(Assembler::equal, runtime);
--- a/src/cpu/x86/vm/c2_globals_x86.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/x86/vm/c2_globals_x86.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -30,7 +30,6 @@
 
 // Sets the default values for platform dependent flags used by the server compiler.
 // (see c2_globals.hpp).  Alpha-sorted.
-
 define_pd_global(bool, BackgroundCompilation,        true);
 define_pd_global(bool, UseTLAB,                      true);
 define_pd_global(bool, ResizeTLAB,                   true);
@@ -52,6 +51,7 @@
 define_pd_global(intx, ConditionalMoveLimit,         3);
 define_pd_global(intx, FLOATPRESSURE,                6);
 define_pd_global(intx, FreqInlineSize,               325);
+define_pd_global(intx, MinJumpTableSize,             10);
 #ifdef AMD64
 define_pd_global(intx, INTPRESSURE,                  13);
 define_pd_global(intx, InteriorEntryAlignment,       16);
--- a/src/cpu/x86/vm/cppInterpreter_x86.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/x86/vm/cppInterpreter_x86.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -367,7 +367,7 @@
   return entry;
 }
 
-address CppInterpreter::return_entry(TosState state, int length) {
+address CppInterpreter::return_entry(TosState state, int length, Bytecodes::Code code) {
   // make it look good in the debugger
   return CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation);
 }
--- a/src/cpu/x86/vm/frame_x86.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/x86/vm/frame_x86.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -639,7 +639,7 @@
 #ifdef CC_INTERP
         obj = istate->_oop_temp;
 #else
-        obj = (oop) at(interpreter_frame_oop_temp_offset);
+        obj = cast_to_oop(at(interpreter_frame_oop_temp_offset));
 #endif // CC_INTERP
       } else {
         oop* obj_p = (oop*)tos_addr;
--- a/src/cpu/x86/vm/globals_x86.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/x86/vm/globals_x86.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -79,6 +79,8 @@
 // GC Ergo Flags
 define_pd_global(uintx, CMSYoungGenPerWorker, 64*M);  // default max size of CMS young gen, per GC worker thread
 
+define_pd_global(uintx, TypeProfileLevel, 111);
+
 #define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \
                                                                             \
   develop(bool, IEEEPrecision, true,                                        \
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/x86/vm/interp_masm_x86.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,229 @@
+/*
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "interp_masm_x86.hpp"
+#include "interpreter/interpreter.hpp"
+#include "oops/methodData.hpp"
+
+#ifndef CC_INTERP
+void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
+  Label update, next, none;
+
+  verify_oop(obj);
+
+  testptr(obj, obj);
+  jccb(Assembler::notZero, update);
+  orptr(mdo_addr, TypeEntries::null_seen);
+  jmpb(next);
+
+  bind(update);
+  load_klass(obj, obj);
+
+  xorptr(obj, mdo_addr);
+  testptr(obj, TypeEntries::type_klass_mask);
+  jccb(Assembler::zero, next); // klass seen before, nothing to
+                               // do. The unknown bit may have been
+                               // set already but no need to check.
+
+  testptr(obj, TypeEntries::type_unknown);
+  jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
+
+  cmpptr(mdo_addr, 0);
+  jccb(Assembler::equal, none);
+  cmpptr(mdo_addr, TypeEntries::null_seen);
+  jccb(Assembler::equal, none);
+  // There is a chance that the checks above (re-reading profiling
+  // data from memory) fail if another thread has just set the
+  // profiled type to this obj's klass
+  xorptr(obj, mdo_addr);
+  testptr(obj, TypeEntries::type_klass_mask);
+  jccb(Assembler::zero, next);
+
+  // different than before. Cannot keep accurate profile.
+  orptr(mdo_addr, TypeEntries::type_unknown);
+  jmpb(next);
+
+  bind(none);
+  // first time here. Set profile type.
+  movptr(mdo_addr, obj);
+
+  bind(next);
+}
+
+void InterpreterMacroAssembler::profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual) {
+  if (!ProfileInterpreter) {
+    return;
+  }
+
+  if (MethodData::profile_arguments() || MethodData::profile_return()) {
+    Label profile_continue;
+
+    test_method_data_pointer(mdp, profile_continue);
+
+    int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());
+
+    cmpb(Address(mdp, in_bytes(DataLayout::tag_offset()) - off_to_start), is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag);
+    jcc(Assembler::notEqual, profile_continue);
+
+    if (MethodData::profile_arguments()) {
+      Label done;
+      int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());
+      addptr(mdp, off_to_args);
+
+      for (int i = 0; i < TypeProfileArgsLimit; i++) {
+        if (i > 0 || MethodData::profile_return()) {
+          // If the return value type is profiled we may have no arguments to profile
+          movptr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
+          subl(tmp, i*TypeStackSlotEntries::per_arg_count());
+          cmpl(tmp, TypeStackSlotEntries::per_arg_count());
+          jcc(Assembler::less, done);
+        }
+        movptr(tmp, Address(callee, Method::const_offset()));
+        load_unsigned_short(tmp, Address(tmp, ConstMethod::size_of_parameters_offset()));
+        // a stack offset o (zero-based) from the start of the argument
+        // list translates, for n arguments, into offset n - o - 1 from
+        // the end of the argument list
+        subptr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args));
+        subl(tmp, 1);
+        Address arg_addr = argument_address(tmp);
+        movptr(tmp, arg_addr);
+
+        Address mdo_arg_addr(mdp, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args);
+        profile_obj_type(tmp, mdo_arg_addr);
+
+        int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
+        addptr(mdp, to_add);
+        off_to_args += to_add;
+      }
+
+      if (MethodData::profile_return()) {
+        movptr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
+        subl(tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
+      }
+
+      bind(done);
+
+      if (MethodData::profile_return()) {
+        // We're right after the type profile for the last
+        // argument. tmp is the number of cells left in the
+        // CallTypeData/VirtualCallTypeData before its end. Non-zero
+        // if there's a return value to profile.
+        assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
+        shll(tmp, exact_log2(DataLayout::cell_size));
+        addptr(mdp, tmp);
+      }
+      movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp);
+    } else {
+      assert(MethodData::profile_return(), "either profile call args or call ret");
+      update_mdp_by_constant(mdp, in_bytes(ReturnTypeEntry::size()));
+    }
+
+    // mdp points right after the end of the
+    // CallTypeData/VirtualCallTypeData, right after the cells for the
+    // return value type if there's one
+
+    bind(profile_continue);
+  }
+}
+
+void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
+  assert_different_registers(mdp, ret, tmp, _bcp_register);
+  if (ProfileInterpreter && MethodData::profile_return()) {
+    Label profile_continue, done;
+
+    test_method_data_pointer(mdp, profile_continue);
+
+    if (MethodData::profile_return_jsr292_only()) {
+      // If we don't profile all invoke bytecodes we must make sure
+      // it's a bytecode we indeed profile. We can't go back to the
+      // beginning of the ProfileData we intend to update to check its
+      // type because we're right after it and we don't know its
+      // length
+      Label do_profile;
+      cmpb(Address(_bcp_register, 0), Bytecodes::_invokedynamic);
+      jcc(Assembler::equal, do_profile);
+      cmpb(Address(_bcp_register, 0), Bytecodes::_invokehandle);
+      jcc(Assembler::equal, do_profile);
+      get_method(tmp);
+      cmpb(Address(tmp, Method::intrinsic_id_offset_in_bytes()), vmIntrinsics::_compiledLambdaForm);
+      jcc(Assembler::notEqual, profile_continue);
+
+      bind(do_profile);
+    }
+
+    Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size()));
+    mov(tmp, ret);
+    profile_obj_type(tmp, mdo_ret_addr);
+
+    bind(profile_continue);
+  }
+}
+
+void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
+  if (ProfileInterpreter && MethodData::profile_parameters()) {
+    Label profile_continue, done;
+
+    test_method_data_pointer(mdp, profile_continue);
+
+    // Load the offset of the area within the MDO used for
+    // parameters. If it's negative we're not profiling any parameters
+    movl(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
+    testl(tmp1, tmp1);
+    jcc(Assembler::negative, profile_continue);
+
+    // Compute a pointer to the area for parameters from the offset
+    // and move the pointer to the slot for the last
+    // parameters. Collect profiling from last parameter down.
+    // mdo start + parameters offset + array length - 1
+    addptr(mdp, tmp1);
+    movptr(tmp1, Address(mdp, in_bytes(ArrayData::array_len_offset())));
+    decrement(tmp1, TypeStackSlotEntries::per_arg_count());
+
+    Label loop;
+    bind(loop);
+
+    int off_base = in_bytes(ParametersTypeData::stack_slot_offset(0));
+    int type_base = in_bytes(ParametersTypeData::type_offset(0));
+    Address::ScaleFactor per_arg_scale = Address::times(DataLayout::cell_size);
+    Address arg_off(mdp, tmp1, per_arg_scale, off_base);
+    Address arg_type(mdp, tmp1, per_arg_scale, type_base);
+
+    // load offset on the stack from the slot for this parameter
+    movptr(tmp2, arg_off);
+    negptr(tmp2);
+    // read the parameter from the local area
+    movptr(tmp2, Address(_locals_register, tmp2, Interpreter::stackElementScale()));
+
+    // profile the parameter
+    profile_obj_type(tmp2, arg_type);
+
+    // go to next parameter
+    decrement(tmp1, TypeStackSlotEntries::per_arg_count());
+    jcc(Assembler::positive, loop);
+
+    bind(profile_continue);
+  }
+}
+#endif
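
A worked form of the slot arithmetic in profile_arguments_type above: with n parameter words and a recorded slot o counted from the start of the argument list, the index passed to argument_address() is n - o - 1 (the subptr/subl pair in the loop):

    #include <cstdio>

    int esp_index(int n_param_words, int slot_from_start) {
      return n_param_words - slot_from_start - 1;
    }

    int main() {
      // 3 argument words: slots 0,1,2 from the start map to 2,1,0 from the end
      for (int o = 0; o < 3; o++)
        printf("slot %d -> stack index %d\n", o, esp_index(3, o));
      return 0;
    }
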
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/x86/vm/interp_masm_x86.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_X86_VM_INTERP_MASM_X86_HPP
+#define CPU_X86_VM_INTERP_MASM_X86_HPP
+
+#include "asm/macroAssembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "interpreter/invocationCounter.hpp"
+#include "runtime/frame.hpp"
+
+// This file specializes the assembler with interpreter-specific macros
+
+
+class InterpreterMacroAssembler: public MacroAssembler {
+
+#ifdef TARGET_ARCH_MODEL_x86_32
+# include "interp_masm_x86_32.hpp"
+#endif
+#ifdef TARGET_ARCH_MODEL_x86_64
+# include "interp_masm_x86_64.hpp"
+#endif
+
+ private:
+
+  Register _locals_register; // register that contains the pointer to the locals
+  Register _bcp_register; // register that contains the bcp
+
+ public:
+#ifndef CC_INTERP
+  void profile_obj_type(Register obj, const Address& mdo_addr);
+  void profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual);
+  void profile_return_type(Register mdp, Register ret, Register tmp);
+  void profile_parameters_type(Register mdp, Register tmp1, Register tmp2);
+#endif /* !CC_INTERP */
+
+};
+
+#endif // CPU_X86_VM_INTERP_MASM_X86_HPP
--- a/src/cpu/x86/vm/interp_masm_x86_32.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/x86/vm/interp_masm_x86_32.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -23,7 +23,7 @@
  */
 
 #include "precompiled.hpp"
-#include "interp_masm_x86_32.hpp"
+#include "interp_masm_x86.hpp"
 #include "interpreter/interpreter.hpp"
 #include "interpreter/interpreterRuntime.hpp"
 #include "oops/arrayOop.hpp"
@@ -1046,7 +1046,6 @@
   }
 }
 
-
 void InterpreterMacroAssembler::profile_call(Register mdp) {
   if (ProfileInterpreter) {
     Label profile_continue;
--- a/src/cpu/x86/vm/interp_masm_x86_32.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/x86/vm/interp_masm_x86_32.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -22,18 +22,6 @@
  *
  */
 
-#ifndef CPU_X86_VM_INTERP_MASM_X86_32_HPP
-#define CPU_X86_VM_INTERP_MASM_X86_32_HPP
-
-#include "asm/macroAssembler.hpp"
-#include "asm/macroAssembler.inline.hpp"
-#include "interpreter/invocationCounter.hpp"
-#include "runtime/frame.hpp"
-
-// This file specializes the assember with interpreter-specific macros
-
-
-class InterpreterMacroAssembler: public MacroAssembler {
 #ifndef CC_INTERP
  protected:
   // Interpreter specific version of call_VM_base
@@ -59,7 +47,7 @@
 #endif /* CC_INTERP */
 
  public:
-  InterpreterMacroAssembler(CodeBuffer* code) : MacroAssembler(code) {}
+  InterpreterMacroAssembler(CodeBuffer* code) : MacroAssembler(code), _locals_register(rdi), _bcp_register(rsi) {}
 
   void load_earlyret_value(TosState state);
 
@@ -233,7 +221,3 @@
   // support for jvmti
   void notify_method_entry();
   void notify_method_exit(TosState state, NotifyMethodExitMode mode);
-
-};
-
-#endif // CPU_X86_VM_INTERP_MASM_X86_32_HPP
--- a/src/cpu/x86/vm/interp_masm_x86_64.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/x86/vm/interp_masm_x86_64.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -23,7 +23,7 @@
  */
 
 #include "precompiled.hpp"
-#include "interp_masm_x86_64.hpp"
+#include "interp_masm_x86.hpp"
 #include "interpreter/interpreter.hpp"
 #include "interpreter/interpreterRuntime.hpp"
 #include "oops/arrayOop.hpp"
@@ -1067,7 +1067,6 @@
   }
 }
 
-
 void InterpreterMacroAssembler::profile_call(Register mdp) {
   if (ProfileInterpreter) {
     Label profile_continue;
--- a/src/cpu/x86/vm/interp_masm_x86_64.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/x86/vm/interp_masm_x86_64.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -22,18 +22,6 @@
  *
  */
 
-#ifndef CPU_X86_VM_INTERP_MASM_X86_64_HPP
-#define CPU_X86_VM_INTERP_MASM_X86_64_HPP
-
-#include "asm/macroAssembler.hpp"
-#include "asm/macroAssembler.inline.hpp"
-#include "interpreter/invocationCounter.hpp"
-#include "runtime/frame.hpp"
-
-// This file specializes the assember with interpreter-specific macros
-
-
-class InterpreterMacroAssembler: public MacroAssembler {
 #ifndef CC_INTERP
  protected:
   // Interpreter specific version of call_VM_base
@@ -55,7 +43,7 @@
 #endif // CC_INTERP
 
  public:
-  InterpreterMacroAssembler(CodeBuffer* code) : MacroAssembler(code) {}
+  InterpreterMacroAssembler(CodeBuffer* code) : MacroAssembler(code), _locals_register(r14), _bcp_register(r13) {}
 
   void load_earlyret_value(TosState state);
 
@@ -250,6 +238,3 @@
   // support for jvmti/dtrace
   void notify_method_entry();
   void notify_method_exit(TosState state, NotifyMethodExitMode mode);
-};
-
-#endif // CPU_X86_VM_INTERP_MASM_X86_64_HPP
--- a/src/cpu/x86/vm/macroAssembler_x86.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/x86/vm/macroAssembler_x86.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1635,7 +1635,7 @@
 #ifdef ASSERT
   // TraceBytecodes does not use r12 but saves it over the call, so don't verify
   // r12 is the heapbase.
-  LP64_ONLY(if ((UseCompressedOops || UseCompressedKlassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");)
+  LP64_ONLY(if ((UseCompressedOops || UseCompressedClassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");)
 #endif // ASSERT
 
   assert(java_thread != oop_result  , "cannot use the same register for java_thread & oop_result");
@@ -3389,13 +3389,18 @@
   const Register card_addr = tmp;
   lea(card_addr, as_Address(ArrayAddress(cardtable, index)));
 #endif
-  cmpb(Address(card_addr, 0), 0);
+  cmpb(Address(card_addr, 0), (int)G1SATBCardTableModRefBS::g1_young_card_val());
   jcc(Assembler::equal, done);
 
+  membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
+  cmpb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
+  jcc(Assembler::equal, done);
+
+
   // storing a region crossing, non-NULL oop, card is clean.
   // dirty card and log.
 
-  movb(Address(card_addr, 0), 0);
+  movb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
 
   cmpl(queue_index, 0);
   jcc(Assembler::equal, runtime);
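In C++ terms, the filtering this barrier now performs looks roughly like the sketch below; the card values and helpers are stand-ins for the G1SATBCardTableModRefBS / CardTableModRefBS constants and the dirty card queue.

    #include <cstdint>

    static const uint8_t g1_young_card = 0x02;   // placeholder for g1_young_card_val()
    static const uint8_t dirty_card    = 0x00;   // placeholder for dirty_card_val()

    static void storeload_fence() { __sync_synchronize(); }   // stands in for the membar
    static void enqueue_card(volatile uint8_t* /*card*/) {}   // stands in for the queue push

    static void g1_post_barrier_filter(volatile uint8_t* card) {
      if (*card == g1_young_card) return;   // stores into young regions need no barrier
      storeload_fence();                    // order the oop store before re-reading the card
      if (*card == dirty_card) return;      // someone already dirtied (and logged) it
      *card = dirty_card;                   // dirty the card ...
      enqueue_card(card);                   // ... and enqueue it for concurrent refinement
    }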
@@ -4802,7 +4807,7 @@
 
 void MacroAssembler::load_klass(Register dst, Register src) {
 #ifdef _LP64
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
     decode_klass_not_null(dst);
   } else
@@ -4817,7 +4822,7 @@
 
 void MacroAssembler::store_klass(Register dst, Register src) {
 #ifdef _LP64
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     encode_klass_not_null(src);
     movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
   } else
@@ -4892,7 +4897,7 @@
 
 #ifdef _LP64
 void MacroAssembler::store_klass_gap(Register dst, Register src) {
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     // Store to klass gap in destination
     movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src);
   }
@@ -5044,25 +5049,32 @@
 }
 
 void MacroAssembler::encode_klass_not_null(Register r) {
-  assert(Universe::narrow_klass_base() != NULL, "Base should be initialized");
-  // Use r12 as a scratch register in which to temporarily load the narrow_klass_base.
-  assert(r != r12_heapbase, "Encoding a klass in r12");
-  mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base());
-  subq(r, r12_heapbase);
+  if (Universe::narrow_klass_base() != NULL) {
+    // Use r12 as a scratch register in which to temporarily load the narrow_klass_base.
+    assert(r != r12_heapbase, "Encoding a klass in r12");
+    mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base());
+    subq(r, r12_heapbase);
+  }
   if (Universe::narrow_klass_shift() != 0) {
     assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
     shrq(r, LogKlassAlignmentInBytes);
   }
-  reinit_heapbase();
+  if (Universe::narrow_klass_base() != NULL) {
+    reinit_heapbase();
+  }
 }
 
 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
   if (dst == src) {
     encode_klass_not_null(src);
   } else {
-    mov64(dst, (int64_t)Universe::narrow_klass_base());
-    negq(dst);
-    addq(dst, src);
+    if (Universe::narrow_klass_base() != NULL) {
+      mov64(dst, (int64_t)Universe::narrow_klass_base());
+      negq(dst);
+      addq(dst, src);
+    } else {
+      movptr(dst, src);
+    }
     if (Universe::narrow_klass_shift() != 0) {
       assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
       shrq(dst, LogKlassAlignmentInBytes);
@@ -5075,17 +5087,21 @@
 // when (Universe::heap() != NULL).  Hence, if the instructions they
 // generate change, then this method needs to be updated.
 int MacroAssembler::instr_size_for_decode_klass_not_null() {
-  assert (UseCompressedKlassPointers, "only for compressed klass ptrs");
-  // mov64 + addq + shlq? + mov64  (for reinit_heapbase()).
-  return (Universe::narrow_klass_shift() == 0 ? 20 : 24);
+  assert (UseCompressedClassPointers, "only for compressed klass ptrs");
+  if (Universe::narrow_klass_base() != NULL) {
+    // mov64 + addq + shlq? + mov64  (for reinit_heapbase()).
+    return (Universe::narrow_klass_shift() == 0 ? 20 : 24);
+  } else {
+    // longest case for the decode: mov64 + leaq
+    return 16;
+  }
 }
 
 // !!! If the instructions that get generated here change then function
 // instr_size_for_decode_klass_not_null() needs to get updated.
 void  MacroAssembler::decode_klass_not_null(Register r) {
   // Note: it will change flags
-  assert(Universe::narrow_klass_base() != NULL, "Base should be initialized");
-  assert (UseCompressedKlassPointers, "should only be used for compressed headers");
+  assert (UseCompressedClassPointers, "should only be used for compressed headers");
   assert(r != r12_heapbase, "Decoding a klass in r12");
   // Cannot assert, unverified entry point counts instructions (see .ad file)
   // vtableStubs also counts instructions in pd_code_size_limit.
@@ -5095,22 +5111,22 @@
     shlq(r, LogKlassAlignmentInBytes);
   }
   // Use r12 as a scratch register in which to temporarily load the narrow_klass_base.
-  mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base());
-  addq(r, r12_heapbase);
-  reinit_heapbase();
+  if (Universe::narrow_klass_base() != NULL) {
+    mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base());
+    addq(r, r12_heapbase);
+    reinit_heapbase();
+  }
 }
 
 void  MacroAssembler::decode_klass_not_null(Register dst, Register src) {
   // Note: it will change flags
-  assert(Universe::narrow_klass_base() != NULL, "Base should be initialized");
-  assert (UseCompressedKlassPointers, "should only be used for compressed headers");
+  assert (UseCompressedClassPointers, "should only be used for compressed headers");
   if (dst == src) {
     decode_klass_not_null(dst);
   } else {
     // Cannot assert, unverified entry point counts instructions (see .ad file)
     // vtableStubs also counts instructions in pd_code_size_limit.
     // Also do not verify_oop as this is called by verify_oop.
-
     mov64(dst, (int64_t)Universe::narrow_klass_base());
     if (Universe::narrow_klass_shift() != 0) {
       assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
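The point of making the base conditional is easiest to see in plain arithmetic; in the sketch below, base and shift stand for Universe::narrow_klass_base() and Universe::narrow_klass_shift().

    #include <cstdint>

    static uint32_t encode_klass(uintptr_t klass, uintptr_t base, int shift) {
      return (uint32_t)((klass - base) >> shift);   // base == 0 skips the subq/r12 shuffle
    }

    static uintptr_t decode_klass(uint32_t narrow, uintptr_t base, int shift) {
      return base + ((uintptr_t)narrow << shift);   // base == 0 skips addq and reinit_heapbase()
    }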
@@ -5141,7 +5157,7 @@
 }
 
 void  MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
-  assert (UseCompressedKlassPointers, "should only be used for compressed headers");
+  assert (UseCompressedClassPointers, "should only be used for compressed headers");
   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
   int klass_index = oop_recorder()->find_index(k);
   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
@@ -5149,7 +5165,7 @@
 }
 
 void  MacroAssembler::set_narrow_klass(Address dst, Klass* k) {
-  assert (UseCompressedKlassPointers, "should only be used for compressed headers");
+  assert (UseCompressedClassPointers, "should only be used for compressed headers");
   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
   int klass_index = oop_recorder()->find_index(k);
   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
@@ -5175,7 +5191,7 @@
 }
 
 void  MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) {
-  assert (UseCompressedKlassPointers, "should only be used for compressed headers");
+  assert (UseCompressedClassPointers, "should only be used for compressed headers");
   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
   int klass_index = oop_recorder()->find_index(k);
   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
@@ -5183,7 +5199,7 @@
 }
 
 void  MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) {
-  assert (UseCompressedKlassPointers, "should only be used for compressed headers");
+  assert (UseCompressedClassPointers, "should only be used for compressed headers");
   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
   int klass_index = oop_recorder()->find_index(k);
   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
@@ -5191,7 +5207,7 @@
 }
 
 void MacroAssembler::reinit_heapbase() {
-  if (UseCompressedOops || UseCompressedKlassPointers) {
+  if (UseCompressedOops || UseCompressedClassPointers) {
     if (Universe::heap() != NULL) {
       if (Universe::narrow_oop_base() == NULL) {
         MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
--- a/src/cpu/x86/vm/macroAssembler_x86.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/x86/vm/macroAssembler_x86.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -773,6 +773,7 @@
   void orptr(Register dst, Address src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
   void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
   void orptr(Register dst, int32_t src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
+  void orptr(Address dst, int32_t imm32) { LP64_ONLY(orq(dst, imm32)) NOT_LP64(orl(dst, imm32)); }
 
   void testptr(Register src, int32_t imm32) {  LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
   void testptr(Register src1, Register src2);
--- a/src/cpu/x86/vm/methodHandles_x86.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/x86/vm/methodHandles_x86.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -114,6 +114,11 @@
 void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp,
                                             bool for_compiler_entry) {
   assert(method == rbx, "interpreter calling convention");
+
+  Label L_no_such_method;
+  __ testptr(rbx, rbx);
+  __ jcc(Assembler::zero, L_no_such_method);
+
   __ verify_method_ptr(method);
 
   if (!for_compiler_entry && JvmtiExport::can_post_interpreter_events()) {
@@ -138,6 +143,9 @@
   const ByteSize entry_offset = for_compiler_entry ? Method::from_compiled_offset() :
                                                      Method::from_interpreted_offset();
   __ jmp(Address(method, entry_offset));
+
+  __ bind(L_no_such_method);
+  __ jump(RuntimeAddress(StubRoutines::throw_AbstractMethodError_entry()));
 }
 
 void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
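The control flow added above, sketched in C++ (the Method struct and the error stub are stand-ins): a NULL Method* now raises AbstractMethodError instead of jumping through a NULL entry point.

    struct Method { void (*entry)(); };              // stand-in for the real Method*

    static void throw_abstract_method_error() {      // stands in for the StubRoutines entry
      throw "AbstractMethodError";
    }

    static void jump_from_method_handle(Method* m) {
      if (m == 0) {                  // testptr(rbx, rbx); jcc(zero, L_no_such_method)
        throw_abstract_method_error();
      } else {
        m->entry();                  // jmp Address(method, entry_offset)
      }
    }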
@@ -475,7 +483,7 @@
   const char* mh_reg_name = has_mh ? "rcx_mh" : "rcx";
   tty->print_cr("MH %s %s="PTR_FORMAT" sp="PTR_FORMAT,
                 adaptername, mh_reg_name,
-                mh, entry_sp);
+                (void *)mh, entry_sp);
 
   if (Verbose) {
     tty->print_cr("Registers:");
--- a/src/cpu/x86/vm/register_definitions_x86.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/x86/vm/register_definitions_x86.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -26,11 +26,8 @@
 #include "asm/assembler.hpp"
 #include "asm/register.hpp"
 #include "register_x86.hpp"
-#ifdef TARGET_ARCH_MODEL_x86_32
-# include "interp_masm_x86_32.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_x86_64
-# include "interp_masm_x86_64.hpp"
+#ifdef TARGET_ARCH_x86
+# include "interp_masm_x86.hpp"
 #endif
 
 REGISTER_DEFINITION(Register, noreg);
--- a/src/cpu/x86/vm/templateInterpreter_x86.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/x86/vm/templateInterpreter_x86.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -34,9 +34,9 @@
   // Run with +PrintInterpreter to get the VM to print out the size.
   // Max size with JVMTI
 #ifdef AMD64
-  const static int InterpreterCodeSize = 200 * 1024;
+  const static int InterpreterCodeSize = 256 * 1024;
 #else
-  const static int InterpreterCodeSize = 168 * 1024;
+  const static int InterpreterCodeSize = 224 * 1024;
 #endif // AMD64
 
 #endif // CPU_X86_VM_TEMPLATEINTERPRETER_X86_HPP
--- a/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -150,13 +150,12 @@
 }
 
 
-address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
-  TosState incoming_state = state;
+address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
   address entry = __ pc();
 
 #ifdef COMPILER2
   // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
-  if ((incoming_state == ftos && UseSSE < 1) || (incoming_state == dtos && UseSSE < 2)) {
+  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
     for (int i = 1; i < 8; i++) {
         __ ffree(i);
     }
@@ -164,7 +163,7 @@
     __ empty_FPU_stack();
   }
 #endif
-  if ((incoming_state == ftos && UseSSE < 1) || (incoming_state == dtos && UseSSE < 2)) {
+  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
     __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled");
   } else {
     __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
@@ -172,12 +171,12 @@
 
   // In SSE mode, interpreter returns FP results in xmm0 but they need
   // to end up back on the FPU so it can operate on them.
-  if (incoming_state == ftos && UseSSE >= 1) {
+  if (state == ftos && UseSSE >= 1) {
     __ subptr(rsp, wordSize);
     __ movflt(Address(rsp, 0), xmm0);
     __ fld_s(Address(rsp, 0));
     __ addptr(rsp, wordSize);
-  } else if (incoming_state == dtos && UseSSE >= 2) {
+  } else if (state == dtos && UseSSE >= 2) {
     __ subptr(rsp, 2*wordSize);
     __ movdbl(Address(rsp, 0), xmm0);
     __ fld_d(Address(rsp, 0));
@@ -194,27 +193,22 @@
   __ restore_bcp();
   __ restore_locals();
 
-  Label L_got_cache, L_giant_index;
-  if (EnableInvokeDynamic) {
-    __ cmpb(Address(rsi, 0), Bytecodes::_invokedynamic);
-    __ jcc(Assembler::equal, L_giant_index);
+  if (state == atos) {
+    Register mdp = rbx;
+    Register tmp = rcx;
+    __ profile_return_type(mdp, rax, tmp);
   }
-  __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u2));
-  __ bind(L_got_cache);
-  __ movl(rbx, Address(rbx, rcx,
-                    Address::times_ptr, ConstantPoolCache::base_offset() +
-                    ConstantPoolCacheEntry::flags_offset()));
-  __ andptr(rbx, 0xFF);
-  __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale()));
+
+  const Register cache = rbx;
+  const Register index = rcx;
+  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);
+
+  const Register flags = cache;
+  __ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
+  __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask);
+  __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale()));
   __ dispatch_next(state, step);
 
-  // out of the main line of code...
-  if (EnableInvokeDynamic) {
-    __ bind(L_giant_index);
-    __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u4));
-    __ jmp(L_got_cache);
-  }
-
   return entry;
 }
 
@@ -1484,6 +1478,7 @@
         in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
   __ movbool(do_not_unlock_if_synchronized, true);
 
+  __ profile_parameters_type(rax, rcx, rdx);
   // increment invocation count & check for overflow
   Label invocation_counter_overflow;
   Label profile_method;
--- a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -166,7 +166,7 @@
 }
 
 
-address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
+address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
   address entry = __ pc();
 
   // Restore stack bottom in case i2c adjusted stack
@@ -177,28 +177,22 @@
   __ restore_bcp();
   __ restore_locals();
 
-  Label L_got_cache, L_giant_index;
-  if (EnableInvokeDynamic) {
-    __ cmpb(Address(r13, 0), Bytecodes::_invokedynamic);
-    __ jcc(Assembler::equal, L_giant_index);
+  if (state == atos) {
+    Register mdp = rbx;
+    Register tmp = rcx;
+    __ profile_return_type(mdp, rax, tmp);
   }
-  __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u2));
-  __ bind(L_got_cache);
-  __ movl(rbx, Address(rbx, rcx,
-                       Address::times_ptr,
-                       in_bytes(ConstantPoolCache::base_offset()) +
-                       3 * wordSize));
-  __ andl(rbx, 0xFF);
-  __ lea(rsp, Address(rsp, rbx, Address::times_8));
+
+  const Register cache = rbx;
+  const Register index = rcx;
+  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);
+
+  const Register flags = cache;
+  __ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
+  __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask);
+  __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale()));
   __ dispatch_next(state, step);
 
-  // out of the main line of code...
-  if (EnableInvokeDynamic) {
-    __ bind(L_giant_index);
-    __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u4));
-    __ jmp(L_got_cache);
-  }
-
   return entry;
 }
 
@@ -1491,6 +1485,7 @@
         in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
   __ movbool(do_not_unlock_if_synchronized, true);
 
+  __ profile_parameters_type(rax, rcx, rdx);
   // increment invocation count & check for overflow
   Label invocation_counter_overflow;
   Label profile_method;
--- a/src/cpu/x86/vm/templateTable_x86_32.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/x86/vm/templateTable_x86_32.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -2925,9 +2925,7 @@
   ConstantPoolCacheEntry::verify_tos_state_shift();
   // load return address
   {
-    const address table_addr = (is_invokeinterface || is_invokedynamic) ?
-        (address)Interpreter::return_5_addrs_by_index_table() :
-        (address)Interpreter::return_3_addrs_by_index_table();
+    const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
     ExternalAddress table(table_addr);
     __ movptr(flags, ArrayAddress(table, Address(noreg, flags, Address::times_ptr)));
   }
@@ -2970,6 +2968,7 @@
 
   // profile this call
   __ profile_final_call(rax);
+  __ profile_arguments_type(rax, method, rsi, true);
 
   __ jump_from_interpreted(method, rax);
 
@@ -2984,6 +2983,7 @@
 
   // get target Method* & entry point
   __ lookup_virtual_method(rax, index, method);
+  __ profile_arguments_type(rdx, method, rsi, true);
   __ jump_from_interpreted(method, rdx);
 }
 
@@ -3013,6 +3013,7 @@
   __ null_check(rcx);
   // do the call
   __ profile_call(rax);
+  __ profile_arguments_type(rax, rbx, rsi, false);
   __ jump_from_interpreted(rbx, rax);
 }
 
@@ -3023,6 +3024,7 @@
   prepare_invoke(byte_no, rbx);  // get f1 Method*
   // do the call
   __ profile_call(rax);
+  __ profile_arguments_type(rax, rbx, rsi, false);
   __ jump_from_interpreted(rbx, rax);
 }
 
@@ -3082,6 +3084,8 @@
   __ testptr(rbx, rbx);
   __ jcc(Assembler::zero, no_such_method);
 
+  __ profile_arguments_type(rdx, rbx, rsi, true);
+
   // do the call
   // rcx: receiver
   // rbx,: Method*
@@ -3138,6 +3142,7 @@
 
   // FIXME: profile the LambdaForm also
   __ profile_final_call(rax);
+  __ profile_arguments_type(rdx, rbx_method, rsi, true);
 
   __ jump_from_interpreted(rbx_method, rdx);
 }
@@ -3171,6 +3176,7 @@
   // %%% should make a type profile for any invokedynamic that takes a ref argument
   // profile this call
   __ profile_call(rsi);
+  __ profile_arguments_type(rdx, rbx, rsi, false);
 
   __ verify_oop(rax_callsite);
 
--- a/src/cpu/x86/vm/templateTable_x86_64.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/x86/vm/templateTable_x86_64.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -2980,9 +2980,7 @@
   ConstantPoolCacheEntry::verify_tos_state_shift();
   // load return address
   {
-    const address table_addr = (is_invokeinterface || is_invokedynamic) ?
-        (address)Interpreter::return_5_addrs_by_index_table() :
-        (address)Interpreter::return_3_addrs_by_index_table();
+    const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
     ExternalAddress table(table_addr);
     __ lea(rscratch1, table);
     __ movptr(flags, Address(rscratch1, flags, Address::times_ptr));
@@ -3026,6 +3024,7 @@
 
   // profile this call
   __ profile_final_call(rax);
+  __ profile_arguments_type(rax, method, r13, true);
 
   __ jump_from_interpreted(method, rax);
 
@@ -3040,6 +3039,7 @@
 
   // get target Method* & entry point
   __ lookup_virtual_method(rax, index, method);
+  __ profile_arguments_type(rdx, method, r13, true);
   __ jump_from_interpreted(method, rdx);
 }
 
@@ -3069,6 +3069,7 @@
   __ null_check(rcx);
   // do the call
   __ profile_call(rax);
+  __ profile_arguments_type(rax, rbx, r13, false);
   __ jump_from_interpreted(rbx, rax);
 }
 
@@ -3079,6 +3080,7 @@
   prepare_invoke(byte_no, rbx);  // get f1 Method*
   // do the call
   __ profile_call(rax);
+  __ profile_arguments_type(rax, rbx, r13, false);
   __ jump_from_interpreted(rbx, rax);
 }
 
@@ -3136,6 +3138,8 @@
   __ testptr(rbx, rbx);
   __ jcc(Assembler::zero, no_such_method);
 
+  __ profile_arguments_type(rdx, rbx, r13, true);
+
   // do the call
   // rcx: receiver
   // rbx,: Method*
@@ -3193,6 +3197,7 @@
 
   // FIXME: profile the LambdaForm also
   __ profile_final_call(rax);
+  __ profile_arguments_type(rdx, rbx_method, r13, true);
 
   __ jump_from_interpreted(rbx_method, rdx);
 }
@@ -3226,6 +3231,7 @@
   // %%% should make a type profile for any invokedynamic that takes a ref argument
   // profile this call
   __ profile_call(r13);
+  __ profile_arguments_type(rdx, rbx_method, r13, false);
 
   __ verify_oop(rax_callsite);
 
--- a/src/cpu/x86/vm/vtableStubs_x86_32.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/x86/vm/vtableStubs_x86_32.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -25,7 +25,7 @@
 #include "precompiled.hpp"
 #include "asm/macroAssembler.hpp"
 #include "code/vtableStubs.hpp"
-#include "interp_masm_x86_32.hpp"
+#include "interp_masm_x86.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/klassVtable.hpp"
@@ -58,6 +58,11 @@
 VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
   const int i486_code_length = VtableStub::pd_code_size_limit(true);
   VtableStub* s = new(i486_code_length) VtableStub(true, vtable_index);
+  // Can be NULL if there is no free space in the code cache.
+  if (s == NULL) {
+    return NULL;
+  }
+
   ResourceMark rm;
   CodeBuffer cb(s->entry_point(), i486_code_length);
   MacroAssembler* masm = new MacroAssembler(&cb);
@@ -132,6 +137,11 @@
   //            add code here, bump the code stub size returned by pd_code_size_limit!
   const int i486_code_length = VtableStub::pd_code_size_limit(false);
   VtableStub* s = new(i486_code_length) VtableStub(false, itable_index);
+  // Can be NULL if there is no free space in the code cache.
+  if (s == NULL) {
+    return NULL;
+  }
+
   ResourceMark rm;
   CodeBuffer cb(s->entry_point(), i486_code_length);
   MacroAssembler* masm = new MacroAssembler(&cb);
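A small usage sketch of the contract these checks introduce (the type and factory are stand-ins): stub allocation can now fail when the code cache is full, so every caller has to test the result before touching it.

    struct VtableStub { };                                // stand-in type

    static VtableStub* create_vtable_stub(int /*idx*/) {  // stand-in factory
      return 0;                                           // e.g. code cache exhausted
    }

    static VtableStub* get_stub(int vtable_index) {
      VtableStub* s = create_vtable_stub(vtable_index);
      if (s == 0) {
        return 0;   // propagate "code cache full" instead of dereferencing NULL
      }
      return s;
    }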
--- a/src/cpu/x86/vm/vtableStubs_x86_64.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/x86/vm/vtableStubs_x86_64.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -25,7 +25,7 @@
 #include "precompiled.hpp"
 #include "asm/macroAssembler.hpp"
 #include "code/vtableStubs.hpp"
-#include "interp_masm_x86_64.hpp"
+#include "interp_masm_x86.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/klassVtable.hpp"
@@ -49,6 +49,11 @@
 VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
   const int amd64_code_length = VtableStub::pd_code_size_limit(true);
   VtableStub* s = new(amd64_code_length) VtableStub(true, vtable_index);
+  // Can be NULL if there is no free space in the code cache.
+  if (s == NULL) {
+    return NULL;
+  }
+
   ResourceMark rm;
   CodeBuffer cb(s->entry_point(), amd64_code_length);
   MacroAssembler* masm = new MacroAssembler(&cb);
@@ -126,6 +131,11 @@
   // returned by pd_code_size_limit!
   const int amd64_code_length = VtableStub::pd_code_size_limit(false);
   VtableStub* s = new(amd64_code_length) VtableStub(false, itable_index);
+  // Can be NULL if there is no free space in the code cache.
+  if (s == NULL) {
+    return NULL;
+  }
+
   ResourceMark rm;
   CodeBuffer cb(s->entry_point(), amd64_code_length);
   MacroAssembler* masm = new MacroAssembler(&cb);
@@ -211,11 +221,11 @@
   if (is_vtable_stub) {
     // Vtable stub size
     return (DebugVtables ? 512 : 24) + (CountCompiledCalls ? 13 : 0) +
-           (UseCompressedKlassPointers ?  MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
+           (UseCompressedClassPointers ?  MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
   } else {
     // Itable stub size
     return (DebugVtables ? 512 : 74) + (CountCompiledCalls ? 13 : 0) +
-           (UseCompressedKlassPointers ?  MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
+           (UseCompressedClassPointers ?  MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
   }
   // In order to tune these parameters, run the JVM with VM options
   // +PrintMiscellaneous and +WizardMode to see information about
--- a/src/cpu/x86/vm/x86_32.ad	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/x86/vm/x86_32.ad	Tue Nov 05 17:38:04 2013 -0800
@@ -351,7 +351,7 @@
         int format) {
 #ifdef ASSERT
   if (rspec.reloc()->type() == relocInfo::oop_type && d32 != 0 && d32 != (int)Universe::non_oop_word()) {
-    assert(oop(d32)->is_oop() && (ScavengeRootsInCode || !oop(d32)->is_scavengable()), "cannot embed scavengable oops in code");
+    assert(cast_to_oop(d32)->is_oop() && (ScavengeRootsInCode || !cast_to_oop(d32)->is_scavengable()), "cannot embed scavengable oops in code");
   }
 #endif
   cbuf.relocate(cbuf.insts_mark(), rspec, format);
@@ -1534,6 +1534,19 @@
   return EBP_REG_mask();
 }
 
+const RegMask Matcher::mathExactI_result_proj_mask() {
+  return EAX_REG_mask();
+}
+
+const RegMask Matcher::mathExactL_result_proj_mask() {
+  ShouldNotReachHere();
+  return RegMask();
+}
+
+const RegMask Matcher::mathExactI_flags_proj_mask() {
+  return INT_FLAGS_mask();
+}
+
 // Returns true if the high 32 bits of the value is known to be zero.
 bool is_operand_hi32_zero(Node* n) {
   int opc = n->Opcode();
@@ -4922,6 +4935,8 @@
     greater_equal(0xD, "ge");
     less_equal(0xE, "le");
     greater(0xF, "g");
+    overflow(0x0, "o");
+    no_overflow(0x1, "no");
   %}
 %}
 
@@ -4939,6 +4954,8 @@
     greater_equal(0x3, "nb");
     less_equal(0x6, "be");
     greater(0x7, "nbe");
+    overflow(0x0, "o");
+    no_overflow(0x1, "no");
   %}
 %}
 
@@ -4957,6 +4974,8 @@
     greater_equal(0x3, "nb");
     less_equal(0x6, "be");
     greater(0x7, "nbe");
+    overflow(0x0, "o");
+    no_overflow(0x1, "no");
   %}
 %}
 
@@ -4974,6 +4993,8 @@
     greater_equal(0x3, "nb");
     less_equal(0x6, "be");
     greater(0x7, "nbe");
+    overflow(0x0, "o");
+    no_overflow(0x1, "no");
   %}
 %}
 
@@ -4981,6 +5002,8 @@
 operand cmpOp_fcmov() %{
   match(Bool);
 
+  predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
+            n->as_Bool()->_test._test != BoolTest::no_overflow);
   format %{ "" %}
   interface(COND_INTER) %{
     equal        (0x0C8);
@@ -4989,6 +5012,8 @@
     greater_equal(0x1C0);
     less_equal   (0x0D0);
     greater      (0x1D0);
+    overflow(0x0, "o"); // not really supported by the instruction
+    no_overflow(0x1, "no"); // not really supported by the instruction
   %}
 %}
 
@@ -5004,6 +5029,8 @@
     greater_equal(0xE, "le");
     less_equal(0xD, "ge");
     greater(0xC, "l");
+    overflow(0x0, "o");
+    no_overflow(0x1, "no");
   %}
 %}
 
@@ -7496,6 +7523,45 @@
 
 //----------Arithmetic Instructions--------------------------------------------
 //----------Addition Instructions----------------------------------------------
+
+instruct addExactI_eReg(eAXRegI dst, rRegI src, eFlagsReg cr)
+%{
+  match(AddExactI dst src);
+  effect(DEF cr);
+
+  format %{ "ADD    $dst, $src\t# addExact int" %}
+  ins_encode %{
+    __ addl($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct addExactI_eReg_imm(eAXRegI dst, immI src, eFlagsReg cr)
+%{
+  match(AddExactI dst src);
+  effect(DEF cr);
+
+  format %{ "ADD    $dst, $src\t# addExact int" %}
+  ins_encode %{
+    __ addl($dst$$Register, $src$$constant);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct addExactI_eReg_mem(eAXRegI dst, memory src, eFlagsReg cr)
+%{
+  match(AddExactI dst (LoadI src));
+  effect(DEF cr);
+
+  ins_cost(125);
+  format %{ "ADD    $dst,$src\t# addExact int" %}
+  ins_encode %{
+    __ addl($dst$$Register, $src$$Address);
+  %}
+  ins_pipe( ialu_reg_mem );
+%}
+
+
 // Integer Addition Instructions
 instruct addI_eReg(rRegI dst, rRegI src, eFlagsReg cr) %{
   match(Set dst (AddI dst src));
@@ -7804,6 +7870,44 @@
 %}
 
 //----------Subtraction Instructions-------------------------------------------
+
+instruct subExactI_eReg(eAXRegI dst, rRegI src, eFlagsReg cr)
+%{
+  match(SubExactI dst src);
+  effect(DEF cr);
+
+  format %{ "SUB    $dst, $src\t# subExact int" %}
+  ins_encode %{
+    __ subl($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct subExactI_eReg_imm(eAXRegI dst, immI src, eFlagsReg cr)
+%{
+  match(SubExactI dst src);
+  effect(DEF cr);
+
+  format %{ "SUB    $dst, $src\t# subExact int" %}
+  ins_encode %{
+    __ subl($dst$$Register, $src$$constant);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct subExactI_eReg_mem(eAXRegI dst, memory src, eFlagsReg cr)
+%{
+  match(SubExactI dst (LoadI src));
+  effect(DEF cr);
+
+  ins_cost(125);
+  format %{ "SUB    $dst,$src\t# subExact int" %}
+  ins_encode %{
+    __ subl($dst$$Register, $src$$Address);
+  %}
+  ins_pipe( ialu_reg_mem );
+%}
+
 // Integer Subtraction Instructions
 instruct subI_eReg(rRegI dst, rRegI src, eFlagsReg cr) %{
   match(Set dst (SubI dst src));
@@ -7872,6 +7976,16 @@
   ins_pipe( ialu_reg );
 %}
 
+instruct negExactI_eReg(eAXRegI dst, eFlagsReg cr) %{
+  match(NegExactI dst);
+  effect(DEF cr);
+
+  format %{ "NEG    $dst\t# negExact int"%}
+  ins_encode %{
+    __ negl($dst$$Register);
+  %}
+  ins_pipe(ialu_reg);
+%}
 
 //----------Multiplication/Division Instructions-------------------------------
 // Integer Multiplication Instructions
@@ -8084,6 +8198,46 @@
   ins_pipe( pipe_slow );
 %}
 
+instruct mulExactI_eReg(eAXRegI dst, rRegI src, eFlagsReg cr)
+%{
+  match(MulExactI dst src);
+  effect(DEF cr);
+
+  ins_cost(300);
+  format %{ "IMUL   $dst, $src\t# mulExact int" %}
+  ins_encode %{
+    __ imull($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(ialu_reg_reg_alu0);
+%}
+
+instruct mulExactI_eReg_imm(eAXRegI dst, rRegI src, immI imm, eFlagsReg cr)
+%{
+  match(MulExactI src imm);
+  effect(DEF cr);
+
+  ins_cost(300);
+  format %{ "IMUL   $dst, $src, $imm\t# mulExact int" %}
+  ins_encode %{
+    __ imull($dst$$Register, $src$$Register, $imm$$constant);
+  %}
+  ins_pipe(ialu_reg_reg_alu0);
+%}
+
+instruct mulExactI_eReg_mem(eAXRegI dst, memory src, eFlagsReg cr)
+%{
+  match(MulExactI dst (LoadI src));
+  effect(DEF cr);
+
+  ins_cost(350);
+  format %{ "IMUL   $dst, $src\t# mulExact int" %}
+  ins_encode %{
+    __ imull($dst$$Register, $src$$Address);
+  %}
+  ins_pipe(ialu_reg_mem_alu0);
+%}
+
+
 // Integer DIV with Register
 instruct divI_eReg(eAXRegI rax, eDXRegI rdx, eCXRegI div, eFlagsReg cr) %{
   match(Set rax (DivI rax div));
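The AddExactI/SubExactI/MulExactI nodes matched above carry the overflow-checked Math.*Exact semantics: the plain ALU instruction sets the overflow flag, and a following "jo" branch takes the exception path. A C++ sketch of the check that flag encodes (the exception type is illustrative):

    #include <stdexcept>

    static int add_exact(int a, int b) {
      int r = (int)((unsigned)a + (unsigned)b);   // wrapping add, like addl
      if (((a ^ r) & (b ^ r)) < 0) {              // result sign differs from both inputs
        throw std::overflow_error("integer overflow");  // the jo / uncommon-trap path
      }
      return r;
    }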
--- a/src/cpu/x86/vm/x86_64.ad	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/x86/vm/x86_64.ad	Tue Nov 05 17:38:04 2013 -0800
@@ -529,7 +529,7 @@
   if (rspec.reloc()->type() == relocInfo::oop_type &&
       d32 != 0 && d32 != (intptr_t) Universe::non_oop_word()) {
     assert(Universe::heap()->is_in_reserved((address)(intptr_t)d32), "should be real oop");
-    assert(oop((intptr_t)d32)->is_oop() && (ScavengeRootsInCode || !oop((intptr_t)d32)->is_scavengable()), "cannot embed scavengable oops in code");
+    assert(cast_to_oop((intptr_t)d32)->is_oop() && (ScavengeRootsInCode || !cast_to_oop((intptr_t)d32)->is_scavengable()), "cannot embed scavengable oops in code");
   }
 #endif
   cbuf.relocate(cbuf.insts_mark(), rspec, format);
@@ -556,7 +556,7 @@
   if (rspec.reloc()->type() == relocInfo::oop_type &&
       d64 != 0 && d64 != (int64_t) Universe::non_oop_word()) {
     assert(Universe::heap()->is_in_reserved((address)d64), "should be real oop");
-    assert(oop(d64)->is_oop() && (ScavengeRootsInCode || !oop(d64)->is_scavengable()),
+    assert(cast_to_oop(d64)->is_oop() && (ScavengeRootsInCode || !cast_to_oop(d64)->is_scavengable()),
            "cannot embed scavengable oops in code");
   }
 #endif
@@ -1391,7 +1391,7 @@
 #ifndef PRODUCT
 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
 {
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     st->print_cr("movl    rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
     st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
     st->print_cr("\tcmpq    rax, rscratch1\t # Inline cache check");
@@ -1408,7 +1408,7 @@
 {
   MacroAssembler masm(&cbuf);
   uint insts_size = cbuf.insts_size();
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     masm.load_klass(rscratch1, j_rarg0);
     masm.cmpptr(rax, rscratch1);
   } else {
@@ -1557,7 +1557,7 @@
 }
 
 bool Matcher::narrow_klass_use_complex_address() {
-  assert(UseCompressedKlassPointers, "only for compressed klass code");
+  assert(UseCompressedClassPointers, "only for compressed klass code");
   return (LogKlassAlignmentInBytes <= 3);
 }
 
@@ -1649,6 +1649,18 @@
   return PTR_RBP_REG_mask();
 }
 
+const RegMask Matcher::mathExactI_result_proj_mask() {
+  return INT_RAX_REG_mask();
+}
+
+const RegMask Matcher::mathExactL_result_proj_mask() {
+  return LONG_RAX_REG_mask();
+}
+
+const RegMask Matcher::mathExactI_flags_proj_mask() {
+  return INT_FLAGS_mask();
+}
+
 %}
 
 //----------ENCODING BLOCK-----------------------------------------------------
@@ -4133,6 +4145,8 @@
     greater_equal(0xD, "ge");
     less_equal(0xE, "le");
     greater(0xF, "g");
+    overflow(0x0, "o");
+    no_overflow(0x1, "no");
   %}
 %}
 
@@ -4151,6 +4165,8 @@
     greater_equal(0x3, "nb");
     less_equal(0x6, "be");
     greater(0x7, "nbe");
+    overflow(0x0, "o");
+    no_overflow(0x1, "no");
   %}
 %}
 
@@ -4170,6 +4186,8 @@
     greater_equal(0x3, "nb");
     less_equal(0x6, "be");
     greater(0x7, "nbe");
+    overflow(0x0, "o");
+    no_overflow(0x1, "no");
   %}
 %}
 
@@ -4187,6 +4205,8 @@
     greater_equal(0x3, "nb");
     less_equal(0x6, "be");
     greater(0x7, "nbe");
+    overflow(0x0, "o");
+    no_overflow(0x1, "no");
   %}
 %}
 
@@ -6922,6 +6942,82 @@
 //----------Arithmetic Instructions--------------------------------------------
 //----------Addition Instructions----------------------------------------------
 
+instruct addExactI_rReg(rax_RegI dst, rRegI src, rFlagsReg cr)
+%{
+  match(AddExactI dst src);
+  effect(DEF cr);
+
+  format %{ "addl    $dst, $src\t# addExact int" %}
+  ins_encode %{
+    __ addl($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct addExactI_rReg_imm(rax_RegI dst, immI src, rFlagsReg cr)
+%{
+  match(AddExactI dst src);
+  effect(DEF cr);
+
+  format %{ "addl    $dst, $src\t# addExact int" %}
+  ins_encode %{
+    __ addl($dst$$Register, $src$$constant);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct addExactI_rReg_mem(rax_RegI dst, memory src, rFlagsReg cr)
+%{
+  match(AddExactI dst (LoadI src));
+  effect(DEF cr);
+
+  ins_cost(125); // XXX
+  format %{ "addl    $dst, $src\t# addExact int" %}
+  ins_encode %{
+    __ addl($dst$$Register, $src$$Address);
+  %}
+
+  ins_pipe(ialu_reg_mem);
+%}
+
+instruct addExactL_rReg(rax_RegL dst, rRegL src, rFlagsReg cr)
+%{
+  match(AddExactL dst src);
+  effect(DEF cr);
+
+  format %{ "addq    $dst, $src\t# addExact long" %}
+  ins_encode %{
+    __ addq($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct addExactL_rReg_imm(rax_RegL dst, immL32 src, rFlagsReg cr)
+%{
+  match(AddExactL dst src);
+  effect(DEF cr);
+
+  format %{ "addq    $dst, $src\t# addExact long" %}
+  ins_encode %{
+    __ addq($dst$$Register, $src$$constant);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct addExactL_rReg_mem(rax_RegL dst, memory src, rFlagsReg cr)
+%{
+  match(AddExactL dst (LoadL src));
+  effect(DEF cr);
+
+  ins_cost(125); // XXX
+  format %{ "addq    $dst, $src\t# addExact long" %}
+  ins_encode %{
+    __ addq($dst$$Register, $src$$Address);
+  %}
+
+  ins_pipe(ialu_reg_mem);
+%}
+
 instruct addI_rReg(rRegI dst, rRegI src, rFlagsReg cr)
 %{
   match(Set dst (AddI dst src));
@@ -7534,6 +7630,80 @@
   ins_pipe(ialu_mem_imm);
 %}
 
+instruct subExactI_rReg(rax_RegI dst, rRegI src, rFlagsReg cr)
+%{
+  match(SubExactI dst src);
+  effect(DEF cr);
+
+  format %{ "subl    $dst, $src\t# subExact int" %}
+  ins_encode %{
+    __ subl($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct subExactI_rReg_imm(rax_RegI dst, immI src, rFlagsReg cr)
+%{
+  match(SubExactI dst src);
+  effect(DEF cr);
+
+  format %{ "subl    $dst, $src\t# subExact int" %}
+  ins_encode %{
+    __ subl($dst$$Register, $src$$constant);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct subExactI_rReg_mem(rax_RegI dst, memory src, rFlagsReg cr)
+%{
+  match(SubExactI dst (LoadI src));
+  effect(DEF cr);
+
+  ins_cost(125);
+  format %{ "subl    $dst, $src\t# subExact int" %}
+  ins_encode %{
+    __ subl($dst$$Register, $src$$Address);
+  %}
+  ins_pipe(ialu_reg_mem);
+%}
+
+instruct subExactL_rReg(rax_RegL dst, rRegL src, rFlagsReg cr)
+%{
+  match(SubExactL dst src);
+  effect(DEF cr);
+
+  format %{ "subq    $dst, $src\t# subExact long" %}
+  ins_encode %{
+    __ subq($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct subExactL_rReg_imm(rax_RegL dst, immL32 src, rFlagsReg cr)
+%{
+  match(SubExactL dst src);
+  effect(DEF cr);
+
+  format %{ "subq    $dst, $src\t# subExact long" %}
+  ins_encode %{
+    __ subq($dst$$Register, $src$$constant);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct subExactL_rReg_mem(rax_RegL dst, memory src, rFlagsReg cr)
+%{
+  match(SubExactL dst (LoadL src));
+  effect(DEF cr);
+
+  ins_cost(125);
+  format %{ "subq    $dst, $src\t# subExact long" %}
+  ins_encode %{
+    __ subq($dst$$Register, $src$$Address);
+  %}
+  ins_pipe(ialu_reg_mem);
+%}
+
 instruct subL_rReg(rRegL dst, rRegL src, rFlagsReg cr)
 %{
   match(Set dst (SubL dst src));
@@ -7650,6 +7820,30 @@
   ins_pipe(ialu_reg);
 %}
 
+instruct negExactI_rReg(rax_RegI dst, rFlagsReg cr)
+%{
+  match(NegExactI dst);
+  effect(KILL cr);
+
+  format %{ "negl    $dst\t# negExact int" %}
+  ins_encode %{
+    __ negl($dst$$Register);
+  %}
+  ins_pipe(ialu_reg);
+%}
+
+instruct negExactL_rReg(rax_RegL dst, rFlagsReg cr)
+%{
+  match(NegExactL dst);
+  effect(KILL cr);
+
+  format %{ "negq    $dst\t# negExact long" %}
+  ins_encode %{
+    __ negq($dst$$Register);
+  %}
+  ins_pipe(ialu_reg);
+%}
+
 
 //----------Multiplication/Division Instructions-------------------------------
 // Integer Multiplication Instructions
@@ -7767,6 +7961,86 @@
   ins_pipe(ialu_reg_reg_alu0);
 %}
 
+
+instruct mulExactI_rReg(rax_RegI dst, rRegI src, rFlagsReg cr)
+%{
+  match(MulExactI dst src);
+  effect(DEF cr);
+
+  ins_cost(300);
+  format %{ "imull   $dst, $src\t# mulExact int" %}
+  ins_encode %{
+    __ imull($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(ialu_reg_reg_alu0);
+%}
+
+
+instruct mulExactI_rReg_imm(rax_RegI dst, rRegI src, immI imm, rFlagsReg cr)
+%{
+  match(MulExactI src imm);
+  effect(DEF cr);
+
+  ins_cost(300);
+  format %{ "imull   $dst, $src, $imm\t# mulExact int" %}
+  ins_encode %{
+    __ imull($dst$$Register, $src$$Register, $imm$$constant);
+  %}
+  ins_pipe(ialu_reg_reg_alu0);
+%}
+
+instruct mulExactI_rReg_mem(rax_RegI dst, memory src, rFlagsReg cr)
+%{
+  match(MulExactI dst (LoadI src));
+  effect(DEF cr);
+
+  ins_cost(350);
+  format %{ "imull   $dst, $src\t# mulExact int" %}
+  ins_encode %{
+    __ imull($dst$$Register, $src$$Address);
+  %}
+  ins_pipe(ialu_reg_mem_alu0);
+%}
+
+instruct mulExactL_rReg(rax_RegL dst, rRegL src, rFlagsReg cr)
+%{
+  match(MulExactL dst src);
+  effect(DEF cr);
+
+  ins_cost(300);
+  format %{ "imulq   $dst, $src\t# mulExact long" %}
+  ins_encode %{
+    __ imulq($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(ialu_reg_reg_alu0);
+%}
+
+instruct mulExactL_rReg_imm(rax_RegL dst, rRegL src, immL32 imm, rFlagsReg cr)
+%{
+  match(MulExactL src imm);
+  effect(DEF cr);
+
+  ins_cost(300);
+  format %{ "imulq   $dst, $src, $imm\t# mulExact long" %}
+  ins_encode %{
+    __ imulq($dst$$Register, $src$$Register, $imm$$constant);
+  %}
+  ins_pipe(ialu_reg_reg_alu0);
+%}
+
+instruct mulExactL_rReg_mem(rax_RegL dst, memory src, rFlagsReg cr)
+%{
+  match(MulExactL dst (LoadL src));
+  effect(DEF cr);
+
+  ins_cost(350);
+  format %{ "imulq   $dst, $src\t# mulExact long" %}
+  ins_encode %{
+    __ imulq($dst$$Register, $src$$Address);
+  %}
+  ins_pipe(ialu_reg_mem_alu0);
+%}
+
 instruct divI_rReg(rax_RegI rax, rdx_RegI rdx, no_rax_rdx_RegI div,
                    rFlagsReg cr)
 %{
--- a/src/cpu/zero/vm/cppInterpreter_zero.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/zero/vm/cppInterpreter_zero.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1006,7 +1006,7 @@
   istate->set_stack_limit(stack_base - method->max_stack() - 1);
 }
 
-address CppInterpreter::return_entry(TosState state, int length) {
+address CppInterpreter::return_entry(TosState state, int length, Bytecodes::Code code) {
   ShouldNotCallThis();
   return NULL;
 }
--- a/src/cpu/zero/vm/globals_zero.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/cpu/zero/vm/globals_zero.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -58,6 +58,8 @@
 // GC Ergo Flags
 define_pd_global(uintx, CMSYoungGenPerWorker, 16*M);  // default max size of CMS young gen, per GC worker thread
 
+define_pd_global(uintx, TypeProfileLevel, 0);
+
 #define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct)
 
 #endif // CPU_ZERO_VM_GLOBALS_ZERO_HPP
--- a/src/os/bsd/vm/osThread_bsd.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/os/bsd/vm/osThread_bsd.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -42,7 +42,7 @@
 #ifdef __APPLE__
   typedef thread_t thread_id_t;
 #else
-  typedef pthread_t thread_id_t;
+  typedef pid_t thread_id_t;
 #endif
 
   // _pthread_id is the pthread id, which is used by library calls
--- a/src/os/bsd/vm/os_bsd.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/os/bsd/vm/os_bsd.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -100,6 +100,7 @@
 # include <stdint.h>
 # include <inttypes.h>
 # include <sys/ioctl.h>
+# include <sys/syscall.h>
 
 #if defined(__FreeBSD__) || defined(__NetBSD__)
 # include <elf.h>
@@ -152,14 +153,27 @@
 // utility functions
 
 static int SR_initialize();
+static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);
 
 julong os::available_memory() {
   return Bsd::available_memory();
 }
 
+// "available" here means free memory
 julong os::Bsd::available_memory() {
-  // XXXBSD: this is just a stopgap implementation
-  return physical_memory() >> 2;
+  uint64_t available = physical_memory() >> 2;
+#ifdef __APPLE__
+  mach_msg_type_number_t count = HOST_VM_INFO64_COUNT;
+  vm_statistics64_data_t vmstat;
+  kern_return_t kerr = host_statistics64(mach_host_self(), HOST_VM_INFO64,
+                                         (host_info64_t)&vmstat, &count);
+  assert(kerr == KERN_SUCCESS,
+         "host_statistics64 failed - check mach_host_self() and count");
+  if (kerr == KERN_SUCCESS) {
+    available = vmstat.free_count * os::vm_page_size();
+  }
+#endif
+  return available;
 }
 
 julong os::physical_memory() {
@@ -247,7 +261,17 @@
    * since it returns a 64 bit value)
    */
   mib[0] = CTL_HW;
+
+#if defined (HW_MEMSIZE) // Apple
   mib[1] = HW_MEMSIZE;
+#elif defined(HW_PHYSMEM) // Most of BSD
+  mib[1] = HW_PHYSMEM;
+#elif defined(HW_REALMEM) // Old FreeBSD
+  mib[1] = HW_REALMEM;
+#else
+  #error No way to get physical memory
+#endif
+
   len = sizeof(mem_val);
   if (sysctl(mib, 2, &mem_val, &len, NULL, 0) != -1) {
        assert(len == sizeof(mem_val), "unexpected data size");
@@ -679,18 +703,12 @@
     return NULL;
   }
 
+  osthread->set_thread_id(os::Bsd::gettid());
+
 #ifdef __APPLE__
-  // thread_id is mach thread on macos, which pthreads graciously caches and provides for us
-  mach_port_t thread_id = ::pthread_mach_thread_np(::pthread_self());
-  guarantee(thread_id != 0, "thread id missing from pthreads");
-  osthread->set_thread_id(thread_id);
-
-  uint64_t unique_thread_id = locate_unique_thread_id(thread_id);
+  uint64_t unique_thread_id = locate_unique_thread_id(osthread->thread_id());
   guarantee(unique_thread_id != 0, "unique thread id was not found");
   osthread->set_unique_thread_id(unique_thread_id);
-#else
-  // thread_id is pthread_id on BSD
-  osthread->set_thread_id(::pthread_self());
 #endif
   // initialize signal mask for this thread
   os::Bsd::hotspot_sigmask(thread);
@@ -847,18 +865,13 @@
     return false;
   }
 
+  osthread->set_thread_id(os::Bsd::gettid());
+
   // Store pthread info into the OSThread
 #ifdef __APPLE__
-  // thread_id is mach thread on macos, which pthreads graciously caches and provides for us
-  mach_port_t thread_id = ::pthread_mach_thread_np(::pthread_self());
-  guarantee(thread_id != 0, "just checking");
-  osthread->set_thread_id(thread_id);
-
-  uint64_t unique_thread_id = locate_unique_thread_id(thread_id);
+  uint64_t unique_thread_id = locate_unique_thread_id(osthread->thread_id());
   guarantee(unique_thread_id != 0, "just checking");
   osthread->set_unique_thread_id(unique_thread_id);
-#else
-  osthread->set_thread_id(::pthread_self());
 #endif
   osthread->set_pthread_id(::pthread_self());
 
@@ -932,17 +945,15 @@
 // Used by VMSelfDestructTimer and the MemProfiler.
 double os::elapsedTime() {
 
-  return (double)(os::elapsed_counter()) * 0.000001;
+  return ((double)os::elapsed_counter()) / os::elapsed_frequency();
 }
 
 jlong os::elapsed_counter() {
-  timeval time;
-  int status = gettimeofday(&time, NULL);
-  return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count;
+  return javaTimeNanos() - initial_time_count;
 }
 
 jlong os::elapsed_frequency() {
-  return (1000 * 1000);
+  return NANOSECS_PER_SEC; // nanosecond resolution
 }
 
 bool os::supports_vtime() { return true; }
@@ -1125,6 +1136,31 @@
   return n;
 }
 
+// Information about the current thread in a variety of formats
+pid_t os::Bsd::gettid() {
+  int retval = -1;
+
+#ifdef __APPLE__ // XNU kernel
+  // Although the Mach port is not actually a thread id, use it instead of
+  // syscall(SYS_thread_selfid) because it is guaranteed to fit in a u4.
+  retval = ::pthread_mach_thread_np(::pthread_self());
+  guarantee(retval != 0, "just checking");
+  return retval;
+
+#elif __FreeBSD__
+  retval = syscall(SYS_thr_self);
+#elif __OpenBSD__
+  retval = syscall(SYS_getthrid);
+#elif __NetBSD__
+  retval = (pid_t) syscall(SYS__lwp_self);
+#endif
+
+  if (retval == -1) {
+    return getpid();
+  }
+  return retval;
+}
+
 intx os::current_thread_id() {
 #ifdef __APPLE__
   return (intx)::pthread_mach_thread_np(::pthread_self());
@@ -1132,6 +1167,7 @@
   return (intx)::pthread_self();
 #endif
 }
+
 int os::current_process_id() {
 
   // Under the old bsd thread library, bsd gives each thread
@@ -1858,7 +1894,7 @@
     bool timedwait(unsigned int sec, int nsec);
   private:
     jlong currenttime() const;
-    semaphore_t _semaphore;
+    os_semaphore_t _semaphore;
 };
 
 Semaphore::Semaphore() : _semaphore(0) {
@@ -1926,7 +1962,7 @@
 
 bool Semaphore::timedwait(unsigned int sec, int nsec) {
   struct timespec ts;
-  jlong endtime = unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);
+  unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);
 
   while (1) {
     int result = sem_timedwait(&_semaphore, &ts);
@@ -3500,7 +3536,7 @@
   Bsd::_main_thread = pthread_self();
 
   Bsd::clock_init();
-  initial_time_count = os::elapsed_counter();
+  initial_time_count = javaTimeNanos();
 
 #ifdef __APPLE__
   // XXXDARWIN
@@ -3545,8 +3581,6 @@
 #endif
   }
 
-  os::large_page_init();
-
   // initialize suspend/resume support - must do this before signal_sets_init()
   if (SR_initialize() != 0) {
     perror("SR_initialize failed");
@@ -4666,6 +4700,10 @@
 // as libawt.so, and renamed libawt_xawt.so
 //
 bool os::is_headless_jre() {
+#ifdef __APPLE__
+    // We no longer build headless-only on Mac OS X
+    return false;
+#else
     struct stat statbuf;
     char buf[MAXPATHLEN];
     char libmawtpath[MAXPATHLEN];
@@ -4697,6 +4735,7 @@
     if (::stat(libmawtpath, &statbuf) == 0) return false;
 
     return true;
+#endif
 }
 
 // Get the default path to the core file
--- a/src/os/bsd/vm/os_bsd.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/os/bsd/vm/os_bsd.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -84,6 +84,7 @@
   static void hotspot_sigmask(Thread* thread);
 
   static bool is_initial_thread(void);
+  static pid_t gettid();
 
   static int page_size(void)                                        { return _page_size; }
   static void set_page_size(int val)                                { _page_size = val; }
--- a/src/os/linux/vm/globals_linux.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/os/linux/vm/globals_linux.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -53,7 +53,7 @@
 // Defines Linux-specific default values. The flags are available on all
 // platforms, but they may have different default values on other platforms.
 //
-define_pd_global(bool, UseLargePages, true);
+define_pd_global(bool, UseLargePages, false);
 define_pd_global(bool, UseLargePagesIndividualAllocation, false);
 define_pd_global(bool, UseOSErrorReporting, false);
 define_pd_global(bool, UseThreadPriorities, true) ;
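With the default flipped to false here, large pages on Linux become opt-in: the old behavior is still available, but only when requested explicitly with -XX:+UseLargePages on the command line.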
--- a/src/os/linux/vm/os_linux.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/os/linux/vm/os_linux.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -131,6 +131,7 @@
 bool os::Linux::_supports_fast_thread_cpu_time = false;
 const char * os::Linux::_glibc_version = NULL;
 const char * os::Linux::_libpthread_version = NULL;
+pthread_condattr_t os::Linux::_condattr[1];
 
 static jlong initial_time_count=0;
 
@@ -1334,17 +1335,15 @@
 // Used by VMSelfDestructTimer and the MemProfiler.
 double os::elapsedTime() {
 
-  return (double)(os::elapsed_counter()) * 0.000001;
+  return ((double)os::elapsed_counter()) / os::elapsed_frequency(); // nanosecond resolution
 }
 
 jlong os::elapsed_counter() {
-  timeval time;
-  int status = gettimeofday(&time, NULL);
-  return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count;
+  return javaTimeNanos() - initial_time_count;
 }
 
 jlong os::elapsed_frequency() {
-  return (1000 * 1000);
+  return NANOSECS_PER_SEC; // nanosecond resolution
 }
 
 bool os::supports_vtime() { return true; }
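With the counter now in nanoseconds on both BSD and Linux, converting an elapsed reading to seconds is a single division by the frequency; a minimal sketch:

    static double interval_seconds(long long start_ticks, long long end_ticks,
                                   long long frequency /* NANOSECS_PER_SEC */) {
      return (double)(end_ticks - start_ticks) / (double)frequency;
    }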
@@ -1401,12 +1400,15 @@
           clock_gettime_func(CLOCK_MONOTONIC, &tp)  == 0) {
         // yes, monotonic clock is supported
         _clock_gettime = clock_gettime_func;
+        return;
       } else {
         // close librt if there is no monotonic clock
         dlclose(handle);
       }
     }
   }
+  warning("No monotonic clock was available - timed services may " \
+          "be adversely affected if the time-of-day clock changes");
 }
 
 #ifndef SYS_clock_getres
@@ -2167,23 +2169,49 @@
 }
 
 // Try to identify popular distros.
-// Most Linux distributions have /etc/XXX-release file, which contains
-// the OS version string. Some have more than one /etc/XXX-release file
-// (e.g. Mandrake has both /etc/mandrake-release and /etc/redhat-release.),
-// so the order is important.
+// Most Linux distributions have a /etc/XXX-release file, which contains
+// the OS version string. Newer Linux distributions have a /etc/lsb-release
+// file that also contains the OS version string. Some have more than one
+// /etc/XXX-release file (e.g. Mandrake has both /etc/mandrake-release and
+// /etc/redhat-release), so the order is important.
+// Any Linux that is based on Red Hat (e.g. Oracle, Mandrake, Sun JDS...) has
+// its own specific XXX-release file as well as a redhat-release file.
+// Because of this, the XXX-release file needs to be searched for before the
+// redhat-release file.
+// Since Red Hat has an lsb-release file that is not very descriptive, the
+// search for redhat-release needs to come before lsb-release.
+// Since the lsb-release file is the new standard, it needs to be searched
+// before the older style release files.
+// Searching system-release (Red Hat) and os-release (other Linuxes) is the
+// next-to-last resort.  The os-release file is a new standard that contains
+// distribution information and the system-release file seems to be an old
+// standard that has been replaced by the lsb-release and os-release files.
+// Searching for the debian_version file is the last resort.  It contains
+// an informative string like "6.0.6" or "wheezy/sid". Because of this
+// "Debian " is printed before the contents of the debian_version file.
 void os::Linux::print_distro_info(outputStream* st) {
-  if (!_print_ascii_file("/etc/mandrake-release", st) &&
-      !_print_ascii_file("/etc/sun-release", st) &&
-      !_print_ascii_file("/etc/redhat-release", st) &&
-      !_print_ascii_file("/etc/SuSE-release", st) &&
-      !_print_ascii_file("/etc/turbolinux-release", st) &&
-      !_print_ascii_file("/etc/gentoo-release", st) &&
-      !_print_ascii_file("/etc/debian_version", st) &&
-      !_print_ascii_file("/etc/ltib-release", st) &&
-      !_print_ascii_file("/etc/angstrom-version", st)) {
-      st->print("Linux");
-  }
-  st->cr();
+   if (!_print_ascii_file("/etc/oracle-release", st) &&
+       !_print_ascii_file("/etc/mandriva-release", st) &&
+       !_print_ascii_file("/etc/mandrake-release", st) &&
+       !_print_ascii_file("/etc/sun-release", st) &&
+       !_print_ascii_file("/etc/redhat-release", st) &&
+       !_print_ascii_file("/etc/lsb-release", st) &&
+       !_print_ascii_file("/etc/SuSE-release", st) &&
+       !_print_ascii_file("/etc/turbolinux-release", st) &&
+       !_print_ascii_file("/etc/gentoo-release", st) &&
+       !_print_ascii_file("/etc/ltib-release", st) &&
+       !_print_ascii_file("/etc/angstrom-version", st) &&
+       !_print_ascii_file("/etc/system-release", st) &&
+       !_print_ascii_file("/etc/os-release", st)) {
+
+       if (file_exists("/etc/debian_version")) {
+         st->print("Debian ");
+         _print_ascii_file("/etc/debian_version", st);
+       } else {
+         st->print("Linux");
+       }
+   }
+   st->cr();
 }
 
 void os::Linux::print_libversion_info(outputStream* st) {
@@ -2723,7 +2751,19 @@
   Linux::numa_interleave_memory(addr, bytes);
 }
 
+// Define for numa_set_bind_policy(int). Setting the argument to 0 will set the
+// bind policy to MPOL_PREFERRED for the current thread.
+#define USE_MPOL_PREFERRED 0
+
 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
+  // To make NUMA and large pages more robust when both enabled, we need to ease
+  // the requirements on where the memory should be allocated. MPOL_BIND is the
+  // default policy and it will force memory to be allocated on the specified
+  // node. Changing this to MPOL_PREFERRED will prefer to allocate the memory on
+  // the specified node, but will not force it. Using this policy will prevent
+  // getting SIGBUS when trying to allocate large pages on NUMA nodes with no
+  // free large pages.
+  Linux::numa_set_bind_policy(USE_MPOL_PREFERRED);
   Linux::numa_tonode_memory(addr, bytes, lgrp_hint);
 }
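
The bind-policy change above relaxes MPOL_BIND to MPOL_PREFERRED before every
node-local placement. A hedged sketch of the same libnuma calls when linking
-lnuma directly instead of resolving them via dlsym as HotSpot does:

    #include <numa.h>

    static void make_local_preferred(void* addr, size_t bytes, int node) {
      numa_set_bind_policy(0);                 // 0 => MPOL_PREFERRED, nonzero => MPOL_BIND
      numa_tonode_memory(addr, bytes, node);   // prefer, but do not force, this node
    }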
 
@@ -2825,6 +2865,8 @@
                                             libnuma_dlsym(handle, "numa_tonode_memory")));
       set_numa_interleave_memory(CAST_TO_FN_PTR(numa_interleave_memory_func_t,
                                             libnuma_dlsym(handle, "numa_interleave_memory")));
+      set_numa_set_bind_policy(CAST_TO_FN_PTR(numa_set_bind_policy_func_t,
+                                            libnuma_dlsym(handle, "numa_set_bind_policy")));
 
 
       if (numa_available() != -1) {
@@ -2891,6 +2933,7 @@
 os::Linux::numa_available_func_t os::Linux::_numa_available;
 os::Linux::numa_tonode_memory_func_t os::Linux::_numa_tonode_memory;
 os::Linux::numa_interleave_memory_func_t os::Linux::_numa_interleave_memory;
+os::Linux::numa_set_bind_policy_func_t os::Linux::_numa_set_bind_policy;
 unsigned long* os::Linux::_numa_all_nodes;
 
 bool os::pd_uncommit_memory(char* addr, size_t size) {
@@ -2899,6 +2942,53 @@
   return res  != (uintptr_t) MAP_FAILED;
 }
 
+static
+address get_stack_commited_bottom(address bottom, size_t size) {
+  address nbot = bottom;
+  address ntop = bottom + size;
+
+  size_t page_sz = os::vm_page_size();
+  unsigned pages = size / page_sz;
+
+  unsigned char vec[1];
+  unsigned imin = 1, imax = pages + 1, imid;
+  int mincore_return_value;
+
+  while (imin < imax) {
+    imid = (imax + imin) / 2;
+    nbot = ntop - (imid * page_sz);
+
+    // Use a trick with mincore to check whether the page is mapped or not.
+    // mincore sets vec to 1 if the page resides in memory and to 0 if the page
+    // is swapped out, but if the page we are asking for is unmapped
+    // it returns -1 with errno set to ENOMEM
+    mincore_return_value = mincore(nbot, page_sz, vec);
+
+    if (mincore_return_value == -1) {
+      // Page is not mapped; go up
+      // to find the first mapped page
+      if (errno != EAGAIN) {
+        assert(errno == ENOMEM, "Unexpected mincore errno");
+        imax = imid;
+      }
+    } else {
+      // Page is mapped; go down
+      // to find the first unmapped page
+      imin = imid + 1;
+    }
+  }
+
+  nbot = nbot + page_sz;
+
+  // Adjust the stack bottom one page up if the last checked page is not mapped
+  if (mincore_return_value == -1) {
+    nbot = nbot + page_sz;
+  }
+
+  return nbot;
+}
+
+
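
The binary search above leans on one mincore() property: the call succeeds for
any mapped page (resident or swapped out) and fails with ENOMEM for an unmapped
one. A minimal sketch of that single-page probe (page_mapped is an illustrative
helper, not part of this change):

    #include <sys/mman.h>
    #include <errno.h>

    // p must be page-aligned. Returns 1 if mapped, 0 if unmapped,
    // -1 on a transient error such as EAGAIN.
    static int page_mapped(void* p, size_t page_sz) {
      unsigned char vec[1];
      if (mincore(p, page_sz, vec) == 0) return 1;  // mapped, resident or not
      return (errno == ENOMEM) ? 0 : -1;
    }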
 // Linux uses a growable mapping for the stack, and if the mapping for
 // the stack guard pages is not removed when we detach a thread the
 // stack cannot grow beyond the pages where the stack guard was
@@ -2913,59 +3003,37 @@
 // So, we need to know the extent of the stack mapping when
 // create_stack_guard_pages() is called.
 
-// Find the bounds of the stack mapping.  Return true for success.
-//
 // We only need this for stacks that are growable: at the time of
 // writing thread stacks don't use growable mappings (i.e. those
// created with MAP_GROWSDOWN), and aren't marked "[stack]", so this
 // only applies to the main thread.
 
-static
-bool get_stack_bounds(uintptr_t *bottom, uintptr_t *top) {
-
-  char buf[128];
-  int fd, sz;
-
-  if ((fd = ::open("/proc/self/maps", O_RDONLY)) < 0) {
-    return false;
-  }
-
-  const char kw[] = "[stack]";
-  const int kwlen = sizeof(kw)-1;
-
-  // Address part of /proc/self/maps couldn't be more than 128 bytes
-  while ((sz = os::get_line_chars(fd, buf, sizeof(buf))) > 0) {
-     if (sz > kwlen && ::memcmp(buf+sz-kwlen, kw, kwlen) == 0) {
-        // Extract addresses
-        if (sscanf(buf, "%" SCNxPTR "-%" SCNxPTR, bottom, top) == 2) {
-           uintptr_t sp = (uintptr_t) __builtin_frame_address(0);
-           if (sp >= *bottom && sp <= *top) {
-              ::close(fd);
-              return true;
-           }
-        }
-     }
-  }
-
- ::close(fd);
-  return false;
-}
-
-
 // If the (growable) stack mapping already extends beyond the point
 // where we're going to put our guard pages, truncate the mapping at
 // that point by munmap()ping it.  This ensures that when we later
 // munmap() the guard pages we don't leave a hole in the stack
-// mapping. This only affects the main/initial thread, but guard
-// against future OS changes
+// mapping. This only affects the main/initial thread.
+
 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
-  uintptr_t stack_extent, stack_base;
-  bool chk_bounds = NOT_DEBUG(os::Linux::is_initial_thread()) DEBUG_ONLY(true);
-  if (chk_bounds && get_stack_bounds(&stack_extent, &stack_base)) {
-      assert(os::Linux::is_initial_thread(),
-           "growable stack in non-initial thread");
-    if (stack_extent < (uintptr_t)addr)
-      ::munmap((void*)stack_extent, (uintptr_t)addr - stack_extent);
+
+  if (os::Linux::is_initial_thread()) {
+    // As we manually grow the stack up to its bottom inside create_attached_thread(),
+    // it's likely that os::Linux::initial_thread_stack_bottom is mapped and
+    // we don't need to do anything special.
+    // Check that first, before calling the heavy function.
+    uintptr_t stack_extent = (uintptr_t) os::Linux::initial_thread_stack_bottom();
+    unsigned char vec[1];
+
+    if (mincore((address)stack_extent, os::vm_page_size(), vec) == -1) {
+      // Fallback to slow path on all errors, including EAGAIN
+      stack_extent = (uintptr_t) get_stack_commited_bottom(
+                                    os::Linux::initial_thread_stack_bottom(),
+                                    (size_t)addr - stack_extent);
+    }
+
+    if (stack_extent < (uintptr_t)addr) {
+      ::munmap((void*)stack_extent, (uintptr_t)(addr - stack_extent));
+    }
   }
 
   return os::commit_memory(addr, size, !ExecMem);
@@ -2974,13 +3042,13 @@
 // If this is a growable mapping, remove the guard pages entirely by
 // munmap()ping them.  If not, just call uncommit_memory(). This only
 // affects the main/initial thread, but guard against future OS changes
+// It's safe to always unmap the guard pages for the initial thread because we
+// always place them right after the end of the mapped region.
+
 bool os::remove_stack_guard_pages(char* addr, size_t size) {
   uintptr_t stack_extent, stack_base;
-  bool chk_bounds = NOT_DEBUG(os::Linux::is_initial_thread()) DEBUG_ONLY(true);
-  if (chk_bounds && get_stack_bounds(&stack_extent, &stack_base)) {
-      assert(os::Linux::is_initial_thread(),
-           "growable stack in non-initial thread");
-
+
+  if (os::Linux::is_initial_thread()) {
     return ::munmap(addr, size) == 0;
   }
 
@@ -3247,13 +3315,15 @@
   if (FLAG_IS_DEFAULT(UseHugeTLBFS) &&
       FLAG_IS_DEFAULT(UseSHM) &&
       FLAG_IS_DEFAULT(UseTransparentHugePages)) {
-    // If UseLargePages is specified on the command line try all methods,
-    // if it's default, then try only UseTransparentHugePages.
-    if (FLAG_IS_DEFAULT(UseLargePages)) {
-      UseTransparentHugePages = true;
-    } else {
-      UseHugeTLBFS = UseTransparentHugePages = UseSHM = true;
-    }
+
+    // The type of large pages has not been specified by the user.
+
+    // Try UseHugeTLBFS and then UseSHM.
+    UseHugeTLBFS = UseSHM = true;
+
+    // Don't try UseTransparentHugePages since there are known
+    // performance issues with it turned on. This might change in the future.
+    UseTransparentHugePages = false;
   }
 
   if (UseTransparentHugePages) {
@@ -3279,9 +3349,19 @@
 }
 
 void os::large_page_init() {
-  if (!UseLargePages) {
+  if (!UseLargePages &&
+      !UseTransparentHugePages &&
+      !UseHugeTLBFS &&
+      !UseSHM) {
+    // Not using large pages.
+    return;
+  }
+
+  if (!FLAG_IS_DEFAULT(UseLargePages) && !UseLargePages) {
+    // The user explicitly turned off large pages.
+    // Ignore the rest of the large pages flags.
+    UseTransparentHugePages = false;
     UseHugeTLBFS = false;
-    UseTransparentHugePages = false;
     UseSHM = false;
     return;
   }
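
Illustrative flag combinations under the two hunks above (a reading of the
code, not documented behavior):

    java -XX:+UseLargePages ...   # large pages on: tries UseHugeTLBFS, then UseSHM; THP stays off
    java -XX:-UseLargePages ...   # explicit off: THP, HugeTLBFS and SHM are all forced off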
@@ -4626,7 +4706,27 @@
   Linux::_main_thread = pthread_self();
 
   Linux::clock_init();
-  initial_time_count = os::elapsed_counter();
+  initial_time_count = javaTimeNanos();
+
+  // pthread_condattr initialization for monotonic clock
+  int status;
+  pthread_condattr_t* _condattr = os::Linux::condAttr();
+  if ((status = pthread_condattr_init(_condattr)) != 0) {
+    fatal(err_msg("pthread_condattr_init: %s", strerror(status)));
+  }
+  // Only set the clock if CLOCK_MONOTONIC is available
+  if (Linux::supports_monotonic_clock()) {
+    if ((status = pthread_condattr_setclock(_condattr, CLOCK_MONOTONIC)) != 0) {
+      if (status == EINVAL) {
+        warning("Unable to use monotonic clock with relative timed-waits" \
+                " - changes to the time-of-day clock may have adverse effects");
+      } else {
+        fatal(err_msg("pthread_condattr_setclock: %s", strerror(status)));
+      }
+    }
+  }
+  // else it defaults to CLOCK_REALTIME
+
   pthread_mutex_init(&dl_mutex, NULL);
 
   // If the pagesize of the VM is greater than 8K determine the appropriate
@@ -4673,8 +4773,6 @@
 #endif
   }
 
-  os::large_page_init();
-
   // initialize suspend/resume support - must do this before signal_sets_init()
   if (SR_initialize() != 0) {
     perror("SR_initialize failed");
@@ -4709,6 +4807,10 @@
 
   Linux::capture_initial_stack(JavaThread::stack_size_at_create());
 
+#if defined(IA32)
+  workaround_expand_exec_shield_cs_limit();
+#endif
+
   Linux::libpthread_init();
   if (PrintMiscellaneous && (Verbose || WizardMode)) {
      tty->print_cr("[HotSpot is running with %s, %s(%s)]\n",
@@ -5436,21 +5538,36 @@
 
 static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
   if (millis < 0)  millis = 0;
-  struct timeval now;
-  int status = gettimeofday(&now, NULL);
-  assert(status == 0, "gettimeofday");
+
   jlong seconds = millis / 1000;
   millis %= 1000;
   if (seconds > 50000000) { // see man cond_timedwait(3T)
     seconds = 50000000;
   }
-  abstime->tv_sec = now.tv_sec  + seconds;
-  long       usec = now.tv_usec + millis * 1000;
-  if (usec >= 1000000) {
-    abstime->tv_sec += 1;
-    usec -= 1000000;
-  }
-  abstime->tv_nsec = usec * 1000;
+
+  if (os::Linux::supports_monotonic_clock()) {
+    struct timespec now;
+    int status = os::Linux::clock_gettime(CLOCK_MONOTONIC, &now);
+    assert_status(status == 0, status, "clock_gettime");
+    abstime->tv_sec = now.tv_sec  + seconds;
+    long nanos = now.tv_nsec + millis * NANOSECS_PER_MILLISEC;
+    if (nanos >= NANOSECS_PER_SEC) {
+      abstime->tv_sec += 1;
+      nanos -= NANOSECS_PER_SEC;
+    }
+    abstime->tv_nsec = nanos;
+  } else {
+    struct timeval now;
+    int status = gettimeofday(&now, NULL);
+    assert(status == 0, "gettimeofday");
+    abstime->tv_sec = now.tv_sec  + seconds;
+    long usec = now.tv_usec + millis * 1000;
+    if (usec >= 1000000) {
+      abstime->tv_sec += 1;
+      usec -= 1000000;
+    }
+    abstime->tv_nsec = usec * 1000;
+  }
   return abstime;
 }
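
The condattr hunk earlier and the compute_abstime() hunk above only pay off
together: the wait's deadline and the condvar's clock must agree. A
self-contained sketch of the combined pattern (all names are local to the
example):

    #include <pthread.h>
    #include <time.h>

    static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;

    static void wait_up_to_millis(long millis) {
      pthread_condattr_t attr;
      pthread_condattr_init(&attr);
      pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);  // condvar measures deadlines on CLOCK_MONOTONIC
      pthread_cond_t cv;
      pthread_cond_init(&cv, &attr);

      struct timespec deadline;
      clock_gettime(CLOCK_MONOTONIC, &deadline);          // same clock as the condattr
      deadline.tv_sec  += millis / 1000;
      deadline.tv_nsec += (millis % 1000) * 1000000L;
      if (deadline.tv_nsec >= 1000000000L) {
        deadline.tv_sec  += 1;
        deadline.tv_nsec -= 1000000000L;
      }

      pthread_mutex_lock(&mtx);
      pthread_cond_timedwait(&cv, &mtx, &deadline);       // unaffected by time-of-day changes
      pthread_mutex_unlock(&mtx);
      pthread_cond_destroy(&cv);
      pthread_condattr_destroy(&attr);
    }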
 
@@ -5542,7 +5659,7 @@
     status = os::Linux::safe_cond_timedwait(_cond, _mutex, &abst);
     if (status != 0 && WorkAroundNPTLTimedWaitHang) {
       pthread_cond_destroy (_cond);
-      pthread_cond_init (_cond, NULL) ;
+      pthread_cond_init (_cond, os::Linux::condAttr()) ;
     }
     assert_status(status == 0 || status == EINTR ||
                   status == ETIME || status == ETIMEDOUT,
@@ -5643,32 +5760,50 @@
 
 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
   assert (time > 0, "convertTime");
-
-  struct timeval now;
-  int status = gettimeofday(&now, NULL);
-  assert(status == 0, "gettimeofday");
-
-  time_t max_secs = now.tv_sec + MAX_SECS;
-
-  if (isAbsolute) {
-    jlong secs = time / 1000;
-    if (secs > max_secs) {
-      absTime->tv_sec = max_secs;
+  time_t max_secs = 0;
+
+  if (!os::Linux::supports_monotonic_clock() || isAbsolute) {
+    struct timeval now;
+    int status = gettimeofday(&now, NULL);
+    assert(status == 0, "gettimeofday");
+
+    max_secs = now.tv_sec + MAX_SECS;
+
+    if (isAbsolute) {
+      jlong secs = time / 1000;
+      if (secs > max_secs) {
+        absTime->tv_sec = max_secs;
+      } else {
+        absTime->tv_sec = secs;
+      }
+      absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
+    } else {
+      jlong secs = time / NANOSECS_PER_SEC;
+      if (secs >= MAX_SECS) {
+        absTime->tv_sec = max_secs;
+        absTime->tv_nsec = 0;
+      } else {
+        absTime->tv_sec = now.tv_sec + secs;
+        absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
+        if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
+          absTime->tv_nsec -= NANOSECS_PER_SEC;
+          ++absTime->tv_sec; // note: this must be <= max_secs
+        }
+      }
     }
-    else {
-      absTime->tv_sec = secs;
-    }
-    absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
-  }
-  else {
+  } else {
+    // must be relative using monotonic clock
+    struct timespec now;
+    int status = os::Linux::clock_gettime(CLOCK_MONOTONIC, &now);
+    assert_status(status == 0, status, "clock_gettime");
+    max_secs = now.tv_sec + MAX_SECS;
     jlong secs = time / NANOSECS_PER_SEC;
     if (secs >= MAX_SECS) {
       absTime->tv_sec = max_secs;
       absTime->tv_nsec = 0;
-    }
-    else {
+    } else {
       absTime->tv_sec = now.tv_sec + secs;
-      absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
+      absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_nsec;
       if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
         absTime->tv_nsec -= NANOSECS_PER_SEC;
         ++absTime->tv_sec; // note: this must be <= max_secs
@@ -5748,15 +5883,19 @@
   jt->set_suspend_equivalent();
   // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
 
+  assert(_cur_index == -1, "invariant");
   if (time == 0) {
-    status = pthread_cond_wait (_cond, _mutex) ;
+    _cur_index = REL_INDEX; // arbitrary choice when not timed
+    status = pthread_cond_wait (&_cond[_cur_index], _mutex) ;
   } else {
-    status = os::Linux::safe_cond_timedwait (_cond, _mutex, &absTime) ;
+    _cur_index = isAbsolute ? ABS_INDEX : REL_INDEX;
+    status = os::Linux::safe_cond_timedwait (&_cond[_cur_index], _mutex, &absTime) ;
     if (status != 0 && WorkAroundNPTLTimedWaitHang) {
-      pthread_cond_destroy (_cond) ;
-      pthread_cond_init    (_cond, NULL);
+      pthread_cond_destroy (&_cond[_cur_index]) ;
+      pthread_cond_init    (&_cond[_cur_index], isAbsolute ? NULL : os::Linux::condAttr());
     }
   }
+  _cur_index = -1;
   assert_status(status == 0 || status == EINTR ||
                 status == ETIME || status == ETIMEDOUT,
                 status, "cond_timedwait");
@@ -5785,17 +5924,24 @@
   s = _counter;
   _counter = 1;
   if (s < 1) {
-     if (WorkAroundNPTLTimedWaitHang) {
-        status = pthread_cond_signal (_cond) ;
-        assert (status == 0, "invariant") ;
+    // thread might be parked
+    if (_cur_index != -1) {
+      // thread is definitely parked
+      if (WorkAroundNPTLTimedWaitHang) {
+        status = pthread_cond_signal (&_cond[_cur_index]);
+        assert (status == 0, "invariant");
         status = pthread_mutex_unlock(_mutex);
-        assert (status == 0, "invariant") ;
-     } else {
+        assert (status == 0, "invariant");
+      } else {
         status = pthread_mutex_unlock(_mutex);
-        assert (status == 0, "invariant") ;
-        status = pthread_cond_signal (_cond) ;
-        assert (status == 0, "invariant") ;
-     }
+        assert (status == 0, "invariant");
+        status = pthread_cond_signal (&_cond[_cur_index]);
+        assert (status == 0, "invariant");
+      }
+    } else {
+      status = pthread_mutex_unlock(_mutex);
+      assert (status == 0, "invariant") ;
+    }
   } else {
     pthread_mutex_unlock(_mutex);
     assert (status == 0, "invariant") ;
--- a/src/os/linux/vm/os_linux.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/os/linux/vm/os_linux.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -221,6 +221,13 @@
 
   static jlong fast_thread_cpu_time(clockid_t clockid);
 
+  // pthread_cond clock support
+  private:
+  static pthread_condattr_t _condattr[1];
+
+  public:
+  static pthread_condattr_t* condAttr() { return _condattr; }
+
   // Stack repair handling
 
   // none present
@@ -235,6 +242,7 @@
   typedef int (*numa_available_func_t)(void);
   typedef int (*numa_tonode_memory_func_t)(void *start, size_t size, int node);
   typedef void (*numa_interleave_memory_func_t)(void *start, size_t size, unsigned long *nodemask);
+  typedef void (*numa_set_bind_policy_func_t)(int policy);
 
   static sched_getcpu_func_t _sched_getcpu;
   static numa_node_to_cpus_func_t _numa_node_to_cpus;
@@ -242,6 +250,7 @@
   static numa_available_func_t _numa_available;
   static numa_tonode_memory_func_t _numa_tonode_memory;
   static numa_interleave_memory_func_t _numa_interleave_memory;
+  static numa_set_bind_policy_func_t _numa_set_bind_policy;
   static unsigned long* _numa_all_nodes;
 
   static void set_sched_getcpu(sched_getcpu_func_t func) { _sched_getcpu = func; }
@@ -250,6 +259,7 @@
   static void set_numa_available(numa_available_func_t func) { _numa_available = func; }
   static void set_numa_tonode_memory(numa_tonode_memory_func_t func) { _numa_tonode_memory = func; }
   static void set_numa_interleave_memory(numa_interleave_memory_func_t func) { _numa_interleave_memory = func; }
+  static void set_numa_set_bind_policy(numa_set_bind_policy_func_t func) { _numa_set_bind_policy = func; }
   static void set_numa_all_nodes(unsigned long* ptr) { _numa_all_nodes = ptr; }
   static int sched_getcpu_syscall(void);
 public:
@@ -267,6 +277,11 @@
       _numa_interleave_memory(start, size, _numa_all_nodes);
     }
   }
+  static void numa_set_bind_policy(int policy) {
+    if (_numa_set_bind_policy != NULL) {
+      _numa_set_bind_policy(policy);
+    }
+  }
   static int get_node_by_cpu(int cpu_id);
 };
 
@@ -287,7 +302,7 @@
   public:
     PlatformEvent() {
       int status;
-      status = pthread_cond_init (_cond, NULL);
+      status = pthread_cond_init (_cond, os::Linux::condAttr());
       assert_status(status == 0, status, "cond_init");
       status = pthread_mutex_init (_mutex, NULL);
       assert_status(status == 0, status, "mutex_init");
@@ -302,14 +317,19 @@
     void park () ;
     void unpark () ;
     int  TryPark () ;
-    int  park (jlong millis) ;
+    int  park (jlong millis) ; // relative timed-wait only
     void SetAssociation (Thread * a) { _Assoc = a ; }
 } ;
 
 class PlatformParker : public CHeapObj<mtInternal> {
   protected:
+    enum {
+        REL_INDEX = 0,
+        ABS_INDEX = 1
+    };
+    int _cur_index;  // which cond is in use: -1, 0, 1
     pthread_mutex_t _mutex [1] ;
-    pthread_cond_t  _cond  [1] ;
+    pthread_cond_t  _cond  [2] ; // one for relative times and one for abs.
 
   public:       // TODO-FIXME: make dtor private
     ~PlatformParker() { guarantee (0, "invariant") ; }
@@ -317,10 +337,13 @@
   public:
     PlatformParker() {
       int status;
-      status = pthread_cond_init (_cond, NULL);
-      assert_status(status == 0, status, "cond_init");
+      status = pthread_cond_init (&_cond[REL_INDEX], os::Linux::condAttr());
+      assert_status(status == 0, status, "cond_init rel");
+      status = pthread_cond_init (&_cond[ABS_INDEX], NULL);
+      assert_status(status == 0, status, "cond_init abs");
       status = pthread_mutex_init (_mutex, NULL);
       assert_status(status == 0, status, "mutex_init");
+      _cur_index = -1; // mark as unused
     }
 };
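
The two-element _cond array exists because a condvar's clock is fixed by its
condattr at init time and cannot vary per wait. A sketch of the split, where
monotonic_attr stands in for os::Linux::condAttr():

    pthread_cond_t cond[2];
    pthread_cond_init(&cond[0], monotonic_attr);  // REL_INDEX: relative waits on CLOCK_MONOTONIC
    pthread_cond_init(&cond[1], NULL);            // ABS_INDEX: absolute deadlines on CLOCK_REALTIME

    // park() records which element it blocks on in _cur_index (under _mutex),
    // so unpark() signals exactly the condvar the thread is waiting on.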
 
--- a/src/os/posix/vm/os_posix.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/os/posix/vm/os_posix.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -32,6 +32,8 @@
 #include <unistd.h>
 #include <sys/resource.h>
 #include <sys/utsname.h>
+#include <pthread.h>
+#include <signal.h>
 
 // Todo: provide a os::get_max_process_id() or similar. Number of processes
 // may have been configured, can be read more accurately from proc fs etc.
@@ -805,11 +807,17 @@
  * The callback is supposed to provide the method that should be protected.
  */
 bool os::WatcherThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
+  sigset_t saved_sig_mask;
+
   assert(Thread::current()->is_Watcher_thread(), "Only for WatcherThread");
   assert(!WatcherThread::watcher_thread()->has_crash_protection(),
       "crash_protection already set?");
 
-  if (sigsetjmp(_jmpbuf, 1) == 0) {
+  // we cannot rely on sigsetjmp/siglongjmp to save/restore the signal mask
+  // since on at least some systems (OS X) siglongjmp will restore the mask
+  // for the process, not the thread
+  pthread_sigmask(0, NULL, &saved_sig_mask);
+  if (sigsetjmp(_jmpbuf, 0) == 0) {
     // make sure we can see in the signal handler that we have crash protection
     // installed
     WatcherThread::watcher_thread()->set_crash_protection(this);
@@ -819,6 +827,7 @@
     return true;
   }
   // this happens when we siglongjmp() back
+  pthread_sigmask(SIG_SETMASK, &saved_sig_mask, NULL);
   WatcherThread::watcher_thread()->set_crash_protection(NULL);
   return false;
 }
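
The fix above trades the mask-saving sigsetjmp(_jmpbuf, 1) for an explicit,
per-thread save/restore. A minimal sketch of the resulting pattern
(protected_call and do_work are illustrative names):

    #include <pthread.h>
    #include <setjmp.h>
    #include <signal.h>

    static sigjmp_buf jmpbuf;

    static bool protected_call(void (*do_work)()) {
      sigset_t saved;
      pthread_sigmask(0, NULL, &saved);      // save this thread's mask ourselves
      if (sigsetjmp(jmpbuf, 0) == 0) {       // 0: siglongjmp won't touch the mask
        do_work();                           // a signal handler may siglongjmp(jmpbuf, 1)
        return true;
      }
      pthread_sigmask(SIG_SETMASK, &saved, NULL);  // restore after the long jump
      return false;
    }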
--- a/src/os/solaris/vm/os_solaris.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/os/solaris/vm/os_solaris.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -5134,9 +5134,7 @@
     if(Verbose && PrintMiscellaneous)
       tty->print("[Memory Serialize  Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
 #endif
-}
-
-  os::large_page_init();
+  }
 
   // Check minimum allowable stack size for thread creation and to initialize
   // the java system classes, including StackOverflowError - depends on page
--- a/src/os/windows/vm/decoder_windows.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/os/windows/vm/decoder_windows.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -32,7 +32,11 @@
   _can_decode_in_vm = false;
   _pfnSymGetSymFromAddr64 = NULL;
   _pfnUndecorateSymbolName = NULL;
-
+#ifdef AMD64
+  _pfnStackWalk64 = NULL;
+  _pfnSymFunctionTableAccess64 = NULL;
+  _pfnSymGetModuleBase64 = NULL;
+#endif
   _decoder_status = no_error;
   initialize();
 }
@@ -53,14 +57,24 @@
     _pfnUndecorateSymbolName = (pfn_UndecorateSymbolName)::GetProcAddress(handle, "UnDecorateSymbolName");
 
     if (_pfnSymSetOptions == NULL || _pfnSymInitialize == NULL || _pfnSymGetSymFromAddr64 == NULL) {
-      _pfnSymGetSymFromAddr64 = NULL;
-      _pfnUndecorateSymbolName = NULL;
-      ::FreeLibrary(handle);
-      _dbghelp_handle = NULL;
+      uninitialize();
       _decoder_status = helper_func_error;
       return;
     }
 
+#ifdef AMD64
+    _pfnStackWalk64 = (pfn_StackWalk64)::GetProcAddress(handle, "StackWalk64");
+    _pfnSymFunctionTableAccess64 = (pfn_SymFunctionTableAccess64)::GetProcAddress(handle, "SymFunctionTableAccess64");
+    _pfnSymGetModuleBase64 = (pfn_SymGetModuleBase64)::GetProcAddress(handle, "SymGetModuleBase64");
+    if (_pfnStackWalk64 == NULL || _pfnSymFunctionTableAccess64 == NULL || _pfnSymGetModuleBase64 == NULL) {
+      // We can't call StackWalk64 to walk the stack, but we are still
+      // able to decode the symbols. Let's limp on.
+      _pfnStackWalk64 = NULL;
+      _pfnSymFunctionTableAccess64 = NULL;
+      _pfnSymGetModuleBase64 = NULL;
+    }
+#endif
+
     HANDLE hProcess = ::GetCurrentProcess();
     _pfnSymSetOptions(SYMOPT_UNDNAME | SYMOPT_DEFERRED_LOADS | SYMOPT_EXACT_SYMBOLS);
     if (!_pfnSymInitialize(hProcess, NULL, TRUE)) {
@@ -156,6 +170,11 @@
 void WindowsDecoder::uninitialize() {
   _pfnSymGetSymFromAddr64 = NULL;
   _pfnUndecorateSymbolName = NULL;
+#ifdef AMD64
+  _pfnStackWalk64 = NULL;
+  _pfnSymFunctionTableAccess64 = NULL;
+  _pfnSymGetModuleBase64 = NULL;
+#endif
   if (_dbghelp_handle != NULL) {
     ::FreeLibrary(_dbghelp_handle);
   }
@@ -195,3 +214,65 @@
          _pfnUndecorateSymbolName(symbol, buf, buflen, UNDNAME_COMPLETE);
 }
 
+#ifdef AMD64
+BOOL WindowsDbgHelp::StackWalk64(DWORD MachineType,
+                                 HANDLE hProcess,
+                                 HANDLE hThread,
+                                 LPSTACKFRAME64 StackFrame,
+                                 PVOID ContextRecord,
+                                 PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine,
+                                 PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine,
+                                 PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine,
+                                 PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress) {
+  DecoderLocker locker;
+  WindowsDecoder* wd = (WindowsDecoder*)locker.decoder();
+
+  if (!wd->has_error() && wd->_pfnStackWalk64) {
+    return wd->_pfnStackWalk64(MachineType,
+                               hProcess,
+                               hThread,
+                               StackFrame,
+                               ContextRecord,
+                               ReadMemoryRoutine,
+                               FunctionTableAccessRoutine,
+                               GetModuleBaseRoutine,
+                               TranslateAddress);
+  } else {
+    return false;
+  }
+}
+
+PVOID WindowsDbgHelp::SymFunctionTableAccess64(HANDLE hProcess, DWORD64 AddrBase) {
+  DecoderLocker locker;
+  WindowsDecoder* wd = (WindowsDecoder*)locker.decoder();
+
+  if (!wd->has_error() && wd->_pfnSymFunctionTableAccess64) {
+    return wd->_pfnSymFunctionTableAccess64(hProcess, AddrBase);
+  } else {
+    return NULL;
+  }
+}
+
+pfn_SymFunctionTableAccess64 WindowsDbgHelp::pfnSymFunctionTableAccess64() {
+  DecoderLocker locker;
+  WindowsDecoder* wd = (WindowsDecoder*)locker.decoder();
+
+  if (!wd->has_error()) {
+    return wd->_pfnSymFunctionTableAccess64;
+  } else {
+    return NULL;
+  }
+}
+
+pfn_SymGetModuleBase64 WindowsDbgHelp::pfnSymGetModuleBase64() {
+  DecoderLocker locker;
+  WindowsDecoder* wd = (WindowsDecoder*)locker.decoder();
+
+  if (!wd->has_error()) {
+    return wd->_pfnSymGetModuleBase64;
+  } else {
+    return NULL;
+  }
+}
+
+#endif // AMD64
--- a/src/os/windows/vm/decoder_windows.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/os/windows/vm/decoder_windows.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -38,6 +38,20 @@
 typedef BOOL  (WINAPI *pfn_SymSetSearchPath)(HANDLE, PCTSTR);
 typedef BOOL  (WINAPI *pfn_SymGetSearchPath)(HANDLE, PTSTR, int);
 
+#ifdef AMD64
+typedef BOOL  (WINAPI *pfn_StackWalk64)(DWORD MachineType,
+                                        HANDLE hProcess,
+                                        HANDLE hThread,
+                                        LPSTACKFRAME64 StackFrame,
+                                        PVOID ContextRecord,
+                                        PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine,
+                                        PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine,
+                                        PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine,
+                                        PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress);
+typedef PVOID (WINAPI *pfn_SymFunctionTableAccess64)(HANDLE hProcess, DWORD64 AddrBase);
+typedef DWORD64 (WINAPI *pfn_SymGetModuleBase64)(HANDLE hProcess, DWORD64 dwAddr);
+#endif
+
 class WindowsDecoder : public AbstractDecoder {
 
 public:
@@ -61,7 +75,34 @@
   bool                      _can_decode_in_vm;
   pfn_SymGetSymFromAddr64   _pfnSymGetSymFromAddr64;
   pfn_UndecorateSymbolName  _pfnUndecorateSymbolName;
+#ifdef AMD64
+  pfn_StackWalk64              _pfnStackWalk64;
+  pfn_SymFunctionTableAccess64 _pfnSymFunctionTableAccess64;
+  pfn_SymGetModuleBase64       _pfnSymGetModuleBase64;
+
+  friend class WindowsDbgHelp;
+#endif
 };
 
+#ifdef AMD64
+// TODO: refactor and move the handling of dbghelp.dll outside of Decoder
+class WindowsDbgHelp : public Decoder {
+public:
+  static BOOL StackWalk64(DWORD MachineType,
+                          HANDLE hProcess,
+                          HANDLE hThread,
+                          LPSTACKFRAME64 StackFrame,
+                          PVOID ContextRecord,
+                          PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine,
+                          PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine,
+                          PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine,
+                          PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress);
+  static PVOID SymFunctionTableAccess64(HANDLE hProcess, DWORD64 AddrBase);
+
+  static pfn_SymFunctionTableAccess64 pfnSymFunctionTableAccess64();
+  static pfn_SymGetModuleBase64       pfnSymGetModuleBase64();
+};
+#endif
+
 #endif // OS_WINDOWS_VM_DECODER_WINDOWS_HPP
 
--- a/src/os/windows/vm/os_windows.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/os/windows/vm/os_windows.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -3189,9 +3189,12 @@
     return p_buf;
 
   } else {
+    if (TracePageSizes && Verbose) {
+       tty->print_cr("Reserving large pages in a single large chunk.");
+    }
     // normal policy just allocate it all at once
     DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
-    char * res = (char *)VirtualAlloc(NULL, bytes, flag, prot);
+    char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
     if (res != NULL) {
       address pc = CALLER_PC;
       MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, mtNone, pc);
@@ -3917,8 +3920,6 @@
 #endif
   }
 
-  os::large_page_init();
-
   // Setup Windows Exceptions
 
   // for debugging float code generation bugs
@@ -5429,7 +5430,7 @@
       if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
         lib_name = ++start;
       } else {
-        // Need to check for C:
+        // Need to check for drive prefix
         if ((start = strchr(lib_name, ':')) != NULL) {
           lib_name = ++start;
         }
@@ -5714,7 +5715,66 @@
 #endif
 
 #ifndef PRODUCT
+
+// Test the code path in reserve_memory_special() that tries to allocate memory in a single
+// contiguous memory block at a particular address.
+// The test first tries to find a good approximate address to allocate at by using the same
+// method to allocate some memory at any address. The test then tries to allocate memory in
+// the vicinity (not directly after it, to avoid possible by-chance use of that location).
+// This is of course only a heuristic; there is no guarantee that the vicinity of
+// the previously allocated memory is available for allocation. The only actual failure
+// that is reported is when the test tries to allocate at a particular location but gets a
+// different valid one. A NULL return value at this point is not considered an error but may
+// be legitimate.
+// If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages.
 void TestReserveMemorySpecial_test() {
-  // No tests available for this platform
-}
-#endif
+  if (!UseLargePages) {
+    if (VerboseInternalVMTests) {
+      gclog_or_tty->print("Skipping test because large pages are disabled");
+    }
+    return;
+  }
+  // save current value of globals
+  bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
+  bool old_use_numa_interleaving = UseNUMAInterleaving;
+
+  // set globals to make sure we hit the correct code path
+  UseLargePagesIndividualAllocation = UseNUMAInterleaving = false;
+
+  // do an allocation at an address selected by the OS to get a good one.
+  const size_t large_allocation_size = os::large_page_size() * 4;
+  char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
+  if (result == NULL) {
+    if (VerboseInternalVMTests) {
+      gclog_or_tty->print("Failed to allocate control block with size "SIZE_FORMAT". Skipping remainder of test.",
+        large_allocation_size);
+    }
+  } else {
+    os::release_memory_special(result, large_allocation_size);
+
+    // allocate another page within the recently allocated memory area, which seems to be a
+    // good location. At least we managed to get it there once.
+    const size_t expected_allocation_size = os::large_page_size();
+    char* expected_location = result + os::large_page_size();
+    char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
+    if (actual_location == NULL) {
+      if (VerboseInternalVMTests) {
+        gclog_or_tty->print("Failed to allocate any memory at "PTR_FORMAT" size "SIZE_FORMAT". Skipping remainder of test.",
+          expected_location, expected_allocation_size);
+      }
+    } else {
+      // release memory
+      os::release_memory_special(actual_location, expected_allocation_size);
+      // Only check now, after releasing the memory, to avoid leaks.
+      assert(actual_location == expected_location,
+        err_msg("Failed to allocate memory at requested location "PTR_FORMAT" of size "SIZE_FORMAT", is "PTR_FORMAT" instead",
+          expected_location, expected_allocation_size, actual_location));
+    }
+  }
+
+  // restore globals
+  UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation;
+  UseNUMAInterleaving = old_use_numa_interleaving;
+}
+#endif // PRODUCT
+
--- a/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -79,6 +79,15 @@
 # include <pthread_np.h>
 #endif
 
+// needed by current_stack_region() workaround for Mavericks
+#if defined(__APPLE__)
+# include <errno.h>
+# include <sys/types.h>
+# include <sys/sysctl.h>
+# define DEFAULT_MAIN_THREAD_STACK_PAGES 2048
+# define OS_X_10_9_0_KERNEL_MAJOR_VERSION 13
+#endif
+
 #ifdef AMD64
 #define SPELL_REG_SP "rsp"
 #define SPELL_REG_FP "rbp"
@@ -828,6 +837,21 @@
   pthread_t self = pthread_self();
   void *stacktop = pthread_get_stackaddr_np(self);
   *size = pthread_get_stacksize_np(self);
+  // workaround for OS X 10.9.0 (Mavericks)
+  // pthread_get_stacksize_np returns 128 pages even though the actual size is 2048 pages
+  if (pthread_main_np() == 1) {
+    if ((*size) < (DEFAULT_MAIN_THREAD_STACK_PAGES * (size_t)getpagesize())) {
+      char kern_osrelease[256];
+      size_t kern_osrelease_size = sizeof(kern_osrelease);
+      int ret = sysctlbyname("kern.osrelease", kern_osrelease, &kern_osrelease_size, NULL, 0);
+      if (ret == 0) {
+        // get the major number; atoi will ignore the minor and micro portions of the version string
+        if (atoi(kern_osrelease) >= OS_X_10_9_0_KERNEL_MAJOR_VERSION) {
+          *size = (DEFAULT_MAIN_THREAD_STACK_PAGES*getpagesize());
+        }
+      }
+    }
+  }
   *bottom = (address) stacktop - *size;
 #elif defined(__OpenBSD__)
   stack_t ss;
--- a/src/os_cpu/linux_x86/vm/os_linux_x86.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/os_cpu/linux_x86/vm/os_linux_x86.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -876,3 +876,46 @@
 #endif
 }
 #endif
+
+
+/*
+ * IA32 only: execute code at a high address in case buggy NX emulation is present,
+ * i.e. to avoid CS limit updates (JDK-8023956).
+ */
+void os::workaround_expand_exec_shield_cs_limit() {
+#if defined(IA32)
+  size_t page_size = os::vm_page_size();
+  /*
+   * Take the highest VA the OS will give us and exec
+   *
+   * Although using -(pagesz) as the mmap hint works on newer kernels as you would
+   * expect, older variants affected by this work-around don't (they search forward only).
+   *
+   * On the affected distributions, we understand the memory layout to be:
+   *
+   *   TASK_LIMIT = 3G, main stack base close to TASK_LIMIT.
+   *
+   * A few pages south of the main stack will do it.
+   *
+   * If we are embedded in an app other than the launcher (initial != main stack),
+   * we don't have much control over or understanding of the address space; just let it slide.
+   */
+  char* hint = (char*) (Linux::initial_thread_stack_bottom() -
+                        ((StackYellowPages + StackRedPages + 1) * page_size));
+  char* codebuf = os::reserve_memory(page_size, hint);
+  if ( (codebuf == NULL) || (!os::commit_memory(codebuf, page_size, true)) ) {
+    return; // No matter, we tried, best effort.
+  }
+  if (PrintMiscellaneous && (Verbose || WizardMode)) {
+     tty->print_cr("[CS limit NX emulation work-around, exec code at: %p]", codebuf);
+  }
+
+  // Some code to exec: the 'ret' instruction
+  codebuf[0] = 0xC3;
+
+  // Call the code in the codebuf
+  __asm__ volatile("call *%0" : : "r"(codebuf));
+
+  // keep the page mapped so CS limit isn't reduced.
+#endif
+}
--- a/src/os_cpu/linux_x86/vm/os_linux_x86.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/os_cpu/linux_x86/vm/os_linux_x86.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -36,4 +36,17 @@
   // Note: Currently only used in 64 bit Windows implementations
   static bool register_code_area(char *low, char *high) { return true; }
 
+  /*
+   * Work-around for broken NX emulation using CS limit, Red Hat patch "Exec-Shield"
+   * (IA32 only).
+   *
+   * Map and execute at a high VA to prevent a CS lazy-update race with SMP MM
+   * invalidation. Further code generation by the JVM will no longer cause CS limit
+   * updates.
+   *
+   * Affects IA32: RHEL 5 & 6, Ubuntu 10.04 (LTS), 10.10, 11.04, 11.10, 12.04.
+   * @see JDK-8023956
+   */
+  static void workaround_expand_exec_shield_cs_limit();
+
 #endif // OS_CPU_LINUX_X86_VM_OS_LINUX_X86_HPP
--- a/src/os_cpu/solaris_sparc/vm/globals_solaris_sparc.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/os_cpu/solaris_sparc/vm/globals_solaris_sparc.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -35,7 +35,9 @@
 
 // Used on 64 bit platforms for UseCompressedOops base address
 #ifdef _LP64
-define_pd_global(uintx, HeapBaseMinAddress,      CONST64(4)*G);
+// Use 6G as the default base address because by default the OS maps the application
+// to 4G on Solaris-Sparc. This leaves at least 2G for the native heap.
+define_pd_global(uintx, HeapBaseMinAddress,      CONST64(6)*G);
 #else
 define_pd_global(uintx, HeapBaseMinAddress,      2*G);
 #endif
--- a/src/os_cpu/solaris_x86/vm/globals_solaris_x86.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/os_cpu/solaris_x86/vm/globals_solaris_x86.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -44,6 +44,6 @@
 define_pd_global(intx, CompilerThreadStackSize,  0);
 
 // Used on 64 bit platforms for UseCompressedOops base address
-define_pd_global(uintx,HeapBaseMinAddress,       256*M);
+define_pd_global(uintx,HeapBaseMinAddress,       2*G);
 
 #endif // OS_CPU_SOLARIS_X86_VM_GLOBALS_SOLARIS_X86_HPP
--- a/src/os_cpu/windows_x86/vm/os_windows_x86.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/os_cpu/windows_x86/vm/os_windows_x86.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -29,6 +29,7 @@
 #include "classfile/vmSymbols.hpp"
 #include "code/icBuffer.hpp"
 #include "code/vtableStubs.hpp"
+#include "decoder_windows.hpp"
 #include "interpreter/interpreter.hpp"
 #include "jvm_windows.h"
 #include "memory/allocation.inline.hpp"
@@ -327,6 +328,94 @@
 
 cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap;
 
+#ifdef AMD64
+/*
+ * Windows/x64 does not use stack frames the way expected by Java:
+ * [1] in most cases, there is no frame pointer. All locals are addressed via RSP
+ * [2] in rare cases, when alloca() is used, a frame pointer is used, but this may
+ *     not be RBP.
+ * See http://msdn.microsoft.com/en-us/library/ew5tede7.aspx
+ *
+ * So it's not possible to print the native stack using the
+ *     while (...) {...  fr = os::get_sender_for_C_frame(&fr); }
+ * loop in vmError.cpp. We need to roll our own loop.
+ */
+bool os::platform_print_native_stack(outputStream* st, void* context,
+                                     char *buf, int buf_size)
+{
+  CONTEXT ctx;
+  if (context != NULL) {
+    memcpy(&ctx, context, sizeof(ctx));
+  } else {
+    RtlCaptureContext(&ctx);
+  }
+
+  st->print_cr("Native frames: (J=compiled Java code, j=interpreted, Vv=VM code, C=native code)");
+
+  STACKFRAME stk;
+  memset(&stk, 0, sizeof(stk));
+  stk.AddrStack.Offset    = ctx.Rsp;
+  stk.AddrStack.Mode      = AddrModeFlat;
+  stk.AddrFrame.Offset    = ctx.Rbp;
+  stk.AddrFrame.Mode      = AddrModeFlat;
+  stk.AddrPC.Offset       = ctx.Rip;
+  stk.AddrPC.Mode         = AddrModeFlat;
+
+  int count = 0;
+  address lastpc = 0;
+  while (count++ < StackPrintLimit) {
+    intptr_t* sp = (intptr_t*)stk.AddrStack.Offset;
+    intptr_t* fp = (intptr_t*)stk.AddrFrame.Offset; // NOT necessarily the same as ctx.Rbp!
+    address pc = (address)stk.AddrPC.Offset;
+
+    if (pc != NULL && sp != NULL && fp != NULL) {
+      if (count == 2 && lastpc == pc) {
+        // Skip it -- StackWalk64() may return the same PC
+        // (but different SP) on the first try.
+      } else {
+        // Don't try to create a frame(sp, fp, pc) -- on WinX64, stk.AddrFrame
+        // may not contain what Java expects, and may cause the frame() constructor
+        // to crash. Let's just print out the symbolic address.
+        frame::print_C_frame(st, buf, buf_size, pc);
+        st->cr();
+      }
+      lastpc = pc;
+    } else {
+      break;
+    }
+
+    PVOID p = WindowsDbgHelp::SymFunctionTableAccess64(GetCurrentProcess(), stk.AddrPC.Offset);
+    if (!p) {
+      // StackWalk64() can't handle this PC. Calling StackWalk64 again may cause a crash.
+      break;
+    }
+
+    BOOL result = WindowsDbgHelp::StackWalk64(
+        IMAGE_FILE_MACHINE_AMD64,  // __in      DWORD MachineType,
+        GetCurrentProcess(),       // __in      HANDLE hProcess,
+        GetCurrentThread(),        // __in      HANDLE hThread,
+        &stk,                      // __inout   LPSTACKFRAME64 StackFrame,
+        &ctx,                      // __inout   PVOID ContextRecord,
+        NULL,                      // __in_opt  PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine,
+        WindowsDbgHelp::pfnSymFunctionTableAccess64(),
+                                   // __in_opt  PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine,
+        WindowsDbgHelp::pfnSymGetModuleBase64(),
+                                   // __in_opt  PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine,
+        NULL);                     // __in_opt  PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress
+
+    if (!result) {
+      break;
+    }
+  }
+  if (count > StackPrintLimit) {
+    st->print_cr("...<more frames>...");
+  }
+  st->cr();
+
+  return true;
+}
+#endif // AMD64
+
 ExtendedPC os::fetch_frame_from_context(void* ucVoid,
                     intptr_t** ret_sp, intptr_t** ret_fp) {
 
@@ -401,6 +490,9 @@
                                      StubRoutines::x86::get_previous_fp_entry());
   if (func == NULL) return frame();
   intptr_t* fp = (*func)();
+  if (fp == NULL) {
+    return frame();
+  }
 #else
   intptr_t* fp = _get_previous_fp();
 #endif // AMD64
--- a/src/os_cpu/windows_x86/vm/os_windows_x86.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/os_cpu/windows_x86/vm/os_windows_x86.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -62,4 +62,10 @@
 
   static bool      register_code_area(char *low, char *high);
 
+#ifdef AMD64
+#define PLATFORM_PRINT_NATIVE_STACK 1
+static bool platform_print_native_stack(outputStream* st, void* context,
+                                        char *buf, int buf_size);
+#endif
+
 #endif // OS_CPU_WINDOWS_X86_VM_OS_WINDOWS_X86_HPP
--- a/src/share/tools/LogCompilation/README	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/tools/LogCompilation/README	Tue Nov 05 17:38:04 2013 -0800
@@ -4,14 +4,14 @@
 requires a 1.5 JDK to build and simply typing make should build it.
 
 It produces a jar file, logc.jar, that can be run on the
-hotspot.log from LogCompilation output like this:
+HotSpot log (by default, hotspot_pid{pid}.log) from LogCompilation output like this:
 
-  java -jar logc.jar hotspot.log
+  java -jar logc.jar hotspot_pid1234.log
 
 This will produce something like the normal PrintCompilation output.
Adding the -i option will also report inlining, like PrintInlining.
 
-More information about the LogCompilation output can be found at 
+More information about the LogCompilation output can be found at
 
 https://wikis.oracle.com/display/HotSpotInternals/LogCompilation+overview
 https://wikis.oracle.com/display/HotSpotInternals/PrintCompilation
--- a/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/CallSite.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/CallSite.java	Tue Nov 05 17:38:04 2013 -0800
@@ -106,10 +106,12 @@
                         " (" + getMethod().getBytes() + " bytes) " + getReason());
             }
         }
+        stream.printf(" (end time: %6.4f", getTimeStamp());
         if (getEndNodes() > 0) {
-            stream.printf(" (end time: %6.4f nodes: %d live: %d)", getTimeStamp(), getEndNodes(), getEndLiveNodes());
+            stream.printf(" nodes: %d live: %d", getEndNodes(), getEndLiveNodes());
         }
-        stream.println("");
+        stream.println(")");
+
         if (getReceiver() != null) {
             emit(stream, indent + 4);
             //                 stream.println("type profile " + method.holder + " -> " + receiver + " (" +
--- a/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/LogParser.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/LogParser.java	Tue Nov 05 17:38:04 2013 -0800
@@ -207,7 +207,12 @@
     }
 
     String search(Attributes attr, String name) {
-        return search(attr, name, null);
+        String result = attr.getValue(name);
+        if (result != null) {
+            return result;
+        } else {
+            throw new InternalError("can't find " + name);
+        }
     }
 
     String search(Attributes attr, String name, String defaultValue) {
@@ -215,13 +220,7 @@
         if (result != null) {
             return result;
         }
-        if (defaultValue != null) {
-            return defaultValue;
-        }
-        for (int i = 0; i < attr.getLength(); i++) {
-            System.out.println(attr.getQName(i) + " " + attr.getValue(attr.getQName(i)));
-        }
-        throw new InternalError("can't find " + name);
+        return defaultValue;
     }
     int indent = 0;
 
@@ -268,17 +267,18 @@
             Phase p = new Phase(search(atts, "name"),
                     Double.parseDouble(search(atts, "stamp")),
                     Integer.parseInt(search(atts, "nodes", "0")),
-                    Integer.parseInt(search(atts, "live")));
+                    Integer.parseInt(search(atts, "live", "0")));
             phaseStack.push(p);
         } else if (qname.equals("phase_done")) {
             Phase p = phaseStack.pop();
-            if (! p.getId().equals(search(atts, "name"))) {
+            String phaseName = search(atts, "name", null);
+            if (phaseName != null && !p.getId().equals(phaseName)) {
                 System.out.println("phase: " + p.getId());
                 throw new InternalError("phase name mismatch");
             }
             p.setEnd(Double.parseDouble(search(atts, "stamp")));
             p.setEndNodes(Integer.parseInt(search(atts, "nodes", "0")));
-            p.setEndLiveNodes(Integer.parseInt(search(atts, "live")));
+            p.setEndLiveNodes(Integer.parseInt(search(atts, "live", "0")));
             compile.getPhases().add(p);
         } else if (qname.equals("task")) {
             compile = new Compilation(Integer.parseInt(search(atts, "compile_id", "-1")));
@@ -413,8 +413,8 @@
             }
         } else if (qname.equals("parse_done")) {
             CallSite call = scopes.pop();
-            call.setEndNodes(Integer.parseInt(search(atts, "nodes", "1")));
-            call.setEndLiveNodes(Integer.parseInt(search(atts, "live", "1")));
+            call.setEndNodes(Integer.parseInt(search(atts, "nodes", "0")));
+            call.setEndLiveNodes(Integer.parseInt(search(atts, "live", "0")));
             call.setTimeStamp(Double.parseDouble(search(atts, "stamp")));
             scopes.push(call);
         }
--- a/src/share/vm/adlc/adlparse.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/adlc/adlparse.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -3395,12 +3395,16 @@
   char *greater_equal;
   char *less_equal;
   char *greater;
+  char *overflow;
+  char *no_overflow;
   const char *equal_format = "eq";
   const char *not_equal_format = "ne";
   const char *less_format = "lt";
   const char *greater_equal_format = "ge";
   const char *less_equal_format = "le";
   const char *greater_format = "gt";
+  const char *overflow_format = "o";
+  const char *no_overflow_format = "no";
 
   if (_curchar != '%') {
     parse_err(SYNERR, "Missing '%%{' for 'cond_interface' block.\n");
@@ -3437,6 +3441,12 @@
     else if ( strcmp(field,"greater") == 0 ) {
       greater = interface_field_parse(&greater_format);
     }
+    else if ( strcmp(field,"overflow") == 0 ) {
+      overflow = interface_field_parse(&overflow_format);
+    }
+    else if ( strcmp(field,"no_overflow") == 0 ) {
+      no_overflow = interface_field_parse(&no_overflow_format);
+    }
     else {
       parse_err(SYNERR, "Expected keyword, base|index|scale|disp,  or '%%}' ending interface.\n");
       return NULL;
@@ -3455,7 +3465,9 @@
                                        less,          less_format,
                                        greater_equal, greater_equal_format,
                                        less_equal,    less_equal_format,
-                                       greater,       greater_format);
+                                       greater,       greater_format,
+                                       overflow,      overflow_format,
+                                       no_overflow,   no_overflow_format);
   return inter;
 }
 
--- a/src/share/vm/adlc/archDesc.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/adlc/archDesc.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1192,6 +1192,15 @@
          || strcmp(idealName,"CmpF") == 0
          || strcmp(idealName,"FastLock") == 0
          || strcmp(idealName,"FastUnlock") == 0
+         || strcmp(idealName,"AddExactI") == 0
+         || strcmp(idealName,"AddExactL") == 0
+         || strcmp(idealName,"SubExactI") == 0
+         || strcmp(idealName,"SubExactL") == 0
+         || strcmp(idealName,"MulExactI") == 0
+         || strcmp(idealName,"MulExactL") == 0
+         || strcmp(idealName,"NegExactI") == 0
+         || strcmp(idealName,"NegExactL") == 0
+         || strcmp(idealName,"FlagsProj") == 0
          || strcmp(idealName,"Bool") == 0
          || strcmp(idealName,"Binary") == 0 ) {
       // Removed ConI from the must_clone list.  CPUs that cannot use
--- a/src/share/vm/adlc/arena.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/adlc/arena.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,7 +24,7 @@
 
 #include "adlc.hpp"
 
-void* Chunk::operator new(size_t requested_size, size_t length) {
+void* Chunk::operator new(size_t requested_size, size_t length) throw() {
   return CHeapObj::operator new(requested_size + length);
 }
 
@@ -163,7 +163,7 @@
 //-----------------------------------------------------------------------------
 // CHeapObj
 
-void* CHeapObj::operator new(size_t size){
+void* CHeapObj::operator new(size_t size) throw() {
   return (void *) malloc(size);
 }
 
--- a/src/share/vm/adlc/arena.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/adlc/arena.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -42,7 +42,7 @@
 
 class CHeapObj {
  public:
-  void* operator new(size_t size);
+  void* operator new(size_t size) throw();
   void  operator delete(void* p);
   void* new_array(size_t size);
 };
@@ -53,7 +53,7 @@
 
 class ValueObj {
  public:
-  void* operator new(size_t size);
+  void* operator new(size_t size) throw();
   void operator delete(void* p);
 };
 
@@ -61,7 +61,7 @@
 
 class AllStatic {
  public:
-  void* operator new(size_t size);
+  void* operator new(size_t size) throw();
   void operator delete(void* p);
 };
 
@@ -70,7 +70,7 @@
 // Linked list of raw memory chunks
 class Chunk: public CHeapObj {
  public:
-  void* operator new(size_t size, size_t length);
+  void* operator new(size_t size, size_t length) throw();
   void  operator delete(void* p, size_t length);
   Chunk(size_t length);
 
--- a/src/share/vm/adlc/formssel.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/adlc/formssel.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -536,12 +536,6 @@
   if( data_type != Form::none )
     rematerialize = true;
 
-  // Ugly: until a better fix is implemented, disable rematerialization for
-  // negD nodes because they are proved to be problematic.
-  if (is_ideal_negD()) {
-    return false;
-  }
-
   // Constants
   if( _components.count() == 1 && _components[0]->is(Component::USE_DEF) )
     rematerialize = true;
@@ -2757,14 +2751,18 @@
                              const char* less,          const char* less_format,
                              const char* greater_equal, const char* greater_equal_format,
                              const char* less_equal,    const char* less_equal_format,
-                             const char* greater,       const char* greater_format)
+                             const char* greater,       const char* greater_format,
+                             const char* overflow,      const char* overflow_format,
+                             const char* no_overflow,   const char* no_overflow_format)
   : Interface("COND_INTER"),
     _equal(equal),                 _equal_format(equal_format),
     _not_equal(not_equal),         _not_equal_format(not_equal_format),
     _less(less),                   _less_format(less_format),
     _greater_equal(greater_equal), _greater_equal_format(greater_equal_format),
     _less_equal(less_equal),       _less_equal_format(less_equal_format),
-    _greater(greater),             _greater_format(greater_format) {
+    _greater(greater),             _greater_format(greater_format),
+    _overflow(overflow),           _overflow_format(overflow_format),
+    _no_overflow(no_overflow),     _no_overflow_format(no_overflow_format) {
 }
 CondInterface::~CondInterface() {
   // not owner of any character arrays
@@ -2777,12 +2775,14 @@
 // Write info to output files
 void CondInterface::output(FILE *fp) {
   Interface::output(fp);
-  if ( _equal  != NULL )     fprintf(fp," equal       == %s\n", _equal);
-  if ( _not_equal  != NULL ) fprintf(fp," not_equal   == %s\n", _not_equal);
-  if ( _less  != NULL )      fprintf(fp," less        == %s\n", _less);
-  if ( _greater_equal  != NULL ) fprintf(fp," greater_equal   == %s\n", _greater_equal);
-  if ( _less_equal  != NULL ) fprintf(fp," less_equal  == %s\n", _less_equal);
-  if ( _greater  != NULL )    fprintf(fp," greater     == %s\n", _greater);
+  if ( _equal  != NULL )     fprintf(fp," equal        == %s\n", _equal);
+  if ( _not_equal  != NULL ) fprintf(fp," not_equal    == %s\n", _not_equal);
+  if ( _less  != NULL )      fprintf(fp," less         == %s\n", _less);
+  if ( _greater_equal  != NULL ) fprintf(fp," greater_equal    == %s\n", _greater_equal);
+  if ( _less_equal  != NULL ) fprintf(fp," less_equal   == %s\n", _less_equal);
+  if ( _greater  != NULL )    fprintf(fp," greater      == %s\n", _greater);
+  if ( _overflow != NULL )    fprintf(fp," overflow     == %s\n", _overflow);
+  if ( _no_overflow != NULL ) fprintf(fp," no_overflow  == %s\n", _no_overflow);
   // fprintf(fp,"\n");
 }
 
--- a/src/share/vm/adlc/formssel.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/adlc/formssel.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -798,12 +798,16 @@
   const char *_greater_equal;
   const char *_less_equal;
   const char *_greater;
+  const char *_overflow;
+  const char *_no_overflow;
   const char *_equal_format;
   const char *_not_equal_format;
   const char *_less_format;
   const char *_greater_equal_format;
   const char *_less_equal_format;
   const char *_greater_format;
+  const char *_overflow_format;
+  const char *_no_overflow_format;
 
   // Public Methods
   CondInterface(const char* equal,         const char* equal_format,
@@ -811,7 +815,9 @@
                 const char* less,          const char* less_format,
                 const char* greater_equal, const char* greater_equal_format,
                 const char* less_equal,    const char* less_equal_format,
-                const char* greater,       const char* greater_format);
+                const char* greater,       const char* greater_format,
+                const char* overflow,      const char* overflow_format,
+                const char* no_overflow,   const char* no_overflow_format);
   ~CondInterface();
 
   void dump();
--- a/src/share/vm/adlc/main.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/adlc/main.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -490,7 +490,7 @@
 
 // VS2005 has its own definition, identical to this one.
 #if !defined(_WIN32) || defined(_WIN64) || _MSC_VER < 1400
-void *operator new( size_t size, int, const char *, int ) {
+void *operator new( size_t size, int, const char *, int ) throw() {
   return ::operator new( size );
 }
 #endif
--- a/src/share/vm/adlc/output_c.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/adlc/output_c.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1095,7 +1095,7 @@
         fprintf(fp, "  // Identify previous instruction if inside this block\n");
         fprintf(fp, "  if( ");
         print_block_index(fp, inst_position);
-        fprintf(fp, " > 0 ) {\n    Node *n = block->_nodes.at(");
+        fprintf(fp, " > 0 ) {\n    Node *n = block->get_node(");
         print_block_index(fp, inst_position);
         fprintf(fp, ");\n    inst%d = (n->is_Mach()) ? ", inst_position);
         fprintf(fp, "n->as_Mach() : NULL;\n  }\n");
--- a/src/share/vm/adlc/output_h.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/adlc/output_h.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -388,6 +388,8 @@
   fprintf(fp, "  else if( _c%d == BoolTest::ge ) st->print(\"%s\");\n",i,cond->_greater_equal_format);
   fprintf(fp, "  else if( _c%d == BoolTest::lt ) st->print(\"%s\");\n",i,cond->_less_format);
   fprintf(fp, "  else if( _c%d == BoolTest::gt ) st->print(\"%s\");\n",i,cond->_greater_format);
+  fprintf(fp, "  else if( _c%d == BoolTest::overflow ) st->print(\"%s\");\n",i,cond->_overflow_format);
+  fprintf(fp, "  else if( _c%d == BoolTest::no_overflow ) st->print(\"%s\");\n",i,cond->_no_overflow_format);
 }
 
 // Output code that dumps constant values, increment "i" if type is constant
@@ -1208,6 +1210,8 @@
       fprintf(fp,"    case  BoolTest::ne : return not_equal();\n");
       fprintf(fp,"    case  BoolTest::le : return less_equal();\n");
       fprintf(fp,"    case  BoolTest::ge : return greater_equal();\n");
+      fprintf(fp,"    case  BoolTest::overflow : return overflow();\n");
+      fprintf(fp,"    case  BoolTest::no_overflow: return no_overflow();\n");
       fprintf(fp,"    default : ShouldNotReachHere(); return 0;\n");
       fprintf(fp,"    }\n");
       fprintf(fp,"  };\n");
@@ -1373,6 +1377,14 @@
         if( greater != NULL ) {
           define_oper_interface(fp, *oper, _globalNames, "greater", greater);
         }
+        const char *overflow = cInterface->_overflow;
+        if( overflow != NULL ) {
+          define_oper_interface(fp, *oper, _globalNames, "overflow", overflow);
+        }
+        const char *no_overflow = cInterface->_no_overflow;
+        if( no_overflow != NULL ) {
+          define_oper_interface(fp, *oper, _globalNames, "no_overflow", no_overflow);
+        }
       } // end Conditional Interface
       // Check if it is a Constant Interface
       else if (oper->_interface->is_ConstInterface() != NULL ) {
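Together these output_h.cpp hunks teach the generated operand classes the two new BoolTest conditions. A hand-written approximation of the switch the generator ends up emitting is below; the enum and the x86-style condition encodings are illustrative assumptions, not the real generated output:

```cpp
// Approximation of the switch ADLC now generates for a COND_INTER operand:
// map a BoolTest condition to the encoding supplied by the .ad file.
#include <cstdio>

enum BoolTestMnemonic { eq, ne, le, ge, lt, gt, overflow, no_overflow };

static int ccode(BoolTestMnemonic c) {
  switch (c) {
    case eq:          return 0x4;
    case ne:          return 0x5;
    case le:          return 0xE;
    case ge:          return 0xD;
    case lt:          return 0xC;
    case gt:          return 0xF;
    case overflow:    return 0x0;  // jo
    case no_overflow: return 0x1;  // jno
    default:          std::fprintf(stderr, "ShouldNotReachHere\n"); return -1;
  }
}

int main() {
  std::printf("overflow encodes as 0x%x\n", ccode(overflow));
  return 0;
}
```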
--- a/src/share/vm/asm/codeBuffer.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/asm/codeBuffer.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -296,8 +296,8 @@
   // CodeBuffers must be allocated on the stack except for a single
   // special case during expansion which is handled internally.  This
   // is done to guarantee proper cleanup of resources.
-  void* operator new(size_t size) { return ResourceObj::operator new(size); }
-  void  operator delete(void* p)  { ShouldNotCallThis(); }
+  void* operator new(size_t size) throw() { return ResourceObj::operator new(size); }
+  void  operator delete(void* p)          { ShouldNotCallThis(); }
 
  public:
   typedef int csize_t;  // code size type; would be size_t except for history
--- a/src/share/vm/c1/c1_Canonicalizer.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/c1/c1_Canonicalizer.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -935,6 +935,7 @@
 void Canonicalizer::do_UnsafePrefetchRead (UnsafePrefetchRead*  x) {}
 void Canonicalizer::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {}
 void Canonicalizer::do_ProfileCall(ProfileCall* x) {}
+void Canonicalizer::do_ProfileReturnType(ProfileReturnType* x) {}
 void Canonicalizer::do_ProfileInvoke(ProfileInvoke* x) {}
 void Canonicalizer::do_RuntimeCall(RuntimeCall* x) {}
 void Canonicalizer::do_RangeCheckPredicate(RangeCheckPredicate* x) {}
--- a/src/share/vm/c1/c1_Canonicalizer.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/c1/c1_Canonicalizer.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -104,6 +104,7 @@
   virtual void do_UnsafePrefetchRead (UnsafePrefetchRead*  x);
   virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x);
   virtual void do_ProfileCall    (ProfileCall*     x);
+  virtual void do_ProfileReturnType (ProfileReturnType*  x);
   virtual void do_ProfileInvoke  (ProfileInvoke*   x);
   virtual void do_RuntimeCall    (RuntimeCall*     x);
   virtual void do_MemBar         (MemBar*          x);
--- a/src/share/vm/c1/c1_CodeStubs.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/c1/c1_CodeStubs.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -364,7 +364,8 @@
   enum PatchID {
     access_field_id,
     load_klass_id,
-    load_mirror_id
+    load_mirror_id,
+    load_appendix_id
   };
   enum constants {
     patch_info_size = 3
@@ -417,7 +418,7 @@
       }
       NativeMovRegMem* n_move = nativeMovRegMem_at(pc_start());
       n_move->set_offset(field_offset);
-    } else if (_id == load_klass_id || _id == load_mirror_id) {
+    } else if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
       assert(_obj != noreg, "must have register object for load_klass/load_mirror");
 #ifdef ASSERT
       // verify that we're pointing at a NativeMovConstReg
--- a/src/share/vm/c1/c1_Compilation.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/c1/c1_Compilation.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -74,16 +74,19 @@
  private:
   JavaThread* _thread;
   CompileLog* _log;
+  TimerName _timer;
 
  public:
   PhaseTraceTime(TimerName timer)
-  : TraceTime("", &timers[timer], CITime || CITimeEach, Verbose), _log(NULL) {
+  : TraceTime("", &timers[timer], CITime || CITimeEach, Verbose),
+    _log(NULL), _timer(timer)
+  {
     if (Compilation::current() != NULL) {
       _log = Compilation::current()->log();
     }
 
     if (_log != NULL) {
-      _log->begin_head("phase name='%s'", timer_name[timer]);
+      _log->begin_head("phase name='%s'", timer_name[_timer]);
       _log->stamp();
       _log->end_head();
     }
@@ -91,7 +94,7 @@
 
   ~PhaseTraceTime() {
     if (_log != NULL)
-      _log->done("phase");
+      _log->done("phase name='%s'", timer_name[_timer]);
   }
 };
 
@@ -598,6 +601,17 @@
   }
 }
 
+ciKlass* Compilation::cha_exact_type(ciType* type) {
+  if (type != NULL && type->is_loaded() && type->is_instance_klass()) {
+    ciInstanceKlass* ik = type->as_instance_klass();
+    assert(ik->exact_klass() == NULL, "no cha for final klass");
+    if (DeoptC1 && UseCHA && !(ik->has_subklass() || ik->is_interface())) {
+      dependency_recorder()->assert_leaf_type(ik);
+      return ik;
+    }
+  }
+  return NULL;
+}
 
 void Compilation::print_timers() {
   // tty->print_cr("    Native methods         : %6.3f s, Average : %2.3f", CompileBroker::_t_native_compilation.seconds(), CompileBroker::_t_native_compilation.seconds() / CompileBroker::_total_native_compile_count);
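cha_exact_type lets C1 treat a loaded, non-interface class with no subclasses as if it were final, provided a leaf-type dependency is recorded so the method is deoptimized if a subclass loads later. A simplified stand-alone model of that decision, with Klass and the dependency recorder mocked for illustration:

```cpp
// Simplified model of Compilation::cha_exact_type: a loaded instance class
// with no subclasses and that is not an interface behaves like a final class
// until class loading proves otherwise. Klass and the recorder are mocks.
#include <cstdio>

struct Klass {
  const char* name;
  bool loaded;
  bool is_interface;
  bool has_subklass;
};

struct DependencyRecorder {
  void assert_leaf_type(const Klass* k) {
    // the real recorder registers a dependency that deoptimizes the nmethod
    // if a subclass of k is ever loaded
    std::printf("dependency: %s must stay a leaf type\n", k->name);
  }
};

static const Klass* cha_exact_type(const Klass* k, DependencyRecorder* rec) {
  if (k != NULL && k->loaded && !k->is_interface && !k->has_subklass) {
    rec->assert_leaf_type(k);
    return k;  // may now be treated as the exact runtime type
  }
  return NULL;
}

int main() {
  Klass leaf = { "com/example/Leaf", true, false, false };
  DependencyRecorder rec;
  std::printf("exact type %s\n", cha_exact_type(&leaf, &rec) ? "known" : "unknown");
  return 0;
}
```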
--- a/src/share/vm/c1/c1_Compilation.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/c1/c1_Compilation.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -238,7 +238,18 @@
     return env()->comp_level() == CompLevel_full_profile &&
       C1UpdateMethodData && C1ProfileCheckcasts;
   }
-
+  bool profile_parameters() {
+    return env()->comp_level() == CompLevel_full_profile &&
+      C1UpdateMethodData && MethodData::profile_parameters();
+  }
+  bool profile_arguments() {
+    return env()->comp_level() == CompLevel_full_profile &&
+      C1UpdateMethodData && MethodData::profile_arguments();
+  }
+  bool profile_return() {
+    return env()->comp_level() == CompLevel_full_profile &&
+      C1UpdateMethodData && MethodData::profile_return();
+  }
   // will compilation make optimistic assumptions that might lead to
   // deoptimization and that the runtime will account for?
   bool is_optimistic() const                             {
@@ -246,6 +257,8 @@
       (RangeCheckElimination || UseLoopInvariantCodeMotion) &&
       method()->method_data()->trap_count(Deoptimization::Reason_none) == 0;
   }
+
+  ciKlass* cha_exact_type(ciType* type);
 };
 
 
@@ -279,8 +292,8 @@
 // Base class for objects allocated by the compiler in the compilation arena
 class CompilationResourceObj ALLOCATION_SUPER_CLASS_SPEC {
  public:
-  void* operator new(size_t size) { return Compilation::current()->arena()->Amalloc(size); }
-  void* operator new(size_t size, Arena* arena) {
+  void* operator new(size_t size) throw() { return Compilation::current()->arena()->Amalloc(size); }
+  void* operator new(size_t size, Arena* arena) throw() {
     return arena->Amalloc(size);
   }
   void  operator delete(void* p) {} // nothing to do
--- a/src/share/vm/c1/c1_Compiler.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/c1/c1_Compiler.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -42,26 +42,16 @@
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/sharedRuntime.hpp"
 
-volatile int Compiler::_runtimes = uninitialized;
-
-Compiler::Compiler() {
-}
-
 
-Compiler::~Compiler() {
-  Unimplemented();
-}
+Compiler::Compiler () {}
 
-
-void Compiler::initialize_all() {
+void Compiler::init_c1_runtime() {
   BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
   Arena* arena = new (mtCompiler) Arena();
   Runtime1::initialize(buffer_blob);
   FrameMap::initialize();
   // initialize data structures
   ValueType::initialize(arena);
-  // Instruction::initialize();
-  // BlockBegin::initialize();
   GraphBuilder::initialize();
   // note: to use more than one instance of LinearScan at a time this function call has to
   //       be moved somewhere outside of this constructor:
@@ -70,32 +60,33 @@
 
 
 void Compiler::initialize() {
-  if (_runtimes != initialized) {
-    initialize_runtimes( initialize_all, &_runtimes);
+  // Buffer blob must be allocated per C1 compiler thread at startup
+  BufferBlob* buffer_blob = init_buffer_blob();
+
+  if (should_perform_init()) {
+    if (buffer_blob == NULL) {
+      // When we come here we are in state 'initializing'; the entire C1 compilation
+      // can be shut down.
+      set_state(failed);
+    } else {
+      init_c1_runtime();
+      set_state(initialized);
+    }
   }
-  mark_initialized();
 }
 
-
-BufferBlob* Compiler::get_buffer_blob(ciEnv* env) {
+BufferBlob* Compiler::init_buffer_blob() {
   // Allocate buffer blob once at startup since allocation for each
   // compilation seems to be too expensive (at least on Intel win32).
-  BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
-  if (buffer_blob != NULL) {
-    return buffer_blob;
-  }
+  assert (CompilerThread::current()->get_buffer_blob() == NULL, "Should initialize only once");
 
   // setup CodeBuffer.  Preallocate a BufferBlob of size
   // NMethodSizeLimit plus some extra space for constants.
   int code_buffer_size = Compilation::desired_max_code_buffer_size() +
     Compilation::desired_max_constant_size();
 
-  buffer_blob = BufferBlob::create("Compiler1 temporary CodeBuffer",
-                                   code_buffer_size);
-  if (buffer_blob == NULL) {
-    CompileBroker::handle_full_code_cache();
-    env->record_failure("CodeCache is full");
-  } else {
+  BufferBlob* buffer_blob = BufferBlob::create("C1 temporary CodeBuffer", code_buffer_size);
+  if (buffer_blob != NULL) {
     CompilerThread::current()->set_buffer_blob(buffer_blob);
   }
 
@@ -104,15 +95,8 @@
 
 
 void Compiler::compile_method(ciEnv* env, ciMethod* method, int entry_bci) {
-  BufferBlob* buffer_blob = Compiler::get_buffer_blob(env);
-  if (buffer_blob == NULL) {
-    return;
-  }
-
-  if (!is_initialized()) {
-    initialize();
-  }
-
+  BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
+  assert(buffer_blob != NULL, "Must exist");
   // invoke compilation
   {
     // We are nested here because we need for the destructor
--- a/src/share/vm/c1/c1_Compiler.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/c1/c1_Compiler.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -30,11 +30,9 @@
 // There is one instance of the Compiler per CompilerThread.
 
 class Compiler: public AbstractCompiler {
-
  private:
-
- // Tracks whether runtime has been initialized
- static volatile int _runtimes;
+  static void init_c1_runtime();
+  BufferBlob* init_buffer_blob();
 
  public:
   // Creation
@@ -46,19 +44,12 @@
 
   virtual bool is_c1()                           { return true; };
 
-  BufferBlob* get_buffer_blob(ciEnv* env);
-
   // Missing feature tests
   virtual bool supports_native()                 { return true; }
   virtual bool supports_osr   ()                 { return true; }
 
-  // Customization
-  virtual bool needs_adapters         ()         { return false; }
-  virtual bool needs_stubs            ()         { return false; }
-
   // Initialization
   virtual void initialize();
-  static  void initialize_all();
 
   // Compilation entry point for methods
   virtual void compile_method(ciEnv* env, ciMethod* target, int entry_bci);
--- a/src/share/vm/c1/c1_GraphBuilder.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/c1/c1_GraphBuilder.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1466,9 +1466,22 @@
     // State at end of inlined method is the state of the caller
     // without the method parameters on stack, including the
     // return value, if any, of the inlined method on operand stack.
+    int invoke_bci = state()->caller_state()->bci();
     set_state(state()->caller_state()->copy_for_parsing());
     if (x != NULL) {
       state()->push(x->type(), x);
+      if (profile_return() && x->type()->is_object_kind()) {
+        ciMethod* caller = state()->scope()->method();
+        ciMethodData* md = caller->method_data_or_null();
+        ciProfileData* data = md->bci_to_data(invoke_bci);
+        if (data->is_CallTypeData() || data->is_VirtualCallTypeData()) {
+          bool has_return = data->is_CallTypeData() ? ((ciCallTypeData*)data)->has_return() : ((ciVirtualCallTypeData*)data)->has_return();
+          // May not be true in case of an inlined call through a method handle intrinsic.
+          if (has_return) {
+            profile_return_type(x, method(), caller, invoke_bci);
+          }
+        }
+      }
     }
     Goto* goto_callee = new Goto(continuation(), false);
 
@@ -1583,7 +1596,7 @@
       ObjectType* obj_type = obj->type()->as_ObjectType();
       if (obj_type->is_constant() && !PatchALot) {
         ciObject* const_oop = obj_type->constant_value();
-        if (!const_oop->is_null_object()) {
+        if (!const_oop->is_null_object() && const_oop->is_loaded()) {
           if (field->is_constant()) {
             ciConstant field_val = field->constant_value_of(const_oop);
             BasicType field_type = field_val.basic_type();
@@ -1658,6 +1671,50 @@
   return compilation()->dependency_recorder();
 }
 
+// How many arguments do we want to profile?
+Values* GraphBuilder::args_list_for_profiling(ciMethod* target, int& start, bool may_have_receiver) {
+  int n = 0;
+  bool has_receiver = may_have_receiver && Bytecodes::has_receiver(method()->java_code_at_bci(bci()));
+  start = has_receiver ? 1 : 0;
+  if (profile_arguments()) {
+    ciProfileData* data = method()->method_data()->bci_to_data(bci());
+    if (data->is_CallTypeData() || data->is_VirtualCallTypeData()) {
+      n = data->is_CallTypeData() ? data->as_CallTypeData()->number_of_arguments() : data->as_VirtualCallTypeData()->number_of_arguments();
+    }
+  }
+  // If we are inlining then we need to collect arguments to profile parameters for the target
+  if (profile_parameters() && target != NULL) {
+    if (target->method_data() != NULL && target->method_data()->parameters_type_data() != NULL) {
+      // The receiver is profiled on method entry so it's included in
+      // the number of parameters but here we're only interested in
+      // actual arguments.
+      n = MAX2(n, target->method_data()->parameters_type_data()->number_of_parameters() - start);
+    }
+  }
+  if (n > 0) {
+    return new Values(n);
+  }
+  return NULL;
+}
+
+// Collect arguments that we want to profile in a list
+Values* GraphBuilder::collect_args_for_profiling(Values* args, ciMethod* target, bool may_have_receiver) {
+  int start = 0;
+  Values* obj_args = args_list_for_profiling(target, start, may_have_receiver);
+  if (obj_args == NULL) {
+    return NULL;
+  }
+  int s = obj_args->size();
+  for (int i = start, j = 0; j < s; i++) {
+    if (args->at(i)->type()->is_object_kind()) {
+      obj_args->push(args->at(i));
+      j++;
+    }
+  }
+  assert(s == obj_args->length(), "missed on arg?");
+  return obj_args;
+}
+
 
 void GraphBuilder::invoke(Bytecodes::Code code) {
   bool will_link;
@@ -1667,9 +1724,8 @@
   const Bytecodes::Code bc_raw = stream()->cur_bc_raw();
   assert(declared_signature != NULL, "cannot be null");
 
-  // FIXME bail out for now
-  if (Bytecodes::has_optional_appendix(bc_raw) && !will_link) {
-    BAILOUT("unlinked call site (FIXME needs patching or recompile support)");
+  if (!C1PatchInvokeDynamic && Bytecodes::has_optional_appendix(bc_raw) && !will_link) {
+    BAILOUT("unlinked call site (C1PatchInvokeDynamic is off)");
   }
 
   // we have to make sure the argument size (incl. the receiver)
@@ -1713,10 +1769,23 @@
       code = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokespecial;
       break;
     }
+  } else {
+    if (bc_raw == Bytecodes::_invokehandle) {
+      assert(!will_link, "should come here only for unlinked call");
+      code = Bytecodes::_invokespecial;
+    }
   }
 
   // Push appendix argument (MethodType, CallSite, etc.), if one.
-  if (stream()->has_appendix()) {
+  bool patch_for_appendix = false;
+  int patching_appendix_arg = 0;
+  if (C1PatchInvokeDynamic &&
+      (Bytecodes::has_optional_appendix(bc_raw) && (!will_link || PatchALot))) {
+    Value arg = append(new Constant(new ObjectConstant(compilation()->env()->unloaded_ciinstance()), copy_state_before()));
+    apush(arg);
+    patch_for_appendix = true;
+    patching_appendix_arg = (will_link && stream()->has_appendix()) ? 0 : 1;
+  } else if (stream()->has_appendix()) {
     ciObject* appendix = stream()->get_appendix();
     Value arg = append(new Constant(new ObjectConstant(appendix)));
     apush(arg);
@@ -1732,7 +1801,8 @@
   if (UseCHA && DeoptC1 && klass->is_loaded() && target->is_loaded() &&
       !(// %%% FIXME: Are both of these relevant?
         target->is_method_handle_intrinsic() ||
-        target->is_compiled_lambda_form())) {
+        target->is_compiled_lambda_form()) &&
+      !patch_for_appendix) {
     Value receiver = NULL;
     ciInstanceKlass* receiver_klass = NULL;
     bool type_is_exact = false;
@@ -1803,7 +1873,7 @@
         // number of implementors for decl_interface is 0 or 1. If
         // it's 0 then no class implements decl_interface and there's
         // no point in inlining.
-        if (!holder->is_loaded() || decl_interface->nof_implementors() != 1) {
+        if (!holder->is_loaded() || decl_interface->nof_implementors() != 1 || decl_interface->has_default_methods()) {
           singleton = NULL;
         }
       }
@@ -1850,7 +1920,8 @@
   // check if we could do inlining
   if (!PatchALot && Inline && klass->is_loaded() &&
       (klass->is_initialized() || klass->is_interface() && target->holder()->is_initialized())
-      && target->is_loaded()) {
+      && target->is_loaded()
+      && !patch_for_appendix) {
     // callee is known => check if we have static binding
     assert(target->is_loaded(), "callee must be known");
     if (code == Bytecodes::_invokestatic  ||
@@ -1901,7 +1972,7 @@
     code == Bytecodes::_invokespecial   ||
     code == Bytecodes::_invokevirtual   ||
     code == Bytecodes::_invokeinterface;
-  Values* args = state()->pop_arguments(target->arg_size_no_receiver());
+  Values* args = state()->pop_arguments(target->arg_size_no_receiver() + patching_appendix_arg);
   Value recv = has_receiver ? apop() : NULL;
   int vtable_index = Method::invalid_vtable_index;
 
@@ -1943,7 +2014,7 @@
       } else if (exact_target != NULL) {
         target_klass = exact_target->holder();
       }
-      profile_call(target, recv, target_klass);
+      profile_call(target, recv, target_klass, collect_args_for_profiling(args, NULL, false), false);
     }
   }
 
@@ -1958,6 +2029,9 @@
       push(result_type, result);
     }
   }
+  if (profile_return() && result_type->is_object_kind()) {
+    profile_return_type(result, target);
+  }
 }
 
 
@@ -3495,7 +3569,7 @@
           recv = args->at(0);
           null_check(recv);
         }
-        profile_call(callee, recv, NULL);
+        profile_call(callee, recv, NULL, collect_args_for_profiling(args, callee, true), true);
       }
     }
   }
@@ -3506,6 +3580,10 @@
   Value value = append_split(result);
   if (result_type != voidType) push(result_type, value);
 
+  if (callee != method() && profile_return() && result_type->is_object_kind()) {
+    profile_return_type(result, callee);
+  }
+
   // done
   return true;
 }
@@ -3690,6 +3768,7 @@
 
   // now perform tests that are based on flag settings
   if (callee->force_inline()) {
+    if (inline_level() > MaxForceInlineLevel) INLINE_BAILOUT("MaxForceInlineLevel");
     print_inlining(callee, "force inline by annotation");
   } else if (callee->should_inline()) {
     print_inlining(callee, "force inline by CompileOracle");
@@ -3749,7 +3828,28 @@
     compilation()->set_would_profile(true);
 
     if (profile_calls()) {
-      profile_call(callee, recv, holder_known ? callee->holder() : NULL);
+      int start = 0;
+      Values* obj_args = args_list_for_profiling(callee, start, has_receiver);
+      if (obj_args != NULL) {
+        int s = obj_args->size();
+        // if called through method handle invoke, some arguments may have been popped
+        for (int i = args_base+start, j = 0; j < obj_args->size() && i < state()->stack_size(); ) {
+          Value v = state()->stack_at_inc(i);
+          if (v->type()->is_object_kind()) {
+            obj_args->push(v);
+            j++;
+          }
+        }
+#ifdef ASSERT
+        {
+          bool ignored_will_link;
+          ciSignature* declared_signature = NULL;
+          ciMethod* real_target = method()->get_method_at_bci(bci(), ignored_will_link, &declared_signature);
+          assert(s == obj_args->length() || real_target->is_method_handle_intrinsic(), "missed on arg?");
+        }
+#endif
+      }
+      profile_call(callee, recv, holder_known ? callee->holder() : NULL, obj_args, true);
     }
   }
 
@@ -4205,7 +4305,9 @@
     }
   }
 
-  if (!PrintInlining)  return;
+  if (!PrintInlining && !compilation()->method()->has_option("PrintInlining")) {
+    return;
+  }
   CompileTask::print_inlining(callee, scope()->level(), bci(), msg);
   if (success && CIPrintMethodCodes) {
     callee->print_codes();
@@ -4235,8 +4337,23 @@
 }
 #endif // PRODUCT
 
-void GraphBuilder::profile_call(ciMethod* callee, Value recv, ciKlass* known_holder) {
-  append(new ProfileCall(method(), bci(), callee, recv, known_holder));
+void GraphBuilder::profile_call(ciMethod* callee, Value recv, ciKlass* known_holder, Values* obj_args, bool inlined) {
+  append(new ProfileCall(method(), bci(), callee, recv, known_holder, obj_args, inlined));
+}
+
+void GraphBuilder::profile_return_type(Value ret, ciMethod* callee, ciMethod* m, int invoke_bci) {
+  assert((m == NULL) == (invoke_bci < 0), "invalid method and invalid bci together");
+  if (m == NULL) {
+    m = method();
+  }
+  if (invoke_bci < 0) {
+    invoke_bci = bci();
+  }
+  ciMethodData* md = m->method_data_or_null();
+  ciProfileData* data = md->bci_to_data(invoke_bci);
+  if (data->is_CallTypeData() || data->is_VirtualCallTypeData()) {
+    append(new ProfileReturnType(m , invoke_bci, callee, ret));
+  }
 }
 
 void GraphBuilder::profile_invocation(ciMethod* callee, ValueStack* state) {
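args_list_for_profiling and collect_args_for_profiling size and fill the list of arguments whose types will be profiled: only reference-kind values are kept, and the receiver is skipped because it is already profiled on method entry. A reduced stand-alone model of the filtering step, with Value mocked for illustration:

```cpp
// Reduced model of collect_args_for_profiling: keep only the reference-typed
// arguments, skipping the receiver (profiled on method entry), up to the count
// the MethodData asked for. Value is a mock type for illustration.
#include <cstdio>
#include <vector>

struct Value {
  const char* name;
  bool is_object_kind;
};

static std::vector<Value> collect_obj_args(const std::vector<Value>& args,
                                           bool has_receiver, size_t wanted) {
  std::vector<Value> obj_args;
  size_t start = has_receiver ? 1 : 0;
  for (size_t i = start; i < args.size() && obj_args.size() < wanted; i++) {
    if (args[i].is_object_kind) {
      obj_args.push_back(args[i]);  // primitives carry no profilable type
    }
  }
  return obj_args;
}

int main() {
  std::vector<Value> args;
  Value recv  = { "recv",  true  };
  Value count = { "count", false };
  Value s     = { "s",     true  };
  args.push_back(recv); args.push_back(count); args.push_back(s);
  std::vector<Value> profiled = collect_obj_args(args, true, 2);
  for (size_t i = 0; i < profiled.size(); i++) {
    std::printf("profiling arg: %s\n", profiled[i].name);
  }
  return 0;
}
```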
--- a/src/share/vm/c1/c1_GraphBuilder.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/c1/c1_GraphBuilder.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -374,7 +374,8 @@
 
   void print_inlining(ciMethod* callee, const char* msg = NULL, bool success = true);
 
-  void profile_call(ciMethod* callee, Value recv, ciKlass* predicted_holder);
+  void profile_call(ciMethod* callee, Value recv, ciKlass* predicted_holder, Values* obj_args, bool inlined);
+  void profile_return_type(Value ret, ciMethod* callee, ciMethod* m = NULL, int bci = -1);
   void profile_invocation(ciMethod* inlinee, ValueStack* state);
 
   // Shortcuts to profiling control.
@@ -385,6 +386,12 @@
   bool profile_calls()         { return _compilation->profile_calls();         }
   bool profile_inlined_calls() { return _compilation->profile_inlined_calls(); }
   bool profile_checkcasts()    { return _compilation->profile_checkcasts();    }
+  bool profile_parameters()    { return _compilation->profile_parameters();    }
+  bool profile_arguments()     { return _compilation->profile_arguments();     }
+  bool profile_return()        { return _compilation->profile_return();        }
+
+  Values* args_list_for_profiling(ciMethod* target, int& start, bool may_have_receiver);
+  Values* collect_args_for_profiling(Values* args, ciMethod* target, bool may_have_receiver);
 
  public:
   NOT_PRODUCT(void print_stats();)
--- a/src/share/vm/c1/c1_Instruction.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/c1/c1_Instruction.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -104,6 +104,14 @@
   }
 }
 
+ciType* Instruction::exact_type() const {
+  ciType* t =  declared_type();
+  if (t != NULL && t->is_klass()) {
+    return t->as_klass()->exact_klass();
+  }
+  return NULL;
+}
+
 
 #ifndef PRODUCT
 void Instruction::check_state(ValueStack* state) {
@@ -135,9 +143,7 @@
 
 // perform constant and interval tests on index value
 bool AccessIndexed::compute_needs_range_check() {
-
   if (length()) {
-
     Constant* clength = length()->as_Constant();
     Constant* cindex = index()->as_Constant();
     if (clength && cindex) {
@@ -157,34 +163,8 @@
 }
 
 
-ciType* Local::exact_type() const {
-  ciType* type = declared_type();
-
-  // for primitive arrays, the declared type is the exact type
-  if (type->is_type_array_klass()) {
-    return type;
-  } else if (type->is_instance_klass()) {
-    ciInstanceKlass* ik = (ciInstanceKlass*)type;
-    if (ik->is_loaded() && ik->is_final() && !ik->is_interface()) {
-      return type;
-    }
-  } else if (type->is_obj_array_klass()) {
-    ciObjArrayKlass* oak = (ciObjArrayKlass*)type;
-    ciType* base = oak->base_element_type();
-    if (base->is_instance_klass()) {
-      ciInstanceKlass* ik = base->as_instance_klass();
-      if (ik->is_loaded() && ik->is_final()) {
-        return type;
-      }
-    } else if (base->is_primitive_type()) {
-      return type;
-    }
-  }
-  return NULL;
-}
-
 ciType* Constant::exact_type() const {
-  if (type()->is_object()) {
+  if (type()->is_object() && type()->as_ObjectType()->is_loaded()) {
     return type()->as_ObjectType()->exact_type();
   }
   return NULL;
@@ -192,19 +172,18 @@
 
 ciType* LoadIndexed::exact_type() const {
   ciType* array_type = array()->exact_type();
-  if (array_type == NULL) {
-    return NULL;
-  }
-  assert(array_type->is_array_klass(), "what else?");
-  ciArrayKlass* ak = (ciArrayKlass*)array_type;
+  if (array_type != NULL) {
+    assert(array_type->is_array_klass(), "what else?");
+    ciArrayKlass* ak = (ciArrayKlass*)array_type;
 
-  if (ak->element_type()->is_instance_klass()) {
-    ciInstanceKlass* ik = (ciInstanceKlass*)ak->element_type();
-    if (ik->is_loaded() && ik->is_final()) {
-      return ik;
+    if (ak->element_type()->is_instance_klass()) {
+      ciInstanceKlass* ik = (ciInstanceKlass*)ak->element_type();
+      if (ik->is_loaded() && ik->is_final()) {
+        return ik;
+      }
     }
   }
-  return NULL;
+  return Instruction::exact_type();
 }
 
 
@@ -224,22 +203,6 @@
 }
 
 
-ciType* LoadField::exact_type() const {
-  ciType* type = declared_type();
-  // for primitive arrays, the declared type is the exact type
-  if (type->is_type_array_klass()) {
-    return type;
-  }
-  if (type->is_instance_klass()) {
-    ciInstanceKlass* ik = (ciInstanceKlass*)type;
-    if (ik->is_loaded() && ik->is_final()) {
-      return type;
-    }
-  }
-  return NULL;
-}
-
-
 ciType* NewTypeArray::exact_type() const {
   return ciTypeArrayKlass::make(elt_type());
 }
@@ -264,16 +227,6 @@
   return klass();
 }
 
-ciType* CheckCast::exact_type() const {
-  if (klass()->is_instance_klass()) {
-    ciInstanceKlass* ik = (ciInstanceKlass*)klass();
-    if (ik->is_loaded() && ik->is_final()) {
-      return ik;
-    }
-  }
-  return NULL;
-}
-
 // Implementation of ArithmeticOp
 
 bool ArithmeticOp::is_commutative() const {
--- a/src/share/vm/c1/c1_Instruction.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/c1/c1_Instruction.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -107,6 +107,7 @@
 class         UnsafePrefetchRead;
 class         UnsafePrefetchWrite;
 class   ProfileCall;
+class   ProfileReturnType;
 class   ProfileInvoke;
 class   RuntimeCall;
 class   MemBar;
@@ -211,6 +212,7 @@
   virtual void do_UnsafePrefetchRead (UnsafePrefetchRead*  x) = 0;
   virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) = 0;
   virtual void do_ProfileCall    (ProfileCall*     x) = 0;
+  virtual void do_ProfileReturnType (ProfileReturnType*  x) = 0;
   virtual void do_ProfileInvoke  (ProfileInvoke*   x) = 0;
   virtual void do_RuntimeCall    (RuntimeCall*     x) = 0;
   virtual void do_MemBar         (MemBar*          x) = 0;
@@ -322,8 +324,38 @@
     _type = type;
   }
 
+  // Helper class to keep track of which arguments need a null check
+  class ArgsNonNullState {
+  private:
+    int _nonnull_state; // mask identifying which args are nonnull
+  public:
+    ArgsNonNullState()
+      : _nonnull_state(AllBits) {}
+
+    // Does argument number i need a null check?
+    bool arg_needs_null_check(int i) const {
+      // No data is kept for arguments starting at position 33 so
+      // conservatively assume that they need a null check.
+      if (i >= 0 && i < (int)sizeof(_nonnull_state) * BitsPerByte) {
+        return is_set_nth_bit(_nonnull_state, i);
+      }
+      return true;
+    }
+
+    // Set whether argument number i needs a null check or not
+    void set_arg_needs_null_check(int i, bool check) {
+      if (i >= 0 && i < (int)sizeof(_nonnull_state) * BitsPerByte) {
+        if (check) {
+          _nonnull_state |= nth_bit(i);
+        } else {
+          _nonnull_state &= ~(nth_bit(i));
+        }
+      }
+    }
+  };
+
  public:
-  void* operator new(size_t size) {
+  void* operator new(size_t size) throw() {
     Compilation* c = Compilation::current();
     void* res = c->arena()->Amalloc(size);
     ((Instruction*)res)->_id = c->get_next_id();
@@ -566,7 +598,7 @@
   virtual void other_values_do(ValueVisitor* f)   { /* usually no other - override on demand */ }
           void       values_do(ValueVisitor* f)   { input_values_do(f); state_values_do(f); other_values_do(f); }
 
-  virtual ciType* exact_type() const             { return NULL; }
+  virtual ciType* exact_type() const;
   virtual ciType* declared_type() const          { return NULL; }
 
   // hashing
@@ -689,7 +721,6 @@
   int java_index() const                         { return _java_index; }
 
   virtual ciType* declared_type() const          { return _declared_type; }
-  virtual ciType* exact_type() const;
 
   // generic
   virtual void input_values_do(ValueVisitor* f)   { /* no values */ }
@@ -806,7 +837,6 @@
   {}
 
   ciType* declared_type() const;
-  ciType* exact_type() const;
 
   // generic
   HASHING2(LoadField, !needs_patching() && !field()->is_volatile(), obj()->subst(), offset())  // cannot be eliminated if needs patching or if volatile
@@ -1299,6 +1329,7 @@
 
   virtual bool needs_exception_state() const     { return false; }
 
+  ciType* exact_type() const                     { return NULL; }
   ciType* declared_type() const;
 
   // generic
@@ -1422,7 +1453,6 @@
   }
 
   ciType* declared_type() const;
-  ciType* exact_type() const;
 };
 
 
@@ -1490,7 +1520,7 @@
   vmIntrinsics::ID _id;
   Values*          _args;
   Value            _recv;
-  int              _nonnull_state; // mask identifying which args are nonnull
+  ArgsNonNullState _nonnull_state;
 
  public:
   // preserves_state can be set to true for Intrinsics
@@ -1511,7 +1541,6 @@
   , _id(id)
   , _args(args)
   , _recv(NULL)
-  , _nonnull_state(AllBits)
   {
     assert(args != NULL, "args must exist");
     ASSERT_VALUES
@@ -1537,21 +1566,12 @@
   Value receiver() const                         { assert(has_receiver(), "must have receiver"); return _recv; }
   bool preserves_state() const                   { return check_flag(PreservesStateFlag); }
 
-  bool arg_needs_null_check(int i) {
-    if (i >= 0 && i < (int)sizeof(_nonnull_state) * BitsPerByte) {
-      return is_set_nth_bit(_nonnull_state, i);
-    }
-    return true;
+  bool arg_needs_null_check(int i) const {
+    return _nonnull_state.arg_needs_null_check(i);
   }
 
   void set_arg_needs_null_check(int i, bool check) {
-    if (i >= 0 && i < (int)sizeof(_nonnull_state) * BitsPerByte) {
-      if (check) {
-        _nonnull_state |= nth_bit(i);
-      } else {
-        _nonnull_state &= ~(nth_bit(i));
-      }
-    }
+    _nonnull_state.set_arg_needs_null_check(i, check);
   }
 
   // generic
@@ -1611,7 +1631,7 @@
   friend class SuxAndWeightAdjuster;
 
  public:
-   void* operator new(size_t size) {
+   void* operator new(size_t size) throw() {
     Compilation* c = Compilation::current();
     void* res = c->arena()->Amalloc(size);
     ((BlockBegin*)res)->_id = c->get_next_id();
@@ -2450,34 +2470,87 @@
 
 LEAF(ProfileCall, Instruction)
  private:
-  ciMethod* _method;
-  int       _bci_of_invoke;
-  ciMethod* _callee;         // the method that is called at the given bci
-  Value     _recv;
-  ciKlass*  _known_holder;
+  ciMethod*        _method;
+  int              _bci_of_invoke;
+  ciMethod*        _callee;         // the method that is called at the given bci
+  Value            _recv;
+  ciKlass*         _known_holder;
+  Values*          _obj_args;       // arguments for type profiling
+  ArgsNonNullState _nonnull_state;  // Do we know whether some arguments are never null?
+  bool             _inlined;        // Are we profiling a call that is inlined
 
  public:
-  ProfileCall(ciMethod* method, int bci, ciMethod* callee, Value recv, ciKlass* known_holder)
+  ProfileCall(ciMethod* method, int bci, ciMethod* callee, Value recv, ciKlass* known_holder, Values* obj_args, bool inlined)
     : Instruction(voidType)
     , _method(method)
     , _bci_of_invoke(bci)
     , _callee(callee)
     , _recv(recv)
     , _known_holder(known_holder)
+    , _obj_args(obj_args)
+    , _inlined(inlined)
   {
     // The ProfileCall has side-effects and must occur precisely where located
     pin();
   }
 
-  ciMethod* method()      { return _method; }
-  int bci_of_invoke()     { return _bci_of_invoke; }
-  ciMethod* callee()      { return _callee; }
-  Value recv()            { return _recv; }
-  ciKlass* known_holder() { return _known_holder; }
-
-  virtual void input_values_do(ValueVisitor* f)   { if (_recv != NULL) f->visit(&_recv); }
+  ciMethod* method()             const { return _method; }
+  int bci_of_invoke()            const { return _bci_of_invoke; }
+  ciMethod* callee()             const { return _callee; }
+  Value recv()                   const { return _recv; }
+  ciKlass* known_holder()        const { return _known_holder; }
+  int nb_profiled_args()         const { return _obj_args == NULL ? 0 : _obj_args->length(); }
+  Value profiled_arg_at(int i)   const { return _obj_args->at(i); }
+  bool arg_needs_null_check(int i) const {
+    return _nonnull_state.arg_needs_null_check(i);
+  }
+  bool inlined()                 const { return _inlined; }
+
+  void set_arg_needs_null_check(int i, bool check) {
+    _nonnull_state.set_arg_needs_null_check(i, check);
+  }
+
+  virtual void input_values_do(ValueVisitor* f)   {
+    if (_recv != NULL) {
+      f->visit(&_recv);
+    }
+    for (int i = 0; i < nb_profiled_args(); i++) {
+      f->visit(_obj_args->adr_at(i));
+    }
+  }
 };
 
+LEAF(ProfileReturnType, Instruction)
+ private:
+  ciMethod*        _method;
+  ciMethod*        _callee;
+  int              _bci_of_invoke;
+  Value            _ret;
+
+ public:
+  ProfileReturnType(ciMethod* method, int bci, ciMethod* callee, Value ret)
+    : Instruction(voidType)
+    , _method(method)
+    , _callee(callee)
+    , _bci_of_invoke(bci)
+    , _ret(ret)
+  {
+    set_needs_null_check(true);
+    // The ProfileReturnType has side-effects and must occur precisely where located
+    pin();
+  }
+
+  ciMethod* method()             const { return _method; }
+  ciMethod* callee()             const { return _callee; }
+  int bci_of_invoke()            const { return _bci_of_invoke; }
+  Value ret()                    const { return _ret; }
+
+  virtual void input_values_do(ValueVisitor* f)   {
+    if (_ret != NULL) {
+      f->visit(&_ret);
+    }
+  }
+};
 
 // Call some C runtime function that doesn't safepoint,
 // optionally passing the current thread as the first argument.
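ArgsNonNullState packs one needs-null-check bit per argument into a single int, with everything past the mask width conservatively reported as needing a check. The self-contained re-expression below mirrors the class from this hunk, with nth_bit/is_set_nth_bit inlined and the mask made unsigned so the shifts stay well defined:

```cpp
// Self-contained copy of the ArgsNonNullState idea: one bit per argument,
// set = "may be null, keep the null check". Arguments beyond the bit width
// fall back to "needs a check".
#include <cstdio>

class ArgsNonNullState {
 private:
  unsigned int _nonnull_state;  // bit i set => argument i still needs a null check

 public:
  ArgsNonNullState() : _nonnull_state(~0u) {}  // AllBits: assume every arg may be null

  bool arg_needs_null_check(int i) const {
    if (i >= 0 && i < (int)(sizeof(_nonnull_state) * 8)) {
      return (_nonnull_state & (1u << i)) != 0;
    }
    return true;  // no data kept past the mask width: stay conservative
  }

  void set_arg_needs_null_check(int i, bool check) {
    if (i >= 0 && i < (int)(sizeof(_nonnull_state) * 8)) {
      if (check) {
        _nonnull_state |= (1u << i);
      } else {
        _nonnull_state &= ~(1u << i);
      }
    }
  }
};

int main() {
  ArgsNonNullState s;
  s.set_arg_needs_null_check(2, false);   // argument 2 proven non-null
  std::printf("arg 2 needs check: %d\n", s.arg_needs_null_check(2));    // 0
  std::printf("arg 40 needs check: %d\n", s.arg_needs_null_check(40));  // 1: out of range
  return 0;
}
```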
--- a/src/share/vm/c1/c1_InstructionPrinter.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/c1/c1_InstructionPrinter.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -892,10 +892,24 @@
   if (x->known_holder() != NULL) {
     output()->print(", ");
     print_klass(x->known_holder());
+    output()->print(" ");
+  }
+  for (int i = 0; i < x->nb_profiled_args(); i++) {
+    if (i > 0) output()->print(", ");
+    print_value(x->profiled_arg_at(i));
+    if (x->arg_needs_null_check(i)) {
+      output()->print(" [NC]");
+    }
   }
   output()->put(')');
 }
 
+void InstructionPrinter::do_ProfileReturnType(ProfileReturnType* x) {
+  output()->print("profile ret type ");
+  print_value(x->ret());
+  output()->print(" %s.%s", x->method()->holder()->name()->as_utf8(), x->method()->name()->as_utf8());
+  output()->put(')');
+}
 void InstructionPrinter::do_ProfileInvoke(ProfileInvoke* x) {
   output()->print("profile_invoke ");
   output()->print(" %s.%s", x->inlinee()->holder()->name()->as_utf8(), x->inlinee()->name()->as_utf8());
--- a/src/share/vm/c1/c1_InstructionPrinter.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/c1/c1_InstructionPrinter.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -132,6 +132,7 @@
   virtual void do_UnsafePrefetchRead (UnsafePrefetchRead*  x);
   virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x);
   virtual void do_ProfileCall    (ProfileCall*     x);
+  virtual void do_ProfileReturnType (ProfileReturnType*  x);
   virtual void do_ProfileInvoke  (ProfileInvoke*   x);
   virtual void do_RuntimeCall    (RuntimeCall*     x);
   virtual void do_MemBar         (MemBar*          x);
--- a/src/share/vm/c1/c1_LIR.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/c1/c1_LIR.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -183,10 +183,10 @@
     case T_LONG:
     case T_OBJECT:
     case T_ADDRESS:
-    case T_METADATA:
     case T_VOID:
       return ::type2char(t);
-
+    case T_METADATA:
+      return 'M';
     case T_ILLEGAL:
       return '?';
 
@@ -1001,6 +1001,17 @@
       assert(opProfileCall->_tmp1->is_valid(), "used");  do_temp(opProfileCall->_tmp1);
       break;
     }
+
+// LIR_OpProfileType:
+    case lir_profile_type: {
+      assert(op->as_OpProfileType() != NULL, "must be");
+      LIR_OpProfileType* opProfileType = (LIR_OpProfileType*)op;
+
+      do_input(opProfileType->_mdp); do_temp(opProfileType->_mdp);
+      do_input(opProfileType->_obj);
+      do_temp(opProfileType->_tmp);
+      break;
+    }
   default:
     ShouldNotReachHere();
   }
@@ -1151,6 +1162,10 @@
   masm->emit_profile_call(this);
 }
 
+void LIR_OpProfileType::emit_code(LIR_Assembler* masm) {
+  masm->emit_profile_type(this);
+}
+
 // LIR_List
 LIR_List::LIR_List(Compilation* compilation, BlockBegin* block)
   : _operations(8)
@@ -1803,6 +1818,8 @@
      case lir_cas_int:               s = "cas_int";      break;
      // LIR_OpProfileCall
      case lir_profile_call:          s = "profile_call";  break;
+     // LIR_OpProfileType
+     case lir_profile_type:          s = "profile_type";  break;
      // LIR_OpAssert
 #ifdef ASSERT
      case lir_assert:                s = "assert";        break;
@@ -2086,6 +2103,15 @@
   tmp1()->print(out);          out->print(" ");
 }
 
+// LIR_OpProfileType
+void LIR_OpProfileType::print_instr(outputStream* out) const {
+  out->print("exact = "); exact_klass()->print_name_on(out);
+  out->print("current = "); ciTypeEntries::print_ciklass(out, current_klass());
+  mdp()->print(out);          out->print(" ");
+  obj()->print(out);          out->print(" ");
+  tmp()->print(out);          out->print(" ");
+}
+
 #endif // PRODUCT
 
 // Implementation of LIR_InsertionBuffer
--- a/src/share/vm/c1/c1_LIR.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/c1/c1_LIR.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -882,6 +882,7 @@
 class    LIR_OpTypeCheck;
 class    LIR_OpCompareAndSwap;
 class    LIR_OpProfileCall;
+class    LIR_OpProfileType;
 #ifdef ASSERT
 class    LIR_OpAssert;
 #endif
@@ -1005,6 +1006,7 @@
   , end_opCompareAndSwap
   , begin_opMDOProfile
     , lir_profile_call
+    , lir_profile_type
   , end_opMDOProfile
   , begin_opAssert
     , lir_assert
@@ -1145,6 +1147,7 @@
   virtual LIR_OpTypeCheck* as_OpTypeCheck() { return NULL; }
   virtual LIR_OpCompareAndSwap* as_OpCompareAndSwap() { return NULL; }
   virtual LIR_OpProfileCall* as_OpProfileCall() { return NULL; }
+  virtual LIR_OpProfileType* as_OpProfileType() { return NULL; }
 #ifdef ASSERT
   virtual LIR_OpAssert* as_OpAssert() { return NULL; }
 #endif
@@ -1211,8 +1214,6 @@
   bool is_invokedynamic() const                  { return code() == lir_dynamic_call; }
   bool is_method_handle_invoke() const {
     return
-      is_invokedynamic()  // An invokedynamic is always a MethodHandle call site.
-      ||
       method()->is_compiled_lambda_form()  // Java-generated adapter
       ||
       method()->is_method_handle_intrinsic();  // JVM-generated MH intrinsic
@@ -1927,8 +1928,8 @@
 
  public:
   // Destroys recv
-  LIR_OpProfileCall(LIR_Code code, ciMethod* profiled_method, int profiled_bci, ciMethod* profiled_callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* known_holder)
-    : LIR_Op(code, LIR_OprFact::illegalOpr, NULL)  // no result, no info
+  LIR_OpProfileCall(ciMethod* profiled_method, int profiled_bci, ciMethod* profiled_callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* known_holder)
+    : LIR_Op(lir_profile_call, LIR_OprFact::illegalOpr, NULL)  // no result, no info
     , _profiled_method(profiled_method)
     , _profiled_bci(profiled_bci)
     , _profiled_callee(profiled_callee)
@@ -1950,6 +1951,45 @@
   virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
 };
 
+// LIR_OpProfileType
+class LIR_OpProfileType : public LIR_Op {
+ friend class LIR_OpVisitState;
+
+ private:
+  LIR_Opr      _mdp;
+  LIR_Opr      _obj;
+  LIR_Opr      _tmp;
+  ciKlass*     _exact_klass;   // non NULL if we know the klass statically (no need to load it from _obj)
+  intptr_t     _current_klass; // what the profiling currently reports
+  bool         _not_null;      // true if we know statically that _obj cannot be null
+  bool         _no_conflict;   // true if we're profiling parameters, _exact_klass is not NULL and we know
+                               // _exact_klass is the only possible type for this parameter in any context.
+
+ public:
+  // Destroys mdp and tmp
+  LIR_OpProfileType(LIR_Opr mdp, LIR_Opr obj, ciKlass* exact_klass, intptr_t current_klass, LIR_Opr tmp, bool not_null, bool no_conflict)
+    : LIR_Op(lir_profile_type, LIR_OprFact::illegalOpr, NULL)  // no result, no info
+    , _mdp(mdp)
+    , _obj(obj)
+    , _exact_klass(exact_klass)
+    , _current_klass(current_klass)
+    , _tmp(tmp)
+    , _not_null(not_null)
+    , _no_conflict(no_conflict) { }
+
+  LIR_Opr      mdp()              const             { return _mdp;              }
+  LIR_Opr      obj()              const             { return _obj;              }
+  LIR_Opr      tmp()              const             { return _tmp;              }
+  ciKlass*     exact_klass()      const             { return _exact_klass;      }
+  intptr_t     current_klass()    const             { return _current_klass;    }
+  bool         not_null()         const             { return _not_null;         }
+  bool         no_conflict()      const             { return _no_conflict;      }
+
+  virtual void emit_code(LIR_Assembler* masm);
+  virtual LIR_OpProfileType* as_OpProfileType() { return this; }
+  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
+};
+
 class LIR_InsertionBuffer;
 
 //--------------------------------LIR_List---------------------------------------------------
@@ -2249,7 +2289,10 @@
                   ciMethod* profiled_method, int profiled_bci);
   // MethodData* profiling
   void profile_call(ciMethod* method, int bci, ciMethod* callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* cha_klass) {
-    append(new LIR_OpProfileCall(lir_profile_call, method, bci, callee, mdo, recv, t1, cha_klass));
+    append(new LIR_OpProfileCall(method, bci, callee, mdo, recv, t1, cha_klass));
+  }
+  void profile_type(LIR_Address* mdp, LIR_Opr obj, ciKlass* exact_klass, intptr_t current_klass, LIR_Opr tmp, bool not_null, bool no_conflict) {
+    append(new LIR_OpProfileType(LIR_OprFact::address(mdp), obj, exact_klass, current_klass, tmp, not_null, no_conflict));
   }
 
   void xadd(LIR_Opr src, LIR_Opr add, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_xadd, src, add, res, tmp)); }
--- a/src/share/vm/c1/c1_LIRAssembler.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/c1/c1_LIRAssembler.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -93,12 +93,23 @@
       default:
         ShouldNotReachHere();
     }
+  } else if (patch->id() == PatchingStub::load_appendix_id) {
+    Bytecodes::Code bc_raw = info->scope()->method()->raw_code_at_bci(info->stack()->bci());
+    assert(Bytecodes::has_optional_appendix(bc_raw), "unexpected appendix resolution");
   } else {
     ShouldNotReachHere();
   }
 #endif
 }
 
+PatchingStub::PatchID LIR_Assembler::patching_id(CodeEmitInfo* info) {
+  IRScope* scope = info->scope();
+  Bytecodes::Code bc_raw = scope->method()->raw_code_at_bci(info->stack()->bci());
+  if (Bytecodes::has_optional_appendix(bc_raw)) {
+    return PatchingStub::load_appendix_id;
+  }
+  return PatchingStub::load_mirror_id;
+}
 
 //---------------------------------------------------------------
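patching_id picks the stub flavor from the bytecode at the patch site: bytecodes that may carry an appendix (the invokedynamic/invokehandle family) get load_appendix_id, while any other oop patch remains load_mirror_id. A compact model of that decision, with the bytecode set reduced to illustrative cases:

```cpp
// Compact model of LIR_Assembler::patching_id: the bytecode set is reduced to
// illustrative cases; the real code asks Bytecodes::has_optional_appendix on
// the raw bytecode at the patch site's bci.
#include <cstdio>

enum PatchID  { access_field_id, load_klass_id, load_mirror_id, load_appendix_id };
enum Bytecode { _getfield, _invokevirtual, _invokedynamic, _invokehandle };

static bool has_optional_appendix(Bytecode bc) {
  return bc == _invokedynamic || bc == _invokehandle;
}

static PatchID patching_id(Bytecode bc_at_patch_site) {
  return has_optional_appendix(bc_at_patch_site) ? load_appendix_id
                                                 : load_mirror_id;
}

int main() {
  std::printf("invokedynamic patches the %s\n",
              patching_id(_invokedynamic) == load_appendix_id ? "appendix" : "mirror");
  return 0;
}
```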
 
--- a/src/share/vm/c1/c1_LIRAssembler.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/c1/c1_LIRAssembler.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -119,6 +119,8 @@
 
   void comp_op(LIR_Condition condition, LIR_Opr src, LIR_Opr result, LIR_Op2* op);
 
+  PatchingStub::PatchID patching_id(CodeEmitInfo* info);
+
  public:
   LIR_Assembler(Compilation* c);
   ~LIR_Assembler();
@@ -206,6 +208,7 @@
   void emit_call(LIR_OpJavaCall* op);
   void emit_rtcall(LIR_OpRTCall* op);
   void emit_profile_call(LIR_OpProfileCall* op);
+  void emit_profile_type(LIR_OpProfileType* op);
   void emit_delay(LIR_OpDelay* op);
 
   void arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack);
--- a/src/share/vm/c1/c1_LIRGenerator.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/c1/c1_LIRGenerator.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1175,7 +1175,7 @@
   if (compilation()->env()->dtrace_method_probes()) {
     BasicTypeList signature;
     signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
-    signature.append(T_OBJECT); // Method*
+    signature.append(T_METADATA); // Method*
     LIR_OprList* args = new LIR_OprList();
     args->append(getThreadPointer());
     LIR_Opr meth = new_register(T_METADATA);
@@ -1265,6 +1265,7 @@
 
   LIRItem rcvr(x->argument_at(0), this);
   rcvr.load_item();
+  LIR_Opr temp = new_register(T_METADATA);
   LIR_Opr result = rlock_result(x);
 
   // need to perform the null check on the rcvr
@@ -1272,8 +1273,11 @@
   if (x->needs_null_check()) {
     info = state_for(x);
   }
-  __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), result, info);
-  __ move_wide(new LIR_Address(result, in_bytes(Klass::java_mirror_offset()), T_OBJECT), result);
+
+  // FIXME T_ADDRESS should actually be T_METADATA but it can't because the
+  // meaning of these two is mixed up (see JDK-8026837).
+  __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), temp, info);
+  __ move_wide(new LIR_Address(temp, in_bytes(Klass::java_mirror_offset()), T_OBJECT), result);
 }
 
 
@@ -2571,6 +2575,111 @@
 }
 
 
+ciKlass* LIRGenerator::profile_arg_type(ciMethodData* md, int md_base_offset, int md_offset, intptr_t profiled_k, Value arg, LIR_Opr& mdp, bool not_null, ciKlass* signature_k) {
+  ciKlass* result = NULL;
+  bool do_null = !not_null && !TypeEntries::was_null_seen(profiled_k);
+  bool do_update = !TypeEntries::is_type_unknown(profiled_k);
+  // known not to be null or null bit already set and already set to
+  // unknown: nothing we can do to improve profiling
+  if (!do_null && !do_update) {
+    return result;
+  }
+
+  ciKlass* exact_klass = NULL;
+  Compilation* comp = Compilation::current();
+  if (do_update) {
+    // try to find exact type, using CHA if possible, so that loading
+    // the klass from the object can be avoided
+    ciType* type = arg->exact_type();
+    if (type == NULL) {
+      type = arg->declared_type();
+      type = comp->cha_exact_type(type);
+    }
+    assert(type == NULL || type->is_klass(), "type should be class");
+    exact_klass = (type != NULL && type->is_loaded()) ? (ciKlass*)type : NULL;
+
+    do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
+  }
+
+  if (!do_null && !do_update) {
+    return result;
+  }
+
+  ciKlass* exact_signature_k = NULL;
+  if (do_update) {
+    // Is the type from the signature exact (the only one possible)?
+    exact_signature_k = signature_k->exact_klass();
+    if (exact_signature_k == NULL) {
+      exact_signature_k = comp->cha_exact_type(signature_k);
+    } else {
+      result = exact_signature_k;
+      do_update = false;
+      // Known statically. No need to emit any code: prevent
+      // LIR_Assembler::emit_profile_type() from emitting useless code
+      profiled_k = ciTypeEntries::with_status(result, profiled_k);
+    }
+    if (exact_signature_k != NULL && exact_klass != exact_signature_k) {
+      assert(exact_klass == NULL, "arg and signature disagree?");
+      // sometimes the type of the signature is better than the best type
+      // the compiler has
+      exact_klass = exact_signature_k;
+      do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
+    }
+  }
+
+  if (!do_null && !do_update) {
+    return result;
+  }
+
+  if (mdp == LIR_OprFact::illegalOpr) {
+    mdp = new_register(T_METADATA);
+    __ metadata2reg(md->constant_encoding(), mdp);
+    if (md_base_offset != 0) {
+      LIR_Address* base_type_address = new LIR_Address(mdp, md_base_offset, T_ADDRESS);
+      mdp = new_pointer_register();
+      __ leal(LIR_OprFact::address(base_type_address), mdp);
+    }
+  }
+  LIRItem value(arg, this);
+  value.load_item();
+  __ profile_type(new LIR_Address(mdp, md_offset, T_METADATA),
+                  value.result(), exact_klass, profiled_k, new_pointer_register(), not_null, exact_signature_k != NULL);
+  return result;
+}
+
+// profile parameters on entry to the root of the compilation
+void LIRGenerator::profile_parameters(Base* x) {
+  if (compilation()->profile_parameters()) {
+    CallingConvention* args = compilation()->frame_map()->incoming_arguments();
+    ciMethodData* md = scope()->method()->method_data_or_null();
+    assert(md != NULL, "Sanity");
+
+    if (md->parameters_type_data() != NULL) {
+      ciParametersTypeData* parameters_type_data = md->parameters_type_data();
+      ciTypeStackSlotEntries* parameters =  parameters_type_data->parameters();
+      LIR_Opr mdp = LIR_OprFact::illegalOpr;
+      for (int java_index = 0, i = 0, j = 0; j < parameters_type_data->number_of_parameters(); i++) {
+        LIR_Opr src = args->at(i);
+        assert(!src->is_illegal(), "check");
+        BasicType t = src->type();
+        if (t == T_OBJECT || t == T_ARRAY) {
+          intptr_t profiled_k = parameters->type(j);
+          Local* local = x->state()->local_at(java_index)->as_Local();
+          ciKlass* exact = profile_arg_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
+                                            in_bytes(ParametersTypeData::type_offset(j)) - in_bytes(ParametersTypeData::type_offset(0)),
+                                            profiled_k, local, mdp, false, local->declared_type()->as_klass());
+          // If the profile is known statically, set it once and for all and do not emit any code
+          if (exact != NULL) {
+            md->set_parameter_type(j, exact);
+          }
+          j++;
+        }
+        java_index += type2size[t];
+      }
+    }
+  }
+}
+
 void LIRGenerator::do_Base(Base* x) {
   __ std_entry(LIR_OprFact::illegalOpr);
   // Emit moves from physical registers / stack slots to virtual registers
@@ -2611,7 +2720,7 @@
   if (compilation()->env()->dtrace_method_probes()) {
     BasicTypeList signature;
     signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
-    signature.append(T_OBJECT); // Method*
+    signature.append(T_METADATA); // Method*
     LIR_OprList* args = new LIR_OprList();
     args->append(getThreadPointer());
     LIR_Opr meth = new_register(T_METADATA);
@@ -2646,6 +2755,7 @@
 
   // increment invocation counters if needed
   if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
+    profile_parameters(x);
     CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, false);
     increment_invocation_counter(info);
   }
@@ -3004,12 +3114,123 @@
   }
 }
 
+void LIRGenerator::profile_arguments(ProfileCall* x) {
+  if (compilation()->profile_arguments()) {
+    int bci = x->bci_of_invoke();
+    ciMethodData* md = x->method()->method_data_or_null();
+    ciProfileData* data = md->bci_to_data(bci);
+    if ((data->is_CallTypeData() && data->as_CallTypeData()->has_arguments()) ||
+        (data->is_VirtualCallTypeData() && data->as_VirtualCallTypeData()->has_arguments())) {
+      ByteSize extra = data->is_CallTypeData() ? CallTypeData::args_data_offset() : VirtualCallTypeData::args_data_offset();
+      int base_offset = md->byte_offset_of_slot(data, extra);
+      LIR_Opr mdp = LIR_OprFact::illegalOpr;
+      ciTypeStackSlotEntries* args = data->is_CallTypeData() ? ((ciCallTypeData*)data)->args() : ((ciVirtualCallTypeData*)data)->args();
+
+      Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
+      int start = 0;
+      int stop = data->is_CallTypeData() ? ((ciCallTypeData*)data)->number_of_arguments() : ((ciVirtualCallTypeData*)data)->number_of_arguments();
+      if (x->nb_profiled_args() < stop) {
+        // if called through method handle invoke, some arguments may have been popped
+        stop = x->nb_profiled_args();
+      }
+      ciSignature* sig = x->callee()->signature();
+      // method handle call to virtual method
+      bool has_receiver = x->inlined() && !x->callee()->is_static() && !Bytecodes::has_receiver(bc);
+      ciSignatureStream sig_stream(sig, has_receiver ? x->callee()->holder() : NULL);
+      for (int i = 0; i < stop; i++) {
+        int off = in_bytes(TypeEntriesAtCall::argument_type_offset(i)) - in_bytes(TypeEntriesAtCall::args_data_offset());
+        ciKlass* exact = profile_arg_type(md, base_offset, off,
+                                          args->type(i), x->profiled_arg_at(i+start), mdp,
+                                          !x->arg_needs_null_check(i+start), sig_stream.next_klass());
+        if (exact != NULL) {
+          md->set_argument_type(bci, i, exact);
+        }
+      }
+    } else {
+#ifdef ASSERT
+      Bytecodes::Code code = x->method()->raw_code_at_bci(x->bci_of_invoke());
+      int n = x->nb_profiled_args();
+      assert(MethodData::profile_parameters() && x->inlined() &&
+             ((code == Bytecodes::_invokedynamic && n <= 1) || (code == Bytecodes::_invokehandle && n <= 2)),
+             "only at JSR292 bytecodes");
+#endif
+    }
+  }
+}
+
+// profile parameters on entry to an inlined method
+void LIRGenerator::profile_parameters_at_call(ProfileCall* x) {
+  if (compilation()->profile_parameters() && x->inlined()) {
+    ciMethodData* md = x->callee()->method_data_or_null();
+    if (md != NULL) {
+      ciParametersTypeData* parameters_type_data = md->parameters_type_data();
+      if (parameters_type_data != NULL) {
+        ciTypeStackSlotEntries* parameters =  parameters_type_data->parameters();
+        LIR_Opr mdp = LIR_OprFact::illegalOpr;
+        bool has_receiver = !x->callee()->is_static();
+        ciSignature* sig = x->callee()->signature();
+        ciSignatureStream sig_stream(sig, has_receiver ? x->callee()->holder() : NULL);
+        int i = 0; // to iterate on the Instructions
+        Value arg = x->recv();
+        bool not_null = false;
+        int bci = x->bci_of_invoke();
+        Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
+        // The first parameter is the receiver, so that's what we start
+        // with if it exists. One exception: a method handle call to a
+        // virtual method keeps the receiver in the args list
+        if (arg == NULL || !Bytecodes::has_receiver(bc)) {
+          i = 1;
+          arg = x->profiled_arg_at(0);
+          not_null = !x->arg_needs_null_check(0);
+        }
+        int k = 0; // to iterate on the profile data
+        for (;;) {
+          intptr_t profiled_k = parameters->type(k);
+          ciKlass* exact = profile_arg_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
+                                            in_bytes(ParametersTypeData::type_offset(k)) - in_bytes(ParametersTypeData::type_offset(0)),
+                                            profiled_k, arg, mdp, not_null, sig_stream.next_klass());
+          // If the profile is known statically, set it once and for all and do not emit any code
+          if (exact != NULL) {
+            md->set_parameter_type(k, exact);
+          }
+          k++;
+          if (k >= parameters_type_data->number_of_parameters()) {
+#ifdef ASSERT
+            int extra = 0;
+            if (MethodData::profile_arguments() && TypeProfileParmsLimit != -1 &&
+                x->nb_profiled_args() >= TypeProfileParmsLimit &&
+                x->recv() != NULL && Bytecodes::has_receiver(bc)) {
+              extra += 1;
+            }
+            assert(i == x->nb_profiled_args() - extra || (TypeProfileParmsLimit != -1 && TypeProfileArgsLimit > TypeProfileParmsLimit), "unused parameters?");
+#endif
+            break;
+          }
+          arg = x->profiled_arg_at(i);
+          not_null = !x->arg_needs_null_check(i);
+          i++;
+        }
+      }
+    }
+  }
+}
+
 void LIRGenerator::do_ProfileCall(ProfileCall* x) {
   // Need recv in a temporary register so it interferes with the other temporaries
   LIR_Opr recv = LIR_OprFact::illegalOpr;
   LIR_Opr mdo = new_register(T_OBJECT);
   // tmp is used to hold the counters on SPARC
   LIR_Opr tmp = new_pointer_register();
+
+  if (x->nb_profiled_args() > 0) {
+    profile_arguments(x);
+  }
+
+  // profile parameters on inlined method entry including receiver
+  if (x->recv() != NULL || x->nb_profiled_args() > 0) {
+    profile_parameters_at_call(x);
+  }
+
   if (x->recv() != NULL) {
     LIRItem value(x->recv(), this);
     value.load_item();
@@ -3019,6 +3240,21 @@
   __ profile_call(x->method(), x->bci_of_invoke(), x->callee(), mdo, recv, tmp, x->known_holder());
 }
 
+void LIRGenerator::do_ProfileReturnType(ProfileReturnType* x) {
+  int bci = x->bci_of_invoke();
+  ciMethodData* md = x->method()->method_data_or_null();
+  ciProfileData* data = md->bci_to_data(bci);
+  assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type");
+  ciReturnTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret();
+  LIR_Opr mdp = LIR_OprFact::illegalOpr;
+  ciKlass* exact = profile_arg_type(md, 0, md->byte_offset_of_slot(data, ret->type_offset()),
+                                    ret->type(), x->ret(), mdp,
+                                    !x->needs_null_check(), x->callee()->signature()->return_type()->as_klass());
+  if (exact != NULL) {
+    md->set_return_type(bci, exact);
+  }
+}
+
 void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
   // We can safely ignore accessors here, since c2 will inline them anyway,
   // accessors are also always mature.
@@ -3053,7 +3289,11 @@
   int offset = -1;
   LIR_Opr counter_holder;
   if (level == CompLevel_limited_profile) {
-    address counters_adr = method->ensure_method_counters();
+    MethodCounters* counters_adr = method->ensure_method_counters();
+    if (counters_adr == NULL) {
+      bailout("method counters allocation failed");
+      return;
+    }
     counter_holder = new_pointer_register();
     __ move(LIR_OprFact::intptrConst(counters_adr), counter_holder);
     offset = in_bytes(backedge ? MethodCounters::backedge_counter_offset() :
@@ -3091,7 +3331,7 @@
   BasicTypeList* signature = new BasicTypeList(x->number_of_arguments());
 
   if (x->pass_thread()) {
-    signature->append(T_ADDRESS);
+    signature->append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
     args->append(getThreadPointer());
   }
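
The profiling helpers above collapse each observed type into a single intptr_t per profiled slot: an aligned Klass* with two status bits folded into the low bits, which is what TypeEntries::was_null_seen() and TypeEntries::is_type_unknown() inspect. Below is a minimal standalone sketch of that tagged-pointer scheme, with hypothetical names and fake aligned addresses rather than the actual TypeEntries API:

    #include <assert.h>
    #include <stdint.h>

    struct Klass {};  // stand-in; we never dereference these pointers

    static const intptr_t null_seen_bit    = 1 << 0;
    static const intptr_t type_unknown_bit = 1 << 1;
    static const intptr_t status_bits      = null_seen_bit | type_unknown_bit;

    static bool was_null_seen(intptr_t k)   { return (k & null_seen_bit) != 0; }
    static bool is_type_unknown(intptr_t k) { return (k & type_unknown_bit) != 0; }
    static Klass* valid_klass(intptr_t k) {
      return is_type_unknown(k) ? NULL : (Klass*)(k & ~status_bits);
    }

    // Degrade monotonically: no type -> one type -> unknown.
    static intptr_t record_type(intptr_t k, Klass* observed) {
      Klass* current = valid_klass(k);
      if (current == NULL && !is_type_unknown(k)) {
        return (intptr_t)observed | (k & status_bits);  // first observation
      }
      if (current != observed) {
        return k | type_unknown_bit;                    // conflicting types
      }
      return k;                                         // same type again
    }

    int main() {
      Klass* a = (Klass*)0x1000;  // fake 4-byte-aligned "metadata" addresses
      Klass* b = (Klass*)0x2000;
      intptr_t e = 0;
      e = record_type(e, a);
      assert(valid_klass(e) == a && !was_null_seen(e));
      e = record_type(e, b);      // a second type degrades the entry to unknown
      assert(is_type_unknown(e) && valid_klass(e) == NULL);
      return 0;
    }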
 
--- a/src/share/vm/c1/c1_LIRGenerator.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/c1/c1_LIRGenerator.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -434,6 +434,10 @@
   void do_ThreadIDIntrinsic(Intrinsic* x);
   void do_ClassIDIntrinsic(Intrinsic* x);
 #endif
+  ciKlass* profile_arg_type(ciMethodData* md, int md_first_offset, int md_offset, intptr_t profiled_k, Value arg, LIR_Opr& mdp, bool not_null, ciKlass* signature_k);
+  void profile_arguments(ProfileCall* x);
+  void profile_parameters(Base* x);
+  void profile_parameters_at_call(ProfileCall* x);
 
  public:
   Compilation*  compilation() const              { return _compilation; }
@@ -534,6 +538,7 @@
   virtual void do_UnsafePrefetchRead (UnsafePrefetchRead*  x);
   virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x);
   virtual void do_ProfileCall    (ProfileCall*     x);
+  virtual void do_ProfileReturnType (ProfileReturnType* x);
   virtual void do_ProfileInvoke  (ProfileInvoke*   x);
   virtual void do_RuntimeCall    (RuntimeCall*     x);
   virtual void do_MemBar         (MemBar*          x);
--- a/src/share/vm/c1/c1_LinearScan.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/c1/c1_LinearScan.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -75,9 +75,9 @@
 
 // Map BasicType to spill size in 32-bit words, matching VMReg's notion of words
 #ifdef _LP64
-static int type2spill_size[T_CONFLICT+1]={ -1, 0, 0, 0, 1, 1, 1, 2, 1, 1, 1, 2, 2, 2, 0, 1, -1};
+static int type2spill_size[T_CONFLICT+1]={ -1, 0, 0, 0, 1, 1, 1, 2, 1, 1, 1, 2, 2, 2, 0, 2,  1, 2, 1, -1};
 #else
-static int type2spill_size[T_CONFLICT+1]={ -1, 0, 0, 0, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 0, 1, -1};
+static int type2spill_size[T_CONFLICT+1]={ -1, 0, 0, 0, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 0, 1, -1, 1, 1, -1};
 #endif
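
The two type2spill_size tables above are indexed by BasicType, so every new BasicType entry (here T_METADATA and friends) forces a by-hand table edit. A hedged sketch of one way to keep such a table honest at compile time, assuming C++11 and a reduced stand-in enum:

    // Reduced stand-in for the BasicType enum; the real one lives in
    // globalDefinitions.hpp and ends at T_CONFLICT.
    enum BasicType { T_INT, T_LONG, T_OBJECT, T_METADATA, T_CONFLICT };

    // Spill sizes in 32-bit words for the reduced enum above; the array is
    // deliberately left unsized so its length comes from the initializer.
    static const int type2spill_size[] = { 1, 2, 1, 1, -1 };

    // Fails to compile as soon as the enum and the table disagree -- the
    // by-hand resize in the hunk above is exactly this invariant.
    static_assert(sizeof(type2spill_size) / sizeof(type2spill_size[0]) == T_CONFLICT + 1,
                  "type2spill_size must cover every BasicType");

    int main() { return type2spill_size[T_INT] == 1 ? 0 : 1; }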
 
 
--- a/src/share/vm/c1/c1_Optimizer.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/c1/c1_Optimizer.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -531,6 +531,7 @@
   void do_UnsafePrefetchRead (UnsafePrefetchRead*  x);
   void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x);
   void do_ProfileCall    (ProfileCall*     x);
+  void do_ProfileReturnType (ProfileReturnType*  x);
   void do_ProfileInvoke  (ProfileInvoke*   x);
   void do_RuntimeCall    (RuntimeCall*     x);
   void do_MemBar         (MemBar*          x);
@@ -657,6 +658,8 @@
   void handle_Intrinsic       (Intrinsic* x);
   void handle_ExceptionObject (ExceptionObject* x);
   void handle_Phi             (Phi* x);
+  void handle_ProfileCall     (ProfileCall* x);
+  void handle_ProfileReturnType (ProfileReturnType* x);
 };
 
 
@@ -715,7 +718,9 @@
 void NullCheckVisitor::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {}
 void NullCheckVisitor::do_UnsafePrefetchRead (UnsafePrefetchRead*  x) {}
 void NullCheckVisitor::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {}
-void NullCheckVisitor::do_ProfileCall    (ProfileCall*     x) { nce()->clear_last_explicit_null_check(); }
+void NullCheckVisitor::do_ProfileCall    (ProfileCall*     x) { nce()->clear_last_explicit_null_check();
+                                                                nce()->handle_ProfileCall(x); }
+void NullCheckVisitor::do_ProfileReturnType (ProfileReturnType* x) { nce()->handle_ProfileReturnType(x); }
 void NullCheckVisitor::do_ProfileInvoke  (ProfileInvoke*   x) {}
 void NullCheckVisitor::do_RuntimeCall    (RuntimeCall*     x) {}
 void NullCheckVisitor::do_MemBar         (MemBar*          x) {}
@@ -1134,6 +1139,15 @@
   }
 }
 
+void NullCheckEliminator::handle_ProfileCall(ProfileCall* x) {
+  for (int i = 0; i < x->nb_profiled_args(); i++) {
+    x->set_arg_needs_null_check(i, !set_contains(x->profiled_arg_at(i)));
+  }
+}
+
+void NullCheckEliminator::handle_ProfileReturnType(ProfileReturnType* x) {
+  x->set_needs_null_check(!set_contains(x->ret()));
+}
 
 void Optimizer::eliminate_null_checks() {
   ResourceMark rm;
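
The new handle_ProfileCall and handle_ProfileReturnType hooks let the null-check eliminator feed its knowledge into profiling: a value already in the eliminator's set of proven-non-null values does not need another null check before its type is recorded. A small sketch of that set-membership test, using std::set and hypothetical stand-in types (C1 tracks Instruction* values; an int id is enough to show the idea):

    #include <assert.h>
    #include <set>

    typedef int Value;  // hypothetical stand-in for Instruction*

    struct ProfileCallSketch {
      Value args[2];
      bool  needs_null_check[2];
    };

    // Mirrors handle_ProfileCall above: an argument needs its own null
    // check only if the eliminator has not already proven it non-null.
    static void mark_null_checks(ProfileCallSketch& x,
                                 const std::set<Value>& proven_non_null) {
      for (int i = 0; i < 2; i++) {
        x.needs_null_check[i] = (proven_non_null.count(x.args[i]) == 0);
      }
    }

    int main() {
      std::set<Value> proven;
      proven.insert(7);                       // value 7 was already null-checked
      ProfileCallSketch x = { { 7, 9 }, { true, true } };
      mark_null_checks(x, proven);
      assert(!x.needs_null_check[0]);         // reuse the earlier check
      assert(x.needs_null_check[1]);          // value 9 still needs one
      return 0;
    }
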
--- a/src/share/vm/c1/c1_RangeCheckElimination.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/c1/c1_RangeCheckElimination.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -162,7 +162,8 @@
     void do_UnsafePrefetchRead (UnsafePrefetchRead*  x) { /* nothing to do */ };
     void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) { /* nothing to do */ };
     void do_ProfileCall    (ProfileCall*     x) { /* nothing to do */ };
-    void do_ProfileInvoke  (ProfileInvoke*  x)  { /* nothing to do */ };
+    void do_ProfileReturnType (ProfileReturnType*  x) { /* nothing to do */ };
+    void do_ProfileInvoke  (ProfileInvoke*   x) { /* nothing to do */ };
     void do_RuntimeCall    (RuntimeCall*     x) { /* nothing to do */ };
     void do_MemBar         (MemBar*          x) { /* nothing to do */ };
     void do_RangeCheckPredicate(RangeCheckPredicate* x) { /* nothing to do */ };
--- a/src/share/vm/c1/c1_Runtime1.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/c1/c1_Runtime1.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -542,8 +542,7 @@
     // exception handler can cause class loading, which might throw an
     // exception and those fields are expected to be clear during
     // normal bytecode execution.
-    thread->set_exception_oop(NULL);
-    thread->set_exception_pc(NULL);
+    thread->clear_exception_oop_and_pc();
 
     continuation = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, false, false);
     // If an exception was thrown during exception dispatch, the exception oop may have changed
@@ -709,10 +708,10 @@
   Bytecodes::Code code       = field_access.code();
 
   // We must load class, initialize class and resolve the field
-  FieldAccessInfo result; // initialize class if needed
+  fieldDescriptor result; // initialize class if needed
   constantPoolHandle constants(THREAD, caller->constants());
-  LinkResolver::resolve_field(result, constants, field_access.index(), Bytecodes::java_code(code), false, CHECK_NULL);
-  return result.klass()();
+  LinkResolver::resolve_field_access(result, constants, field_access.index(), Bytecodes::java_code(code), CHECK_NULL);
+  return result.field_holder();
 }
 
 
@@ -819,17 +818,18 @@
   KlassHandle init_klass(THREAD, NULL); // klass needed by load_klass_patching code
   KlassHandle load_klass(THREAD, NULL); // klass needed by load_klass_patching code
   Handle mirror(THREAD, NULL);                    // oop needed by load_mirror_patching code
+  Handle appendix(THREAD, NULL);                  // oop needed by appendix_patching code
   bool load_klass_or_mirror_patch_id =
     (stub_id == Runtime1::load_klass_patching_id || stub_id == Runtime1::load_mirror_patching_id);
 
   if (stub_id == Runtime1::access_field_patching_id) {
 
     Bytecode_field field_access(caller_method, bci);
-    FieldAccessInfo result; // initialize class if needed
+    fieldDescriptor result; // initialize class if needed
     Bytecodes::Code code = field_access.code();
     constantPoolHandle constants(THREAD, caller_method->constants());
-    LinkResolver::resolve_field(result, constants, field_access.index(), Bytecodes::java_code(code), false, CHECK);
-    patch_field_offset = result.field_offset();
+    LinkResolver::resolve_field_access(result, constants, field_access.index(), Bytecodes::java_code(code), CHECK);
+    patch_field_offset = result.offset();
 
     // If we're patching a field which is volatile then at compile time it
     // must not have been known to be volatile, so the generated code
@@ -888,10 +888,32 @@
           mirror = Handle(THREAD, m);
         }
         break;
-      default: Unimplemented();
+      default: fatal("unexpected bytecode for load_klass_or_mirror_patch_id");
     }
     // convert to handle
     load_klass = KlassHandle(THREAD, k);
+  } else if (stub_id == load_appendix_patching_id) {
+    Bytecode_invoke bytecode(caller_method, bci);
+    Bytecodes::Code bc = bytecode.invoke_code();
+
+    CallInfo info;
+    constantPoolHandle pool(thread, caller_method->constants());
+    int index = bytecode.index();
+    LinkResolver::resolve_invoke(info, Handle(), pool, index, bc, CHECK);
+    appendix = info.resolved_appendix();
+    switch (bc) {
+      case Bytecodes::_invokehandle: {
+        int cache_index = ConstantPool::decode_cpcache_index(index, true);
+        assert(cache_index >= 0 && cache_index < pool->cache()->length(), "unexpected cache index");
+        pool->cache()->entry_at(cache_index)->set_method_handle(pool, info);
+        break;
+      }
+      case Bytecodes::_invokedynamic: {
+        pool->invokedynamic_cp_cache_entry_at(index)->set_dynamic_call(pool, info);
+        break;
+      }
+      default: fatal("unexpected bytecode for load_appendix_patching_id");
+    }
   } else {
     ShouldNotReachHere();
   }
@@ -992,65 +1014,80 @@
                    n_copy->data() == (intptr_t)Universe::non_oop_word(),
                    "illegal init value");
             if (stub_id == Runtime1::load_klass_patching_id) {
-            assert(load_klass() != NULL, "klass not set");
-            n_copy->set_data((intx) (load_klass()));
+              assert(load_klass() != NULL, "klass not set");
+              n_copy->set_data((intx) (load_klass()));
             } else {
               assert(mirror() != NULL, "klass not set");
-              n_copy->set_data((intx) (mirror()));
+              n_copy->set_data(cast_from_oop<intx>(mirror()));
             }
 
             if (TracePatching) {
               Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
             }
+          }
+        } else if (stub_id == Runtime1::load_appendix_patching_id) {
+          NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);
+          assert(n_copy->data() == 0 ||
+                 n_copy->data() == (intptr_t)Universe::non_oop_word(),
+                 "illegal init value");
+          n_copy->set_data(cast_from_oop<intx>(appendix()));
 
-#if defined(SPARC) || defined(PPC)
-            // Update the location in the nmethod with the proper
-            // metadata.  When the code was generated, a NULL was stuffed
-            // in the metadata table and that table needs to be update to
-            // have the right value.  On intel the value is kept
-            // directly in the instruction instead of in the metadata
-            // table, so set_data above effectively updated the value.
-            nmethod* nm = CodeCache::find_nmethod(instr_pc);
-            assert(nm != NULL, "invalid nmethod_pc");
-            RelocIterator mds(nm, copy_buff, copy_buff + 1);
-            bool found = false;
-            while (mds.next() && !found) {
-              if (mds.type() == relocInfo::oop_type) {
-                assert(stub_id == Runtime1::load_mirror_patching_id, "wrong stub id");
-                oop_Relocation* r = mds.oop_reloc();
-                oop* oop_adr = r->oop_addr();
-                *oop_adr = mirror();
-                r->fix_oop_relocation();
-                found = true;
-              } else if (mds.type() == relocInfo::metadata_type) {
-                assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
-                metadata_Relocation* r = mds.metadata_reloc();
-                Metadata** metadata_adr = r->metadata_addr();
-                *metadata_adr = load_klass();
-                r->fix_metadata_relocation();
-                found = true;
-              }
-            }
-            assert(found, "the metadata must exist!");
-#endif
-
+          if (TracePatching) {
+            Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
           }
         } else {
           ShouldNotReachHere();
         }
 
+#if defined(SPARC) || defined(PPC)
+        if (load_klass_or_mirror_patch_id ||
+            stub_id == Runtime1::load_appendix_patching_id) {
+          // Update the location in the nmethod with the proper
+          // metadata.  When the code was generated, a NULL was stuffed
+          // in the metadata table and that table needs to be updated to
+          // have the right value.  On Intel the value is kept
+          // directly in the instruction instead of in the metadata
+          // table, so set_data above effectively updated the value.
+          nmethod* nm = CodeCache::find_nmethod(instr_pc);
+          assert(nm != NULL, "invalid nmethod_pc");
+          RelocIterator mds(nm, copy_buff, copy_buff + 1);
+          bool found = false;
+          while (mds.next() && !found) {
+            if (mds.type() == relocInfo::oop_type) {
+              assert(stub_id == Runtime1::load_mirror_patching_id ||
+                     stub_id == Runtime1::load_appendix_patching_id, "wrong stub id");
+              oop_Relocation* r = mds.oop_reloc();
+              oop* oop_adr = r->oop_addr();
+              *oop_adr = stub_id == Runtime1::load_mirror_patching_id ? mirror() : appendix();
+              r->fix_oop_relocation();
+              found = true;
+            } else if (mds.type() == relocInfo::metadata_type) {
+              assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
+              metadata_Relocation* r = mds.metadata_reloc();
+              Metadata** metadata_adr = r->metadata_addr();
+              *metadata_adr = load_klass();
+              r->fix_metadata_relocation();
+              found = true;
+            }
+          }
+          assert(found, "the metadata must exist!");
+        }
+#endif
         if (do_patch) {
           // replace instructions
           // first replace the tail, then the call
 #ifdef ARM
-          if(load_klass_or_mirror_patch_id && !VM_Version::supports_movw()) {
+          if((load_klass_or_mirror_patch_id ||
+              stub_id == Runtime1::load_appendix_patching_id) &&
+             !VM_Version::supports_movw()) {
             nmethod* nm = CodeCache::find_nmethod(instr_pc);
             address addr = NULL;
             assert(nm != NULL, "invalid nmethod_pc");
             RelocIterator mds(nm, copy_buff, copy_buff + 1);
             while (mds.next()) {
               if (mds.type() == relocInfo::oop_type) {
-                assert(stub_id == Runtime1::load_mirror_patching_id, "wrong stub id");
+                assert(stub_id == Runtime1::load_mirror_patching_id ||
+                       stub_id == Runtime1::load_appendix_patching_id, "wrong stub id");
                 oop_Relocation* r = mds.oop_reloc();
                 addr = (address)r->oop_addr();
                 break;
@@ -1077,7 +1114,8 @@
           ICache::invalidate_range(instr_pc, *byte_count);
           NativeGeneralJump::replace_mt_safe(instr_pc, copy_buff);
 
-          if (load_klass_or_mirror_patch_id) {
+          if (load_klass_or_mirror_patch_id ||
+              stub_id == Runtime1::load_appendix_patching_id) {
             relocInfo::relocType rtype =
               (stub_id == Runtime1::load_klass_patching_id) ?
                                    relocInfo::metadata_type :
@@ -1118,7 +1156,8 @@
 
   // If we are patching in a non-perm oop, make sure the nmethod
   // is on the right list.
-  if (ScavengeRootsInCode && mirror.not_null() && mirror()->is_scavengable()) {
+  if (ScavengeRootsInCode && ((mirror.not_null() && mirror()->is_scavengable()) ||
+                              (appendix.not_null() && appendix->is_scavengable()))) {
     MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag);
     nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
     guarantee(nm != NULL, "only nmethods can contain non-perm oops");
@@ -1179,6 +1218,24 @@
   return caller_is_deopted();
 }
 
+int Runtime1::move_appendix_patching(JavaThread* thread) {
+//
+// NOTE: we are still in Java
+//
+  Thread* THREAD = thread;
+  debug_only(NoHandleMark nhm;)
+  {
+    // Enter VM mode
+
+    ResetNoHandleMark rnhm;
+    patch_code(thread, load_appendix_patching_id);
+  }
+  // Back in Java; use no oops, DON'T safepoint
+
+  // Return true if calling code is deoptimized
+
+  return caller_is_deopted();
+}
 //
 // Entry point for compiled code. We want to patch an nmethod.
 // We don't do a normal VM transition here because we want to
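
move_appendix_patching follows the same shape as the neighboring patching entries: briefly enter VM mode, patch, then report whether the caller was deoptimized in the meantime. A reduced sketch of that control flow, with stand-in stubs in place of the real VM transitions:

    #include <stdio.h>

    // Stand-in stubs; the real entries run behind VM transitions.
    static void patch_code_stub(const char* which) {
      printf("patching %s site\n", which);  // real code rewrites the nmethod
    }
    static bool caller_is_deopted_stub() { return false; }

    // Patch under VM-side rules, then tell compiled code whether its
    // caller was deoptimized meanwhile so it can bail out instead of
    // re-running stale instructions.
    static int move_patching_entry(const char* which) {
      {
        // enter VM mode (handle marks and thread-state change in real code)
        patch_code_stub(which);
      }
      // back under compiled-code rules: no oops, no safepoint
      return caller_is_deopted_stub() ? 1 : 0;
    }

    int main() { return move_patching_entry("appendix"); }
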
--- a/src/share/vm/c1/c1_Runtime1.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/c1/c1_Runtime1.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -67,6 +67,7 @@
   stub(access_field_patching)        \
   stub(load_klass_patching)          \
   stub(load_mirror_patching)         \
+  stub(load_appendix_patching)       \
   stub(g1_pre_barrier_slow)          \
   stub(g1_post_barrier_slow)         \
   stub(fpu2long_stub)                \
@@ -160,6 +161,7 @@
   static int access_field_patching(JavaThread* thread);
   static int move_klass_patching(JavaThread* thread);
   static int move_mirror_patching(JavaThread* thread);
+  static int move_appendix_patching(JavaThread* thread);
 
   static void patch_code(JavaThread* thread, StubID stub_id);
 
--- a/src/share/vm/c1/c1_ValueMap.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/c1/c1_ValueMap.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -203,6 +203,7 @@
   void do_UnsafePrefetchRead (UnsafePrefetchRead*  x) { /* nothing to do */ }
   void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) { /* nothing to do */ }
   void do_ProfileCall    (ProfileCall*     x) { /* nothing to do */ }
+  void do_ProfileReturnType (ProfileReturnType*  x) { /* nothing to do */ }
   void do_ProfileInvoke  (ProfileInvoke*   x) { /* nothing to do */ };
   void do_RuntimeCall    (RuntimeCall*     x) { /* nothing to do */ };
   void do_MemBar         (MemBar*          x) { /* nothing to do */ };
--- a/src/share/vm/c1/c1_globals.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/c1/c1_globals.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -25,4 +25,4 @@
 #include "precompiled.hpp"
 #include "c1/c1_globals.hpp"
 
-C1_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, MATERIALIZE_NOTPRODUCT_FLAG)
+C1_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, MATERIALIZE_DIAGNOSTIC_FLAG, MATERIALIZE_NOTPRODUCT_FLAG)
--- a/src/share/vm/c1/c1_globals.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/c1/c1_globals.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -57,7 +57,7 @@
 //
 // Defines all global flags used by the client compiler.
 //
-#define C1_FLAGS(develop, develop_pd, product, product_pd, notproduct)      \
+#define C1_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct) \
                                                                             \
   /* Printing */                                                            \
   notproduct(bool, PrintC1Statistics, false,                                \
@@ -336,15 +336,21 @@
           "Use CHA and exact type results at call sites when updating MDOs")\
                                                                             \
   product(bool, C1UpdateMethodData, trueInTiered,                           \
-          "Update MethodData*s in Tier1-generated code")                  \
+          "Update MethodData*s in Tier1-generated code")                    \
                                                                             \
   develop(bool, PrintCFGToFile, false,                                      \
           "print control flow graph to a separate file during compilation") \
                                                                             \
+  diagnostic(bool, C1PatchInvokeDynamic, true,                              \
+             "Patch invokedynamic appendix not known at compile time")      \
+                                                                            \
+  develop(intx, MaxForceInlineLevel, 100,                                   \
+          "maximum number of nested @ForceInline calls that are inlined")   \
+                                                                            \
 
 
 // Read default values for c1 globals
 
-C1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_NOTPRODUCT_FLAG)
+C1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_NOTPRODUCT_FLAG)
 
 #endif // SHARE_VM_C1_C1_GLOBALS_HPP
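
The diagnostic column threaded through C1_FLAGS here is the usual HotSpot X-macro pattern: one flag list, expanded once with DECLARE_* macros in the header and once with MATERIALIZE_* macros in c1_globals.cpp. A reduced sketch of the pattern, with hypothetical flag names:

    #include <stdio.h>

    // Reduced model of the C1_FLAGS X-macro: one flag list, one
    // parameter per column kind. Flag names here are hypothetical.
    #define MY_FLAGS(develop, product, diagnostic)                      \
      product(bool, UseFoo, true, "use the hypothetical Foo fast path") \
      diagnostic(bool, PatchBar, true, "patch Bar sites lazily")

    // Expansion 1: declarations (what DECLARE_*_FLAG does in the header).
    #define DECLARE_FLAG(type, name, value, doc) extern type name;
    MY_FLAGS(DECLARE_FLAG, DECLARE_FLAG, DECLARE_FLAG)
    #undef DECLARE_FLAG

    // Expansion 2: definitions (what MATERIALIZE_*_FLAG does in the .cpp).
    #define MATERIALIZE_FLAG(type, name, value, doc) type name = value;
    MY_FLAGS(MATERIALIZE_FLAG, MATERIALIZE_FLAG, MATERIALIZE_FLAG)
    #undef MATERIALIZE_FLAG

    int main() {
      printf("UseFoo=%d PatchBar=%d\n", (int)UseFoo, (int)PatchBar);
      return 0;
    }
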
--- a/src/share/vm/ci/ciArray.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/ci/ciArray.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -24,13 +24,92 @@
 
 #include "precompiled.hpp"
 #include "ci/ciArray.hpp"
+#include "ci/ciArrayKlass.hpp"
+#include "ci/ciConstant.hpp"
 #include "ci/ciKlass.hpp"
 #include "ci/ciUtilities.hpp"
+#include "oops/objArrayOop.hpp"
+#include "oops/typeArrayOop.hpp"
 
 // ciArray
 //
 // This class represents an arrayOop in the HotSpot virtual
 // machine.
+static BasicType fixup_element_type(BasicType bt) {
+  if (bt == T_ARRAY)    return T_OBJECT;
+  if (bt == T_BOOLEAN)  return T_BYTE;
+  return bt;
+}
+
+ciConstant ciArray::element_value_impl(BasicType elembt,
+                                       arrayOop ary,
+                                       int index) {
+  if (ary == NULL)
+    return ciConstant();
+  assert(ary->is_array(), "");
+  if (index < 0 || index >= ary->length())
+    return ciConstant();
+  ArrayKlass* ak = (ArrayKlass*) ary->klass();
+  BasicType abt = ak->element_type();
+  if (fixup_element_type(elembt) !=
+      fixup_element_type(abt))
+    return ciConstant();
+  switch (elembt) {
+  case T_ARRAY:
+  case T_OBJECT:
+    {
+      assert(ary->is_objArray(), "");
+      objArrayOop objary = (objArrayOop) ary;
+      oop elem = objary->obj_at(index);
+      ciEnv* env = CURRENT_ENV;
+      ciObject* box = env->get_object(elem);
+      return ciConstant(T_OBJECT, box);
+    }
+  }
+  assert(ary->is_typeArray(), "");
+  typeArrayOop tary = (typeArrayOop) ary;
+  jint value = 0;
+  switch (elembt) {
+  case T_LONG:          return ciConstant(tary->long_at(index));
+  case T_FLOAT:         return ciConstant(tary->float_at(index));
+  case T_DOUBLE:        return ciConstant(tary->double_at(index));
+  default:              return ciConstant();
+  case T_BYTE:          value = tary->byte_at(index);           break;
+  case T_BOOLEAN:       value = tary->byte_at(index) & 1;       break;
+  case T_SHORT:         value = tary->short_at(index);          break;
+  case T_CHAR:          value = tary->char_at(index);           break;
+  case T_INT:           value = tary->int_at(index);            break;
+  }
+  return ciConstant(elembt, value);
+}
+
+// ------------------------------------------------------------------
+// ciArray::element_value
+//
+// Current value of an element.
+// Returns T_ILLEGAL if there is no element at the given index.
+ciConstant ciArray::element_value(int index) {
+  BasicType elembt = element_basic_type();
+  GUARDED_VM_ENTRY(
+    return element_value_impl(elembt, get_arrayOop(), index);
+  )
+}
+
+// ------------------------------------------------------------------
+// ciArray::element_value_by_offset
+//
+// Current value of an element at the specified offset.
+// Returns T_ILLEGAL if there is no element at the given offset.
+ciConstant ciArray::element_value_by_offset(intptr_t element_offset) {
+  BasicType elembt = element_basic_type();
+  intptr_t shift  = exact_log2(type2aelembytes(elembt));
+  intptr_t header = arrayOopDesc::base_offset_in_bytes(elembt);
+  intptr_t index = (element_offset - header) >> shift;
+  intptr_t offset = header + ((intptr_t)index << shift);
+  if (offset != element_offset || index != (jint)index)
+    return ciConstant();
+  return element_value((jint) index);
+}
 
 // ------------------------------------------------------------------
 // ciArray::print_impl
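
element_value_by_offset above recovers an index from a byte offset and rejects any offset that does not round-trip exactly back to itself, i.e. offsets that land inside an element. A worked sketch of that arithmetic, assuming a hypothetical 16-byte array header and 4-byte elements:

    #include <assert.h>
    #include <stdint.h>

    static intptr_t offset_to_index(intptr_t element_offset,
                                    intptr_t header, intptr_t shift) {
      intptr_t index  = (element_offset - header) >> shift;
      intptr_t offset = header + (index << shift);
      if (offset != element_offset) return -1;  // inside an element: reject
      return index;
    }

    int main() {
      const intptr_t header = 16, shift = 2;            // 4-byte elements
      assert(offset_to_index(16, header, shift) == 0);  // element 0
      assert(offset_to_index(24, header, shift) == 2);  // element 2
      assert(offset_to_index(26, header, shift) == -1); // misaligned
      return 0;
    }
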
--- a/src/share/vm/ci/ciArray.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/ci/ciArray.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -25,6 +25,8 @@
 #ifndef SHARE_VM_CI_CIARRAY_HPP
 #define SHARE_VM_CI_CIARRAY_HPP
 
+#include "ci/ciArrayKlass.hpp"
+#include "ci/ciConstant.hpp"
 #include "ci/ciObject.hpp"
 #include "oops/arrayOop.hpp"
 #include "oops/objArrayOop.hpp"
@@ -45,15 +47,30 @@
 
   ciArray(ciKlass* klass, int len) : ciObject(klass), _length(len) {}
 
-  arrayOop get_arrayOop() { return (arrayOop)get_oop(); }
+  arrayOop get_arrayOop() const { return (arrayOop)get_oop(); }
 
   const char* type_string() { return "ciArray"; }
 
   void print_impl(outputStream* st);
 
+  ciConstant element_value_impl(BasicType elembt, arrayOop ary, int index);
+
 public:
   int length() { return _length; }
 
+  // Convenience routines.
+  ciArrayKlass* array_type()         { return klass()->as_array_klass(); }
+  ciType*       element_type()       { return array_type()->element_type(); }
+  BasicType     element_basic_type() { return element_type()->basic_type(); }
+
+  // Current value of an element.
+  // Returns T_ILLEGAL if there is no element at the given index.
+  ciConstant element_value(int index);
+
+  // Current value of an element at the specified offset.
+  // Returns T_ILLEGAL if there is no element at the given offset.
+  ciConstant element_value_by_offset(intptr_t element_offset);
+
   // What kind of ciObject is this?
   bool is_array()        { return true; }
   bool is_java_object()  { return true; }
--- a/src/share/vm/ci/ciClassList.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/ci/ciClassList.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -102,6 +102,7 @@
 friend class ciMethodHandle;           \
 friend class ciMethodType;             \
 friend class ciReceiverTypeData;       \
+friend class ciTypeEntries;            \
 friend class ciSymbol;                 \
 friend class ciArray;                  \
 friend class ciObjArray;               \
--- a/src/share/vm/ci/ciConstant.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/ci/ciConstant.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -41,7 +41,6 @@
   union {
     jint      _int;
     jlong     _long;
-    jint      _long_half[2];
     jfloat    _float;
     jdouble   _double;
     ciObject* _object;
@@ -111,6 +110,20 @@
     return _value._object;
   }
 
+  bool      is_null_or_zero() const {
+    if (!is_java_primitive(basic_type())) {
+      return as_object()->is_null_object();
+    } else if (type2size[basic_type()] == 1) {
+      // treat float bits as int, to avoid comparison with -0 and NaN
+      return (_value._int == 0);
+    } else if (type2size[basic_type()] == 2) {
+      // treat double bits as long, to avoid comparison with -0 and NaN
+      return (_value._long == 0);
+    } else {
+      return false;
+    }
+  }
+
   // Debugging output
   void print();
 };
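
The "treat float bits as int" comments in is_null_or_zero above matter because floating-point equality would misclassify two cases: -0.0 compares equal to 0.0 yet has a non-zero bit pattern, and a NaN compares unequal even to itself. A standalone illustration of the first case (not ciConstant itself):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    // Value comparison cannot stand in for "all bits zero".
    static bool bits_are_zero(float f) {
      uint32_t bits;
      memcpy(&bits, &f, sizeof(bits));
      return bits == 0;
    }

    int main() {
      float neg_zero = -0.0f;
      assert(neg_zero == 0.0f);          // the value comparison says "zero"
      assert(!bits_are_zero(neg_zero));  // the bit comparison does not
      assert(bits_are_zero(0.0f));
      return 0;
    }
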
--- a/src/share/vm/ci/ciEnv.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/ci/ciEnv.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -483,8 +483,7 @@
     {
       // We have to lock the cpool to keep the oop from being resolved
       // while we are accessing it.
-      oop cplock = cpool->lock();
-      ObjectLocker ol(cplock, THREAD, cplock != NULL);
+      MonitorLockerEx ml(cpool->lock());
       constantTag tag = cpool->tag_at(index);
       if (tag.is_klass()) {
         // The klass has been inserted into the constant pool
@@ -1150,9 +1149,16 @@
   record_method_not_compilable("out of memory");
 }
 
-void ciEnv::dump_replay_data(outputStream* out) {
-  VM_ENTRY_MARK;
-  MutexLocker ml(Compile_lock);
+ciInstance* ciEnv::unloaded_ciinstance() {
+  GUARDED_VM_ENTRY(return _factory->get_unloaded_object_constant();)
+}
+
+// ------------------------------------------------------------------
+// ciEnv::dump_replay_data*
+
+// Don't change thread state or acquire any locks.
+// Safe to call from VM error reporter.
+void ciEnv::dump_replay_data_unsafe(outputStream* out) {
   ResourceMark rm;
 #if INCLUDE_JVMTI
   out->print_cr("JvmtiExport can_access_local_variables %d",     _jvmti_can_access_local_variables);
@@ -1177,3 +1183,10 @@
                 entry_bci, comp_level);
   out->flush();
 }
+
+void ciEnv::dump_replay_data(outputStream* out) {
+  GUARDED_VM_ENTRY(
+    MutexLocker ml(Compile_lock);
+    dump_replay_data_unsafe(out);
+  )
+}
--- a/src/share/vm/ci/ciEnv.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/ci/ciEnv.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -400,6 +400,7 @@
   static ciInstanceKlass* unloaded_ciinstance_klass() {
     return _unloaded_ciinstance_klass;
   }
+  ciInstance* unloaded_ciinstance();
 
   ciKlass*  find_system_klass(ciSymbol* klass_name);
   // Note:  To find a class from its name string, use ciSymbol::make,
@@ -451,6 +452,7 @@
 
   // Dump the compilation replay data for the ciEnv to the stream.
   void dump_replay_data(outputStream* out);
+  void dump_replay_data_unsafe(outputStream* out);
 };
 
 #endif // SHARE_VM_CI_CIENV_HPP
--- a/src/share/vm/ci/ciField.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/ci/ciField.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -75,7 +75,6 @@
 
   assert(klass->get_instanceKlass()->is_linked(), "must be linked before using its constant pool");
 
-  _cp_index = index;
   constantPoolHandle cpool(thread, klass->get_instanceKlass()->constants());
 
   // Get the field's name, signature, and type.
@@ -116,7 +115,7 @@
   // The declared holder of this field may not have been loaded.
   // Bail out with partial field information.
   if (!holder_is_accessible) {
-    // _cp_index and _type have already been set.
+    // _type has already been set.
     // The default values for _flags and _constant_value will suffice.
     // We need values for _holder, _offset, and _is_constant.
     _holder = declared_holder;
@@ -146,8 +145,6 @@
 ciField::ciField(fieldDescriptor *fd): _known_to_link_with_put(NULL), _known_to_link_with_get(NULL) {
   ASSERT_IN_VM;
 
-  _cp_index = -1;
-
   // Get the field's name, signature, and type.
   ciEnv* env = CURRENT_ENV;
   _name = env->get_symbol(fd->name());
@@ -189,12 +186,14 @@
   _holder = CURRENT_ENV->get_instance_klass(fd->field_holder());
 
   // Check to see if the field is constant.
-  if (_holder->is_initialized() && this->is_final()) {
+  bool is_final = this->is_final();
+  bool is_stable = FoldStableValues && this->is_stable();
+  if (_holder->is_initialized() && (is_final || is_stable)) {
     if (!this->is_static()) {
       // A field can be constant if it's a final static field or if
       // it's a final non-static field of a trusted class (classes in
       // java.lang.invoke and sun.invoke packages and subpackages).
-      if (trust_final_non_static_fields(_holder)) {
+      if (is_stable || trust_final_non_static_fields(_holder)) {
         _is_constant = true;
         return;
       }
@@ -227,7 +226,6 @@
 
     Handle mirror = k->java_mirror();
 
-    _is_constant = true;
     switch(type()->basic_type()) {
     case T_BYTE:
       _constant_value = ciConstant(type()->basic_type(), mirror->byte_field(_offset));
@@ -273,6 +271,12 @@
         }
       }
     }
+    if (is_stable && _constant_value.is_null_or_zero()) {
+      // It is not a constant after all; treat it as uninitialized.
+      _is_constant = false;
+    } else {
+      _is_constant = true;
+    }
   } else {
     _is_constant = false;
   }
@@ -344,12 +348,11 @@
     }
   }
 
-  FieldAccessInfo result;
-  constantPoolHandle c_pool(THREAD,
-                         accessing_klass->get_instanceKlass()->constants());
-  LinkResolver::resolve_field(result, c_pool, _cp_index,
-                              Bytecodes::java_code(bc),
-                              true, false, KILL_COMPILE_ON_FATAL_(false));
+  fieldDescriptor result;
+  LinkResolver::resolve_field(result, _holder->get_instanceKlass(),
+                              _name->get_symbol(), _signature->get_symbol(),
+                              accessing_klass->get_Klass(), bc, true, false,
+                              KILL_COMPILE_ON_FATAL_(false));
 
   // update the hit-cache, unless there is a problem with memory scoping:
   if (accessing_klass->is_shared() || !is_shared()) {
@@ -373,8 +376,11 @@
   tty->print(" signature=");
   _signature->print_symbol();
   tty->print(" offset=%d type=", _offset);
-  if (_type != NULL) _type->print_name();
-  else               tty->print("(reference)");
+  if (_type != NULL)
+    _type->print_name();
+  else
+    tty->print("(reference)");
+  tty->print(" flags=%04x", flags().as_int());
   tty->print(" is_constant=%s", bool_to_str(_is_constant));
   if (_is_constant && is_static()) {
     tty->print(" constant_value=");
--- a/src/share/vm/ci/ciField.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/ci/ciField.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -53,9 +53,6 @@
   ciInstanceKlass* _known_to_link_with_get;
   ciConstant       _constant_value;
 
-  // Used for will_link
-  int              _cp_index;
-
   ciType* compute_type();
   ciType* compute_type_impl();
 
@@ -139,7 +136,10 @@
   //      non-constant fields.  These are java.lang.System.in
   //      and java.lang.System.out.  Abomination.
   //
-  // Note: the check for case 4 is not yet implemented.
+  // A field is also considered constant if it is marked @Stable
+  // and is non-null (or non-zero, if a primitive).
+  // For non-static fields, the null/zero check must be
+  // arranged by the user, as constant_value().is_null_or_zero().
   bool is_constant() { return _is_constant; }
 
   // Get the constant value of this field.
@@ -173,6 +173,7 @@
   bool is_protected   () { return flags().is_protected(); }
   bool is_static      () { return flags().is_static(); }
   bool is_final       () { return flags().is_final(); }
+  bool is_stable      () { return flags().is_stable(); }
   bool is_volatile    () { return flags().is_volatile(); }
   bool is_transient   () { return flags().is_transient(); }
 
--- a/src/share/vm/ci/ciFlags.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/ci/ciFlags.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -59,6 +59,7 @@
   bool is_interface   () const         { return (_flags & JVM_ACC_INTERFACE   ) != 0; }
   bool is_abstract    () const         { return (_flags & JVM_ACC_ABSTRACT    ) != 0; }
   bool is_strict      () const         { return (_flags & JVM_ACC_STRICT      ) != 0; }
+  bool is_stable      () const         { return (_flags & JVM_ACC_FIELD_STABLE) != 0; }
 
   // Conversion
   jint   as_int()                      { return _flags; }
--- a/src/share/vm/ci/ciInstance.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/ci/ciInstance.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -60,10 +60,10 @@
 //
 // Constant value of a field.
 ciConstant ciInstance::field_value(ciField* field) {
-  assert(is_loaded() &&
-         field->holder()->is_loaded() &&
-         klass()->is_subclass_of(field->holder()),
-         "invalid access");
+  assert(is_loaded(), "invalid access - must be loaded");
+  assert(field->holder()->is_loaded(), "invalid access - holder must be loaded");
+  assert(klass()->is_subclass_of(field->holder()), "invalid access - must be subclass");
+
   VM_ENTRY_MARK;
   ciConstant result;
   Handle obj = get_oop();
@@ -127,6 +127,8 @@
 ciConstant ciInstance::field_value_by_offset(int field_offset) {
   ciInstanceKlass* ik = klass()->as_instance_klass();
   ciField* field = ik->get_field_by_offset(field_offset, false);
+  if (field == NULL)
+    return ciConstant();  // T_ILLEGAL
   return field_value(field);
 }
 
--- a/src/share/vm/ci/ciInstanceKlass.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/ci/ciInstanceKlass.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -57,6 +57,7 @@
   _init_state = ik->init_state();
   _nonstatic_field_size = ik->nonstatic_field_size();
   _has_nonstatic_fields = ik->has_nonstatic_fields();
+  _has_default_methods = ik->has_default_methods();
   _nonstatic_fields = NULL; // initialized lazily by compute_nonstatic_fields:
 
   _implementor = NULL; // we will fill these lazily
@@ -522,8 +523,7 @@
 
   for (JavaFieldStream fs(k); !fs.done(); fs.next()) {
     if (fs.access_flags().is_static())  continue;
-    fieldDescriptor fd;
-    fd.initialize(k, fs.index());
+    fieldDescriptor& fd = fs.field_descriptor();
     ciField* field = new (arena) ciField(&fd);
     fields->append(field);
   }
@@ -672,7 +672,6 @@
 
 
 void ciInstanceKlass::dump_replay_data(outputStream* out) {
-  ASSERT_IN_VM;
   ResourceMark rm;
 
   InstanceKlass* ik = get_instanceKlass();
--- a/src/share/vm/ci/ciInstanceKlass.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/ci/ciInstanceKlass.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -52,6 +52,7 @@
   bool                   _has_finalizer;
   bool                   _has_subklass;
   bool                   _has_nonstatic_fields;
+  bool                   _has_default_methods;
 
   ciFlags                _flags;
   jint                   _nonstatic_field_size;
@@ -171,6 +172,11 @@
     }
   }
 
+  bool has_default_methods()  {
+    assert(is_loaded(), "must be loaded");
+    return _has_default_methods;
+  }
+
   ciInstanceKlass* get_canonical_holder(int offset);
   ciField* get_field_by_offset(int field_offset, bool is_static);
   ciField* get_field_by_name(ciSymbol* name, ciSymbol* signature, bool is_static);
@@ -235,6 +241,13 @@
   bool is_instance_klass() const { return true; }
   bool is_java_klass() const     { return true; }
 
+  virtual ciKlass* exact_klass() {
+    if (is_loaded() && is_final() && !is_interface()) {
+      return this;
+    }
+    return NULL;
+  }
+
   // Dump the current state of this klass for compilation replay.
   virtual void dump_replay_data(outputStream* out);
 };
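
ciInstanceKlass::exact_klass above answers the question profile_arg_type asks first: can this static type have exactly one concrete runtime type? For a loaded, final, non-interface class the answer is the class itself, and no profiling code needs to be emitted. A reduced sketch:

    #include <assert.h>

    struct KlassSketch {
      bool loaded;
      bool is_final;
      bool is_interface;
      const KlassSketch* exact_klass() const {
        return (loaded && is_final && !is_interface) ? this : 0;
      }
    };

    int main() {
      KlassSketch final_k = { true, true,  false };  // e.g. a final class
      KlassSketch open_k  = { true, false, false };  // subclassable
      assert(final_k.exact_klass() == &final_k);
      assert(open_k.exact_klass() == 0);
      return 0;
    }
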
--- a/src/share/vm/ci/ciKlass.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/ci/ciKlass.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -66,7 +66,9 @@
 // ------------------------------------------------------------------
 // ciKlass::is_subtype_of
 bool ciKlass::is_subtype_of(ciKlass* that) {
-  assert(is_loaded() && that->is_loaded(), "must be loaded");
+  assert(this->is_loaded(), err_msg("must be loaded: %s", this->name()->as_quoted_ascii()));
+  assert(that->is_loaded(), err_msg("must be loaded: %s", that->name()->as_quoted_ascii()));
+
   // Check to see if the klasses are identical.
   if (this == that) {
     return true;
@@ -83,8 +85,8 @@
 // ------------------------------------------------------------------
 // ciKlass::is_subclass_of
 bool ciKlass::is_subclass_of(ciKlass* that) {
-  assert(is_loaded() && that->is_loaded(), "must be loaded");
-  // Check to see if the klasses are identical.
+  assert(this->is_loaded(), err_msg("must be loaded: %s", this->name()->as_quoted_ascii()));
+  assert(that->is_loaded(), err_msg("must be loaded: %s", that->name()->as_quoted_ascii()));
 
   VM_ENTRY_MARK;
   Klass* this_klass = get_Klass();
--- a/src/share/vm/ci/ciKlass.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/ci/ciKlass.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -41,6 +41,7 @@
   friend class ciEnv;
   friend class ciField;
   friend class ciMethod;
+  friend class ciMethodData;
   friend class ciObjArrayKlass;
 
 private:
@@ -121,6 +122,8 @@
   // What kind of ciObject is this?
   bool is_klass() const { return true; }
 
+  virtual ciKlass* exact_klass() = 0;
+
   void print_name_on(outputStream* st);
 };
 
--- a/src/share/vm/ci/ciMethod.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/ci/ciMethod.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -286,7 +286,10 @@
   check_is_loaded();
   assert(holder()->is_linked(), "must be linked");
   VM_ENTRY_MARK;
-  return klassItable::compute_itable_index(get_Method());
+  Method* m = get_Method();
+  if (!m->has_itable_index())
+    return Method::nonvirtual_vtable_index;
+  return m->itable_index();
 }
 #endif // SHARK
 
@@ -562,6 +565,116 @@
   if (_limit < MorphismLimit) _limit++;
 }
 
+
+void ciMethod::assert_virtual_call_type_ok(int bci) {
+  assert(java_code_at_bci(bci) == Bytecodes::_invokevirtual ||
+         java_code_at_bci(bci) == Bytecodes::_invokeinterface, err_msg("unexpected bytecode %s", Bytecodes::name(java_code_at_bci(bci))));
+}
+
+void ciMethod::assert_call_type_ok(int bci) {
+  assert(java_code_at_bci(bci) == Bytecodes::_invokestatic ||
+         java_code_at_bci(bci) == Bytecodes::_invokespecial ||
+         java_code_at_bci(bci) == Bytecodes::_invokedynamic, err_msg("unexpected bytecode %s", Bytecodes::name(java_code_at_bci(bci))));
+}
+
+/**
+ * Check whether profiling provides a type for argument i to the
+ * call at the given bci.
+ *
+ * @param bci  bci of the call
+ * @param i    argument number
+ * @return     profiled type
+ *
+ * If the profile reports that the argument may be null, return NULL,
+ * at least for now.
+ */
+ciKlass* ciMethod::argument_profiled_type(int bci, int i) {
+  if (MethodData::profile_parameters() && method_data() != NULL && method_data()->is_mature()) {
+    ciProfileData* data = method_data()->bci_to_data(bci);
+    if (data != NULL) {
+      if (data->is_VirtualCallTypeData()) {
+        assert_virtual_call_type_ok(bci);
+        ciVirtualCallTypeData* call = (ciVirtualCallTypeData*)data->as_VirtualCallTypeData();
+        if (i >= call->number_of_arguments()) {
+          return NULL;
+        }
+        ciKlass* type = call->valid_argument_type(i);
+        if (type != NULL && !call->argument_maybe_null(i)) {
+          return type;
+        }
+      } else if (data->is_CallTypeData()) {
+        assert_call_type_ok(bci);
+        ciCallTypeData* call = (ciCallTypeData*)data->as_CallTypeData();
+        if (i >= call->number_of_arguments()) {
+          return NULL;
+        }
+        ciKlass* type = call->valid_argument_type(i);
+        if (type != NULL && !call->argument_maybe_null(i)) {
+          return type;
+        }
+      }
+    }
+  }
+  return NULL;
+}
+
+/**
+ * Check whether profiling provides a type for the return value from
+ * the call at the given bci.
+ *
+ * @param bci  bci of the call
+ * @return     profiled type
+ *
+ * If the profile reports that the return value may be null, return NULL,
+ * at least for now.
+ */
+ciKlass* ciMethod::return_profiled_type(int bci) {
+  if (MethodData::profile_return() && method_data() != NULL && method_data()->is_mature()) {
+    ciProfileData* data = method_data()->bci_to_data(bci);
+    if (data != NULL) {
+      if (data->is_VirtualCallTypeData()) {
+        assert_virtual_call_type_ok(bci);
+        ciVirtualCallTypeData* call = (ciVirtualCallTypeData*)data->as_VirtualCallTypeData();
+        ciKlass* type = call->valid_return_type();
+        if (type != NULL && !call->return_maybe_null()) {
+          return type;
+        }
+      } else if (data->is_CallTypeData()) {
+        assert_call_type_ok(bci);
+        ciCallTypeData* call = (ciCallTypeData*)data->as_CallTypeData();
+        ciKlass* type = call->valid_return_type();
+        if (type != NULL && !call->return_maybe_null()) {
+          return type;
+        }
+      }
+    }
+  }
+  return NULL;
+}
+
+/**
+ * Check whether profiling provides a type for the parameter i
+ *
+ * @param i    parameter number
+ * @return     profiled type
+ *
+ * If the profile reports that the parameter may be null, return NULL,
+ * at least for now.
+ */
+ciKlass* ciMethod::parameter_profiled_type(int i) {
+  if (MethodData::profile_parameters() && method_data() != NULL && method_data()->is_mature()) {
+    ciParametersTypeData* parameters = method_data()->parameters_type_data();
+    if (parameters != NULL && i < parameters->number_of_parameters()) {
+      ciKlass* type = parameters->valid_parameter_type(i);
+      if (type != NULL && !parameters->parameter_maybe_null(i)) {
+        return type;
+      }
+    }
+  }
+  return NULL;
+}
+
+
 // ------------------------------------------------------------------
 // ciMethod::find_monomorphic_target
 //
@@ -843,7 +956,9 @@
 // Return true if allocation was successful or no MDO is required.
 bool ciMethod::ensure_method_data(methodHandle h_m) {
   EXCEPTION_CONTEXT;
-  if (is_native() || is_abstract() || h_m()->is_accessor()) return true;
+  if (is_native() || is_abstract() || h_m()->is_accessor()) {
+    return true;
+  }
   if (h_m()->method_data() == NULL) {
     Method::build_interpreter_method_data(h_m, THREAD);
     if (HAS_PENDING_EXCEPTION) {
@@ -900,22 +1015,21 @@
 // NULL otherwise.
 ciMethodData* ciMethod::method_data_or_null() {
   ciMethodData *md = method_data();
-  if (md->is_empty()) return NULL;
+  if (md->is_empty()) {
+    return NULL;
+  }
   return md;
 }
 
 // ------------------------------------------------------------------
 // ciMethod::ensure_method_counters
 //
-address ciMethod::ensure_method_counters() {
+MethodCounters* ciMethod::ensure_method_counters() {
   check_is_loaded();
   VM_ENTRY_MARK;
   methodHandle mh(THREAD, get_Method());
-  MethodCounters *counter = mh->method_counters();
-  if (counter == NULL) {
-    counter = Method::build_method_counters(mh(), CHECK_AND_CLEAR_NULL);
-  }
-  return (address)counter;
+  MethodCounters* method_counters = mh->get_method_counters(CHECK_NULL);
+  return method_counters;
 }
 
 // ------------------------------------------------------------------
@@ -1137,6 +1251,10 @@
 // ------------------------------------------------------------------
 // ciMethod::check_call
 bool ciMethod::check_call(int refinfo_index, bool is_static) const {
+  // This method is used only in C2 from InlineTree::ok_to_inline,
+  // and is only used under -Xcomp or -XX:CompileTheWorld.
+  // It appears to fail when applied to an invokeinterface call site.
+  // FIXME: Remove this method and resolve_method_statically; refactor to use the other LinkResolver entry points.
   VM_ENTRY_MARK;
   {
     EXCEPTION_MARK;
@@ -1240,7 +1358,6 @@
 #undef FETCH_FLAG_FROM_VM
 
 void ciMethod::dump_replay_data(outputStream* st) {
-  ASSERT_IN_VM;
   ResourceMark rm;
   Method* method = get_Method();
   MethodCounters* mcs = method->method_counters();
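
A note on the three new queries above: they deliberately return NULL unless the
profile is mature, monomorphic at that point, and has never seen null, so a
non-NULL answer is always safe to speculate on. A minimal sketch of a consumer
(speculative_return_klass and its surroundings are illustrative assumptions,
not part of this changeset):

    // Hypothetical helper: narrow a call's declared return type using the
    // profile, falling back to the declared type when no useful type exists.
    ciKlass* speculative_return_klass(ciMethod* caller, int call_bci,
                                      ciKlass* declared) {
      ciKlass* profiled = caller->return_profiled_type(call_bci);
      // return_profiled_type() already filters immature, polymorphic and
      // maybe-null profiles, so any non-NULL result can back an uncommon trap.
      return (profiled != NULL) ? profiled : declared;
    }
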
--- a/src/share/vm/ci/ciMethod.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/ci/ciMethod.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -117,6 +117,10 @@
     *bcp = code;
   }
 
+  // Check that the bytecode and the profile data collected are compatible
+  void assert_virtual_call_type_ok(int bci);
+  void assert_call_type_ok(int bci);
+
  public:
   // Basic method information.
   ciFlags flags() const                          { check_is_loaded(); return _flags; }
@@ -177,6 +181,10 @@
     address bcp = code() + bci;
     return Bytecodes::java_code_at(NULL, bcp);
   }
+  Bytecodes::Code raw_code_at_bci(int bci) {
+    address bcp = code() + bci;
+    return Bytecodes::code_at(NULL, bcp);
+  }
   BCEscapeAnalyzer  *get_bcea();
   ciMethodBlocks    *get_method_blocks();
 
@@ -226,6 +234,11 @@
   ciCallProfile call_profile_at_bci(int bci);
   int           interpreter_call_site_count(int bci);
 
+  // Does type profiling provide a useful type at this point?
+  ciKlass*      argument_profiled_type(int bci, int i);
+  ciKlass*      parameter_profiled_type(int i);
+  ciKlass*      return_profiled_type(int bci);
+
   ciField*      get_field_at_bci( int bci, bool &will_link);
   ciMethod*     get_method_at_bci(int bci, bool &will_link, ciSignature* *declared_signature);
 
@@ -261,7 +274,7 @@
   bool is_klass_loaded(int refinfo_index, bool must_be_resolved) const;
   bool check_call(int refinfo_index, bool is_static) const;
   bool ensure_method_data();  // make sure it exists in the VM also
-  address ensure_method_counters();
+  MethodCounters* ensure_method_counters();
   int instructions_size();
   int scale_count(int count, float prof_factor = 1.);  // make MDO count commensurate with IIC
 
--- a/src/share/vm/ci/ciMethodData.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/ci/ciMethodData.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -53,6 +53,7 @@
   _hint_di = first_di();
   // Initialize the escape information (to "don't know.");
   _eflags = _arg_local = _arg_stack = _arg_returned = 0;
+  _parameters = NULL;
 }
 
 // ------------------------------------------------------------------
@@ -74,11 +75,14 @@
   _hint_di = first_di();
   // Initialize the escape information (to "don't know.");
   _eflags = _arg_local = _arg_stack = _arg_returned = 0;
+  _parameters = NULL;
 }
 
 void ciMethodData::load_data() {
   MethodData* mdo = get_MethodData();
-  if (mdo == NULL) return;
+  if (mdo == NULL) {
+    return;
+  }
 
   // To do: don't copy the data if it is not "ripe" -- require a minimum #
   // of invocations.
@@ -106,6 +110,12 @@
     ci_data = next_data(ci_data);
     data = mdo->next_data(data);
   }
+  if (mdo->parameters_type_data() != NULL) {
+    _parameters = data_layout_at(mdo->parameters_type_data_di());
+    ciParametersTypeData* parameters = new ciParametersTypeData(_parameters);
+    parameters->translate_from(mdo->parameters_type_data());
+  }
+
   // Note:  Extra data are all BitData, and do not need translation.
   _current_mileage = MethodData::mileage_of(mdo->method());
   _invocation_counter = mdo->invocation_count();
@@ -123,7 +133,7 @@
 #endif
 }
 
-void ciReceiverTypeData::translate_receiver_data_from(ProfileData* data) {
+void ciReceiverTypeData::translate_receiver_data_from(const ProfileData* data) {
   for (uint row = 0; row < row_limit(); row++) {
     Klass* k = data->as_ReceiverTypeData()->receiver(row);
     if (k != NULL) {
@@ -134,6 +144,18 @@
 }
 
 
+void ciTypeStackSlotEntries::translate_type_data_from(const TypeStackSlotEntries* entries) {
+  for (int i = 0; i < _number_of_entries; i++) {
+    intptr_t k = entries->type(i);
+    TypeStackSlotEntries::set_type(i, translate_klass(k));
+  }
+}
+
+void ciReturnTypeEntry::translate_type_data_from(const ReturnTypeEntry* ret) {
+  intptr_t k = ret->type();
+  set_type(translate_klass(k));
+}
+
 // Get the data at an arbitrary (sort of) data index.
 ciProfileData* ciMethodData::data_at(int data_index) {
   if (out_of_bounds(data_index)) {
@@ -164,6 +186,12 @@
     return new ciMultiBranchData(data_layout);
   case DataLayout::arg_info_data_tag:
     return new ciArgInfoData(data_layout);
+  case DataLayout::call_type_data_tag:
+    return new ciCallTypeData(data_layout);
+  case DataLayout::virtual_call_type_data_tag:
+    return new ciVirtualCallTypeData(data_layout);
+  case DataLayout::parameters_type_data_tag:
+    return new ciParametersTypeData(data_layout);
   };
 }
 
@@ -286,6 +314,42 @@
   }
 }
 
+void ciMethodData::set_argument_type(int bci, int i, ciKlass* k) {
+  VM_ENTRY_MARK;
+  MethodData* mdo = get_MethodData();
+  if (mdo != NULL) {
+    ProfileData* data = mdo->bci_to_data(bci);
+    if (data->is_CallTypeData()) {
+      data->as_CallTypeData()->set_argument_type(i, k->get_Klass());
+    } else {
+      assert(data->is_VirtualCallTypeData(), "no arguments!");
+      data->as_VirtualCallTypeData()->set_argument_type(i, k->get_Klass());
+    }
+  }
+}
+
+void ciMethodData::set_parameter_type(int i, ciKlass* k) {
+  VM_ENTRY_MARK;
+  MethodData* mdo = get_MethodData();
+  if (mdo != NULL) {
+    mdo->parameters_type_data()->set_type(i, k->get_Klass());
+  }
+}
+
+void ciMethodData::set_return_type(int bci, ciKlass* k) {
+  VM_ENTRY_MARK;
+  MethodData* mdo = get_MethodData();
+  if (mdo != NULL) {
+    ProfileData* data = mdo->bci_to_data(bci);
+    if (data->is_CallTypeData()) {
+      data->as_CallTypeData()->set_return_type(k->get_Klass());
+    } else {
+      assert(data->is_VirtualCallTypeData(), "must be VirtualCallTypeData");
+      data->as_VirtualCallTypeData()->set_return_type(k->get_Klass());
+    }
+  }
+}
+
 bool ciMethodData::has_escape_info() {
   return eflag_set(MethodData::estimated);
 }
@@ -373,7 +437,6 @@
 }
 
 void ciMethodData::dump_replay_data(outputStream* out) {
-  ASSERT_IN_VM;
   ResourceMark rm;
   MethodData* mdo = get_MethodData();
   Method* method = mdo->method();
@@ -477,7 +540,50 @@
   }
 }
 
-void ciReceiverTypeData::print_receiver_data_on(outputStream* st) {
+void ciTypeEntries::print_ciklass(outputStream* st, intptr_t k) {
+  if (TypeEntries::is_type_none(k)) {
+    st->print("none");
+  } else if (TypeEntries::is_type_unknown(k)) {
+    st->print("unknown");
+  } else {
+    valid_ciklass(k)->print_name_on(st);
+  }
+  if (TypeEntries::was_null_seen(k)) {
+    st->print(" (null seen)");
+  }
+}
+
+void ciTypeStackSlotEntries::print_data_on(outputStream* st) const {
+  for (int i = 0; i < _number_of_entries; i++) {
+    _pd->tab(st);
+    st->print("%d: stack (%u) ", i, stack_slot(i));
+    print_ciklass(st, type(i));
+    st->cr();
+  }
+}
+
+void ciReturnTypeEntry::print_data_on(outputStream* st) const {
+  _pd->tab(st);
+  st->print("ret ");
+  print_ciklass(st, type());
+  st->cr();
+}
+
+void ciCallTypeData::print_data_on(outputStream* st) const {
+  print_shared(st, "ciCallTypeData");
+  if (has_arguments()) {
+    tab(st, true);
+    st->print("argument types");
+    args()->print_data_on(st);
+  }
+  if (has_return()) {
+    tab(st, true);
+    st->print("return type");
+    ret()->print_data_on(st);
+  }
+}
+
+void ciReceiverTypeData::print_receiver_data_on(outputStream* st) const {
   uint row;
   int entries = 0;
   for (row = 0; row < row_limit(); row++) {
@@ -493,13 +599,33 @@
   }
 }
 
-void ciReceiverTypeData::print_data_on(outputStream* st) {
+void ciReceiverTypeData::print_data_on(outputStream* st) const {
   print_shared(st, "ciReceiverTypeData");
   print_receiver_data_on(st);
 }
 
-void ciVirtualCallData::print_data_on(outputStream* st) {
+void ciVirtualCallData::print_data_on(outputStream* st) const {
   print_shared(st, "ciVirtualCallData");
   rtd_super()->print_receiver_data_on(st);
 }
+
+void ciVirtualCallTypeData::print_data_on(outputStream* st) const {
+  print_shared(st, "ciVirtualCallTypeData");
+  rtd_super()->print_receiver_data_on(st);
+  if (has_arguments()) {
+    tab(st, true);
+    st->print("argument types");
+    args()->print_data_on(st);
+  }
+  if (has_return()) {
+    tab(st, true);
+    st->print("return type");
+    ret()->print_data_on(st);
+  }
+}
+
+void ciParametersTypeData::print_data_on(outputStream* st) const {
+  st->print_cr("Parametertypes");
+  parameters()->print_data_on(st);
+}
 #endif
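
The set_argument_type/set_parameter_type/set_return_type writers run under
VM_ENTRY_MARK because they mutate the underlying MethodData* rather than the
ci snapshot: when the compiler proves a type statically, it records it so that
later compilations start from a sharper profile. A sketch of the intended call
site (the guard and variable names are assumptions for illustration):

    // During compilation, if an argument's type is known exactly:
    ciKlass* exact = arg_type->exact_klass();   // e.g. a final class
    if (exact != NULL) {
      method_data->set_argument_type(bci, i, exact);  // enters the VM briefly
    }
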
--- a/src/share/vm/ci/ciMethodData.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/ci/ciMethodData.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -41,6 +41,9 @@
 class ciArrayData;
 class ciMultiBranchData;
 class ciArgInfoData;
+class ciCallTypeData;
+class ciVirtualCallTypeData;
+class ciParametersTypeData;
 
 typedef ProfileData ciProfileData;
 
@@ -59,6 +62,119 @@
   ciJumpData(DataLayout* layout) : JumpData(layout) {};
 };
 
+class ciTypeEntries {
+protected:
+  static intptr_t translate_klass(intptr_t k) {
+    Klass* v = TypeEntries::valid_klass(k);
+    if (v != NULL) {
+      ciKlass* klass = CURRENT_ENV->get_klass(v);
+      return with_status(klass, k);
+    }
+    return with_status(NULL, k);
+  }
+
+public:
+  static ciKlass* valid_ciklass(intptr_t k) {
+    if (!TypeEntries::is_type_none(k) &&
+        !TypeEntries::is_type_unknown(k)) {
+      return (ciKlass*)TypeEntries::klass_part(k);
+    } else {
+      return NULL;
+    }
+  }
+
+  static intptr_t with_status(ciKlass* k, intptr_t in) {
+    return TypeEntries::with_status((intptr_t)k, in);
+  }
+
+#ifndef PRODUCT
+  static void print_ciklass(outputStream* st, intptr_t k);
+#endif
+};
+
+class ciTypeStackSlotEntries : public TypeStackSlotEntries, ciTypeEntries {
+public:
+  void translate_type_data_from(const TypeStackSlotEntries* args);
+
+  ciKlass* valid_type(int i) const {
+    return valid_ciklass(type(i));
+  }
+
+  bool maybe_null(int i) const {
+    return was_null_seen(type(i));
+  }
+
+#ifndef PRODUCT
+  void print_data_on(outputStream* st) const;
+#endif
+};
+
+class ciReturnTypeEntry : public ReturnTypeEntry, ciTypeEntries {
+public:
+  void translate_type_data_from(const ReturnTypeEntry* ret);
+
+  ciKlass* valid_type() const {
+    return valid_ciklass(type());
+  }
+
+  bool maybe_null() const {
+    return was_null_seen(type());
+  }
+
+#ifndef PRODUCT
+  void print_data_on(outputStream* st) const;
+#endif
+};
+
+class ciCallTypeData : public CallTypeData {
+public:
+  ciCallTypeData(DataLayout* layout) : CallTypeData(layout) {}
+
+  ciTypeStackSlotEntries* args() const { return (ciTypeStackSlotEntries*)CallTypeData::args(); }
+  ciReturnTypeEntry* ret() const { return (ciReturnTypeEntry*)CallTypeData::ret(); }
+
+  void translate_from(const ProfileData* data) {
+    if (has_arguments()) {
+      args()->translate_type_data_from(data->as_CallTypeData()->args());
+    }
+    if (has_return()) {
+      ret()->translate_type_data_from(data->as_CallTypeData()->ret());
+    }
+  }
+
+  intptr_t argument_type(int i) const {
+    assert(has_arguments(), "no arg type profiling data");
+    return args()->type(i);
+  }
+
+  ciKlass* valid_argument_type(int i) const {
+    assert(has_arguments(), "no arg type profiling data");
+    return args()->valid_type(i);
+  }
+
+  intptr_t return_type() const {
+    assert(has_return(), "no ret type profiling data");
+    return ret()->type();
+  }
+
+  ciKlass* valid_return_type() const {
+    assert(has_return(), "no ret type profiling data");
+    return ret()->valid_type();
+  }
+
+  bool argument_maybe_null(int i) const {
+    return args()->maybe_null(i);
+  }
+
+  bool return_maybe_null() const {
+    return ret()->maybe_null();
+  }
+
+#ifndef PRODUCT
+  void print_data_on(outputStream* st) const;
+#endif
+};
+
 class ciReceiverTypeData : public ReceiverTypeData {
 public:
   ciReceiverTypeData(DataLayout* layout) : ReceiverTypeData(layout) {};
@@ -69,7 +185,7 @@
                   (intptr_t) recv);
   }
 
-  ciKlass* receiver(uint row) {
+  ciKlass* receiver(uint row) const {
     assert((uint)row < row_limit(), "oob");
     ciKlass* recv = (ciKlass*)intptr_at(receiver0_offset + row * receiver_type_row_cell_count);
     assert(recv == NULL || recv->is_klass(), "wrong type");
@@ -77,19 +193,19 @@
   }
 
   // Copy & translate from oop based ReceiverTypeData
-  virtual void translate_from(ProfileData* data) {
+  virtual void translate_from(const ProfileData* data) {
     translate_receiver_data_from(data);
   }
-  void translate_receiver_data_from(ProfileData* data);
+  void translate_receiver_data_from(const ProfileData* data);
 #ifndef PRODUCT
-  void print_data_on(outputStream* st);
-  void print_receiver_data_on(outputStream* st);
+  void print_data_on(outputStream* st) const;
+  void print_receiver_data_on(outputStream* st) const;
 #endif
 };
 
 class ciVirtualCallData : public VirtualCallData {
   // Fake multiple inheritance...  It's a ciReceiverTypeData also.
-  ciReceiverTypeData* rtd_super() { return (ciReceiverTypeData*) this; }
+  ciReceiverTypeData* rtd_super() const { return (ciReceiverTypeData*) this; }
 
 public:
   ciVirtualCallData(DataLayout* layout) : VirtualCallData(layout) {};
@@ -103,11 +219,73 @@
   }
 
   // Copy & translate from oop based VirtualCallData
-  virtual void translate_from(ProfileData* data) {
+  virtual void translate_from(const ProfileData* data) {
     rtd_super()->translate_receiver_data_from(data);
   }
 #ifndef PRODUCT
-  void print_data_on(outputStream* st);
+  void print_data_on(outputStream* st) const;
+#endif
+};
+
+class ciVirtualCallTypeData : public VirtualCallTypeData {
+private:
+  // Fake multiple inheritance...  It's a ciReceiverTypeData also.
+  ciReceiverTypeData* rtd_super() const { return (ciReceiverTypeData*) this; }
+public:
+  ciVirtualCallTypeData(DataLayout* layout) : VirtualCallTypeData(layout) {}
+
+  void set_receiver(uint row, ciKlass* recv) {
+    rtd_super()->set_receiver(row, recv);
+  }
+
+  ciKlass* receiver(uint row) const {
+    return rtd_super()->receiver(row);
+  }
+
+  ciTypeStackSlotEntries* args() const { return (ciTypeStackSlotEntries*)VirtualCallTypeData::args(); }
+  ciReturnTypeEntry* ret() const { return (ciReturnTypeEntry*)VirtualCallTypeData::ret(); }
+
+  // Copy & translate from oop based VirtualCallData
+  virtual void translate_from(const ProfileData* data) {
+    rtd_super()->translate_receiver_data_from(data);
+    if (has_arguments()) {
+      args()->translate_type_data_from(data->as_VirtualCallTypeData()->args());
+    }
+    if (has_return()) {
+      ret()->translate_type_data_from(data->as_VirtualCallTypeData()->ret());
+    }
+  }
+
+  intptr_t argument_type(int i) const {
+    assert(has_arguments(), "no arg type profiling data");
+    return args()->type(i);
+  }
+
+  ciKlass* valid_argument_type(int i) const {
+    assert(has_arguments(), "no arg type profiling data");
+    return args()->valid_type(i);
+  }
+
+  intptr_t return_type() const {
+    assert(has_return(), "no ret type profiling data");
+    return ret()->type();
+  }
+
+  ciKlass* valid_return_type() const {
+    assert(has_return(), "no ret type profiling data");
+    return ret()->valid_type();
+  }
+
+  bool argument_maybe_null(int i) const {
+    return args()->maybe_null(i);
+  }
+
+  bool return_maybe_null() const {
+    return ret()->maybe_null();
+  }
+
+#ifndef PRODUCT
+  void print_data_on(outputStream* st) const;
 #endif
 };
 
@@ -137,6 +315,29 @@
   ciArgInfoData(DataLayout* layout) : ArgInfoData(layout) {};
 };
 
+class ciParametersTypeData : public ParametersTypeData {
+public:
+  ciParametersTypeData(DataLayout* layout) : ParametersTypeData(layout) {}
+
+  virtual void translate_from(const ProfileData* data) {
+    parameters()->translate_type_data_from(data->as_ParametersTypeData()->parameters());
+  }
+
+  ciTypeStackSlotEntries* parameters() const { return (ciTypeStackSlotEntries*)ParametersTypeData::parameters(); }
+
+  ciKlass* valid_parameter_type(int i) const {
+    return parameters()->valid_type(i);
+  }
+
+  bool parameter_maybe_null(int i) const {
+    return parameters()->maybe_null(i);
+  }
+
+#ifndef PRODUCT
+  void print_data_on(outputStream* st) const;
+#endif
+};
+
 // ciMethodData
 //
 // This class represents a MethodData* in the HotSpot virtual
@@ -182,6 +383,10 @@
   // Coherent snapshot of original header.
   MethodData _orig;
 
+  // Area dedicated to parameters. NULL if this method has no
+  // parameter profiling.
+  DataLayout* _parameters;
+
   ciMethodData(MethodData* md);
   ciMethodData();
 
@@ -232,8 +437,6 @@
 public:
   bool is_method_data() const { return true; }
 
-  void set_mature() { _state = mature_state; }
-
   bool is_empty()  { return _state == empty_state; }
   bool is_mature() { return _state == mature_state; }
 
@@ -249,6 +452,11 @@
   // Also set the numer of loops and blocks in the method.
   // Again, this is used to determine if a method is trivial.
   void set_compilation_stats(short loops, short blocks);
+  // If the compiler finds a profiled type that is known statically
+  // for sure, set it in the MethodData
+  void set_argument_type(int bci, int i, ciKlass* k);
+  void set_parameter_type(int i, ciKlass* k);
+  void set_return_type(int bci, ciKlass* k);
 
   void load_data();
 
@@ -312,6 +520,10 @@
   bool is_arg_returned(int i) const;
   uint arg_modified(int arg) const;
 
+  ciParametersTypeData* parameters_type_data() const {
+    return _parameters != NULL ? new ciParametersTypeData(_parameters) : NULL;
+  }
+
   // Code generation helper
   ByteSize offset_of_slot(ciProfileData* data, ByteSize slot_offset_in_data);
   int      byte_offset_of_slot(ciProfileData* data, ByteSize slot_offset_in_data) { return in_bytes(offset_of_slot(data, slot_offset_in_data)); }
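
Note that parameters_type_data() wraps the shared _parameters DataLayout in a
freshly allocated ciParametersTypeData on each call, so callers should be in a
ResourceMark scope. A sketch of the expected usage (only the accessors shown
here come from this change):

    ResourceMark rm;
    ciParametersTypeData* ptd = md->parameters_type_data();
    if (ptd != NULL) {
      for (int i = 0; i < ptd->number_of_parameters(); i++) {
        ciKlass* k = ptd->valid_parameter_type(i);  // NULL if none/unknown
        if (k != NULL && !ptd->parameter_maybe_null(i)) {
          // safe to speculate that parameter i has exact type k
        }
      }
    }
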
--- a/src/share/vm/ci/ciObjArrayKlass.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/ci/ciObjArrayKlass.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -179,3 +179,16 @@
 ciObjArrayKlass* ciObjArrayKlass::make(ciKlass* element_klass) {
   GUARDED_VM_ENTRY(return make_impl(element_klass);)
 }
+
+ciKlass* ciObjArrayKlass::exact_klass() {
+  ciType* base = base_element_type();
+  if (base->is_instance_klass()) {
+    ciInstanceKlass* ik = base->as_instance_klass();
+    if (ik->exact_klass() != NULL) {
+      return this;
+    }
+  } else if (base->is_primitive_type()) {
+    return this;
+  }
+  return NULL;
+}
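
The rule encoded above: an array type is exact when no strict subtype of it
can exist, which is the case when the base element type is a primitive or an
instance klass that is itself exact. Illustrative consequences (the class
names here are just examples):

    // String is final, hence exact  => String[] is exact.
    // int is primitive              => int[] and int[][] are exact.
    // Object has subclasses         => an Object[] may really be a String[]
    //                                  at runtime, so it is not exact.
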
--- a/src/share/vm/ci/ciObjArrayKlass.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/ci/ciObjArrayKlass.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -73,6 +73,8 @@
   bool is_obj_array_klass() const { return true; }
 
   static ciObjArrayKlass* make(ciKlass* element_klass);
+
+  virtual ciKlass* exact_klass();
 };
 
 #endif // SHARE_VM_CI_CIOBJARRAYKLASS_HPP
--- a/src/share/vm/ci/ciObjectFactory.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/ci/ciObjectFactory.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -563,7 +563,10 @@
   return get_unloaded_instance(ciEnv::_MethodType_klass->as_instance_klass());
 }
 
-
+ciInstance* ciObjectFactory::get_unloaded_object_constant() {
+  if (ciEnv::_Object_klass == NULL)  return NULL;
+  return get_unloaded_instance(ciEnv::_Object_klass->as_instance_klass());
+}
 
 //------------------------------------------------------------------
 // ciObjectFactory::get_empty_methodData
--- a/src/share/vm/ci/ciObjectFactory.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/ci/ciObjectFactory.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -131,6 +131,8 @@
   ciInstance* get_unloaded_method_type_constant(ciSymbol* signature);
 
 
+  ciInstance* get_unloaded_object_constant();
+
   // Get the ciMethodData representing the methodData for a method
   // with none.
   ciMethodData* get_empty_methodData();
--- a/src/share/vm/ci/ciReplay.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/ci/ciReplay.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -965,14 +965,12 @@
     tty->cr();
   } else {
     EXCEPTION_CONTEXT;
-    MethodCounters* mcs = method->method_counters();
     // m->_instructions_size = rec->instructions_size;
     m->_instructions_size = -1;
     m->_interpreter_invocation_count = rec->interpreter_invocation_count;
     m->_interpreter_throwout_count = rec->interpreter_throwout_count;
-    if (mcs == NULL) {
-      mcs = Method::build_method_counters(method, CHECK_AND_CLEAR);
-    }
+    MethodCounters* mcs = method->get_method_counters(CHECK_AND_CLEAR);
+    guarantee(mcs != NULL, "method counters allocation failed");
     mcs->invocation_counter()->_counter = rec->invocation_counter;
     mcs->backedge_counter()->_counter = rec->backedge_counter;
   }
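
The replay code and ciMethod::ensure_method_counters now share a single lazy
allocation path on Method itself. Its assumed shape (a sketch, not the literal
accessor from this changeset):

    MethodCounters* Method::get_method_counters(TRAPS) {
      if (method_counters() == NULL) {
        // Builds and installs the counters; may throw on allocation failure,
        // in which case the CHECK macro makes this return NULL.
        build_method_counters(this, CHECK_NULL);
      }
      return method_counters();
    }
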
--- a/src/share/vm/ci/ciStreams.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/ci/ciStreams.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -277,11 +277,14 @@
 class ciSignatureStream : public StackObj {
 private:
   ciSignature* _sig;
-  int    _pos;
+  int          _pos;
+  // _holder is the method's holder; when set, next_klass() returns
+  // it first, as the type of the implicit receiver
+  ciKlass*     _holder;
 public:
-  ciSignatureStream(ciSignature* signature) {
+  ciSignatureStream(ciSignature* signature, ciKlass* holder = NULL) {
     _sig = signature;
     _pos = 0;
+    _holder = holder;
   }
 
   bool at_return_type() { return _pos == _sig->count(); }
@@ -301,6 +304,23 @@
       return _sig->type_at(_pos);
     }
   }
+
+  // next klass in the signature
+  ciKlass* next_klass() {
+    ciKlass* sig_k;
+    if (_holder != NULL) {
+      sig_k = _holder;
+      _holder = NULL;
+    } else {
+      while (!type()->is_klass()) {
+        next();
+      }
+      assert(!at_return_type(), "passed end of signature");
+      sig_k = type()->as_klass();
+      next();
+    }
+    return sig_k;
+  }
 };
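
next_klass() linearizes the reference types a call site passes: the holder is
produced first when supplied (the type of the implicit receiver), then each
object or array type in the signature, skipping primitives. Because it asserts
if it runs past the last argument, callers iterate a precomputed count rather
than testing at_return_type(). A sketch (num_reference_args is assumed to come
from the caller's argument bookkeeping):

    ciSignatureStream ss(method->signature(),
                         method->is_static() ? NULL : method->holder());
    for (int i = 0; i < num_reference_args; i++) {
      ciKlass* k = ss.next_klass();  // receiver first, then reference args
      // record or speculate on k ...
    }
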
 
 
--- a/src/share/vm/ci/ciSymbol.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/ci/ciSymbol.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,6 +44,7 @@
   friend class ciInstanceKlass;
   friend class ciSignature;
   friend class ciMethod;
+  friend class ciField;
   friend class ciObjArrayKlass;
 
 private:
--- a/src/share/vm/ci/ciTypeArray.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/ci/ciTypeArray.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -39,5 +39,10 @@
 jchar ciTypeArray::char_at(int index) {
   VM_ENTRY_MARK;
   assert(index >= 0 && index < length(), "out of range");
-  return get_typeArrayOop()->char_at(index);
+  jchar c = get_typeArrayOop()->char_at(index);
+#ifdef ASSERT
+  jchar d = element_value(index).as_char();
+  assert(c == d, "");
+#endif //ASSERT
+  return c;
 }
--- a/src/share/vm/ci/ciTypeArrayKlass.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/ci/ciTypeArrayKlass.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -57,6 +57,10 @@
 
   // Make an array klass corresponding to the specified primitive type.
   static ciTypeArrayKlass* make(BasicType type);
+
+  virtual ciKlass* exact_klass() {
+    return this;
+  }
 };
 
 #endif // SHARE_VM_CI_CITYPEARRAYKLASS_HPP
--- a/src/share/vm/classfile/classFileParser.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/classfile/classFileParser.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -28,7 +28,6 @@
 #include "classfile/classLoaderData.hpp"
 #include "classfile/classLoaderData.inline.hpp"
 #include "classfile/defaultMethods.hpp"
-#include "classfile/genericSignatures.hpp"
 #include "classfile/javaClasses.hpp"
 #include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionary.hpp"
@@ -889,6 +888,7 @@
   int runtime_visible_type_annotations_length = 0;
   u1* runtime_invisible_type_annotations = NULL;
   int runtime_invisible_type_annotations_length = 0;
+  bool runtime_invisible_type_annotations_exists = false;
   while (attributes_count--) {
     cfs->guarantee_more(6, CHECK);  // attribute_name_index, attribute_length
     u2 attribute_name_index = cfs->get_u2_fast();
@@ -947,15 +947,27 @@
         assert(runtime_invisible_annotations != NULL, "null invisible annotations");
         cfs->skip_u1(runtime_invisible_annotations_length, CHECK);
       } else if (attribute_name == vmSymbols::tag_runtime_visible_type_annotations()) {
+        if (runtime_visible_type_annotations != NULL) {
+          classfile_parse_error(
+            "Multiple RuntimeVisibleTypeAnnotations attributes for field in class file %s", CHECK);
+        }
         runtime_visible_type_annotations_length = attribute_length;
         runtime_visible_type_annotations = cfs->get_u1_buffer();
         assert(runtime_visible_type_annotations != NULL, "null visible type annotations");
         cfs->skip_u1(runtime_visible_type_annotations_length, CHECK);
-      } else if (PreserveAllAnnotations && attribute_name == vmSymbols::tag_runtime_invisible_type_annotations()) {
-        runtime_invisible_type_annotations_length = attribute_length;
-        runtime_invisible_type_annotations = cfs->get_u1_buffer();
-        assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
-        cfs->skip_u1(runtime_invisible_type_annotations_length, CHECK);
+      } else if (attribute_name == vmSymbols::tag_runtime_invisible_type_annotations()) {
+        if (runtime_invisible_type_annotations_exists) {
+          classfile_parse_error(
+            "Multiple RuntimeInvisibleTypeAnnotations attributes for field in class file %s", CHECK);
+        } else {
+          runtime_invisible_type_annotations_exists = true;
+        }
+        if (PreserveAllAnnotations) {
+          runtime_invisible_type_annotations_length = attribute_length;
+          runtime_invisible_type_annotations = cfs->get_u1_buffer();
+          assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
+        }
+        cfs->skip_u1(attribute_length, CHECK);
       } else {
         cfs->skip_u1(attribute_length, CHECK);  // Skip unknown attributes
       }
@@ -1775,6 +1787,10 @@
     if (_location != _in_method)  break;  // only allow for methods
     if (!privileged)              break;  // only allow in privileged code
     return _method_LambdaForm_Hidden;
+  case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_Stable_signature):
+    if (_location != _in_field)   break;  // only allow for fields
+    if (!privileged)              break;  // only allow in privileged code
+    return _field_Stable;
   case vmSymbols::VM_SYMBOL_ENUM_NAME(sun_misc_Contended_signature):
     if (_location != _in_field && _location != _in_class)          break;  // only allow for fields and classes
     if (!EnableContended || (RestrictContended && !privileged))    break;  // honor privileges
@@ -1787,6 +1803,8 @@
 void ClassFileParser::FieldAnnotationCollector::apply_to(FieldInfo* f) {
   if (is_contended())
     f->set_contended_group(contended_group());
+  if (is_stable())
+    f->set_stable(true);
 }
 
 ClassFileParser::FieldAnnotationCollector::~FieldAnnotationCollector() {
@@ -2061,6 +2079,7 @@
   int runtime_visible_type_annotations_length = 0;
   u1* runtime_invisible_type_annotations = NULL;
   int runtime_invisible_type_annotations_length = 0;
+  bool runtime_invisible_type_annotations_exists = false;
   u1* annotation_default = NULL;
   int annotation_default_length = 0;
 
@@ -2178,8 +2197,8 @@
           }
           if (lvt_cnt == max_lvt_cnt) {
             max_lvt_cnt <<= 1;
-            REALLOC_RESOURCE_ARRAY(u2, localvariable_table_length, lvt_cnt, max_lvt_cnt);
-            REALLOC_RESOURCE_ARRAY(u2*, localvariable_table_start, lvt_cnt, max_lvt_cnt);
+            localvariable_table_length = REALLOC_RESOURCE_ARRAY(u2, localvariable_table_length, lvt_cnt, max_lvt_cnt);
+            localvariable_table_start  = REALLOC_RESOURCE_ARRAY(u2*, localvariable_table_start, lvt_cnt, max_lvt_cnt);
           }
           localvariable_table_start[lvt_cnt] =
             parse_localvariable_table(code_length,
@@ -2207,8 +2226,8 @@
           // Parse local variable type table
           if (lvtt_cnt == max_lvtt_cnt) {
             max_lvtt_cnt <<= 1;
-            REALLOC_RESOURCE_ARRAY(u2, localvariable_type_table_length, lvtt_cnt, max_lvtt_cnt);
-            REALLOC_RESOURCE_ARRAY(u2*, localvariable_type_table_start, lvtt_cnt, max_lvtt_cnt);
+            localvariable_type_table_length = REALLOC_RESOURCE_ARRAY(u2, localvariable_type_table_length, lvtt_cnt, max_lvtt_cnt);
+            localvariable_type_table_start  = REALLOC_RESOURCE_ARRAY(u2*, localvariable_type_table_start, lvtt_cnt, max_lvtt_cnt);
           }
           localvariable_type_table_start[lvtt_cnt] =
             parse_localvariable_table(code_length,
@@ -2317,16 +2336,30 @@
         assert(annotation_default != NULL, "null annotation default");
         cfs->skip_u1(annotation_default_length, CHECK_(nullHandle));
       } else if (method_attribute_name == vmSymbols::tag_runtime_visible_type_annotations()) {
+        if (runtime_visible_type_annotations != NULL) {
+          classfile_parse_error(
+            "Multiple RuntimeVisibleTypeAnnotations attributes for method in class file %s",
+            CHECK_(nullHandle));
+        }
         runtime_visible_type_annotations_length = method_attribute_length;
         runtime_visible_type_annotations = cfs->get_u1_buffer();
         assert(runtime_visible_type_annotations != NULL, "null visible type annotations");
         // No need for the VM to parse Type annotations
         cfs->skip_u1(runtime_visible_type_annotations_length, CHECK_(nullHandle));
-      } else if (PreserveAllAnnotations && method_attribute_name == vmSymbols::tag_runtime_invisible_type_annotations()) {
-        runtime_invisible_type_annotations_length = method_attribute_length;
-        runtime_invisible_type_annotations = cfs->get_u1_buffer();
-        assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
-        cfs->skip_u1(runtime_invisible_type_annotations_length, CHECK_(nullHandle));
+      } else if (method_attribute_name == vmSymbols::tag_runtime_invisible_type_annotations()) {
+        if (runtime_invisible_type_annotations_exists) {
+          classfile_parse_error(
+            "Multiple RuntimeInvisibleTypeAnnotations attributes for method in class file %s",
+            CHECK_(nullHandle));
+        } else {
+          runtime_invisible_type_annotations_exists = true;
+        }
+        if (PreserveAllAnnotations) {
+          runtime_invisible_type_annotations_length = method_attribute_length;
+          runtime_invisible_type_annotations = cfs->get_u1_buffer();
+          assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
+        }
+        cfs->skip_u1(method_attribute_length, CHECK_(nullHandle));
       } else {
         // Skip unknown attributes
         cfs->skip_u1(method_attribute_length, CHECK_(nullHandle));
@@ -2512,7 +2545,9 @@
       if (method->is_final()) {
         *has_final_method = true;
       }
-      if (is_interface && !method->is_abstract() && !method->is_static()) {
+      if (is_interface && !(*has_default_methods)
+        && !method->is_abstract() && !method->is_static()
+        && !method->is_private()) {
         // default method
         *has_default_methods = true;
       }
@@ -2819,6 +2854,7 @@
   int runtime_visible_type_annotations_length = 0;
   u1* runtime_invisible_type_annotations = NULL;
   int runtime_invisible_type_annotations_length = 0;
+  bool runtime_invisible_type_annotations_exists = false;
   u1* inner_classes_attribute_start = NULL;
   u4  inner_classes_attribute_length = 0;
   u2  enclosing_method_class_index = 0;
@@ -2922,16 +2958,28 @@
         parsed_bootstrap_methods_attribute = true;
         parse_classfile_bootstrap_methods_attribute(attribute_length, CHECK);
       } else if (tag == vmSymbols::tag_runtime_visible_type_annotations()) {
+        if (runtime_visible_type_annotations != NULL) {
+          classfile_parse_error(
+            "Multiple RuntimeVisibleTypeAnnotations attributes in class file %s", CHECK);
+        }
         runtime_visible_type_annotations_length = attribute_length;
         runtime_visible_type_annotations = cfs->get_u1_buffer();
         assert(runtime_visible_type_annotations != NULL, "null visible type annotations");
         // No need for the VM to parse Type annotations
         cfs->skip_u1(runtime_visible_type_annotations_length, CHECK);
-      } else if (PreserveAllAnnotations && tag == vmSymbols::tag_runtime_invisible_type_annotations()) {
-        runtime_invisible_type_annotations_length = attribute_length;
-        runtime_invisible_type_annotations = cfs->get_u1_buffer();
-        assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
-        cfs->skip_u1(runtime_invisible_type_annotations_length, CHECK);
+      } else if (tag == vmSymbols::tag_runtime_invisible_type_annotations()) {
+        if (runtime_invisible_type_annotations_exists) {
+          classfile_parse_error(
+            "Multiple RuntimeInvisibleTypeAnnotations attributes in class file %s", CHECK);
+        } else {
+          runtime_invisible_type_annotations_exists = true;
+        }
+        if (PreserveAllAnnotations) {
+          runtime_invisible_type_annotations_length = attribute_length;
+          runtime_invisible_type_annotations = cfs->get_u1_buffer();
+          assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
+        }
+        cfs->skip_u1(attribute_length, CHECK);
       } else {
         // Unknown attribute
         cfs->skip_u1(attribute_length, CHECK);
@@ -3039,35 +3087,6 @@
   return annotations;
 }
 
-
-#ifdef ASSERT
-static void parseAndPrintGenericSignatures(
-    instanceKlassHandle this_klass, TRAPS) {
-  assert(ParseAllGenericSignatures == true, "Shouldn't call otherwise");
-  ResourceMark rm;
-
-  if (this_klass->generic_signature() != NULL) {
-    using namespace generic;
-    ClassDescriptor* spec = ClassDescriptor::parse_generic_signature(this_klass(), CHECK);
-
-    tty->print_cr("Parsing %s", this_klass->generic_signature()->as_C_string());
-    spec->print_on(tty);
-
-    for (int i = 0; i < this_klass->methods()->length(); ++i) {
-      Method* m = this_klass->methods()->at(i);
-      MethodDescriptor* method_spec = MethodDescriptor::parse_generic_signature(m, spec);
-      Symbol* sig = m->generic_signature();
-      if (sig == NULL) {
-        sig = m->signature();
-      }
-      tty->print_cr("Parsing %s", sig->as_C_string());
-      method_spec->print_on(tty);
-    }
-  }
-}
-#endif // def ASSERT
-
-
 instanceKlassHandle ClassFileParser::parse_super_class(int super_class_index,
                                                        TRAPS) {
   instanceKlassHandle super_klass;
@@ -3978,9 +3997,8 @@
       this_klass->set_has_final_method();
     }
     this_klass->copy_method_ordering(method_ordering, CHECK_NULL);
-    // The InstanceKlass::_methods_jmethod_ids cache and the
-    // InstanceKlass::_methods_cached_itable_indices cache are
-    // both managed on the assumption that the initial cache
+    // The InstanceKlass::_methods_jmethod_ids cache
+    // is managed on the assumption that the initial cache
     // size is equal to the number of methods in the class. If
     // that changes, then InstanceKlass::idnum_can_increment()
     // has to be changed accordingly.
@@ -4060,16 +4078,9 @@
     java_lang_Class::create_mirror(this_klass, protection_domain, CHECK_(nullHandle));
 
 
-#ifdef ASSERT
-    if (ParseAllGenericSignatures) {
-      parseAndPrintGenericSignatures(this_klass, CHECK_(nullHandle));
-    }
-#endif
-
     // Generate any default methods - default methods are interface methods
     // that have a default implementation.  This is new with Lambda project.
-    if (has_default_methods && !access_flags.is_interface() &&
-        local_interfaces->length() > 0) {
+    if (has_default_methods && !access_flags.is_interface()) {
       DefaultMethods::generate_default_methods(
           this_klass(), &all_mirandas, CHECK_(nullHandle));
     }
@@ -4472,9 +4483,8 @@
   for (int index = 0; index < num_methods; index++) {
     Method* m = methods->at(index);
 
-    // skip private, static and <init> methods
-    if ((!m->is_private()) &&
-        (!m->is_static()) &&
+    // skip static and <init> methods
+    if ((!m->is_static()) &&
         (m->name() != vmSymbols::object_initializer_name())) {
 
       Symbol* name = m->name();
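
The duplicate-attribute handling added in three places above follows one
pattern: record that the attribute was seen, reject a second occurrence, copy
the payload only under PreserveAllAnnotations, and always skip the attribute's
bytes so the stream stays correctly positioned. Reduced to its outline (a
simplified pseudo-parser, not the real code):

    bool invisible_type_annotations_seen = false;
    while (attributes_count--) {
      ...
      if (name == tag_runtime_invisible_type_annotations()) {
        if (invisible_type_annotations_seen) {
          classfile_parse_error("Multiple RuntimeInvisibleTypeAnnotations ...", CHECK);
        }
        invisible_type_annotations_seen = true;
        if (PreserveAllAnnotations) {
          buffer = cfs->get_u1_buffer();        // capture without consuming
        }
        cfs->skip_u1(attribute_length, CHECK);  // consume unconditionally
      }
    }
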
--- a/src/share/vm/classfile/classFileParser.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/classfile/classFileParser.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -125,6 +125,7 @@
       _method_LambdaForm_Compiled,
       _method_LambdaForm_Hidden,
       _sun_misc_Contended,
+      _field_Stable,
       _annotation_LIMIT
     };
     const Location _location;
@@ -143,14 +144,23 @@
       assert((int)id >= 0 && (int)id < (int)_annotation_LIMIT, "oob");
       _annotations_present |= nth_bit((int)id);
     }
+
+    void remove_annotation(ID id) {
+      assert((int)id >= 0 && (int)id < (int)_annotation_LIMIT, "oob");
+      _annotations_present &= ~nth_bit((int)id);
+    }
+
     // Report if the annotation is present.
-    bool has_any_annotations() { return _annotations_present != 0; }
-    bool has_annotation(ID id) { return (nth_bit((int)id) & _annotations_present) != 0; }
+    bool has_any_annotations() const { return _annotations_present != 0; }
+    bool has_annotation(ID id) const { return (nth_bit((int)id) & _annotations_present) != 0; }
 
     void set_contended_group(u2 group) { _contended_group = group; }
-    u2 contended_group() { return _contended_group; }
+    u2 contended_group() const { return _contended_group; }
 
-    bool is_contended() { return has_annotation(_sun_misc_Contended); }
+    bool is_contended() const { return has_annotation(_sun_misc_Contended); }
+
+    void set_stable(bool stable) { if (stable) set_annotation(_field_Stable); else remove_annotation(_field_Stable); }
+    bool is_stable() const { return has_annotation(_field_Stable); }
   };
 
   // This class also doubles as a holder for metadata cleanup.
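
AnnotationCollector keeps one presence bit per well-known annotation ID, so
setting, removing and testing are single mask operations; remove_annotation is
the new inverse of set_annotation. A minimal standalone model of the scheme
(plain C++, independent of HotSpot's nth_bit):

    #include <cstdint>

    enum ID { _method_ForceInline, _sun_misc_Contended, _field_Stable, _LIMIT };

    struct AnnotationBits {
      uint64_t present = 0;  // one bit per ID
      void set(ID id)       { present |=  (UINT64_C(1) << id); }
      void remove(ID id)    { present &= ~(UINT64_C(1) << id); }
      bool has(ID id) const { return ((present >> id) & 1) != 0; }
      bool any()      const { return present != 0; }
    };
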
--- a/src/share/vm/classfile/classLoader.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/classfile/classLoader.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -200,7 +200,7 @@
 }
 
 
-ClassFileStream* ClassPathDirEntry::open_stream(const char* name) {
+ClassFileStream* ClassPathDirEntry::open_stream(const char* name, TRAPS) {
   // construct full path name
   char path[JVM_MAXPATHLEN];
   if (jio_snprintf(path, sizeof(path), "%s%s%s", _dir, os::file_separator(), name) == -1) {
@@ -243,7 +243,7 @@
   FREE_C_HEAP_ARRAY(char, _zip_name, mtClass);
 }
 
-ClassFileStream* ClassPathZipEntry::open_stream(const char* name) {
+ClassFileStream* ClassPathZipEntry::open_stream(const char* name, TRAPS) {
   // enable call to C land
   JavaThread* thread = JavaThread::current();
   ThreadToNativeFromVM ttn(thread);
@@ -287,24 +287,24 @@
   }
 }
 
-LazyClassPathEntry::LazyClassPathEntry(char* path, struct stat st) : ClassPathEntry() {
+LazyClassPathEntry::LazyClassPathEntry(char* path, const struct stat* st) : ClassPathEntry() {
   _path = strdup(path);
-  _st = st;
+  _st = *st;
   _meta_index = NULL;
   _resolved_entry = NULL;
+  _has_error = false;
 }
 
 bool LazyClassPathEntry::is_jar_file() {
   return ((_st.st_mode & S_IFREG) == S_IFREG);
 }
 
-ClassPathEntry* LazyClassPathEntry::resolve_entry() {
+ClassPathEntry* LazyClassPathEntry::resolve_entry(TRAPS) {
   if (_resolved_entry != NULL) {
     return (ClassPathEntry*) _resolved_entry;
   }
   ClassPathEntry* new_entry = NULL;
-  ClassLoader::create_class_path_entry(_path, _st, &new_entry, false);
-  assert(new_entry != NULL, "earlier code should have caught this");
+  new_entry = ClassLoader::create_class_path_entry(_path, &_st, false, CHECK_NULL);
   {
     ThreadCritical tc;
     if (_resolved_entry == NULL) {
@@ -317,12 +317,21 @@
   return (ClassPathEntry*) _resolved_entry;
 }
 
-ClassFileStream* LazyClassPathEntry::open_stream(const char* name) {
+ClassFileStream* LazyClassPathEntry::open_stream(const char* name, TRAPS) {
   if (_meta_index != NULL &&
       !_meta_index->may_contain(name)) {
     return NULL;
   }
-  return resolve_entry()->open_stream(name);
+  if (_has_error) {
+    return NULL;
+  }
+  ClassPathEntry* cpe = resolve_entry(THREAD);
+  if (cpe == NULL) {
+    _has_error = true;
+    return NULL;
+  } else {
+    return cpe->open_stream(name, THREAD);
+  }
 }
 
 bool LazyClassPathEntry::is_lazy() {
@@ -468,20 +477,19 @@
   }
 }
 
-void ClassLoader::create_class_path_entry(char *path, struct stat st, ClassPathEntry **new_entry, bool lazy) {
+ClassPathEntry* ClassLoader::create_class_path_entry(char *path, const struct stat* st, bool lazy, TRAPS) {
   JavaThread* thread = JavaThread::current();
   if (lazy) {
-    *new_entry = new LazyClassPathEntry(path, st);
-    return;
+    return new LazyClassPathEntry(path, st);
   }
-  if ((st.st_mode & S_IFREG) == S_IFREG) {
+  ClassPathEntry* new_entry = NULL;
+  if ((st->st_mode & S_IFREG) == S_IFREG) {
     // Regular file, should be a zip file
     // Canonicalized filename
     char canonical_path[JVM_MAXPATHLEN];
     if (!get_canonical_path(path, canonical_path, JVM_MAXPATHLEN)) {
       // This matches the classic VM
-      EXCEPTION_MARK;
-      THROW_MSG(vmSymbols::java_io_IOException(), "Bad pathname");
+      THROW_MSG_(vmSymbols::java_io_IOException(), "Bad pathname", NULL);
     }
     char* error_msg = NULL;
     jzfile* zip;
@@ -492,7 +500,7 @@
       zip = (*ZipOpen)(canonical_path, &error_msg);
     }
     if (zip != NULL && error_msg == NULL) {
-      *new_entry = new ClassPathZipEntry(zip, path);
+      new_entry = new ClassPathZipEntry(zip, path);
       if (TraceClassLoading) {
         tty->print_cr("[Opened %s]", path);
       }
@@ -507,16 +515,16 @@
        msg = NEW_RESOURCE_ARRAY(char, len);
         jio_snprintf(msg, len - 1, "error in opening JAR file <%s> %s", error_msg, path);
       }
-      EXCEPTION_MARK;
-      THROW_MSG(vmSymbols::java_lang_ClassNotFoundException(), msg);
+      THROW_MSG_(vmSymbols::java_lang_ClassNotFoundException(), msg, NULL);
     }
   } else {
     // Directory
-    *new_entry = new ClassPathDirEntry(path);
+    new_entry = new ClassPathDirEntry(path);
     if (TraceClassLoading) {
       tty->print_cr("[Path %s]", path);
     }
   }
+  return new_entry;
 }
 
 
@@ -575,13 +583,14 @@
   }
 }
 
-void ClassLoader::update_class_path_entry_list(const char *path,
+void ClassLoader::update_class_path_entry_list(char *path,
                                                bool check_for_duplicates) {
   struct stat st;
-  if (os::stat((char *)path, &st) == 0) {
+  if (os::stat(path, &st) == 0) {
     // File or directory found
     ClassPathEntry* new_entry = NULL;
-    create_class_path_entry((char *)path, st, &new_entry, LazyBootClassLoader);
+    Thread* THREAD = Thread::current();
+    new_entry = create_class_path_entry(path, &st, LazyBootClassLoader, CHECK);
     // The kernel VM adds dynamically to the end of the classloader path and
     // doesn't reorder the bootclasspath which would break java.lang.Package
     // (see PackageInfo).
@@ -900,7 +909,7 @@
                                PerfClassTraceTime::CLASS_LOAD);
     ClassPathEntry* e = _first_entry;
     while (e != NULL) {
-      stream = e->open_stream(name);
+      stream = e->open_stream(name, CHECK_NULL);
       if (stream != NULL) {
         break;
       }
@@ -1260,11 +1269,16 @@
 }
 
 void LazyClassPathEntry::compile_the_world(Handle loader, TRAPS) {
-  resolve_entry()->compile_the_world(loader, CHECK);
+  ClassPathEntry* cpe = resolve_entry(THREAD);
+  if (cpe != NULL) {
+    cpe->compile_the_world(loader, CHECK);
+  }
 }
 
 bool LazyClassPathEntry::is_rt_jar() {
-  return resolve_entry()->is_rt_jar();
+  Thread* THREAD = Thread::current();
+  ClassPathEntry* cpe = resolve_entry(THREAD);
+  return (cpe != NULL) ? cpe->is_rt_jar() : false;
 }
 
 void ClassLoader::compile_the_world() {
@@ -1308,6 +1322,25 @@
   // The CHECK at the caller will propagate the exception out
 }
 
+/**
+ * Returns true if the given method should be compiled when doing compile-the-world.
+ *
+ * TODO:  This should be a private method in a CompileTheWorld class.
+ */
+static bool can_be_compiled(methodHandle m, int comp_level) {
+  assert(CompileTheWorld, "must be");
+
+  // It's not valid to compile a native wrapper for MethodHandle methods
+  // that take a MemberName appendix since the bytecode signature is not
+  // correct.
+  vmIntrinsics::ID iid = m->intrinsic_id();
+  if (MethodHandles::is_signature_polymorphic(iid) && MethodHandles::has_member_arg(iid)) {
+    return false;
+  }
+
+  return CompilationPolicy::can_be_compiled(m, comp_level);
+}
+
 void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) {
   int len = (int)strlen(name);
   if (len > 6 && strcmp(".class", name + len - 6) == 0) {
@@ -1351,8 +1384,7 @@
           int comp_level = CompilationPolicy::policy()->initial_compile_level();
           for (int n = 0; n < k->methods()->length(); n++) {
             methodHandle m (THREAD, k->methods()->at(n));
-            if (CompilationPolicy::can_be_compiled(m, comp_level)) {
-
+            if (can_be_compiled(m, comp_level)) {
               if (++_codecache_sweep_counter == CompileTheWorldSafepointInterval) {
                 // Give sweeper a chance to keep up with CTW
                 VM_ForceSafepoint op;
@@ -1364,7 +1396,7 @@
                                             methodHandle(), 0, "CTW", THREAD);
               if (HAS_PENDING_EXCEPTION) {
                 clear_pending_exception_if_not_oom(CHECK);
-                tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name()->as_C_string());
+                tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name_and_sig_as_C_string());
               } else {
                 _compile_the_world_method_counter++;
               }
@@ -1380,11 +1412,13 @@
                                               methodHandle(), 0, "CTW", THREAD);
                 if (HAS_PENDING_EXCEPTION) {
                   clear_pending_exception_if_not_oom(CHECK);
-                  tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name()->as_C_string());
+                  tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name_and_sig_as_C_string());
                 } else {
                   _compile_the_world_method_counter++;
                 }
               }
+            } else {
+              tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name_and_sig_as_C_string());
             }
 
             nmethod* nm = m->code();
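
Two behaviors in the lazy-entry rework deserve a note: resolution failures are
now remembered (_has_error), so a bad path fails fast on later lookups, and the
winning entry is published under ThreadCritical so racing threads agree on one
resolved entry. The publication idiom, reduced to its essentials (build_entry
stands in for create_class_path_entry):

    ClassPathEntry* entry = build_entry(_path, &_st, false, CHECK_NULL);
    {
      ThreadCritical tc;               // short mutual-exclusion section
      if (_resolved_entry == NULL) {
        _resolved_entry = entry;       // first writer wins; later results
      }                                // are simply dropped
    }
    return (ClassPathEntry*)_resolved_entry;
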
--- a/src/share/vm/classfile/classLoader.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/classfile/classLoader.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -63,7 +63,7 @@
   ClassPathEntry();
   // Attempt to locate file_name through this class path entry.
   // Returns a class file parsing stream if successfull.
-  virtual ClassFileStream* open_stream(const char* name) = 0;
+  virtual ClassFileStream* open_stream(const char* name, TRAPS) = 0;
   // Debugging
   NOT_PRODUCT(virtual void compile_the_world(Handle loader, TRAPS) = 0;)
   NOT_PRODUCT(virtual bool is_rt_jar() = 0;)
@@ -77,7 +77,7 @@
   bool is_jar_file()  { return false;  }
   const char* name()  { return _dir; }
   ClassPathDirEntry(char* dir);
-  ClassFileStream* open_stream(const char* name);
+  ClassFileStream* open_stream(const char* name, TRAPS);
   // Debugging
   NOT_PRODUCT(void compile_the_world(Handle loader, TRAPS);)
   NOT_PRODUCT(bool is_rt_jar();)
@@ -107,7 +107,7 @@
   const char* name()  { return _zip_name; }
   ClassPathZipEntry(jzfile* zip, const char* zip_name);
   ~ClassPathZipEntry();
-  ClassFileStream* open_stream(const char* name);
+  ClassFileStream* open_stream(const char* name, TRAPS);
   void contents_do(void f(const char* name, void* context), void* context);
   // Debugging
   NOT_PRODUCT(void compile_the_world(Handle loader, TRAPS);)
@@ -125,13 +125,14 @@
   char* _path; // dir or file
   struct stat _st;
   MetaIndex* _meta_index;
+  bool _has_error;
   volatile ClassPathEntry* _resolved_entry;
-  ClassPathEntry* resolve_entry();
+  ClassPathEntry* resolve_entry(TRAPS);
  public:
   bool is_jar_file();
   const char* name()  { return _path; }
-  LazyClassPathEntry(char* path, struct stat st);
-  ClassFileStream* open_stream(const char* name);
+  LazyClassPathEntry(char* path, const struct stat* st);
+  ClassFileStream* open_stream(const char* name, TRAPS);
   void set_meta_index(MetaIndex* meta_index) { _meta_index = meta_index; }
   virtual bool is_lazy();
   // Debugging
@@ -207,14 +208,15 @@
   static void setup_meta_index();
   static void setup_bootstrap_search_path();
   static void load_zip_library();
-  static void create_class_path_entry(char *path, struct stat st, ClassPathEntry **new_entry, bool lazy);
+  static ClassPathEntry* create_class_path_entry(char *path, const struct stat* st,
+                                                 bool lazy, TRAPS);
 
   // Canonicalizes path names, so strcmp will work properly. This is mainly
   // to avoid confusing the zip library
   static bool get_canonical_path(char* orig, char* out, int len);
  public:
   // Used by the kernel jvm.
-  static void update_class_path_entry_list(const char *path,
+  static void update_class_path_entry_list(char *path,
                                            bool check_for_duplicates);
   static void print_bootclasspath();
 
--- a/src/share/vm/classfile/classLoaderData.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/classfile/classLoaderData.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -131,6 +131,17 @@
   }
 }
 
+void ClassLoaderData::loaded_classes_do(KlassClosure* klass_closure) {
+  // Lock to avoid classes being modified/added/removed during iteration
+  MutexLockerEx ml(metaspace_lock(),  Mutex::_no_safepoint_check_flag);
+  for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
+    // Do not filter ArrayKlass oops here...
+    if (k->oop_is_array() || (k->oop_is_instance() && InstanceKlass::cast(k)->is_loaded())) {
+      klass_closure->do_klass(k);
+    }
+  }
+}
+
 void ClassLoaderData::classes_do(void f(InstanceKlass*)) {
   for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
     if (k->oop_is_instance()) {
@@ -261,7 +272,7 @@
                   k,
                   k->external_name(),
                   k->class_loader_data(),
-                  k->class_loader(),
+                  (void *)k->class_loader(),
                   loader_name());
   }
 }
@@ -297,7 +308,7 @@
   if (TraceClassLoaderData) {
     ResourceMark rm;
     tty->print("[ClassLoaderData: unload loader data "PTR_FORMAT, this);
-    tty->print(" for instance "PTR_FORMAT" of %s", class_loader(),
+    tty->print(" for instance "PTR_FORMAT" of %s", (void *)class_loader(),
                loader_name());
     if (is_anonymous()) {
       tty->print(" for anonymous class  "PTR_FORMAT " ", _klasses);
@@ -458,7 +469,7 @@
 void ClassLoaderData::dump(outputStream * const out) {
   ResourceMark rm;
   out->print("ClassLoaderData CLD: "PTR_FORMAT", loader: "PTR_FORMAT", loader_klass: "PTR_FORMAT" %s {",
-      this, class_loader(),
+      this, (void *)class_loader(),
       class_loader() != NULL ? class_loader()->klass() : NULL, loader_name());
   if (claimed()) out->print(" claimed ");
   if (is_unloading()) out->print(" unloading ");
@@ -553,7 +564,7 @@
         ResourceMark rm;
         tty->print("[ClassLoaderData: ");
         tty->print("create class loader data "PTR_FORMAT, cld);
-        tty->print(" for instance "PTR_FORMAT" of %s", cld->class_loader(),
+        tty->print(" for instance "PTR_FORMAT" of %s", (void *)cld->class_loader(),
                    cld->loader_name());
         tty->print_cr("]");
       }
@@ -600,6 +611,12 @@
   }
 }
 
+void ClassLoaderDataGraph::loaded_classes_do(KlassClosure* klass_closure) {
+  for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
+    cld->loaded_classes_do(klass_closure);
+  }
+}
+
 void ClassLoaderDataGraph::classes_unloading_do(void f(Klass* const)) {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
   for (ClassLoaderData* cld = _unloading; cld != NULL; cld = cld->next()) {
--- a/src/share/vm/classfile/classLoaderData.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/classfile/classLoaderData.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -78,6 +78,7 @@
   static void keep_alive_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim);
   static void classes_do(KlassClosure* klass_closure);
   static void classes_do(void f(Klass* const));
+  static void loaded_classes_do(KlassClosure* klass_closure);
   static void classes_unloading_do(void f(Klass* const));
   static bool do_unloading(BoolObjectClosure* is_alive);
 
@@ -186,6 +187,7 @@
   bool keep_alive() const       { return _keep_alive; }
   bool is_alive(BoolObjectClosure* is_alive_closure) const;
   void classes_do(void f(Klass*));
+  void loaded_classes_do(KlassClosure* klass_closure);
   void classes_do(void f(InstanceKlass*));
 
   // Deallocate free list during class unloading.
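
loaded_classes_do() differs from classes_do() in that it takes the metaspace
lock and reports only klasses that are actually loaded (array klasses always
qualify), making it safe to run while classes are being defined. A sketch of a
caller (the counting closure is an assumed example, not part of this change):

    class CountLoadedKlasses : public KlassClosure {
     public:
      int _count;
      CountLoadedKlasses() : _count(0) {}
      virtual void do_klass(Klass* k) { _count++; }
    };

    CountLoadedKlasses closure;
    ClassLoaderDataGraph::loaded_classes_do(&closure);  // every live CLD
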
--- a/src/share/vm/classfile/defaultMethods.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/classfile/defaultMethods.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -25,7 +25,6 @@
 #include "precompiled.hpp"
 #include "classfile/bytecodeAssembler.hpp"
 #include "classfile/defaultMethods.hpp"
-#include "classfile/genericSignatures.hpp"
 #include "classfile/symbolTable.hpp"
 #include "memory/allocation.hpp"
 #include "memory/metadataFactory.hpp"
@@ -75,14 +74,6 @@
   }
 };
 
-class ContextMark : public PseudoScopeMark {
- private:
-  generic::Context::Mark _mark;
- public:
-  ContextMark(const generic::Context::Mark& cm) : _mark(cm) {}
-  virtual void destroy() { _mark.destroy(); }
-};
-
 #ifndef PRODUCT
 static void print_slot(outputStream* str, Symbol* name, Symbol* signature) {
   ResourceMark rm;
@@ -334,6 +325,7 @@
 
   Method* _selected_target;  // Filled in later, if a unique target exists
   Symbol* _exception_message; // If no unique target is found
+  Symbol* _exception_name;    // If no unique target is found
 
   bool contains_method(Method* method) {
     int* lookup = _member_index.get(method);
@@ -353,13 +345,12 @@
   }
 
   Symbol* generate_no_defaults_message(TRAPS) const;
-  Symbol* generate_abstract_method_message(Method* method, TRAPS) const;
   Symbol* generate_conflicts_message(GrowableArray<Method*>* methods, TRAPS) const;
 
  public:
 
   MethodFamily()
-      : _selected_target(NULL), _exception_message(NULL) {}
+      : _selected_target(NULL), _exception_message(NULL), _exception_name(NULL) {}
 
   void set_target_if_empty(Method* m) {
     if (_selected_target == NULL && !m->is_overpass()) {
@@ -392,6 +383,7 @@
 
   Method* get_selected_target() { return _selected_target; }
   Symbol* get_exception_message() { return _exception_message; }
+  Symbol* get_exception_name() { return _exception_name; }
 
   // Either sets the target or the exception error message
   void determine_target(InstanceKlass* root, TRAPS) {
@@ -409,19 +401,21 @@
 
     if (qualified_methods.length() == 0) {
       _exception_message = generate_no_defaults_message(CHECK);
+      _exception_name = vmSymbols::java_lang_AbstractMethodError();
     } else if (qualified_methods.length() == 1) {
+      // leave abstract methods alone, they will be found via normal search path
       Method* method = qualified_methods.at(0);
-      if (method->is_abstract()) {
-        _exception_message = generate_abstract_method_message(method, CHECK);
-      } else {
+      if (!method->is_abstract()) {
         _selected_target = qualified_methods.at(0);
       }
     } else {
       _exception_message = generate_conflicts_message(&qualified_methods,CHECK);
+      _exception_name = vmSymbols::java_lang_IncompatibleClassChangeError();
+      if (TraceDefaultMethods) {
+        _exception_message->print_value_on(tty);
+        tty->print_cr("");
+      }
     }
-
-    assert((has_target() ^ throws_exception()) == 1,
-           "One and only one must be true");
   }
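For orientation, the three outcomes computed above map onto the following Java shapes (a minimal sketch with hypothetical type names; the conflict case is only reachable through separately compiled bytecode, since javac rejects it at the source level):

interface A { default String m() { return "A.m"; } }
interface B { default String m() { return "B.m"; } }

// Exactly one qualified, non-abstract candidate: the slot's target is A.m.
class Unique implements A { }

// "class Conflict implements A, B { }" would give two maximally-specific
// defaults; the slot then gets no target and the generated overpass throws
// IncompatibleClassChangeError when m() is invoked.  A slot with no
// candidates at all gets an overpass throwing AbstractMethodError.

public class DetermineTargetDemo {
    public static void main(String[] args) {
        System.out.println(new Unique().m());   // prints "A.m"
    }
}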
 
   bool contains_signature(Symbol* query) {
@@ -459,13 +453,18 @@
     streamIndentor si(str, indent * 2);
     str->indent().print("Selected method: ");
     print_method(str, _selected_target);
+    Klass* method_holder = _selected_target->method_holder();
+    if (!method_holder->is_interface()) {
+      tty->print(" : in superclass");
+    }
     str->print_cr("");
   }
 
   void print_exception(outputStream* str, int indent) {
     assert(throws_exception(), "Should be called otherwise");
+    assert(_exception_name != NULL, "exception_name should be set");
     streamIndentor si(str, indent * 2);
-    str->indent().print_cr("%s", _exception_message->as_C_string());
+    str->indent().print_cr("%s: %s", _exception_name->as_C_string(), _exception_message->as_C_string());
   }
 #endif // ndef PRODUCT
 };
@@ -474,20 +473,6 @@
   return SymbolTable::new_symbol("No qualifying defaults found", CHECK_NULL);
 }
 
-Symbol* MethodFamily::generate_abstract_method_message(Method* method, TRAPS) const {
-  Symbol* klass = method->klass_name();
-  Symbol* name = method->name();
-  Symbol* sig = method->signature();
-  stringStream ss;
-  ss.print("Method ");
-  ss.write((const char*)klass->bytes(), klass->utf8_length());
-  ss.print(".");
-  ss.write((const char*)name->bytes(), name->utf8_length());
-  ss.write((const char*)sig->bytes(), sig->utf8_length());
-  ss.print(" is abstract");
-  return SymbolTable::new_symbol(ss.base(), (int)ss.size(), CHECK_NULL);
-}
-
 Symbol* MethodFamily::generate_conflicts_message(GrowableArray<Method*>* methods, TRAPS) const {
   stringStream ss;
   ss.print("Conflicting default methods:");
@@ -503,38 +488,6 @@
   return SymbolTable::new_symbol(ss.base(), (int)ss.size(), CHECK_NULL);
 }
 
-// A generic method family contains a set of all methods that implement a single
-// language-level method.  Because of erasure, these methods may have different
-// signatures.  As members of the set are collected while walking over the
-// hierarchy, they are tagged with a qualification state.  The qualification
-// state for an erased method is set to disqualified if there exists a path
-// from the root of hierarchy to the method that contains an interleaving
-// language-equivalent method defined in an interface.
-class GenericMethodFamily : public MethodFamily {
- private:
-
-  generic::MethodDescriptor* _descriptor; // language-level description
-
- public:
-
-  GenericMethodFamily(generic::MethodDescriptor* canonical_desc)
-      : _descriptor(canonical_desc) {}
-
-  generic::MethodDescriptor* descriptor() const { return _descriptor; }
-
-  bool descriptor_matches(generic::MethodDescriptor* md, generic::Context* ctx) {
-    return descriptor()->covariant_match(md, ctx);
-  }
-
-#ifndef PRODUCT
-  Symbol* get_generic_sig() const {
-
-    generic::Context ctx(NULL); // empty, as _descriptor already canonicalized
-    TempNewSymbol sig = descriptor()->reify_signature(&ctx, Thread::current());
-    return sig;
-  }
-#endif // ndef PRODUCT
-};
 
 class StateRestorer;
 
@@ -571,26 +524,6 @@
   StateRestorer* record_method_and_dq_further(Method* mo);
 };
 
-
-// StatefulGenericMethodFamily is a wrapper around GenericMethodFamily that maintains the
-// qualification state during hierarchy visitation, and applies that state
-// when adding members to the GenericMethodFamily.
-class StatefulGenericMethodFamily : public StatefulMethodFamily {
-
- public:
-  StatefulGenericMethodFamily(generic::MethodDescriptor* md, generic::Context* ctx)
-  : StatefulMethodFamily(new GenericMethodFamily(md->canonicalize(ctx))) {
-
-  }
-  GenericMethodFamily* get_method_family() {
-    return (GenericMethodFamily*)_method_family;
-  }
-
-  bool descriptor_matches(generic::MethodDescriptor* md, generic::Context* ctx) {
-    return get_method_family()->descriptor_matches(md, ctx);
-  }
-};
-
 class StateRestorer : public PseudoScopeMark {
  private:
   StatefulMethodFamily* _method;
@@ -616,39 +549,6 @@
   return mark;
 }
 
-class StatefulGenericMethodFamilies : public ResourceObj {
- private:
-  GrowableArray<StatefulGenericMethodFamily*> _methods;
-
- public:
-  StatefulGenericMethodFamily* find_matching(
-      generic::MethodDescriptor* md, generic::Context* ctx) {
-    for (int i = 0; i < _methods.length(); ++i) {
-      StatefulGenericMethodFamily* existing = _methods.at(i);
-      if (existing->descriptor_matches(md, ctx)) {
-        return existing;
-      }
-    }
-    return NULL;
-  }
-
-  StatefulGenericMethodFamily* find_matching_or_create(
-      generic::MethodDescriptor* md, generic::Context* ctx) {
-    StatefulGenericMethodFamily* method = find_matching(md, ctx);
-    if (method == NULL) {
-      method = new StatefulGenericMethodFamily(md, ctx);
-      _methods.append(method);
-    }
-    return method;
-  }
-
-  void extract_families_into(GrowableArray<GenericMethodFamily*>* array) {
-    for (int i = 0; i < _methods.length(); ++i) {
-      array->append(_methods.at(i)->get_method_family());
-    }
-  }
-};
-
 // Represents a location corresponding to a vtable slot for methods that
 // neither the class nor any of its ancestors provide an implementation.
 // Default methods may be present to fill this slot.
@@ -679,6 +579,18 @@
 #endif // ndef PRODUCT
 };
 
+static bool already_in_vtable_slots(GrowableArray<EmptyVtableSlot*>* slots, Method* m) {
+  bool found = false;
+  for (int j = 0; j < slots->length(); ++j) {
+    if (slots->at(j)->name() == m->name() &&
+        slots->at(j)->signature() == m->signature() ) {
+      found = true;
+      break;
+    }
+  }
+  return found;
+}
+
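As a hedged aside, the de-duplication above can be sketched in Java as follows (illustrative only, not VM code; the VM compares canonicalized Symbol* pointers, so its linear scan is cheap):

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

class SlotDedup {
    // Value-equality key mirroring the (name, signature) comparison above.
    record Slot(String name, String signature) { }

    private final List<Slot> slots = new ArrayList<>();
    private final Set<Slot> seen = new HashSet<>();

    // Append only if no slot with the same (name, signature) pair exists.
    void addIfAbsent(String name, String signature) {
        Slot s = new Slot(name, signature);
        if (seen.add(s)) {
            slots.add(s);
        }
    }

    public static void main(String[] args) {
        SlotDedup d = new SlotDedup();
        d.addIfAbsent("m", "()V");
        d.addIfAbsent("m", "()V");   // duplicate: ignored
        System.out.println(d.slots.size());   // prints 1
    }
}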
 static GrowableArray<EmptyVtableSlot*>* find_empty_vtable_slots(
     InstanceKlass* klass, GrowableArray<Method*>* mirandas, TRAPS) {
 
@@ -688,8 +600,10 @@
 
   // All miranda methods are obvious candidates
   for (int i = 0; i < mirandas->length(); ++i) {
-    EmptyVtableSlot* slot = new EmptyVtableSlot(mirandas->at(i));
-    slots->append(slot);
+    Method* m = mirandas->at(i);
+    if (!already_in_vtable_slots(slots, m)) {
+      slots->append(new EmptyVtableSlot(m));
+    }
   }
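For context, a "miranda" is an interface method with no implementation anywhere in the superclass chain; a minimal Java illustration, with hypothetical names:

interface I { void m(); }

// C declares no m() and inherits no implementation, yet still needs a
// vtable slot for it -- a "miranda" method -- which makes it an empty
// slot candidate for the default-method processing above.
abstract class C implements I { }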
 
   // Also any overpasses in our superclasses that we haven't implemented.
@@ -705,7 +619,26 @@
         // unless we have a real implementation of it in the current class.
         Method* impl = klass->lookup_method(m->name(), m->signature());
         if (impl == NULL || impl->is_overpass()) {
-          slots->append(new EmptyVtableSlot(m));
+          if (!already_in_vtable_slots(slots, m)) {
+            slots->append(new EmptyVtableSlot(m));
+          }
+        }
+      }
+    }
+
+    // also any default methods in our superclasses
+    if (super->default_methods() != NULL) {
+      for (int i = 0; i < super->default_methods()->length(); ++i) {
+        Method* m = super->default_methods()->at(i);
+        // m is a method that would have been a miranda if not for the
+        // default method processing that occurred on behalf of our superclass,
+        // so it's a method we want to re-examine in this new context.  That is,
+        // unless we have a real implementation of it in the current class.
+        Method* impl = klass->lookup_method(m->name(), m->signature());
+        if (impl == NULL || impl->is_overpass()) {
+          if (!already_in_vtable_slots(slots, m)) {
+            slots->append(new EmptyVtableSlot(m));
+          }
         }
       }
     }
@@ -760,7 +693,10 @@
     InstanceKlass* iklass = current_class();
 
     Method* m = iklass->find_method(_method_name, _method_signature);
-    if (m != NULL) {
+    // private interface methods are not candidates for default methods
+    // invokespecial to private interface methods doesn't use default method logic
+    // future: take access controls into account for superclass methods
+    if (m != NULL && !m->is_static() && (!iklass->is_interface() || m->is_public())) {
       if (_family == NULL) {
         _family = new StatefulMethodFamily();
       }
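The is_public() filter above matches the source-level rule that private interface methods never participate in default-method inheritance. A small illustration (Java 9+ syntax, hypothetical names):

interface A {
    // Private: never a default-method candidate; calls to it are bound
    // directly rather than through default-method selection.
    private String tag() { return "A"; }

    // Public default: a candidate for implementing classes.
    default String m() { return "from " + tag(); }
}

class C implements A { }   // inherits m(); tag() contributes no vtable slot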
@@ -779,146 +715,11 @@
 
 };
 
-// Iterates over the type hierarchy looking for all methods with a specific
-// method name.  The result of this is a set of method families each of
-// which is populated with a set of methods that implement the same
-// language-level signature.
-class FindMethodsByGenericSig : public HierarchyVisitor<FindMethodsByGenericSig> {
- private:
-  // Context data
-  Thread* THREAD;
-  generic::DescriptorCache* _cache;
-  Symbol* _method_name;
-  generic::Context* _ctx;
-  StatefulGenericMethodFamilies _families;
 
- public:
-
-  FindMethodsByGenericSig(generic::DescriptorCache* cache, Symbol* name,
-      generic::Context* ctx, Thread* thread) :
-    _cache(cache), _method_name(name), _ctx(ctx), THREAD(thread) {}
-
-  void get_discovered_families(GrowableArray<GenericMethodFamily*>* methods) {
-    _families.extract_families_into(methods);
-  }
-
-  void* new_node_data(InstanceKlass* cls) { return new PseudoScope(); }
-  void free_node_data(void* node_data) {
-    PseudoScope::cast(node_data)->destroy();
-  }
-
-  bool visit() {
-    PseudoScope* scope = PseudoScope::cast(current_data());
-    InstanceKlass* klass = current_class();
-    InstanceKlass* sub = current_depth() > 0 ? class_at_depth(1) : NULL;
-
-    ContextMark* cm = new ContextMark(_ctx->mark());
-    scope->add_mark(cm); // will restore context when scope is freed
-
-    _ctx->apply_type_arguments(sub, klass, THREAD);
-
-    int start, end = 0;
-    start = klass->find_method_by_name(_method_name, &end);
-    if (start != -1) {
-      for (int i = start; i < end; ++i) {
-        Method* m = klass->methods()->at(i);
-        // This gets the method's parameter list with its generic type
-        // parameters resolved
-        generic::MethodDescriptor* md = _cache->descriptor_for(m, THREAD);
-
-        // Find all methods on this hierarchy that match this method
-        // (name, signature).   This class collects other families of this
-        // method name.
-        StatefulGenericMethodFamily* family =
-            _families.find_matching_or_create(md, _ctx);
-
-        if (klass->is_interface()) {
-          // ???
-          StateRestorer* restorer = family->record_method_and_dq_further(m);
-          scope->add_mark(restorer);
-        } else {
-          // This is the rule that methods in classes "win" (bad word) over
-          // methods in interfaces.  This works because of single inheritance
-          family->set_target_if_empty(m);
-        }
-      }
-    }
-    return true;
-  }
-};
 
-#ifndef PRODUCT
-static void print_generic_families(
-    GrowableArray<GenericMethodFamily*>* methods, Symbol* match) {
-  streamIndentor si(tty, 4);
-  if (methods->length() == 0) {
-    tty->indent();
-    tty->print_cr("No Logical Method found");
-  }
-  for (int i = 0; i < methods->length(); ++i) {
-    tty->indent();
-    GenericMethodFamily* lm = methods->at(i);
-    if (lm->contains_signature(match)) {
-      tty->print_cr("<Matching>");
-    } else {
-      tty->print_cr("<Non-Matching>");
-    }
-    lm->print_sig_on(tty, lm->get_generic_sig(), 1);
-  }
-}
-#endif // ndef PRODUCT
-
-static void create_overpasses(
+static void create_defaults_and_exceptions(
     GrowableArray<EmptyVtableSlot*>* slots, InstanceKlass* klass, TRAPS);
 
-static void generate_generic_defaults(
-      InstanceKlass* klass, GrowableArray<EmptyVtableSlot*>* empty_slots,
-      EmptyVtableSlot* slot, int current_slot_index, TRAPS) {
-
-  if (slot->is_bound()) {
-#ifndef PRODUCT
-    if (TraceDefaultMethods) {
-      streamIndentor si(tty, 4);
-      tty->indent().print_cr("Already bound to logical method:");
-      GenericMethodFamily* lm = (GenericMethodFamily*)(slot->get_binding());
-      lm->print_sig_on(tty, lm->get_generic_sig(), 1);
-    }
-#endif // ndef PRODUCT
-    return; // covered by previous processing
-  }
-
-  generic::DescriptorCache cache;
-
-  generic::Context ctx(&cache);
-  FindMethodsByGenericSig visitor(&cache, slot->name(), &ctx, CHECK);
-  visitor.run(klass);
-
-  GrowableArray<GenericMethodFamily*> discovered_families;
-  visitor.get_discovered_families(&discovered_families);
-
-#ifndef PRODUCT
-  if (TraceDefaultMethods) {
-    print_generic_families(&discovered_families, slot->signature());
-  }
-#endif // ndef PRODUCT
-
-  // Find and populate any other slots that match the discovered families
-  for (int j = current_slot_index; j < empty_slots->length(); ++j) {
-    EmptyVtableSlot* open_slot = empty_slots->at(j);
-
-    if (slot->name() == open_slot->name()) {
-      for (int k = 0; k < discovered_families.length(); ++k) {
-        GenericMethodFamily* lm = discovered_families.at(k);
-
-        if (lm->contains_signature(open_slot->signature())) {
-          lm->determine_target(klass, CHECK);
-          open_slot->bind_family(lm);
-        }
-      }
-    }
-  }
-}
-
 static void generate_erased_defaults(
      InstanceKlass* klass, GrowableArray<EmptyVtableSlot*>* empty_slots,
      EmptyVtableSlot* slot, TRAPS) {
@@ -937,27 +738,22 @@
 
 static void merge_in_new_methods(InstanceKlass* klass,
     GrowableArray<Method*>* new_methods, TRAPS);
+static void create_default_methods( InstanceKlass* klass,
+    GrowableArray<Method*>* new_methods, TRAPS);
 
 // This is the guts of the default methods implementation.  This is called just
 // after the classfile has been parsed if some ancestor has default methods.
 //
 // First it finds any name/signature slots that need any implementation (either
 // because they are miranda or a superclass's implementation is an overpass
-// itself).  For each slot, iterate over the hierarchy, using generic signature
-// information to partition any methods that match the name into method families
-// where each family contains methods whose signatures are equivalent at the
-// language level (i.e., their reified parameters match and return values are
-// covariant). Check those sets to see if they contain a signature that matches
-// the slot we're looking at (if we're lucky, there might be other empty slots
-// that we can fill using the same analysis).
+// itself).  For each slot, iterate over the hierarchy to see if it contains
+// a method whose signature matches the slot we are looking at.
 //
 // For each slot filled, we generate an overpass method that either calls the
 // unique default method candidate using invokespecial, or throws an exception
 // (in the case of no default method candidates, or more than one valid
-// candidate).  These methods are then added to the class's method list.  If
-// the method set we're using contains methods (qualified or not) with a
-// different runtime signature than the method we're creating, then we have to
-// create bridges with those signatures too.
+// candidate).  These methods are then added to the class's method list.
+// The JVM does not create bridges or handle generic signatures here.
 void DefaultMethods::generate_default_methods(
     InstanceKlass* klass, GrowableArray<Method*>* mirandas, TRAPS) {
 
@@ -997,11 +793,7 @@
     }
 #endif // ndef PRODUCT
 
-    if (ParseGenericDefaults) {
-      generate_generic_defaults(klass, empty_slots, slot, i, CHECK);
-    } else {
-      generate_erased_defaults(klass, empty_slots, slot, CHECK);
-    }
+    generate_erased_defaults(klass, empty_slots, slot, CHECK);
  }
 #ifndef PRODUCT
   if (TraceDefaultMethods) {
@@ -1009,7 +801,7 @@
   }
 #endif // ndef PRODUCT
 
-  create_overpasses(empty_slots, klass, CHECK);
+  create_defaults_and_exceptions(empty_slots, klass, CHECK);
 
 #ifndef PRODUCT
   if (TraceDefaultMethods) {
@@ -1018,366 +810,9 @@
 #endif // ndef PRODUCT
 }
 
-/**
- * Generic analysis was used upon interface '_target' and found a unique
- * default method candidate with generic signature '_method_desc'.  This
- * method is only viable if it would also be in the set of default method
- * candidates if we ran a full analysis on the current class.
- *
- * The only reason that the method would not be in the set of candidates for
- * the current class is if that there's another covariantly matching method
- * which is "more specific" than the found method -- i.e., one could find a
- * path in the interface hierarchy in which the matching method appears
- * before we get to '_target'.
- *
- * In order to determine this, we examine all of the implemented
- * interfaces.  If we find path that leads to the '_target' interface, then
- * we examine that path to see if there are any methods that would shadow
- * the selected method along that path.
- */
-class ShadowChecker : public HierarchyVisitor<ShadowChecker> {
- protected:
-  Thread* THREAD;
-
-  InstanceKlass* _target;
-
-  Symbol* _method_name;
-  InstanceKlass* _method_holder;
-  bool _found_shadow;
-
-
- public:
-
-  ShadowChecker(Thread* thread, Symbol* name, InstanceKlass* holder,
-                InstanceKlass* target)
-                : THREAD(thread), _method_name(name), _method_holder(holder),
-                _target(target), _found_shadow(false) {}
-
-  void* new_node_data(InstanceKlass* cls) { return NULL; }
-  void free_node_data(void* data) { return; }
-
-  bool visit() {
-    InstanceKlass* ik = current_class();
-    if (ik == _target && current_depth() == 1) {
-      return false; // This was the specified super -- no need to search it
-    }
-    if (ik == _method_holder || ik == _target) {
-      // We found a path that should be examined to see if it shadows _method
-      if (path_has_shadow()) {
-        _found_shadow = true;
-        cancel_iteration();
-      }
-      return false; // no need to continue up hierarchy
-    }
-    return true;
-  }
-
-  virtual bool path_has_shadow() = 0;
-  bool found_shadow() { return _found_shadow; }
-};
-
-// Used for Invokespecial.
-// Invokespecial is allowed to invoke a concrete interface method
-// and can be used to disambiguate among qualified candidates,
-// which are methods in immediate superinterfaces,
-// but may not be used to invoke a candidate that would be shadowed
-// from the perspective of the caller.
-// Invokespecial is also used in the overpass generation today
-// We re-run the shadowchecker because we can't distinguish this case,
-// but it should return the same answer, since the overpass target
-// is now the invokespecial caller.
-class ErasedShadowChecker : public ShadowChecker {
- private:
-  bool path_has_shadow() {
-
-    for (int i = current_depth() - 1; i > 0; --i) {
-      InstanceKlass* ik = class_at_depth(i);
-
-      if (ik->is_interface()) {
-        int end;
-        int start = ik->find_method_by_name(_method_name, &end);
-        if (start != -1) {
-          return true;
-        }
-      }
-    }
-    return false;
-  }
- public:
-
-  ErasedShadowChecker(Thread* thread, Symbol* name, InstanceKlass* holder,
-                InstanceKlass* target)
-    : ShadowChecker(thread, name, holder, target) {}
-};
-
-class GenericShadowChecker : public ShadowChecker {
- private:
-  generic::DescriptorCache* _cache;
-  generic::MethodDescriptor* _method_desc;
-
-  bool path_has_shadow() {
-    generic::Context ctx(_cache);
-
-    for (int i = current_depth() - 1; i > 0; --i) {
-      InstanceKlass* ik = class_at_depth(i);
-      InstanceKlass* sub = class_at_depth(i + 1);
-      ctx.apply_type_arguments(sub, ik, THREAD);
-
-      if (ik->is_interface()) {
-        int end;
-        int start = ik->find_method_by_name(_method_name, &end);
-        if (start != -1) {
-          for (int j = start; j < end; ++j) {
-            Method* mo = ik->methods()->at(j);
-            generic::MethodDescriptor* md = _cache->descriptor_for(mo, THREAD);
-            if (_method_desc->covariant_match(md, &ctx)) {
-              return true;
-            }
-          }
-        }
-      }
-    }
-    return false;
-  }
-
- public:
-
-  GenericShadowChecker(generic::DescriptorCache* cache, Thread* thread,
-      Symbol* name, InstanceKlass* holder, generic::MethodDescriptor* desc,
-      InstanceKlass* target)
-    : ShadowChecker(thread, name, holder, target) {
-      _cache = cache;
-      _method_desc = desc;
- }
-};
-
-
-
-// Find the unique qualified candidate from the perspective of the super_class
-// which is the resolved_klass, which must be an immediate superinterface
-// of klass
-Method* find_erased_super_default(InstanceKlass* current_class, InstanceKlass* super_class, Symbol* method_name, Symbol* sig, TRAPS) {
-
-  FindMethodsByErasedSig visitor(method_name, sig);
-  visitor.run(super_class);      // find candidates from resolved_klass
-
-  MethodFamily* family;
-  visitor.get_discovered_family(&family);
-
-  if (family != NULL) {
-    family->determine_target(current_class, CHECK_NULL);  // get target from current_class
-  }
-
-  if (family->has_target()) {
-    Method* target = family->get_selected_target();
-    InstanceKlass* holder = InstanceKlass::cast(target->method_holder());
-
-    // Verify that the identified method is valid from the context of
-    // the current class, which is the caller class for invokespecial
-    // link resolution, i.e. ensure that it is not shadowed there.
-    // You can use invokespecial to disambiguate interface methods, but
-    // you can not use it to skip over an interface method that would shadow it.
-    ErasedShadowChecker checker(THREAD, target->name(), holder, super_class);
-    checker.run(current_class);
+static int assemble_method_error(
+    BytecodeConstantPool* cp, BytecodeBuffer* buffer, Symbol* errorName, Symbol* message, TRAPS) {
 
-    if (checker.found_shadow()) {
-#ifndef PRODUCT
-      if (TraceDefaultMethods) {
-        tty->print_cr("    Only candidate found was shadowed.");
-      }
-#endif // ndef PRODUCT
-      THROW_MSG_(vmSymbols::java_lang_AbstractMethodError(),
-                 "Accessible default method not found", NULL);
-    } else {
-#ifndef PRODUCT
-      if (TraceDefaultMethods) {
-        family->print_sig_on(tty, target->signature(), 1);
-      }
-#endif // ndef PRODUCT
-      return target;
-    }
-  } else {
-    assert(family->throws_exception(), "must have target or throw");
-    THROW_MSG_(vmSymbols::java_lang_AbstractMethodError(),
-               family->get_exception_message()->as_C_string(), NULL);
-  }
-}
-
-// super_class is assumed to be the direct super of current_class
-Method* find_generic_super_default( InstanceKlass* current_class,
-                                    InstanceKlass* super_class,
-                                    Symbol* method_name, Symbol* sig, TRAPS) {
-  generic::DescriptorCache cache;
-  generic::Context ctx(&cache);
-
-  // Prime the initial generic context for current -> super_class
-  ctx.apply_type_arguments(current_class, super_class, CHECK_NULL);
-
-  FindMethodsByGenericSig visitor(&cache, method_name, &ctx, CHECK_NULL);
-  visitor.run(super_class);
-
-  GrowableArray<GenericMethodFamily*> families;
-  visitor.get_discovered_families(&families);
-
-#ifndef PRODUCT
-  if (TraceDefaultMethods) {
-    print_generic_families(&families, sig);
-  }
-#endif // ndef PRODUCT
-
-  GenericMethodFamily* selected_family = NULL;
-
-  for (int i = 0; i < families.length(); ++i) {
-    GenericMethodFamily* lm = families.at(i);
-    if (lm->contains_signature(sig)) {
-      lm->determine_target(current_class, CHECK_NULL);
-      selected_family = lm;
-    }
-  }
-
-  if (selected_family->has_target()) {
-    Method* target = selected_family->get_selected_target();
-    InstanceKlass* holder = InstanceKlass::cast(target->method_holder());
-
-    // Verify that the identified method is valid from the context of
-    // the current class
-    GenericShadowChecker checker(&cache, THREAD, target->name(),
-        holder, selected_family->descriptor(), super_class);
-    checker.run(current_class);
-
-    if (checker.found_shadow()) {
-#ifndef PRODUCT
-      if (TraceDefaultMethods) {
-        tty->print_cr("    Only candidate found was shadowed.");
-      }
-#endif // ndef PRODUCT
-      THROW_MSG_(vmSymbols::java_lang_AbstractMethodError(),
-                 "Accessible default method not found", NULL);
-    } else {
-      return target;
-    }
-  } else {
-    assert(selected_family->throws_exception(), "must have target or throw");
-    THROW_MSG_(vmSymbols::java_lang_AbstractMethodError(),
-               selected_family->get_exception_message()->as_C_string(), NULL);
-  }
-}
-
-// This is called during linktime when we find an invokespecial call that
-// refers to a direct superinterface.  It indicates that we should find the
-// default method in the hierarchy of that superinterface, and if that method
-// would have been a candidate from the point of view of 'this' class, then we
-// return that method.
-// This logic assumes that the super is a direct superclass of the caller
-Method* DefaultMethods::find_super_default(
-    Klass* cls, Klass* super, Symbol* method_name, Symbol* sig, TRAPS) {
-
-  ResourceMark rm(THREAD);
-
-  assert(cls != NULL && super != NULL, "Need real classes");
-
-  InstanceKlass* current_class = InstanceKlass::cast(cls);
-  InstanceKlass* super_class = InstanceKlass::cast(super);
-
-  // Keep entire hierarchy alive for the duration of the computation
-  KeepAliveRegistrar keepAlive(THREAD);
-  KeepAliveVisitor loadKeepAlive(&keepAlive);
-  loadKeepAlive.run(current_class);   // get hierarchy from current class
-
-#ifndef PRODUCT
-  if (TraceDefaultMethods) {
-    tty->print_cr("Finding super default method %s.%s%s from %s",
-      super_class->name()->as_C_string(),
-      method_name->as_C_string(), sig->as_C_string(),
-      current_class->name()->as_C_string());
-  }
-#endif // ndef PRODUCT
-
-  assert(super_class->is_interface(), "only call for default methods");
-
-  Method* target = NULL;
-  if (ParseGenericDefaults) {
-    target = find_generic_super_default(current_class, super_class,
-                                        method_name, sig, CHECK_NULL);
-  } else {
-    target = find_erased_super_default(current_class, super_class,
-                                       method_name, sig, CHECK_NULL);
-  }
-
-#ifndef PRODUCT
-  if (target != NULL) {
-    if (TraceDefaultMethods) {
-      tty->print("    Returning ");
-      print_method(tty, target, true);
-      tty->print_cr("");
-    }
-  }
-#endif // ndef PRODUCT
-  return target;
-}
-
-#ifndef PRODUCT
-// Return true if broad type is a covariant return of narrow type
-static bool covariant_return_type(BasicType narrow, BasicType broad) {
-  if (narrow == broad) {
-    return true;
-  }
-  if (broad == T_OBJECT) {
-    return true;
-  }
-  return false;
-}
-#endif // ndef PRODUCT
-
-static int assemble_redirect(
-    BytecodeConstantPool* cp, BytecodeBuffer* buffer,
-    Symbol* incoming, Method* target, TRAPS) {
-
-  BytecodeAssembler assem(buffer, cp);
-
-  SignatureStream in(incoming, true);
-  SignatureStream out(target->signature(), true);
-  u2 parameter_count = 0;
-
-  assem.aload(parameter_count++); // load 'this'
-
-  while (!in.at_return_type()) {
-    assert(!out.at_return_type(), "Parameter counts do not match");
-    BasicType bt = in.type();
-    assert(out.type() == bt, "Parameter types are not compatible");
-    assem.load(bt, parameter_count);
-    if (in.is_object() && in.as_symbol(THREAD) != out.as_symbol(THREAD)) {
-      assem.checkcast(out.as_symbol(THREAD));
-    } else if (bt == T_LONG || bt == T_DOUBLE) {
-      ++parameter_count; // longs and doubles use two slots
-    }
-    ++parameter_count;
-    in.next();
-    out.next();
-  }
-  assert(out.at_return_type(), "Parameter counts do not match");
-  assert(covariant_return_type(out.type(), in.type()), "Return types are not compatible");
-
-  if (parameter_count == 1 && (in.type() == T_LONG || in.type() == T_DOUBLE)) {
-    ++parameter_count; // need room for return value
-  }
-  if (target->method_holder()->is_interface()) {
-    assem.invokespecial(target);
-  } else {
-    assem.invokevirtual(target);
-  }
-
-  if (in.is_object() && in.as_symbol(THREAD) != out.as_symbol(THREAD)) {
-    assem.checkcast(in.as_symbol(THREAD));
-  }
-  assem._return(in.type());
-  return parameter_count;
-}
-
-static int assemble_abstract_method_error(
-    BytecodeConstantPool* cp, BytecodeBuffer* buffer, Symbol* message, TRAPS) {
-
-  Symbol* errorName = vmSymbols::java_lang_AbstractMethodError();
   Symbol* init = vmSymbols::object_initializer_name();
   Symbol* sig = vmSymbols::string_void_signature();
 
@@ -1422,7 +857,6 @@
   m->set_max_locals(params);
   m->constMethod()->set_stackmap_data(NULL);
   m->set_code(code_start);
-  m->set_force_inline(true);
 
   return m;
 }
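Behaviorally, an exception-throwing overpass assembled by assemble_method_error and materialized by new_method corresponds to a Java method of roughly this shape (hypothetical slot name and message; the VM emits the new/dup/ldc/invokespecial/athrow sequence directly):

class OverpassShape {
    // Equivalent of an overpass generated for a conflicting slot "m".
    public Object m() {
        throw new IncompatibleClassChangeError(
            "Conflicting default methods: A.m B.m");
    }
}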
@@ -1448,18 +882,18 @@
   }
 }
 
-// A "bridge" is a method created by javac to bridge the gap between
-// an implementation and a generically-compatible, but different, signature.
-// Bridges have actual bytecode implementation in classfiles.
-// An "overpass", on the other hand, performs the same function as a bridge
-// but does not occur in a classfile; the VM creates the overpass itself
-// when it needs a path to get from a call site to a default method and
-// a bridge doesn't exist.
-static void create_overpasses(
+// Create the default_methods list for the current class.
+// Since the VM processes only erased signatures, it creates an overpass
+// only in a conflict case or a case with no candidates.
+// This allows virtual methods to override the overpass, but ensures
+// that a local method search will find the exception rather than an abstract
+// or default method that is not a valid candidate.
+static void create_defaults_and_exceptions(
     GrowableArray<EmptyVtableSlot*>* slots,
     InstanceKlass* klass, TRAPS) {
 
   GrowableArray<Method*> overpasses;
+  GrowableArray<Method*> defaults;
   BytecodeConstantPool bpool(klass->constants());
 
   for (int i = 0; i < slots->length(); ++i) {
@@ -1467,7 +901,6 @@
 
     if (slot->is_bound()) {
       MethodFamily* method = slot->get_binding();
-      int max_stack = 0;
       BytecodeBuffer buffer;
 
 #ifndef PRODUCT
@@ -1477,26 +910,30 @@
         tty->print_cr("");
         if (method->has_target()) {
           method->print_selected(tty, 1);
-        } else {
+        } else if (method->throws_exception()) {
           method->print_exception(tty, 1);
         }
       }
 #endif // ndef PRODUCT
+
       if (method->has_target()) {
         Method* selected = method->get_selected_target();
-        max_stack = assemble_redirect(
-            &bpool, &buffer, slot->signature(), selected, CHECK);
+        if (selected->method_holder()->is_interface()) {
+          defaults.push(selected);
+        }
       } else if (method->throws_exception()) {
-        max_stack = assemble_abstract_method_error(
-            &bpool, &buffer, method->get_exception_message(), CHECK);
-      }
-      AccessFlags flags = accessFlags_from(
+        int max_stack = assemble_method_error(&bpool, &buffer,
+           method->get_exception_name(), method->get_exception_message(), CHECK);
+        AccessFlags flags = accessFlags_from(
           JVM_ACC_PUBLIC | JVM_ACC_SYNTHETIC | JVM_ACC_BRIDGE);
-      Method* m = new_method(&bpool, &buffer, slot->name(), slot->signature(),
+        Method* m = new_method(&bpool, &buffer, slot->name(), slot->signature(),
           flags, max_stack, slot->size_of_parameters(),
           ConstMethod::OVERPASS, CHECK);
-      if (m != NULL) {
-        overpasses.push(m);
+        // Only exception-throwing overpass methods are pushed to the
+        // class's methods list.
+        if (m != NULL) {
+          overpasses.push(m);
+        }
       }
     }
   }
@@ -1504,11 +941,31 @@
 #ifndef PRODUCT
   if (TraceDefaultMethods) {
     tty->print_cr("Created %d overpass methods", overpasses.length());
+    tty->print_cr("Created %d default  methods", defaults.length());
   }
 #endif // ndef PRODUCT
 
-  switchover_constant_pool(&bpool, klass, &overpasses, CHECK);
-  merge_in_new_methods(klass, &overpasses, CHECK);
+  if (overpasses.length() > 0) {
+    switchover_constant_pool(&bpool, klass, &overpasses, CHECK);
+    merge_in_new_methods(klass, &overpasses, CHECK);
+  }
+  if (defaults.length() > 0) {
+    create_default_methods(klass, &defaults, CHECK);
+  }
+}
+
+static void create_default_methods( InstanceKlass* klass,
+    GrowableArray<Method*>* new_methods, TRAPS) {
+
+  int new_size = new_methods->length();
+  Array<Method*>* total_default_methods = MetadataFactory::new_array<Method*>(
+      klass->class_loader_data(), new_size, NULL, CHECK);
+  for (int index = 0; index < new_size; index++ ) {
+    total_default_methods->at_put(index, new_methods->at(index));
+  }
+  Method::sort_methods(total_default_methods, false, false);
+
+  klass->set_default_methods(total_default_methods);
 }
 
 static void sort_methods(GrowableArray<Method*>* methods) {
@@ -1616,4 +1073,3 @@
     MetadataFactory::free_array(cld, original_ordering);
   }
 }
-
--- a/src/share/vm/classfile/defaultMethods.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/classfile/defaultMethods.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,15 +44,5 @@
   // the class.
   static void generate_default_methods(
       InstanceKlass* klass, GrowableArray<Method*>* mirandas, TRAPS);
-
-
-  // Called during linking when an invokespecial to an direct interface
-  // method is found.  Selects and returns a method if there is a unique
-  // default method in the 'super_iface' part of the hierarchy which is
-  // also a candidate default for 'this_klass'.  Otherwise throws an AME.
-  static Method* find_super_default(
-      Klass* this_klass, Klass* super_iface,
-      Symbol* method_name, Symbol* method_sig, TRAPS);
 };
-
 #endif // SHARE_VM_CLASSFILE_DEFAULTMETHODS_HPP
--- a/src/share/vm/classfile/dictionary.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/classfile/dictionary.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "classfile/dictionary.hpp"
 #include "classfile/systemDictionary.hpp"
+#include "memory/iterator.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiRedefineClassesTrace.hpp"
 #include "utilities/hashtable.inline.hpp"
@@ -38,17 +39,21 @@
   : TwoOopHashtable<Klass*, mtClass>(table_size, sizeof(DictionaryEntry)) {
   _current_class_index = 0;
   _current_class_entry = NULL;
+  _pd_cache_table = new ProtectionDomainCacheTable(defaultProtectionDomainCacheSize);
 };
 
 
-
 Dictionary::Dictionary(int table_size, HashtableBucket<mtClass>* t,
                        int number_of_entries)
   : TwoOopHashtable<Klass*, mtClass>(table_size, sizeof(DictionaryEntry), t, number_of_entries) {
   _current_class_index = 0;
   _current_class_entry = NULL;
+  _pd_cache_table = new ProtectionDomainCacheTable(defaultProtectionDomainCacheSize);
 };
 
+ProtectionDomainCacheEntry* Dictionary::cache_get(oop protection_domain) {
+  return _pd_cache_table->get(protection_domain);
+}
 
 DictionaryEntry* Dictionary::new_entry(unsigned int hash, Klass* klass,
                                        ClassLoaderData* loader_data) {
@@ -105,11 +110,12 @@
 }
 
 
-void DictionaryEntry::add_protection_domain(oop protection_domain) {
+void DictionaryEntry::add_protection_domain(Dictionary* dict, oop protection_domain) {
   assert_locked_or_safepoint(SystemDictionary_lock);
   if (!contains_protection_domain(protection_domain)) {
+    ProtectionDomainCacheEntry* entry = dict->cache_get(protection_domain);
     ProtectionDomainEntry* new_head =
-                new ProtectionDomainEntry(protection_domain, _pd_set);
+                new ProtectionDomainEntry(entry, _pd_set);
     // Warning: Preserve store ordering.  The SystemDictionary is read
     //          without locks.  The new ProtectionDomainEntry must be
     //          complete before other threads can be allowed to see it
@@ -193,7 +199,10 @@
 
 
 void Dictionary::always_strong_oops_do(OopClosure* blk) {
-  // Follow all system classes and temporary placeholders in dictionary
+  // Follow all system classes and temporary placeholders in dictionary; only
+  // protection domain oops contain references into the heap. In a first
+  // pass over the system dictionary determine which need to be treated as
+  // strongly reachable and mark them as such.
   for (int index = 0; index < table_size(); index++) {
     for (DictionaryEntry *probe = bucket(index);
                           probe != NULL;
@@ -201,10 +210,13 @@
       Klass* e = probe->klass();
       ClassLoaderData* loader_data = probe->loader_data();
       if (is_strongly_reachable(loader_data, e)) {
-        probe->protection_domain_set_oops_do(blk);
+        probe->set_strongly_reachable();
       }
     }
   }
+  // Then iterate over the protection domain cache to apply the closure on the
+  // previously marked ones.
+  _pd_cache_table->always_strong_oops_do(blk);
 }
 
 
@@ -266,18 +278,12 @@
   }
 }
 
-
 void Dictionary::oops_do(OopClosure* f) {
-  for (int index = 0; index < table_size(); index++) {
-    for (DictionaryEntry* probe = bucket(index);
-                          probe != NULL;
-                          probe = probe->next()) {
-      probe->protection_domain_set_oops_do(f);
-    }
-  }
+  // Only the protection domain oops contain references into the heap. Iterate
+  // over all of them.
+  _pd_cache_table->oops_do(f);
 }
 
-
 void Dictionary::methods_do(void f(Method*)) {
   for (int index = 0; index < table_size(); index++) {
     for (DictionaryEntry* probe = bucket(index);
@@ -292,6 +298,11 @@
   }
 }
 
+void Dictionary::unlink(BoolObjectClosure* is_alive) {
+  // Only the protection domain cache table may contain references to the heap
+  // that need to be unlinked.
+  _pd_cache_table->unlink(is_alive);
+}
 
 Klass* Dictionary::try_get_next_class() {
   while (true) {
@@ -306,7 +317,6 @@
   // never reached
 }
 
-
 // Add a loaded class to the system dictionary.
 // Readers of the SystemDictionary aren't always locked, so _buckets
 // is volatile. The store of the next field in the constructor is
@@ -396,7 +406,7 @@
   assert(protection_domain() != NULL,
          "real protection domain should be present");
 
-  entry->add_protection_domain(protection_domain());
+  entry->add_protection_domain(this, protection_domain());
 
   assert(entry->contains_protection_domain(protection_domain()),
          "now protection domain should be present");
@@ -446,6 +456,146 @@
   }
 }
 
+ProtectionDomainCacheTable::ProtectionDomainCacheTable(int table_size)
+  : Hashtable<oop, mtClass>(table_size, sizeof(ProtectionDomainCacheEntry))
+{
+}
+
+void ProtectionDomainCacheTable::unlink(BoolObjectClosure* is_alive) {
+  assert(SafepointSynchronize::is_at_safepoint(), "must be");
+  for (int i = 0; i < table_size(); ++i) {
+    ProtectionDomainCacheEntry** p = bucket_addr(i);
+    ProtectionDomainCacheEntry* entry = bucket(i);
+    while (entry != NULL) {
+      if (is_alive->do_object_b(entry->literal())) {
+        p = entry->next_addr();
+      } else {
+        *p = entry->next();
+        free_entry(entry);
+      }
+      entry = *p;
+    }
+  }
+}
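The loop above uses the classic pointer-to-pointer splice to unlink dead entries in place; the same pass expressed in Java terms (illustrative, not VM code) would be:

import java.util.Map;
import java.util.function.Predicate;

class UnlinkSketch {
    // Drop every cached entry whose protection-domain key is no longer
    // alive according to the GC's liveness predicate.
    static <K, V> void unlink(Map<K, V> table, Predicate<K> isAlive) {
        table.keySet().removeIf(k -> !isAlive.test(k));
    }
}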
+
+void ProtectionDomainCacheTable::oops_do(OopClosure* f) {
+  for (int index = 0; index < table_size(); index++) {
+    for (ProtectionDomainCacheEntry* probe = bucket(index);
+                                     probe != NULL;
+                                     probe = probe->next()) {
+      probe->oops_do(f);
+    }
+  }
+}
+
+uint ProtectionDomainCacheTable::bucket_size() {
+  return sizeof(ProtectionDomainCacheEntry);
+}
+
+#ifndef PRODUCT
+void ProtectionDomainCacheTable::print() {
+  tty->print_cr("Protection domain cache table (table_size=%d, classes=%d)",
+                table_size(), number_of_entries());
+  for (int index = 0; index < table_size(); index++) {
+    for (ProtectionDomainCacheEntry* probe = bucket(index);
+                                     probe != NULL;
+                                     probe = probe->next()) {
+      probe->print();
+    }
+  }
+}
+
+void ProtectionDomainCacheEntry::print() {
+  tty->print_cr("entry "PTR_FORMAT" value "PTR_FORMAT" strongly_reachable %d next "PTR_FORMAT,
+                this, (void*)literal(), _strongly_reachable, next());
+}
+#endif
+
+void ProtectionDomainCacheTable::verify() {
+  int element_count = 0;
+  for (int index = 0; index < table_size(); index++) {
+    for (ProtectionDomainCacheEntry* probe = bucket(index);
+                                     probe != NULL;
+                                     probe = probe->next()) {
+      probe->verify();
+      element_count++;
+    }
+  }
+  guarantee(number_of_entries() == element_count,
+            "Verify of protection domain cache table failed");
+  debug_only(verify_lookup_length((double)number_of_entries() / table_size()));
+}
+
+void ProtectionDomainCacheEntry::verify() {
+  guarantee(literal()->is_oop(), "must be an oop");
+}
+
+void ProtectionDomainCacheTable::always_strong_oops_do(OopClosure* f) {
+  // The caller marked the protection domain cache entries that we need to apply
+  // the closure on. Only process them.
+  for (int index = 0; index < table_size(); index++) {
+    for (ProtectionDomainCacheEntry* probe = bucket(index);
+                                     probe != NULL;
+                                     probe = probe->next()) {
+      if (probe->is_strongly_reachable()) {
+        probe->reset_strongly_reachable();
+        probe->oops_do(f);
+      }
+    }
+  }
+}
+
+ProtectionDomainCacheEntry* ProtectionDomainCacheTable::get(oop protection_domain) {
+  unsigned int hash = compute_hash(protection_domain);
+  int index = hash_to_index(hash);
+
+  ProtectionDomainCacheEntry* entry = find_entry(index, protection_domain);
+  if (entry == NULL) {
+    entry = add_entry(index, hash, protection_domain);
+  }
+  return entry;
+}
+
+ProtectionDomainCacheEntry* ProtectionDomainCacheTable::find_entry(int index, oop protection_domain) {
+  for (ProtectionDomainCacheEntry* e = bucket(index); e != NULL; e = e->next()) {
+    if (e->protection_domain() == protection_domain) {
+      return e;
+    }
+  }
+
+  return NULL;
+}
+
+ProtectionDomainCacheEntry* ProtectionDomainCacheTable::add_entry(int index, unsigned int hash, oop protection_domain) {
+  assert_locked_or_safepoint(SystemDictionary_lock);
+  assert(index == index_for(protection_domain), "incorrect index?");
+  assert(find_entry(index, protection_domain) == NULL, "no double entry");
+
+  ProtectionDomainCacheEntry* p = new_entry(hash, protection_domain);
+  Hashtable<oop, mtClass>::add_entry(index, p);
+  return p;
+}
+
+void ProtectionDomainCacheTable::free(ProtectionDomainCacheEntry* to_delete) {
+  unsigned int hash = compute_hash(to_delete->protection_domain());
+  int index = hash_to_index(hash);
+
+  ProtectionDomainCacheEntry** p = bucket_addr(index);
+  ProtectionDomainCacheEntry* entry = bucket(index);
+  while (true) {
+    assert(entry != NULL, "sanity");
+
+    if (entry == to_delete) {
+      *p = entry->next();
+      Hashtable<oop, mtClass>::free_entry(entry);
+      break;
+    } else {
+      p = entry->next_addr();
+      entry = *p;
+    }
+  }
+}
+
 SymbolPropertyTable::SymbolPropertyTable(int table_size)
   : Hashtable<Symbol*, mtSymbol>(table_size, sizeof(SymbolPropertyEntry))
 {
@@ -532,11 +682,13 @@
       tty->cr();
     }
   }
+  tty->cr();
+  _pd_cache_table->print();
+  tty->cr();
 }
 
 #endif
 
-
 void Dictionary::verify() {
   guarantee(number_of_entries() >= 0, "Verify of system dictionary failed");
 
@@ -563,5 +715,7 @@
   guarantee(number_of_entries() == element_count,
             "Verify of system dictionary failed");
   debug_only(verify_lookup_length((double)number_of_entries() / table_size()));
+
+  _pd_cache_table->verify();
 }
 
--- a/src/share/vm/classfile/dictionary.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/classfile/dictionary.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,11 +27,14 @@
 
 #include "classfile/systemDictionary.hpp"
 #include "oops/instanceKlass.hpp"
-#include "oops/oop.hpp"
+#include "oops/oop.inline.hpp"
 #include "utilities/hashtable.hpp"
 
 class DictionaryEntry;
 class PSPromotionManager;
+class ProtectionDomainCacheTable;
+class ProtectionDomainCacheEntry;
+class BoolObjectClosure;
 
 //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 // The data structure for the system dictionary (and the shared system
@@ -45,6 +48,8 @@
   // pointer to the current hash table entry.
   static DictionaryEntry*       _current_class_entry;
 
+  ProtectionDomainCacheTable*   _pd_cache_table;
+
   DictionaryEntry* get_entry(int index, unsigned int hash,
                              Symbol* name, ClassLoaderData* loader_data);
 
@@ -93,6 +98,7 @@
 
   void methods_do(void f(Method*));
 
+  void unlink(BoolObjectClosure* is_alive);
 
   // Classes loaded by the bootstrap loader are always strongly reachable.
   // If we're not doing class unloading, all classes are strongly reachable.
@@ -118,6 +124,7 @@
   // Sharing support
   void reorder_dictionary();
 
+  ProtectionDomainCacheEntry* cache_get(oop protection_domain);
 
 #ifndef PRODUCT
   void print();
@@ -126,21 +133,112 @@
 };
 
 // The following classes can be in dictionary.cpp, but we need these
-// to be in header file so that SA's vmStructs can access.
+// to be in a header file so that SA's vmStructs can access them.
+class ProtectionDomainCacheEntry : public HashtableEntry<oop, mtClass> {
+  friend class VMStructs;
+ private:
+  // Flag indicating whether this protection domain entry is strongly reachable.
+  // Used while iterating over the system dictionary to remember oops that need
+  // to be updated.
+  bool _strongly_reachable;
+ public:
+  oop protection_domain() { return literal(); }
+
+  void init() {
+    _strongly_reachable = false;
+  }
+
+  ProtectionDomainCacheEntry* next() {
+    return (ProtectionDomainCacheEntry*)HashtableEntry<oop, mtClass>::next();
+  }
+
+  ProtectionDomainCacheEntry** next_addr() {
+    return (ProtectionDomainCacheEntry**)HashtableEntry<oop, mtClass>::next_addr();
+  }
+
+  void oops_do(OopClosure* f) {
+    f->do_oop(literal_addr());
+  }
+
+  void set_strongly_reachable()   { _strongly_reachable = true; }
+  bool is_strongly_reachable()    { return _strongly_reachable; }
+  void reset_strongly_reachable() { _strongly_reachable = false; }
+
+  void print() PRODUCT_RETURN;
+  void verify();
+};
+
+// The ProtectionDomainCacheTable contains all protection domain oops. The system
+// dictionary entries reference its entries instead of having references to oops
+// directly.
+// This is used to speed up system dictionary iteration: the oops in the
+// protection domains are the only ones referring into the Java heap. So when
+// these need to be updated, instead of going over every entry of the system
+// dictionary, we only need to iterate over this set.
+// The number of distinct protection domains used is typically orders of
+// magnitude smaller than the number of system dictionary entries (loaded classes).
+class ProtectionDomainCacheTable : public Hashtable<oop, mtClass> {
+  friend class VMStructs;
+private:
+  ProtectionDomainCacheEntry* bucket(int i) {
+    return (ProtectionDomainCacheEntry*) Hashtable<oop, mtClass>::bucket(i);
+  }
+
+  // The following method is not MT-safe and must be done under lock.
+  ProtectionDomainCacheEntry** bucket_addr(int i) {
+    return (ProtectionDomainCacheEntry**) Hashtable<oop, mtClass>::bucket_addr(i);
+  }
+
+  ProtectionDomainCacheEntry* new_entry(unsigned int hash, oop protection_domain) {
+    ProtectionDomainCacheEntry* entry = (ProtectionDomainCacheEntry*) Hashtable<oop, mtClass>::new_entry(hash, protection_domain);
+    entry->init();
+    return entry;
+  }
+
+  static unsigned int compute_hash(oop protection_domain) {
+    return (unsigned int)(protection_domain->identity_hash());
+  }
+
+  int index_for(oop protection_domain) {
+    return hash_to_index(compute_hash(protection_domain));
+  }
+
+  ProtectionDomainCacheEntry* add_entry(int index, unsigned int hash, oop protection_domain);
+  ProtectionDomainCacheEntry* find_entry(int index, oop protection_domain);
+
+public:
+
+  ProtectionDomainCacheTable(int table_size);
+
+  ProtectionDomainCacheEntry* get(oop protection_domain);
+  void free(ProtectionDomainCacheEntry* entry);
+
+  void unlink(BoolObjectClosure* cl);
+
+  // GC support
+  void oops_do(OopClosure* f);
+  void always_strong_oops_do(OopClosure* f);
+
+  static uint bucket_size();
+
+  void print() PRODUCT_RETURN;
+  void verify();
+};
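A hedged Java sketch of the design point documented above (hypothetical generic type D standing in for a protection-domain oop): entries are canonicalized so that a whole-table visit touches O(#domains) objects rather than O(#classes):

import java.util.HashMap;
import java.util.Map;
import java.util.function.Consumer;

class PdCacheSketch<D> {
    // One canonical entry per distinct protection domain.
    private final Map<D, D> canonical = new HashMap<>();

    // find-or-add, analogous to ProtectionDomainCacheTable::get above.
    D get(D pd) { return canonical.computeIfAbsent(pd, d -> d); }

    // GC-style visit: walks the small domain set, not every class entry.
    void forEachDomain(Consumer<D> visitor) {
        canonical.keySet().forEach(visitor);
    }
}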
+
 
 class ProtectionDomainEntry :public CHeapObj<mtClass> {
   friend class VMStructs;
  public:
   ProtectionDomainEntry* _next;
-  oop                    _protection_domain;
+  ProtectionDomainCacheEntry* _pd_cache;
 
-  ProtectionDomainEntry(oop protection_domain, ProtectionDomainEntry* next) {
-    _protection_domain = protection_domain;
-    _next              = next;
+  ProtectionDomainEntry(ProtectionDomainCacheEntry* pd_cache, ProtectionDomainEntry* next) {
+    _pd_cache = pd_cache;
+    _next     = next;
   }
 
   ProtectionDomainEntry* next() { return _next; }
-  oop protection_domain() { return _protection_domain; }
+  oop protection_domain() { return _pd_cache->protection_domain(); }
 };
 
 // An entry in the system dictionary, this describes a class as
@@ -151,6 +249,24 @@
  private:
   // Contains the set of approved protection domains that can access
   // this system dictionary entry.
+  //
+  // This protection domain set is a set of tuples:
+  //
+  // (InstanceKlass C, initiating class loader ICL, Protection Domain PD)
+  //
+  // [Note that C.protection_domain(), which is stored in the java.lang.Class
+  // mirror of C, is NOT the same as PD]
+  //
+  // If such an entry (C, ICL, PD) exists in the table, it means that
+  // it is okay for a class Foo to reference C, where
+  //
+  //    Foo.protection_domain() == PD, and
+  //    Foo's defining class loader == ICL
+  //
+  // The usage of the PD set can be seen in SystemDictionary::validate_protection_domain()
+  // It is essentially a cache to avoid repeated Java up-calls to
+  // ClassLoader.checkPackageAccess().
+  //
   ProtectionDomainEntry* _pd_set;
   ClassLoaderData*       _loader_data;
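The (C, ICL, PD) tuple semantics above can be sketched as a validated-access set in Java (hypothetical types; in the VM the check lives in SystemDictionary::validate_protection_domain):

import java.security.ProtectionDomain;
import java.util.HashSet;
import java.util.Set;

class PdSetSketch {
    // A recorded tuple means: code with protection domain pd, defined by
    // initiating class loader icl, has been cleared to reference class c.
    record Access(Class<?> c, ClassLoader icl, ProtectionDomain pd) { }

    private final Set<Access> approved = new HashSet<>();

    boolean isApproved(Access a) { return approved.contains(a); }

    // Record a tuple after a successful checkPackageAccess up-call so the
    // expensive check is not repeated.
    void approve(Access a) { approved.add(a); }
}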
 
@@ -158,7 +274,7 @@
   // Tells whether a protection domain is in the approved set.
   bool contains_protection_domain(oop protection_domain) const;
   // Adds a protection domain to the approved set.
-  void add_protection_domain(oop protection_domain);
+  void add_protection_domain(Dictionary* dict, oop protection_domain);
 
   Klass* klass() const { return (Klass*)literal(); }
   Klass** klass_addr() { return (Klass**)literal_addr(); }
@@ -189,12 +305,11 @@
          : contains_protection_domain(protection_domain());
   }
 
-
-  void protection_domain_set_oops_do(OopClosure* f) {
+  void set_strongly_reachable() {
     for (ProtectionDomainEntry* current = _pd_set;
                                 current != NULL;
                                 current = current->_next) {
-      f->do_oop(&(current->_protection_domain));
+      current->_pd_cache->set_strongly_reachable();
     }
   }
 
@@ -202,7 +317,7 @@
     for (ProtectionDomainEntry* current = _pd_set;
                                 current != NULL;
                                 current = current->_next) {
-      current->_protection_domain->verify();
+      current->_pd_cache->protection_domain()->verify();
     }
   }
 
@@ -264,7 +379,7 @@
     }
     if (method_type() != NULL) {
       if (printed)  st->print(" and ");
-      st->print(INTPTR_FORMAT, method_type());
+      st->print(INTPTR_FORMAT, (void *)method_type());
       printed = true;
     }
     st->print_cr(printed ? "" : "(empty)");
--- a/src/share/vm/classfile/genericSignatures.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1279 +0,0 @@
-/*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-
-#include "classfile/genericSignatures.hpp"
-#include "classfile/symbolTable.hpp"
-#include "classfile/systemDictionary.hpp"
-#include "memory/resourceArea.hpp"
-
-namespace generic {
-
-// Helper class for parsing the generic signature Symbol in klass and methods
-class DescriptorStream : public ResourceObj {
- private:
-  Symbol* _symbol;
-  int _offset;
-  int _mark;
-  const char* _parse_error;
-
-  void set_parse_error(const char* error) {
-    assert(error != NULL, "Can't set NULL error string");
-    _parse_error = error;
-  }
-
- public:
-  DescriptorStream(Symbol* sym)
-      : _symbol(sym), _offset(0), _mark(-1), _parse_error(NULL) {}
-
-  const char* parse_error() const {
-    return _parse_error;
-  }
-
-  bool at_end() { return _offset >= _symbol->utf8_length(); }
-
-  char peek() {
-    if (at_end()) {
-      set_parse_error("Peeking past end of signature");
-      return '\0';
-    } else {
-      return _symbol->byte_at(_offset);
-    }
-  }
-
-  char read() {
-    if (at_end()) {
-      set_parse_error("Reading past end of signature");
-      return '\0';
-    } else {
-      return _symbol->byte_at(_offset++);
-    }
-  }
-
-  void read(char expected) {
-    char c = read();
-    assert_char(c, expected, 0);
-  }
-
-  void assert_char(char c, char expected, int pos = -1) {
-    if (c != expected) {
-      const char* fmt = "Parse error at %d: expected %c but got %c";
-      size_t len = strlen(fmt) + 5;
-      char* buffer = NEW_RESOURCE_ARRAY(char, len);
-      jio_snprintf(buffer, len, fmt, _offset + pos, expected, c);
-      set_parse_error(buffer);
-    }
-  }
-
-  void push(char c) {
-    assert(c == _symbol->byte_at(_offset - 1), "Pushing back wrong value");
-    --_offset;
-  }
-
-  void expect_end() {
-    if (!at_end()) {
-      set_parse_error("Unexpected data trailing signature");
-    }
-  }
-
-  bool has_mark() { return _mark != -1; }
-
-  void set_mark() {
-    _mark = _offset;
-  }
-
-  Identifier* identifier_from_mark() {
-    assert(has_mark(), "Mark should be set");
-    if (!has_mark()) {
-      set_parse_error("Expected mark to be set");
-      return NULL;
-    } else {
-      Identifier* id = new Identifier(_symbol, _mark, _offset - 1);
-      _mark = -1;
-      return id;
-    }
-  }
-};
-
-
-#define CHECK_FOR_PARSE_ERROR()         \
-  if (STREAM->parse_error() != NULL) {   \
-    if (VerifyGenericSignatures) {      \
-      fatal(STREAM->parse_error());      \
-    }                                   \
-    return NULL;                        \
-  } (void)0
-
-#define READ() STREAM->read(); CHECK_FOR_PARSE_ERROR()
-#define PEEK() STREAM->peek(); CHECK_FOR_PARSE_ERROR()
-#define PUSH(c) STREAM->push(c)
-#define EXPECT(c) STREAM->read(c); CHECK_FOR_PARSE_ERROR()
-#define EXPECTED(c, ch) STREAM->assert_char(c, ch); CHECK_FOR_PARSE_ERROR()
-#define EXPECT_END() STREAM->expect_end(); CHECK_FOR_PARSE_ERROR()
-
-#define CHECK_STREAM STREAM); CHECK_FOR_PARSE_ERROR(); ((void)0
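// To make the control flow of these macros explicit: a call such as
//
//   TypeParameter* ftp = TypeParameter::parse_generic_signature(CHECK_STREAM);
//
// expands, after both macro substitutions, to roughly:
//
//   TypeParameter* ftp = TypeParameter::parse_generic_signature(STREAM);
//   if (STREAM->parse_error() != NULL) {
//     if (VerifyGenericSignatures) {
//       fatal(STREAM->parse_error());
//     }
//     return NULL;
//   } (void)0;
//   ((void)0);
//
// i.e. every READ/PEEK/EXPECT/CHECK_STREAM use bails out of the enclosing
// parse function with NULL as soon as the stream has recorded an error.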
-
-#ifndef PRODUCT
-void Identifier::print_on(outputStream* str) const {
-  for (int i = _begin; i < _end; ++i) {
-    str->print("%c", (char)_sym->byte_at(i));
-  }
-}
-#endif // ndef PRODUCT
-
-bool Identifier::equals(Identifier* other) {
-  if (_sym == other->_sym && _begin == other->_begin && _end == other->_end) {
-    return true;
-  } else if (_end - _begin != other->_end - other->_begin) {
-    return false;
-  } else {
-    size_t len = _end - _begin;
-    char* addr = ((char*)_sym->bytes()) + _begin;
-    char* oaddr = ((char*)other->_sym->bytes()) + other->_begin;
-    return strncmp(addr, oaddr, len) == 0;
-  }
-}
-
-bool Identifier::equals(Symbol* sym) {
-  Identifier id(sym, 0, sym->utf8_length());
-  return equals(&id);
-}
-
-/**
- * A formal type parameter may be found in the enclosing class, but it could
- * also come from an enclosing method or outer class, in the case of nested
- * classes or anonymous classes.  For example:
- *
- * class Outer<T,V> {
- *   class Inner<W> {
- *     void m(T t, V v, W w) {}
- *   }
- * }
- *
- * In this case, the type variables in m()'s signature are not all found in the
- * immediate enclosing class (Inner).  class Inner has only type parameter W,
- * but its outer_class field will reference Outer's descriptor, which contains
- * T & V (no outer_method in this case).
- *
- * If you have an anonymous class, it has both an enclosing method *and* an
- * enclosing class where type parameters can be declared:
- *
- * class MOuter<T> {
- *   <V> void bar(V v) {
- *     Runnable r = new Runnable() {
- *       public void run() {}
- *       public void foo(T t, V v) { ... }
- *     };
- *   }
- * }
- *
- * In this case, foo will be a member of an anonymous class (MOuter$1) which has
- * no formal type parameters itself, but has an outer_method (bar()) which provides
- * type parameter V, and an outer class MOuter with type parameter T.
- *
- * It is also possible that the outer class is itself an inner class to some
- * other class (or an anonymous class with an enclosing method), so we need to
- * follow the outer_class/outer_method chain to its end when looking for a
- * type parameter.
- */
-TypeParameter* Descriptor::find_type_parameter(Identifier* id, int* depth) {
-
-  int current_depth = 0;
-
-  MethodDescriptor* outer_method = as_method_signature();
-  ClassDescriptor* outer_class = as_class_signature();
-
-  if (outer_class == NULL) { // 'this' is a method signature; use the holder
-    outer_class = outer_method->outer_class();
-  }
-
-  while (outer_method != NULL || outer_class != NULL) {
-    if (outer_method != NULL) {
-      for (int i = 0; i < outer_method->type_parameters().length(); ++i) {
-        TypeParameter* p = outer_method->type_parameters().at(i);
-        if (p->identifier()->equals(id)) {
-          *depth = -1; // indicates that this is a method type parameter
-          return p;
-        }
-      }
-    }
-    if (outer_class != NULL) {
-      for (int i = 0; i < outer_class->type_parameters().length(); ++i) {
-        TypeParameter* p = outer_class->type_parameters().at(i);
-        if (p->identifier()->equals(id)) {
-          *depth = current_depth;
-          return p;
-        }
-      }
-      outer_method = outer_class->outer_method();
-      outer_class = outer_class->outer_class();
-      ++current_depth;
-    }
-  }
-
-  if (VerifyGenericSignatures) {
-    fatal("Could not resolve identifier");
-  }
-
-  return NULL;
-}
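// To make the depth bookkeeping above concrete: for the Outer/Inner example
// in the comment, the Signature attributes would read roughly as follows
// (a sketch per the JVMS grammar; the exact strings are compiler-dependent):
//
//   Outer:       <T:Ljava/lang/Object;V:Ljava/lang/Object;>Ljava/lang/Object;
//   Outer.Inner: <W:Ljava/lang/Object;>Ljava/lang/Object;
//   m:           (TT;TV;TW;)V
//
// Resolving m's variables against its descriptor finds W in Inner at class
// depth 0, and T and V in Outer at class depth 1 (one outer_class hop).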
-
-ClassDescriptor* ClassDescriptor::parse_generic_signature(Klass* klass, TRAPS) {
-  return parse_generic_signature(klass, NULL, CHECK_NULL);
-}
-
-ClassDescriptor* ClassDescriptor::parse_generic_signature(
-      Klass* klass, Symbol* original_name, TRAPS) {
-
-  InstanceKlass* ik = InstanceKlass::cast(klass);
-  Symbol* sym = ik->generic_signature();
-
-  ClassDescriptor* spec;
-
-  if (sym == NULL || (spec = ClassDescriptor::parse_generic_signature(sym)) == NULL) {
-    spec = ClassDescriptor::placeholder(ik);
-  }
-
-  u2 outer_index = get_outer_class_index(ik, CHECK_NULL);
-  if (outer_index != 0) {
-    if (original_name == NULL) {
-      original_name = ik->name();
-    }
-    Handle class_loader = Handle(THREAD, ik->class_loader());
-    Handle protection_domain = Handle(THREAD, ik->protection_domain());
-
-    Symbol* outer_name = ik->constants()->klass_name_at(outer_index);
-    Klass* outer = SystemDictionary::find(
-        outer_name, class_loader, protection_domain, CHECK_NULL);
-    if (outer == NULL && !THREAD->is_Compiler_thread()) {
-      if (outer_name == ik->super()->name()) {
-        outer = SystemDictionary::resolve_super_or_fail(original_name, outer_name,
-                                                        class_loader, protection_domain,
-                                                        false, CHECK_NULL);
-      }
-      else {
-        outer = SystemDictionary::resolve_or_fail(outer_name, class_loader,
-                                                  protection_domain, false, CHECK_NULL);
-      }
-    }
-
-    InstanceKlass* outer_ik = NULL;
-    ClassDescriptor* outer_spec = NULL;
-    if (outer == NULL) {
-      outer_spec = ClassDescriptor::placeholder(ik);
-      assert(false, "Outer class not loaded and not loadable from here");
-    } else {
-      outer_ik = InstanceKlass::cast(outer);
-      outer_spec = parse_generic_signature(outer, original_name, CHECK_NULL);
-    }
-    spec->set_outer_class(outer_spec);
-
-    u2 encl_method_idx = ik->enclosing_method_method_index();
-    if (encl_method_idx != 0 && outer_ik != NULL) {
-      ConstantPool* cp = ik->constants();
-      u2 name_index = cp->name_ref_index_at(encl_method_idx);
-      u2 sig_index = cp->signature_ref_index_at(encl_method_idx);
-      Symbol* name = cp->symbol_at(name_index);
-      Symbol* sig = cp->symbol_at(sig_index);
-      Method* m = outer_ik->find_method(name, sig);
-      if (m != NULL) {
-        Symbol* gsig = m->generic_signature();
-        if (gsig != NULL) {
-          MethodDescriptor* gms = MethodDescriptor::parse_generic_signature(gsig, outer_spec);
-          spec->set_outer_method(gms);
-        }
-      } else if (VerifyGenericSignatures) {
-        ResourceMark rm;
-        stringStream ss;
-        ss.print("Could not find method %s %s in class %s",
-          name->as_C_string(), sig->as_C_string(), outer_name->as_C_string());
-        fatal(ss.as_string());
-      }
-    }
-  }
-
-  spec->bind_variables_to_parameters();
-  return spec;
-}
-
-ClassDescriptor* ClassDescriptor::placeholder(InstanceKlass* klass) {
-  GrowableArray<TypeParameter*> formals;
-  GrowableArray<ClassType*> interfaces;
-  ClassType* super_type = NULL;
-
-  Klass* super_klass = klass->super();
-  if (super_klass != NULL) {
-    InstanceKlass* super = InstanceKlass::cast(super_klass);
-    super_type = ClassType::from_symbol(super->name());
-  }
-
-  for (int i = 0; i < klass->local_interfaces()->length(); ++i) {
-    InstanceKlass* iface = InstanceKlass::cast(klass->local_interfaces()->at(i));
-    interfaces.append(ClassType::from_symbol(iface->name()));
-  }
-  return new ClassDescriptor(formals, super_type, interfaces);
-}
-
-ClassDescriptor* ClassDescriptor::parse_generic_signature(Symbol* sym) {
-
-  DescriptorStream ds(sym);
-  DescriptorStream* STREAM = &ds;
-
-  GrowableArray<TypeParameter*> parameters(8);
-  char c = READ();
-  if (c == '<') {
-    c = READ();
-    while (c != '>') {
-      PUSH(c);
-      TypeParameter* ftp = TypeParameter::parse_generic_signature(CHECK_STREAM);
-      parameters.append(ftp);
-      c = READ();
-    }
-  } else {
-    PUSH(c);
-  }
-
-  EXPECT('L');
-  ClassType* super = ClassType::parse_generic_signature(CHECK_STREAM);
-
-  GrowableArray<ClassType*> signatures(2);
-  while (!STREAM->at_end()) {
-    EXPECT('L');
-    ClassType* iface = ClassType::parse_generic_signature(CHECK_STREAM);
-    signatures.append(iface);
-  }
-
-  EXPECT_END();
-
-  return new ClassDescriptor(parameters, super, signatures);
-}
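// As a concrete input (a sketch per the JVMS grammar, not taken from this
// file): a class declared as
//
//   class Foo<T> extends AbstractList<T> implements Comparable<Foo<T>>
//
// carries a signature along the lines of
//
//   <T:Ljava/lang/Object;>Ljava/util/AbstractList<TT;>;Ljava/lang/Comparable<LFoo<TT;>;>;
//
// which the loop above splits into one TypeParameter (T), the superclass
// ClassType, and a single interface ClassType.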
-
-#ifndef PRODUCT
-void ClassDescriptor::print_on(outputStream* str) const {
-  str->indent().print_cr("ClassDescriptor {");
-  {
-    streamIndentor si(str);
-    if (_type_parameters.length() > 0) {
-      str->indent().print_cr("Formals {");
-      {
-        streamIndentor si(str);
-        for (int i = 0; i < _type_parameters.length(); ++i) {
-          _type_parameters.at(i)->print_on(str);
-        }
-      }
-      str->indent().print_cr("}");
-    }
-    if (_super != NULL) {
-      str->indent().print_cr("Superclass: ");
-      {
-        streamIndentor si(str);
-        _super->print_on(str);
-      }
-    }
-    if (_interfaces.length() > 0) {
-      str->indent().print_cr("SuperInterfaces: {");
-      {
-        streamIndentor si(str);
-        for (int i = 0; i < _interfaces.length(); ++i) {
-          _interfaces.at(i)->print_on(str);
-        }
-      }
-      str->indent().print_cr("}");
-    }
-    if (_outer_method != NULL) {
-      str->indent().print_cr("Outer Method: {");
-      {
-        streamIndentor si(str);
-        _outer_method->print_on(str);
-      }
-      str->indent().print_cr("}");
-    }
-    if (_outer_class != NULL) {
-      str->indent().print_cr("Outer Class: {");
-      {
-        streamIndentor si(str);
-        _outer_class->print_on(str);
-      }
-      str->indent().print_cr("}");
-    }
-  }
-  str->indent().print_cr("}");
-}
-#endif // ndef PRODUCT
-
-ClassType* ClassDescriptor::interface_desc(Symbol* sym) {
-  for (int i = 0; i < _interfaces.length(); ++i) {
-    if (_interfaces.at(i)->identifier()->equals(sym)) {
-      return _interfaces.at(i);
-    }
-  }
-  if (VerifyGenericSignatures) {
-    fatal("Did not find expected interface");
-  }
-  return NULL;
-}
-
-void ClassDescriptor::bind_variables_to_parameters() {
-  if (_outer_class != NULL) {
-    _outer_class->bind_variables_to_parameters();
-  }
-  if (_outer_method != NULL) {
-    _outer_method->bind_variables_to_parameters();
-  }
-  for (int i = 0; i < _type_parameters.length(); ++i) {
-    _type_parameters.at(i)->bind_variables_to_parameters(this, i);
-  }
-  if (_super != NULL) {
-    _super->bind_variables_to_parameters(this);
-  }
-  for (int i = 0; i < _interfaces.length(); ++i) {
-    _interfaces.at(i)->bind_variables_to_parameters(this);
-  }
-}
-
-ClassDescriptor* ClassDescriptor::canonicalize(Context* ctx) {
-
-  GrowableArray<TypeParameter*> type_params(_type_parameters.length());
-  for (int i = 0; i < _type_parameters.length(); ++i) {
-    type_params.append(_type_parameters.at(i)->canonicalize(ctx, 0));
-  }
-
-  ClassDescriptor* outer = _outer_class == NULL ? NULL :
-      _outer_class->canonicalize(ctx);
-
-  ClassType* super = _super == NULL ? NULL : _super->canonicalize(ctx, 0);
-
-  GrowableArray<ClassType*> interfaces(_interfaces.length());
-  for (int i = 0; i < _interfaces.length(); ++i) {
-    interfaces.append(_interfaces.at(i)->canonicalize(ctx, 0));
-  }
-
-  MethodDescriptor* md = _outer_method == NULL ? NULL :
-      _outer_method->canonicalize(ctx);
-
-  return new ClassDescriptor(type_params, super, interfaces, outer, md);
-}
-
-u2 ClassDescriptor::get_outer_class_index(InstanceKlass* klass, TRAPS) {
-  int inner_index = InstanceKlass::inner_class_inner_class_info_offset;
-  int outer_index = InstanceKlass::inner_class_outer_class_info_offset;
-  int name_offset = InstanceKlass::inner_class_inner_name_offset;
-  int next_offset = InstanceKlass::inner_class_next_offset;
-
-  if (klass->inner_classes() == NULL || klass->inner_classes()->length() == 0) {
-    // No inner class info => no declaring class
-    return 0;
-  }
-
-  Array<u2>* i_icls = klass->inner_classes();
-  ConstantPool* i_cp = klass->constants();
-  int i_length = i_icls->length();
-
-  // Find inner_klass attribute
-  for (int i = 0; i + next_offset < i_length; i += next_offset) {
-    u2 ioff = i_icls->at(i + inner_index);
-    u2 ooff = i_icls->at(i + outer_index);
-    u2 noff = i_icls->at(i + name_offset);
-    if (ioff != 0) {
-      // Check to see if the name matches the class we're looking for
-      // before attempting to find the class.
-      if (i_cp->klass_name_at_matches(klass, ioff) && ooff != 0) {
-        return ooff;
-      }
-    }
-  }
-
-  // It may be anonymous; try for that.
-  u2 encl_method_class_idx = klass->enclosing_method_class_index();
-  if (encl_method_class_idx != 0) {
-    return encl_method_class_idx;
-  }
-
-  return 0;
-}
-
-MethodDescriptor* MethodDescriptor::parse_generic_signature(Method* m, ClassDescriptor* outer) {
-  Symbol* generic_sig = m->generic_signature();
-  MethodDescriptor* md = NULL;
-  if (generic_sig == NULL || (md = parse_generic_signature(generic_sig, outer)) == NULL) {
-    md = parse_generic_signature(m->signature(), outer);
-  }
-  assert(md != NULL, "Could not parse method signature");
-  md->bind_variables_to_parameters();
-  return md;
-}
-
-MethodDescriptor* MethodDescriptor::parse_generic_signature(Symbol* sym, ClassDescriptor* outer) {
-
-  DescriptorStream ds(sym);
-  DescriptorStream* STREAM = &ds;
-
-  GrowableArray<TypeParameter*> params(8);
-  char c = READ();
-  if (c == '<') {
-    c = READ();
-    while (c != '>') {
-      PUSH(c);
-      TypeParameter* ftp = TypeParameter::parse_generic_signature(CHECK_STREAM);
-      params.append(ftp);
-      c = READ();
-    }
-  } else {
-    PUSH(c);
-  }
-
-  EXPECT('(');
-
-  GrowableArray<Type*> parameters(8);
-  c = READ();
-  while (c != ')') {
-    PUSH(c);
-    Type* arg = Type::parse_generic_signature(CHECK_STREAM);
-    parameters.append(arg);
-    c = READ();
-  }
-
-  Type* rt = Type::parse_generic_signature(CHECK_STREAM);
-
-  GrowableArray<Type*> throws;
-  while (!STREAM->at_end()) {
-    EXPECT('^');
-    Type* spec = Type::parse_generic_signature(CHECK_STREAM);
-    throws.append(spec);
-  }
-
-  return new MethodDescriptor(params, outer, parameters, rt, throws);
-}
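// For example (a sketch per the JVMS grammar): a method declared as
//
//   <X> X pick(X x, List<X> xs) throws IOException
//
// is encoded roughly as
//
//   <X:Ljava/lang/Object;>(TX;Ljava/util/List<TX;>;)TX;^Ljava/io/IOException;
//
// yielding one formal TypeParameter, two parameter Types, a TypeVariable
// return type, and one entry in the throws list.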
-
-void MethodDescriptor::bind_variables_to_parameters() {
-  for (int i = 0; i < _type_parameters.length(); ++i) {
-    _type_parameters.at(i)->bind_variables_to_parameters(this, i);
-  }
-  for (int i = 0; i < _parameters.length(); ++i) {
-    _parameters.at(i)->bind_variables_to_parameters(this);
-  }
-  _return_type->bind_variables_to_parameters(this);
-  for (int i = 0; i < _throws.length(); ++i) {
-    _throws.at(i)->bind_variables_to_parameters(this);
-  }
-}
-
-bool MethodDescriptor::covariant_match(MethodDescriptor* other, Context* ctx) {
-
-  if (_parameters.length() == other->_parameters.length()) {
-    for (int i = 0; i < _parameters.length(); ++i) {
-      if (!_parameters.at(i)->covariant_match(other->_parameters.at(i), ctx)) {
-        return false;
-      }
-    }
-
-    if (_return_type->as_primitive() != NULL) {
-      return _return_type->covariant_match(other->_return_type, ctx);
-    } else {
-      // return type is a reference
-      return other->_return_type->as_class() != NULL ||
-             other->_return_type->as_variable() != NULL ||
-             other->_return_type->as_array() != NULL;
-    }
-  } else {
-    return false;
-  }
-}
-
-MethodDescriptor* MethodDescriptor::canonicalize(Context* ctx) {
-
-  GrowableArray<TypeParameter*> type_params(_type_parameters.length());
-  for (int i = 0; i < _type_parameters.length(); ++i) {
-    type_params.append(_type_parameters.at(i)->canonicalize(ctx, 0));
-  }
-
-  ClassDescriptor* outer = _outer_class == NULL ? NULL :
-      _outer_class->canonicalize(ctx);
-
-  GrowableArray<Type*> params(_parameters.length());
-  for (int i = 0; i < _parameters.length(); ++i) {
-    params.append(_parameters.at(i)->canonicalize(ctx, 0));
-  }
-
-  Type* rt = _return_type->canonicalize(ctx, 0);
-
-  GrowableArray<Type*> throws(_throws.length());
-  for (int i = 0; i < _throws.length(); ++i) {
-    throws.append(_throws.at(i)->canonicalize(ctx, 0));
-  }
-
-  return new MethodDescriptor(type_params, outer, params, rt, throws);
-}
-
-#ifndef PRODUCT
-TempNewSymbol MethodDescriptor::reify_signature(Context* ctx, TRAPS) {
-  stringStream ss(256);
-
-  ss.print("(");
-  for (int i = 0; i < _parameters.length(); ++i) {
-    _parameters.at(i)->reify_signature(&ss, ctx);
-  }
-  ss.print(")");
-  _return_type->reify_signature(&ss, ctx);
-  return SymbolTable::new_symbol(ss.base(), (int)ss.size(), THREAD);
-}
-
-void MethodDescriptor::print_on(outputStream* str) const {
-  str->indent().print_cr("MethodDescriptor {");
-  {
-    streamIndentor si(str);
-    if (_type_parameters.length() > 0) {
-      str->indent().print_cr("Formals: {");
-      {
-        streamIndentor si(str);
-        for (int i = 0; i < _type_parameters.length(); ++i) {
-          _type_parameters.at(i)->print_on(str);
-        }
-      }
-      str->indent().print_cr("}");
-    }
-    str->indent().print_cr("Parameters: {");
-    {
-      streamIndentor si(str);
-      for (int i = 0; i < _parameters.length(); ++i) {
-        _parameters.at(i)->print_on(str);
-      }
-    }
-    str->indent().print_cr("}");
-    str->indent().print_cr("Return Type: ");
-    {
-      streamIndentor si(str);
-      _return_type->print_on(str);
-    }
-
-    if (_throws.length() > 0) {
-      str->indent().print_cr("Throws: {");
-      {
-        streamIndentor si(str);
-        for (int i = 0; i < _throws.length(); ++i) {
-          _throws.at(i)->print_on(str);
-        }
-      }
-      str->indent().print_cr("}");
-    }
-  }
-  str->indent().print_cr("}");
-}
-#endif // ndef PRODUCT
-
-TypeParameter* TypeParameter::parse_generic_signature(DescriptorStream* STREAM) {
-  STREAM->set_mark();
-  char c = READ();
-  while (c != ':') {
-    c = READ();
-  }
-
-  Identifier* id = STREAM->identifier_from_mark();
-
-  ClassType* class_bound = NULL;
-  GrowableArray<ClassType*> interface_bounds(8);
-
-  c = READ();
-  if (c != '>') {
-    if (c != ':') {
-      EXPECTED(c, 'L');
-      class_bound = ClassType::parse_generic_signature(CHECK_STREAM);
-      c = READ();
-    }
-
-    while (c == ':') {
-      EXPECT('L');
-      ClassType* fts = ClassType::parse_generic_signature(CHECK_STREAM);
-      interface_bounds.append(fts);
-      c = READ();
-    }
-  }
-  PUSH(c);
-
-  return new TypeParameter(id, class_bound, interface_bounds);
-}
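// The encodings handled above look like the following (per the JVMS
// FormalTypeParameter grammar; illustrative, not exhaustive):
//
//   T:Ljava/lang/Object;                    T extends Object
//   U::Ljava/lang/Runnable;                 U extends Runnable (empty class bound)
//   V:LFoo;:Ljava/lang/Comparable<TV;>;     V extends Foo & Comparable<V>
//
// An interface-only bound leaves the class bound empty, producing the double
// colon that the inner while (c == ':') loop consumes.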
-
-void TypeParameter::bind_variables_to_parameters(Descriptor* sig, int position) {
-  if (_class_bound != NULL) {
-    _class_bound->bind_variables_to_parameters(sig);
-  }
-  for (int i = 0; i < _interface_bounds.length(); ++i) {
-    _interface_bounds.at(i)->bind_variables_to_parameters(sig);
-  }
-  _position = position;
-}
-
-Type* TypeParameter::resolve(
-    Context* ctx, int inner_depth, int ctx_depth) {
-
-  if (inner_depth == -1) {
-    // This indicates that the parameter is a method type parameter, which
-    // isn't resolvable using the class hierarchy context
-    return bound();
-  }
-
-  ClassType* provider = ctx->at_depth(ctx_depth);
-  if (provider != NULL) {
-    for (int i = 0; i < inner_depth && provider != NULL; ++i) {
-      provider = provider->outer_class();
-    }
-    if (provider != NULL) {
-      TypeArgument* arg = provider->type_argument_at(_position);
-      if (arg != NULL) {
-        Type* value = arg->lower_bound();
-        return value->canonicalize(ctx, ctx_depth + 1);
-      }
-    }
-  }
-
-  return bound();
-}
-
-TypeParameter* TypeParameter::canonicalize(Context* ctx, int ctx_depth) {
-  ClassType* bound = _class_bound == NULL ? NULL :
-     _class_bound->canonicalize(ctx, ctx_depth);
-
-  GrowableArray<ClassType*> ifaces(_interface_bounds.length());
-  for (int i = 0; i < _interface_bounds.length(); ++i) {
-    ifaces.append(_interface_bounds.at(i)->canonicalize(ctx, ctx_depth));
-  }
-
-  TypeParameter* ret = new TypeParameter(_identifier, bound, ifaces);
-  ret->_position = _position;
-  return ret;
-}
-
-ClassType* TypeParameter::bound() {
-  if (_class_bound != NULL) {
-    return _class_bound;
-  }
-
-  if (_interface_bounds.length() == 1) {
-    return _interface_bounds.at(0);
-  }
-
-  return ClassType::java_lang_Object(); // TODO: investigate this case
-}
-
-#ifndef PRODUCT
-void TypeParameter::print_on(outputStream* str) const {
-  str->indent().print_cr("Formal: {");
-  {
-    streamIndentor si(str);
-
-    str->indent().print("Identifier: ");
-    _identifier->print_on(str);
-    str->print_cr("");
-    if (_class_bound != NULL) {
-      str->indent().print_cr("Class Bound: ");
-      streamIndentor si(str);
-      _class_bound->print_on(str);
-    }
-    if (_interface_bounds.length() > 0) {
-      str->indent().print_cr("Interface Bounds: {");
-      {
-        streamIndentor si(str);
-        for (int i = 0; i < _interface_bounds.length(); ++i) {
-          _interface_bounds.at(i)->print_on(str);
-        }
-      }
-      str->indent().print_cr("}");
-    }
-    str->indent().print_cr("Ordinal Position: %d", _position);
-  }
-  str->indent().print_cr("}");
-}
-#endif // ndef PRODUCT
-
-Type* Type::parse_generic_signature(DescriptorStream* STREAM) {
-  char c = READ();
-  switch (c) {
-    case 'L':
-      return ClassType::parse_generic_signature(CHECK_STREAM);
-    case 'T':
-      return TypeVariable::parse_generic_signature(CHECK_STREAM);
-    case '[':
-      return ArrayType::parse_generic_signature(CHECK_STREAM);
-    default:
-      return new PrimitiveType(c);
-  }
-}
-
-Identifier* ClassType::parse_generic_signature_simple(GrowableArray<TypeArgument*>* args,
-    bool* has_inner, DescriptorStream* STREAM) {
-  STREAM->set_mark();
-
-  char c = READ();
-  while (c != ';' && c != '.' && c != '<') { c = READ(); }
-  Identifier* id = STREAM->identifier_from_mark();
-
-  if (c == '<') {
-    c = READ();
-    while (c != '>') {
-      PUSH(c);
-      TypeArgument* arg = TypeArgument::parse_generic_signature(CHECK_STREAM);
-      args->append(arg);
-      c = READ();
-    }
-    c = READ();
-  }
-
-  *has_inner = (c == '.');
-  if (!(*has_inner)) {
-    EXPECTED(c, ';');
-  }
-
-  return id;
-}
-
-ClassType* ClassType::parse_generic_signature(DescriptorStream* STREAM) {
-  return parse_generic_signature(NULL, CHECK_STREAM);
-}
-
-ClassType* ClassType::parse_generic_signature(ClassType* outer, DescriptorStream* STREAM) {
-  GrowableArray<TypeArgument*> args;
-  ClassType* gct = NULL;
-  bool has_inner = false;
-
-  Identifier* id = parse_generic_signature_simple(&args, &has_inner, STREAM);
-  if (id != NULL) {
-    gct = new ClassType(id, args, outer);
-
-    if (has_inner) {
-      gct = parse_generic_signature(gct, CHECK_STREAM);
-    }
-  }
-  return gct;
-}
-
-ClassType* ClassType::from_symbol(Symbol* sym) {
-  assert(sym != NULL, "Must not be null");
-  GrowableArray<TypeArgument*> args;
-  Identifier* id = new Identifier(sym, 0, sym->utf8_length());
-  return new ClassType(id, args, NULL);
-}
-
-ClassType* ClassType::java_lang_Object() {
-  return from_symbol(vmSymbols::java_lang_Object());
-}
-
-void ClassType::bind_variables_to_parameters(Descriptor* sig) {
-  for (int i = 0; i < _type_arguments.length(); ++i) {
-    _type_arguments.at(i)->bind_variables_to_parameters(sig);
-  }
-  if (_outer_class != NULL) {
-    _outer_class->bind_variables_to_parameters(sig);
-  }
-}
-
-TypeArgument* ClassType::type_argument_at(int i) {
-  if (i >= 0 && i < _type_arguments.length()) {
-    return _type_arguments.at(i);
-  } else {
-    return NULL;
-  }
-}
-
-#ifndef PRODUCT
-void ClassType::reify_signature(stringStream* ss, Context* ctx) {
-  ss->print("L");
-  _identifier->print_on(ss);
-  ss->print(";");
-}
-
-void ClassType::print_on(outputStream* str) const {
-  str->indent().print_cr("Class {");
-  {
-    streamIndentor si(str);
-    str->indent().print("Name: ");
-    _identifier->print_on(str);
-    str->print_cr("");
-    if (_type_arguments.length() != 0) {
-      str->indent().print_cr("Type Arguments: {");
-      {
-        streamIndentor si(str);
-        for (int j = 0; j < _type_arguments.length(); ++j) {
-          _type_arguments.at(j)->print_on(str);
-        }
-      }
-      str->indent().print_cr("}");
-    }
-    if (_outer_class != NULL) {
-      str->indent().print_cr("Outer Class: ");
-      streamIndentor sir(str);
-      _outer_class->print_on(str);
-    }
-  }
-  str->indent().print_cr("}");
-}
-#endif // ndef PRODUCT
-
-bool ClassType::covariant_match(Type* other, Context* ctx) {
-
-  if (other == this) {
-    return true;
-  }
-
-  TypeVariable* variable = other->as_variable();
-  if (variable != NULL) {
-    other = variable->resolve(ctx, 0);
-  }
-
-  ClassType* outer = outer_class();
-  ClassType* other_class = other->as_class();
-
-  if (other_class == NULL ||
-      (outer == NULL) != (other_class->outer_class() == NULL)) {
-    return false;
-  }
-
-  if (!_identifier->equals(other_class->_identifier)) {
-    return false;
-  }
-
-  if (outer != NULL && !outer->covariant_match(other_class->outer_class(), ctx)) {
-    return false;
-  }
-
-  return true;
-}
-
-ClassType* ClassType::canonicalize(Context* ctx, int ctx_depth) {
-
-  GrowableArray<TypeArgument*> args(_type_arguments.length());
-  for (int i = 0; i < _type_arguments.length(); ++i) {
-    args.append(_type_arguments.at(i)->canonicalize(ctx, ctx_depth));
-  }
-
-  ClassType* outer = _outer_class == NULL ? NULL :
-      _outer_class->canonicalize(ctx, ctx_depth);
-
-  return new ClassType(_identifier, args, outer);
-}
-
-TypeVariable* TypeVariable::parse_generic_signature(DescriptorStream* STREAM) {
-  STREAM->set_mark();
-  char c = READ();
-  while (c != ';') {
-    c = READ();
-  }
-  Identifier* id = STREAM->identifier_from_mark();
-
-  return new TypeVariable(id);
-}
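// Note: by the time this runs, Type::parse_generic_signature() has already
// consumed the leading 'T' tag, so for a use such as "TX;" this reads "X;"
// and yields the Identifier "X" (identifier_from_mark() excludes the ';').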
-
-void TypeVariable::bind_variables_to_parameters(Descriptor* sig) {
-  _parameter = sig->find_type_parameter(_id, &_inner_depth);
-  if (VerifyGenericSignatures && _parameter == NULL) {
-    fatal("Could not find formal parameter");
-  }
-}
-
-Type* TypeVariable::resolve(Context* ctx, int ctx_depth) {
-  if (parameter() != NULL) {
-    return parameter()->resolve(ctx, inner_depth(), ctx_depth);
-  } else {
-    if (VerifyGenericSignatures) {
-      fatal("Type variable matches no parameter");
-    }
-    return NULL;
-  }
-}
-
-bool TypeVariable::covariant_match(Type* other, Context* ctx) {
-
-  if (other == this) {
-    return true;
-  }
-
-  Context my_context(NULL); // empty, results in erasure
-  Type* my_type = resolve(&my_context, 0);
-  if (my_type == NULL) {
-    return false;
-  }
-
-  return my_type->covariant_match(other, ctx);
-}
-
-Type* TypeVariable::canonicalize(Context* ctx, int ctx_depth) {
-  return resolve(ctx, ctx_depth);
-}
-
-#ifndef PRODUCT
-void TypeVariable::reify_signature(stringStream* ss, Context* ctx) {
-  Type* type = resolve(ctx, 0);
-  if (type != NULL) {
-    type->reify_signature(ss, ctx);
-  }
-}
-
-void TypeVariable::print_on(outputStream* str) const {
-  str->indent().print_cr("Type Variable {");
-  {
-    streamIndentor si(str);
-    str->indent().print("Name: ");
-    _id->print_on(str);
-    str->print_cr("");
-    str->indent().print_cr("Inner depth: %d", _inner_depth);
-  }
-  str->indent().print_cr("}");
-}
-#endif // ndef PRODUCT
-
-ArrayType* ArrayType::parse_generic_signature(DescriptorStream* STREAM) {
-  Type* base = Type::parse_generic_signature(CHECK_STREAM);
-  return new ArrayType(base);
-}
-
-void ArrayType::bind_variables_to_parameters(Descriptor* sig) {
-  assert(_base != NULL, "Invalid base");
-  _base->bind_variables_to_parameters(sig);
-}
-
-bool ArrayType::covariant_match(Type* other, Context* ctx) {
-  assert(_base != NULL, "Invalid base");
-
-  if (other == this) {
-    return true;
-  }
-
-  ArrayType* other_array = other->as_array();
-  return (other_array != NULL && _base->covariant_match(other_array->_base, ctx));
-}
-
-ArrayType* ArrayType::canonicalize(Context* ctx, int ctx_depth) {
-  assert(_base != NULL, "Invalid base");
-  return new ArrayType(_base->canonicalize(ctx, ctx_depth));
-}
-
-#ifndef PRODUCT
-void ArrayType::reify_signature(stringStream* ss, Context* ctx) {
-  assert(_base != NULL, "Invalid base");
-  ss->print("[");
-  _base->reify_signature(ss, ctx);
-}
-
-void ArrayType::print_on(outputStream* str) const {
-  str->indent().print_cr("Array {");
-  {
-    streamIndentor si(str);
-    _base->print_on(str);
-  }
-  str->indent().print_cr("}");
-}
-#endif // ndef PRODUCT
-
-bool PrimitiveType::covariant_match(Type* other, Context* ctx) {
-
-  PrimitiveType* other_prim = other->as_primitive();
-  return (other_prim != NULL && _type == other_prim->_type);
-}
-
-PrimitiveType* PrimitiveType::canonicalize(Context* ctx, int ctxd) {
-  return this;
-}
-
-#ifndef PRODUCT
-void PrimitiveType::reify_signature(stringStream* ss, Context* ctx) {
-  ss->print("%c", _type);
-}
-
-void PrimitiveType::print_on(outputStream* str) const {
-  str->indent().print_cr("Primitive: '%c'", _type);
-}
-#endif // ndef PRODUCT
-
-void PrimitiveType::bind_variables_to_parameters(Descriptor* sig) {
-}
-
-TypeArgument* TypeArgument::parse_generic_signature(DescriptorStream* STREAM) {
-  char c = READ();
-  Type* type = NULL;
-
-  switch (c) {
-    case '*':
-      return new TypeArgument(ClassType::java_lang_Object(), NULL);
-      break;
-    default:
-      PUSH(c);
-      // fall-through
-    case '+':
-    case '-':
-      type = Type::parse_generic_signature(CHECK_STREAM);
-      if (c == '+') {
-        return new TypeArgument(type, NULL);
-      } else if (c == '-') {
-        return new TypeArgument(ClassType::java_lang_Object(), type);
-      } else {
-        return new TypeArgument(type, type);
-      }
-  }
-}
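// Mapping this back to source-level type arguments, with the (lower, upper)
// bounds pair stored by TypeArgument (a sketch; java.util.List is just an
// example):
//
//   List<String>            Ljava/util/List<Ljava/lang/String;>;    (String, String)
//   List<?>                 Ljava/util/List<*>;                     (Object, NULL)
//   List<? extends Number>  Ljava/util/List<+Ljava/lang/Number;>;   (Number, NULL)
//   List<? super Integer>   Ljava/util/List<-Ljava/lang/Integer;>;  (Object, Integer)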
-
-void TypeArgument::bind_variables_to_parameters(Descriptor* sig) {
-  assert(_lower_bound != NULL, "Invalid lower bound");
-  _lower_bound->bind_variables_to_parameters(sig);
-  if (_upper_bound != NULL && _upper_bound != _lower_bound) {
-    _upper_bound->bind_variables_to_parameters(sig);
-  }
-}
-
-bool TypeArgument::covariant_match(TypeArgument* other, Context* ctx) {
-  assert(_lower_bound != NULL, "Invalid lower bound");
-
-  if (other == this) {
-    return true;
-  }
-
-  if (!_lower_bound->covariant_match(other->lower_bound(), ctx)) {
-    return false;
-  }
-  return true;
-}
-
-TypeArgument* TypeArgument::canonicalize(Context* ctx, int ctx_depth) {
-  assert(_lower_bound != NULL, "Invalid lower bound");
-  Type* lower = _lower_bound->canonicalize(ctx, ctx_depth);
-  Type* upper = NULL;
-
-  if (_upper_bound == _lower_bound) {
-    upper = lower;
-  } else if (_upper_bound != NULL) {
-    upper = _upper_bound->canonicalize(ctx, ctx_depth);
-  }
-
-  return new TypeArgument(lower, upper);
-}
-
-#ifndef PRODUCT
-void TypeArgument::print_on(outputStream* str) const {
-  str->indent().print_cr("TypeArgument {");
-  {
-    streamIndentor si(str);
-    if (_lower_bound != NULL) {
-      str->indent().print("Lower bound: ");
-      _lower_bound->print_on(str);
-    }
-    if (_upper_bound != NULL) {
-      str->indent().print("Upper bound: ");
-      _upper_bound->print_on(str);
-    }
-  }
-  str->indent().print_cr("}");
-}
-#endif // ndef PRODUCT
-
-void Context::Mark::destroy() {
-  if (is_active()) {
-    _context->reset_to_mark(_marked_size);
-  }
-  deactivate();
-}
-
-void Context::apply_type_arguments(
-    InstanceKlass* current, InstanceKlass* super, TRAPS) {
-  assert(_cache != NULL, "Cannot use an empty context");
-  ClassType* spec = NULL;
-  if (current != NULL) {
-    ClassDescriptor* descriptor = _cache->descriptor_for(current, CHECK);
-    if (super == current->super()) {
-      spec = descriptor->super();
-    } else {
-      spec = descriptor->interface_desc(super->name());
-    }
-    if (spec != NULL) {
-      _type_arguments.push(spec);
-    }
-  }
-}
-
-void Context::reset_to_mark(int size) {
-  _type_arguments.trunc_to(size);
-}
-
-ClassType* Context::at_depth(int i) const {
-  if (i < _type_arguments.length()) {
-    return _type_arguments.at(_type_arguments.length() - 1 - i);
-  }
-  return NULL;
-}
-
-#ifndef PRODUCT
-void Context::print_on(outputStream* str) const {
-  str->indent().print_cr("Context {");
-  for (int i = 0; i < _type_arguments.length(); ++i) {
-    streamIndentor si(str);
-    str->indent().print("leval %d: ", i);
-    ClassType* ct = at_depth(i);
-    if (ct == NULL) {
-      str->print_cr("<empty>");
-      continue;
-    } else {
-      str->print_cr("{");
-    }
-
-    for (int j = 0; j < ct->type_arguments_length(); ++j) {
-      streamIndentor si(str);
-      TypeArgument* ta = ct->type_argument_at(j);
-      Type* bound = ta->lower_bound();
-      bound->print_on(str);
-    }
-    str->indent().print_cr("}");
-  }
-  str->indent().print_cr("}");
-}
-#endif // ndef PRODUCT
-
-ClassDescriptor* DescriptorCache::descriptor_for(InstanceKlass* ik, TRAPS) {
-
-  ClassDescriptor** existing = _class_descriptors.get(ik);
-  if (existing == NULL) {
-    ClassDescriptor* cd = ClassDescriptor::parse_generic_signature(ik, CHECK_NULL);
-    _class_descriptors.put(ik, cd);
-    return cd;
-  } else {
-    return *existing;
-  }
-}
-
-MethodDescriptor* DescriptorCache::descriptor_for(
-    Method* mh, ClassDescriptor* cd, TRAPS) {
-  assert(mh != NULL && cd != NULL, "Should not be NULL");
-  MethodDescriptor** existing = _method_descriptors.get(mh);
-  if (existing == NULL) {
-    MethodDescriptor* md = MethodDescriptor::parse_generic_signature(mh, cd);
-    _method_descriptors.put(mh, md);
-    return md;
-  } else {
-    return *existing;
-  }
-}
-MethodDescriptor* DescriptorCache::descriptor_for(Method* mh, TRAPS) {
-  ClassDescriptor* cd = descriptor_for(
-      InstanceKlass::cast(mh->method_holder()), CHECK_NULL);
-  return descriptor_for(mh, cd, THREAD);
-}
-
-} // namespace generic
--- a/src/share/vm/classfile/genericSignatures.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,467 +0,0 @@
-/*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_CLASSFILE_GENERICSIGNATURES_HPP
-#define SHARE_VM_CLASSFILE_GENERICSIGNATURES_HPP
-
-#include "classfile/symbolTable.hpp"
-#include "memory/allocation.hpp"
-#include "runtime/signature.hpp"
-#include "utilities/growableArray.hpp"
-#include "utilities/resourceHash.hpp"
-
-class stringStream;
-
-namespace generic {
-
-class Identifier;
-class ClassDescriptor;
-class MethodDescriptor;
-
-class TypeParameter; // a formal type parameter declared in generic signatures
-class TypeArgument;  // The "type value" passed to fill parameters in supertypes
-class TypeVariable;  // A usage of a type parameter as a value
-/**
- * Example:
- *
- * class Foo<T, V> extends Bar<String> { int m(V v) { ... } }
- *          ^^^^^^             ^^^^^^          ^^^
- *       type parameters    type argument   type variable
- *
- * Note that a type variable can also be used as a type argument:
- * class Foo<T, V> extends Bar<T> { int m(V v) { ... } }
- *                             ^
- *                             type argument's value is a type variable
- */
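// For reference, the first example above would be encoded in the class file
// roughly as follows (per the JVMS grammar):
//
//   Foo: <T:Ljava/lang/Object;V:Ljava/lang/Object;>LBar<Ljava/lang/String;>;
//   m:   (TV;)I
//
// The second example differs only in the superclass part: LBar<TT;>;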
-
-
-class Type;
-class ClassType;
-class ArrayType;
-class PrimitiveType;
-class Context;
-class DescriptorCache;
-
-class DescriptorStream;
-
-class Identifier : public ResourceObj {
- private:
-  Symbol* _sym;
-  int _begin;
-  int _end;
-
- public:
-  Identifier(Symbol* sym, int begin, int end) :
-    _sym(sym), _begin(begin), _end(end) {}
-
-  bool equals(Identifier* other);
-  bool equals(Symbol* sym);
-
-#ifndef PRODUCT
-  void print_on(outputStream* str) const;
-#endif // ndef PRODUCT
-};
-
-class Descriptor : public ResourceObj {
- protected:
-  GrowableArray<TypeParameter*> _type_parameters;
-  ClassDescriptor* _outer_class;
-
-  Descriptor(GrowableArray<TypeParameter*>& params,
-    ClassDescriptor* outer)
-    : _type_parameters(params), _outer_class(outer) {}
-
- public:
-
-  ClassDescriptor* outer_class() { return _outer_class; }
-  void set_outer_class(ClassDescriptor* sig) { _outer_class = sig; }
-
-  virtual ClassDescriptor* as_class_signature() { return NULL; }
-  virtual MethodDescriptor* as_method_signature() { return NULL; }
-
-  bool is_class_signature() { return as_class_signature() != NULL; }
-  bool is_method_signature() { return as_method_signature() != NULL; }
-
-  GrowableArray<TypeParameter*>& type_parameters() {
-    return _type_parameters;
-  }
-
-  TypeParameter* find_type_parameter(Identifier* id, int* param_depth);
-
-  virtual void bind_variables_to_parameters() = 0;
-
-#ifndef PRODUCT
-  virtual void print_on(outputStream* str) const = 0;
-#endif
-};
-
-class ClassDescriptor : public Descriptor {
- private:
-  ClassType* _super;
-  GrowableArray<ClassType*> _interfaces;
-  MethodDescriptor* _outer_method;
-
-  ClassDescriptor(GrowableArray<TypeParameter*>& ftp, ClassType* scs,
-      GrowableArray<ClassType*>& sis, ClassDescriptor* outer_class = NULL,
-      MethodDescriptor* outer_method = NULL)
-        : Descriptor(ftp, outer_class), _super(scs), _interfaces(sis),
-          _outer_method(outer_method) {}
-
-  static u2 get_outer_class_index(InstanceKlass* k, TRAPS);
-  static ClassDescriptor* parse_generic_signature(Klass* k, Symbol* original_name, TRAPS);
-
- public:
-
-  virtual ClassDescriptor* as_class_signature() { return this; }
-
-  MethodDescriptor* outer_method() { return _outer_method; }
-  void set_outer_method(MethodDescriptor* m) { _outer_method = m; }
-
-  ClassType* super() { return _super; }
-  ClassType* interface_desc(Symbol* sym);
-
-  static ClassDescriptor* parse_generic_signature(Klass* k, TRAPS);
-  static ClassDescriptor* parse_generic_signature(Symbol* sym);
-
-  // For use in superclass chains in positions where there is no generic info
-  static ClassDescriptor* placeholder(InstanceKlass* klass);
-
-#ifndef PRODUCT
-  void print_on(outputStream* str) const;
-#endif
-
-  ClassDescriptor* canonicalize(Context* ctx);
-
-  // Linking sets the position index in any contained TypeVariable type
-  // to correspond to the location of that identifier in the formal type
-  // parameters.
-  void bind_variables_to_parameters();
-};
-
-class MethodDescriptor : public Descriptor {
- private:
-  GrowableArray<Type*> _parameters;
-  Type* _return_type;
-  GrowableArray<Type*> _throws;
-
-  MethodDescriptor(GrowableArray<TypeParameter*>& ftp, ClassDescriptor* outer,
-      GrowableArray<Type*>& sigs, Type* rt, GrowableArray<Type*>& throws)
-      : Descriptor(ftp, outer), _parameters(sigs), _return_type(rt),
-        _throws(throws) {}
-
- public:
-
-  static MethodDescriptor* parse_generic_signature(Method* m, ClassDescriptor* outer);
-  static MethodDescriptor* parse_generic_signature(Symbol* sym, ClassDescriptor* outer);
-
-  MethodDescriptor* as_method_signature() { return this; }
-
-  // Performs generic analysis on the method parameters to determine
-  // if both methods refer to the same argument types.
-  bool covariant_match(MethodDescriptor* other, Context* ctx);
-
-  // Returns a new method descriptor with all generic variables
-  // removed and replaced with whatever is indicated using the Context.
-  MethodDescriptor* canonicalize(Context* ctx);
-
-  void bind_variables_to_parameters();
-
-#ifndef PRODUCT
-  TempNewSymbol reify_signature(Context* ctx, TRAPS);
-  void print_on(outputStream* str) const;
-#endif
-};
-
-class TypeParameter : public ResourceObj {
- private:
-  Identifier* _identifier;
-  ClassType* _class_bound;
-  GrowableArray<ClassType*> _interface_bounds;
-
-  // The position is the ordinal location of the parameter within the
-  // formal parameter list (excluding outer classes).  It is only set for
-  // formal type parameters that are associated with a class -- method
-  // type parameters are left as -1.  When resolving a generic variable to
-  // find the actual type, this index is used to access the generic type
-  // argument in the provided context object.
-  int _position; // Assigned during variable linking
-
-  TypeParameter(Identifier* id, ClassType* class_bound,
-    GrowableArray<ClassType*>& interface_bounds) :
-      _identifier(id), _class_bound(class_bound),
-      _interface_bounds(interface_bounds), _position(-1) {}
-
- public:
-  static TypeParameter* parse_generic_signature(DescriptorStream* str);
-
-  ClassType* bound();
-  int position() { return _position; }
-
-  void bind_variables_to_parameters(Descriptor* sig, int position);
-  Identifier* identifier() { return _identifier; }
-
-  Type* resolve(Context* ctx, int inner_depth, int ctx_depth);
-  TypeParameter* canonicalize(Context* ctx, int ctx_depth);
-
-#ifndef PRODUCT
-  void print_on(outputStream* str) const;
-#endif
-};
-
-class Type : public ResourceObj {
- public:
-  static Type* parse_generic_signature(DescriptorStream* str);
-
-  virtual ClassType* as_class() { return NULL; }
-  virtual TypeVariable* as_variable() { return NULL; }
-  virtual ArrayType* as_array() { return NULL; }
-  virtual PrimitiveType* as_primitive() { return NULL; }
-
-  virtual bool covariant_match(Type* gt, Context* ctx) = 0;
-  virtual Type* canonicalize(Context* ctx, int ctx_depth) = 0;
-
-  virtual void bind_variables_to_parameters(Descriptor* sig) = 0;
-
-#ifndef PRODUCT
-  virtual void reify_signature(stringStream* ss, Context* ctx) = 0;
-  virtual void print_on(outputStream* str) const = 0;
-#endif
-};
-
-class ClassType : public Type {
-  friend class ClassDescriptor;
- protected:
-  Identifier* _identifier;
-  GrowableArray<TypeArgument*> _type_arguments;
-  ClassType* _outer_class;
-
-  ClassType(Identifier* identifier,
-      GrowableArray<TypeArgument*>& args,
-      ClassType* outer)
-      : _identifier(identifier), _type_arguments(args), _outer_class(outer) {}
-
-  // Sets *has_inner to true if there are inner classes still to read
-  static Identifier* parse_generic_signature_simple(
-      GrowableArray<TypeArgument*>* args,
-      bool* has_inner, DescriptorStream* str);
-
-  static ClassType* parse_generic_signature(ClassType* outer,
-      DescriptorStream* str);
-  static ClassType* from_symbol(Symbol* sym);
-
- public:
-  ClassType* as_class() { return this; }
-
-  static ClassType* parse_generic_signature(DescriptorStream* str);
-  static ClassType* java_lang_Object();
-
-  Identifier* identifier() { return _identifier; }
-  int type_arguments_length() { return _type_arguments.length(); }
-  TypeArgument* type_argument_at(int i);
-
-  virtual ClassType* outer_class() { return _outer_class; }
-
-  bool covariant_match(Type* gt, Context* ctx);
-  ClassType* canonicalize(Context* ctx, int context_depth);
-
-  void bind_variables_to_parameters(Descriptor* sig);
-
-#ifndef PRODUCT
-  void reify_signature(stringStream* ss, Context* ctx);
-  void print_on(outputStream* str) const;
-#endif
-};
-
-class TypeVariable : public Type {
- private:
-  Identifier* _id;
-  TypeParameter* _parameter; // assigned during linking
-
-  // how many steps "out" from inner classes, -1 if method
-  int _inner_depth;
-
-  TypeVariable(Identifier* id)
-      : _id(id), _parameter(NULL), _inner_depth(0) {}
-
- public:
-  TypeVariable* as_variable() { return this; }
-
-  static TypeVariable* parse_generic_signature(DescriptorStream* str);
-
-  Identifier* identifier() { return _id; }
-  TypeParameter* parameter() { return _parameter; }
-  int inner_depth() { return _inner_depth; }
-
-  void bind_variables_to_parameters(Descriptor* sig);
-
-  Type* resolve(Context* ctx, int ctx_depth);
-  bool covariant_match(Type* gt, Context* ctx);
-  Type* canonicalize(Context* ctx, int ctx_depth);
-
-#ifndef PRODUCT
-  void reify_signature(stringStream* ss, Context* ctx);
-  void print_on(outputStream* str) const;
-#endif
-};
-
-class ArrayType : public Type {
- private:
-  Type* _base;
-
-  ArrayType(Type* base) : _base(base) {}
-
- public:
-  ArrayType* as_array() { return this; }
-
-  static ArrayType* parse_generic_signature(DescriptorStream* str);
-
-  bool covariant_match(Type* gt, Context* ctx);
-  ArrayType* canonicalize(Context* ctx, int ctx_depth);
-
-  void bind_variables_to_parameters(Descriptor* sig);
-
-#ifndef PRODUCT
-  void reify_signature(stringStream* ss, Context* ctx);
-  void print_on(outputStream* str) const;
-#endif
-};
-
-class PrimitiveType : public Type {
-  friend class Type;
- private:
-  char _type; // includes V for void
-
-  PrimitiveType(char type) : _type(type) {}
-
- public:
-  PrimitiveType* as_primitive() { return this; }
-
-  bool covariant_match(Type* gt, Context* ctx);
-  PrimitiveType* canonicalize(Context* ctx, int ctx_depth);
-
-  void bind_variables_to_parameters(Descriptor* sig);
-
-#ifndef PRODUCT
-  void reify_signature(stringStream* ss, Context* ctx);
-  void print_on(outputStream* str) const;
-#endif
-};
-
-class TypeArgument : public ResourceObj {
- private:
-  Type* _lower_bound;
-  Type* _upper_bound; // may be null or == _lower_bound
-
-  TypeArgument(Type* lower_bound, Type* upper_bound)
-      : _lower_bound(lower_bound), _upper_bound(upper_bound) {}
-
- public:
-
-  static TypeArgument* parse_generic_signature(DescriptorStream* str);
-
-  Type* lower_bound() { return _lower_bound; }
-  Type* upper_bound() { return _upper_bound; }
-
-  void bind_variables_to_parameters(Descriptor* sig);
-  TypeArgument* canonicalize(Context* ctx, int ctx_depth);
-
-  bool covariant_match(TypeArgument* a, Context* ctx);
-
-#ifndef PRODUCT
-  void print_on(outputStream* str) const;
-#endif
-};
-
-
-class Context : public ResourceObj {
- private:
-  DescriptorCache* _cache;
-  GrowableArray<ClassType*> _type_arguments;
-
-  void reset_to_mark(int size);
-
- public:
-  // When this object goes out of scope or 'destroy' is
-  // called, the application of the type to the
-  // context is wound back (unless it has been deactivated).
-  class Mark : public StackObj {
-   private:
-    mutable Context* _context;
-    int _marked_size;
-
-    bool is_active() const { return _context != NULL; }
-    void deactivate() const { _context = NULL; }
-
-   public:
-    Mark() : _context(NULL), _marked_size(0) {}
-    Mark(Context* ctx, int sz) : _context(ctx), _marked_size(sz) {}
-    Mark(const Mark& m) : _context(m._context), _marked_size(m._marked_size) {
-      m.deactivate(); // Ownership is transferred
-    }
-
-    Mark& operator=(const Mark& cm) {
-      destroy();
-      _context = cm._context;
-      _marked_size = cm._marked_size;
-      cm.deactivate();
-      return *this;
-    }
-
-    void destroy();
-    ~Mark() { destroy(); }
-  };
-
-  Context(DescriptorCache* cache) : _cache(cache) {}
-
-  Mark mark() { return Mark(this, _type_arguments.length()); }
-  void apply_type_arguments(InstanceKlass* current, InstanceKlass* super, TRAPS);
-
-  ClassType* at_depth(int i) const;
-
-#ifndef PRODUCT
-  void print_on(outputStream* str) const;
-#endif
-};
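// A minimal usage sketch for Context and Context::Mark (hypothetical caller;
// 'ik' and 'super_ik' stand for already-loaded InstanceKlass pointers):
//
//   DescriptorCache cache;
//   Context ctx(&cache);
//   {
//     Context::Mark m = ctx.mark();
//     ctx.apply_type_arguments(ik, super_ik, CHECK);
//     // ... resolve/canonicalize types against ctx here ...
//   } // ~Mark() rewinds ctx to the marked size via reset_to_mark()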
-
-/**
- * Contains a cache of descriptors for classes and methods so they can be
 * looked up instead of reparsed each time they are needed.
- */
-class DescriptorCache : public ResourceObj {
- private:
-  ResourceHashtable<InstanceKlass*, ClassDescriptor*> _class_descriptors;
-  ResourceHashtable<Method*, MethodDescriptor*> _method_descriptors;
-
- public:
-  ClassDescriptor* descriptor_for(InstanceKlass* ikh, TRAPS);
-
-  MethodDescriptor* descriptor_for(Method* mh, ClassDescriptor* cd, TRAPS);
-  // Class descriptor derived from method holder
-  MethodDescriptor* descriptor_for(Method* mh, TRAPS);
-};
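// Typical lookup pattern (sketch; 'ik' and 'm' are hypothetical):
//
//   DescriptorCache cache;
//   ClassDescriptor*  cd = cache.descriptor_for(ik, CHECK_NULL);
//   MethodDescriptor* md = cache.descriptor_for(m, cd, CHECK_NULL);
//
// Repeated queries for the same klass or method hit the ResourceHashtables
// instead of reparsing the signature symbols.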
-
-} // namespace generic
-
-#endif // SHARE_VM_CLASSFILE_GENERICSIGNATURES_HPP
-
--- a/src/share/vm/classfile/javaClasses.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/classfile/javaClasses.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -438,6 +438,29 @@
   return true;
 }
 
+bool java_lang_String::equals(oop str1, oop str2) {
+  assert(str1->klass() == SystemDictionary::String_klass(),
+         "must be java String");
+  assert(str2->klass() == SystemDictionary::String_klass(),
+         "must be java String");
+  typeArrayOop value1  = java_lang_String::value(str1);
+  int          offset1 = java_lang_String::offset(str1);
+  int          length1 = java_lang_String::length(str1);
+  typeArrayOop value2  = java_lang_String::value(str2);
+  int          offset2 = java_lang_String::offset(str2);
+  int          length2 = java_lang_String::length(str2);
+
+  if (length1 != length2) {
+    return false;
+  }
+  for (int i = 0; i < length1; i++) {
+    if (value1->char_at(i + offset1) != value2->char_at(i + offset2)) {
+      return false;
+    }
+  }
+  return true;
+}
+
 void java_lang_String::print(Handle java_string, outputStream* st) {
   oop          obj    = java_string();
   assert(obj->klass() == SystemDictionary::String_klass(), "must be java_string");
@@ -1353,8 +1376,15 @@
   const char* klass_name  = holder->external_name();
   int buf_len = (int)strlen(klass_name);
 
-  // pushing to the stack trace added one.
+  // The method id may point to an obsolete method; if so, we can't get more stack information
   Method* method = holder->method_with_idnum(method_id);
+  if (method == NULL) {
+    char* buf = NEW_RESOURCE_ARRAY(char, buf_len + 64);
+    // This is what the Java code prints in this case, with "(Redefined)" appended
+    sprintf(buf, "\tat %s.null (Redefined)", klass_name);
+    return buf;
+  }
+
   char* method_name = method->name()->as_C_string();
   buf_len += (int)strlen(method_name);
 
@@ -1750,7 +1780,8 @@
   return element;
 }
 
-oop java_lang_StackTraceElement::create(Handle mirror, int method_id, int version, int bci, TRAPS) {
+oop java_lang_StackTraceElement::create(Handle mirror, int method_id,
+                                        int version, int bci, TRAPS) {
   // Allocate java.lang.StackTraceElement instance
   Klass* k = SystemDictionary::StackTraceElement_klass();
   assert(k != NULL, "must be loaded in 1.4+");
@@ -1767,8 +1798,16 @@
   oop classname = StringTable::intern((char*) str, CHECK_0);
   java_lang_StackTraceElement::set_declaringClass(element(), classname);
 
+  Method* method = holder->method_with_idnum(method_id);
+  // Method on stack may be obsolete because it was redefined so cannot be
+  // found by idnum.
+  if (method == NULL) {
+    // leave name and fileName null
+    java_lang_StackTraceElement::set_lineNumber(element(), -1);
+    return element();
+  }
+
   // Fill in method name
-  Method* method = holder->method_with_idnum(method_id);
   oop methodname = StringTable::intern(method->name(), CHECK_0);
   java_lang_StackTraceElement::set_methodName(element(), methodname);
 
--- a/src/share/vm/classfile/javaClasses.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/classfile/javaClasses.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -182,6 +182,7 @@
   static unsigned int hash_string(oop java_string);
 
   static bool equals(oop java_string, jchar* chars, int len);
+  static bool equals(oop str1, oop str2);
 
   // Conversion between '.' and '/' formats
   static Handle externalize_classname(Handle java_string, TRAPS) { return char_converter(java_string, '/', '.', THREAD); }
--- a/src/share/vm/classfile/symbolTable.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/classfile/symbolTable.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -341,7 +341,7 @@
 
 Symbol* SymbolTable::basic_add(int index_arg, u1 *name, int len,
                                unsigned int hashValue_arg, bool c_heap, TRAPS) {
-  assert(!Universe::heap()->is_in_reserved(name) || GC_locker::is_active(),
+  assert(!Universe::heap()->is_in_reserved(name),
          "proposed name of symbol must be stable");
 
   // Don't allow symbols to be created which cannot fit in a Symbol*.
@@ -685,7 +685,7 @@
   if (found_string != NULL) return found_string;
 
   debug_only(StableMemoryChecker smc(name, len * sizeof(name[0])));
-  assert(!Universe::heap()->is_in_reserved(name) || GC_locker::is_active(),
+  assert(!Universe::heap()->is_in_reserved(name),
          "proposed name of symbol must be stable");
 
   Handle string;
@@ -807,6 +807,8 @@
   }
 }
 
+// This verification is part of Universe::verify() and needs to be quick.
+// See StringTable::verify_and_compare() below for exhaustive verification.
 void StringTable::verify() {
   for (int i = 0; i < the_table()->table_size(); ++i) {
     HashtableEntry<oop, mtSymbol>* p = the_table()->bucket(i);
@@ -825,6 +827,162 @@
   the_table()->dump_table(st, "StringTable");
 }
 
+StringTable::VerifyRetTypes StringTable::compare_entries(
+                                      int bkt1, int e_cnt1,
+                                      HashtableEntry<oop, mtSymbol>* e_ptr1,
+                                      int bkt2, int e_cnt2,
+                                      HashtableEntry<oop, mtSymbol>* e_ptr2) {
+  // These entries are sanity checked by verify_and_compare_entries()
+  // before this function is called.
+  oop str1 = e_ptr1->literal();
+  oop str2 = e_ptr2->literal();
+
+  if (str1 == str2) {
+    tty->print_cr("ERROR: identical oop values (0x" PTR_FORMAT ") "
+                  "in entry @ bucket[%d][%d] and entry @ bucket[%d][%d]",
+                  (void *)str1, bkt1, e_cnt1, bkt2, e_cnt2);
+    return _verify_fail_continue;
+  }
+
+  if (java_lang_String::equals(str1, str2)) {
+    tty->print_cr("ERROR: identical String values in entry @ "
+                  "bucket[%d][%d] and entry @ bucket[%d][%d]",
+                  bkt1, e_cnt1, bkt2, e_cnt2);
+    return _verify_fail_continue;
+  }
+
+  return _verify_pass;
+}
+
+StringTable::VerifyRetTypes StringTable::verify_entry(int bkt, int e_cnt,
+                                      HashtableEntry<oop, mtSymbol>* e_ptr,
+                                      StringTable::VerifyMesgModes mesg_mode) {
+
+  VerifyRetTypes ret = _verify_pass;  // be optimistic
+
+  oop str = e_ptr->literal();
+  if (str == NULL) {
+    if (mesg_mode == _verify_with_mesgs) {
+      tty->print_cr("ERROR: NULL oop value in entry @ bucket[%d][%d]", bkt,
+                    e_cnt);
+    }
+    // NULL oop means no more verifications are possible
+    return _verify_fail_done;
+  }
+
+  if (str->klass() != SystemDictionary::String_klass()) {
+    if (mesg_mode == _verify_with_mesgs) {
+      tty->print_cr("ERROR: oop is not a String in entry @ bucket[%d][%d]",
+                    bkt, e_cnt);
+    }
+    // not a String means no more verifications are possible
+    return _verify_fail_done;
+  }
+
+  unsigned int h = java_lang_String::hash_string(str);
+  if (e_ptr->hash() != h) {
+    if (mesg_mode == _verify_with_mesgs) {
+      tty->print_cr("ERROR: broken hash value in entry @ bucket[%d][%d], "
+                    "bkt_hash=%d, str_hash=%d", bkt, e_cnt, e_ptr->hash(), h);
+    }
+    ret = _verify_fail_continue;
+  }
+
+  if (the_table()->hash_to_index(h) != bkt) {
+    if (mesg_mode == _verify_with_mesgs) {
+      tty->print_cr("ERROR: wrong index value for entry @ bucket[%d][%d], "
+                    "str_hash=%d, hash_to_index=%d", bkt, e_cnt, h,
+                    the_table()->hash_to_index(h));
+    }
+    ret = _verify_fail_continue;
+  }
+
+  return ret;
+}
+
+// See StringTable::verify() above for the quick verification that is
+// part of Universe::verify(). This verification is exhaustive and
+// reports on every issue that is found. StringTable::verify() only
+// reports on the first issue that is found.
+//
+// StringTable::verify_entry() checks:
+// - oop value != NULL (same as verify())
+// - oop value is a String
+// - hash(String) == hash in entry (same as verify())
+// - index for hash == index of entry (same as verify())
+//
+// StringTable::compare_entries() checks:
+// - oops are unique across all entries
+// - String values are unique across all entries
+//
+int StringTable::verify_and_compare_entries() {
+  assert(StringTable_lock->is_locked(), "sanity check");
+
+  int  fail_cnt = 0;
+
+  // first, verify all the entries individually:
+  for (int bkt = 0; bkt < the_table()->table_size(); bkt++) {
+    HashtableEntry<oop, mtSymbol>* e_ptr = the_table()->bucket(bkt);
+    for (int e_cnt = 0; e_ptr != NULL; e_ptr = e_ptr->next(), e_cnt++) {
+      VerifyRetTypes ret = verify_entry(bkt, e_cnt, e_ptr, _verify_with_mesgs);
+      if (ret != _verify_pass) {
+        fail_cnt++;
+      }
+    }
+  }
+
+  // Optimization: if the above check did not find any failures, then
+  // the comparison loop below does not need to call verify_entry()
+  // before calling compare_entries(). If there were failures, then we
+  // have to call verify_entry() to see if the entry can be passed to
+  // compare_entries() safely. When we call verify_entry() in the loop
+  // below, we do so quietly to avoid duplicate messages and we don't
+  // increment fail_cnt because the failures have already been counted.
+  bool need_entry_verify = (fail_cnt != 0);
+
+  // second, verify all entries relative to each other:
+  for (int bkt1 = 0; bkt1 < the_table()->table_size(); bkt1++) {
+    HashtableEntry<oop, mtSymbol>* e_ptr1 = the_table()->bucket(bkt1);
+    for (int e_cnt1 = 0; e_ptr1 != NULL; e_ptr1 = e_ptr1->next(), e_cnt1++) {
+      if (need_entry_verify) {
+        VerifyRetTypes ret = verify_entry(bkt1, e_cnt1, e_ptr1,
+                                          _verify_quietly);
+        if (ret == _verify_fail_done) {
+          // cannot use the current entry to compare against other entries
+          continue;
+        }
+      }
+
+      for (int bkt2 = bkt1; bkt2 < the_table()->table_size(); bkt2++) {
+        HashtableEntry<oop, mtSymbol>* e_ptr2 = the_table()->bucket(bkt2);
+        int e_cnt2;
+        for (e_cnt2 = 0; e_ptr2 != NULL; e_ptr2 = e_ptr2->next(), e_cnt2++) {
+          if (bkt1 == bkt2 && e_cnt2 <= e_cnt1) {
+            // skip the entries up to and including the one that
+            // we're comparing against
+            continue;
+          }
+
+          if (need_entry_verify) {
+            VerifyRetTypes ret = verify_entry(bkt2, e_cnt2, e_ptr2,
+                                              _verify_quietly);
+            if (ret == _verify_fail_done) {
+              // cannot compare against this entry
+              continue;
+            }
+          }
+
+          // compare two entries, report and count any failures:
+          if (compare_entries(bkt1, e_cnt1, e_ptr1, bkt2, e_cnt2, e_ptr2)
+              != _verify_pass) {
+            fail_cnt++;
+          }
+        }
+      }
+    }
+  }
+  return fail_cnt;
+}
 
 // Create a new table and, using the alternate hash code, populate the new table
 // with the existing strings. Set the flag to use the alternate hash code afterwards.
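
// The triangular iteration in verify_and_compare_entries() visits each
// unordered pair of entries exactly once. A standalone sketch of the same
// loop shape (plain C++ with int "entries"; not HotSpot code):
#include <cstdio>
#include <vector>

typedef std::vector<std::vector<int> > Table;   // buckets of chained entries

static int compare_all_pairs(const Table& t) {
  int fail_cnt = 0;
  for (size_t bkt1 = 0; bkt1 < t.size(); bkt1++) {
    for (size_t e1 = 0; e1 < t[bkt1].size(); e1++) {
      // Start the inner scan at the outer bucket and skip entries up to
      // and including the outer entry, so each pair is seen once.
      for (size_t bkt2 = bkt1; bkt2 < t.size(); bkt2++) {
        for (size_t e2 = 0; e2 < t[bkt2].size(); e2++) {
          if (bkt2 == bkt1 && e2 <= e1) continue;
          if (t[bkt1][e1] == t[bkt2][e2]) {     // duplicate "literal"
            std::printf("duplicate %d at [%u][%u] and [%u][%u]\n",
                        t[bkt1][e1], (unsigned)bkt1, (unsigned)e1,
                        (unsigned)bkt2, (unsigned)e2);
            fail_cnt++;
          }
        }
      }
    }
  }
  return fail_cnt;
}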
--- a/src/share/vm/classfile/symbolTable.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/classfile/symbolTable.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -107,18 +107,13 @@
     add(loader_data, cp, names_count, name, lengths, cp_indices, hashValues, THREAD);
   }
 
-  // Table size
-  enum {
-    symbol_table_size = 20011
-  };
-
   Symbol* lookup(int index, const char* name, int len, unsigned int hash);
 
   SymbolTable()
-    : Hashtable<Symbol*, mtSymbol>(symbol_table_size, sizeof (HashtableEntry<Symbol*, mtSymbol>)) {}
+    : Hashtable<Symbol*, mtSymbol>(SymbolTableSize, sizeof (HashtableEntry<Symbol*, mtSymbol>)) {}
 
   SymbolTable(HashtableBucket<mtSymbol>* t, int number_of_entries)
-    : Hashtable<Symbol*, mtSymbol>(symbol_table_size, sizeof (HashtableEntry<Symbol*, mtSymbol>), t,
+    : Hashtable<Symbol*, mtSymbol>(SymbolTableSize, sizeof (HashtableEntry<Symbol*, mtSymbol>), t,
                 number_of_entries) {}
 
   // Arena for permanent symbols (null class loader) that are never unloaded
@@ -136,6 +131,9 @@
   // The symbol table
   static SymbolTable* the_table() { return _the_table; }
 
+  // Size of one bucket in the symbol table.  Used when checking for rollover.
+  static uint bucket_size() { return sizeof(HashtableBucket<mtSymbol>); }
+
   static void create_table() {
     assert(_the_table == NULL, "One symbol table allowed.");
     _the_table = new SymbolTable();
@@ -145,8 +143,11 @@
   static void create_table(HashtableBucket<mtSymbol>* t, int length,
                            int number_of_entries) {
     assert(_the_table == NULL, "One symbol table allowed.");
-    assert(length == symbol_table_size * sizeof(HashtableBucket<mtSymbol>),
-           "bad shared symbol size.");
+
+    // If the CDS archive used a different symbol table size, use that size
+    // instead of reporting an error.
+    SymbolTableSize = length / bucket_size();
+
     _the_table = new SymbolTable(t, number_of_entries);
     // With CDS, give the symbol table a default arena size since most symbols
     // are already allocated in the shared misc section.
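
// A quick standalone check of the size recovery above. The 8-byte bucket is
// an assumption for a 64-bit build (HashtableBucket holds one entry pointer);
// 20011 is the former fixed symbol_table_size removed by this change.
#include <cassert>

int main() {
  const unsigned bucket_bytes   = 8;                          // bucket_size()
  const unsigned dumped_buckets = 20011;                      // dump-time size
  const unsigned region_len     = dumped_buckets * bucket_bytes;
  assert(region_len / bucket_bytes == dumped_buckets);        // recovered size
  return 0;
}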
@@ -311,6 +312,26 @@
   static void verify();
   static void dump(outputStream* st);
 
+  enum VerifyMesgModes {
+    _verify_quietly    = 0,
+    _verify_with_mesgs = 1
+  };
+
+  enum VerifyRetTypes {
+    _verify_pass          = 0,
+    _verify_fail_continue = 1,
+    _verify_fail_done     = 2
+  };
+
+  static VerifyRetTypes compare_entries(int bkt1, int e_cnt1,
+                                        HashtableEntry<oop, mtSymbol>* e_ptr1,
+                                        int bkt2, int e_cnt2,
+                                        HashtableEntry<oop, mtSymbol>* e_ptr2);
+  static VerifyRetTypes verify_entry(int bkt, int e_cnt,
+                                     HashtableEntry<oop, mtSymbol>* e_ptr,
+                                     VerifyMesgModes mesg_mode);
+  static int verify_and_compare_entries();
+
   // Sharing
   static void copy_buckets(char** top, char*end) {
     the_table()->Hashtable<oop, mtSymbol>::copy_buckets(top, end);
--- a/src/share/vm/classfile/systemDictionary.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/classfile/systemDictionary.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1697,6 +1697,24 @@
   return newsize;
 }
 
+#ifdef ASSERT
+class VerifySDReachableAndLiveClosure : public OopClosure {
+private:
+  BoolObjectClosure* _is_alive;
+
+  template <class T> void do_oop_work(T* p) {
+    oop obj = oopDesc::load_decode_heap_oop(p);
+    guarantee(_is_alive->do_object_b(obj), "Oop in system dictionary must be live");
+  }
+
+public:
+  VerifySDReachableAndLiveClosure(BoolObjectClosure* is_alive) : OopClosure(), _is_alive(is_alive) { }
+
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+};
+#endif
+
 // Assumes classes in the SystemDictionary are only unloaded at a safepoint
 // Note: anonymous classes are not in the SD.
 bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive) {
@@ -1707,7 +1725,15 @@
     unloading_occurred = dictionary()->do_unloading();
     constraints()->purge_loader_constraints();
     resolution_errors()->purge_resolution_errors();
-}
+  }
+  // Oops referenced by the system dictionary may become unreachable independently
+  // of the class loader (e.g., cached protection domain oops). So we need to
+  // explicitly unlink them here instead of in Dictionary::do_unloading.
+  dictionary()->unlink(is_alive);
+#ifdef ASSERT
+  VerifySDReachableAndLiveClosure cl(is_alive);
+  dictionary()->oops_do(&cl);
+#endif
   return unloading_occurred;
 }
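
// A standalone sketch of the verification closure shape used above, with
// simplified stand-ins (Object for oop, a flag for liveness; not HotSpot
// code):
#include <cassert>

struct Object { bool alive; };

struct IsAliveClosure {                          // BoolObjectClosure stand-in
  bool do_object_b(Object* obj) const { return obj != 0 && obj->alive; }
};

struct VerifyLiveClosure {                       // OopClosure stand-in
  const IsAliveClosure* _is_alive;
  VerifyLiveClosure(const IsAliveClosure* is_alive) : _is_alive(is_alive) {}
  void do_oop(Object** p) const {
    // Every slot handed to the closure must reference a live object.
    assert(_is_alive->do_object_b(*p) && "oop in dictionary must be live");
  }
};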
 
@@ -2334,6 +2360,11 @@
   objArrayHandle appendix_box = oopFactory::new_objArray(SystemDictionary::Object_klass(), 1, CHECK_(empty));
   assert(appendix_box->obj_at(0) == NULL, "");
 
+  // This should not happen.  JDK code should take care of that.
+  if (accessing_klass.is_null() || method_type.is_null()) {
+    THROW_MSG_(vmSymbols::java_lang_InternalError(), "bad invokehandle", empty);
+  }
+
   // call java.lang.invoke.MethodHandleNatives::linkMethod(... String, MethodType) -> MemberName
   JavaCallArguments args;
   args.push_oop(accessing_klass()->java_mirror());
@@ -2459,6 +2490,9 @@
   Handle type;
   if (signature->utf8_length() > 0 && signature->byte_at(0) == '(') {
     type = find_method_handle_type(signature, caller, CHECK_(empty));
+  } else if (caller.is_null()) {
+    // This should not happen.  JDK code should take care of that.
+    THROW_MSG_(vmSymbols::java_lang_InternalError(), "bad MH constant", empty);
   } else {
     ResourceMark rm(THREAD);
     SignatureStream ss(signature, false);
@@ -2522,6 +2556,11 @@
   Handle method_name = java_lang_String::create_from_symbol(name, CHECK_(empty));
   Handle method_type = find_method_handle_type(type, caller, CHECK_(empty));
 
+  // This should not happen.  JDK code should take care of that.
+  if (caller.is_null() || method_type.is_null()) {
+    THROW_MSG_(vmSymbols::java_lang_InternalError(), "bad invokedynamic", empty);
+  }
+
   objArrayHandle appendix_box = oopFactory::new_objArray(SystemDictionary::Object_klass(), 1, CHECK_(empty));
   assert(appendix_box->obj_at(0) == NULL, "");
 
--- a/src/share/vm/classfile/systemDictionary.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/classfile/systemDictionary.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -173,8 +173,6 @@
   /* It's okay if this turns out to be NULL in non-1.4 JDKs. */                                                          \
   do_klass(nio_Buffer_klass,                            java_nio_Buffer,                           Opt                 ) \
                                                                                                                          \
-  do_klass(PostVMInitHook_klass,                        sun_misc_PostVMInitHook,                   Opt                 ) \
-                                                                                                                         \
   /* Preload boxing klasses */                                                                                           \
   do_klass(Boolean_klass,                               java_lang_Boolean,                         Pre                 ) \
   do_klass(Character_klass,                             java_lang_Character,                       Pre                 ) \
--- a/src/share/vm/classfile/verifier.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/classfile/verifier.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -188,6 +188,10 @@
 bool Verifier::is_eligible_for_verification(instanceKlassHandle klass, bool should_verify_class) {
   Symbol* name = klass->name();
   Klass* refl_magic_klass = SystemDictionary::reflect_MagicAccessorImpl_klass();
+  Klass* lambda_magic_klass = SystemDictionary::lambda_MagicLambdaImpl_klass();
+
+  bool is_reflect = refl_magic_klass != NULL && klass->is_subtype_of(refl_magic_klass);
+  bool is_lambda = lambda_magic_klass != NULL && klass->is_subtype_of(lambda_magic_klass);
 
   return (should_verify_for(klass->class_loader(), should_verify_class) &&
     // return if the class is a bootstrapping class
@@ -210,9 +214,9 @@
     // sun/reflect/SerializationConstructorAccessor.
     // NOTE: this is called too early in the bootstrapping process to be
     // guarded by Universe::is_gte_jdk14x_version()/UseNewReflection.
-    (refl_magic_klass == NULL ||
-     !klass->is_subtype_of(refl_magic_klass) ||
-     VerifyReflectionBytecodes)
+    // Also applies to lambda-generated code (JDK 8 and later).
+    (!is_reflect || VerifyReflectionBytecodes) &&
+    (!is_lambda || VerifyLambdaBytecodes)
   );
 }
 
@@ -2318,9 +2322,6 @@
       types = 1 << JVM_CONSTANT_InvokeDynamic;
       break;
     case Bytecodes::_invokespecial:
-      types = (1 << JVM_CONSTANT_InterfaceMethodref) |
-              (1 << JVM_CONSTANT_Methodref);
-      break;
     case Bytecodes::_invokestatic:
       types = (_klass->major_version() < STATIC_METHOD_IN_INTERFACE_MAJOR_VERSION) ?
         (1 << JVM_CONSTANT_Methodref) :
@@ -2438,8 +2439,14 @@
              && !ref_class_type.equals(current_type())
              && !ref_class_type.equals(VerificationType::reference_type(
                   current_class()->super()->name()))) {
-    bool subtype = ref_class_type.is_assignable_from(
-      current_type(), this, CHECK_VERIFY(this));
+    bool subtype = false;
+    if (!current_class()->is_anonymous()) {
+      subtype = ref_class_type.is_assignable_from(
+                 current_type(), this, CHECK_VERIFY(this));
+    } else {
+      subtype = ref_class_type.is_assignable_from(VerificationType::reference_type(
+                 current_class()->host_klass()->name()), this, CHECK_VERIFY(this));
+    }
     if (!subtype) {
       verify_error(ErrorContext::bad_code(bci),
           "Bad invokespecial instruction: "
@@ -2460,7 +2467,24 @@
     } else {   // other methods
       // Ensures that target class is assignable to method class.
       if (opcode == Bytecodes::_invokespecial) {
-        current_frame->pop_stack(current_type(), CHECK_VERIFY(this));
+        if (!current_class()->is_anonymous()) {
+          current_frame->pop_stack(current_type(), CHECK_VERIFY(this));
+        } else {
+          // anonymous class invokespecial calls: check if the
+          // objectref is a subtype of the host_klass of the current class
+          // to allow an anonymous class to reference methods in the host_klass
+          VerificationType top = current_frame->pop_stack(CHECK_VERIFY(this));
+          VerificationType hosttype =
+            VerificationType::reference_type(current_class()->host_klass()->name());
+          bool subtype = hosttype.is_assignable_from(top, this, CHECK_VERIFY(this));
+          if (!subtype) {
+            verify_error(ErrorContext::bad_type(current_frame->offset(),
+              current_frame->stack_top_ctx(),
+              TypeOrigin::implicit(top)),
+              "Bad type on operand stack");
+            return;
+          }
+        }
       } else if (opcode == Bytecodes::_invokevirtual) {
         VerificationType stack_object_type =
           current_frame->pop_stack(ref_class_type, CHECK_VERIFY(this));
--- a/src/share/vm/classfile/vmSymbols.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/classfile/vmSymbols.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -270,6 +270,7 @@
   template(java_lang_invoke_LambdaForm,               "java/lang/invoke/LambdaForm")              \
   template(java_lang_invoke_ForceInline_signature,    "Ljava/lang/invoke/ForceInline;")           \
   template(java_lang_invoke_DontInline_signature,     "Ljava/lang/invoke/DontInline;")            \
+  template(java_lang_invoke_Stable_signature,         "Ljava/lang/invoke/Stable;")                \
   template(java_lang_invoke_LambdaForm_Compiled_signature, "Ljava/lang/invoke/LambdaForm$Compiled;") \
   template(java_lang_invoke_LambdaForm_Hidden_signature, "Ljava/lang/invoke/LambdaForm$Hidden;")  \
   template(java_lang_invoke_MagicLambdaImpl,          "java/lang/invoke/MagicLambdaImpl")         \
@@ -623,6 +624,7 @@
   do_class(java_lang_StrictMath,          "java/lang/StrictMath")                                                       \
   do_signature(double2_double_signature,  "(DD)D")                                                                      \
   do_signature(int2_int_signature,        "(II)I")                                                                      \
+  do_signature(long2_long_signature,      "(JJ)J")                                                                      \
                                                                                                                         \
   /* here are the math names, all together: */                                                                          \
   do_name(abs_name,"abs")       do_name(sin_name,"sin")         do_name(cos_name,"cos")                                 \
@@ -630,6 +632,13 @@
   do_name(log_name,"log")       do_name(log10_name,"log10")     do_name(pow_name,"pow")                                 \
   do_name(exp_name,"exp")       do_name(min_name,"min")         do_name(max_name,"max")                                 \
                                                                                                                         \
+  do_name(addExact_name,"addExact")                                                                                     \
+  do_name(decrementExact_name,"decrementExact")                                                                         \
+  do_name(incrementExact_name,"incrementExact")                                                                         \
+  do_name(multiplyExact_name,"multiplyExact")                                                                           \
+  do_name(negateExact_name,"negateExact")                                                                               \
+  do_name(subtractExact_name,"subtractExact")                                                                           \
+                                                                                                                        \
   do_intrinsic(_dabs,                     java_lang_Math,         abs_name,   double_double_signature,           F_S)   \
   do_intrinsic(_dsin,                     java_lang_Math,         sin_name,   double_double_signature,           F_S)   \
   do_intrinsic(_dcos,                     java_lang_Math,         cos_name,   double_double_signature,           F_S)   \
@@ -642,6 +651,18 @@
   do_intrinsic(_dexp,                     java_lang_Math,         exp_name,   double_double_signature,           F_S)   \
   do_intrinsic(_min,                      java_lang_Math,         min_name,   int2_int_signature,                F_S)   \
   do_intrinsic(_max,                      java_lang_Math,         max_name,   int2_int_signature,                F_S)   \
+  do_intrinsic(_addExactI,                java_lang_Math,         addExact_name, int2_int_signature,             F_S)   \
+  do_intrinsic(_addExactL,                java_lang_Math,         addExact_name, long2_long_signature,           F_S)   \
+  do_intrinsic(_decrementExactI,          java_lang_Math,         decrementExact_name, int_int_signature,        F_S)   \
+  do_intrinsic(_decrementExactL,          java_lang_Math,         decrementExact_name, long_long_signature,      F_S)   \
+  do_intrinsic(_incrementExactI,          java_lang_Math,         incrementExact_name, int_int_signature,        F_S)   \
+  do_intrinsic(_incrementExactL,          java_lang_Math,         incrementExact_name, long_long_signature,      F_S)   \
+  do_intrinsic(_multiplyExactI,           java_lang_Math,         multiplyExact_name, int2_int_signature,        F_S)   \
+  do_intrinsic(_multiplyExactL,           java_lang_Math,         multiplyExact_name, long2_long_signature,      F_S)   \
+  do_intrinsic(_negateExactI,             java_lang_Math,         negateExact_name, int_int_signature,           F_S)   \
+  do_intrinsic(_negateExactL,             java_lang_Math,         negateExact_name, long_long_signature,         F_S)   \
+  do_intrinsic(_subtractExactI,           java_lang_Math,         subtractExact_name, int2_int_signature,        F_S)   \
+  do_intrinsic(_subtractExactL,           java_lang_Math,         subtractExact_name, long2_long_signature,      F_S)   \
                                                                                                                         \
   do_intrinsic(_floatToRawIntBits,        java_lang_Float,        floatToRawIntBits_name,   float_int_signature, F_S)   \
    do_name(     floatToRawIntBits_name,                          "floatToRawIntBits")                                   \
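
// The *Exact intrinsics above correspond to the JDK 8 java.lang.Math methods
// that throw ArithmeticException on overflow. A standalone sketch of the
// overflow test for the addExact case (the standard two's-complement sign
// trick; not the code the intrinsic generates):
#include <stdexcept>
#include <stdint.h>

static int32_t add_exact(int32_t x, int32_t y) {
  int32_t r = (int32_t)((uint32_t)x + (uint32_t)y);   // wrap without UB
  // Overflow occurred iff x and y share a sign and r's sign differs from it.
  if (((x ^ r) & (y ^ r)) < 0) {
    throw std::overflow_error("integer overflow");
  }
  return r;
}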
--- a/src/share/vm/code/codeBlob.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/code/codeBlob.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -245,8 +245,8 @@
 }
 
 
-void* BufferBlob::operator new(size_t s, unsigned size) {
-  void* p = CodeCache::allocate(size);
+void* BufferBlob::operator new(size_t s, unsigned size, bool is_critical) throw() {
+  void* p = CodeCache::allocate(size, is_critical);
   return p;
 }
 
@@ -277,7 +277,10 @@
   unsigned int size = allocation_size(cb, sizeof(AdapterBlob));
   {
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-    blob = new (size) AdapterBlob(size, cb);
+    // The parameter 'true' indicates a critical memory allocation.
+    // This means that CodeCacheMinimumFreeSpace is used, if necessary
+    const bool is_critical = true;
+    blob = new (size, is_critical) AdapterBlob(size, cb);
   }
   // Track memory usage statistic after releasing CodeCache_lock
   MemoryService::track_code_cache_memory_usage();
@@ -299,7 +302,10 @@
   size += round_to(buffer_size, oopSize);
   {
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-    blob = new (size) MethodHandlesAdapterBlob(size);
+    // The parameter 'true' indicates a critical memory allocation.
+    // This means that CodeCacheMinimumFreeSpace is used, if necessary
+    const bool is_critical = true;
+    blob = new (size, is_critical) MethodHandlesAdapterBlob(size);
   }
   // Track memory usage statistic after releasing CodeCache_lock
   MemoryService::track_code_cache_memory_usage();
@@ -347,14 +353,14 @@
 }
 
 
-void* RuntimeStub::operator new(size_t s, unsigned size) {
+void* RuntimeStub::operator new(size_t s, unsigned size) throw() {
   void* p = CodeCache::allocate(size, true);
   if (!p) fatal("Initial size of CodeCache is too small");
   return p;
 }
 
 // operator new shared by all singletons:
-void* SingletonBlob::operator new(size_t s, unsigned size) {
+void* SingletonBlob::operator new(size_t s, unsigned size) throw() {
   void* p = CodeCache::allocate(size, true);
   if (!p) fatal("Initial size of CodeCache is too small");
   return p;
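
// A standalone sketch of the 'critical' reserve policy referenced above:
// ordinary allocations must leave the minimum free space untouched, while
// critical ones (adapters the VM cannot run without) may dip into it.
// All numbers are illustrative stand-ins, not HotSpot defaults.
#include <cstdlib>

static std::size_t free_bytes = 1024;
static const std::size_t minimum_free = 512;  // CodeCacheMinimumFreeSpace stand-in

static void* cache_allocate(std::size_t size, bool is_critical) {
  const std::size_t reserve = is_critical ? 0 : minimum_free;
  if (free_bytes < size + reserve) {
    return NULL;              // would eat into the reserve; fail the request
  }
  free_bytes -= size;
  return std::malloc(size);
}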
--- a/src/share/vm/code/codeBlob.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/code/codeBlob.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -209,7 +209,7 @@
   BufferBlob(const char* name, int size);
   BufferBlob(const char* name, int size, CodeBuffer* cb);
 
-  void* operator new(size_t s, unsigned size);
+  void* operator new(size_t s, unsigned size, bool is_critical = false) throw();
 
  public:
   // Creation
@@ -253,7 +253,6 @@
 class MethodHandlesAdapterBlob: public BufferBlob {
 private:
   MethodHandlesAdapterBlob(int size)                 : BufferBlob("MethodHandles adapters", size) {}
-  MethodHandlesAdapterBlob(int size, CodeBuffer* cb) : BufferBlob("MethodHandles adapters", size, cb) {}
 
 public:
   // Creation
@@ -283,7 +282,7 @@
     bool        caller_must_gc_arguments
   );
 
-  void* operator new(size_t s, unsigned size);
+  void* operator new(size_t s, unsigned size) throw();
 
  public:
   // Creation
@@ -321,7 +320,7 @@
   friend class VMStructs;
 
  protected:
-  void* operator new(size_t s, unsigned size);
+  void* operator new(size_t s, unsigned size) throw();
 
  public:
    SingletonBlob(
--- a/src/share/vm/code/codeCache.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/code/codeCache.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -124,7 +124,6 @@
 int CodeCache::_number_of_nmethods_with_dependencies = 0;
 bool CodeCache::_needs_cache_clean = false;
 nmethod* CodeCache::_scavenge_root_nmethods = NULL;
-nmethod* CodeCache::_saved_nmethods = NULL;
 
 int CodeCache::_codemem_full_count = 0;
 
@@ -464,96 +463,11 @@
 }
 #endif //PRODUCT
 
-/**
- * Remove and return nmethod from the saved code list in order to reanimate it.
- */
-nmethod* CodeCache::reanimate_saved_code(Method* m) {
-  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-  nmethod* saved = _saved_nmethods;
-  nmethod* prev = NULL;
-  while (saved != NULL) {
-    if (saved->is_in_use() && saved->method() == m) {
-      if (prev != NULL) {
-        prev->set_saved_nmethod_link(saved->saved_nmethod_link());
-      } else {
-        _saved_nmethods = saved->saved_nmethod_link();
-      }
-      assert(saved->is_speculatively_disconnected(), "shouldn't call for other nmethods");
-      saved->set_speculatively_disconnected(false);
-      saved->set_saved_nmethod_link(NULL);
-      if (PrintMethodFlushing) {
-        saved->print_on(tty, " ### nmethod is reconnected");
-      }
-      if (LogCompilation && (xtty != NULL)) {
-        ttyLocker ttyl;
-        xtty->begin_elem("nmethod_reconnected compile_id='%3d'", saved->compile_id());
-        xtty->method(m);
-        xtty->stamp();
-        xtty->end_elem();
-      }
-      return saved;
-    }
-    prev = saved;
-    saved = saved->saved_nmethod_link();
-  }
-  return NULL;
-}
-
-/**
- * Remove nmethod from the saved code list in order to discard it permanently
- */
-void CodeCache::remove_saved_code(nmethod* nm) {
-  // For conc swpr this will be called with CodeCache_lock taken by caller
-  assert_locked_or_safepoint(CodeCache_lock);
-  assert(nm->is_speculatively_disconnected(), "shouldn't call for other nmethods");
-  nmethod* saved = _saved_nmethods;
-  nmethod* prev = NULL;
-  while (saved != NULL) {
-    if (saved == nm) {
-      if (prev != NULL) {
-        prev->set_saved_nmethod_link(saved->saved_nmethod_link());
-      } else {
-        _saved_nmethods = saved->saved_nmethod_link();
-      }
-      if (LogCompilation && (xtty != NULL)) {
-        ttyLocker ttyl;
-        xtty->begin_elem("nmethod_removed compile_id='%3d'", nm->compile_id());
-        xtty->stamp();
-        xtty->end_elem();
-      }
-      return;
-    }
-    prev = saved;
-    saved = saved->saved_nmethod_link();
-  }
-  ShouldNotReachHere();
-}
-
-void CodeCache::speculatively_disconnect(nmethod* nm) {
-  assert_locked_or_safepoint(CodeCache_lock);
-  assert(nm->is_in_use() && !nm->is_speculatively_disconnected(), "should only disconnect live nmethods");
-  nm->set_saved_nmethod_link(_saved_nmethods);
-  _saved_nmethods = nm;
-  if (PrintMethodFlushing) {
-    nm->print_on(tty, " ### nmethod is speculatively disconnected");
-  }
-  if (LogCompilation && (xtty != NULL)) {
-    ttyLocker ttyl;
-    xtty->begin_elem("nmethod_disconnected compile_id='%3d'", nm->compile_id());
-    xtty->method(nm->method());
-    xtty->stamp();
-    xtty->end_elem();
-  }
-  nm->method()->clear_code();
-  nm->set_speculatively_disconnected(true);
-}
-
 
 void CodeCache::gc_prologue() {
   assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
 }
 
-
 void CodeCache::gc_epilogue() {
   assert_locked_or_safepoint(CodeCache_lock);
   FOR_ALL_ALIVE_BLOBS(cb) {
--- a/src/share/vm/code/codeCache.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/code/codeCache.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -57,7 +57,6 @@
   static int _number_of_nmethods_with_dependencies;
   static bool _needs_cache_clean;
   static nmethod* _scavenge_root_nmethods;  // linked via nm->scavenge_root_link()
-  static nmethod* _saved_nmethods;          // Linked list of speculatively disconnected nmethods.
 
   static void verify_if_often() PRODUCT_RETURN;
 
@@ -167,17 +166,12 @@
   static size_t  capacity()                      { return _heap->capacity(); }
   static size_t  max_capacity()                  { return _heap->max_capacity(); }
   static size_t  unallocated_capacity()          { return _heap->unallocated_capacity(); }
-  static bool    needs_flushing()                { return unallocated_capacity() < CodeCacheFlushingMinimumFreeSpace; }
   static double  reverse_free_ratio();
 
   static bool needs_cache_clean()                { return _needs_cache_clean; }
   static void set_needs_cache_clean(bool v)      { _needs_cache_clean = v;    }
   static void clear_inline_caches();             // clear all inline caches
 
-  static nmethod* reanimate_saved_code(Method* m);
-  static void remove_saved_code(nmethod* nm);
-  static void speculatively_disconnect(nmethod* nm);
-
   // Deoptimization
   static int  mark_for_deoptimization(DepChange& changes);
 #ifdef HOTSWAP
--- a/src/share/vm/code/compiledIC.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/code/compiledIC.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -160,32 +160,42 @@
 // High-level access to an inline cache. Guaranteed to be MT-safe.
 
 
-void CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
-  methodHandle method = call_info->selected_method();
-  bool is_invoke_interface = (bytecode == Bytecodes::_invokeinterface && !call_info->has_vtable_index());
+bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
   assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
   assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic");
   assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?");
 
   address entry;
-  if (is_invoke_interface) {
-    int index = klassItable::compute_itable_index(call_info->resolved_method()());
-    entry = VtableStubs::create_stub(false, index, method());
-    assert(entry != NULL, "entry not computed");
+  if (call_info->call_kind() == CallInfo::itable_call) {
+    assert(bytecode == Bytecodes::_invokeinterface, "");
+    int itable_index = call_info->itable_index();
+    entry = VtableStubs::find_itable_stub(itable_index);
+    if (entry == NULL) {
+      return false;
+    }
+#ifdef ASSERT
+    int index = call_info->resolved_method()->itable_index();
+    assert(index == itable_index, "CallInfo pre-computes this");
+#endif //ASSERT
     InstanceKlass* k = call_info->resolved_method()->method_holder();
-    assert(k->is_interface(), "sanity check");
+    assert(k->verify_itable_index(itable_index), "sanity check");
     InlineCacheBuffer::create_transition_stub(this, k, entry);
   } else {
-    // Can be different than method->vtable_index(), due to package-private etc.
+    assert(call_info->call_kind() == CallInfo::vtable_call, "either itable or vtable");
+    // Can be different from selected_method->vtable_index(), due to package-private etc.
     int vtable_index = call_info->vtable_index();
-    entry = VtableStubs::create_stub(true, vtable_index, method());
-    InlineCacheBuffer::create_transition_stub(this, method(), entry);
+    assert(call_info->resolved_klass()->verify_vtable_index(vtable_index), "sanity check");
+    entry = VtableStubs::find_vtable_stub(vtable_index);
+    if (entry == NULL) {
+      return false;
+    }
+    InlineCacheBuffer::create_transition_stub(this, NULL, entry);
   }
 
   if (TraceICs) {
     ResourceMark rm;
     tty->print_cr ("IC@" INTPTR_FORMAT ": to megamorphic %s entry: " INTPTR_FORMAT,
-                   instruction_address(), method->print_value_string(), entry);
+                   instruction_address(), call_info->selected_method()->print_value_string(), entry);
   }
 
   // We can't check this anymore. With lazy deopt we could have already
@@ -195,6 +205,7 @@
   // race because the IC entry was complete when we safepointed so
   // cleaning it immediately is harmless.
   // assert(is_megamorphic(), "sanity check");
+  return true;
 }
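
// With the bool contract above, callers must handle the failure case rather
// than assume the transition succeeded. A sketch of the caller shape
// (hypothetical helper name; assumes HotSpot-internal headers and the
// CHECK_ macro, so illustrative rather than standalone):
static bool try_megamorphic(CompiledIC* ic, CallInfo* call_info,
                            Bytecodes::Code bc, TRAPS) {
  bool successful = ic->set_to_megamorphic(call_info, bc, CHECK_(false));
  if (!successful) {
    // Code cache full: leave the inline cache unchanged; the call site
    // misses again on the next dispatch and can retry after the sweeper
    // has freed space.
    return false;
  }
  return true;
}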
 
 
--- a/src/share/vm/code/compiledIC.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/code/compiledIC.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -226,7 +226,10 @@
   //
   void set_to_clean();  // Can only be called during a safepoint operation
   void set_to_monomorphic(CompiledICInfo& info);
-  void set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);
+
+  // Returns true if successful and false otherwise. The call can fail if memory
+  // allocation in the code cache fails.
+  bool set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);
 
   static void compute_monomorphic_entry(methodHandle method, KlassHandle receiver_klass,
                                         bool is_optimized, bool static_bound, CompiledICInfo& info, TRAPS);
--- a/src/share/vm/code/debugInfoRec.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/code/debugInfoRec.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,7 +38,7 @@
   int  _length; // number of bytes in the stream
   int  _hash;   // hash of stream bytes (for quicker reuse)
 
-  void* operator new(size_t ignore, DebugInformationRecorder* dir) {
+  void* operator new(size_t ignore, DebugInformationRecorder* dir) throw() {
     assert(ignore == sizeof(DIR_Chunk), "");
     if (dir->_next_chunk >= dir->_next_chunk_limit) {
       const int CHUNK = 100;
--- a/src/share/vm/code/dependencies.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/code/dependencies.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -812,8 +812,8 @@
     Klass* k = ctxk;
     Method* lm = k->lookup_method(m->name(), m->signature());
     if (lm == NULL && k->oop_is_instance()) {
-      // It might be an abstract interface method, devoid of mirandas.
-      lm = ((InstanceKlass*)k)->lookup_method_in_all_interfaces(m->name(),
+      // It might be an interface method.
+      lm = ((InstanceKlass*)k)->lookup_method_in_ordered_interfaces(m->name(),
                                                                 m->signature());
     }
     if (lm == m)
--- a/src/share/vm/code/nmethod.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/code/nmethod.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -93,18 +93,21 @@
 #endif
 
 bool nmethod::is_compiled_by_c1() const {
-  if (compiler() == NULL || method() == NULL)  return false;  // can happen during debug printing
-  if (is_native_method()) return false;
+  if (compiler() == NULL) {
+    return false;
+  }
   return compiler()->is_c1();
 }
 bool nmethod::is_compiled_by_c2() const {
-  if (compiler() == NULL || method() == NULL)  return false;  // can happen during debug printing
-  if (is_native_method()) return false;
+  if (compiler() == NULL) {
+    return false;
+  }
   return compiler()->is_c2();
 }
 bool nmethod::is_compiled_by_shark() const {
-  if (is_native_method()) return false;
-  assert(compiler() != NULL, "must be");
+  if (compiler() == NULL) {
+    return false;
+  }
   return compiler()->is_shark();
 }
 
@@ -459,7 +462,6 @@
   _state                      = alive;
   _marked_for_reclamation     = 0;
   _has_flushed_dependencies   = 0;
-  _speculatively_disconnected = 0;
   _has_unsafe_access          = 0;
   _has_method_handle_invokes  = 0;
   _lazy_critical_native       = 0;
@@ -478,7 +480,6 @@
   _osr_link                = NULL;
   _scavenge_root_link      = NULL;
   _scavenge_root_state     = 0;
-  _saved_nmethod_link      = NULL;
   _compiler                = NULL;
 
 #ifdef HAVE_DTRACE_H
@@ -683,6 +684,7 @@
     _osr_entry_point         = NULL;
     _exception_cache         = NULL;
     _pc_desc_cache.reset_to(NULL);
+    _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();
 
     code_buffer->copy_values_to(this);
     if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
@@ -767,6 +769,7 @@
     _osr_entry_point         = NULL;
     _exception_cache         = NULL;
     _pc_desc_cache.reset_to(NULL);
+    _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();
 
     code_buffer->copy_values_to(this);
     debug_only(verify_scavenge_root_oops());
@@ -800,7 +803,7 @@
 }
 #endif // def HAVE_DTRACE_H
 
-void* nmethod::operator new(size_t size, int nmethod_size) {
+void* nmethod::operator new(size_t size, int nmethod_size) throw() {
   // Not critical, may return null if there is too little contiguous memory
   return CodeCache::allocate(nmethod_size);
 }
@@ -839,6 +842,7 @@
     _comp_level              = comp_level;
     _compiler                = compiler;
     _orig_pc_offset          = orig_pc_offset;
+    _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();
 
     // Section offsets
     _consts_offset           = content_offset()      + code_buffer->total_offset_of(code_buffer->consts());
@@ -1173,7 +1177,7 @@
 
 // This is a private interface with the sweeper.
 void nmethod::mark_as_seen_on_stack() {
-  assert(is_not_entrant(), "must be a non-entrant method");
+  assert(is_alive(), "Must be an alive method");
   // Set the traversal mark to ensure that the sweeper does 2
   // cleaning passes before moving to zombie.
   set_stack_traversal_mark(NMethodSweeper::traversal_count());
@@ -1258,7 +1262,7 @@
 
   set_osr_link(NULL);
   //set_scavenge_root_link(NULL); // done by prune_scavenge_root_nmethods
-  NMethodSweeper::notify(this);
+  NMethodSweeper::notify();
 }
 
 void nmethod::invalidate_osr_method() {
@@ -1348,6 +1352,15 @@
       nmethod_needs_unregister = true;
     }
 
+    // Must happen before state change. Otherwise we have a race condition in
+    // nmethod::can_not_entrant_be_converted(). I.e., a method can immediately
+    // transition its state from 'not_entrant' to 'zombie' without having to wait
+    // for stack scanning.
+    if (state == not_entrant) {
+      mark_as_seen_on_stack();
+      OrderAccess::storestore();
+    }
+
     // Change state
     _state = state;
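
// A standalone analogue of the ordering requirement above, using C++11
// atomics in place of OrderAccess: the traversal mark is published before
// the state, so a sweeper that observes not_entrant also observes the mark.
#include <atomic>

static long stack_traversal_mark = 0;
static std::atomic<unsigned char> nm_state(0);   // 0 = alive, 1 = not_entrant

static void make_not_entrant_sketch(long current_traversal) {
  stack_traversal_mark = current_traversal;      // first: the mark
  nm_state.store(1, std::memory_order_release);  // then: the state change
}

static bool can_convert_to_zombie_sketch(long sweep_traversal) {
  if (nm_state.load(std::memory_order_acquire) != 1) return false;
  return stack_traversal_mark < sweep_traversal; // mark guaranteed visible
}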
 
@@ -1366,11 +1379,6 @@
       HandleMark hm;
       method()->clear_code();
     }
-
-    if (state == not_entrant) {
-      mark_as_seen_on_stack();
-    }
-
   } // leave critical region under Patching_lock
 
   // When the nmethod becomes zombie it is no longer alive so the
@@ -1401,6 +1409,9 @@
     // nmethods aren't scanned for GC.
     _oops_are_stale = true;
 #endif
+    // The Method may be reclaimed by class unloading now that the
+    // nmethod is in zombie state.
+    set_method(NULL);
   } else {
     assert(state == not_entrant, "other cases may need to be handled differently");
   }
@@ -1410,7 +1421,7 @@
   }
 
   // Make sweeper aware that there is a zombie method that needs to be removed
-  NMethodSweeper::notify(this);
+  NMethodSweeper::notify();
 
   return true;
 }
@@ -1445,10 +1456,6 @@
     CodeCache::drop_scavenge_root_nmethod(this);
   }
 
-  if (is_speculatively_disconnected()) {
-    CodeCache::remove_saved_code(this);
-  }
-
 #ifdef SHARK
   ((SharkCompiler *) compiler())->free_compiled_method(insts_begin());
 #endif // SHARK
@@ -1959,7 +1966,7 @@
     if (!_detected_scavenge_root)  _print_nm->print_on(tty, "new scavenge root");
     tty->print_cr(""PTR_FORMAT"[offset=%d] detected scavengable oop "PTR_FORMAT" (found at "PTR_FORMAT")",
                   _print_nm, (int)((intptr_t)p - (intptr_t)_print_nm),
-                  (intptr_t)(*p), (intptr_t)p);
+                  (void *)(*p), (intptr_t)p);
     (*p)->print();
   }
 #endif //PRODUCT
@@ -2339,7 +2346,7 @@
       _ok = false;
     }
     tty->print_cr("*** non-oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)",
-                  (intptr_t)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
+                  (void *)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
   }
   virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
 };
@@ -2460,7 +2467,7 @@
       _ok = false;
     }
     tty->print_cr("*** scavengable oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)",
-                  (intptr_t)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
+                  (void *)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
     (*p)->print();
   }
   virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
--- a/src/share/vm/code/nmethod.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/code/nmethod.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -119,7 +119,6 @@
   // To support simple linked-list chaining of nmethods:
   nmethod*  _osr_link;         // from InstanceKlass::osr_nmethods_head
   nmethod*  _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
-  nmethod*  _saved_nmethod_link; // from CodeCache::speculatively_disconnect
 
   static nmethod* volatile _oops_do_mark_nmethods;
   nmethod*        volatile _oops_do_mark_link;
@@ -165,7 +164,6 @@
 
   // protected by CodeCache_lock
   bool _has_flushed_dependencies;            // Used for maintenance of dependencies (CodeCache_lock)
-  bool _speculatively_disconnected;          // Marked for potential unload
 
   bool _marked_for_reclamation;              // Used by NMethodSweeper (set only by sweeper)
   bool _marked_for_deoptimization;           // Used for stack deoptimization
@@ -180,7 +178,7 @@
   unsigned int _has_wide_vectors:1;          // Preserve wide vectors at safepoints
 
   // Protected by Patching_lock
-  unsigned char _state;                      // {alive, not_entrant, zombie, unloaded}
+  volatile unsigned char _state;             // {alive, not_entrant, zombie, unloaded}
 
 #ifdef ASSERT
   bool _oops_are_stale;  // indicates that it's no longer safe to access oops section
@@ -202,11 +200,18 @@
 
   // not_entrant method removal. Each mark_sweep pass will update
   // this mark to current sweep invocation count if it is seen on the
-  // stack.  An not_entrant method can be removed when there is no
+  // stack.  A not_entrant method can be removed when there are no
   // more activations, i.e., when the _stack_traversal_mark is less than
   // current sweep traversal index.
   long _stack_traversal_mark;
 
+  // The _hotness_counter indicates the hotness of a method. The higher
+  // the value, the hotter the method. The hotness counter of an nmethod is
+  // set to [(ReservedCodeCacheSize / (1024 * 1024)) * 2] each time the method
+  // is found active during stack scanning (mark_active_nmethods()). The
+  // hotness counter is decreased (by 1) while sweeping.
+  int _hotness_counter;
+
   ExceptionCache *_exception_cache;
   PcDescCache     _pc_desc_cache;
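
// A standalone sketch of the hotness scheme described above. The reset value
// follows the stated formula; the code cache size is an example figure only.
struct MethodHeatSketch {
  static const int reserved_code_cache_mb = 240;         // example only
  static const int reset_val = reserved_code_cache_mb * 2;
  int hotness;
  MethodHeatSketch() : hotness(reset_val) {}
  void on_stack_scan_active() { hotness = reset_val; }   // seen active: reheat
  void on_sweep()             { hotness--; }             // each sweep: cool
};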
 
@@ -265,7 +270,7 @@
           int comp_level);
 
   // helper methods
-  void* operator new(size_t size, int nmethod_size);
+  void* operator new(size_t size, int nmethod_size) throw();
 
   const char* reloc_string_for(u_char* begin, u_char* end);
   // Returns true if this thread changed the state of the nmethod or
@@ -382,6 +387,10 @@
 
   int total_size        () const;
 
+  void dec_hotness_counter()        { _hotness_counter--; }
+  void set_hotness_counter(int val) { _hotness_counter = val; }
+  int  hotness_counter() const      { return _hotness_counter; }
+
   // Containment
   bool consts_contains       (address addr) const { return consts_begin       () <= addr && addr < consts_end       (); }
   bool insts_contains        (address addr) const { return insts_begin        () <= addr && addr < insts_end        (); }
@@ -408,8 +417,8 @@
   // alive.  It is used when an uncommon trap happens.  Returns true
   // if this thread changed the state of the nmethod or false if
   // another thread performed the transition.
-  bool  make_not_entrant()                        { return make_not_entrant_or_zombie(not_entrant); }
-  bool  make_zombie()                             { return make_not_entrant_or_zombie(zombie); }
+  bool  make_not_entrant() { return make_not_entrant_or_zombie(not_entrant); }
+  bool  make_zombie()      { return make_not_entrant_or_zombie(zombie); }
 
   // used by jvmti to track if the unload event has been reported
   bool  unload_reported()                         { return _unload_reported; }
@@ -437,9 +446,6 @@
   bool  has_method_handle_invokes() const         { return _has_method_handle_invokes; }
   void  set_has_method_handle_invokes(bool z)     { _has_method_handle_invokes = z; }
 
-  bool  is_speculatively_disconnected() const     { return _speculatively_disconnected; }
-  void  set_speculatively_disconnected(bool z)    { _speculatively_disconnected = z; }
-
   bool  is_lazy_critical_native() const           { return _lazy_critical_native; }
   void  set_lazy_critical_native(bool z)          { _lazy_critical_native = z; }
 
@@ -499,9 +505,6 @@
   nmethod* scavenge_root_link() const                  { return _scavenge_root_link; }
   void     set_scavenge_root_link(nmethod *n)          { _scavenge_root_link = n; }
 
-  nmethod* saved_nmethod_link() const                  { return _saved_nmethod_link; }
-  void     set_saved_nmethod_link(nmethod *n)          { _saved_nmethod_link = n; }
-
  public:
 
   // Sweeper support
--- a/src/share/vm/code/relocInfo.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/code/relocInfo.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -678,7 +678,7 @@
   }
 
  public:
-  void* operator new(size_t size, const RelocationHolder& holder) {
+  void* operator new(size_t size, const RelocationHolder& holder) throw() {
     if (size > sizeof(holder._relocbuf)) guarantee_size();
     assert((void* const *)holder.reloc() == &holder._relocbuf[0], "ptrs must agree");
     return holder.reloc();
--- a/src/share/vm/code/vtableStubs.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/code/vtableStubs.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -46,12 +46,9 @@
 address VtableStub::_chunk_end         = NULL;
 VMReg   VtableStub::_receiver_location = VMRegImpl::Bad();
 
-static int num_vtable_chunks = 0;
 
-
-void* VtableStub::operator new(size_t size, int code_size) {
+void* VtableStub::operator new(size_t size, int code_size) throw() {
   assert(size == sizeof(VtableStub), "mismatched size");
-  num_vtable_chunks++;
   // compute real VtableStub size (rounded to nearest word)
   const int real_size = round_to(code_size + sizeof(VtableStub), wordSize);
   // malloc them in chunks to minimize header overhead
@@ -60,7 +57,7 @@
     const int bytes = chunk_factor * real_size + pd_code_alignment();
     BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
     if (blob == NULL) {
-      vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "CodeCache: no room for vtable chunks");
+      return NULL;
     }
     _chunk = blob->content_begin();
     _chunk_end = _chunk + bytes;
@@ -111,7 +108,7 @@
 }
 
 
-address VtableStubs::create_stub(bool is_vtable_stub, int vtable_index, Method* method) {
+address VtableStubs::find_stub(bool is_vtable_stub, int vtable_index) {
   assert(vtable_index >= 0, "must be positive");
 
   VtableStub* s = ShareVtableStubs ? lookup(is_vtable_stub, vtable_index) : NULL;
@@ -121,6 +118,12 @@
     } else {
       s = create_itable_stub(vtable_index);
     }
+
+    // Creation of vtable or itable can fail if there is not enough free space in the code cache.
+    if (s == NULL) {
+      return NULL;
+    }
+
     enter(is_vtable_stub, vtable_index, s);
     if (PrintAdapterHandlers) {
       tty->print_cr("Decoding VtableStub %s[%d]@%d",
--- a/src/share/vm/code/vtableStubs.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/code/vtableStubs.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -46,7 +46,7 @@
   bool           _is_vtable_stub;    // True if vtable stub, false, is itable stub
   /* code follows here */            // The vtableStub code
 
-  void* operator new(size_t size, int code_size);
+  void* operator new(size_t size, int code_size) throw();
 
   VtableStub(bool is_vtable_stub, int index)
         : _next(NULL), _is_vtable_stub(is_vtable_stub),
@@ -121,9 +121,11 @@
   static VtableStub* lookup            (bool is_vtable_stub, int vtable_index);
   static void        enter             (bool is_vtable_stub, int vtable_index, VtableStub* s);
   static inline uint hash              (bool is_vtable_stub, int vtable_index);
+  static address     find_stub         (bool is_vtable_stub, int vtable_index);
 
  public:
-  static address     create_stub(bool is_vtable_stub, int vtable_index, Method* method); // return the entry point of a stub for this call
+  static address     find_vtable_stub(int vtable_index) { return find_stub(true,  vtable_index); }
+  static address     find_itable_stub(int itable_index) { return find_stub(false, itable_index); }
   static bool        is_entry_point(address pc);                     // is pc a vtable stub entry point?
   static bool        contains(address pc);                           // is pc within any stub?
   static VtableStub* stub_containing(address pc);                    // stub containing pc or NULL
--- a/src/share/vm/compiler/abstractCompiler.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/compiler/abstractCompiler.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -24,41 +24,42 @@
 
 #include "precompiled.hpp"
 #include "compiler/abstractCompiler.hpp"
+#include "compiler/compileBroker.hpp"
 #include "runtime/mutexLocker.hpp"
-void AbstractCompiler::initialize_runtimes(initializer f, volatile int* state) {
-  if (*state != initialized) {
+
+bool AbstractCompiler::should_perform_init() {
+  if (_compiler_state != initialized) {
+    MutexLocker only_one(CompileThread_lock);
 
-    // We are thread in native here...
-    CompilerThread* thread = CompilerThread::current();
-    bool do_initialization = false;
-    {
-      ThreadInVMfromNative tv(thread);
-      ResetNoHandleMark rnhm;
-      MutexLocker only_one(CompileThread_lock, thread);
-      if ( *state == uninitialized) {
-        do_initialization = true;
-        *state = initializing;
-      } else {
-        while (*state == initializing ) {
-          CompileThread_lock->wait();
-        }
+    if (_compiler_state == uninitialized) {
+      _compiler_state = initializing;
+      return true;
+    } else {
+      while (_compiler_state == initializing) {
+        CompileThread_lock->wait();
       }
     }
-    if (do_initialization) {
-      // We can not hold any locks here since JVMTI events may call agents
+  }
+  return false;
+}
 
-      // Compiler(s) run as native
-
-      (*f)();
-
-      // To in_vm so we can use the lock
+bool AbstractCompiler::should_perform_shutdown() {
+  // Since this method can be called by multiple threads, the lock ensures atomicity of
+  // decrementing '_num_compiler_threads' and the following operations.
+  MutexLocker only_one(CompileThread_lock);
+  _num_compiler_threads--;
+  assert(CompileBroker::is_compilation_disabled_forever(), "Must be set, otherwise thread waits forever");
 
-      ThreadInVMfromNative tv(thread);
-      ResetNoHandleMark rnhm;
-      MutexLocker only_one(CompileThread_lock, thread);
-      assert(*state == initializing, "wrong state");
-      *state = initialized;
-      CompileThread_lock->notify_all();
-    }
+  // Only the last thread will perform shutdown operations
+  if (_num_compiler_threads == 0) {
+    return true;
   }
+  return false;
 }
+
+void AbstractCompiler::set_state(int state) {
+  // Ensure that the state is only set by one thread at a time.
+  MutexLocker only_one(CompileThread_lock);
+  _compiler_state = state;
+  CompileThread_lock->notify_all();
+}
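
// A standalone analogue of the init gate above, with std::mutex and
// std::condition_variable standing in for CompileThread_lock: exactly one
// caller gets true and must later publish 'initialized' via set_state();
// all other callers block until initialization has finished.
#include <condition_variable>
#include <mutex>

enum SketchState { s_uninitialized, s_initializing, s_initialized };
static SketchState sketch_state = s_uninitialized;
static std::mutex sketch_lock;
static std::condition_variable sketch_cv;

static bool should_perform_init_sketch() {
  std::unique_lock<std::mutex> guard(sketch_lock);
  if (sketch_state == s_uninitialized) {
    sketch_state = s_initializing;
    return true;                       // this thread initializes the runtime
  }
  sketch_cv.wait(guard, []{ return sketch_state != s_initializing; });
  return false;                        // runtime already set up
}

static void set_state_sketch(SketchState s) {
  std::lock_guard<std::mutex> guard(sketch_lock);
  sketch_state = s;
  sketch_cv.notify_all();
}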
--- a/src/share/vm/compiler/abstractCompiler.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/compiler/abstractCompiler.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -27,22 +27,25 @@
 
 #include "ci/compilerInterface.hpp"
 
-typedef void (*initializer)(void);
-
 class AbstractCompiler : public CHeapObj<mtCompiler> {
  private:
-  bool _is_initialized; // Mark whether compiler object is initialized
+  volatile int _num_compiler_threads;
 
  protected:
+  volatile int _compiler_state;
   // Used for tracking global state of compiler runtime initialization
-  enum { uninitialized, initializing, initialized };
+  enum { uninitialized, initializing, initialized, failed, shut_down };
 
-  // This method will call the initialization method "f" once (per compiler class/subclass)
-  // and do so without holding any locks
-  void initialize_runtimes(initializer f, volatile int* state);
+  // This method returns true for the first compiler thread that reaches it.
+  // This thread will initialize the compiler runtime.
+  bool should_perform_init();
 
  public:
-  AbstractCompiler() : _is_initialized(false)    {}
+  AbstractCompiler() : _compiler_state(uninitialized), _num_compiler_threads(0) {}
+
+  // This function determines the compiler thread that will perform the
+  // shutdown of the corresponding compiler runtime.
+  bool should_perform_shutdown();
 
   // Name of this compiler
   virtual const char* name() = 0;
@@ -74,17 +77,18 @@
 #endif // TIERED
 
   // Customization
-  virtual bool needs_stubs            ()         = 0;
+  virtual void initialize() = 0;
 
-  void mark_initialized()                        { _is_initialized = true; }
-  bool is_initialized()                          { return _is_initialized; }
+  void set_num_compiler_threads(int num) { _num_compiler_threads = num;  }
+  int num_compiler_threads()             { return _num_compiler_threads; }
 
-  virtual void initialize()                      = 0;
-
+  // Get/set state of compiler objects
+  bool is_initialized()           { return _compiler_state == initialized; }
+  bool is_failed     ()           { return _compiler_state == failed; }
+  void set_state     (int state);
+  void set_shut_down ()           { set_state(shut_down); }
   // Compilation entry point for methods
-  virtual void compile_method(ciEnv* env,
-                              ciMethod* target,
-                              int entry_bci) {
+  virtual void compile_method(ciEnv* env, ciMethod* target, int entry_bci) {
     ShouldNotReachHere();
   }
 
--- a/src/share/vm/compiler/compileBroker.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/compiler/compileBroker.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -186,7 +186,7 @@
 CompileQueue* CompileBroker::_c1_method_queue    = NULL;
 CompileTask*  CompileBroker::_task_free_list     = NULL;
 
-GrowableArray<CompilerThread*>* CompileBroker::_method_threads = NULL;
+GrowableArray<CompilerThread*>* CompileBroker::_compiler_threads = NULL;
 
 
 class CompilationLog : public StringEventLog {
@@ -587,9 +587,6 @@
 
 
 
-// ------------------------------------------------------------------
-// CompileQueue::add
-//
 // Add a CompileTask to a CompileQueue
 void CompileQueue::add(CompileTask* task) {
   assert(lock()->owned_by_self(), "must own lock");
@@ -626,6 +623,16 @@
   lock()->notify_all();
 }
 
+void CompileQueue::delete_all() {
+  assert(lock()->owned_by_self(), "must own lock");
+  // Read the successor before deleting the task; advancing via
+  // task->next() after 'delete task' would be a use-after-free.
+  CompileTask* task = _first;
+  while (task != NULL) {
+    CompileTask* next = task->next();
+    delete task;
+    task = next;
+  }
+  _first = NULL;
+}
+
 // ------------------------------------------------------------------
 // CompileQueue::get
 //
@@ -634,22 +641,52 @@
   NMethodSweeper::possibly_sweep();
 
   MutexLocker locker(lock());
-  // Wait for an available CompileTask.
+  // If _first is NULL we have no more compile jobs. There are two reasons for
+  // having no compile jobs: First, we compiled everything we wanted. Second,
+  // we ran out of code cache so compilation has been disabled. In the latter
+  // case we perform code cache sweeps to free memory such that we can re-enable
+  // compilation.
   while (_first == NULL) {
-    // There is no work to be done right now.  Wait.
-    if (UseCodeCacheFlushing && (!CompileBroker::should_compile_new_jobs() || CodeCache::needs_flushing())) {
-      // During the emergency sweeping periods, wake up and sweep occasionally
-      bool timedout = lock()->wait(!Mutex::_no_safepoint_check_flag, NmethodSweepCheckInterval*1000);
-      if (timedout) {
+    // Exit loop if compilation is disabled forever
+    if (CompileBroker::is_compilation_disabled_forever()) {
+      return NULL;
+    }
+
+    if (UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs()) {
+      // Wait a certain amount of time to possibly do another sweep.
+      // We must wait until stack scanning has happened so that we can
+      // transition a method's state from 'not_entrant' to 'zombie'.
+      long wait_time = NmethodSweepCheckInterval * 1000;
+      if (FLAG_IS_DEFAULT(NmethodSweepCheckInterval)) {
+        // Only one thread at a time can do sweeping. Scale the
+        // wait time according to the number of compiler threads.
+        // As a result, a sweep is likely to happen roughly every 100ms,
+        // regardless of how many threads do the sweeping.
+        wait_time = 100 * CICompilerCount;
+      }
+      bool timeout = lock()->wait(!Mutex::_no_safepoint_check_flag, wait_time);
+      if (timeout) {
         MutexUnlocker ul(lock());
-        // When otherwise not busy, run nmethod sweeping
         NMethodSweeper::possibly_sweep();
       }
     } else {
-      // During normal operation no need to wake up on timer
-      lock()->wait();
+      // If there are no compilation tasks and we can compile new jobs
+      // (i.e., there is enough free space in the code cache) there is
+      // no need to invoke the sweeper. As a result, the hotness of methods
+      // remains unchanged. This behavior is desired, since we want to keep
+      // the system in a stable state, i.e., we do not want to evict methods
+      // from the code cache unnecessarily.
+      // We need a timed wait here, since compiler threads can exit if compilation
+      // is disabled forever. We use 5 seconds wait time; the exiting of compiler threads
+      // is not critical and we do not want idle compiler threads to wake up too often.
+      lock()->wait(!Mutex::_no_safepoint_check_flag, 5*1000);
     }
   }
+
+  if (CompileBroker::is_compilation_disabled_forever()) {
+    return NULL;
+  }
+
   CompileTask* task = CompilationPolicy::policy()->select_task(this);
   remove(task);
   return task;
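The rewritten get() boils down to a timed condition wait: an idle compiler thread sleeps, wakes periodically either to run the sweeper or to notice that compilation has been disabled forever, and only returns a task once one is queued. A sketch of that wait/shutdown protocol using standard primitives (TimedQueue and its members are invented for illustration; HotSpot's Monitor wait additionally deals with safepoints):

    #include <chrono>
    #include <condition_variable>
    #include <deque>
    #include <mutex>

    template <typename Task>
    class TimedQueue {
      std::deque<Task> _tasks;
      std::mutex _lock;
      std::condition_variable _cv;
      bool _shutdown = false;
     public:
      // Returns false once compilation is "disabled forever".
      bool get(Task& out) {
        std::unique_lock<std::mutex> l(_lock);
        while (_tasks.empty()) {
          if (_shutdown) return false;
          // Timed wait: on timeout the idle thread can do housekeeping
          // (the broker calls NMethodSweeper::possibly_sweep()) or notice
          // that shutdown was requested while it slept.
          _cv.wait_for(l, std::chrono::seconds(5));
        }
        if (_shutdown) return false;
        out = _tasks.front();
        _tasks.pop_front();
        return true;
      }
      void add(Task t) {
        { std::lock_guard<std::mutex> l(_lock); _tasks.push_back(t); }
        _cv.notify_all();
      }
      void shut_down() {
        { std::lock_guard<std::mutex> l(_lock); _shutdown = true; }
        _cv.notify_all();
      }
    };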
@@ -743,6 +780,10 @@
 void CompileBroker::compilation_init() {
   _last_method_compiled[0] = '\0';
 
+  // No need to initialize compilation system if we do not use it.
+  if (!UseCompiler) {
+    return;
+  }
 #ifndef SHARK
   // Set the interface to the current compiler(s).
   int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
@@ -874,10 +915,8 @@
 }
 
 
-
-// ------------------------------------------------------------------
-// CompileBroker::make_compiler_thread
-CompilerThread* CompileBroker::make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, TRAPS) {
+CompilerThread* CompileBroker::make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters,
+                                                    AbstractCompiler* comp, TRAPS) {
   CompilerThread* compiler_thread = NULL;
 
   Klass* k =
@@ -944,6 +983,7 @@
     java_lang_Thread::set_daemon(thread_oop());
 
     compiler_thread->set_threadObj(thread_oop());
+    compiler_thread->set_compiler(comp);
     Threads::add(compiler_thread);
     Thread::start(compiler_thread);
   }
@@ -955,25 +995,24 @@
 }
 
 
-// ------------------------------------------------------------------
-// CompileBroker::init_compiler_threads
-//
-// Initialize the compilation queue
 void CompileBroker::init_compiler_threads(int c1_compiler_count, int c2_compiler_count) {
   EXCEPTION_MARK;
 #if !defined(ZERO) && !defined(SHARK) && !defined(PPC64)
   assert(c2_compiler_count > 0 || c1_compiler_count > 0, "No compilers?");
 #endif // !ZERO && !SHARK
+  // Initialize the compilation queue
   if (c2_compiler_count > 0) {
     _c2_method_queue  = new CompileQueue("C2MethodQueue",  MethodCompileQueue_lock);
+    _compilers[1]->set_num_compiler_threads(c2_compiler_count);
   }
   if (c1_compiler_count > 0) {
     _c1_method_queue  = new CompileQueue("C1MethodQueue",  MethodCompileQueue_lock);
+    _compilers[0]->set_num_compiler_threads(c1_compiler_count);
   }
 
   int compiler_count = c1_compiler_count + c2_compiler_count;
 
-  _method_threads =
+  _compiler_threads =
     new (ResourceObj::C_HEAP, mtCompiler) GrowableArray<CompilerThread*>(compiler_count, true);
 
   char name_buffer[256];
@@ -981,21 +1020,22 @@
     // Create a name for our thread.
     sprintf(name_buffer, "C2 CompilerThread%d", i);
     CompilerCounters* counters = new CompilerCounters("compilerThread", i, CHECK);
-    CompilerThread* new_thread = make_compiler_thread(name_buffer, _c2_method_queue, counters, CHECK);
-    _method_threads->append(new_thread);
+    // Shark and C2
+    CompilerThread* new_thread = make_compiler_thread(name_buffer, _c2_method_queue, counters, _compilers[1], CHECK);
+    _compiler_threads->append(new_thread);
   }
 
   for (int i = c2_compiler_count; i < compiler_count; i++) {
     // Create a name for our thread.
     sprintf(name_buffer, "C1 CompilerThread%d", i);
     CompilerCounters* counters = new CompilerCounters("compilerThread", i, CHECK);
-    CompilerThread* new_thread = make_compiler_thread(name_buffer, _c1_method_queue, counters, CHECK);
-    _method_threads->append(new_thread);
+    // C1
+    CompilerThread* new_thread = make_compiler_thread(name_buffer, _c1_method_queue, counters, _compilers[0], CHECK);
+    _compiler_threads->append(new_thread);
   }
 
   if (UsePerfData) {
-    PerfDataManager::create_constant(SUN_CI, "threads", PerfData::U_Bytes,
-                                     compiler_count, CHECK);
+    PerfDataManager::create_constant(SUN_CI, "threads", PerfData::U_Bytes, compiler_count, CHECK);
   }
 }
 
@@ -1012,27 +1052,6 @@
 }
 
 // ------------------------------------------------------------------
-// CompileBroker::is_idle
-bool CompileBroker::is_idle() {
-  if (_c2_method_queue != NULL && !_c2_method_queue->is_empty()) {
-    return false;
-  } else if (_c1_method_queue != NULL && !_c1_method_queue->is_empty()) {
-    return false;
-  } else {
-    int num_threads = _method_threads->length();
-    for (int i=0; i<num_threads; i++) {
-      if (_method_threads->at(i)->task() != NULL) {
-        return false;
-      }
-    }
-
-    // No pending or active compilations.
-    return true;
-  }
-}
-
-
-// ------------------------------------------------------------------
 // CompileBroker::compile_method
 //
 // Request compilation of a method.
@@ -1227,16 +1246,9 @@
         return method_code;
       }
     }
-    if (method->is_not_compilable(comp_level)) return NULL;
-
-    if (UseCodeCacheFlushing) {
-      nmethod* saved = CodeCache::reanimate_saved_code(method());
-      if (saved != NULL) {
-        method->set_code(method, saved);
-        return saved;
-      }
+    if (method->is_not_compilable(comp_level)) {
+      return NULL;
     }
-
   } else {
     // osr compilation
 #ifndef TIERED
@@ -1289,13 +1301,6 @@
     method->jmethod_id();
   }
 
-  // If the compiler is shut off due to code cache getting full
-  // fail out now so blocking compiles dont hang the java thread
-  if (!should_compile_new_jobs()) {
-    CompilationPolicy::policy()->delay_compilation(method());
-    return NULL;
-  }
-
   // do the compilation
   if (method->is_native()) {
     if (!PreferInterpreterNativeStubs || method->is_method_handle_intrinsic()) {
@@ -1305,11 +1310,22 @@
         MutexLocker locker(MethodCompileQueue_lock, THREAD);
         compile_id = assign_compile_id(method, standard_entry_bci);
       }
+      // To properly handle the appendix argument for out-of-line calls we are using a small trampoline that
+      // pops off the appendix argument and jumps to the target (see gen_special_dispatch in SharedRuntime).
+      //
+      // Since normal compiled-to-compiled calls are not able to handle such a thing we MUST generate an adapter
+      // in this case.  If we cannot generate and use one, we cannot execute the out-of-line method handle calls.
       (void) AdapterHandlerLibrary::create_native_wrapper(method, compile_id);
     } else {
       return NULL;
     }
   } else {
+    // If the compiler is shut off due to code cache getting full
+    // fail out now so blocking compiles dont hang the java thread
+    if (!should_compile_new_jobs()) {
+      CompilationPolicy::policy()->delay_compilation(method());
+      return NULL;
+    }
     compile_method_base(method, osr_bci, comp_level, hot_method, hot_count, comment, THREAD);
   }
 
@@ -1541,6 +1557,101 @@
   free_task(task);
 }
 
+// Initialize compiler thread(s) + compiler object(s). The postcondition
+// of this function is that the compiler runtimes are initialized and that
+// compiler threads can start compiling.
+bool CompileBroker::init_compiler_runtime() {
+  CompilerThread* thread = CompilerThread::current();
+  AbstractCompiler* comp = thread->compiler();
+  // Final sanity check - the compiler object must exist
+  guarantee(comp != NULL, "Compiler object must exist");
+
+  int system_dictionary_modification_counter;
+  {
+    MutexLocker locker(Compile_lock, thread);
+    system_dictionary_modification_counter = SystemDictionary::number_of_modifications();
+  }
+
+  {
+    // Must switch to native to allocate ci_env
+    ThreadToNativeFromVM ttn(thread);
+    ciEnv ci_env(NULL, system_dictionary_modification_counter);
+    // Cache Jvmti state
+    ci_env.cache_jvmti_state();
+    // Cache DTrace flags
+    ci_env.cache_dtrace_flags();
+
+    // Switch back to VM state to do compiler initialization
+    ThreadInVMfromNative tv(thread);
+    ResetNoHandleMark rnhm;
+
+    if (!comp->is_shark()) {
+      // Perform per-thread and global initializations
+      comp->initialize();
+    }
+  }
+
+  if (comp->is_failed()) {
+    disable_compilation_forever();
+    // If compiler initialization failed, no compiler thread that is specific to a
+    // particular compiler runtime will ever start to compile methods.
+
+    shutdown_compiler_runtime(comp, thread);
+    return false;
+  }
+
+  // C1 specific check
+  if (comp->is_c1() && (thread->get_buffer_blob() == NULL)) {
+    warning("Initialization of %s thread failed (no space to run compilers)", thread->name());
+    return false;
+  }
+
+  return true;
+}
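The ThreadToNativeFromVM / ThreadInVMfromNative pair used above are RAII transition guards: each switches the thread's state in its constructor and restores the previous state in its destructor, so nesting them produces the native -> VM -> native sequence this function needs. A hedged sketch of that guard shape (ThreadState, ScopedState and init_with_transitions are invented names; the real guards also cooperate with safepoints):

    enum class ThreadState { in_vm, in_native };

    class ScopedState {
      ThreadState& _state;
      ThreadState  _saved;
     public:
      ScopedState(ThreadState& s, ThreadState target) : _state(s), _saved(s) {
        _state = target;                  // transition on entry
      }
      ~ScopedState() { _state = _saved; } // restore on scope exit
    };

    void init_with_transitions(ThreadState& st) {
      ScopedState to_native(st, ThreadState::in_native); // like ThreadToNativeFromVM
      // ... allocate the ciEnv while in native ...
      {
        ScopedState to_vm(st, ThreadState::in_vm);       // like ThreadInVMfromNative
        // ... run the compiler initialization ...
      }                                                  // back to native here
    }                                                    // original state restored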
+
+// If C1 and/or C2 initialization failed, we shut down all compilation.
+// We do this to keep things simple. This can be changed if it ever turns out to be
+// a problem.
+void CompileBroker::shutdown_compiler_runtime(AbstractCompiler* comp, CompilerThread* thread) {
+  // Free buffer blob, if allocated
+  if (thread->get_buffer_blob() != NULL) {
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    CodeCache::free(thread->get_buffer_blob());
+  }
+
+  if (comp->should_perform_shutdown()) {
+    // There are two reasons for shutting down the compiler:
+    // 1) compiler runtime initialization failed
+    // 2) the code cache is full and the flag -XX:-UseCodeCacheFlushing is set
+    warning("Shutting down compiler %s (no space to run compilers)", comp->name());
+
+    // Only one thread per compiler runtime object enters here
+    // Set state to shut down
+    comp->set_shut_down();
+
+    MutexLocker mu(MethodCompileQueue_lock, thread);
+    CompileQueue* queue;
+    if (_c1_method_queue != NULL) {
+      _c1_method_queue->delete_all();
+      queue = _c1_method_queue;
+      _c1_method_queue = NULL;
+      // Delete through the local copy; 'delete _c1_method_queue' after
+      // NULLing the field would delete NULL and leak the queue.
+      delete queue;
+    }
+
+    if (_c2_method_queue != NULL) {
+      _c2_method_queue->delete_all();
+      queue = _c2_method_queue;
+      _c2_method_queue = NULL;
+      delete queue;
+    }
+
+    // We could also delete the compiler runtimes. However, there are references
+    // to the compiler runtime(s) (e.g., nmethod::is_compiled_by_c1()) that would
+    // then fail. This can be done later if necessary.
+  }
+}
+
 // ------------------------------------------------------------------
 // CompileBroker::compiler_thread_loop
 //
@@ -1548,7 +1659,6 @@
 void CompileBroker::compiler_thread_loop() {
   CompilerThread* thread = CompilerThread::current();
   CompileQueue* queue = thread->queue();
-
   // For the thread that initializes the ciObjectFactory
   // this resource mark holds all the shared objects
   ResourceMark rm;
@@ -1577,68 +1687,78 @@
     log->end_elem();
   }
 
-  while (true) {
-    {
-      // We need this HandleMark to avoid leaking VM handles.
-      HandleMark hm(thread);
+  // If compiler thread/runtime initialization fails, exit the compiler thread
+  if (!init_compiler_runtime()) {
+    return;
+  }
+
+  // Poll for new compilation tasks as long as the JVM runs. Compilation
+  // should only be disabled if something went wrong while initializing the
+  // compiler runtimes. This, in turn, should not happen. The only known
+  // reason for compiler runtime initialization to fail is a lack of free
+  // space in the code cache to generate the necessary stubs, etc.
+  while (!is_compilation_disabled_forever()) {
+    // We need this HandleMark to avoid leaking VM handles.
+    HandleMark hm(thread);
 
-      if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) {
-        // the code cache is really full
-        handle_full_code_cache();
-      } else if (UseCodeCacheFlushing && CodeCache::needs_flushing()) {
-        // Attempt to start cleaning the code cache while there is still a little headroom
-        NMethodSweeper::handle_full_code_cache(false);
-      }
+    if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) {
+      // the code cache is really full
+      handle_full_code_cache();
+    }
 
-      CompileTask* task = queue->get();
+    CompileTask* task = queue->get();
+    if (task == NULL) {
+      continue;
+    }
 
-      // Give compiler threads an extra quanta.  They tend to be bursty and
-      // this helps the compiler to finish up the job.
-      if( CompilerThreadHintNoPreempt )
-        os::hint_no_preempt();
-
-      // trace per thread time and compile statistics
-      CompilerCounters* counters = ((CompilerThread*)thread)->counters();
-      PerfTraceTimedEvent(counters->time_counter(), counters->compile_counter());
+    // Give compiler threads an extra quanta.  They tend to be bursty and
+    // this helps the compiler to finish up the job.
+    if( CompilerThreadHintNoPreempt )
+      os::hint_no_preempt();
 
-      // Assign the task to the current thread.  Mark this compilation
-      // thread as active for the profiler.
-      CompileTaskWrapper ctw(task);
-      nmethodLocker result_handle;  // (handle for the nmethod produced by this task)
-      task->set_code_handle(&result_handle);
-      methodHandle method(thread, task->method());
+    // trace per thread time and compile statistics
+    CompilerCounters* counters = ((CompilerThread*)thread)->counters();
+    PerfTraceTimedEvent(counters->time_counter(), counters->compile_counter());
 
-      // Never compile a method if breakpoints are present in it
-      if (method()->number_of_breakpoints() == 0) {
-        // Compile the method.
-        if ((UseCompiler || AlwaysCompileLoopMethods) && CompileBroker::should_compile_new_jobs()) {
+    // Assign the task to the current thread.  Mark this compilation
+    // thread as active for the profiler.
+    CompileTaskWrapper ctw(task);
+    nmethodLocker result_handle;  // (handle for the nmethod produced by this task)
+    task->set_code_handle(&result_handle);
+    methodHandle method(thread, task->method());
+
+    // Never compile a method if breakpoints are present in it
+    if (method()->number_of_breakpoints() == 0) {
+      // Compile the method.
+      if ((UseCompiler || AlwaysCompileLoopMethods) && CompileBroker::should_compile_new_jobs()) {
 #ifdef COMPILER1
-          // Allow repeating compilations for the purpose of benchmarking
-          // compile speed. This is not useful for customers.
-          if (CompilationRepeat != 0) {
-            int compile_count = CompilationRepeat;
-            while (compile_count > 0) {
-              invoke_compiler_on_method(task);
-              nmethod* nm = method->code();
-              if (nm != NULL) {
-                nm->make_zombie();
-                method->clear_code();
-              }
-              compile_count--;
+        // Allow repeating compilations for the purpose of benchmarking
+        // compile speed. This is not useful for customers.
+        if (CompilationRepeat != 0) {
+          int compile_count = CompilationRepeat;
+          while (compile_count > 0) {
+            invoke_compiler_on_method(task);
+            nmethod* nm = method->code();
+            if (nm != NULL) {
+              nm->make_zombie();
+              method->clear_code();
             }
+            compile_count--;
           }
+        }
 #endif /* COMPILER1 */
-          invoke_compiler_on_method(task);
-        } else {
-          // After compilation is disabled, remove remaining methods from queue
-          method->clear_queued_for_compilation();
-        }
+        invoke_compiler_on_method(task);
+      } else {
+        // After compilation is disabled, remove remaining methods from queue
+        method->clear_queued_for_compilation();
       }
     }
   }
+
+  // Shut down compiler runtime
+  shutdown_compiler_runtime(thread->compiler(), thread);
 }
 
-
 // ------------------------------------------------------------------
 // CompileBroker::init_compiler_thread_log
 //
@@ -1718,7 +1838,7 @@
     CodeCache::print_summary(&s, detailed);
   }
   ttyLocker ttyl;
-  tty->print_cr(s.as_string());
+  tty->print("%s", s.as_string());
 }
 
 // ------------------------------------------------------------------
@@ -1943,10 +2063,17 @@
     }
 #endif
     if (UseCodeCacheFlushing) {
-      NMethodSweeper::handle_full_code_cache(true);
+      // Since the code cache is full, immediately stop new compiles
+      if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) {
+        NMethodSweeper::log_sweep("disable_compiler");
+
+        // Switch to 'vm_state'. This ensures that possibly_sweep() can be called
+        // without having to consider the current thread's state.
+        ThreadInVMfromUnknown in_vm;
+        NMethodSweeper::possibly_sweep();
+      }
     } else {
-      UseCompiler               = false;
-      AlwaysCompileLoopMethods  = false;
+      disable_compilation_forever();
     }
   }
   codecache_print(/* detailed= */ true);
--- a/src/share/vm/compiler/compileBroker.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/compiler/compileBroker.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -213,8 +213,12 @@
 
   // Redefine Classes support
   void mark_on_stack();
+  void delete_all();
+  void         print();
 
-  void         print();
+  ~CompileQueue() {
+    assert(is_empty(), "CompileQueue must be empty");
+  }
 };
 
 // CompileTaskWrapper
@@ -266,7 +270,7 @@
   static CompileQueue* _c1_method_queue;
   static CompileTask* _task_free_list;
 
-  static GrowableArray<CompilerThread*>* _method_threads;
+  static GrowableArray<CompilerThread*>* _compiler_threads;
 
   // performance counters
   static PerfCounter* _perf_total_compilation;
@@ -311,7 +315,7 @@
   static int _sum_nmethod_code_size;
   static long _peak_compilation_time;
 
-  static CompilerThread* make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, TRAPS);
+  static CompilerThread* make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, AbstractCompiler* comp, TRAPS);
   static void init_compiler_threads(int c1_compiler_count, int c2_compiler_count);
   static bool compilation_is_complete  (methodHandle method, int osr_bci, int comp_level);
   static bool compilation_is_prohibited(methodHandle method, int osr_bci, int comp_level);
@@ -351,6 +355,9 @@
     if (is_c1_compile(comp_level)) return _c1_method_queue;
     return NULL;
   }
+  static bool init_compiler_runtime();
+  static void shutdown_compiler_runtime(AbstractCompiler* comp, CompilerThread* thread);
+
  public:
   enum {
     // The entry bci used for non-OSR compilations.
@@ -378,9 +385,7 @@
                                  const char* comment, Thread* thread);
 
   static void compiler_thread_loop();
-
   static uint get_compilation_id() { return _compilation_id; }
-  static bool is_idle();
 
   // Set _should_block.
   // Call this from the VM, with Threads_lock held and a safepoint requested.
@@ -391,8 +396,9 @@
 
   enum {
     // Flags for toggling compiler activity
-    stop_compilation = 0,
-    run_compilation  = 1
+    stop_compilation     = 0,
+    run_compilation      = 1,
+    shutdown_compilation = 2
   };
 
   static bool should_compile_new_jobs() { return UseCompiler && (_should_compile_new_jobs == run_compilation); }
@@ -401,6 +407,16 @@
     jint old = Atomic::cmpxchg(new_state, &_should_compile_new_jobs, 1-new_state);
     return (old == (1-new_state));
   }
+
+  static void disable_compilation_forever() {
+    UseCompiler               = false;
+    AlwaysCompileLoopMethods  = false;
+    Atomic::xchg(shutdown_compilation, &_should_compile_new_jobs);
+  }
+
+  static bool is_compilation_disabled_forever() {
+    return _should_compile_new_jobs == shutdown_compilation;
+  }
   static void handle_full_code_cache();
 
   // Return total compilation ticks
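The activity flag added above follows a small protocol: set_should_compile_new_jobs() flips only between run (1) and stop (0) with a compare-and-swap, returning true for the single thread whose CAS performed the transition, while disable_compilation_forever() stores 2 unconditionally, after which the CAS (which only ever expects 0 or 1) can never succeed again. A standalone sketch with std::atomic standing in for HotSpot's Atomic class:

    #include <atomic>

    enum { stop_compilation = 0, run_compilation = 1, shutdown_compilation = 2 };

    std::atomic<int> should_compile_flag(run_compilation);

    // Returns true only for the caller whose CAS performed the 0 <-> 1 flip.
    bool set_should_compile_new_jobs(int new_state) {
      int expected = 1 - new_state;
      return should_compile_flag.compare_exchange_strong(expected, new_state);
    }

    // Unconditional store: once the flag is 2, no CAS above can succeed,
    // so compilation stays off for the remaining lifetime of the VM.
    void disable_compilation_forever() {
      should_compile_flag.exchange(shutdown_compilation);
    }

    bool is_compilation_disabled_forever() {
      return should_compile_flag.load() == shutdown_compilation;
    }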
--- a/src/share/vm/compiler/oopMap.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/compiler/oopMap.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -628,7 +628,7 @@
 
 
 // Returns value of location as an int
-intptr_t value_of_loc(oop *pointer) { return (intptr_t)(*pointer); }
+intptr_t value_of_loc(oop *pointer) { return cast_from_oop<intptr_t>((*pointer)); }
 
 
 void DerivedPointerTable::add(oop *derived_loc, oop *base_loc) {
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -230,7 +230,7 @@
   // depends on this property.
   debug_only(
     FreeChunk* junk = NULL;
-    assert(UseCompressedKlassPointers ||
+    assert(UseCompressedClassPointers ||
            junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
            "Offset of FreeChunk::_prev within FreeChunk must match"
            "  that of OopDesc::_klass within OopDesc");
@@ -1407,7 +1407,7 @@
   assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
   OrderAccess::storestore();
 
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     // Copy gap missed by (aligned) header size calculation below
     obj->set_klass_gap(old->klass_gap());
   }
@@ -9065,7 +9065,7 @@
   return !stack->isEmpty();
 }
 
-#define BUSY  (oop(0x1aff1aff))
+#define BUSY  (cast_to_oop<intptr_t>(0x1aff1aff))
 // (MT-safe) Get a prefix of at most "num" from the list.
 // The overflow list is chained through the mark word of
 // each object in the list. We fetch the entire list,
@@ -9098,7 +9098,7 @@
     return false;
   }
   // Grab the entire list; we'll put back a suffix
-  oop prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
+  oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
   Thread* tid = Thread::current();
   // Before "no_of_gc_threads" was introduced CMSOverflowSpinCount was
   // set to ParallelGCThreads.
@@ -9113,7 +9113,7 @@
       return false;
     } else if (_overflow_list != BUSY) {
       // Try and grab the prefix
-      prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
+      prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
     }
   }
   // If the list was found to be empty, or we spun long
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -481,9 +481,8 @@
 
 ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
   _g1h(g1h),
-  _markBitMap1(MinObjAlignment - 1),
-  _markBitMap2(MinObjAlignment - 1),
-
+  _markBitMap1(log2_intptr(MinObjAlignment)),
+  _markBitMap2(log2_intptr(MinObjAlignment)),
   _parallel_marking_threads(0),
   _max_parallel_marking_threads(0),
   _sleep_factor(0.0),
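Assuming the bitmap constructor parameter is a log2 shifter, as the new argument suggests, the old and new values coincide only for the two smallest power-of-two alignments, which would explain why the wrong argument went unnoticed:

    // For a power-of-two alignment A, 'A - 1' is a bit mask, log2(A) a shift:
    //   A = 1:  A - 1 == 0,  log2(A) == 0   (values coincide)
    //   A = 2:  A - 1 == 1,  log2(A) == 1   (values coincide)
    //   A = 4:  A - 1 == 3,  log2(A) == 2   (mask and shift now differ)
    //   A = 8:  A - 1 == 7,  log2(A) == 3   (mask and shift now differ)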
@@ -2695,7 +2694,7 @@
 
     if (print_it) {
       _out->print_cr(" "PTR_FORMAT"%s",
-                     o, (over_tams) ? " >" : (marked) ? " M" : "");
+                     (void *)o, (over_tams) ? " >" : (marked) ? " M" : "");
       PrintReachableOopClosure oopCl(_out, _vo, _all);
       o->oop_iterate_no_header(&oopCl);
     }
--- a/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -81,7 +81,7 @@
                                          size_t* marked_bytes_array,
                                          BitMap* task_card_bm) {
   G1CollectedHeap* g1h = _g1h;
-  CardTableModRefBS* ct_bs = (CardTableModRefBS*) (g1h->barrier_set());
+  CardTableModRefBS* ct_bs = g1h->g1_barrier_set();
 
   HeapWord* start = mr.start();
   HeapWord* end = mr.end();
--- a/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -55,7 +55,7 @@
   // then _alloc_region is NULL and this object should not be used to
   // satisfy allocation requests (it was done this way to force the
   // correct use of init() and release()).
-  HeapRegion* _alloc_region;
+  HeapRegion* volatile _alloc_region;
 
   // It keeps track of the distinct number of regions that are used
   // for allocation in the active interval of this object, i.e.,
@@ -132,8 +132,9 @@
   static void setup(G1CollectedHeap* g1h, HeapRegion* dummy_region);
 
   HeapRegion* get() const {
+    HeapRegion* hr = _alloc_region;
     // Make sure that the dummy region does not escape this class.
-    return (_alloc_region == _dummy_region) ? NULL : _alloc_region;
+    return (hr == _dummy_region) ? NULL : hr;
   }
 
   uint count() { return _count; }
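Making _alloc_region volatile and copying it into a local first matters because get() would otherwise read the field twice: once for the comparison against _dummy_region and once for the return value; a concurrent update between the two reads could let the dummy region escape. A sketch of the load-once idiom, with std::atomic and a relaxed load as the portable stand-in for the volatile field:

    #include <atomic>

    struct Region;

    struct AllocRegionSketch {
      std::atomic<Region*> _alloc_region;   // stands in for HeapRegion* volatile
      Region* _dummy_region;

      Region* get() const {
        Region* hr = _alloc_region.load(std::memory_order_relaxed); // one read
        // Compare and return the very same value; two separate reads of the
        // field could straddle a concurrent update and disagree.
        return (hr == _dummy_region) ? nullptr : hr;
      }
    };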
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/g1BiasedArray.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/g1BiasedArray.hpp"
+
+#ifndef PRODUCT
+void G1BiasedMappedArrayBase::verify_index(idx_t index) const {
+  guarantee(_base != NULL, "Array not initialized");
+  guarantee(index < length(), err_msg("Index out of bounds, index: "SIZE_FORMAT" length: "SIZE_FORMAT, index, length()));
+}
+
+void G1BiasedMappedArrayBase::verify_biased_index(idx_t biased_index) const {
+  guarantee(_biased_base != NULL, "Array not initialized");
+  guarantee(biased_index >= bias() && biased_index < (bias() + length()),
+    err_msg("Biased index out of bounds, index: "SIZE_FORMAT" bias: "SIZE_FORMAT" length: "SIZE_FORMAT, biased_index, bias(), length()));
+}
+
+void G1BiasedMappedArrayBase::verify_biased_index_inclusive_end(idx_t biased_index) const {
+  guarantee(_biased_base != NULL, "Array not initialized");
+  guarantee(biased_index >= bias() && biased_index <= (bias() + length()),
+    err_msg("Biased index out of inclusive bounds, index: "SIZE_FORMAT" bias: "SIZE_FORMAT" length: "SIZE_FORMAT, biased_index, bias(), length()));
+}
+
+class TestMappedArray : public G1BiasedMappedArray<int> {
+protected:
+  virtual int default_value() const { return 0xBAADBABE; }
+public:
+  static void test_biasedarray() {
+    const size_t REGION_SIZE_IN_WORDS = 512;
+    const size_t NUM_REGIONS = 20;
+    HeapWord* fake_heap = (HeapWord*)LP64_ONLY(0xBAAA00000) NOT_LP64(0xBA000000); // Any value that is non-zero
+
+    TestMappedArray array;
+    array.initialize(fake_heap, fake_heap + REGION_SIZE_IN_WORDS * NUM_REGIONS,
+            REGION_SIZE_IN_WORDS * HeapWordSize);
+    // Check address calculation (bounds)
+    assert(array.bottom_address_mapped() == fake_heap,
+      err_msg("bottom mapped address should be "PTR_FORMAT", but is "PTR_FORMAT, fake_heap, array.bottom_address_mapped()));
+    assert(array.end_address_mapped() == (fake_heap + REGION_SIZE_IN_WORDS * NUM_REGIONS), "must be");
+
+    int* bottom = array.address_mapped_to(fake_heap);
+    assert((void*)bottom == (void*) array.base(), "must be");
+    int* end = array.address_mapped_to(fake_heap + REGION_SIZE_IN_WORDS * NUM_REGIONS);
+    assert((void*)end == (void*)(array.base() + array.length()), "must be");
+    // The entire array should contain default value elements
+    for (int* current = bottom; current < end; current++) {
+      assert(*current == array.default_value(), "must be");
+    }
+
+    // Test setting values in the table
+
+    HeapWord* region_start_address = fake_heap + REGION_SIZE_IN_WORDS * (NUM_REGIONS / 2);
+    HeapWord* region_end_address = fake_heap + (REGION_SIZE_IN_WORDS * (NUM_REGIONS / 2) + REGION_SIZE_IN_WORDS - 1);
+
+    // Set/get by address tests: invert some value; first retrieve one
+    int actual_value = array.get_by_index(NUM_REGIONS / 2);
+    array.set_by_index(NUM_REGIONS / 2, ~actual_value);
+    // Get the same value by address, should correspond to the start of the "region"
+    int value = array.get_by_address(region_start_address);
+    assert(value == ~actual_value, "must be");
+    // Get the same value by address, at one HeapWord before the start
+    value = array.get_by_address(region_start_address - 1);
+    assert(value == array.default_value(), "must be");
+    // Get the same value by address, at the end of the "region"
+    value = array.get_by_address(region_end_address);
+    assert(value == ~actual_value, "must be");
+    // Make sure the next value maps to another index
+    value = array.get_by_address(region_end_address + 1);
+    assert(value == array.default_value(), "must be");
+
+    // Reset the value in the array
+    array.set_by_address(region_start_address + (region_end_address - region_start_address) / 2, actual_value);
+
+    // The entire array should have the default value again
+    for (int* current = bottom; current < end; current++) {
+      assert(*current == array.default_value(), "must be");
+    }
+
+    // Set/get by index tests: invert some value
+    idx_t index = NUM_REGIONS / 2;
+    actual_value = array.get_by_index(index);
+    array.set_by_index(index, ~actual_value);
+
+    value = array.get_by_index(index);
+    assert(value == ~actual_value, "must be");
+
+    value = array.get_by_index(index - 1);
+    assert(value == array.default_value(), "must be");
+
+    value = array.get_by_index(index + 1);
+    assert(value == array.default_value(), "must be");
+
+    array.set_by_index(0, 0);
+    value = array.get_by_index(0);
+    assert(value == 0, "must be");
+
+    array.set_by_index(array.length() - 1, 0);
+    value = array.get_by_index(array.length() - 1);
+    assert(value == 0, "must be");
+
+    array.set_by_index(index, 0);
+
+    // The array should have three zeros, and default values otherwise
+    size_t num_zeros = 0;
+    for (int* current = bottom; current < end; current++) {
+      assert(*current == array.default_value() || *current == 0, "must be");
+      if (*current == 0) {
+        num_zeros++;
+      }
+    }
+    assert(num_zeros == 3, "must be");
+  }
+};
+
+void TestG1BiasedArray_test() {
+  TestMappedArray::test_biasedarray();
+}
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/g1BiasedArray.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
+
+#include "utilities/debug.hpp"
+#include "memory/allocation.inline.hpp"
+
+// Implements the common base functionality for arrays that provide
+// access to their elements using a biased index.
+// The element type is defined when instantiating the template.
+class G1BiasedMappedArrayBase VALUE_OBJ_CLASS_SPEC {
+  friend class VMStructs;
+public:
+  typedef size_t idx_t;
+protected:
+  address _base;          // the real base address
+  size_t _length;         // the length of the array
+  address _biased_base;   // base address biased by "bias" elements
+  size_t _bias;           // the bias, i.e. the offset in elements by which _biased_base precedes _base
+  uint _shift_by;         // the number of bits to shift right when mapping an address to an array index
+
+protected:
+
+  G1BiasedMappedArrayBase() : _base(NULL), _length(0), _biased_base(NULL),
+    _bias(0), _shift_by(0) { }
+
+  // Allocate a new array, generic version.
+  static address create_new_base_array(size_t length, size_t elem_size) {
+    assert(length > 0, "just checking");
+    assert(elem_size > 0, "just checking");
+    return NEW_C_HEAP_ARRAY(u_char, length * elem_size, mtGC);
+  }
+
+  // Initialize the members of this class. The biased start address of this array
+  // is the bias (in elements) multiplied by the element size.
+  void initialize_base(address base, size_t length, size_t bias, size_t elem_size, uint shift_by) {
+    assert(base != NULL, "just checking");
+    assert(length > 0, "just checking");
+    assert(shift_by < sizeof(uintptr_t) * 8, err_msg("Shifting by %zd, larger than word size?", shift_by));
+    _base = base;
+    _length = length;
+    _biased_base = base - (bias * elem_size);
+    _bias = bias;
+    _shift_by = shift_by;
+  }
+
+  // Allocate and initialize this array to cover the heap addresses in the range
+  // of [bottom, end).
+  void initialize(HeapWord* bottom, HeapWord* end, size_t target_elem_size_in_bytes, size_t mapping_granularity_in_bytes) {
+    assert(mapping_granularity_in_bytes > 0, "just checking");
+    assert(is_power_of_2(mapping_granularity_in_bytes),
+      err_msg("mapping granularity must be power of 2, is %zd", mapping_granularity_in_bytes));
+    assert((uintptr_t)bottom % mapping_granularity_in_bytes == 0,
+      err_msg("bottom mapping area address must be a multiple of mapping granularity %zd, is "PTR_FORMAT,
+        mapping_granularity_in_bytes, bottom));
+    assert((uintptr_t)end % mapping_granularity_in_bytes == 0,
+      err_msg("end mapping area address must be a multiple of mapping granularity %zd, is "PTR_FORMAT,
+        mapping_granularity_in_bytes, end));
+    size_t num_target_elems = (end - bottom) / (mapping_granularity_in_bytes / HeapWordSize);
+    idx_t bias = (uintptr_t)bottom / mapping_granularity_in_bytes;
+    address base = create_new_base_array(num_target_elems, target_elem_size_in_bytes);
+    initialize_base(base, num_target_elems, bias, target_elem_size_in_bytes, log2_intptr(mapping_granularity_in_bytes));
+  }
+
+  size_t bias() const { return _bias; }
+  uint shift_by() const { return _shift_by; }
+
+  void verify_index(idx_t index) const PRODUCT_RETURN;
+  void verify_biased_index(idx_t biased_index) const PRODUCT_RETURN;
+  void verify_biased_index_inclusive_end(idx_t biased_index) const PRODUCT_RETURN;
+
+public:
+   // Return the length of the array in elements.
+   size_t length() const { return _length; }
+};
+
+// Array that provides biased access and mapping from (valid) addresses in the
+// heap into this array.
+template<class T>
+class G1BiasedMappedArray : public G1BiasedMappedArrayBase {
+public:
+  typedef G1BiasedMappedArrayBase::idx_t idx_t;
+
+  T* base() const { return (T*)G1BiasedMappedArrayBase::_base; }
+  // Return the element of the array at the given index. The index must be
+  // valid; it is sanity-checked in non-product builds.
+  T get_by_index(idx_t index) const {
+    verify_index(index);
+    return this->base()[index];
+  }
+
+  // Set the element of the array at the given index to the given value.
+  // The index must be valid; it is sanity-checked in non-product builds.
+  void set_by_index(idx_t index, T value) {
+    verify_index(index);
+    this->base()[index] = value;
+  }
+
+  // The raw biased base pointer.
+  T* biased_base() const { return (T*)G1BiasedMappedArrayBase::_biased_base; }
+
+  // Return the element of the array that covers the given word in the
+  // heap. Assumes the address is within the covered range.
+  T get_by_address(HeapWord* value) const {
+    idx_t biased_index = ((uintptr_t)value) >> this->shift_by();
+    this->verify_biased_index(biased_index);
+    return biased_base()[biased_index];
+  }
+
+  // Set the value of the array entry that corresponds to the given address.
+  void set_by_address(HeapWord* address, T value) {
+    idx_t biased_index = ((uintptr_t)address) >> this->shift_by();
+    this->verify_biased_index(biased_index);
+    biased_base()[biased_index] = value;
+  }
+
+protected:
+  // Returns the address of the element the given address maps to
+  T* address_mapped_to(HeapWord* address) {
+    idx_t biased_index = ((uintptr_t)address) >> this->shift_by();
+    this->verify_biased_index_inclusive_end(biased_index);
+    return biased_base() + biased_index;
+  }
+
+public:
+  // Return the smallest address (inclusive) in the heap that this array covers.
+  HeapWord* bottom_address_mapped() const {
+    return (HeapWord*) ((uintptr_t)this->bias() << this->shift_by());
+  }
+
+  // Return the highest address (exclusive) in the heap that this array covers.
+  HeapWord* end_address_mapped() const {
+    return (HeapWord*) ((uintptr_t)(this->bias() + this->length()) << this->shift_by());
+  }
+
+protected:
+  virtual T default_value() const = 0;
+  // Set all elements of the array to the default value.
+  void clear() {
+    T value = default_value();
+    for (idx_t i = 0; i < length(); i++) {
+      set_by_index(i, value);
+    }
+  }
+public:
+  G1BiasedMappedArray() {}
+
+  // Allocate and initialize this array to cover the heap addresses in the range
+  // of [bottom, end).
+  void initialize(HeapWord* bottom, HeapWord* end, size_t mapping_granularity) {
+    G1BiasedMappedArrayBase::initialize(bottom, end, sizeof(T), mapping_granularity);
+    this->clear();
+  }
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
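The trick behind G1BiasedMappedArray is that storing _biased_base = _base - _bias lets a covered heap address be mapped to its slot with a single shift and no subtraction at lookup time. A self-contained toy version of the same computation (all constants are made up; the out-of-range intermediate pointer mirrors what _biased_base is in the real code):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    int main() {
      const uintptr_t bottom      = 0x80000000;  // covered range starts here
      const unsigned  shift       = 12;          // log2 of the mapping granularity
      const uintptr_t granularity = uintptr_t(1) << shift;  // 4K bytes per slot
      const size_t    length      = 16;          // number of slots
      const size_t    bias        = bottom / granularity;

      std::vector<int> table(length, 0);
      // Biased base: slots can be indexed directly with (addr >> shift).
      int* biased_base = table.data() - bias;

      uintptr_t addr = bottom + 5 * granularity + 123;  // an address in slot 5
      biased_base[addr >> shift] = 42;
      assert(table[5] == 42);
      return 0;
    }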
--- a/src/share/vm/gc_implementation/g1/g1CardCounts.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_implementation/g1/g1CardCounts.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -33,8 +33,8 @@
 
 void G1CardCounts::clear_range(size_t from_card_num, size_t to_card_num) {
   if (has_count_table()) {
-    check_card_num(from_card_num,
-                   err_msg("from card num out of range: "SIZE_FORMAT, from_card_num));
+    assert(from_card_num >= 0 && from_card_num < _committed_max_card_num,
+           err_msg("from card num out of range: "SIZE_FORMAT, from_card_num));
     assert(from_card_num < to_card_num,
            err_msg("Wrong order? from: " SIZE_FORMAT ", to: "SIZE_FORMAT,
                    from_card_num, to_card_num));
@@ -65,9 +65,7 @@
     // threshold limit is no more than this.
     guarantee(G1ConcRSHotCardLimit <= max_jubyte, "sanity");
 
-    ModRefBarrierSet* bs = _g1h->mr_bs();
-    guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition");
-    _ct_bs = (CardTableModRefBS*)bs;
+    _ct_bs = _g1h->g1_barrier_set();
     _ct_bot = _ct_bs->byte_for_const(_g1h->reserved_region().start());
 
     // Allocate/Reserve the counts table
--- a/src/share/vm/gc_implementation/g1/g1CardCounts.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_implementation/g1/g1CardCounts.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -72,25 +72,21 @@
     return has_reserved_count_table() && _committed_max_card_num > 0;
   }
 
-  void check_card_num(size_t card_num, const char* msg) {
-    assert(card_num >= 0 && card_num < _committed_max_card_num, msg);
-  }
-
   size_t ptr_2_card_num(const jbyte* card_ptr) {
     assert(card_ptr >= _ct_bot,
-           err_msg("Inavalied card pointer: "
+           err_msg("Invalid card pointer: "
                    "card_ptr: " PTR_FORMAT ", "
                    "_ct_bot: " PTR_FORMAT,
                    card_ptr, _ct_bot));
     size_t card_num = pointer_delta(card_ptr, _ct_bot, sizeof(jbyte));
-    check_card_num(card_num,
-                   err_msg("card pointer out of range: " PTR_FORMAT, card_ptr));
+    assert(card_num >= 0 && card_num < _committed_max_card_num,
+           err_msg("card pointer out of range: " PTR_FORMAT, card_ptr));
     return card_num;
   }
 
   jbyte* card_num_2_ptr(size_t card_num) {
-    check_card_num(card_num,
-                   err_msg("card num out of range: "SIZE_FORMAT, card_num));
+    assert(card_num >= 0 && card_num < _committed_max_card_num,
+           err_msg("card num out of range: "SIZE_FORMAT, card_num));
     return (jbyte*) (_ct_bot + card_num);
   }
 
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -125,10 +125,8 @@
   int _histo[256];
 public:
   ClearLoggedCardTableEntryClosure() :
-    _calls(0)
+    _calls(0), _g1h(G1CollectedHeap::heap()), _ctbs(_g1h->g1_barrier_set())
   {
-    _g1h = G1CollectedHeap::heap();
-    _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
     for (int i = 0; i < 256; i++) _histo[i] = 0;
   }
   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
@@ -158,11 +156,8 @@
   CardTableModRefBS* _ctbs;
 public:
   RedirtyLoggedCardTableEntryClosure() :
-    _calls(0)
-  {
-    _g1h = G1CollectedHeap::heap();
-    _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
-  }
+    _calls(0), _g1h(G1CollectedHeap::heap()), _ctbs(_g1h->g1_barrier_set()) {}
+
   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
     if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
       _calls++;
@@ -478,7 +473,7 @@
 
 void G1CollectedHeap::check_ct_logs_at_safepoint() {
   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
-  CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
+  CardTableModRefBS* ct_bs = g1_barrier_set();
 
   // Count the dirty cards at the start.
   CountNonCleanMemRegionClosure count1(this);
@@ -1205,7 +1200,7 @@
 };
 
 void G1CollectedHeap::clear_rsets_post_compaction() {
-  PostMCRemSetClearClosure rs_clear(this, mr_bs());
+  PostMCRemSetClearClosure rs_clear(this, g1_barrier_set());
   heap_region_iterate(&rs_clear);
 }
 
@@ -1777,7 +1772,6 @@
 }
 
 bool G1CollectedHeap::expand(size_t expand_bytes) {
-  size_t old_mem_size = _g1_storage.committed_size();
   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
   aligned_expand_bytes = align_size_up(aligned_expand_bytes,
                                        HeapRegion::GrainBytes);
@@ -1787,6 +1781,13 @@
                 ergo_format_byte("attempted expansion amount"),
                 expand_bytes, aligned_expand_bytes);
 
+  if (_g1_storage.uncommitted_size() == 0) {
+    ergo_verbose0(ErgoHeapSizing,
+                      "did not expand the heap",
+                      ergo_format_reason("heap already fully expanded"));
+    return false;
+  }
+
   // First commit the memory.
   HeapWord* old_end = (HeapWord*) _g1_storage.high();
   bool successful = _g1_storage.expand_by(aligned_expand_bytes);
@@ -1845,7 +1846,6 @@
 }
 
 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
-  size_t old_mem_size = _g1_storage.committed_size();
   size_t aligned_shrink_bytes =
     ReservedSpace::page_align_size_down(shrink_bytes);
   aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
@@ -2045,20 +2045,13 @@
   // Create the gen rem set (and barrier set) for the entire reserved region.
   _rem_set = collector_policy()->create_rem_set(_reserved, 2);
   set_barrier_set(rem_set()->bs());
-  if (barrier_set()->is_a(BarrierSet::ModRef)) {
-    _mr_bs = (ModRefBarrierSet*)_barrier_set;
-  } else {
-    vm_exit_during_initialization("G1 requires a mod ref bs.");
+  if (!barrier_set()->is_a(BarrierSet::G1SATBCTLogging)) {
+    vm_exit_during_initialization("G1 requires a G1SATBLoggingCardTableModRefBS");
     return JNI_ENOMEM;
   }
 
   // Also create a G1 rem set.
-  if (mr_bs()->is_a(BarrierSet::CardTableModRef)) {
-    _g1_rem_set = new G1RemSet(this, (CardTableModRefBS*)mr_bs());
-  } else {
-    vm_exit_during_initialization("G1 requires a cardtable mod ref bs.");
-    return JNI_ENOMEM;
-  }
+  _g1_rem_set = new G1RemSet(this, g1_barrier_set());
 
   // Carve out the G1 part of the heap.
 
@@ -2069,8 +2062,10 @@
   _g1_storage.initialize(g1_rs, 0);
   _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
   _hrs.initialize((HeapWord*) _g1_reserved.start(),
-                  (HeapWord*) _g1_reserved.end(),
-                  _expansion_regions);
+                  (HeapWord*) _g1_reserved.end());
+  assert(_hrs.max_length() == _expansion_regions,
+         err_msg("max length: %u expansion regions: %u",
+                 _hrs.max_length(), _expansion_regions));
 
   // Do later initialization work for concurrent refinement.
   _cg1r->init();
@@ -2191,6 +2186,10 @@
   return JNI_OK;
 }
 
+size_t G1CollectedHeap::conservative_max_heap_alignment() {
+  return HeapRegion::max_region_size();
+}
+
 void G1CollectedHeap::ref_processing_init() {
   // Reference processing in G1 currently works as follows:
   //
@@ -2493,11 +2492,11 @@
 
 void G1CollectedHeap::register_concurrent_cycle_end() {
   if (_concurrent_cycle_started) {
-    _gc_timer_cm->register_gc_end(os::elapsed_counter());
-
     if (_cm->has_aborted()) {
       _gc_tracer_cm->report_concurrent_mode_failure();
     }
+
+    _gc_timer_cm->register_gc_end(os::elapsed_counter());
     _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
 
     _concurrent_cycle_started = false;
@@ -3675,6 +3674,11 @@
   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
   // Fill TLAB's and such
   ensure_parsability(true);
+
+  if (G1SummarizeRSetStats && (G1SummarizeRSetStatsPeriod > 0) &&
+      (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
+    g1_rem_set()->print_periodic_summary_info("Before GC RS summary");
+  }
 }
 
 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
@@ -3683,7 +3687,7 @@
       (G1SummarizeRSetStatsPeriod > 0) &&
       // we are at the end of the GC. Total collections has already been increased.
       ((total_collections() - 1) % G1SummarizeRSetStatsPeriod == 0)) {
-    g1_rem_set()->print_periodic_summary_info();
+    g1_rem_set()->print_periodic_summary_info("After GC RS summary");
   }
 
   // FIXME: what is this about?
@@ -4544,7 +4548,7 @@
   : _g1h(g1h),
     _refs(g1h->task_queue(queue_num)),
     _dcq(&g1h->dirty_card_queue_set()),
-    _ct_bs((CardTableModRefBS*)_g1h->barrier_set()),
+    _ct_bs(g1h->g1_barrier_set()),
     _g1_rem(g1h->g1_rem_set()),
     _hash_seed(17), _queue_num(queue_num),
     _term_attempts(0),
@@ -4611,7 +4615,7 @@
   assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref));
   oop p = oopDesc::load_decode_heap_oop(ref);
   assert(_g1h->is_in_g1_reserved(p),
-         err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
+         err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p));
   return true;
 }
 
@@ -4621,11 +4625,11 @@
     // Must be in the collection set--it's already been copied.
     oop p = clear_partial_array_mask(ref);
     assert(_g1h->obj_in_cs(p),
-           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
+           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p));
   } else {
     oop p = oopDesc::load_decode_heap_oop(ref);
     assert(_g1h->is_in_g1_reserved(p),
-           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
+           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p));
   }
   return true;
 }
@@ -5973,11 +5977,11 @@
 }
 
 class G1ParCleanupCTTask : public AbstractGangTask {
-  CardTableModRefBS* _ct_bs;
+  G1SATBCardTableModRefBS* _ct_bs;
   G1CollectedHeap* _g1h;
   HeapRegion* volatile _su_head;
 public:
-  G1ParCleanupCTTask(CardTableModRefBS* ct_bs,
+  G1ParCleanupCTTask(G1SATBCardTableModRefBS* ct_bs,
                      G1CollectedHeap* g1h) :
     AbstractGangTask("G1 Par Cleanup CT Task"),
     _ct_bs(ct_bs), _g1h(g1h) { }
@@ -6000,9 +6004,9 @@
 #ifndef PRODUCT
 class G1VerifyCardTableCleanup: public HeapRegionClosure {
   G1CollectedHeap* _g1h;
-  CardTableModRefBS* _ct_bs;
+  G1SATBCardTableModRefBS* _ct_bs;
 public:
-  G1VerifyCardTableCleanup(G1CollectedHeap* g1h, CardTableModRefBS* ct_bs)
+  G1VerifyCardTableCleanup(G1CollectedHeap* g1h, G1SATBCardTableModRefBS* ct_bs)
     : _g1h(g1h), _ct_bs(ct_bs) { }
   virtual bool doHeapRegion(HeapRegion* r) {
     if (r->is_survivor()) {
@@ -6016,7 +6020,7 @@
 
 void G1CollectedHeap::verify_not_dirty_region(HeapRegion* hr) {
   // All of the region should be clean.
-  CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
+  G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
   MemRegion mr(hr->bottom(), hr->end());
   ct_bs->verify_not_dirty_region(mr);
 }
@@ -6029,13 +6033,17 @@
   // not dirty that area (one less thing to have to do while holding
   // a lock). So we can only verify that [bottom(),pre_dummy_top()]
   // is dirty.
-  CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set();
+  G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
   MemRegion mr(hr->bottom(), hr->pre_dummy_top());
-  ct_bs->verify_dirty_region(mr);
+  if (hr->is_young()) {
+    ct_bs->verify_g1_young_region(mr);
+  } else {
+    ct_bs->verify_dirty_region(mr);
+  }
 }
 
 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
-  CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set();
+  G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
   for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
     verify_dirty_region(hr);
   }
@@ -6047,7 +6055,7 @@
 #endif
 
 void G1CollectedHeap::cleanUpCardTable() {
-  CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set());
+  G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
   double start = os::elapsedTime();
 
   {
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -31,6 +31,7 @@
 #include "gc_implementation/g1/g1HRPrinter.hpp"
 #include "gc_implementation/g1/g1MonitoringSupport.hpp"
 #include "gc_implementation/g1/g1RemSet.hpp"
+#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc_implementation/g1/g1YCTypes.hpp"
 #include "gc_implementation/g1/heapRegionSeq.hpp"
 #include "gc_implementation/g1/heapRegionSets.hpp"
@@ -703,7 +704,7 @@
     if (_g1_committed.contains((HeapWord*) obj)) {
       // no need to subtract the bottom of the heap from obj,
       // _in_cset_fast_test is biased
-      uintx index = (uintx) obj >> HeapRegion::LogOfHRGrainBytes;
+      uintx index = cast_from_oop<uintx>(obj) >> HeapRegion::LogOfHRGrainBytes;
       bool ret = _in_cset_fast_test[index];
       // let's make sure the result is consistent with what the slower
       // test returns
@@ -791,8 +792,6 @@
 
   // The g1 remembered set of the heap.
   G1RemSet* _g1_rem_set;
-  // And it's mod ref barrier set, used to track updates for the above.
-  ModRefBarrierSet* _mr_bs;
 
   // A set of cards that cover the objects for which the Rsets should be updated
   // concurrently after the collection.
@@ -1092,6 +1091,9 @@
   // specified by the policy object.
   jint initialize();
 
+  // Return the (conservative) maximum heap alignment for any G1 heap
+  static size_t conservative_max_heap_alignment();
+
   // Initialize weak reference processing.
   virtual void ref_processing_init();
 
@@ -1124,7 +1126,6 @@
 
   // The rem set and barrier set.
   G1RemSet* g1_rem_set() const { return _g1_rem_set; }
-  ModRefBarrierSet* mr_bs() const { return _mr_bs; }
 
   unsigned get_gc_time_stamp() {
     return _gc_time_stamp;
@@ -1343,6 +1344,10 @@
 
   virtual bool is_in_closed_subset(const void* p) const;
 
+  G1SATBCardTableModRefBS* g1_barrier_set() {
+    return (G1SATBCardTableModRefBS*) barrier_set();
+  }
+
   // This resets the card table to all zeros.  It is used after
   // a collection pause which used the card table to claim cards.
   void cleanUpCardTable();
@@ -1872,7 +1877,7 @@
   G1CollectedHeap* _g1h;
   RefToScanQueue*  _refs;
   DirtyCardQueue   _dcq;
-  CardTableModRefBS* _ct_bs;
+  G1SATBCardTableModRefBS* _ct_bs;
   G1RemSet* _g1_rem;
 
   G1ParGCAllocBufferContainer  _surviving_alloc_buffer;
@@ -1911,7 +1916,7 @@
   void   add_to_undo_waste(size_t waste)         { _undo_waste += waste; }
 
   DirtyCardQueue& dirty_card_queue()             { return _dcq;  }
-  CardTableModRefBS* ctbs()                      { return _ct_bs; }
+  G1SATBCardTableModRefBS* ctbs()                { return _ct_bs; }
 
   template <class T> void immediate_rs_update(HeapRegion* from, T* p, int tid) {
     if (!from->is_survivor()) {
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -29,6 +29,7 @@
 #include "gc_implementation/g1/g1CollectedHeap.hpp"
 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
+#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
 #include "utilities/taskqueue.hpp"
 
@@ -134,7 +135,7 @@
   assert(containing_hr->is_in(end - 1), "it should also contain end - 1");
 
   MemRegion mr(start, end);
-  ((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr);
+  g1_barrier_set()->g1_mark_as_young(mr);
 }
 
 inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const {
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -168,7 +168,15 @@
   // Set up the region size and associated fields. Given that the
   // policy is created before the heap, we have to set this up here,
   // so it's done as soon as possible.
-  HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
+
+  // It would have been natural to pass initial_heap_byte_size() and
+  // max_heap_byte_size() to setup_heap_region_size() but those have
+  // not been set up at this point since they should be aligned with
+  // the region size. So, there is a circular dependency here. We base
+  // the region size on the heap size, but the heap size should be
+  // aligned with the region size. To get around this we use the
+  // unaligned values for the heap.
+  HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
   HeapRegionRemSet::setup_remset_size();
 
   G1ErgoVerbose::initialize();
@@ -311,10 +319,10 @@
 }
 
 void G1CollectorPolicy::initialize_flags() {
-  set_min_alignment(HeapRegion::GrainBytes);
+  _min_alignment = HeapRegion::GrainBytes;
   size_t card_table_alignment = GenRemSet::max_alignment_constraint(rem_set_name());
   size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
-  set_max_alignment(MAX3(card_table_alignment, min_alignment(), page_size));
+  _max_alignment = MAX3(card_table_alignment, _min_alignment, page_size);
   if (SurvivorRatio < 1) {
     vm_exit_during_initialization("Invalid survivor ratio specified");
   }
@@ -336,6 +344,10 @@
     }
   }
 
+  if (FLAG_IS_CMDLINE(NewSize) && FLAG_IS_CMDLINE(MaxNewSize) && NewSize > MaxNewSize) {
+    vm_exit_during_initialization("Initial young gen size set larger than the maximum young gen size");
+  }
+
   if (FLAG_IS_CMDLINE(NewSize)) {
     _min_desired_young_length = MAX2((uint) (NewSize / HeapRegion::GrainBytes),
                                      1U);
--- a/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -41,11 +41,11 @@
 private:
   G1CollectedHeap* _g1;
   DirtyCardQueue *_dcq;
-  CardTableModRefBS* _ct_bs;
+  G1SATBCardTableModRefBS* _ct_bs;
 
 public:
   UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
-    _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {}
+    _g1(g1), _ct_bs(_g1->g1_barrier_set()), _dcq(dcq) {}
 
   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
   virtual void do_oop(      oop* p) { do_oop_work(p); }
--- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -220,7 +220,7 @@
 public:
   G1PrepareCompactClosure(CompactibleSpace* cs)
   : _g1h(G1CollectedHeap::heap()),
-    _mrbs(G1CollectedHeap::heap()->mr_bs()),
+    _mrbs(_g1h->g1_barrier_set()),
     _cp(NULL, cs, cs->initialize_threshold()),
     _humongous_proxy_set("G1MarkSweep Humongous Proxy Set") { }
 
--- a/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -91,12 +91,12 @@
 }
 
 template <class T> inline T* set_partial_array_mask(T obj) {
-  assert(((uintptr_t)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
-  return (T*) ((uintptr_t)obj | G1_PARTIAL_ARRAY_MASK);
+  assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
+  return (T*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
 }
 
 template <class T> inline oop clear_partial_array_mask(T* ref) {
-  return oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
+  return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
 }
 
 class G1ParScanPartialArrayClosure : public G1ParClosureSuper {
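The two helpers above implement pointer tagging: instead of allocating a wrapper object for a partial-array scan task, G1 sets a spare bit inside the pointer itself, and the added (void *) cast keeps the conversion well-formed for both the oop and narrowOop instantiations. A minimal stand-alone sketch of the same idea in C++, using an invented PARTIAL_MASK bit rather than HotSpot's G1_PARTIAL_ARRAY_MASK:

#include <cassert>
#include <cstdint>

// Invented tag bit; G1 defines its own G1_PARTIAL_ARRAY_MASK value.
constexpr uintptr_t PARTIAL_MASK = 0x2;

template <class T> T* set_partial_array_mask(T* obj) {
  // Alignment guarantees the bit is free; losing set bits would corrupt
  // the pointer, hence the "Information loss!" assert in the real code.
  assert(((uintptr_t)obj & PARTIAL_MASK) == 0);
  return (T*)((uintptr_t)obj | PARTIAL_MASK);
}

template <class T> bool is_partial_array_task(T* ref) {
  return ((uintptr_t)ref & PARTIAL_MASK) != 0;
}

template <class T> T* clear_partial_array_mask(T* ref) {
  return (T*)((uintptr_t)ref & ~PARTIAL_MASK);  // recover the real pointer
}

Because heap objects are at least word-aligned, the low bits of a genuine reference are always zero, which is exactly what the assert checks before tagging.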
--- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -83,7 +83,9 @@
   for (uint i = 0; i < n_workers(); i++) {
     _cset_rs_update_cl[i] = NULL;
   }
-  _prev_period_summary.initialize(this, n_workers());
+  if (G1SummarizeRSetStats) {
+    _prev_period_summary.initialize(this);
+  }
 }
 
 G1RemSet::~G1RemSet() {
@@ -109,7 +111,7 @@
   CodeBlobToOopClosure* _code_root_cl;
 
   G1BlockOffsetSharedArray* _bot_shared;
-  CardTableModRefBS *_ct_bs;
+  G1SATBCardTableModRefBS *_ct_bs;
 
   double _strong_code_root_scan_time_sec;
   int    _worker_i;
@@ -130,7 +132,7 @@
   {
     _g1h = G1CollectedHeap::heap();
     _bot_shared = _g1h->bot_shared();
-    _ct_bs = (CardTableModRefBS*) (_g1h->barrier_set());
+    _ct_bs = _g1h->g1_barrier_set();
     _block_size = MAX2<int>(G1RSetScanBlockSize, 1);
   }
 
@@ -505,12 +507,7 @@
   ScrubRSClosure(BitMap* region_bm, BitMap* card_bm) :
     _g1h(G1CollectedHeap::heap()),
     _region_bm(region_bm), _card_bm(card_bm),
-    _ctbs(NULL)
-  {
-    ModRefBarrierSet* bs = _g1h->mr_bs();
-    guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition");
-    _ctbs = (CardTableModRefBS*)bs;
-  }
+    _ctbs(_g1h->g1_barrier_set()) {}
 
   bool doHeapRegion(HeapRegion* r) {
     if (!r->continuesHumongous()) {
@@ -731,19 +728,19 @@
   return has_refs_into_cset;
 }
 
-void G1RemSet::print_periodic_summary_info() {
+void G1RemSet::print_periodic_summary_info(const char* header) {
   G1RemSetSummary current;
-  current.initialize(this, n_workers());
+  current.initialize(this);
 
   _prev_period_summary.subtract_from(&current);
-  print_summary_info(&_prev_period_summary);
+  print_summary_info(&_prev_period_summary, header);
 
   _prev_period_summary.set(&current);
 }
 
 void G1RemSet::print_summary_info() {
   G1RemSetSummary current;
-  current.initialize(this, n_workers());
+  current.initialize(this);
 
   print_summary_info(&current, " Cumulative RS summary");
 }
--- a/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -145,7 +145,7 @@
   virtual void print_summary_info();
 
   // Print accumulated summary info from the last time called.
-  virtual void print_periodic_summary_info();
+  virtual void print_periodic_summary_info(const char* header);
 
   // Prepare remembered set for verification.
   virtual void prepare_for_verify();
--- a/src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -77,12 +77,12 @@
   return _rs_threads_vtimes[thread];
 }
 
-void G1RemSetSummary::initialize(G1RemSet* remset, uint num_workers) {
+void G1RemSetSummary::initialize(G1RemSet* remset) {
   assert(_rs_threads_vtimes == NULL, "just checking");
   assert(remset != NULL, "just checking");
 
   _remset = remset;
-  _num_vtimes = num_workers;
+  _num_vtimes = ConcurrentG1Refine::thread_num();
   _rs_threads_vtimes = NEW_C_HEAP_ARRAY(double, _num_vtimes, mtGC);
   memset(_rs_threads_vtimes, 0, sizeof(double) * _num_vtimes);
 
@@ -125,25 +125,119 @@
   _sampling_thread_vtime = other->sampling_thread_vtime() - _sampling_thread_vtime;
 }
 
+static double percent_of(size_t numerator, size_t denominator) {
+  if (denominator != 0) {
+    return (double)numerator / denominator * 100.0f;
+  } else {
+    return 0.0f;
+  }
+}
+
+static size_t round_to_K(size_t value) {
+  return value / K;
+}
+
+class RegionTypeCounter VALUE_OBJ_CLASS_SPEC {
+private:
+  const char* _name;
+
+  size_t _rs_mem_size;
+  size_t _cards_occupied;
+  size_t _amount;
+
+  size_t _code_root_mem_size;
+  size_t _code_root_elems;
+
+  double rs_mem_size_percent_of(size_t total) {
+    return percent_of(_rs_mem_size, total);
+  }
+
+  double cards_occupied_percent_of(size_t total) {
+    return percent_of(_cards_occupied, total);
+  }
+
+  double code_root_mem_size_percent_of(size_t total) {
+    return percent_of(_code_root_mem_size, total);
+  }
+
+  double code_root_elems_percent_of(size_t total) {
+    return percent_of(_code_root_elems, total);
+  }
+
+  size_t amount() const { return _amount; }
+
+public:
+
+  RegionTypeCounter(const char* name) : _name(name), _rs_mem_size(0), _cards_occupied(0),
+    _amount(0), _code_root_mem_size(0), _code_root_elems(0) { }
+
+  void add(size_t rs_mem_size, size_t cards_occupied, size_t code_root_mem_size,
+    size_t code_root_elems) {
+    _rs_mem_size += rs_mem_size;
+    _cards_occupied += cards_occupied;
+    _code_root_mem_size += code_root_mem_size;
+    _code_root_elems += code_root_elems;
+    _amount++;
+  }
+
+  size_t rs_mem_size() const { return _rs_mem_size; }
+  size_t cards_occupied() const { return _cards_occupied; }
+
+  size_t code_root_mem_size() const { return _code_root_mem_size; }
+  size_t code_root_elems() const { return _code_root_elems; }
+
+  void print_rs_mem_info_on(outputStream * out, size_t total) {
+    out->print_cr("    "SIZE_FORMAT_W(8)"K (%5.1f%%) by "SIZE_FORMAT" %s regions",
+        round_to_K(rs_mem_size()), rs_mem_size_percent_of(total), amount(), _name);
+  }
+
+  void print_cards_occupied_info_on(outputStream * out, size_t total) {
+    out->print_cr("     "SIZE_FORMAT_W(8)" (%5.1f%%) entries by "SIZE_FORMAT" %s regions",
+        cards_occupied(), cards_occupied_percent_of(total), amount(), _name);
+  }
+
+  void print_code_root_mem_info_on(outputStream * out, size_t total) {
+    out->print_cr("    "SIZE_FORMAT_W(8)"K (%5.1f%%) by "SIZE_FORMAT" %s regions",
+        round_to_K(code_root_mem_size()), code_root_mem_size_percent_of(total), amount(), _name);
+  }
+
+  void print_code_root_elems_info_on(outputStream * out, size_t total) {
+    out->print_cr("     "SIZE_FORMAT_W(8)" (%5.1f%%) elements by "SIZE_FORMAT" %s regions",
+        code_root_elems(), code_root_elems_percent_of(total), amount(), _name);
+  }
+};
+
+
 class HRRSStatsIter: public HeapRegionClosure {
-  size_t _occupied;
+private:
+  RegionTypeCounter _young;
+  RegionTypeCounter _humongous;
+  RegionTypeCounter _free;
+  RegionTypeCounter _old;
+  RegionTypeCounter _all;
 
-  size_t _total_rs_mem_sz;
   size_t _max_rs_mem_sz;
   HeapRegion* _max_rs_mem_sz_region;
 
-  size_t _total_code_root_mem_sz;
+  size_t total_rs_mem_sz() const            { return _all.rs_mem_size(); }
+  size_t total_cards_occupied() const       { return _all.cards_occupied(); }
+
+  size_t max_rs_mem_sz() const              { return _max_rs_mem_sz; }
+  HeapRegion* max_rs_mem_sz_region() const  { return _max_rs_mem_sz_region; }
+
   size_t _max_code_root_mem_sz;
   HeapRegion* _max_code_root_mem_sz_region;
+
+  size_t total_code_root_mem_sz() const     { return _all.code_root_mem_size(); }
+  size_t total_code_root_elems() const      { return _all.code_root_elems(); }
+
+  size_t max_code_root_mem_sz() const       { return _max_code_root_mem_sz; }
+  HeapRegion* max_code_root_mem_sz_region() const { return _max_code_root_mem_sz_region; }
+
 public:
-  HRRSStatsIter() :
-    _occupied(0),
-    _total_rs_mem_sz(0),
-    _max_rs_mem_sz(0),
-    _max_rs_mem_sz_region(NULL),
-    _total_code_root_mem_sz(0),
-    _max_code_root_mem_sz(0),
-    _max_code_root_mem_sz_region(NULL)
+  HRRSStatsIter() : _all("All"), _young("Young"), _humonguous("Humonguous"),
+    _free("Free"), _old("Old"), _max_code_root_mem_sz_region(NULL), _max_rs_mem_sz_region(NULL),
+    _max_rs_mem_sz(0), _max_code_root_mem_sz(0)
   {}
 
   bool doHeapRegion(HeapRegion* r) {
@@ -156,46 +250,96 @@
       _max_rs_mem_sz = rs_mem_sz;
       _max_rs_mem_sz_region = r;
     }
-    _total_rs_mem_sz += rs_mem_sz;
-
+    size_t occupied_cards = hrrs->occupied();
     size_t code_root_mem_sz = hrrs->strong_code_roots_mem_size();
-    if (code_root_mem_sz > _max_code_root_mem_sz) {
-      _max_code_root_mem_sz = code_root_mem_sz;
+    if (code_root_mem_sz > max_code_root_mem_sz()) {
+      _max_code_root_mem_sz = code_root_mem_sz;
       _max_code_root_mem_sz_region = r;
     }
-    _total_code_root_mem_sz += code_root_mem_sz;
+    size_t code_root_elems = hrrs->strong_code_roots_list_length();
 
-    size_t occ = hrrs->occupied();
-    _occupied += occ;
+    RegionTypeCounter* current = NULL;
+    if (r->is_young()) {
+      current = &_young;
+    } else if (r->isHumongous()) {
+      current = &_humonguous;
+    } else if (r->is_empty()) {
+      current = &_free;
+    } else {
+      current = &_old;
+    }
+    current->add(rs_mem_sz, occupied_cards, code_root_mem_sz, code_root_elems);
+    _all.add(rs_mem_sz, occupied_cards, code_root_mem_sz, code_root_elems);
+
     return false;
   }
-  size_t total_rs_mem_sz() { return _total_rs_mem_sz; }
-  size_t max_rs_mem_sz() { return _max_rs_mem_sz; }
-  HeapRegion* max_rs_mem_sz_region() { return _max_rs_mem_sz_region; }
-  size_t total_code_root_mem_sz() { return _total_code_root_mem_sz; }
-  size_t max_code_root_mem_sz() { return _max_code_root_mem_sz; }
-  HeapRegion* max_code_root_mem_sz_region() { return _max_code_root_mem_sz_region; }
-  size_t occupied() { return _occupied; }
+
+  void print_summary_on(outputStream* out) {
+    RegionTypeCounter* counters[] = { &_young, &_humonguous, &_free, &_old, NULL };
+
+    out->print_cr("\n Current rem set statistics");
+    out->print_cr("  Total per region rem sets sizes = "SIZE_FORMAT"K."
+                  " Max = "SIZE_FORMAT"K.",
+                  round_to_K(total_rs_mem_sz()), round_to_K(max_rs_mem_sz()));
+    for (RegionTypeCounter** current = &counters[0]; *current != NULL; current++) {
+      (*current)->print_rs_mem_info_on(out, total_rs_mem_sz());
+    }
+
+    out->print_cr("   Static structures = "SIZE_FORMAT"K,"
+                  " free_lists = "SIZE_FORMAT"K.",
+                  round_to_K(HeapRegionRemSet::static_mem_size()),
+                  round_to_K(HeapRegionRemSet::fl_mem_size()));
+
+    out->print_cr("    "SIZE_FORMAT" occupied cards represented.",
+                  total_cards_occupied());
+    for (RegionTypeCounter** current = &counters[0]; *current != NULL; current++) {
+      (*current)->print_cards_occupied_info_on(out, total_cards_occupied());
+    }
+
+    // Largest sized rem set region statistics
+    HeapRegionRemSet* rem_set = max_rs_mem_sz_region()->rem_set();
+    out->print_cr("    Region with largest rem set = "HR_FORMAT", "
+                  "size = "SIZE_FORMAT "K, occupied = "SIZE_FORMAT"K.",
+                  HR_FORMAT_PARAMS(max_rs_mem_sz_region()),
+                  round_to_K(rem_set->mem_size()),
+                  round_to_K(rem_set->occupied()));
+
+    // Strong code root statistics
+    HeapRegionRemSet* max_code_root_rem_set = max_code_root_mem_sz_region()->rem_set();
+    out->print_cr("  Total heap region code root sets sizes = "SIZE_FORMAT"K."
+                  "  Max = "SIZE_FORMAT"K.",
+                  round_to_K(total_code_root_mem_sz()),
+                  round_to_K(max_code_root_rem_set->strong_code_roots_mem_size()));
+    for (RegionTypeCounter** current = &counters[0]; *current != NULL; current++) {
+      (*current)->print_code_root_mem_info_on(out, total_code_root_mem_sz());
+    }
+
+    out->print_cr("    "SIZE_FORMAT" code roots represented.",
+                  total_code_root_elems());
+    for (RegionTypeCounter** current = &counters[0]; *current != NULL; current++) {
+      (*current)->print_code_root_elems_info_on(out, total_code_root_elems());
+    }
+
+    out->print_cr("    Region with largest amount of code roots = "HR_FORMAT", "
+                  "size = "SIZE_FORMAT "K, num_elems = "SIZE_FORMAT".",
+                  HR_FORMAT_PARAMS(max_code_root_mem_sz_region()),
+                  round_to_K(max_code_root_rem_set->strong_code_roots_mem_size()),
+                  max_code_root_rem_set->strong_code_roots_list_length());
+  }
 };
 
-double calc_percentage(size_t numerator, size_t denominator) {
-  if (denominator != 0) {
-    return (double)numerator / denominator * 100.0;
-  } else {
-    return 0.0f;
-  }
-}
-
 void G1RemSetSummary::print_on(outputStream* out) {
-  out->print_cr("\n Concurrent RS processed "SIZE_FORMAT" cards",
+  out->print_cr("\n Recent concurrent refinement statistics");
+  out->print_cr("  Processed "SIZE_FORMAT" cards",
                 num_concurrent_refined_cards());
-  out->print_cr("  Of %d completed buffers:", num_processed_buf_total());
-  out->print_cr("     %8d (%5.1f%%) by concurrent RS threads.",
+  out->print_cr("  Of "SIZE_FORMAT" completed buffers:", num_processed_buf_total());
+  out->print_cr("     "SIZE_FORMAT_W(8)" (%5.1f%%) by concurrent RS threads.",
                 num_processed_buf_total(),
-                calc_percentage(num_processed_buf_rs_threads(), num_processed_buf_total()));
-  out->print_cr("     %8d (%5.1f%%) by mutator threads.",
+                percent_of(num_processed_buf_rs_threads(), num_processed_buf_total()));
+  out->print_cr("     "SIZE_FORMAT_W(8)" (%5.1f%%) by mutator threads.",
                 num_processed_buf_mutator(),
-                calc_percentage(num_processed_buf_mutator(), num_processed_buf_total()));
+                percent_of(num_processed_buf_mutator(), num_processed_buf_total()));
+  out->print_cr("  Did "SIZE_FORMAT" coarsenings.", num_coarsenings());
   out->print_cr("  Concurrent RS threads times (s)");
   out->print("     ");
   for (uint i = 0; i < _num_vtimes; i++) {
@@ -207,33 +350,5 @@
 
   HRRSStatsIter blk;
   G1CollectedHeap::heap()->heap_region_iterate(&blk);
-  // RemSet stats
-  out->print_cr("  Total heap region rem set sizes = "SIZE_FORMAT"K."
-                "  Max = "SIZE_FORMAT"K.",
-                blk.total_rs_mem_sz()/K, blk.max_rs_mem_sz()/K);
-  out->print_cr("  Static structures = "SIZE_FORMAT"K,"
-                " free_lists = "SIZE_FORMAT"K.",
-                HeapRegionRemSet::static_mem_size() / K,
-                HeapRegionRemSet::fl_mem_size() / K);
-  out->print_cr("    "SIZE_FORMAT" occupied cards represented.",
-                blk.occupied());
-  HeapRegion* max_rs_mem_sz_region = blk.max_rs_mem_sz_region();
-  HeapRegionRemSet* max_rs_rem_set = max_rs_mem_sz_region->rem_set();
-  out->print_cr("    Max size region = "HR_FORMAT", "
-                "size = "SIZE_FORMAT "K, occupied = "SIZE_FORMAT"K.",
-                HR_FORMAT_PARAMS(max_rs_mem_sz_region),
-                (max_rs_rem_set->mem_size() + K - 1)/K,
-                (max_rs_rem_set->occupied() + K - 1)/K);
-  out->print_cr("    Did %d coarsenings.", num_coarsenings());
-  // Strong code root stats
-  out->print_cr("  Total heap region code-root set sizes = "SIZE_FORMAT"K."
-                "  Max = "SIZE_FORMAT"K.",
-                blk.total_code_root_mem_sz()/K, blk.max_code_root_mem_sz()/K);
-  HeapRegion* max_code_root_mem_sz_region = blk.max_code_root_mem_sz_region();
-  HeapRegionRemSet* max_code_root_rem_set = max_code_root_mem_sz_region->rem_set();
-  out->print_cr("    Max size region = "HR_FORMAT", "
-                "size = "SIZE_FORMAT "K, num_elems = "SIZE_FORMAT".",
-                HR_FORMAT_PARAMS(max_code_root_mem_sz_region),
-                (max_code_root_rem_set->strong_code_roots_mem_size() + K - 1)/K,
-                (max_code_root_rem_set->strong_code_roots_list_length()));
+  blk.print_summary_on(out);
 }
--- a/src/share/vm/gc_implementation/g1/g1RemSetSummary.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_implementation/g1/g1RemSetSummary.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -84,7 +84,7 @@
   void subtract_from(G1RemSetSummary* other);
 
   // initialize and get the first sampling
-  void initialize(G1RemSet* remset, uint num_workers);
+  void initialize(G1RemSet* remset);
 
   void print_on(outputStream* out);
 
--- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -64,6 +64,46 @@
   }
 }
 
+bool G1SATBCardTableModRefBS::mark_card_deferred(size_t card_index) {
+  jbyte val = _byte_map[card_index];
+  // It's already processed
+  if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
+    return false;
+  }
+
+  if (val == g1_young_gen) {
+    // The card is for a young gen region. We don't need to keep track of all pointers into young.
+    return false;
+  }
+
+  // Cached bit can be installed either on a clean card or on a claimed card.
+  jbyte new_val = val;
+  if (val == clean_card_val()) {
+    new_val = (jbyte)deferred_card_val();
+  } else {
+    if (val & claimed_card_val()) {
+      new_val = val | (jbyte)deferred_card_val();
+    }
+  }
+  if (new_val != val) {
+    Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
+  }
+  return true;
+}
+
+void G1SATBCardTableModRefBS::g1_mark_as_young(const MemRegion& mr) {
+  jbyte *const first = byte_for(mr.start());
+  jbyte *const last = byte_after(mr.last());
+
+  memset(first, g1_young_gen, last - first);
+}
+
+#ifndef PRODUCT
+void G1SATBCardTableModRefBS::verify_g1_young_region(MemRegion mr) {
+  verify_region(mr, g1_young_gen, true);
+}
+#endif
+
 G1SATBCardTableLoggingModRefBS::
 G1SATBCardTableLoggingModRefBS(MemRegion whole_heap,
                                int max_covered_regions) :
@@ -76,7 +116,11 @@
 void
 G1SATBCardTableLoggingModRefBS::write_ref_field_work(void* field,
                                                      oop new_val) {
-  jbyte* byte = byte_for(field);
+  volatile jbyte* byte = byte_for(field);
+  if (*byte == g1_young_gen) {
+    return;
+  }
+  OrderAccess::storeload();
   if (*byte != dirty_card) {
     *byte = dirty_card;
     Thread* thr = Thread::current();
@@ -95,7 +139,7 @@
 G1SATBCardTableLoggingModRefBS::write_ref_field_static(void* field,
                                                        oop new_val) {
   uintptr_t field_uint = (uintptr_t)field;
-  uintptr_t new_val_uint = (uintptr_t)new_val;
+  uintptr_t new_val_uint = cast_from_oop<uintptr_t>(new_val);
   uintptr_t comb = field_uint ^ new_val_uint;
   comb = comb >> HeapRegion::LogOfHRGrainBytes;
   if (comb == 0) return;
@@ -108,7 +152,7 @@
 
 void
 G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr, bool whole_heap) {
-  jbyte* byte = byte_for(mr.start());
+  volatile jbyte* byte = byte_for(mr.start());
   jbyte* last_byte = byte_for(mr.last());
   Thread* thr = Thread::current();
   if (whole_heap) {
@@ -117,25 +161,35 @@
       byte++;
     }
   } else {
-    // Enqueue if necessary.
-    if (thr->is_Java_thread()) {
-      JavaThread* jt = (JavaThread*)thr;
-      while (byte <= last_byte) {
-        if (*byte != dirty_card) {
-          *byte = dirty_card;
-          jt->dirty_card_queue().enqueue(byte);
+    // skip all consecutive young cards
+    for (; byte <= last_byte && *byte == g1_young_gen; byte++);
+
+    if (byte <= last_byte) {
+      OrderAccess::storeload();
+      // Enqueue if necessary.
+      if (thr->is_Java_thread()) {
+        JavaThread* jt = (JavaThread*)thr;
+        for (; byte <= last_byte; byte++) {
+          if (*byte == g1_young_gen) {
+            continue;
+          }
+          if (*byte != dirty_card) {
+            *byte = dirty_card;
+            jt->dirty_card_queue().enqueue(byte);
+          }
         }
-        byte++;
-      }
-    } else {
-      MutexLockerEx x(Shared_DirtyCardQ_lock,
-                      Mutex::_no_safepoint_check_flag);
-      while (byte <= last_byte) {
-        if (*byte != dirty_card) {
-          *byte = dirty_card;
-          _dcqs.shared_dirty_card_queue()->enqueue(byte);
+      } else {
+        MutexLockerEx x(Shared_DirtyCardQ_lock,
+                        Mutex::_no_safepoint_check_flag);
+        for (; byte <= last_byte; byte++) {
+          if (*byte == g1_young_gen) {
+            continue;
+          }
+          if (*byte != dirty_card) {
+            *byte = dirty_card;
+            _dcqs.shared_dirty_card_queue()->enqueue(byte);
+          }
         }
-        byte++;
       }
     }
   }
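The rewritten write_ref_field_work and invalidate above short-circuit on cards that G1 has pre-marked as young: a write into a young region never needs refinement, so the barrier can return before the StoreLoad fence and the dirty-card check. A minimal self-contained sketch of that fast path, with invented card values and a stand-in queue type (the real constants live in CardTableModRefBS and DirtyCardQueue):

#include <atomic>
#include <cstdint>
#include <vector>

// Invented card values; HotSpot uses jbyte constants instead.
enum Card : uint8_t { CARD_DIRTY = 0, CARD_CLEAN = 1, CARD_YOUNG = 32 };

struct DirtyCardQueue {                 // stand-in for HotSpot's queue
  std::vector<volatile uint8_t*> buf;
  void enqueue(volatile uint8_t* card) { buf.push_back(card); }
};

// Post-barrier fast path: a card covering a young region can never need
// refinement, so return before the expensive fence.
void post_barrier(volatile uint8_t* card, DirtyCardQueue& q) {
  if (*card == CARD_YOUNG) {
    return;
  }
  // Order the preceding reference store before re-reading the card,
  // mirroring the OrderAccess::storeload() the diff adds.
  std::atomic_thread_fence(std::memory_order_seq_cst);
  if (*card != CARD_DIRTY) {
    *card = CARD_DIRTY;
    q.enqueue(card);                    // hand off to concurrent refinement
  }
}

The young check deliberately sits before the fence: the common case of a young-to-anywhere store now costs a single byte load.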
--- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -38,7 +38,14 @@
 // snapshot-at-the-beginning marking.
 
 class G1SATBCardTableModRefBS: public CardTableModRefBSForCTRS {
+protected:
+  enum G1CardValues {
+    g1_young_gen = CT_MR_BS_last_reserved << 1
+  };
+
 public:
+  static int g1_young_card_val()   { return g1_young_gen; }
+
   // Add "pre_val" to a set of objects that may have been disconnected from the
   // pre-marking object graph.
   static void enqueue(oop pre_val);
@@ -89,6 +96,45 @@
       write_ref_array_pre_work(dst, count);
     }
   }
+
+/*
+   Claimed and deferred bits are used together in G1 during the evacuation
+   pause. These bits can have the following state transitions:
+   1. The claimed bit can be put over any other card state, except that
+      the "dirty -> dirty and claimed" transition is checked for in
+      G1 code and is not used.
+   2. The deferred bit can be set only if the previous state of the card
+      was either clean or claimed. mark_card_deferred() is wait-free.
+      We do not care whether the operation is successful because, if
+      it is not, it will only result in a duplicate entry in the update
+      buffer because of the "cache miss". So it's not worth spinning.
+ */
+
+  bool is_card_claimed(size_t card_index) {
+    jbyte val = _byte_map[card_index];
+    return (val & (clean_card_mask_val() | claimed_card_val())) == claimed_card_val();
+  }
+
+  void set_card_claimed(size_t card_index) {
+    jbyte val = _byte_map[card_index];
+    if (val == clean_card_val()) {
+      val = (jbyte)claimed_card_val();
+    } else {
+      val |= (jbyte)claimed_card_val();
+    }
+    _byte_map[card_index] = val;
+  }
+
+  void verify_g1_young_region(MemRegion mr) PRODUCT_RETURN;
+  void g1_mark_as_young(const MemRegion& mr);
+
+  bool mark_card_deferred(size_t card_index);
+
+  bool is_card_deferred(size_t card_index) {
+    jbyte val = _byte_map[card_index];
+    return (val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val();
+  }
+
 };
 
 // Adds card-table logging to the post-barrier.
--- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -149,18 +149,15 @@
 // many regions in the heap (based on the min heap size).
 #define TARGET_REGION_NUMBER          2048
 
-void HeapRegion::setup_heap_region_size(uintx min_heap_size) {
-  // region_size in bytes
+size_t HeapRegion::max_region_size() {
+  return (size_t)MAX_REGION_SIZE;
+}
+
+void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
   uintx region_size = G1HeapRegionSize;
   if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
-    // We base the automatic calculation on the min heap size. This
-    // can be problematic if the spread between min and max is quite
-    // wide, imagine -Xms128m -Xmx32g. But, if we decided it based on
-    // the max size, the region size might be way too large for the
-    // min size. Either way, some users might have to set the region
-    // size manually for some -Xms / -Xmx combos.
-
-    region_size = MAX2(min_heap_size / TARGET_REGION_NUMBER,
+    size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
+    region_size = MAX2(average_heap_size / TARGET_REGION_NUMBER,
                        (uintx) MIN_REGION_SIZE);
   }
 
@@ -640,7 +637,7 @@
           gclog_or_tty->print_cr("Object "PTR_FORMAT" in region "
                                  "["PTR_FORMAT", "PTR_FORMAT") is above "
                                  "top "PTR_FORMAT,
-                                 obj, _hr->bottom(), _hr->end(), _hr->top());
+                                 (void *)obj, _hr->bottom(), _hr->end(), _hr->top());
           _failures = true;
           return;
         }
@@ -954,12 +951,12 @@
         Klass* klass = obj->klass();
         if (!klass->is_metaspace_object()) {
           gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
-                                 "not metadata", klass, obj);
+                                 "not metadata", klass, (void *)obj);
           *failures = true;
           return;
         } else if (!klass->is_klass()) {
           gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
-                                 "not a klass", klass, obj);
+                                 "not a klass", klass, (void *)obj);
           *failures = true;
           return;
         } else {
@@ -974,7 +971,7 @@
           }
         }
       } else {
-        gclog_or_tty->print_cr(PTR_FORMAT" no an oop", obj);
+        gclog_or_tty->print_cr(PTR_FORMAT" no an oop", (void *)obj);
         *failures = true;
         return;
       }
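The new setup_heap_region_size above bases the ergonomic region size on the average of the initial and maximum heap sizes instead of the minimum heap size alone. A rough, self-contained sketch of the arithmetic, assuming a 1M MIN_REGION_SIZE floor (that constant and the power-of-two rounding live outside this hunk):

#include <algorithm>
#include <cstdio>

// TARGET_REGION_NUMBER (2048) is taken from the hunk above; the 1M floor
// is an assumption about code outside it.
unsigned long long pick_region_size(unsigned long long initial_heap,
                                    unsigned long long max_heap) {
  const unsigned long long target_regions = 2048;
  const unsigned long long min_region = 1024 * 1024;
  unsigned long long average = (initial_heap + max_heap) / 2;
  return std::max(average / target_regions, min_region);
}

int main() {
  const unsigned long long M = 1024 * 1024, G = 1024 * M;
  // -Xms128m -Xmx32g: the combination the deleted comment worried about.
  printf("old, min-based:     %llu MB\n", pick_region_size(128 * M, 128 * M) / M);
  printf("new, average-based: %llu MB\n", pick_region_size(128 * M, 32 * G) / M);
  return 0;
}

For that -Xms128m -Xmx32g case this prints 1 MB for the old min-based sizing against roughly 8 MB for the averaged sizing, before HotSpot's rounding and capping are applied.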
--- a/src/share/vm/gc_implementation/g1/heapRegion.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -355,13 +355,15 @@
                                       ~((1 << (size_t) LogOfHRGrainBytes) - 1);
   }
 
+  static size_t max_region_size();
+
   // It sets up the heap region size (GrainBytes / GrainWords), as
   // well as other related fields that are based on the heap region
   // size (LogOfHRGrainBytes / LogOfHRGrainWords /
   // CardsPerRegion). All those fields are considered constant
   // throughout the JVM's execution, therefore they should only be set
   // up once during initialization time.
-  static void setup_heap_region_size(uintx min_heap_size);
+  static void setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size);
 
   enum ClaimValues {
     InitialClaimValue          = 0,
--- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -91,8 +91,8 @@
       gclog_or_tty->print_cr("    PRT::Add_reference_work(" PTR_FORMAT "->" PTR_FORMAT").",
                              from,
                              UseCompressedOops
-                             ? oopDesc::load_decode_heap_oop((narrowOop*)from)
-                             : oopDesc::load_decode_heap_oop((oop*)from));
+                             ? (void *)oopDesc::load_decode_heap_oop((narrowOop*)from)
+                             : (void *)oopDesc::load_decode_heap_oop((oop*)from));
     }
 
     HeapRegion* loc_hr = hr();
@@ -403,8 +403,8 @@
     gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").",
                                                     from,
                                                     UseCompressedOops
-                                                    ? oopDesc::load_decode_heap_oop((narrowOop*)from)
-                                                    : oopDesc::load_decode_heap_oop((oop*)from));
+                                                    ? (void *)oopDesc::load_decode_heap_oop((narrowOop*)from)
+                                                    : (void *)oopDesc::load_decode_heap_oop((oop*)from));
   }
 
   int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift);
--- a/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -71,27 +71,16 @@
 
 // Public
 
-void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end,
-                               uint max_length) {
+void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end) {
   assert((uintptr_t) bottom % HeapRegion::GrainBytes == 0,
          "bottom should be heap region aligned");
   assert((uintptr_t) end % HeapRegion::GrainBytes == 0,
          "end should be heap region aligned");
 
-  _length = 0;
-  _heap_bottom = bottom;
-  _heap_end = end;
-  _region_shift = HeapRegion::LogOfHRGrainBytes;
   _next_search_index = 0;
   _allocated_length = 0;
-  _max_length = max_length;
 
-  _regions = NEW_C_HEAP_ARRAY(HeapRegion*, max_length, mtGC);
-  memset(_regions, 0, (size_t) max_length * sizeof(HeapRegion*));
-  _regions_biased = _regions - ((uintx) bottom >> _region_shift);
-
-  assert(&_regions[0] == &_regions_biased[addr_to_index_biased(bottom)],
-         "bottom should be included in the region with index 0");
+  _regions.initialize(bottom, end, HeapRegion::GrainBytes);
 }
 
 MemRegion HeapRegionSeq::expand_by(HeapWord* old_end,
@@ -101,15 +90,15 @@
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 
   HeapWord* next_bottom = old_end;
-  assert(_heap_bottom <= next_bottom, "invariant");
+  assert(heap_bottom() <= next_bottom, "invariant");
   while (next_bottom < new_end) {
-    assert(next_bottom < _heap_end, "invariant");
+    assert(next_bottom < heap_end(), "invariant");
     uint index = length();
 
-    assert(index < _max_length, "otherwise we cannot expand further");
+    assert(index < max_length(), "otherwise we cannot expand further");
     if (index == 0) {
       // We have not allocated any regions so far
-      assert(next_bottom == _heap_bottom, "invariant");
+      assert(next_bottom == heap_bottom(), "invariant");
     } else {
       // next_bottom should match the end of the last/previous region
       assert(next_bottom == at(index - 1)->end(), "invariant");
@@ -122,8 +111,8 @@
         // allocation failed, we bail out and return what we have done so far
         return MemRegion(old_end, next_bottom);
       }
-      assert(_regions[index] == NULL, "invariant");
-      _regions[index] = new_hr;
+      assert(_regions.get_by_index(index) == NULL, "invariant");
+      _regions.set_by_index(index, new_hr);
       increment_allocated_length();
     }
     // Have to increment the length first, otherwise we will get an
@@ -228,26 +217,26 @@
 
 #ifndef PRODUCT
 void HeapRegionSeq::verify_optional() {
-  guarantee(_length <= _allocated_length,
+  guarantee(length() <= _allocated_length,
             err_msg("invariant: _length: %u _allocated_length: %u",
-                    _length, _allocated_length));
-  guarantee(_allocated_length <= _max_length,
+                    length(), _allocated_length));
+  guarantee(_allocated_length <= max_length(),
             err_msg("invariant: _allocated_length: %u _max_length: %u",
-                    _allocated_length, _max_length));
-  guarantee(_next_search_index <= _length,
+                    _allocated_length, max_length()));
+  guarantee(_next_search_index <= length(),
             err_msg("invariant: _next_search_index: %u _length: %u",
-                    _next_search_index, _length));
+                    _next_search_index, length()));
 
-  HeapWord* prev_end = _heap_bottom;
+  HeapWord* prev_end = heap_bottom();
   for (uint i = 0; i < _allocated_length; i += 1) {
-    HeapRegion* hr = _regions[i];
+    HeapRegion* hr = _regions.get_by_index(i);
     guarantee(hr != NULL, err_msg("invariant: i: %u", i));
     guarantee(hr->bottom() == prev_end,
               err_msg("invariant i: %u "HR_FORMAT" prev_end: "PTR_FORMAT,
                       i, HR_FORMAT_PARAMS(hr), prev_end));
     guarantee(hr->hrs_index() == i,
               err_msg("invariant: i: %u hrs_index(): %u", i, hr->hrs_index()));
-    if (i < _length) {
+    if (i < length()) {
       // Asserts will fire if i is >= _length
       HeapWord* addr = hr->bottom();
       guarantee(addr_to_region(addr) == hr, "sanity");
@@ -265,8 +254,8 @@
       prev_end = hr->end();
     }
   }
-  for (uint i = _allocated_length; i < _max_length; i += 1) {
-    guarantee(_regions[i] == NULL, err_msg("invariant i: %u", i));
+  for (uint i = _allocated_length; i < max_length(); i += 1) {
+    guarantee(_regions.get_by_index(i) == NULL, err_msg("invariant i: %u", i));
   }
 }
 #endif // PRODUCT
--- a/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -25,10 +25,17 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
 
+#include "gc_implementation/g1/g1BiasedArray.hpp"
+
 class HeapRegion;
 class HeapRegionClosure;
 class FreeRegionList;
 
+class G1HeapRegionTable : public G1BiasedMappedArray<HeapRegion*> {
+ protected:
+  virtual HeapRegion* default_value() const { return NULL; }
+};
+
 // This class keeps track of the region metadata (i.e., HeapRegion
 // instances). They are kept in the _regions array in address
 // order. A region's index in the array corresponds to its index in
@@ -44,35 +51,21 @@
 //
 // We keep track of three lengths:
 //
-// * _length (returned by length()) is the number of currently
+// * _committed_length (returned by length()) is the number of currently
 //   committed regions.
 // * _allocated_length (not exposed outside this class) is the
 //   number of regions for which we have HeapRegions.
-// * _max_length (returned by max_length()) is the maximum number of
-//   regions the heap can have.
+// * max_length() returns the maximum number of regions the heap can have.
 //
-// and maintain that: _length <= _allocated_length <= _max_length
+// and maintain that: _committed_length <= _allocated_length <= max_length()
 
 class HeapRegionSeq: public CHeapObj<mtGC> {
   friend class VMStructs;
 
-  // The array that holds the HeapRegions.
-  HeapRegion** _regions;
-
-  // Version of _regions biased to address 0
-  HeapRegion** _regions_biased;
+  G1HeapRegionTable _regions;
 
   // The number of regions committed in the heap.
-  uint _length;
-
-  // The address of the first reserved word in the heap.
-  HeapWord* _heap_bottom;
-
-  // The address of the last reserved word in the heap - 1.
-  HeapWord* _heap_end;
-
-  // The log of the region byte size.
-  uint _region_shift;
+  uint _committed_length;
 
   // A hint for which index to start searching from for humongous
   // allocations.
@@ -81,37 +74,33 @@
   // The number of regions for which we have allocated HeapRegions for.
   uint _allocated_length;
 
-  // The maximum number of regions in the heap.
-  uint _max_length;
-
   // Find a contiguous set of empty regions of length num, starting
   // from the given index.
   uint find_contiguous_from(uint from, uint num);
 
-  // Map a heap address to a biased region index. Assume that the
-  // address is valid.
-  inline uintx addr_to_index_biased(HeapWord* addr) const;
-
   void increment_allocated_length() {
-    assert(_allocated_length < _max_length, "pre-condition");
+    assert(_allocated_length < max_length(), "pre-condition");
     _allocated_length++;
   }
 
   void increment_length() {
-    assert(_length < _max_length, "pre-condition");
-    _length++;
+    assert(length() < max_length(), "pre-condition");
+    _committed_length++;
   }
 
   void decrement_length() {
-    assert(_length > 0, "pre-condition");
-    _length--;
+    assert(length() > 0, "pre-condition");
+    _committed_length--;
   }
 
+  HeapWord* heap_bottom() const { return _regions.bottom_address_mapped(); }
+  HeapWord* heap_end() const { return _regions.end_address_mapped(); }
+
  public:
   // Empty constructor; we'll initialize it with the initialize() method.
-  HeapRegionSeq() { }
+  HeapRegionSeq() : _regions(), _committed_length(0), _next_search_index(0), _allocated_length(0) { }
 
-  void initialize(HeapWord* bottom, HeapWord* end, uint max_length);
+  void initialize(HeapWord* bottom, HeapWord* end);
 
   // Return the HeapRegion at the given index. Assume that the index
   // is valid.
@@ -126,10 +115,10 @@
   inline HeapRegion* addr_to_region_unsafe(HeapWord* addr) const;
 
   // Return the number of regions that have been committed in the heap.
-  uint length() const { return _length; }
+  uint length() const { return _committed_length; }
 
   // Return the maximum number of regions in the heap.
-  uint max_length() const { return _max_length; }
+  uint max_length() const { return (uint)_regions.length(); }
 
   // Expand the sequence to reflect that the heap has grown from
   // old_end to new_end. Either create new HeapRegions, or re-use
--- a/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -28,28 +28,16 @@
 #include "gc_implementation/g1/heapRegion.hpp"
 #include "gc_implementation/g1/heapRegionSeq.hpp"
 
-inline uintx HeapRegionSeq::addr_to_index_biased(HeapWord* addr) const {
-  assert(_heap_bottom <= addr && addr < _heap_end,
-         err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT,
-                 addr, _heap_bottom, _heap_end));
-  uintx index = (uintx) addr >> _region_shift;
-  return index;
-}
-
 inline HeapRegion* HeapRegionSeq::addr_to_region_unsafe(HeapWord* addr) const {
-  assert(_heap_bottom <= addr && addr < _heap_end,
-         err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT,
-                 addr, _heap_bottom, _heap_end));
-  uintx index_biased = addr_to_index_biased(addr);
-  HeapRegion* hr = _regions_biased[index_biased];
+  HeapRegion* hr = _regions.get_by_address(addr);
   assert(hr != NULL, "invariant");
   return hr;
 }
 
 inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const {
-  if (addr != NULL && addr < _heap_end) {
-    assert(addr >= _heap_bottom,
-          err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT, addr, _heap_bottom));
+  if (addr != NULL && addr < heap_end()) {
+    assert(addr >= heap_bottom(),
+          err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT, addr, heap_bottom()));
     return addr_to_region_unsafe(addr);
   }
   return NULL;
@@ -57,7 +45,7 @@
 
 inline HeapRegion* HeapRegionSeq::at(uint index) const {
   assert(index < length(), "pre-condition");
-  HeapRegion* hr = _regions[index];
+  HeapRegion* hr = _regions.get_by_index(index);
   assert(hr != NULL, "sanity");
   assert(hr->hrs_index() == index, "sanity");
   return hr;
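The deleted _regions_biased code and its G1BiasedMappedArray replacement rely on the same trick: precompute bottom >> shift once so an address-to-region lookup is a shift, a subtract, and a load. A small sketch of the idea under those assumptions; names and bounds handling are illustrative, not the HotSpot class:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

template <typename T>
class BiasedMappedArray {
  std::vector<T> _base;     // one slot per region-sized chunk
  size_t   _bias;           // bottom >> _shift_by
  unsigned _shift_by;       // log2 of the mapping granularity
public:
  BiasedMappedArray(uintptr_t bottom, uintptr_t end, unsigned shift)
    : _base(size_t((end - bottom) >> shift)),
      _bias(size_t(bottom >> shift)),
      _shift_by(shift) {}

  T& get_by_address(uintptr_t addr) {
    size_t biased_index = size_t(addr >> _shift_by);
    assert(biased_index >= _bias && biased_index - _bias < _base.size());
    return _base[biased_index - _bias];
  }
};

HotSpot goes one step further and stores a biased base pointer (base minus bias), so the subtraction disappears from the lookup entirely; the _bias and _shift_by fields exported through VMStructs below let the serviceability agent replay the same computation.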
--- a/src/share/vm/gc_implementation/g1/ptrQueue.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_implementation/g1/ptrQueue.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -38,6 +38,7 @@
 
 class PtrQueueSet;
 class PtrQueue VALUE_OBJ_CLASS_SPEC {
+  friend class VMStructs;
 
 protected:
   // The ptr queue set to which this queue belongs.
@@ -79,6 +80,10 @@
 
   void reset() { if (_buf != NULL) _index = _sz; }
 
+  void enqueue(volatile void* ptr) {
+    enqueue((void*)(ptr));
+  }
+
   // Enqueues the given "obj".
   void enqueue(void* ptr) {
     if (!_active) return;
--- a/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -31,10 +31,17 @@
 
 #define VM_STRUCTS_G1(nonstatic_field, static_field)                          \
                                                                               \
-  static_field(HeapRegion, GrainBytes, size_t)                                \
+  static_field(HeapRegion, GrainBytes,        size_t)                         \
+  static_field(HeapRegion, LogOfHRGrainBytes, int)                            \
                                                                               \
-  nonstatic_field(HeapRegionSeq,   _regions, HeapRegion**)                    \
-  nonstatic_field(HeapRegionSeq,   _length,  uint)                            \
+  nonstatic_field(G1HeapRegionTable, _base,             address)              \
+  nonstatic_field(G1HeapRegionTable, _length,           size_t)               \
+  nonstatic_field(G1HeapRegionTable, _biased_base,      address)              \
+  nonstatic_field(G1HeapRegionTable, _bias,             size_t)               \
+  nonstatic_field(G1HeapRegionTable, _shift_by,         uint)                 \
+                                                                              \
+  nonstatic_field(HeapRegionSeq,   _regions,            G1HeapRegionTable)    \
+  nonstatic_field(HeapRegionSeq,   _committed_length,   uint)                 \
                                                                               \
   nonstatic_field(G1CollectedHeap, _hrs,                HeapRegionSeq)        \
   nonstatic_field(G1CollectedHeap, _g1_committed,       MemRegion)            \
@@ -57,6 +64,8 @@
 
 #define VM_TYPES_G1(declare_type, declare_toplevel_type)                      \
                                                                               \
+  declare_toplevel_type(G1HeapRegionTable)                                    \
+                                                                              \
   declare_type(G1CollectedHeap, SharedHeap)                                   \
                                                                               \
   declare_type(HeapRegion, ContiguousSpace)                                   \
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1103,7 +1103,7 @@
   }
 }
 
-static const oop ClaimedForwardPtr = oop(0x4);
+static const oop ClaimedForwardPtr = cast_to_oop<intptr_t>(0x4);
 
 // Because of concurrency, there are times where an object for which
 // "is_forwarded()" is true contains an "interim" forwarding pointer
@@ -1226,7 +1226,7 @@
   if (TraceScavenge) {
     gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
        is_in_reserved(new_obj) ? "copying" : "tenuring",
-       new_obj->klass()->internal_name(), old, new_obj, new_obj->size());
+       new_obj->klass()->internal_name(), (void *)old, (void *)new_obj, new_obj->size());
   }
 #endif
 
@@ -1347,7 +1347,7 @@
   if (TraceScavenge) {
     gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
        is_in_reserved(new_obj) ? "copying" : "tenuring",
-       new_obj->klass()->internal_name(), old, new_obj, new_obj->size());
+       new_obj->klass()->internal_name(), (void *)old, (void *)new_obj, new_obj->size());
   }
 #endif
 
@@ -1436,7 +1436,7 @@
 // (although some performance comparisons would be useful since
 // single global lists have their own performance disadvantages
 // as we were made painfully aware not long ago, see 6786503).
-#define BUSY (oop(0x1aff1aff))
+#define BUSY (cast_to_oop<intptr_t>(0x1aff1aff))
 void ParNewGeneration::push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state) {
   assert(is_in_reserved(from_space_obj), "Should be from this generation");
   if (ParGCUseLocalOverflow) {
@@ -1512,7 +1512,7 @@
   if (_overflow_list == NULL) return false;
 
   // Otherwise, there was something there; try claiming the list.
-  oop prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
+  oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
   // Trim off a prefix of at most objsFromOverflow items
   Thread* tid = Thread::current();
   size_t spin_count = (size_t)ParallelGCThreads;
@@ -1526,7 +1526,7 @@
       return false;
     } else if (_overflow_list != BUSY) {
      // try and grab the prefix
-     prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
+     prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
     }
   }
   if (prefix == NULL || prefix == BUSY) {
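The xchg_ptr pattern above claims the whole overflow list atomically: a thread swaps the BUSY sentinel into the list head, and any thread that reads back BUSY knows the prefix is owned elsewhere. A minimal sketch of the claim step with std::atomic and an invented node type; the real code additionally trims a prefix of objsFromOverflow items and CASes the remainder back over the sentinel:

#include <atomic>

struct Node { Node* next; };

// Sentinel that can never be a real node address (the BUSY macro above).
static Node* const BUSY = reinterpret_cast<Node*>(0x1aff1aff);

std::atomic<Node*> overflow_list{nullptr};

// Claim the entire list, or report it as empty / owned by another thread.
Node* try_claim_overflow_list() {
  if (overflow_list.load() == nullptr) {
    return nullptr;                            // nothing to take
  }
  Node* prefix = overflow_list.exchange(BUSY); // swap in the sentinel
  if (prefix == BUSY) {
    return nullptr;        // another thread owns the list; caller may retry
  }
  if (prefix == nullptr) {
    overflow_list.store(nullptr);              // raced with emptying; undo
    return nullptr;
  }
  return prefix;           // caller owns every node reachable from prefix
}

Swapping a sentinel rather than CAS-looping over the head keeps the claim wait-free for the winner, at the cost of making losers back off, which is exactly the trade the surrounding spin_count logic manages.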
--- a/src/share/vm/gc_implementation/parNew/parOopClosures.inline.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_implementation/parNew/parOopClosures.inline.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -84,7 +84,7 @@
         Space* sp = gch->space_containing(p);
         oop obj = oop(sp->block_start(p));
         assert((HeapWord*)obj < (HeapWord*)p, "Error");
-        tty->print_cr("Object: " PTR_FORMAT, obj);
+        tty->print_cr("Object: " PTR_FORMAT, (void *)obj);
         tty->print_cr("-------");
         obj->print();
         tty->print_cr("-----");
@@ -110,7 +110,7 @@
         if (TraceScavenge) {
           gclog_or_tty->print_cr("{%s %s ( " PTR_FORMAT " ) " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
              "forwarded ",
-             new_obj->klass()->internal_name(), p, obj, new_obj, new_obj->size());
+             new_obj->klass()->internal_name(), p, (void *)obj, (void *)new_obj, new_obj->size());
         }
 #endif
 
--- a/src/share/vm/gc_implementation/parallelScavenge/generationSizer.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_implementation/parallelScavenge/generationSizer.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -40,10 +40,8 @@
 
   void initialize_flags() {
     // Do basic sizing work
-    this->TwoGenerationCollectorPolicy::initialize_flags();
+    TwoGenerationCollectorPolicy::initialize_flags();
 
-    // If the user hasn't explicitly set the number of worker
-    // threads, set the count.
     assert(UseSerialGC ||
            !FLAG_IS_DEFAULT(ParallelGCThreads) ||
            (ParallelGCThreads > 0),
@@ -68,9 +66,6 @@
   size_t min_old_gen_size()   { return _min_gen1_size; }
   size_t old_gen_size()       { return _initial_gen1_size; }
   size_t max_old_gen_size()   { return _max_gen1_size; }
-
-  size_t metaspace_size()      { return MetaspaceSize; }
-  size_t max_metaspace_size()  { return MaxMetaspaceSize; }
 };
 
 #endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_GENERATIONSIZER_HPP
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -86,6 +86,11 @@
     set_alignment(_old_gen_alignment, intra_heap_alignment());
   }
 
+  // Return the (conservative) maximum heap alignment
+  static size_t conservative_max_heap_alignment() {
+    return intra_heap_alignment();
+  }
+
   // For use by VM operations
   enum CollectionType {
     Scavenge,
@@ -122,7 +127,7 @@
 
   // The alignment used for eden and survivors within the young gen
   // and for boundary between young gen and old gen.
-  size_t intra_heap_alignment() const { return 64 * K * HeapWordSize; }
+  static size_t intra_heap_alignment() { return 64 * K * HeapWordSize; }
 
   size_t capacity() const;
   size_t used() const;
--- a/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -23,7 +23,6 @@
  */
 
 #include "precompiled.hpp"
-#include "gc_implementation/parallelScavenge/generationSizer.hpp"
 #include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
 #include "gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
--- a/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -53,7 +53,6 @@
 
 // Forward decls
 class elapsedTimer;
-class GenerationSizer;
 
 class PSAdaptiveSizePolicy : public AdaptiveSizePolicy {
  friend class PSGCAdaptivePolicyCounters;
--- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -26,7 +26,6 @@
 #include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "code/codeCache.hpp"
-#include "gc_implementation/parallelScavenge/generationSizer.hpp"
 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
 #include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
 #include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -27,7 +27,6 @@
 #include "classfile/systemDictionary.hpp"
 #include "code/codeCache.hpp"
 #include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
-#include "gc_implementation/parallelScavenge/generationSizer.hpp"
 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp"
 #include "gc_implementation/parallelScavenge/pcTasks.hpp"
 #include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -333,7 +333,7 @@
     gclog_or_tty->print_cr("{%s %s 0x%x (%d)}",
                            "promotion-failure",
                            obj->klass()->internal_name(),
-                           obj, obj->size());
+                           (void *)obj, obj->size());
 
   }
 #endif
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -126,7 +126,7 @@
 
   oop* mask_chunked_array_oop(oop obj) {
     assert(!is_oop_masked((oop*) obj), "invariant");
-    oop* ret = (oop*) ((uintptr_t)obj | PS_CHUNKED_ARRAY_OOP_MASK);
+    oop* ret = (oop*) (cast_from_oop<uintptr_t>(obj) | PS_CHUNKED_ARRAY_OOP_MASK);
     assert(is_oop_masked(ret), "invariant");
     return ret;
   }
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -226,7 +226,7 @@
   if (TraceScavenge) {
     gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
        PSScavenge::should_scavenge(&new_obj) ? "copying" : "tenuring",
-       new_obj->klass()->internal_name(), o, new_obj, new_obj->size());
+       new_obj->klass()->internal_name(), (void *)o, (void *)new_obj, new_obj->size());
   }
 #endif
 
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -27,7 +27,6 @@
 #include "code/codeCache.hpp"
 #include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
 #include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
-#include "gc_implementation/parallelScavenge/generationSizer.hpp"
 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
 #include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
 #include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -81,7 +81,7 @@
   if (TraceScavenge &&  o->is_forwarded()) {
     gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
        "forwarding",
-       new_obj->klass()->internal_name(), o, new_obj, new_obj->size());
+       new_obj->klass()->internal_name(), (void *)o, (void *)new_obj, new_obj->size());
   }
 #endif
 
--- a/src/share/vm/gc_implementation/shared/allocationStats.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_implementation/shared/allocationStats.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,11 +26,9 @@
 #define SHARE_VM_GC_IMPLEMENTATION_SHARED_ALLOCATIONSTATS_HPP
 
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc_implementation/shared/gcUtil.hpp"
 #include "memory/allocation.hpp"
 #include "utilities/globalDefinitions.hpp"
-#endif // INCLUDE_ALL_GCS
+#include "gc_implementation/shared/gcUtil.hpp"
 
 class AllocationStats VALUE_OBJ_CLASS_SPEC {
   // A duration threshold (in ms) used to filter
--- a/src/share/vm/gc_implementation/shared/gcTraceSend.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_implementation/shared/gcTraceSend.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -28,6 +28,7 @@
 #include "gc_implementation/shared/gcTrace.hpp"
 #include "gc_implementation/shared/gcWhen.hpp"
 #include "gc_implementation/shared/copyFailedInfo.hpp"
+#include "runtime/os.hpp"
 #include "trace/tracing.hpp"
 #include "trace/traceBackend.hpp"
 #if INCLUDE_ALL_GCS
@@ -54,11 +55,12 @@
 }
 
 void GCTracer::send_reference_stats_event(ReferenceType type, size_t count) const {
-  EventGCReferenceStatistics e;
+  EventGCReferenceStatistics e(UNTIMED);
   if (e.should_commit()) {
       e.set_gcId(_shared_gc_info.id());
       e.set_type((u1)type);
       e.set_count(count);
+      e.set_endtime(os::elapsed_counter());
       e.commit();
   }
 }
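
This and the following hunks all apply one pattern: construct the trace event UNTIMED and stamp it with os::elapsed_counter() just before commit, so the recorded end time is supplied by the GC code instead of being taken implicitly at construction. A hedged sketch of the pattern with a hypothetical Event type (the real event classes are generated from trace metadata):

    // All names below are stand-ins for illustration only.
    struct Event {
      long endtime;
      explicit Event(bool untimed) : endtime(0) { (void)untimed; }  // no timestamp taken here
      bool should_commit() const { return true; }  // assumed recording gate
      void set_endtime(long t) { endtime = t; }
      void commit() { /* hand off to the trace backend */ }
    };

    long now() { return 42; }  // stand-in for os::elapsed_counter()

    void send_reference_stats(int type, long count) {
      Event e(/*UNTIMED=*/true);  // don't let construction time leak into the event
      if (e.should_commit()) {    // cheap gate before filling in the payload
        (void)type; (void)count;  // payload setters elided
        e.set_endtime(now());     // the time the event actually describes
        e.commit();
      }
    }

    int main() { send_reference_stats(1, 100); }
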
@@ -105,20 +107,22 @@
 }
 
 void YoungGCTracer::send_promotion_failed_event(const PromotionFailedInfo& pf_info) const {
-  EventPromotionFailed e;
+  EventPromotionFailed e(UNTIMED);
   if (e.should_commit()) {
     e.set_gcId(_shared_gc_info.id());
     e.set_data(to_trace_struct(pf_info));
     e.set_thread(pf_info.thread()->thread_id());
+    e.set_endtime(os::elapsed_counter());
     e.commit();
   }
 }
 
 // Common to CMS and G1
 void OldGCTracer::send_concurrent_mode_failure_event() {
-  EventConcurrentModeFailure e;
+  EventConcurrentModeFailure e(UNTIMED);
   if (e.should_commit()) {
     e.set_gcId(_shared_gc_info.id());
+    e.set_endtime(os::elapsed_counter());
     e.commit();
   }
 }
@@ -136,7 +140,7 @@
 }
 
 void G1NewTracer::send_evacuation_info_event(EvacuationInfo* info) {
-  EventEvacuationInfo e;
+  EventEvacuationInfo e(UNTIMED);
   if (e.should_commit()) {
     e.set_gcId(_shared_gc_info.id());
     e.set_cSetRegions(info->collectionset_regions());
@@ -147,15 +151,17 @@
     e.set_allocRegionsUsedAfter(info->alloc_regions_used_before() + info->bytes_copied());
     e.set_bytesCopied(info->bytes_copied());
     e.set_regionsFreed(info->regions_freed());
+    e.set_endtime(os::elapsed_counter());
     e.commit();
   }
 }
 
 void G1NewTracer::send_evacuation_failed_event(const EvacuationFailedInfo& ef_info) const {
-  EventEvacuationFailed e;
+  EventEvacuationFailed e(UNTIMED);
   if (e.should_commit()) {
     e.set_gcId(_shared_gc_info.id());
     e.set_data(to_trace_struct(ef_info));
+    e.set_endtime(os::elapsed_counter());
     e.commit();
   }
 }
@@ -189,12 +195,13 @@
   void visit(const GCHeapSummary* heap_summary) const {
     const VirtualSpaceSummary& heap_space = heap_summary->heap();
 
-    EventGCHeapSummary e;
+    EventGCHeapSummary e(UNTIMED);
     if (e.should_commit()) {
       e.set_gcId(_id);
       e.set_when((u1)_when);
       e.set_heapSpace(to_trace_struct(heap_space));
       e.set_heapUsed(heap_summary->used());
+      e.set_endtime(os::elapsed_counter());
       e.commit();
     }
   }
@@ -209,7 +216,7 @@
     const SpaceSummary& from_space = ps_heap_summary->from();
     const SpaceSummary& to_space = ps_heap_summary->to();
 
-    EventPSHeapSummary e;
+    EventPSHeapSummary e(UNTIMED);
     if (e.should_commit()) {
       e.set_gcId(_id);
       e.set_when((u1)_when);
@@ -220,6 +227,7 @@
       e.set_edenSpace(to_trace_struct(ps_heap_summary->eden()));
       e.set_fromSpace(to_trace_struct(ps_heap_summary->from()));
       e.set_toSpace(to_trace_struct(ps_heap_summary->to()));
+      e.set_endtime(os::elapsed_counter());
       e.commit();
     }
   }
@@ -241,13 +249,14 @@
 }
 
 void GCTracer::send_meta_space_summary_event(GCWhen::Type when, const MetaspaceSummary& meta_space_summary) const {
-  EventMetaspaceSummary e;
+  EventMetaspaceSummary e(UNTIMED);
   if (e.should_commit()) {
     e.set_gcId(_shared_gc_info.id());
     e.set_when((u1) when);
     e.set_metaspace(to_trace_struct(meta_space_summary.meta_space()));
     e.set_dataSpace(to_trace_struct(meta_space_summary.data_space()));
     e.set_classSpace(to_trace_struct(meta_space_summary.class_space()));
+    e.set_endtime(os::elapsed_counter());
     e.commit();
   }
 }
@@ -282,8 +291,6 @@
       default: /* Ignore sending this phase */ break;
     }
   }
-
-#undef send_phase
 };
 
 void GCTracer::send_phase_events(TimePartitions* time_partitions) const {
--- a/src/share/vm/gc_implementation/shared/gcUtil.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_implementation/shared/gcUtil.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -144,9 +144,9 @@
     _padded_avg(0.0), _deviation(0.0), _padding(padding) {}
 
   // Placement support
-  void* operator new(size_t ignored, void* p) { return p; }
+  void* operator new(size_t ignored, void* p) throw() { return p; }
   // Allocator
-  void* operator new(size_t size) { return CHeapObj<mtGC>::operator new(size); }
+  void* operator new(size_t size) throw() { return CHeapObj<mtGC>::operator new(size); }
 
   // Accessor
   float padded_average() const         { return _padded_avg; }
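
The empty throw() specifications added here bring the class-specific operator new declarations into agreement with the global placement operator new declared in <new>, whose specification is throw(); newer compilers reject the mismatch. A self-contained sketch of the same pair of overloads, with Padded and its allocator as stand-ins:

    #include <cstddef>
    #include <cstdlib>
    #include <new>

    struct Padded {
      // Placement form: must agree with ::operator new(size_t, void*) from
      // <new>, which is declared throw(), hence the specification here.
      void* operator new(size_t ignored, void* p) throw() { (void)ignored; return p; }
      // Allocating form: returns NULL on failure rather than throwing, so it
      // is declared throw() too (malloc stands in for the CHeapObj allocator).
      void* operator new(size_t size) throw() { return malloc(size); }
      void  operator delete(void* p) { free(p); }
      float padding;
    };

    int main() {
      alignas(Padded) char buf[sizeof(Padded)];
      Padded* a = new (buf) Padded();  // picks the placement overload
      Padded* b = new Padded();        // allocating overload; the new-expression
      delete b;                        // null-checks because operator new can't throw
      (void)a;
      return 0;
    }
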
--- a/src/share/vm/gc_implementation/shared/hSpaceCounters.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_implementation/shared/hSpaceCounters.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,11 +26,9 @@
 #define SHARE_VM_GC_IMPLEMENTATION_SHARED_HSPACECOUNTERS_HPP
 
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
 #include "gc_implementation/shared/generationCounters.hpp"
 #include "memory/generation.hpp"
 #include "runtime/perfData.hpp"
-#endif // INCLUDE_ALL_GCS
 
 // A HSpaceCounter is a holder class for performance counters
 // that track collections (logical spaces) in a heap;
--- a/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -211,7 +211,7 @@
   // a GC that freed space for the allocation.
   if (!MetadataAllocationFailALot) {
     _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
-    }
+  }
 
   if (_result == NULL) {
     if (UseConcMarkSweepGC) {
@@ -223,9 +223,7 @@
         _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
     }
     if (_result == NULL) {
-      // Don't clear the soft refs.  This GC is for reclaiming metadata
-      // and is unrelated to the fullness of the Java heap which should
-      // be the criteria for clearing SoftReferences.
+      // Don't clear the soft refs yet.
       if (Verbose && PrintGCDetails && UseConcMarkSweepGC) {
         gclog_or_tty->print_cr("\nCMS full GC for Metaspace");
       }
@@ -235,7 +233,7 @@
       _result =
         _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
     }
-    if (_result == NULL && !UseConcMarkSweepGC /* CMS already tried */) {
+    if (_result == NULL) {
       // If still failing, allow the Metaspace to expand.
       // See delta_capacity_until_GC() for explanation of the
       // amount of the expansion.
@@ -243,7 +241,16 @@
       // or a MaxMetaspaceSize has been specified on the command line.
       _result =
         _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
-
+      if (_result == NULL) {
+        // If expansion failed, do a last-ditch collection and try allocating
+        // again.  A last-ditch collection will clear softrefs.  This
+        // behavior is similar to the last-ditch collection done for perm
+        // gen when it was full and a collection for failed allocation
+        // did not free perm gen space.
+        heap->collect_as_vm_thread(GCCause::_last_ditch_collection);
+        _result =
+          _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
+      }
     }
     if (Verbose && PrintGCDetails && _result == NULL) {
       gclog_or_tty->print_cr("\nAfter Metaspace GC failed to allocate size "
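
After this change the metadata allocation path degrades in four stages: allocate, collect and reallocate, expand and allocate, and finally a last-ditch collection that clears soft references before one more attempt. A hedged outline of that ladder; allocate(), collect() and expand_and_allocate() are hypothetical stand-ins for the Metaspace and heap calls:

    #include <cstddef>

    static void* allocate(size_t size)            { (void)size; return NULL; }
    static void* expand_and_allocate(size_t size) { (void)size; return NULL; }
    static void  collect(bool clear_soft_refs)    { (void)clear_soft_refs; }

    void* satisfy_failed_metadata_allocation(size_t size) {
      void* p = allocate(size);            // 1. plain allocation
      if (p != NULL) return p;
      collect(/*clear_soft_refs=*/false);  // 2. GC, but don't clear soft refs yet
      p = allocate(size);
      if (p != NULL) return p;
      p = expand_and_allocate(size);       // 3. grow the metaspace if permitted
      if (p != NULL) return p;
      collect(/*clear_soft_refs=*/true);   // 4. last-ditch collection, as for perm gen
      return allocate(size);               // may still be NULL -> OOME upstream
    }

    int main() { return satisfy_failed_metadata_allocation(64) == NULL ? 0 : 1; }
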
--- a/src/share/vm/gc_implementation/shared/vmGCOperations.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_implementation/shared/vmGCOperations.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -214,9 +214,6 @@
     : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true),
       _loader_data(loader_data), _size(size), _mdtype(mdtype), _result(NULL) {
   }
-  ~VM_CollectForMetadataAllocation()  {
-    MetaspaceGC::set_expand_after_GC(false);
-  }
   virtual VMOp_Type type() const { return VMOp_CollectForMetadataAllocation; }
   virtual void doit();
   MetaWord* result() const       { return _result; }
--- a/src/share/vm/gc_interface/collectedHeap.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_interface/collectedHeap.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -87,15 +87,15 @@
   const MetaspaceSizes meta_space(
       MetaspaceAux::allocated_capacity_bytes(),
       MetaspaceAux::allocated_used_bytes(),
-      MetaspaceAux::reserved_in_bytes());
+      MetaspaceAux::reserved_bytes());
   const MetaspaceSizes data_space(
       MetaspaceAux::allocated_capacity_bytes(Metaspace::NonClassType),
       MetaspaceAux::allocated_used_bytes(Metaspace::NonClassType),
-      MetaspaceAux::reserved_in_bytes(Metaspace::NonClassType));
+      MetaspaceAux::reserved_bytes(Metaspace::NonClassType));
   const MetaspaceSizes class_space(
       MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType),
       MetaspaceAux::allocated_used_bytes(Metaspace::ClassType),
-      MetaspaceAux::reserved_in_bytes(Metaspace::ClassType));
+      MetaspaceAux::reserved_bytes(Metaspace::ClassType));
 
   return MetaspaceSummary(meta_space, data_space, class_space);
 }
@@ -202,12 +202,6 @@
       ShouldNotReachHere(); // Unexpected use of this function
   }
 }
-MetaWord* CollectedHeap::satisfy_failed_metadata_allocation(
-                                              ClassLoaderData* loader_data,
-                                              size_t size, Metaspace::MetadataType mdtype) {
-  return collector_policy()->satisfy_failed_metadata_allocation(loader_data, size, mdtype);
-}
-
 
 void CollectedHeap::pre_initialize() {
   // Used for ReduceInitialCardMarks (when COMPILER2 is used);
--- a/src/share/vm/gc_interface/collectedHeap.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/gc_interface/collectedHeap.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -475,11 +475,6 @@
   // the context of the vm thread.
   virtual void collect_as_vm_thread(GCCause::Cause cause);
 
-  // Callback from VM_CollectForMetadataAllocation operation.
-  MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
-                                               size_t size,
-                                               Metaspace::MetadataType mdtype);
-
   // Returns the barrier set for this heap
   BarrierSet* barrier_set() { return _barrier_set; }
 
--- a/src/share/vm/interpreter/abstractInterpreter.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/interpreter/abstractInterpreter.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -30,11 +30,8 @@
 #include "runtime/thread.inline.hpp"
 #include "runtime/vmThread.hpp"
 #include "utilities/top.hpp"
-#ifdef TARGET_ARCH_MODEL_x86_32
-# include "interp_masm_x86_32.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_x86_64
-# include "interp_masm_x86_64.hpp"
+#ifdef TARGET_ARCH_x86
+# include "interp_masm_x86.hpp"
 #endif
 #ifdef TARGET_ARCH_MODEL_sparc
 # include "interp_masm_sparc.hpp"
@@ -164,8 +161,8 @@
   // Runtime support
 
   // length = invoke bytecode length (to advance to next bytecode)
-  static address    deopt_entry   (TosState state, int length) { ShouldNotReachHere(); return NULL; }
-  static address    return_entry  (TosState state, int length) { ShouldNotReachHere(); return NULL; }
+  static address deopt_entry(TosState state, int length) { ShouldNotReachHere(); return NULL; }
+  static address return_entry(TosState state, int length, Bytecodes::Code code) { ShouldNotReachHere(); return NULL; }
 
   static address    rethrow_exception_entry()                   { return _rethrow_exception_entry; }
 
--- a/src/share/vm/interpreter/bytecodeTracer.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/interpreter/bytecodeTracer.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -215,7 +215,7 @@
       st->print_cr(" %s", buf);
     }
   } else {
-    st->print_cr(" " PTR_FORMAT, (intptr_t) value);
+    st->print_cr(" " PTR_FORMAT, (void *)value);
   }
 }
 
--- a/src/share/vm/interpreter/cppInterpreter.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/interpreter/cppInterpreter.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -78,7 +78,7 @@
   static address    stack_result_to_stack(int index)            { return _stack_to_stack[index]; }
   static address    stack_result_to_native(int index)           { return _stack_to_native_abi[index]; }
 
-  static address    return_entry  (TosState state, int length);
+  static address    return_entry  (TosState state, int length, Bytecodes::Code code);
   static address    deopt_entry   (TosState state, int length);
 
 #ifdef TARGET_ARCH_x86
--- a/src/share/vm/interpreter/interpreter.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/interpreter/interpreter.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -329,15 +329,21 @@
 //------------------------------------------------------------------------------------------------------------------------
 // Deoptimization support
 
-// If deoptimization happens, this function returns the point of next bytecode to continue execution
+/**
+ * If a deoptimization happens, this function returns the point of the next bytecode at which to continue execution.
+ */
 address AbstractInterpreter::deopt_continue_after_entry(Method* method, address bcp, int callee_parameters, bool is_top_frame) {
   assert(method->contains(bcp), "just checkin'");
-  Bytecodes::Code code   = Bytecodes::java_code_at(method, bcp);
+
+  // Get the original and rewritten bytecode.
+  Bytecodes::Code code = Bytecodes::java_code_at(method, bcp);
   assert(!Interpreter::bytecode_should_reexecute(code), "should not reexecute");
-  int             bci    = method->bci_from(bcp);
-  int             length = -1; // initial value for debugging
+
+  const int bci = method->bci_from(bcp);
+
   // compute continuation length
-  length = Bytecodes::length_at(method, bcp);
+  const int length = Bytecodes::length_at(method, bcp);
+
   // compute result type
   BasicType type = T_ILLEGAL;
 
@@ -393,7 +399,7 @@
   return
     is_top_frame
     ? Interpreter::deopt_entry (as_TosState(type), length)
-    : Interpreter::return_entry(as_TosState(type), length);
+    : Interpreter::return_entry(as_TosState(type), length, code);
 }
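
return_entry now receives the invoke bytecode alongside the TosState and length, presumably so the interpreter can keep distinct return entries per invoke kind (an invokedynamic leaves a different stack shape than an invokevirtual does). A sketch of one plausible table layout; the code_class() split below is an assumption for illustration, not the HotSpot scheme:

    #include <cassert>

    enum TosState { itos, atos, vtos, number_of_states };
    enum Code { _invokestatic, _invokevirtual, _invokedynamic, _other };

    static void* _return_entry[4][number_of_states];  // [invoke kind][tos state]

    static int code_class(Code c) {
      switch (c) {
        case _invokestatic:  return 0;
        case _invokevirtual: return 1;
        case _invokedynamic: return 2;  // indy gets its own entries
        default:             return 3;
      }
    }

    void* return_entry(TosState state, int length, Code code) {
      (void)length;  // the length advances the bcp elsewhere
      return _return_entry[code_class(code)][state];
    }

    int main() {
      static int dummy;
      _return_entry[2][atos] = &dummy;
      assert(return_entry(atos, 5, _invokedynamic) == &dummy);
      return 0;
    }
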
 
 // If deoptimization happens, this function returns the point where the interpreter reexecutes
--- a/src/share/vm/interpreter/interpreterRuntime.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/interpreter/interpreterRuntime.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -523,15 +523,15 @@
 
 IRT_ENTRY(void, InterpreterRuntime::resolve_get_put(JavaThread* thread, Bytecodes::Code bytecode))
   // resolve field
-  FieldAccessInfo info;
+  fieldDescriptor info;
   constantPoolHandle pool(thread, method(thread)->constants());
   bool is_put    = (bytecode == Bytecodes::_putfield  || bytecode == Bytecodes::_putstatic);
   bool is_static = (bytecode == Bytecodes::_getstatic || bytecode == Bytecodes::_putstatic);
 
   {
     JvmtiHideSingleStepping jhss(thread);
-    LinkResolver::resolve_field(info, pool, get_index_u2_cpcache(thread, bytecode),
-                                bytecode, false, CHECK);
+    LinkResolver::resolve_field_access(info, pool, get_index_u2_cpcache(thread, bytecode),
+                                       bytecode, CHECK);
   } // end JvmtiHideSingleStepping
 
   // check if link resolution caused cpCache to be updated
@@ -551,7 +551,7 @@
   // class is initialized.  This is required so that access to the static
   // field will call the initialization function every time until the class
   // is completely initialized, as per section 2.17.5 of the JVM Specification.
-  InstanceKlass *klass = InstanceKlass::cast(info.klass()());
+  InstanceKlass* klass = InstanceKlass::cast(info.field_holder());
   bool uninitialized_static = ((bytecode == Bytecodes::_getstatic || bytecode == Bytecodes::_putstatic) &&
                                !klass->is_initialized());
   Bytecodes::Code get_code = (Bytecodes::Code)0;
@@ -566,9 +566,9 @@
   cache_entry(thread)->set_field(
     get_code,
     put_code,
-    info.klass(),
-    info.field_index(),
-    info.field_offset(),
+    info.field_holder(),
+    info.index(),
+    info.offset(),
     state,
     info.access_flags().is_final(),
     info.access_flags().is_volatile(),
@@ -713,29 +713,55 @@
   if (already_resolved(thread)) return;
 
   if (bytecode == Bytecodes::_invokeinterface) {
-
     if (TraceItables && Verbose) {
       ResourceMark rm(thread);
       tty->print_cr("Resolving: klass: %s to method: %s", info.resolved_klass()->name()->as_C_string(), info.resolved_method()->name()->as_C_string());
     }
+  }
+#ifdef ASSERT
+  if (bytecode == Bytecodes::_invokeinterface) {
     if (info.resolved_method()->method_holder() ==
                                             SystemDictionary::Object_klass()) {
       // NOTE: THIS IS A FIX FOR A CORNER CASE in the JVM spec
-      // (see also cpCacheOop.cpp for details)
+      // (see also CallInfo::set_interface for details)
+      assert(info.call_kind() == CallInfo::vtable_call ||
+             info.call_kind() == CallInfo::direct_call, "");
       methodHandle rm = info.resolved_method();
       assert(rm->is_final() || info.has_vtable_index(),
              "should have been set already");
-      cache_entry(thread)->set_method(bytecode, rm, info.vtable_index());
+    } else if (!info.resolved_method()->has_itable_index()) {
+      // Resolved something like CharSequence.toString.  Use vtable not itable.
+      assert(info.call_kind() != CallInfo::itable_call, "");
     } else {
       // Setup itable entry
-      int index = klassItable::compute_itable_index(info.resolved_method()());
-      cache_entry(thread)->set_interface_call(info.resolved_method(), index);
+      assert(info.call_kind() == CallInfo::itable_call, "");
+      int index = info.resolved_method()->itable_index();
+      assert(info.itable_index() == index, "");
     }
   } else {
-    cache_entry(thread)->set_method(
+    assert(info.call_kind() == CallInfo::direct_call ||
+           info.call_kind() == CallInfo::vtable_call, "");
+  }
+#endif
+  switch (info.call_kind()) {
+  case CallInfo::direct_call:
+    cache_entry(thread)->set_direct_call(
+      bytecode,
+      info.resolved_method());
+    break;
+  case CallInfo::vtable_call:
+    cache_entry(thread)->set_vtable_call(
       bytecode,
       info.resolved_method(),
       info.vtable_index());
+    break;
+  case CallInfo::itable_call:
+    cache_entry(thread)->set_itable_call(
+      bytecode,
+      info.resolved_method(),
+      info.itable_index());
+    break;
+  default:  ShouldNotReachHere();
   }
 }
 IRT_END
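
The single overloaded set_method call is replaced above with kind-specific setters, so each cpCache entry records exactly the payload its call kind needs: the method itself for direct calls, a vtable slot for virtual calls, and an itable slot for interface calls. A hedged sketch with an invented CacheEntry layout (the real ConstantPoolCacheEntry packs these into flag words):

    #include <cassert>
    #include <cstddef>

    enum CallKind { direct_call, vtable_call, itable_call };
    struct Method;  // opaque here

    struct CacheEntry {
      CallKind      kind;
      const Method* method;  // direct and itable calls carry the method
      int           index;   // vtable or itable slot, else -1

      void set_direct_call(const Method* m)        { kind = direct_call; method = m;    index = -1; }
      void set_vtable_call(int vtable_index)       { kind = vtable_call; method = NULL; index = vtable_index; }
      void set_itable_call(const Method* m, int i) { kind = itable_call; method = m;    index = i; }
    };

    int main() {
      CacheEntry e;
      e.set_vtable_call(3);  // as for CallInfo::vtable_call above
      assert(e.kind == vtable_call && e.index == 3);
      return 0;
    }
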
--- a/src/share/vm/interpreter/linkResolver.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/interpreter/linkResolver.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -46,19 +46,6 @@
 #include "runtime/thread.inline.hpp"
 #include "runtime/vmThread.hpp"
 
-//------------------------------------------------------------------------------------------------------------------------
-// Implementation of FieldAccessInfo
-
-void FieldAccessInfo::set(KlassHandle klass, Symbol* name, int field_index, int field_offset,
-BasicType field_type, AccessFlags access_flags) {
-  _klass        = klass;
-  _name         = name;
-  _field_index  = field_index;
-  _field_offset = field_offset;
-  _field_type   = field_type;
-  _access_flags = access_flags;
-}
-
 
 //------------------------------------------------------------------------------------------------------------------------
 // Implementation of CallInfo
@@ -66,26 +53,25 @@
 
 void CallInfo::set_static(KlassHandle resolved_klass, methodHandle resolved_method, TRAPS) {
   int vtable_index = Method::nonvirtual_vtable_index;
-  set_common(resolved_klass, resolved_klass, resolved_method, resolved_method, vtable_index, CHECK);
+  set_common(resolved_klass, resolved_klass, resolved_method, resolved_method, CallInfo::direct_call, vtable_index, CHECK);
 }
 
 
-void CallInfo::set_interface(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, TRAPS) {
+void CallInfo::set_interface(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int itable_index, TRAPS) {
   // This is only called for interface methods. If the resolved_method
   // comes from java/lang/Object, it can be the subject of a virtual call, so
   // we should pick the vtable index from the resolved method.
-  // Other than that case, there is no valid vtable index to specify.
-  int vtable_index = Method::invalid_vtable_index;
-  if (resolved_method->method_holder() == SystemDictionary::Object_klass()) {
-    assert(resolved_method->vtable_index() == selected_method->vtable_index(), "sanity check");
-    vtable_index = resolved_method->vtable_index();
-  }
-  set_common(resolved_klass, selected_klass, resolved_method, selected_method, vtable_index, CHECK);
+  // In that case, the caller must call set_virtual instead of set_interface.
+  assert(resolved_method->method_holder()->is_interface(), "");
+  assert(itable_index == resolved_method()->itable_index(), "");
+  set_common(resolved_klass, selected_klass, resolved_method, selected_method, CallInfo::itable_call, itable_index, CHECK);
 }
 
 void CallInfo::set_virtual(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index, TRAPS) {
   assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index, "valid index");
-  set_common(resolved_klass, selected_klass, resolved_method, selected_method, vtable_index, CHECK);
+  assert(vtable_index < 0 || !resolved_method->has_vtable_index() || vtable_index == resolved_method->vtable_index(), "");
+  CallKind kind = (vtable_index >= 0 && !resolved_method->can_be_statically_bound() ? CallInfo::vtable_call : CallInfo::direct_call);
+  set_common(resolved_klass, selected_klass, resolved_method, selected_method, kind, vtable_index, CHECK);
   assert(!resolved_method->is_compiled_lambda_form(), "these must be handled via an invokehandle call");
 }
 
@@ -98,20 +84,29 @@
          resolved_method->is_compiled_lambda_form(),
          "linkMethod must return one of these");
   int vtable_index = Method::nonvirtual_vtable_index;
-  assert(resolved_method->vtable_index() == vtable_index, "");
-  set_common(resolved_klass, resolved_klass, resolved_method, resolved_method, vtable_index, CHECK);
+  assert(!resolved_method->has_vtable_index(), "");
+  set_common(resolved_klass, resolved_klass, resolved_method, resolved_method, CallInfo::direct_call, vtable_index, CHECK);
   _resolved_appendix    = resolved_appendix;
   _resolved_method_type = resolved_method_type;
 }
 
-void CallInfo::set_common(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index, TRAPS) {
+void CallInfo::set_common(KlassHandle resolved_klass,
+                          KlassHandle selected_klass,
+                          methodHandle resolved_method,
+                          methodHandle selected_method,
+                          CallKind kind,
+                          int index,
+                          TRAPS) {
   assert(resolved_method->signature() == selected_method->signature(), "signatures must correspond");
   _resolved_klass  = resolved_klass;
   _selected_klass  = selected_klass;
   _resolved_method = resolved_method;
   _selected_method = selected_method;
-  _vtable_index    = vtable_index;
+  _call_kind       = kind;
+  _call_index      = index;
   _resolved_appendix = Handle();
+  DEBUG_ONLY(verify());  // verify before making side effects
+
   if (CompilationPolicy::must_be_compiled(selected_method)) {
     // This path is unusual, mostly used by the '-Xcomp' stress test mode.
 
@@ -138,6 +133,81 @@
   }
 }
 
+// utility query for unreflecting a method
+CallInfo::CallInfo(Method* resolved_method, Klass* resolved_klass) {
+  Klass* resolved_method_holder = resolved_method->method_holder();
+  if (resolved_klass == NULL) { // 2nd argument defaults to holder of 1st
+    resolved_klass = resolved_method_holder;
+  }
+  _resolved_klass  = resolved_klass;
+  _selected_klass  = resolved_klass;
+  _resolved_method = resolved_method;
+  _selected_method = resolved_method;
+  // classify:
+  CallKind kind = CallInfo::unknown_kind;
+  int index = resolved_method->vtable_index();
+  if (resolved_method->can_be_statically_bound()) {
+    kind = CallInfo::direct_call;
+  } else if (!resolved_method_holder->is_interface()) {
+    // Could be an Object method inherited into an interface, but still a vtable call.
+    kind = CallInfo::vtable_call;
+  } else if (!resolved_klass->is_interface()) {
+    // A miranda method.  Compute the vtable index.
+    ResourceMark rm;
+    klassVtable* vt = InstanceKlass::cast(resolved_klass)->vtable();
+    index = vt->index_of_miranda(resolved_method->name(),
+                                 resolved_method->signature());
+    kind = CallInfo::vtable_call;
+  } else if (resolved_method->has_vtable_index()) {
+    // Can occur if an interface redeclares a method of Object.
+
+#ifdef ASSERT
+    // Ensure that this is really the case.
+    KlassHandle object_klass = SystemDictionary::Object_klass();
+    Method * object_resolved_method = object_klass()->vtable()->method_at(index);
+    assert(object_resolved_method->name() == resolved_method->name(),
+      err_msg("Object and interface method names should match at vtable index %d, %s != %s",
+      index, object_resolved_method->name()->as_C_string(), resolved_method->name()->as_C_string()));
+    assert(object_resolved_method->signature() == resolved_method->signature(),
+      err_msg("Object and interface method signatures should match at vtable index %d, %s != %s",
+      index, object_resolved_method->signature()->as_C_string(), resolved_method->signature()->as_C_string()));
+#endif // ASSERT
+
+    kind = CallInfo::vtable_call;
+  } else {
+    // A regular interface call.
+    kind = CallInfo::itable_call;
+    index = resolved_method->itable_index();
+  }
+  assert(index == Method::nonvirtual_vtable_index || index >= 0, err_msg("bad index %d", index));
+  _call_kind  = kind;
+  _call_index = index;
+  _resolved_appendix = Handle();
+  DEBUG_ONLY(verify());
+}
+
+#ifdef ASSERT
+void CallInfo::verify() {
+  switch (call_kind()) {  // the meaning and allowed value of index depends on kind
+  case CallInfo::direct_call:
+    if (_call_index == Method::nonvirtual_vtable_index)  break;
+    // else fall through to check vtable index:
+  case CallInfo::vtable_call:
+    assert(resolved_klass()->verify_vtable_index(_call_index), "");
+    break;
+  case CallInfo::itable_call:
+    assert(resolved_method()->method_holder()->verify_itable_index(_call_index), "");
+    break;
+  case CallInfo::unknown_kind:
+    assert(call_kind() != CallInfo::unknown_kind, "CallInfo must be set");
+    break;
+  default:
+    fatal(err_msg_res("Unexpected call kind %d", call_kind()));
+  }
+}
+#endif //ASSERT
+
+
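
Restating the classification done in the new CallInfo constructor as a pure function may make the decision order easier to follow; the parameters name the predicates the constructor tests, but the function itself is illustrative rather than HotSpot code:

    enum CallKind { direct_call, vtable_call, itable_call };

    CallKind classify(bool statically_bound,            // final, private, <init>, ...
                      bool holder_is_interface,         // the resolved method's holder
                      bool resolved_klass_is_interface,
                      bool has_vtable_index) {          // Object method redeclared in an interface
      if (statically_bound)             return direct_call;
      if (!holder_is_interface)         return vtable_call;  // ordinary virtual call
      if (!resolved_klass_is_interface) return vtable_call;  // miranda: index comes from the vtable
      if (has_vtable_index)             return vtable_call;  // e.g. an interface redeclaring toString
      return itable_call;                                    // a regular interface call
    }

    int main() { return classify(false, true, true, false) == itable_call ? 0 : 1; }
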
 
 //------------------------------------------------------------------------------------------------------------------------
 // Klass resolution
@@ -163,20 +233,22 @@
   result = KlassHandle(THREAD, result_oop);
 }
 
-void LinkResolver::resolve_klass_no_update(KlassHandle& result, constantPoolHandle pool, int index, TRAPS) {
-  Klass* result_oop =
-         ConstantPool::klass_ref_at_if_loaded_check(pool, index, CHECK);
-  result = KlassHandle(THREAD, result_oop);
-}
-
-
 //------------------------------------------------------------------------------------------------------------------------
 // Method resolution
 //
 // According to JVM spec. $5.4.3c & $5.4.3d
 
+// Look up method in klasses, including static methods
+// Then look up local default methods
 void LinkResolver::lookup_method_in_klasses(methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS) {
   Method* result_oop = klass->uncached_lookup_method(name, signature);
+  if (result_oop == NULL) {
+    Array<Method*>* default_methods = InstanceKlass::cast(klass())->default_methods();
+    if (default_methods != NULL) {
+      result_oop = InstanceKlass::find_method(default_methods, name, signature);
+    }
+  }
+
   if (EnableInvokeDynamic && result_oop != NULL) {
     vmIntrinsics::ID iid = result_oop->intrinsic_id();
     if (MethodHandles::is_signature_polymorphic(iid)) {
@@ -188,20 +260,46 @@
 }
 
 // returns first instance method
+// Looks up method in classes, then looks up local default methods
 void LinkResolver::lookup_instance_method_in_klasses(methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS) {
   Method* result_oop = klass->uncached_lookup_method(name, signature);
   result = methodHandle(THREAD, result_oop);
-  while (!result.is_null() && result->is_static()) {
+  while (!result.is_null() && result->is_static() && result->method_holder()->super() != NULL) {
     klass = KlassHandle(THREAD, result->method_holder()->super());
     result = methodHandle(THREAD, klass->uncached_lookup_method(name, signature));
   }
+
+  if (result.is_null()) {
+    Array<Method*>* default_methods = InstanceKlass::cast(klass())->default_methods();
+    if (default_methods != NULL) {
+      result = methodHandle(InstanceKlass::find_method(default_methods, name, signature));
+      assert(result.is_null() || !result->is_static(), "static defaults not allowed");
+    }
+  }
 }
 
+int LinkResolver::vtable_index_of_interface_method(KlassHandle klass,
+                                          methodHandle resolved_method, TRAPS) {
 
-int LinkResolver::vtable_index_of_miranda_method(KlassHandle klass, Symbol* name, Symbol* signature, TRAPS) {
-  ResourceMark rm(THREAD);
-  klassVtable *vt = InstanceKlass::cast(klass())->vtable();
-  return vt->index_of_miranda(name, signature);
+  int vtable_index = Method::invalid_vtable_index;
+  Symbol* name = resolved_method->name();
+  Symbol* signature = resolved_method->signature();
+
+  // First check in default method array
+  if (!resolved_method->is_abstract()  &&
+    (InstanceKlass::cast(klass())->default_methods() != NULL)) {
+    int index = InstanceKlass::find_method_index(InstanceKlass::cast(klass())->default_methods(), name, signature);
+    if (index >= 0 ) {
+      vtable_index = InstanceKlass::cast(klass())->default_vtable_indices()->at(index);
+    }
+  }
+  if (vtable_index == Method::invalid_vtable_index) {
+    // get vtable_index for miranda methods
+    ResourceMark rm(THREAD);
+    klassVtable *vt = InstanceKlass::cast(klass())->vtable();
+    vtable_index = vt->index_of_miranda(name, signature);
+  }
+  return vtable_index;
 }
 
 void LinkResolver::lookup_method_in_interfaces(methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS) {
@@ -320,18 +418,28 @@
 
   AccessFlags flags = sel_method->access_flags();
 
-  // Special case:  arrays always override "clone". JVMS 2.15.
+  // Special case #1:  arrays always override "clone". JVMS 2.15.
   // If the resolved klass is an array class, and the declaring class
   // is java.lang.Object and the method is "clone", set the flags
   // to public.
+  // Special case #2:  If the resolved klass is an interface, and
+  // the declaring class is java.lang.Object, and the method is
+  // "clone" or "finalize", set the flags to public. If the
+  // resolved interface does not contain "clone" or "finalize"
+  // methods, the method/interface method resolution looks to
+  // the interface's super class, java.lang.Object.  With JDK 8
+  // interface accessability check requirement, special casing
+  // this scenario is necessary to avoid an IAE.
   //
-  // We'll check for the method name first, as that's most likely
-  // to be false (so we'll short-circuit out of these tests).
-  if (sel_method->name() == vmSymbols::clone_name() &&
-      sel_klass() == SystemDictionary::Object_klass() &&
-      resolved_klass->oop_is_array()) {
+  // We'll check for each method name first and then java.lang.Object
+  // to best short-circuit out of these tests.
+  if (((sel_method->name() == vmSymbols::clone_name() &&
+        (resolved_klass->oop_is_array() || resolved_klass->is_interface())) ||
+       (sel_method->name() == vmSymbols::finalize_method_name() &&
+        resolved_klass->is_interface())) &&
+      sel_klass() == SystemDictionary::Object_klass()) {
     // We need to change "protected" to "public".
-    assert(flags.is_protected(), "clone not protected?");
+    assert(flags.is_protected(), "clone or finalize not protected?");
     jint new_flags = flags.as_int();
     new_flags = new_flags & (~JVM_ACC_PROTECTED);
     new_flags = new_flags | JVM_ACC_PUBLIC;
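
The special cases above let the protected Object.clone and Object.finalize resolve as if public; the rewrite itself is two bit operations on the access-flags word. A standalone sketch using the class-file flag values:

    #include <cassert>
    #include <cstdio>

    // Access-flag bits as defined by the class-file format.
    const int JVM_ACC_PUBLIC    = 0x0001;
    const int JVM_ACC_PROTECTED = 0x0004;

    int publicize(int flags) {
      assert(flags & JVM_ACC_PROTECTED);  // "clone or finalize not protected?"
      flags &= ~JVM_ACC_PROTECTED;        // drop protected
      flags |=  JVM_ACC_PUBLIC;           // add public
      return flags;
    }

    int main() {
      printf("0x%04x\n", publicize(JVM_ACC_PROTECTED));  // prints 0x0001
      return 0;
    }
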
@@ -360,14 +468,19 @@
 
 void LinkResolver::resolve_method_statically(methodHandle& resolved_method, KlassHandle& resolved_klass,
                                              Bytecodes::Code code, constantPoolHandle pool, int index, TRAPS) {
-
+  // This method is used only
+  // (1) in C2 from InlineTree::ok_to_inline (via ciMethod::check_call),
+  // and
+  // (2) in Bytecode_invoke::static_target
+  // It appears to fail when applied to an invokeinterface call site.
+  // FIXME: Remove this method and ciMethod::check_call; refactor to use the other LinkResolver entry points.
   // resolve klass
   if (code == Bytecodes::_invokedynamic) {
     resolved_klass = SystemDictionary::MethodHandle_klass();
     Symbol* method_name = vmSymbols::invoke_name();
     Symbol* method_signature = pool->signature_ref_at(index);
     KlassHandle  current_klass(THREAD, pool->pool_holder());
-    resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, CHECK);
+    resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, false, CHECK);
     return;
   }
 
@@ -389,22 +502,34 @@
 
   if (code == Bytecodes::_invokeinterface) {
     resolve_interface_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, CHECK);
+  } else if (code == Bytecodes::_invokevirtual) {
+    resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, true, CHECK);
   } else {
-    resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, CHECK);
+    resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, false, CHECK);
   }
 }
 
 void LinkResolver::resolve_method(methodHandle& resolved_method, KlassHandle resolved_klass,
                                   Symbol* method_name, Symbol* method_signature,
-                                  KlassHandle current_klass, bool check_access, TRAPS) {
+                                  KlassHandle current_klass, bool check_access,
+                                  bool require_methodref, TRAPS) {
 
   Handle nested_exception;
 
-  // 1. lookup method in resolved klass and its super klasses
+  // 1. if a methodref is required, check that resolved_klass is a class, not an interface
+  if (require_methodref && resolved_klass->is_interface()) {
+    ResourceMark rm(THREAD);
+    char buf[200];
+    jio_snprintf(buf, sizeof(buf), "Found interface %s, but class was expected",
+        resolved_klass()->external_name());
+    THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
+  }
+
+  // 2. lookup method in resolved klass and its super klasses
   lookup_method_in_klasses(resolved_method, resolved_klass, method_name, method_signature, CHECK);
 
   if (resolved_method.is_null()) { // not found in the class hierarchy
-    // 2. lookup method in all the interfaces implemented by the resolved klass
+    // 3. lookup method in all the interfaces implemented by the resolved klass
     lookup_method_in_interfaces(resolved_method, resolved_klass, method_name, method_signature, CHECK);
 
     if (resolved_method.is_null()) {
@@ -418,7 +543,7 @@
     }
 
     if (resolved_method.is_null()) {
-      // 3. method lookup failed
+      // 4. method lookup failed
       ResourceMark rm(THREAD);
       THROW_MSG_CAUSE(vmSymbols::java_lang_NoSuchMethodError(),
                       Method::name_and_sig_as_C_string(resolved_klass(),
@@ -428,15 +553,6 @@
     }
   }
 
-  // 4. check if klass is not interface
-  if (resolved_klass->is_interface() && resolved_method->is_abstract()) {
-    ResourceMark rm(THREAD);
-    char buf[200];
-    jio_snprintf(buf, sizeof(buf), "Found interface %s, but class was expected",
-        resolved_klass()->external_name());
-    THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
-  }
-
   // 5. check if method is concrete
   if (resolved_method->is_abstract() && !resolved_klass->is_abstract()) {
     ResourceMark rm(THREAD);
@@ -521,6 +637,16 @@
   }
 
   if (check_access) {
+    // JDK8 adds non-public interface methods and an accessibility check requirement
+    assert(current_klass.not_null() , "current_klass should not be null");
+
+    // check if method can be accessed by the referring class
+    check_method_accessability(current_klass,
+                               resolved_klass,
+                               KlassHandle(THREAD, resolved_method->method_holder()),
+                               resolved_method,
+                               CHECK);
+
     HandleMark hm(THREAD);
     Handle loader (THREAD, InstanceKlass::cast(current_klass())->class_loader());
     Handle class_loader (THREAD, resolved_method->method_holder()->class_loader());
@@ -552,6 +678,26 @@
       }
     }
   }
+
+  if (TraceItables && Verbose) {
+    ResourceMark rm(THREAD);
+    tty->print("invokeinterface resolved method: caller-class:%s, compile-time-class:%s, method:%s, method_holder:%s, access_flags: ",
+                   (current_klass.is_null() ? "<NULL>" : current_klass->internal_name()),
+                   (resolved_klass.is_null() ? "<NULL>" : resolved_klass->internal_name()),
+                   Method::name_and_sig_as_C_string(resolved_klass(),
+                                                    resolved_method->name(),
+                                                    resolved_method->signature()),
+                   resolved_method->method_holder()->internal_name()
+                  );
+    resolved_method->access_flags().print_on(tty);
+    if (resolved_method->is_default_method()) {
+      tty->print("default");
+    }
+    if (resolved_method->is_overpass()) {
+      tty->print("overpass");
+    }
+    tty->cr();
+  }
 }
 
 //------------------------------------------------------------------------------------------------------------------------
@@ -580,45 +726,49 @@
   }
 }
 
-void LinkResolver::resolve_field(FieldAccessInfo& result, constantPoolHandle pool, int index, Bytecodes::Code byte, bool check_only, TRAPS) {
-  resolve_field(result, pool, index, byte, check_only, true, CHECK);
+void LinkResolver::resolve_field_access(fieldDescriptor& result, constantPoolHandle pool, int index, Bytecodes::Code byte, TRAPS) {
+  // Load these early in case the resolve of the containing klass fails
+  Symbol* field = pool->name_ref_at(index);
+  Symbol* sig   = pool->signature_ref_at(index);
+
+  // resolve specified klass
+  KlassHandle resolved_klass;
+  resolve_klass(resolved_klass, pool, index, CHECK);
+
+  KlassHandle  current_klass(THREAD, pool->pool_holder());
+  resolve_field(result, resolved_klass, field, sig, current_klass, byte, true, true, CHECK);
 }
 
-void LinkResolver::resolve_field(FieldAccessInfo& result, constantPoolHandle pool, int index, Bytecodes::Code byte, bool check_only, bool update_pool, TRAPS) {
+void LinkResolver::resolve_field(fieldDescriptor& fd, KlassHandle resolved_klass, Symbol* field, Symbol* sig,
+                                 KlassHandle current_klass, Bytecodes::Code byte, bool check_access, bool initialize_class,
+                                 TRAPS) {
   assert(byte == Bytecodes::_getstatic || byte == Bytecodes::_putstatic ||
-         byte == Bytecodes::_getfield  || byte == Bytecodes::_putfield, "bad bytecode");
+         byte == Bytecodes::_getfield  || byte == Bytecodes::_putfield  ||
+         (byte == Bytecodes::_nop && !check_access), "bad field access bytecode");
 
   bool is_static = (byte == Bytecodes::_getstatic || byte == Bytecodes::_putstatic);
   bool is_put    = (byte == Bytecodes::_putfield  || byte == Bytecodes::_putstatic);
 
-  // resolve specified klass
-  KlassHandle resolved_klass;
-  if (update_pool) {
-    resolve_klass(resolved_klass, pool, index, CHECK);
-  } else {
-    resolve_klass_no_update(resolved_klass, pool, index, CHECK);
-  }
-  // Load these early in case the resolve of the containing klass fails
-  Symbol* field = pool->name_ref_at(index);
-  Symbol* sig   = pool->signature_ref_at(index);
   // Check if there's a resolved klass containing the field
-  if( resolved_klass.is_null() ) {
+  if (resolved_klass.is_null()) {
     ResourceMark rm(THREAD);
     THROW_MSG(vmSymbols::java_lang_NoSuchFieldError(), field->as_C_string());
   }
 
   // Resolve instance field
-  fieldDescriptor fd; // find_field initializes fd if found
   KlassHandle sel_klass(THREAD, InstanceKlass::cast(resolved_klass())->find_field(field, sig, &fd));
   // check if field exists; i.e., if a klass containing the field def has been selected
-  if (sel_klass.is_null()){
+  if (sel_klass.is_null()) {
     ResourceMark rm(THREAD);
     THROW_MSG(vmSymbols::java_lang_NoSuchFieldError(), field->as_C_string());
   }
 
+  // Access checking may be turned off when calling from within the VM.
+  if (!check_access)
+    return;
+
   // check access
-  KlassHandle ref_klass(THREAD, pool->pool_holder());
-  check_field_accessability(ref_klass, resolved_klass, sel_klass, fd, CHECK);
+  check_field_accessability(current_klass, resolved_klass, sel_klass, fd, CHECK);
 
   // check for errors
   if (is_static != fd.is_static()) {
@@ -629,7 +779,7 @@
   }
 
   // Final fields can only be accessed from its own class.
-  if (is_put && fd.access_flags().is_final() && sel_klass() != pool->pool_holder()) {
+  if (is_put && fd.access_flags().is_final() && sel_klass() != current_klass()) {
     THROW(vmSymbols::java_lang_IllegalAccessError());
   }
 
@@ -639,19 +789,18 @@
   //
   // note 2: we don't want to force initialization if we are just checking
   //         if the field access is legal; e.g., during compilation
-  if (is_static && !check_only) {
+  if (is_static && initialize_class) {
     sel_klass->initialize(CHECK);
   }
 
-  {
+  if (sel_klass() != current_klass()) {
     HandleMark hm(THREAD);
-    Handle ref_loader (THREAD, InstanceKlass::cast(ref_klass())->class_loader());
+    Handle ref_loader (THREAD, InstanceKlass::cast(current_klass())->class_loader());
     Handle sel_loader (THREAD, InstanceKlass::cast(sel_klass())->class_loader());
-    Symbol*  signature_ref  = pool->signature_ref_at(index);
     {
       ResourceMark rm(THREAD);
       Symbol* failed_type_symbol =
-        SystemDictionary::check_signature_loaders(signature_ref,
+        SystemDictionary::check_signature_loaders(sig,
                                                   ref_loader, sel_loader,
                                                   false,
                                                   CHECK);
@@ -677,9 +826,6 @@
 
   // return information. note that the klass is set to the actual klass containing the
   // field, otherwise access of static fields in superclasses will not work.
-  KlassHandle holder (THREAD, fd.field_holder());
-  Symbol*  name   = fd.name();
-  result.set(holder, name, fd.index(), fd.offset(), fd.field_type(), fd.access_flags());
 }
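
With FieldAccessInfo gone, resolution fills a fieldDescriptor in place and callers, such as InterpreterRuntime::resolve_get_put earlier in this changeset, read field_holder(), index() and offset() straight from it. A hedged miniature of that calling convention with stand-in types:

    #include <cstdio>

    // Stand-in for HotSpot's fieldDescriptor: the resolver writes into it,
    // the caller reads back out, no intermediate copy-out struct.
    struct FieldDescriptor {
      const char* holder;
      int  index;
      int  offset;
      bool is_final;
    };

    // Hypothetical resolver in the new shape: the descriptor comes first.
    static bool resolve_field_access(FieldDescriptor& fd, const char* name) {
      (void)name;                        // lookup elided
      fd.holder   = "java/lang/System";  // assumed result, for illustration only
      fd.index    = 2;
      fd.offset   = 16;
      fd.is_final = true;
      return true;
    }

    int main() {
      FieldDescriptor fd;
      if (resolve_field_access(fd, "out")) {
        printf("%s index=%d offset=%d final=%d\n",
               fd.holder, fd.index, fd.offset, (int)fd.is_final);
      }
      return 0;
    }
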
 
 
@@ -716,7 +862,7 @@
                                                   Symbol* method_name, Symbol* method_signature,
                                                   KlassHandle current_klass, bool check_access, TRAPS) {
 
-  resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, CHECK);
+  resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, false, CHECK);
   assert(resolved_method->name() != vmSymbols::class_initializer_name(), "should have been checked in verifier");
 
   // check if static
@@ -743,28 +889,14 @@
                                                    Symbol* method_name, Symbol* method_signature,
                                                    KlassHandle current_klass, bool check_access, TRAPS) {
 
-  if (resolved_klass->is_interface() && current_klass() != NULL) {
-    // If the target class is a direct interface, treat this as a "super"
-    // default call.
-    //
-    // If the current method is an overpass that happens to call a direct
-    // super-interface's method, then we'll end up rerunning the default method
-    // analysis even though we don't need to, but that's ok since it will end
-    // up with the same answer.
-    InstanceKlass* ik = InstanceKlass::cast(current_klass());
-    Array<Klass*>* interfaces = ik->local_interfaces();
-    int num_interfaces = interfaces->length();
-    for (int index = 0; index < num_interfaces; index++) {
-      if (interfaces->at(index) == resolved_klass()) {
-        Method* method = DefaultMethods::find_super_default(current_klass(),
-            resolved_klass(), method_name, method_signature, CHECK);
-        resolved_method = methodHandle(THREAD, method);
-        return;
-      }
-    }
-  }
+  // Invokespecial is used in several distinct cases:
+  //  - <init> (constructor invocation)
+  //  - local private method invocation, for classes and interfaces
+  //  - superclass.method, which can also resolve to a default method,
+  //    where the selected method is recalculated relative to the direct superclass
+  //  - superinterface.method, which explicitly does not check shadowing
 
-  resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, CHECK);
+  resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, false, CHECK);
 
   // check if method name is <init>, that it is found in same klass as static type
   if (resolved_method->name() == vmSymbols::object_initializer_name() &&
@@ -792,6 +924,26 @@
                                                          resolved_method->signature()));
     THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
   }
+
+  if (TraceItables && Verbose) {
+    ResourceMark rm(THREAD);
+    tty->print("invokespecial resolved method: caller-class:%s, compile-time-class:%s, method:%s, method_holder:%s, access_flags: ",
+                (current_klass.is_null() ? "<NULL>" : current_klass->internal_name()),
+                (resolved_klass.is_null() ? "<NULL>" : resolved_klass->internal_name()),
+                Method::name_and_sig_as_C_string(resolved_klass(),
+                                                 resolved_method->name(),
+                                                 resolved_method->signature()),
+                resolved_method->method_holder()->internal_name()
+               );
+    resolved_method->access_flags().print_on(tty);
+    if (resolved_method->is_default_method()) {
+      tty->print("default");
+    }
+    if (resolved_method->is_overpass()) {
+      tty->print("overpass");
+    }
+    tty->cr();
+  }
 }
 
 // throws runtime exceptions
@@ -799,23 +951,24 @@
                                                   KlassHandle current_klass, bool check_access, TRAPS) {
 
   // resolved method is selected method unless we have an old-style lookup
+  // for a superclass method.
+  // For invokespecial on a superinterface, the resolved method is the
+  // selected method, with no checks for shadowing.
   methodHandle sel_method(THREAD, resolved_method());
 
   // check if this is an old-style super call and do a new lookup if so
   { KlassHandle method_klass  = KlassHandle(THREAD,
                                             resolved_method->method_holder());
 
-    const bool direct_calling_default_method =
-      resolved_klass() != NULL && resolved_method() != NULL &&
-      resolved_klass->is_interface() && !resolved_method->is_abstract();
-
-    if (!direct_calling_default_method &&
-        check_access &&
+    if (check_access &&
         // a) check if ACC_SUPER flag is set for the current class
         (current_klass->is_super() || !AllowNonVirtualCalls) &&
-        // b) check if the method class is a superclass of the current class (superclass relation is not reflexive!)
-        current_klass->is_subtype_of(method_klass()) &&
-        current_klass() != method_klass() &&
+        // b) check if the class of the resolved_klass is a superclass
+        // (not supertype in order to exclude interface classes) of the current class.
+        // This check is not performed for super.invoke for interface methods
+        // in super interfaces.
+        current_klass->is_subclass_of(resolved_klass()) &&
+        current_klass() != resolved_klass() &&
         // c) check if the method is not <init>
         resolved_method->name() != vmSymbols::object_initializer_name()) {
       // Lookup super method
@@ -853,6 +1006,25 @@
                                                       sel_method->signature()));
   }
 
+  if (TraceItables && Verbose) {
+    ResourceMark rm(THREAD);
+    tty->print("invokespecial selected method: resolved-class:%s, method:%s, method_holder:%s, access_flags: ",
+                 (resolved_klass.is_null() ? "<NULL>" : resolved_klass->internal_name()),
+                 Method::name_and_sig_as_C_string(resolved_klass(),
+                                                  sel_method->name(),
+                                                  sel_method->signature()),
+                 sel_method->method_holder()->internal_name()
+                );
+    sel_method->access_flags().print_on(tty);
+    if (sel_method->is_default_method()) {
+      tty->print("default");
+    }
+    if (sel_method->is_overpass()) {
+      tty->print("overpass");
+    }
+    tty->cr();
+  }
+
   // setup result
   result.set_static(resolved_klass, sel_method, CHECK);
 }
@@ -870,11 +1042,23 @@
                                                    Symbol* method_name, Symbol* method_signature,
                                                    KlassHandle current_klass, bool check_access, TRAPS) {
   // normal method resolution
-  resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, CHECK);
+  resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, true, CHECK);
 
   assert(resolved_method->name() != vmSymbols::object_initializer_name(), "should have been checked in verifier");
   assert(resolved_method->name() != vmSymbols::class_initializer_name (), "should have been checked in verifier");
 
+  // check if private interface method
+  if (resolved_klass->is_interface() && resolved_method->is_private()) {
+    ResourceMark rm(THREAD);
+    char buf[200];
+    jio_snprintf(buf, sizeof(buf), "private interface method requires invokespecial, not invokevirtual: method %s, caller-class:%s",
+                 Method::name_and_sig_as_C_string(resolved_klass(),
+                                                  resolved_method->name(),
+                                                  resolved_method->signature()),
+                   (current_klass.is_null() ? "<NULL>" : current_klass->internal_name()));
+    THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
+  }
+
   // check if not static
   if (resolved_method->is_static()) {
     ResourceMark rm(THREAD);
@@ -884,6 +1068,26 @@
                                                                                                              resolved_method->signature()));
     THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
   }
+
+  if (PrintVtables && Verbose) {
+    ResourceMark rm(THREAD);
+    tty->print("invokevirtual resolved method: caller-class:%s, compile-time-class:%s, method:%s, method_holder:%s, access_flags: ",
+                   (current_klass.is_null() ? "<NULL>" : current_klass->internal_name()),
+                   (resolved_klass.is_null() ? "<NULL>" : resolved_klass->internal_name()),
+                   Method::name_and_sig_as_C_string(resolved_klass(),
+                                                    resolved_method->name(),
+                                                    resolved_method->signature()),
+                   resolved_method->method_holder()->internal_name()
+                  );
+    resolved_method->access_flags().print_on(tty);
+    if (resolved_method->is_default_method()) {
+      tty->print("default");
+    }
+    if (resolved_method->is_overpass()) {
+      tty->print("overpass");
+    }
+    tty->cr();
+  }
 }
 
 // throws runtime exceptions
@@ -907,19 +1111,14 @@
   }
 
   // Virtual methods cannot be resolved before its klass has been linked, for otherwise the Method*'s
-  // has not been rewritten, and the vtable initialized.
-  assert(resolved_method->method_holder()->is_linked(), "must be linked");
-
-  // Virtual methods cannot be resolved before its klass has been linked, for otherwise the Method*'s
   // has not been rewritten, and the vtable initialized. Make sure to do this after the nullcheck, since
   // a missing receiver might result in a bogus lookup.
   assert(resolved_method->method_holder()->is_linked(), "must be linked");
 
   // do lookup based on receiver klass using the vtable index
   if (resolved_method->method_holder()->is_interface()) { // miranda method
-    vtable_index = vtable_index_of_miranda_method(resolved_klass,
-                           resolved_method->name(),
-                           resolved_method->signature(), CHECK);
+    vtable_index = vtable_index_of_interface_method(resolved_klass,
+                           resolved_method, CHECK);
     assert(vtable_index >= 0 , "we should have valid vtable index at this point");
 
     InstanceKlass* inst = InstanceKlass::cast(recv_klass());
@@ -927,6 +1126,7 @@
   } else {
     // at this point we are sure that resolved_method is virtual and not
     // a miranda method; therefore, it must have a valid vtable index.
+    assert(!resolved_method->has_itable_index(), "resolved method must have a vtable index, not an itable index");
     vtable_index = resolved_method->vtable_index();
     // We could get a negative vtable_index for final methods,
     // because, as an optimization, they are never put in the vtable,
@@ -962,6 +1162,26 @@
                                                       selected_method->signature()));
   }
 
+  if (PrintVtables && Verbose) {
+    ResourceMark rm(THREAD);
+    tty->print("invokevirtual selected method: receiver-class:%s, resolved-class:%s, method:%s, method_holder:%s, vtable_index:%d, access_flags: ",
+                   (recv_klass.is_null() ? "<NULL>" : recv_klass->internal_name()),
+                   (resolved_klass.is_null() ? "<NULL>" : resolved_klass->internal_name()),
+                   Method::name_and_sig_as_C_string(resolved_klass(),
+                                                    resolved_method->name(),
+                                                    resolved_method->signature()),
+                   selected_method->method_holder()->internal_name(),
+                   vtable_index
+                  );
+    selected_method->access_flags().print_on(tty);
+    if (selected_method->is_default_method()) {
+      tty->print("default");
+    }
+    if (selected_method->is_overpass()) {
+      tty->print("overpass");
+    }
+    tty->cr();
+  }
   // setup result
   result.set_virtual(resolved_klass, recv_klass, resolved_method, selected_method, vtable_index, CHECK);
 }
@@ -992,6 +1212,17 @@
     THROW(vmSymbols::java_lang_NullPointerException());
   }
 
+  // check if private interface method
+  if (resolved_klass->is_interface() && resolved_method->is_private()) {
+    ResourceMark rm(THREAD);
+    char buf[200];
+    jio_snprintf(buf, sizeof(buf), "private interface method requires invokespecial, not invokeinterface: method %s",
+                 Method::name_and_sig_as_C_string(resolved_klass(),
+                                                  resolved_method->name(),
+                                                  resolved_method->signature()));
+    THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
+  }
+
   // check if receiver klass implements the resolved interface
   if (!recv_klass->is_subtype_of(resolved_klass())) {
     ResourceMark rm(THREAD);
@@ -1006,6 +1237,12 @@
   lookup_instance_method_in_klasses(sel_method, recv_klass,
             resolved_method->name(),
             resolved_method->signature(), CHECK);
+  if (sel_method.is_null() && !check_null_and_abstract) {
+    // In theory this is a harmless placeholder value, but
+    // in practice leaving it null affects the nsk default method tests.
+    // This needs further study.
+    sel_method = resolved_method;
+  }
   // check if method exists
   if (sel_method.is_null()) {
     ResourceMark rm(THREAD);
@@ -1015,27 +1252,13 @@
                                                       resolved_method->signature()));
   }
   // check access
-  if (sel_method->method_holder()->is_interface()) {
-    // Method holder is an interface. Throw Illegal Access Error if sel_method
-    // is neither public nor private.
-    if (!(sel_method->is_public() || sel_method->is_private())) {
-      ResourceMark rm(THREAD);
-      THROW_MSG(vmSymbols::java_lang_IllegalAccessError(),
-                Method::name_and_sig_as_C_string(recv_klass(),
-                                                 sel_method->name(),
-                                                 sel_method->signature()));
-    }
-  }
-  else {
-    // Method holder is a class. Throw Illegal Access Error if sel_method
-    // is not public.
-    if (!sel_method->is_public()) {
-      ResourceMark rm(THREAD);
-      THROW_MSG(vmSymbols::java_lang_IllegalAccessError(),
-                Method::name_and_sig_as_C_string(recv_klass(),
-                                                 sel_method->name(),
-                                                 sel_method->signature()));
-    }
+  // Throw Illegal Access Error if sel_method is not public.
+  if (!sel_method->is_public()) {
+    ResourceMark rm(THREAD);
+    THROW_MSG(vmSymbols::java_lang_IllegalAccessError(),
+              Method::name_and_sig_as_C_string(recv_klass(),
+                                               sel_method->name(),
+                                               sel_method->signature()));
   }
   // check if abstract
   if (check_null_and_abstract && sel_method->is_abstract()) {
@@ -1046,7 +1269,34 @@
                                                       sel_method->signature()));
   }
   // setup result
-  result.set_interface(resolved_klass, recv_klass, resolved_method, sel_method, CHECK);
+  if (!resolved_method->has_itable_index()) {
+    int vtable_index = resolved_method->vtable_index();
+    assert(vtable_index == sel_method->vtable_index(), "sanity check");
+    result.set_virtual(resolved_klass, recv_klass, resolved_method, sel_method, vtable_index, CHECK);
+    return;
+  }
+  int itable_index = resolved_method()->itable_index();
+
+  if (TraceItables && Verbose) {
+    ResourceMark rm(THREAD);
+    tty->print("invokeinterface selected method: receiver-class:%s, resolved-class:%s, method:%s, method_holder:%s, access_flags: ",
+                   (recv_klass.is_null() ? "<NULL>" : recv_klass->internal_name()),
+                   (resolved_klass.is_null() ? "<NULL>" : resolved_klass->internal_name()),
+                   Method::name_and_sig_as_C_string(resolved_klass(),
+                                                    resolved_method->name(),
+                                                    resolved_method->signature()),
+                   sel_method->method_holder()->internal_name()
+                  );
+    sel_method->access_flags().print_on(tty);
+    if (sel_method->is_default_method()) {
+      tty->print("default");
+    }
+    if (sel_method->is_overpass()) {
+      tty->print("overpass");
+    }
+    tty->cr();
+  }
+  result.set_interface(resolved_klass, recv_klass, resolved_method, sel_method, itable_index, CHECK);
 }
 
 
@@ -1293,7 +1543,8 @@
   }
 
   if (TraceMethodHandles) {
-    tty->print_cr("resolve_invokedynamic #%d %s %s",
+    ResourceMark rm(THREAD);
+    tty->print_cr("resolve_invokedynamic #%d %s %s",
                   ConstantPool::decode_invokedynamic_index(index),
                   method_name->as_C_string(), method_signature->as_C_string());
     tty->print("  BSM info: "); bootstrap_specifier->print();
@@ -1320,7 +1571,7 @@
                                                      THREAD);
   if (HAS_PENDING_EXCEPTION) {
     if (TraceMethodHandles) {
-      tty->print_cr("invokedynamic throws BSME for "INTPTR_FORMAT, PENDING_EXCEPTION);
+      tty->print_cr("invokedynamic throws BSME for "INTPTR_FORMAT, (void *)PENDING_EXCEPTION);
       PENDING_EXCEPTION->print();
     }
     if (PENDING_EXCEPTION->is_a(SystemDictionary::BootstrapMethodError_klass())) {
@@ -1342,9 +1593,16 @@
 //------------------------------------------------------------------------------------------------------------------------
 #ifndef PRODUCT
 
-void FieldAccessInfo::print() {
+void CallInfo::print() {
   ResourceMark rm;
-  tty->print_cr("Field %s@%d", name()->as_C_string(), field_offset());
+  const char* kindstr = "unknown";
+  switch (_call_kind) {
+  case direct_call: kindstr = "direct"; break;
+  case vtable_call: kindstr = "vtable"; break;
+  case itable_call: kindstr = "itable"; break;
+  }
+  tty->print_cr("Call %s@%d %s", kindstr, _call_index,
+                _resolved_method.is_null() ? "(none)" : _resolved_method->name_and_sig_as_C_string());
 }
 
 #endif
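
For reference, the CallKind plumbing printed by CallInfo::print() above distinguishes three ways a resolved call can be bound. Below is a minimal standalone C++ sketch of that selection rule; FakeMethod and its booleans are simplified stand-ins invented for illustration, not HotSpot's Method*:

    #include <cstdio>

    enum CallKind { direct_call, vtable_call, itable_call };

    struct FakeMethod {           // stand-in for Method*, an assumption of this sketch
      bool statically_bound;      // e.g. private, final, static, or <init>
      bool has_itable_index;      // holder is an interface and the method has an itable slot
    };

    static CallKind select_call_kind(const FakeMethod& m) {
      if (m.statically_bound)  return direct_call;   // jump straight to the method
      if (m.has_itable_index)  return itable_call;   // search the receiver's itable
      return vtable_call;                            // index into the receiver's vtable
    }

    int main() {
      FakeMethod priv_itf  = { true,  false };  // private interface method: invokespecial only
      FakeMethod def_meth  = { false, true  };  // default method reached via the itable
      FakeMethod virt_meth = { false, false };  // ordinary virtual method
      std::printf("%d %d %d\n",
                  select_call_kind(priv_itf),       // 0 (direct)
                  select_call_kind(def_meth),       // 2 (itable)
                  select_call_kind(virt_meth));     // 1 (vtable)
      return 0;
    }

Note that the hunks above also show the converse rule: when an invokeinterface resolves to a method without an itable index, the result is downgraded to a vtable (or direct) call via set_virtual().
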
--- a/src/share/vm/interpreter/linkResolver.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/interpreter/linkResolver.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,63 +30,54 @@
 
 // All the necessary definitions for run-time link resolution.
 
-// LinkInfo & its subclasses provide all the information gathered
-// for a particular link after resolving it. A link is any reference
+// CallInfo provides all the information gathered for a particular
+// linked call site after resolving it. A link is any reference
 // made from within the bytecodes of a method to an object outside of
 // that method. If the info is invalid, the link has not been resolved
 // successfully.
 
-class LinkInfo VALUE_OBJ_CLASS_SPEC {
-};
-
-
-// Link information for getfield/putfield & getstatic/putstatic bytecodes.
-
-class FieldAccessInfo: public LinkInfo {
- protected:
-  KlassHandle  _klass;
-  Symbol*      _name;
-  AccessFlags  _access_flags;
-  int          _field_index;  // original index in the klass
-  int          _field_offset;
-  BasicType    _field_type;
-
+class CallInfo VALUE_OBJ_CLASS_SPEC {
  public:
-  void         set(KlassHandle klass, Symbol* name, int field_index, int field_offset,
-                 BasicType field_type, AccessFlags access_flags);
-  KlassHandle  klass() const                     { return _klass; }
-  Symbol* name() const                           { return _name; }
-  int          field_index() const               { return _field_index; }
-  int          field_offset() const              { return _field_offset; }
-  BasicType    field_type() const                { return _field_type; }
-  AccessFlags  access_flags() const              { return _access_flags; }
-
-  // debugging
-  void print()  PRODUCT_RETURN;
-};
-
-
-// Link information for all calls.
-
-class CallInfo: public LinkInfo {
+  // Ways that a method call might be selected (or not) based on receiver type.
+  // Note that an invokevirtual instruction might be linked with a direct call (no dispatch),
+  // and an invokeinterface instruction might be linked with any of the three options.
+  enum CallKind {
+    direct_call,                        // jump into resolved_method (must be concrete)
+    vtable_call,                        // select recv.klass.method_at_vtable(index)
+    itable_call,                        // select recv.klass.method_at_itable(resolved_method.holder, index)
+    unknown_kind = -1
+  };
  private:
-  KlassHandle  _resolved_klass;         // static receiver klass
+  KlassHandle  _resolved_klass;         // static receiver klass, resolved from a symbolic reference
   KlassHandle  _selected_klass;         // dynamic receiver class (same as static, or subklass)
   methodHandle _resolved_method;        // static target method
   methodHandle _selected_method;        // dynamic (actual) target method
-  int          _vtable_index;           // vtable index of selected method
+  CallKind     _call_kind;              // kind of call: direct (from static/special bytecodes or
+                                        //               otherwise statically bound), vtable, or itable
+  int          _call_index;             // vtable or itable index of selected class method (if any)
   Handle       _resolved_appendix;      // extra argument in constant pool (if CPCE::has_appendix)
   Handle       _resolved_method_type;   // MethodType (for invokedynamic and invokehandle call sites)
 
   void         set_static(   KlassHandle resolved_klass,                             methodHandle resolved_method                                                       , TRAPS);
-  void         set_interface(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method                         , TRAPS);
+  void         set_interface(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int itable_index       , TRAPS);
   void         set_virtual(  KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index       , TRAPS);
   void         set_handle(                                                           methodHandle resolved_method, Handle resolved_appendix, Handle resolved_method_type, TRAPS);
-  void         set_common(   KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index       , TRAPS);
+  void         set_common(   KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, CallKind kind, int index, TRAPS);
 
   friend class LinkResolver;
 
  public:
+  CallInfo() {
+#ifndef PRODUCT
+    _call_kind  = CallInfo::unknown_kind;
+    _call_index = Method::garbage_vtable_index;
+#endif //PRODUCT
+  }
+
+  // utility to extract an effective CallInfo from a method and an optional receiver limit
+  // does not queue the method for compilation
+  CallInfo(Method* resolved_method, Klass* resolved_klass = NULL);
+
   KlassHandle  resolved_klass() const            { return _resolved_klass; }
   KlassHandle  selected_klass() const            { return _selected_klass; }
   methodHandle resolved_method() const           { return _resolved_method; }
@@ -95,21 +86,43 @@
   Handle       resolved_method_type() const      { return _resolved_method_type; }
 
   BasicType    result_type() const               { return selected_method()->result_type(); }
-  bool         has_vtable_index() const          { return _vtable_index >= 0; }
-  bool         is_statically_bound() const       { return _vtable_index == Method::nonvirtual_vtable_index; }
+  CallKind     call_kind() const                 { return _call_kind; }
+  int          call_index() const                { return _call_index; }
   int          vtable_index() const {
     // Even for interface calls the vtable index could be non-negative.
     // See CallInfo::set_interface.
     assert(has_vtable_index() || is_statically_bound(), "");
-    return _vtable_index;
+    assert(call_kind() == vtable_call || call_kind() == direct_call, "");
+    // The returned value is < 0 if the call is statically bound.
+    // However, the returned value may be >= 0 even if the kind is direct_call.
+    // It is up to the caller to decide which way to go.
+    return _call_index;
   }
+  int          itable_index() const {
+    assert(call_kind() == itable_call, "");
+    // The returned value is always >= 0, a valid itable index.
+    return _call_index;
+  }
+
+  // debugging
+#ifdef ASSERT
+  bool         has_vtable_index() const          { return _call_index >= 0 && _call_kind != CallInfo::itable_call; }
+  bool         is_statically_bound() const       { return _call_index == Method::nonvirtual_vtable_index; }
+#endif //ASSERT
+  void         verify() PRODUCT_RETURN;
+  void         print()  PRODUCT_RETURN;
 };
 
+// Link information for getfield/putfield & getstatic/putstatic bytecodes
+// is represented using a fieldDescriptor.
 
 // The LinkResolver is used to resolve constant-pool references at run-time.
 // It does all necessary link-time checks & throws exceptions if necessary.
 
 class LinkResolver: AllStatic {
+  friend class klassVtable;
+  friend class klassItable;
+
  private:
   static void lookup_method_in_klasses          (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS);
   static void lookup_instance_method_in_klasses (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS);
@@ -117,15 +130,13 @@
   static void lookup_polymorphic_method         (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature,
                                                  KlassHandle current_klass, Handle *appendix_result_or_null, Handle *method_type_result, TRAPS);
 
-  static int vtable_index_of_miranda_method(KlassHandle klass, Symbol* name, Symbol* signature, TRAPS);
-
+  static int vtable_index_of_interface_method(KlassHandle klass, methodHandle resolved_method, TRAPS);
   static void resolve_klass           (KlassHandle& result, constantPoolHandle  pool, int index, TRAPS);
-  static void resolve_klass_no_update (KlassHandle& result, constantPoolHandle pool, int index, TRAPS); // no update of constantPool entry
 
   static void resolve_pool  (KlassHandle& resolved_klass, Symbol*& method_name, Symbol*& method_signature, KlassHandle& current_klass, constantPoolHandle pool, int index, TRAPS);
 
   static void resolve_interface_method(methodHandle& resolved_method, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, TRAPS);
-  static void resolve_method          (methodHandle& resolved_method, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, TRAPS);
+  static void resolve_method          (methodHandle& resolved_method, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, bool require_methodref, TRAPS);
 
   static void linktime_resolve_static_method    (methodHandle& resolved_method, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, TRAPS);
   static void linktime_resolve_special_method   (methodHandle& resolved_method, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, TRAPS);
@@ -148,9 +159,16 @@
                                         Bytecodes::Code code, constantPoolHandle pool, int index, TRAPS);
 
   // runtime/static resolving for fields
-  static void resolve_field(FieldAccessInfo& result, constantPoolHandle pool, int index, Bytecodes::Code byte, bool check_only, TRAPS);
-  // takes an extra bool argument "update_pool" to decide whether to update the constantPool during klass resolution.
-  static void resolve_field(FieldAccessInfo& result, constantPoolHandle pool, int index, Bytecodes::Code byte, bool check_only, bool update_pool, TRAPS);
+  static void resolve_field_access(fieldDescriptor& result, constantPoolHandle pool, int index, Bytecodes::Code byte, TRAPS);
+  static void resolve_field(fieldDescriptor& result, KlassHandle resolved_klass, Symbol* field_name, Symbol* field_signature,
+                            KlassHandle current_klass, Bytecodes::Code access_kind, bool check_access, bool initialize_class, TRAPS);
+
+  // maps (is_static, is_put) to the matching field-access bytecode:
+  static Bytecodes::Code field_access_kind(bool is_static, bool is_put) {
+    return (is_static
+            ? (is_put ? Bytecodes::_putstatic : Bytecodes::_getstatic)
+            : (is_put ? Bytecodes::_putfield  : Bytecodes::_getfield ));
+  }
 
   // runtime resolving:
   //   resolved_klass = specified class (i.e., static receiver class)
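
The new field_access_kind() helper above is a pure two-boolean-to-bytecode table. Here is a standalone C++ sketch of the same mapping, using a local Bytecode enum as a stand-in for the real Bytecodes::Code (an assumption of the sketch):

    #include <cstdio>

    enum Bytecode { getfield, putfield, getstatic, putstatic };

    static Bytecode field_access_kind(bool is_static, bool is_put) {
      return is_static
             ? (is_put ? putstatic : getstatic)
             : (is_put ? putfield  : getfield );
    }

    int main() {
      // All four combinations of the helper's truth table:
      std::printf("%d %d %d %d\n",
                  field_access_kind(false, false),   // getfield  (0)
                  field_access_kind(false, true),    // putfield  (1)
                  field_access_kind(true,  false),   // getstatic (2)
                  field_access_kind(true,  true));   // putstatic (3)
      return 0;
    }
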
--- a/src/share/vm/interpreter/templateInterpreter.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/interpreter/templateInterpreter.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -184,8 +184,9 @@
 EntryPoint TemplateInterpreter::_continuation_entry;
 EntryPoint TemplateInterpreter::_safept_entry;
 
-address    TemplateInterpreter::_return_3_addrs_by_index[TemplateInterpreter::number_of_return_addrs];
-address    TemplateInterpreter::_return_5_addrs_by_index[TemplateInterpreter::number_of_return_addrs];
+address TemplateInterpreter::_invoke_return_entry[TemplateInterpreter::number_of_return_addrs];
+address TemplateInterpreter::_invokeinterface_return_entry[TemplateInterpreter::number_of_return_addrs];
+address TemplateInterpreter::_invokedynamic_return_entry[TemplateInterpreter::number_of_return_addrs];
 
 DispatchTable TemplateInterpreter::_active_table;
 DispatchTable TemplateInterpreter::_normal_table;
@@ -237,22 +238,37 @@
 #endif // !PRODUCT
 
   { CodeletMark cm(_masm, "return entry points");
+    const int index_size = sizeof(u2);
     for (int i = 0; i < Interpreter::number_of_return_entries; i++) {
       Interpreter::_return_entry[i] =
         EntryPoint(
-          generate_return_entry_for(itos, i),
-          generate_return_entry_for(itos, i),
-          generate_return_entry_for(itos, i),
-          generate_return_entry_for(atos, i),
-          generate_return_entry_for(itos, i),
-          generate_return_entry_for(ltos, i),
-          generate_return_entry_for(ftos, i),
-          generate_return_entry_for(dtos, i),
-          generate_return_entry_for(vtos, i)
+          generate_return_entry_for(itos, i, index_size),
+          generate_return_entry_for(itos, i, index_size),
+          generate_return_entry_for(itos, i, index_size),
+          generate_return_entry_for(atos, i, index_size),
+          generate_return_entry_for(itos, i, index_size),
+          generate_return_entry_for(ltos, i, index_size),
+          generate_return_entry_for(ftos, i, index_size),
+          generate_return_entry_for(dtos, i, index_size),
+          generate_return_entry_for(vtos, i, index_size)
         );
     }
   }
 
+  { CodeletMark cm(_masm, "invoke return entry points");
+    const TosState states[] = {itos, itos, itos, itos, ltos, ftos, dtos, atos, vtos};
+    const int invoke_length = Bytecodes::length_for(Bytecodes::_invokestatic);
+    const int invokeinterface_length = Bytecodes::length_for(Bytecodes::_invokeinterface);
+    const int invokedynamic_length = Bytecodes::length_for(Bytecodes::_invokedynamic);
+
+    for (int i = 0; i < Interpreter::number_of_return_addrs; i++) {
+      TosState state = states[i];
+      Interpreter::_invoke_return_entry[i] = generate_return_entry_for(state, invoke_length, sizeof(u2));
+      Interpreter::_invokeinterface_return_entry[i] = generate_return_entry_for(state, invokeinterface_length, sizeof(u2));
+      Interpreter::_invokedynamic_return_entry[i] = generate_return_entry_for(state, invokedynamic_length, sizeof(u4));
+    }
+  }
+
   { CodeletMark cm(_masm, "earlyret entry points");
     Interpreter::_earlyret_entry =
       EntryPoint(
@@ -298,13 +314,6 @@
     }
   }
 
-  for (int j = 0; j < number_of_states; j++) {
-    const TosState states[] = {btos, ctos, stos, itos, ltos, ftos, dtos, atos, vtos};
-    int index = Interpreter::TosState_as_index(states[j]);
-    Interpreter::_return_3_addrs_by_index[index] = Interpreter::return_entry(states[j], 3);
-    Interpreter::_return_5_addrs_by_index[index] = Interpreter::return_entry(states[j], 5);
-  }
-
   { CodeletMark cm(_masm, "continuation entry points");
     Interpreter::_continuation_entry =
       EntryPoint(
@@ -534,9 +543,46 @@
 //------------------------------------------------------------------------------------------------------------------------
 // Entry points
 
-address TemplateInterpreter::return_entry(TosState state, int length) {
+/**
+ * Returns the return entry table for the given invoke bytecode.
+ */
+address* TemplateInterpreter::invoke_return_entry_table_for(Bytecodes::Code code) {
+  switch (code) {
+  case Bytecodes::_invokestatic:
+  case Bytecodes::_invokespecial:
+  case Bytecodes::_invokevirtual:
+  case Bytecodes::_invokehandle:
+    return Interpreter::invoke_return_entry_table();
+  case Bytecodes::_invokeinterface:
+    return Interpreter::invokeinterface_return_entry_table();
+  case Bytecodes::_invokedynamic:
+    return Interpreter::invokedynamic_return_entry_table();
+  default:
+    fatal(err_msg("invalid bytecode: %s", Bytecodes::name(code)));
+    return NULL;
+  }
+}
+
+/**
+ * Returns the return entry address for the given top-of-stack state and bytecode.
+ */
+address TemplateInterpreter::return_entry(TosState state, int length, Bytecodes::Code code) {
   guarantee(0 <= length && length < Interpreter::number_of_return_entries, "illegal length");
-  return _return_entry[length].entry(state);
+  const int index = TosState_as_index(state);
+  switch (code) {
+  case Bytecodes::_invokestatic:
+  case Bytecodes::_invokespecial:
+  case Bytecodes::_invokevirtual:
+  case Bytecodes::_invokehandle:
+    return _invoke_return_entry[index];
+  case Bytecodes::_invokeinterface:
+    return _invokeinterface_return_entry[index];
+  case Bytecodes::_invokedynamic:
+    return _invokedynamic_return_entry[index];
+  default:
+    assert(!Bytecodes::is_invoke(code), err_msg("invoke instructions should be handled separately: %s", Bytecodes::name(code)));
+    return _return_entry[length].entry(state);
+  }
 }
 
 
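The reason return entries are now keyed by invoke bytecode rather than by a raw length is that the entry must advance the bytecode pointer past the invoke that is returning: 3 bytes for invokestatic/invokespecial/invokevirtual/invokehandle, 5 bytes for invokeinterface and invokedynamic, and invokedynamic additionally uses a 4-byte (u4) constant-pool cache index. A standalone C++ sketch of the table selection, with all types as simplified stand-ins for the interpreter's:

    #include <cstdio>

    enum Tos { itos, ltos, ftos, dtos, atos, vtos, tos_count };
    enum Invoke { inv_static, inv_interface, inv_dynamic };

    typedef void (*Entry)();
    static void ret3()  { std::puts("advance bcp by 3, reload u2 cache entry"); }
    static void ret5()  { std::puts("advance bcp by 5, reload u2 cache entry"); }
    static void ret5w() { std::puts("advance bcp by 5, reload u4 cache entry"); }

    static Entry invoke_table[tos_count];      // invokestatic/special/virtual/handle
    static Entry interface_table[tos_count];   // invokeinterface
    static Entry dynamic_table[tos_count];     // invokedynamic

    static Entry* table_for(Invoke code) {     // mirrors invoke_return_entry_table_for()
      switch (code) {
      case inv_interface: return interface_table;
      case inv_dynamic:   return dynamic_table;
      default:            return invoke_table;
      }
    }

    int main() {
      for (int i = 0; i < tos_count; i++) {
        invoke_table[i]    = ret3;
        interface_table[i] = ret5;
        dynamic_table[i]   = ret5w;
      }
      table_for(inv_dynamic)[atos]();   // select by bytecode, then by TosState
      return 0;
    }
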
--- a/src/share/vm/interpreter/templateInterpreter.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/interpreter/templateInterpreter.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -120,8 +120,9 @@
   static EntryPoint _continuation_entry;
   static EntryPoint _safept_entry;
 
-  static address    _return_3_addrs_by_index[number_of_return_addrs];     // for invokevirtual   return entries
-  static address    _return_5_addrs_by_index[number_of_return_addrs];     // for invokeinterface return entries
+  static address _invoke_return_entry[number_of_return_addrs];           // for invokestatic, invokespecial, invokevirtual return entries
+  static address _invokeinterface_return_entry[number_of_return_addrs];  // for invokeinterface return entries
+  static address _invokedynamic_return_entry[number_of_return_addrs];    // for invokedynamic return entries
 
   static DispatchTable _active_table;                           // the active    dispatch table (used by the interpreter for dispatch)
   static DispatchTable _normal_table;                           // the normal    dispatch table (used to set the active table in normal mode)
@@ -161,12 +162,15 @@
   static address*   normal_table()                              { return _normal_table.table_for(); }
 
   // Support for invokes
-  static address*   return_3_addrs_by_index_table()             { return _return_3_addrs_by_index; }
-  static address*   return_5_addrs_by_index_table()             { return _return_5_addrs_by_index; }
-  static int        TosState_as_index(TosState state);          // computes index into return_3_entry_by_index table
+  static address*   invoke_return_entry_table()                 { return _invoke_return_entry; }
+  static address*   invokeinterface_return_entry_table()        { return _invokeinterface_return_entry; }
+  static address*   invokedynamic_return_entry_table()          { return _invokedynamic_return_entry; }
+  static int        TosState_as_index(TosState state);
 
-  static address    return_entry  (TosState state, int length);
-  static address    deopt_entry   (TosState state, int length);
+  static address* invoke_return_entry_table_for(Bytecodes::Code code);
+
+  static address deopt_entry(TosState state, int length);
+  static address return_entry(TosState state, int length, Bytecodes::Code code);
 
   // Safepoint support
   static void       notice_safepoints();                        // stops the thread when reaching a safepoint
--- a/src/share/vm/interpreter/templateInterpreterGenerator.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/interpreter/templateInterpreterGenerator.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -53,7 +53,7 @@
   address generate_ClassCastException_handler();
   address generate_ArrayIndexOutOfBounds_handler(const char* name);
   address generate_continuation_for(TosState state);
-  address generate_return_entry_for(TosState state, int step);
+  address generate_return_entry_for(TosState state, int step, size_t index_size);
   address generate_earlyret_entry_for(TosState state);
   address generate_deopt_entry_for(TosState state, int step);
   address generate_safept_entry_for(TosState state, address runtime_entry);
--- a/src/share/vm/interpreter/templateTable.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/interpreter/templateTable.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -28,11 +28,8 @@
 #include "interpreter/bytecodes.hpp"
 #include "memory/allocation.hpp"
 #include "runtime/frame.hpp"
-#ifdef TARGET_ARCH_MODEL_x86_32
-# include "interp_masm_x86_32.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_x86_64
-# include "interp_masm_x86_64.hpp"
+#ifdef TARGET_ARCH_x86
+# include "interp_masm_x86.hpp"
 #endif
 #ifdef TARGET_ARCH_MODEL_sparc
 # include "interp_masm_sparc.hpp"
--- a/src/share/vm/libadt/port.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/libadt/port.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -161,8 +161,8 @@
 extern void *safe_calloc (const char *file, unsigned line, unsigned nitems, unsigned size);
 extern void *safe_realloc(const char *file, unsigned line, void *ptr, unsigned size);
 extern char *safe_strdup (const char *file, unsigned line, const char *src);
-inline void *operator new( size_t size ) { return malloc(size); }
-inline void operator delete( void *ptr ) { free(ptr); }
+inline void *operator new( size_t size ) throw() { return malloc(size); }
+inline void operator delete( void *ptr )         { free(ptr); }
 #endif
 
 //-----------------------------------------------------------------------------
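
The added throw() specifications matter because this operator new returns NULL on failure (malloc semantics): a new-expression only tests the allocator's result for NULL when the allocation function is declared non-throwing, so without throw() the compiler assumes the result is non-null and callers' NULL checks can be optimized away. A small self-contained illustration of the contract (Widget is invented for the example):

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>

    struct Widget {
      int id;
      // Non-throwing class-local allocator: NULL on exhaustion, like the
      // malloc-backed operator new above.
      void* operator new(size_t size) throw() { return std::malloc(size); }
      void  operator delete(void* p)          { std::free(p); }
    };

    int main() {
      Widget* w = new Widget;   // with throw(), the new-expression NULL-checks
      if (w == NULL) {          // this test is meaningful only because of throw()
        std::puts("allocation failed");
        return 1;
      }
      w->id = 42;
      std::printf("%d\n", w->id);
      delete w;
      return 0;
    }
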
--- a/src/share/vm/memory/allocation.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/memory/allocation.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -52,19 +52,19 @@
 # include "os_bsd.inline.hpp"
 #endif
 
-void* StackObj::operator new(size_t size)       { ShouldNotCallThis(); return 0; }
-void  StackObj::operator delete(void* p)        { ShouldNotCallThis(); }
-void* StackObj::operator new [](size_t size)    { ShouldNotCallThis(); return 0; }
-void  StackObj::operator delete [](void* p)     { ShouldNotCallThis(); }
+void* StackObj::operator new(size_t size)     throw() { ShouldNotCallThis(); return 0; }
+void  StackObj::operator delete(void* p)              { ShouldNotCallThis(); }
+void* StackObj::operator new [](size_t size)  throw() { ShouldNotCallThis(); return 0; }
+void  StackObj::operator delete [](void* p)           { ShouldNotCallThis(); }
 
-void* _ValueObj::operator new(size_t size)      { ShouldNotCallThis(); return 0; }
-void  _ValueObj::operator delete(void* p)       { ShouldNotCallThis(); }
-void* _ValueObj::operator new [](size_t size)   { ShouldNotCallThis(); return 0; }
-void  _ValueObj::operator delete [](void* p)    { ShouldNotCallThis(); }
+void* _ValueObj::operator new(size_t size)    throw() { ShouldNotCallThis(); return 0; }
+void  _ValueObj::operator delete(void* p)             { ShouldNotCallThis(); }
+void* _ValueObj::operator new [](size_t size) throw() { ShouldNotCallThis(); return 0; }
+void  _ValueObj::operator delete [](void* p)          { ShouldNotCallThis(); }
 
 void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data,
                                  size_t word_size, bool read_only,
-                                 MetaspaceObj::Type type, TRAPS) {
+                                 MetaspaceObj::Type type, TRAPS) throw() {
   // Klass has its own operator new
   return Metaspace::allocate(loader_data, word_size, read_only,
                              type, CHECK_NULL);
@@ -83,7 +83,7 @@
   st->print(" {"INTPTR_FORMAT"}", this);
 }
 
-void* ResourceObj::operator new(size_t size, allocation_type type, MEMFLAGS flags) {
+void* ResourceObj::operator new(size_t size, allocation_type type, MEMFLAGS flags) throw() {
   address res;
   switch (type) {
    case C_HEAP:
@@ -100,12 +100,12 @@
   return res;
 }
 
-void* ResourceObj::operator new [](size_t size, allocation_type type, MEMFLAGS flags) {
+void* ResourceObj::operator new [](size_t size, allocation_type type, MEMFLAGS flags) throw() {
   return (address) operator new(size, type, flags);
 }
 
 void* ResourceObj::operator new(size_t size, const std::nothrow_t&  nothrow_constant,
-    allocation_type type, MEMFLAGS flags) {
+    allocation_type type, MEMFLAGS flags) throw() {
   //should only call this with std::nothrow, use other operator new() otherwise
   address res;
   switch (type) {
@@ -124,7 +124,7 @@
 }
 
 void* ResourceObj::operator new [](size_t size, const std::nothrow_t&  nothrow_constant,
-    allocation_type type, MEMFLAGS flags) {
+    allocation_type type, MEMFLAGS flags) throw() {
   return (address)operator new(size, nothrow_constant, type, flags);
 }
 
@@ -373,7 +373,7 @@
 //--------------------------------------------------------------------------------------
 // Chunk implementation
 
-void* Chunk::operator new (size_t requested_size, AllocFailType alloc_failmode, size_t length) {
+void* Chunk::operator new (size_t requested_size, AllocFailType alloc_failmode, size_t length) throw() {
   // requested_size is equal to sizeof(Chunk) but in order for the arena
   // allocations to come out aligned as expected the size must be aligned
   // to expected arena alignment.
@@ -481,18 +481,18 @@
   NOT_PRODUCT(Atomic::dec(&_instance_count);)
 }
 
-void* Arena::operator new(size_t size) {
+void* Arena::operator new(size_t size) throw() {
   assert(false, "Use dynamic memory type binding");
   return NULL;
 }
 
-void* Arena::operator new (size_t size, const std::nothrow_t&  nothrow_constant) {
+void* Arena::operator new (size_t size, const std::nothrow_t&  nothrow_constant) throw() {
   assert(false, "Use dynamic memory type binding");
   return NULL;
 }
 
   // dynamic memory type binding
-void* Arena::operator new(size_t size, MEMFLAGS flags) {
+void* Arena::operator new(size_t size, MEMFLAGS flags) throw() {
 #ifdef ASSERT
   void* p = (void*)AllocateHeap(size, flags|otArena, CALLER_PC);
   if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
@@ -502,7 +502,7 @@
 #endif
 }
 
-void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) {
+void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw() {
 #ifdef ASSERT
   void* p = os::malloc(size, flags|otArena, CALLER_PC);
   if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
@@ -691,22 +691,22 @@
 // define ALLOW_OPERATOR_NEW_USAGE for platforms on which the global operator new is allowed.
 //
 #ifndef ALLOW_OPERATOR_NEW_USAGE
-void* operator new(size_t size){
+void* operator new(size_t size) throw() {
   assert(false, "Should not call global operator new");
   return 0;
 }
 
-void* operator new [](size_t size){
+void* operator new [](size_t size) throw() {
   assert(false, "Should not call global operator new[]");
   return 0;
 }
 
-void* operator new(size_t size, const std::nothrow_t&  nothrow_constant){
+void* operator new(size_t size, const std::nothrow_t&  nothrow_constant) throw() {
   assert(false, "Should not call global operator new");
   return 0;
 }
 
-void* operator new [](size_t size, std::nothrow_t&  nothrow_constant){
+void* operator new [](size_t size, std::nothrow_t&  nothrow_constant) throw() {
   assert(false, "Should not call global operator new[]");
   return 0;
 }
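
The assert(false, ...) bodies above implement a "poisoned global new": any allocation that bypasses the VM's tracked allocators fails loudly in debug builds. Below is a standalone approximation of the technique, counting stray uses instead of asserting; the names are invented for the sketch, and the C++03-style exception specification matches the era of this file:

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>
    #include <new>

    static int stray_news = 0;

    // Replacement global allocator: in a real VM this would assert/abort,
    // mirroring "Should not call global operator new" above.
    void* operator new(std::size_t size) throw(std::bad_alloc) {
      ++stray_news;
      void* p = std::malloc(size);
      if (p == NULL) throw std::bad_alloc();
      return p;
    }

    void operator delete(void* p) throw() { std::free(p); }

    int main() {
      int* stray = new int(7);                              // caught by the hook
      delete stray;
      std::printf("stray global news: %d\n", stray_news);   // prints 1
      return 0;
    }
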
--- a/src/share/vm/memory/allocation.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/memory/allocation.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -204,12 +204,12 @@
 
 template <MEMFLAGS F> class CHeapObj ALLOCATION_SUPER_CLASS_SPEC {
  public:
-  _NOINLINE_ void* operator new(size_t size, address caller_pc = 0);
+  _NOINLINE_ void* operator new(size_t size, address caller_pc = 0) throw();
   _NOINLINE_ void* operator new (size_t size, const std::nothrow_t&  nothrow_constant,
-                               address caller_pc = 0);
-  _NOINLINE_ void* operator new [](size_t size, address caller_pc = 0);
+                               address caller_pc = 0) throw();
+  _NOINLINE_ void* operator new [](size_t size, address caller_pc = 0) throw();
   _NOINLINE_ void* operator new [](size_t size, const std::nothrow_t&  nothrow_constant,
-                               address caller_pc = 0);
+                               address caller_pc = 0) throw();
   void  operator delete(void* p);
   void  operator delete [] (void* p);
 };
@@ -219,8 +219,8 @@
 
 class StackObj ALLOCATION_SUPER_CLASS_SPEC {
  private:
-  void* operator new(size_t size);
-  void* operator new [](size_t size);
+  void* operator new(size_t size) throw();
+  void* operator new [](size_t size) throw();
 #ifdef __IBMCPP__
  public:
 #endif
@@ -248,9 +248,9 @@
 //
 class _ValueObj {
  private:
-  void* operator new(size_t size);
+  void* operator new(size_t size) throw();
   void  operator delete(void* p);
-  void* operator new [](size_t size);
+  void* operator new [](size_t size) throw();
   void  operator delete [](void* p);
 };
 
@@ -319,7 +319,7 @@
 
   void* operator new(size_t size, ClassLoaderData* loader_data,
                      size_t word_size, bool read_only,
-                     Type type, Thread* thread);
+                     Type type, Thread* thread) throw();
                      // can't use TRAPS from this header file.
   void operator delete(void* p) { ShouldNotCallThis(); }
 };
@@ -342,7 +342,7 @@
   Chunk*       _next;     // Next Chunk in list
   const size_t _len;      // Size of this Chunk
  public:
-  void* operator new(size_t size, AllocFailType alloc_failmode, size_t length);
+  void* operator new(size_t size, AllocFailType alloc_failmode, size_t length) throw();
   void  operator delete(void* p);
   Chunk(size_t length);
 
@@ -425,12 +425,12 @@
   char* hwm() const             { return _hwm; }
 
   // new operators
-  void* operator new (size_t size);
-  void* operator new (size_t size, const std::nothrow_t& nothrow_constant);
+  void* operator new (size_t size) throw();
+  void* operator new (size_t size, const std::nothrow_t& nothrow_constant) throw();
 
   // dynamic memory type tagging
-  void* operator new(size_t size, MEMFLAGS flags);
-  void* operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags);
+  void* operator new(size_t size, MEMFLAGS flags) throw();
+  void* operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw();
   void  operator delete(void* p);
 
   // Fast allocate in the arena.  Common case is: pointer test + increment.
@@ -586,44 +586,44 @@
 #endif // ASSERT
 
  public:
-  void* operator new(size_t size, allocation_type type, MEMFLAGS flags);
-  void* operator new [](size_t size, allocation_type type, MEMFLAGS flags);
+  void* operator new(size_t size, allocation_type type, MEMFLAGS flags) throw();
+  void* operator new [](size_t size, allocation_type type, MEMFLAGS flags) throw();
   void* operator new(size_t size, const std::nothrow_t&  nothrow_constant,
-      allocation_type type, MEMFLAGS flags);
+      allocation_type type, MEMFLAGS flags) throw();
   void* operator new [](size_t size, const std::nothrow_t&  nothrow_constant,
-      allocation_type type, MEMFLAGS flags);
+      allocation_type type, MEMFLAGS flags) throw();
 
-  void* operator new(size_t size, Arena *arena) {
+  void* operator new(size_t size, Arena *arena) throw() {
       address res = (address)arena->Amalloc(size);
       DEBUG_ONLY(set_allocation_type(res, ARENA);)
       return res;
   }
 
-  void* operator new [](size_t size, Arena *arena) {
+  void* operator new [](size_t size, Arena *arena) throw() {
       address res = (address)arena->Amalloc(size);
       DEBUG_ONLY(set_allocation_type(res, ARENA);)
       return res;
   }
 
-  void* operator new(size_t size) {
+  void* operator new(size_t size) throw() {
       address res = (address)resource_allocate_bytes(size);
       DEBUG_ONLY(set_allocation_type(res, RESOURCE_AREA);)
       return res;
   }
 
-  void* operator new(size_t size, const std::nothrow_t& nothrow_constant) {
+  void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
       address res = (address)resource_allocate_bytes(size, AllocFailStrategy::RETURN_NULL);
       DEBUG_ONLY(if (res != NULL) set_allocation_type(res, RESOURCE_AREA);)
       return res;
   }
 
-  void* operator new [](size_t size) {
+  void* operator new [](size_t size) throw() {
       address res = (address)resource_allocate_bytes(size);
       DEBUG_ONLY(set_allocation_type(res, RESOURCE_AREA);)
       return res;
   }
 
-  void* operator new [](size_t size, const std::nothrow_t& nothrow_constant) {
+  void* operator new [](size_t size, const std::nothrow_t& nothrow_constant) throw() {
       address res = (address)resource_allocate_bytes(size, AllocFailStrategy::RETURN_NULL);
       DEBUG_ONLY(if (res != NULL) set_allocation_type(res, RESOURCE_AREA);)
       return res;
--- a/src/share/vm/memory/allocation.inline.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/memory/allocation.inline.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -85,7 +85,7 @@
 
 
 template <MEMFLAGS F> void* CHeapObj<F>::operator new(size_t size,
-      address caller_pc){
+      address caller_pc) throw() {
     void* p = (void*)AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC));
 #ifdef ASSERT
     if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
@@ -94,7 +94,7 @@
   }
 
 template <MEMFLAGS F> void* CHeapObj<F>::operator new (size_t size,
-  const std::nothrow_t&  nothrow_constant, address caller_pc) {
+  const std::nothrow_t&  nothrow_constant, address caller_pc) throw() {
   void* p = (void*)AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC),
       AllocFailStrategy::RETURN_NULL);
 #ifdef ASSERT
@@ -104,12 +104,12 @@
 }
 
 template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
-      address caller_pc){
+      address caller_pc) throw() {
     return CHeapObj<F>::operator new(size, caller_pc);
 }
 
 template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
-  const std::nothrow_t&  nothrow_constant, address caller_pc) {
+  const std::nothrow_t&  nothrow_constant, address caller_pc) throw() {
     return CHeapObj<F>::operator new(size, nothrow_constant, caller_pc);
 }
 
--- a/src/share/vm/memory/binaryTreeDictionary.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/memory/binaryTreeDictionary.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,15 +28,14 @@
 #include "memory/binaryTreeDictionary.hpp"
 #include "memory/freeList.hpp"
 #include "memory/freeBlockDictionary.hpp"
-#include "memory/metablock.hpp"
 #include "memory/metachunk.hpp"
 #include "runtime/globals.hpp"
 #include "utilities/ostream.hpp"
 #include "utilities/macros.hpp"
+#include "gc_implementation/shared/spaceDecorator.hpp"
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp"
 #include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
-#include "gc_implementation/shared/spaceDecorator.hpp"
 #include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
 #endif // INCLUDE_ALL_GCS
 
--- a/src/share/vm/memory/cardTableModRefBS.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/memory/cardTableModRefBS.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -423,60 +423,6 @@
   inline_write_ref_field(field, newVal);
 }
 
-/*
-   Claimed and deferred bits are used together in G1 during the evacuation
-   pause. These bits can have the following state transitions:
-   1. The claimed bit can be put over any other card state. Except that
-      the "dirty -> dirty and claimed" transition is checked for in
-      G1 code and is not used.
-   2. Deferred bit can be set only if the previous state of the card
-      was either clean or claimed. mark_card_deferred() is wait-free.
-      We do not care whether the operation is successful, because if
-      it is not it will only result in a duplicate entry in the update
-      buffer because of the "cache-miss". So it's not worth spinning.
- */
-
-
-bool CardTableModRefBS::claim_card(size_t card_index) {
-  jbyte val = _byte_map[card_index];
-  assert(val != dirty_card_val(), "Shouldn't claim a dirty card");
-  while (val == clean_card_val() ||
-         (val & (clean_card_mask_val() | claimed_card_val())) != claimed_card_val()) {
-    jbyte new_val = val;
-    if (val == clean_card_val()) {
-      new_val = (jbyte)claimed_card_val();
-    } else {
-      new_val = val | (jbyte)claimed_card_val();
-    }
-    jbyte res = Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
-    if (res == val) {
-      return true;
-    }
-    val = res;
-  }
-  return false;
-}
-
-bool CardTableModRefBS::mark_card_deferred(size_t card_index) {
-  jbyte val = _byte_map[card_index];
-  // It's already processed
-  if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
-    return false;
-  }
-  // Cached bit can be installed either on a clean card or on a claimed card.
-  jbyte new_val = val;
-  if (val == clean_card_val()) {
-    new_val = (jbyte)deferred_card_val();
-  } else {
-    if (val & claimed_card_val()) {
-      new_val = val | (jbyte)deferred_card_val();
-    }
-  }
-  if (new_val != val) {
-    Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
-  }
-  return true;
-}
 
 void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp,
                                                                  MemRegion mr,
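
For the record, the deleted claim_card() above implemented a wait-free claim: CAS the claimed bit over a clean (or otherwise unclaimed) card value and, on contention, retry with the freshly observed value. A standalone sketch of the same pattern, with std::atomic standing in for HotSpot's Atomic::cmpxchg and deliberately simplified card encodings:

    #include <atomic>
    #include <cstdio>

    static const signed char clean_card   = -1;   // simplified values, not HotSpot's
    static const signed char claimed_card =  2;

    static bool claim_card(std::atomic<signed char>& card) {
      signed char val = card.load();
      while (val == clean_card || (val & claimed_card) != claimed_card) {
        signed char new_val = (val == clean_card)
                              ? claimed_card
                              : (signed char)(val | claimed_card);
        // On failure compare_exchange refreshes 'val' with the current value,
        // just as the removed loop did with 'val = res'.
        if (card.compare_exchange_weak(val, new_val)) {
          return true;    // we installed the claimed bit
        }
      }
      return false;       // another thread claimed the card first
    }

    int main() {
      std::atomic<signed char> card(clean_card);
      std::printf("%d %d\n", (int)claim_card(card), (int)claim_card(card));  // 1 0
      return 0;
    }
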
--- a/src/share/vm/memory/cardTableModRefBS.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/memory/cardTableModRefBS.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -339,34 +339,10 @@
     _byte_map[card_index] = dirty_card_val();
   }
 
-  bool is_card_claimed(size_t card_index) {
-    jbyte val = _byte_map[card_index];
-    return (val & (clean_card_mask_val() | claimed_card_val())) == claimed_card_val();
-  }
-
-  void set_card_claimed(size_t card_index) {
-      jbyte val = _byte_map[card_index];
-      if (val == clean_card_val()) {
-        val = (jbyte)claimed_card_val();
-      } else {
-        val |= (jbyte)claimed_card_val();
-      }
-      _byte_map[card_index] = val;
-  }
-
-  bool claim_card(size_t card_index);
-
   bool is_card_clean(size_t card_index) {
     return _byte_map[card_index] == clean_card_val();
   }
 
-  bool is_card_deferred(size_t card_index) {
-    jbyte val = _byte_map[card_index];
-    return (val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val();
-  }
-
-  bool mark_card_deferred(size_t card_index);
-
   // Card marking array base (adjusted for heap low boundary)
   // This would be the 0th element of _byte_map, if the heap started at 0x0.
   // But since the heap starts at some higher address, this points to somewhere
--- a/src/share/vm/memory/collectorPolicy.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/memory/collectorPolicy.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -48,71 +48,52 @@
 // CollectorPolicy methods.
 
 void CollectorPolicy::initialize_flags() {
-  assert(max_alignment() >= min_alignment(),
-      err_msg("max_alignment: " SIZE_FORMAT " less than min_alignment: " SIZE_FORMAT,
-          max_alignment(), min_alignment()));
-  assert(max_alignment() % min_alignment() == 0,
-      err_msg("max_alignment: " SIZE_FORMAT " not aligned by min_alignment: " SIZE_FORMAT,
-          max_alignment(), min_alignment()));
+  assert(_max_alignment >= _min_alignment,
+         err_msg("max_alignment: " SIZE_FORMAT " less than min_alignment: " SIZE_FORMAT,
+                 _max_alignment, _min_alignment));
+  assert(_max_alignment % _min_alignment == 0,
+         err_msg("max_alignment: " SIZE_FORMAT " not aligned by min_alignment: " SIZE_FORMAT,
+                 _max_alignment, _min_alignment));
 
   if (MaxHeapSize < InitialHeapSize) {
     vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
   }
 
-  if (MetaspaceSize > MaxMetaspaceSize) {
-    MaxMetaspaceSize = MetaspaceSize;
-  }
-  MetaspaceSize = MAX2(min_alignment(), align_size_down_(MetaspaceSize, min_alignment()));
-  // Don't increase Metaspace size limit above specified.
-  MaxMetaspaceSize = align_size_down(MaxMetaspaceSize, max_alignment());
-  if (MetaspaceSize > MaxMetaspaceSize) {
-    MetaspaceSize = MaxMetaspaceSize;
-  }
-
-  MinMetaspaceExpansion = MAX2(min_alignment(), align_size_down_(MinMetaspaceExpansion, min_alignment()));
-  MaxMetaspaceExpansion = MAX2(min_alignment(), align_size_down_(MaxMetaspaceExpansion, min_alignment()));
-
-  MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, min_alignment());
-
-  assert(MetaspaceSize    % min_alignment() == 0, "metapace alignment");
-  assert(MaxMetaspaceSize % max_alignment() == 0, "maximum metaspace alignment");
-  if (MetaspaceSize < 256*K) {
-    vm_exit_during_initialization("Too small initial Metaspace size");
-  }
+  MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, _min_alignment);
 }
 
 void CollectorPolicy::initialize_size_info() {
   // User inputs from -mx and ms must be aligned
-  set_min_heap_byte_size(align_size_up(Arguments::min_heap_size(), min_alignment()));
-  set_initial_heap_byte_size(align_size_up(InitialHeapSize, min_alignment()));
-  set_max_heap_byte_size(align_size_up(MaxHeapSize, max_alignment()));
+  _min_heap_byte_size = align_size_up(Arguments::min_heap_size(), _min_alignment);
+  _initial_heap_byte_size = align_size_up(InitialHeapSize, _min_alignment);
+  _max_heap_byte_size = align_size_up(MaxHeapSize, _max_alignment);
 
   // Check heap parameter properties
-  if (initial_heap_byte_size() < M) {
+  if (_initial_heap_byte_size < M) {
     vm_exit_during_initialization("Too small initial heap");
   }
   // Check heap parameter properties
-  if (min_heap_byte_size() < M) {
+  if (_min_heap_byte_size < M) {
     vm_exit_during_initialization("Too small minimum heap");
   }
-  if (initial_heap_byte_size() <= NewSize) {
+  if (_initial_heap_byte_size <= NewSize) {
      // make sure there is at least some room in old space
     vm_exit_during_initialization("Too small initial heap for new size specified");
   }
-  if (max_heap_byte_size() < min_heap_byte_size()) {
+  if (_max_heap_byte_size < _min_heap_byte_size) {
     vm_exit_during_initialization("Incompatible minimum and maximum heap sizes specified");
   }
-  if (initial_heap_byte_size() < min_heap_byte_size()) {
+  if (_initial_heap_byte_size < _min_heap_byte_size) {
     vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified");
   }
-  if (max_heap_byte_size() < initial_heap_byte_size()) {
+  if (_max_heap_byte_size < _initial_heap_byte_size) {
     vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
   }
 
   if (PrintGCDetails && Verbose) {
     gclog_or_tty->print_cr("Minimum heap " SIZE_FORMAT "  Initial heap "
       SIZE_FORMAT "  Maximum heap " SIZE_FORMAT,
-      min_heap_byte_size(), initial_heap_byte_size(), max_heap_byte_size());
+      _min_heap_byte_size, _initial_heap_byte_size, _max_heap_byte_size);
   }
 }
 
@@ -124,15 +105,8 @@
 
 GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap,
                                            int max_covered_regions) {
-  switch (rem_set_name()) {
-  case GenRemSet::CardTable: {
-    CardTableRS* res = new CardTableRS(whole_heap, max_covered_regions);
-    return res;
-  }
-  default:
-    guarantee(false, "unrecognized GenRemSet::Name");
-    return NULL;
-  }
+  assert(rem_set_name() == GenRemSet::CardTable, "unrecognized GenRemSet::Name");
+  return new CardTableRS(whole_heap, max_covered_regions);
 }
 
 void CollectorPolicy::cleared_all_soft_refs() {
@@ -145,20 +119,44 @@
   _all_soft_refs_clear = true;
 }
 
+size_t CollectorPolicy::compute_max_alignment() {
+  // The card marking array and the offset arrays for old generations are
+  // committed in os pages as well. Make sure they are entirely full (to
+  // avoid partial page problems), e.g. if 512 bytes heap corresponds to 1
+  // byte entry and the os page size is 4096, the maximum heap size should
+  // be 512*4096 = 2MB aligned.
+
+  // There is only the GenRemSet in Hotspot and only the GenRemSet::CardTable
+  // is supported.
+  // Requirements of any new remembered set implementations must be added here.
+  size_t alignment = GenRemSet::max_alignment_constraint(GenRemSet::CardTable);
+
+  // Parallel GC does its own alignment of the generations to avoid requiring a
+  // large page (256M on some platforms) for the permanent generation.  The
+  // other collectors should also be updated to do their own alignment and then
+  // this use of lcm() should be removed.
+  if (UseLargePages && !UseParallelGC) {
+      // in presence of large pages we have to make sure that our
+      // alignment is large page aware
+      alignment = lcm(os::large_page_size(), alignment);
+  }
+
+  return alignment;
+}
 
 // GenCollectorPolicy methods.
 
 size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) {
   size_t x = base_size / (NewRatio+1);
-  size_t new_gen_size = x > min_alignment() ?
-                     align_size_down(x, min_alignment()) :
-                     min_alignment();
+  size_t new_gen_size = x > _min_alignment ?
+                     align_size_down(x, _min_alignment) :
+                     _min_alignment;
   return new_gen_size;
 }
 
 size_t GenCollectorPolicy::bound_minus_alignment(size_t desired_size,
                                                  size_t maximum_size) {
-  size_t alignment = min_alignment();
+  size_t alignment = _min_alignment;
   size_t max_minus = maximum_size - alignment;
   return desired_size < max_minus ? desired_size : max_minus;
 }
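
The arithmetic in compute_max_alignment() is worth making concrete: with one card-table byte covering 512 heap bytes and 4096-byte os pages, a fully used card-table page corresponds to 512 * 4096 = 2MB of heap, so heap sizes are aligned to 2MB; with large pages the alignment is then rounded up through lcm(). A standalone sketch with assumed example values (the 4MB large-page size is illustrative, not queried from the os):

    #include <cstdio>

    static unsigned long gcd(unsigned long a, unsigned long b) {
      while (b != 0) { unsigned long t = a % b; a = b; b = t; }
      return a;
    }
    static unsigned long lcm(unsigned long a, unsigned long b) {
      return a / gcd(a, b) * b;
    }

    int main() {
      const unsigned long card_size    = 512;    // heap bytes per card-table byte
      const unsigned long os_page_size = 4096;   // card table committed in pages
      unsigned long alignment = card_size * os_page_size;   // 2097152 (2MB)

      // With UseLargePages (and not UseParallelGC) the alignment must also
      // be a multiple of the large-page size; assume 4MB here.
      const unsigned long large_page = 4UL * 1024 * 1024;
      alignment = lcm(large_page, alignment);                // 4194304 (4MB)

      std::printf("max alignment = %lu bytes\n", alignment);
      return 0;
    }
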
@@ -175,33 +173,10 @@
                                         GCTimeRatio);
 }
 
-size_t GenCollectorPolicy::compute_max_alignment() {
-  // The card marking array and the offset arrays for old generations are
-  // committed in os pages as well. Make sure they are entirely full (to
-  // avoid partial page problems), e.g. if 512 bytes heap corresponds to 1
-  // byte entry and the os page size is 4096, the maximum heap size should
-  // be 512*4096 = 2MB aligned.
-  size_t alignment = GenRemSet::max_alignment_constraint(rem_set_name());
-
-  // Parallel GC does its own alignment of the generations to avoid requiring a
-  // large page (256M on some platforms) for the permanent generation.  The
-  // other collectors should also be updated to do their own alignment and then
-  // this use of lcm() should be removed.
-  if (UseLargePages && !UseParallelGC) {
-      // in presence of large pages we have to make sure that our
-      // alignment is large page aware
-      alignment = lcm(os::large_page_size(), alignment);
-  }
-
-  assert(alignment >= min_alignment(), "Must be");
-
-  return alignment;
-}
-
 void GenCollectorPolicy::initialize_flags() {
   // All sizes must be multiples of the generation granularity.
-  set_min_alignment((uintx) Generation::GenGrain);
-  set_max_alignment(compute_max_alignment());
+  _min_alignment = (uintx) Generation::GenGrain;
+  _max_alignment = compute_max_alignment();
 
   CollectorPolicy::initialize_flags();
 
@@ -211,26 +186,26 @@
   if (NewSize > MaxNewSize) {
     MaxNewSize = NewSize;
   }
-  NewSize = align_size_down(NewSize, min_alignment());
-  MaxNewSize = align_size_down(MaxNewSize, min_alignment());
+  NewSize = align_size_down(NewSize, _min_alignment);
+  MaxNewSize = align_size_down(MaxNewSize, _min_alignment);
 
   // Check validity of heap flags
-  assert(NewSize     % min_alignment() == 0, "eden space alignment");
-  assert(MaxNewSize  % min_alignment() == 0, "survivor space alignment");
+  assert(NewSize     % _min_alignment == 0, "eden space alignment");
+  assert(MaxNewSize  % _min_alignment == 0, "survivor space alignment");
 
-  if (NewSize < 3*min_alignment()) {
+  if (NewSize < 3 * _min_alignment) {
      // make sure there is room for eden and two survivor spaces
     vm_exit_during_initialization("Too small new size specified");
   }
   if (SurvivorRatio < 1 || NewRatio < 1) {
-    vm_exit_during_initialization("Invalid heap ratio specified");
+    vm_exit_during_initialization("Invalid young gen ratio specified");
   }
 }
 
 void TwoGenerationCollectorPolicy::initialize_flags() {
   GenCollectorPolicy::initialize_flags();
 
-  OldSize = align_size_down(OldSize, min_alignment());
+  OldSize = align_size_down(OldSize, _min_alignment);
 
   if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(NewSize)) {
     // NewRatio will be used later to set the young generation size so we use
@@ -239,11 +214,11 @@
     assert(NewRatio > 0, "NewRatio should have been set up earlier");
     size_t calculated_heapsize = (OldSize / NewRatio) * (NewRatio + 1);
 
-    calculated_heapsize = align_size_up(calculated_heapsize, max_alignment());
+    calculated_heapsize = align_size_up(calculated_heapsize, _max_alignment);
     MaxHeapSize = calculated_heapsize;
     InitialHeapSize = calculated_heapsize;
   }
-  MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());
+  MaxHeapSize = align_size_up(MaxHeapSize, _max_alignment);
 
   // adjust max heap size if necessary
   if (NewSize + OldSize > MaxHeapSize) {
@@ -253,18 +228,18 @@
       uintx calculated_size = NewSize + OldSize;
       double shrink_factor = (double) MaxHeapSize / calculated_size;
       // align
-      NewSize = align_size_down((uintx) (NewSize * shrink_factor), min_alignment());
+      NewSize = align_size_down((uintx) (NewSize * shrink_factor), _min_alignment);
       // OldSize is already aligned because above we aligned MaxHeapSize to
-      // max_alignment(), and we just made sure that NewSize is aligned to
-      // min_alignment(). In initialize_flags() we verified that max_alignment()
-      // is a multiple of min_alignment().
+      // _max_alignment, and we just made sure that NewSize is aligned to
+      // _min_alignment. In initialize_flags() we verified that _max_alignment
+      // is a multiple of _min_alignment.
       OldSize = MaxHeapSize - NewSize;
     } else {
       MaxHeapSize = NewSize + OldSize;
     }
   }
   // need to do this again
-  MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());
+  MaxHeapSize = align_size_up(MaxHeapSize, _max_alignment);
 
   // adjust max heap size if necessary
   if (NewSize + OldSize > MaxHeapSize) {
@@ -274,24 +249,24 @@
       uintx calculated_size = NewSize + OldSize;
       double shrink_factor = (double) MaxHeapSize / calculated_size;
       // align
-      NewSize = align_size_down((uintx) (NewSize * shrink_factor), min_alignment());
+      NewSize = align_size_down((uintx) (NewSize * shrink_factor), _min_alignment);
       // OldSize is already aligned because above we aligned MaxHeapSize to
-      // max_alignment(), and we just made sure that NewSize is aligned to
-      // min_alignment(). In initialize_flags() we verified that max_alignment()
-      // is a multiple of min_alignment().
+      // _max_alignment, and we just made sure that NewSize is aligned to
+      // _min_alignment. In initialize_flags() we verified that _max_alignment
+      // is a multiple of _min_alignment.
       OldSize = MaxHeapSize - NewSize;
     } else {
       MaxHeapSize = NewSize + OldSize;
     }
   }
   // need to do this again
-  MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());
+  MaxHeapSize = align_size_up(MaxHeapSize, _max_alignment);
 
   always_do_update_barrier = UseConcMarkSweepGC;
 
   // Check validity of heap flags
-  assert(OldSize     % min_alignment() == 0, "old space alignment");
-  assert(MaxHeapSize % max_alignment() == 0, "maximum heap alignment");
+  assert(OldSize     % _min_alignment == 0, "old space alignment");
+  assert(MaxHeapSize % _max_alignment == 0, "maximum heap alignment");
 }
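
When NewSize + OldSize exceed an explicitly set MaxHeapSize, both generations are scaled down by the same factor so their sum lands back on the maximum. A condensed sketch of that adjustment (assumed sizes, not HotSpot code):

#include <cstddef>

static size_t align_size_down(size_t s, size_t a) { return s - (s % a); }
static size_t align_size_up(size_t s, size_t a)   { return align_size_down(s + a - 1, a); }

int main() {
  const size_t min_alignment = 64 * 1024;        // assumed
  const size_t max_alignment = 2 * 1024 * 1024;  // assumed
  size_t NewSize = 512u * 1024 * 1024;
  size_t OldSize = 1536u * 1024 * 1024;
  size_t MaxHeapSize = 1024u * 1024 * 1024;      // explicitly set, too small
  if (NewSize + OldSize > MaxHeapSize) {
    double shrink_factor = (double) MaxHeapSize / (NewSize + OldSize);  // 0.5
    NewSize = align_size_down((size_t)(NewSize * shrink_factor), min_alignment);
    OldSize = MaxHeapSize - NewSize;             // keeps the sum at the max
  }
  MaxHeapSize = align_size_up(MaxHeapSize, max_alignment);
  return (NewSize + OldSize <= MaxHeapSize) ? 0 : 1;
}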
 
 // Values set on the command line win over any ergonomically
@@ -306,7 +281,7 @@
 void GenCollectorPolicy::initialize_size_info() {
   CollectorPolicy::initialize_size_info();
 
-  // min_alignment() is used for alignment within a generation.
+  // _min_alignment is used for alignment within a generation.
   // There is additional alignment done downstream for some
   // collectors that sometimes causes unwanted rounding up of
   // generations sizes.
@@ -315,18 +290,18 @@
 
   size_t max_new_size = 0;
   if (FLAG_IS_CMDLINE(MaxNewSize) || FLAG_IS_ERGO(MaxNewSize)) {
-    if (MaxNewSize < min_alignment()) {
-      max_new_size = min_alignment();
+    if (MaxNewSize < _min_alignment) {
+      max_new_size = _min_alignment;
     }
-    if (MaxNewSize >= max_heap_byte_size()) {
-      max_new_size = align_size_down(max_heap_byte_size() - min_alignment(),
-                                     min_alignment());
+    if (MaxNewSize >= _max_heap_byte_size) {
+      max_new_size = align_size_down(_max_heap_byte_size - _min_alignment,
+                                     _min_alignment);
       warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or "
         "greater than the entire heap (" SIZE_FORMAT "k).  A "
         "new generation size of " SIZE_FORMAT "k will be used.",
-        MaxNewSize/K, max_heap_byte_size()/K, max_new_size/K);
+        MaxNewSize/K, _max_heap_byte_size/K, max_new_size/K);
     } else {
-      max_new_size = align_size_down(MaxNewSize, min_alignment());
+      max_new_size = align_size_down(MaxNewSize, _min_alignment);
     }
 
   // The case for FLAG_IS_ERGO(MaxNewSize) could be treated
@@ -344,7 +319,7 @@
   // just accept those choices.  The choices currently made are
   // not always "wise".
   } else {
-    max_new_size = scale_by_NewRatio_aligned(max_heap_byte_size());
+    max_new_size = scale_by_NewRatio_aligned(_max_heap_byte_size);
     // Bound the maximum size by NewSize below (since it historically
     // would have been NewSize and because the NewRatio calculation could
     // yield a size that is too small) and bound it by MaxNewSize above.
@@ -357,13 +332,13 @@
   // Given the maximum gen0 size, determine the initial and
   // minimum gen0 sizes.
 
-  if (max_heap_byte_size() == min_heap_byte_size()) {
+  if (_max_heap_byte_size == _min_heap_byte_size) {
     // The maximum and minimum heap sizes are the same so
     // the generations minimum and initial must be the
     // same as its maximum.
-    set_min_gen0_size(max_new_size);
-    set_initial_gen0_size(max_new_size);
-    set_max_gen0_size(max_new_size);
+    _min_gen0_size = max_new_size;
+    _initial_gen0_size = max_new_size;
+    _max_gen0_size = max_new_size;
   } else {
     size_t desired_new_size = 0;
     if (!FLAG_IS_DEFAULT(NewSize)) {
@@ -384,43 +359,37 @@
       // Use the default NewSize as the floor for these values.  If
       // NewRatio is overly large, the resulting sizes can be too
       // small.
-      _min_gen0_size = MAX2(scale_by_NewRatio_aligned(min_heap_byte_size()),
-                          NewSize);
+      _min_gen0_size = MAX2(scale_by_NewRatio_aligned(_min_heap_byte_size), NewSize);
       desired_new_size =
-        MAX2(scale_by_NewRatio_aligned(initial_heap_byte_size()),
-             NewSize);
+        MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize);
     }
 
     assert(_min_gen0_size > 0, "Sanity check");
-    set_initial_gen0_size(desired_new_size);
-    set_max_gen0_size(max_new_size);
+    _initial_gen0_size = desired_new_size;
+    _max_gen0_size = max_new_size;
 
     // At this point the desirable initial and minimum sizes have been
     // determined without regard to the maximum sizes.
 
     // Bound the sizes by the corresponding overall heap sizes.
-    set_min_gen0_size(
-      bound_minus_alignment(_min_gen0_size, min_heap_byte_size()));
-    set_initial_gen0_size(
-      bound_minus_alignment(_initial_gen0_size, initial_heap_byte_size()));
-    set_max_gen0_size(
-      bound_minus_alignment(_max_gen0_size, max_heap_byte_size()));
+    _min_gen0_size = bound_minus_alignment(_min_gen0_size, _min_heap_byte_size);
+    _initial_gen0_size = bound_minus_alignment(_initial_gen0_size, _initial_heap_byte_size);
+    _max_gen0_size = bound_minus_alignment(_max_gen0_size, _max_heap_byte_size);
 
     // At this point all three sizes have been checked against the
     // maximum sizes but have not been checked for consistency
     // among the three.
 
     // Final check min <= initial <= max
-    set_min_gen0_size(MIN2(_min_gen0_size, _max_gen0_size));
-    set_initial_gen0_size(
-      MAX2(MIN2(_initial_gen0_size, _max_gen0_size), _min_gen0_size));
-    set_min_gen0_size(MIN2(_min_gen0_size, _initial_gen0_size));
+    _min_gen0_size = MIN2(_min_gen0_size, _max_gen0_size);
+    _initial_gen0_size = MAX2(MIN2(_initial_gen0_size, _max_gen0_size), _min_gen0_size);
+    _min_gen0_size = MIN2(_min_gen0_size, _initial_gen0_size);
   }
 
   if (PrintGCDetails && Verbose) {
     gclog_or_tty->print_cr("1: Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
       SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
-      min_gen0_size(), initial_gen0_size(), max_gen0_size());
+      _min_gen0_size, _initial_gen0_size, _max_gen0_size);
   }
 }
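
The final consistency pass is just three clamps; a tiny sketch with deliberately inconsistent assumed inputs (MIN2/MAX2 are HotSpot's two-argument min/max, std::min/std::max here):

#include <algorithm>
#include <cstddef>

int main() {
  size_t min_s = 40, initial_s = 10, max_s = 32;            // assumed inputs
  min_s     = std::min(min_s, max_s);                       // min <= max
  initial_s = std::max(std::min(initial_s, max_s), min_s);  // min <= initial <= max
  min_s     = std::min(min_s, initial_s);                   // restore min <= initial
  return (min_s <= initial_s && initial_s <= max_s) ? 0 : 1;
}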
 
@@ -440,19 +409,17 @@
 
   if ((*gen1_size_ptr + *gen0_size_ptr) > heap_size) {
     if ((heap_size < (*gen0_size_ptr + min_gen1_size)) &&
-        (heap_size >= min_gen1_size + min_alignment())) {
+        (heap_size >= min_gen1_size + _min_alignment)) {
       // Adjust gen0 down to accommodate min_gen1_size
       *gen0_size_ptr = heap_size - min_gen1_size;
       *gen0_size_ptr =
-        MAX2((uintx)align_size_down(*gen0_size_ptr, min_alignment()),
-             min_alignment());
+        MAX2((uintx)align_size_down(*gen0_size_ptr, _min_alignment), _min_alignment);
       assert(*gen0_size_ptr > 0, "Min gen0 is too large");
       result = true;
     } else {
       *gen1_size_ptr = heap_size - *gen0_size_ptr;
       *gen1_size_ptr =
-        MAX2((uintx)align_size_down(*gen1_size_ptr, min_alignment()),
-                       min_alignment());
+        MAX2((uintx)align_size_down(*gen1_size_ptr, _min_alignment), _min_alignment);
     }
   }
   return result;
@@ -473,10 +440,9 @@
   // The maximum gen1 size can be determined from the maximum gen0
  // and maximum heap size since no explicit flags exist
   // for setting the gen1 maximum.
-  _max_gen1_size = max_heap_byte_size() - _max_gen0_size;
+  _max_gen1_size = _max_heap_byte_size - _max_gen0_size;
   _max_gen1_size =
-    MAX2((uintx)align_size_down(_max_gen1_size, min_alignment()),
-         min_alignment());
+    MAX2((uintx)align_size_down(_max_gen1_size, _min_alignment), _min_alignment);
   // If no explicit command line flag has been set for the
   // gen1 size, use what is left for gen1.
   if (FLAG_IS_DEFAULT(OldSize) || FLAG_IS_ERGO(OldSize)) {
@@ -485,70 +451,66 @@
     // with the overall heap size).  In either case make
     // the minimum, maximum and initial sizes consistent
     // with the gen0 sizes and the overall heap sizes.
-    assert(min_heap_byte_size() > _min_gen0_size,
+    assert(_min_heap_byte_size > _min_gen0_size,
       "gen0 has an unexpected minimum size");
-    set_min_gen1_size(min_heap_byte_size() - min_gen0_size());
-    set_min_gen1_size(
-      MAX2((uintx)align_size_down(_min_gen1_size, min_alignment()),
-           min_alignment()));
-    set_initial_gen1_size(initial_heap_byte_size() - initial_gen0_size());
-    set_initial_gen1_size(
-      MAX2((uintx)align_size_down(_initial_gen1_size, min_alignment()),
-           min_alignment()));
-
+    _min_gen1_size = _min_heap_byte_size - _min_gen0_size;
+    _min_gen1_size =
+      MAX2((uintx)align_size_down(_min_gen1_size, _min_alignment), _min_alignment);
+    _initial_gen1_size = _initial_heap_byte_size - _initial_gen0_size;
+    _initial_gen1_size =
+      MAX2((uintx)align_size_down(_initial_gen1_size, _min_alignment), _min_alignment);
   } else {
     // It's been explicitly set on the command line.  Use the
     // OldSize and then determine the consequences.
-    set_min_gen1_size(OldSize);
-    set_initial_gen1_size(OldSize);
+    _min_gen1_size = OldSize;
+    _initial_gen1_size = OldSize;
 
     // If the user has explicitly set an OldSize that is inconsistent
     // with other command line flags, issue a warning.
     // The generation minimums and the overall heap minimum should
     // be within one heap alignment.
-    if ((_min_gen1_size + _min_gen0_size + min_alignment()) <
-           min_heap_byte_size()) {
+    if ((_min_gen1_size + _min_gen0_size + _min_alignment) < _min_heap_byte_size) {
       warning("Inconsistency between minimum heap size and minimum "
-          "generation sizes: using minimum heap = " SIZE_FORMAT,
-          min_heap_byte_size());
+              "generation sizes: using minimum heap = " SIZE_FORMAT,
+              _min_heap_byte_size);
     }
     if ((OldSize > _max_gen1_size)) {
       warning("Inconsistency between maximum heap size and maximum "
-          "generation sizes: using maximum heap = " SIZE_FORMAT
-          " -XX:OldSize flag is being ignored",
-          max_heap_byte_size());
+              "generation sizes: using maximum heap = " SIZE_FORMAT
+              " -XX:OldSize flag is being ignored",
+              _max_heap_byte_size);
     }
     // If there is an inconsistency between the OldSize and the minimum and/or
     // initial size of gen0, since OldSize was explicitly set, OldSize wins.
     if (adjust_gen0_sizes(&_min_gen0_size, &_min_gen1_size,
-                          min_heap_byte_size(), OldSize)) {
+                          _min_heap_byte_size, OldSize)) {
       if (PrintGCDetails && Verbose) {
         gclog_or_tty->print_cr("2: Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
               SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
-              min_gen0_size(), initial_gen0_size(), max_gen0_size());
+              _min_gen0_size, _initial_gen0_size, _max_gen0_size);
       }
     }
     // Initial size
     if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size,
-                         initial_heap_byte_size(), OldSize)) {
+                          _initial_heap_byte_size, OldSize)) {
       if (PrintGCDetails && Verbose) {
         gclog_or_tty->print_cr("3: Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
           SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
-          min_gen0_size(), initial_gen0_size(), max_gen0_size());
+          _min_gen0_size, _initial_gen0_size, _max_gen0_size);
       }
     }
   }
   // Enforce the maximum gen1 size.
-  set_min_gen1_size(MIN2(_min_gen1_size, _max_gen1_size));
+  _min_gen1_size = MIN2(_min_gen1_size, _max_gen1_size);
 
   // Check that min gen1 <= initial gen1 <= max gen1
-  set_initial_gen1_size(MAX2(_initial_gen1_size, _min_gen1_size));
-  set_initial_gen1_size(MIN2(_initial_gen1_size, _max_gen1_size));
+  _initial_gen1_size = MAX2(_initial_gen1_size, _min_gen1_size);
+  _initial_gen1_size = MIN2(_initial_gen1_size, _max_gen1_size);
 
   if (PrintGCDetails && Verbose) {
     gclog_or_tty->print_cr("Minimum gen1 " SIZE_FORMAT "  Initial gen1 "
       SIZE_FORMAT "  Maximum gen1 " SIZE_FORMAT,
-      min_gen1_size(), initial_gen1_size(), max_gen1_size());
+      _min_gen1_size, _initial_gen1_size, _max_gen1_size);
   }
 }
 
--- a/src/share/vm/memory/collectorPolicy.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/memory/collectorPolicy.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -98,17 +98,15 @@
   {}
 
  public:
-  void set_min_alignment(size_t align)         { _min_alignment = align; }
+  // Return the maximum heap alignment that may be imposed by the policy
+  static size_t compute_max_alignment();
+
   size_t min_alignment()                       { return _min_alignment; }
-  void set_max_alignment(size_t align)         { _max_alignment = align; }
   size_t max_alignment()                       { return _max_alignment; }
 
   size_t initial_heap_byte_size() { return _initial_heap_byte_size; }
-  void set_initial_heap_byte_size(size_t v) { _initial_heap_byte_size = v; }
   size_t max_heap_byte_size()     { return _max_heap_byte_size; }
-  void set_max_heap_byte_size(size_t v) { _max_heap_byte_size = v; }
   size_t min_heap_byte_size()     { return _min_heap_byte_size; }
-  void set_min_heap_byte_size(size_t v) { _min_heap_byte_size = v; }
 
   enum Name {
     CollectorPolicyKind,
@@ -234,9 +232,6 @@
   // Try to allocate space by expanding the heap.
   virtual HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab);
 
-  // compute max heap alignment
-  size_t compute_max_alignment();
-
 // Scale the base_size by NewRatio according to
  //     result = base_size / (NewRatio + 1)
  // and align by min_alignment()
@@ -248,12 +243,9 @@
 
  public:
   // Accessors
-  size_t min_gen0_size() { return _min_gen0_size; }
-  void set_min_gen0_size(size_t v) { _min_gen0_size = v; }
+  size_t min_gen0_size()     { return _min_gen0_size; }
   size_t initial_gen0_size() { return _initial_gen0_size; }
-  void set_initial_gen0_size(size_t v) { _initial_gen0_size = v; }
-  size_t max_gen0_size() { return _max_gen0_size; }
-  void set_max_gen0_size(size_t v) { _max_gen0_size = v; }
+  size_t max_gen0_size()     { return _max_gen0_size; }
 
   virtual int number_of_generations() = 0;
 
@@ -302,12 +294,9 @@
 
  public:
   // Accessors
-  size_t min_gen1_size() { return _min_gen1_size; }
-  void set_min_gen1_size(size_t v) { _min_gen1_size = v; }
+  size_t min_gen1_size()     { return _min_gen1_size; }
   size_t initial_gen1_size() { return _initial_gen1_size; }
-  void set_initial_gen1_size(size_t v) { _initial_gen1_size = v; }
-  size_t max_gen1_size() { return _max_gen1_size; }
-  void set_max_gen1_size(size_t v) { _max_gen1_size = v; }
+  size_t max_gen1_size()     { return _max_gen1_size; }
 
   // Inherited methods
   TwoGenerationCollectorPolicy* as_two_generation_policy() { return this; }
--- a/src/share/vm/memory/filemap.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/memory/filemap.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -55,6 +55,7 @@
               " shared archive file.\n");
   jio_vfprintf(defaultStream::error_stream(), msg, ap);
   jio_fprintf(defaultStream::error_stream(), "\n");
+  // Do not change the text of the below message because some tests check for it.
   vm_exit_during_initialization("Unable to use shared archive.", NULL);
 }
 
--- a/src/share/vm/memory/filemap.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/memory/filemap.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -26,6 +26,7 @@
 #define SHARE_VM_MEMORY_FILEMAP_HPP
 
 #include "memory/metaspaceShared.hpp"
+#include "memory/metaspace.hpp"
 
 // Layout of the file:
 //  header: dump of archive instance plus versioning info, datestamp, etc.
--- a/src/share/vm/memory/freeBlockDictionary.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/memory/freeBlockDictionary.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -28,7 +28,6 @@
 #include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
 #endif // INCLUDE_ALL_GCS
 #include "memory/freeBlockDictionary.hpp"
-#include "memory/metablock.hpp"
 #include "memory/metachunk.hpp"
 #include "runtime/thread.inline.hpp"
 #include "utilities/macros.hpp"
--- a/src/share/vm/memory/freeList.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/memory/freeList.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -25,7 +25,6 @@
 #include "precompiled.hpp"
 #include "memory/freeBlockDictionary.hpp"
 #include "memory/freeList.hpp"
-#include "memory/metablock.hpp"
 #include "memory/metachunk.hpp"
 #include "memory/sharedHeap.hpp"
 #include "runtime/globals.hpp"
--- a/src/share/vm/memory/gcLocker.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/memory/gcLocker.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -122,7 +122,7 @@
     // strictly needed. It's added here to make it clear that
     // the GC will NOT be performed if any other caller
     // of GC_locker::lock() still needs GC locked.
-    if (!is_active()) {
+    if (!is_active_internal()) {
       _doing_gc = true;
       {
         // Must give up the lock while at a safepoint
--- a/src/share/vm/memory/gcLocker.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/memory/gcLocker.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -88,7 +88,7 @@
  public:
   // Accessors
   static bool is_active() {
-    assert(_needs_gc || SafepointSynchronize::is_at_safepoint(), "only read at safepoint");
+    assert(SafepointSynchronize::is_at_safepoint(), "only read at safepoint");
     return is_active_internal();
   }
   static bool needs_gc()       { return _needs_gc;                        }
--- a/src/share/vm/memory/genCollectedHeap.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/memory/genCollectedHeap.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -148,6 +148,11 @@
     return gen_policy()->size_policy();
   }
 
+  // Return the (conservative) maximum heap alignment
+  static size_t conservative_max_heap_alignment() {
+    return Generation::GenGrain;
+  }
+
   size_t capacity() const;
   size_t used() const;
 
--- a/src/share/vm/memory/genRemSet.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/memory/genRemSet.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -32,13 +32,8 @@
 // enumeration.)
 
 uintx GenRemSet::max_alignment_constraint(Name nm) {
-  switch (nm) {
-  case GenRemSet::CardTable:
-    return CardTableRS::ct_max_alignment_constraint();
-  default:
-    guarantee(false, "Unrecognized GenRemSet type.");
-    return (0); // Make Windows compiler happy
-  }
+  assert(nm == GenRemSet::CardTable, "Unrecognized GenRemSet type.");
+  return CardTableRS::ct_max_alignment_constraint();
 }
 
 class HasAccumulatedModifiedOopsClosure : public KlassClosure {
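
The constraint returned by CardTableRS::ct_max_alignment_constraint() is the card-table geometry the deleted collectorPolicy comment spelled out: with one card byte per 512 heap bytes and 4K OS pages, the maximum heap alignment works out to 512 * 4096 = 2M. As a standalone check (assumed values, not HotSpot code):

#include <cstddef>

int main() {
  const size_t card_size    = 512;   // heap bytes covered by one card byte
  const size_t os_page_size = 4096;  // assumed OS page size
  // Keeping the card table's OS pages entirely full means aligning the
  // maximum heap size to card_size * os_page_size.
  size_t ct_max_alignment = card_size * os_page_size;  // 2M
  return ct_max_alignment == 2u * 1024 * 1024 ? 0 : 1;
}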
--- a/src/share/vm/memory/heapInspection.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/memory/heapInspection.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -73,6 +73,10 @@
         "Number of bytes used by the InstanceKlass::methods() array") \
     f(method_ordering_bytes, IK_method_ordering, \
         "Number of bytes used by the InstanceKlass::method_ordering() array") \
+    f(default_methods_array_bytes, IK_default_methods, \
+        "Number of bytes used by the InstanceKlass::default_methods() array") \
+    f(default_vtable_indices_bytes, IK_default_vtable_indices, \
+        "Number of bytes used by the InstanceKlass::default_vtable_indices() array") \
     f(local_interfaces_bytes, IK_local_interfaces, \
         "Number of bytes used by the InstanceKlass::local_interfaces() array") \
     f(transitive_interfaces_bytes, IK_transitive_interfaces, \
@@ -150,11 +154,11 @@
   HEAP_INSPECTION_COLUMNS_DO(DECLARE_KLASS_SIZE_STATS_FIELD)
 
   static int count(oop x) {
-    return (HeapWordSize * ((x) ? (x)->size() : 0));
+    return (HeapWordSize * (((x) != NULL) ? (x)->size() : 0));
   }
 
   static int count_array(objArrayOop x) {
-    return (HeapWordSize * ((x) ? (x)->size() : 0));
+    return (HeapWordSize * (((x) != NULL) ? (x)->size() : 0));
   }
 
   template <class T> static int count(T* x) {
--- a/src/share/vm/memory/memRegion.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/memory/memRegion.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -102,11 +102,11 @@
   return MemRegion();
 }
 
-void* MemRegion::operator new(size_t size) {
+void* MemRegion::operator new(size_t size) throw() {
   return (address)AllocateHeap(size, mtGC, 0, AllocFailStrategy::RETURN_NULL);
 }
 
-void* MemRegion::operator new [](size_t size) {
+void* MemRegion::operator new [](size_t size) throw() {
   return (address)AllocateHeap(size, mtGC, 0, AllocFailStrategy::RETURN_NULL);
 }
 void  MemRegion::operator delete(void* p) {
--- a/src/share/vm/memory/memRegion.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/memory/memRegion.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -94,8 +94,8 @@
   size_t word_size() const { return _word_size; }
 
   bool is_empty() const { return word_size() == 0; }
-  void* operator new(size_t size);
-  void* operator new [](size_t size);
+  void* operator new(size_t size) throw();
+  void* operator new [](size_t size) throw();
   void  operator delete(void* p);
   void  operator delete [](void* p);
 };
@@ -111,13 +111,13 @@
 
 class MemRegionClosureRO: public MemRegionClosure {
 public:
-  void* operator new(size_t size, ResourceObj::allocation_type type, MEMFLAGS flags) {
+  void* operator new(size_t size, ResourceObj::allocation_type type, MEMFLAGS flags) throw() {
         return ResourceObj::operator new(size, type, flags);
   }
-  void* operator new(size_t size, Arena *arena) {
+  void* operator new(size_t size, Arena *arena) throw() {
         return ResourceObj::operator new(size, arena);
   }
-  void* operator new(size_t size) {
+  void* operator new(size_t size) throw() {
         return ResourceObj::operator new(size);
   }
 
--- a/src/share/vm/memory/metablock.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,75 +0,0 @@
-/*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "memory/allocation.hpp"
-#include "memory/metablock.hpp"
-#include "utilities/copy.hpp"
-#include "utilities/debug.hpp"
-
-// Blocks of space for metadata are allocated out of Metachunks.
-//
-// Metachunk are allocated out of MetadataVirtualspaces and once
-// allocated there is no explicit link between a Metachunk and
-// the MetadataVirtualspaces from which it was allocated.
-//
-// Each SpaceManager maintains a
-// list of the chunks it is using and the current chunk.  The current
-// chunk is the chunk from which allocations are done.  Space freed in
-// a chunk is placed on the free list of blocks (BlockFreelist) and
-// reused from there.
-//
-// Future modification
-//
-// The Metachunk can conceivable be replaced by the Chunk in
-// allocation.hpp.  Note that the latter Chunk is the space for
-// allocation (allocations from the chunk are out of the space in
-// the Chunk after the header for the Chunk) where as Metachunks
-// point to space in a VirtualSpace.  To replace Metachunks with
-// Chunks, change Chunks so that they can be allocated out of a VirtualSpace.
-size_t Metablock::_min_block_byte_size = sizeof(Metablock);
-
-#ifdef ASSERT
-size_t Metablock::_overhead =
-  Chunk::aligned_overhead_size(sizeof(Metablock)) / BytesPerWord;
-#else
-size_t Metablock::_overhead = 0;
-#endif
-
-// New blocks returned by the Metaspace are zero initialized.
-// We should fix the constructors to not assume this instead.
-Metablock* Metablock::initialize(MetaWord* p, size_t word_size) {
-  if (p == NULL) {
-    return NULL;
-  }
-
-  Metablock* result = (Metablock*) p;
-
-  // Clear the memory
-  Copy::fill_to_aligned_words((HeapWord*)result, word_size);
-#ifdef ASSERT
-  result->set_word_size(word_size);
-#endif
-  return result;
-}
--- a/src/share/vm/memory/metablock.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,103 +0,0 @@
-/*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-#ifndef SHARE_VM_MEMORY_METABLOCK_HPP
-#define SHARE_VM_MEMORY_METABLOCK_HPP
-
-// Metablock are the unit of allocation from a Chunk.  It is initialized
-// with the size of the requested allocation.  That size is overwritten
-// once the allocation returns.
-//
-// A Metablock may be reused by its SpaceManager but are never moved between
-// SpaceManagers.  There is no explicit link to the Metachunk
-// from which it was allocated.  Metablock may be deallocated and
-// put on a freelist but the space is never freed, rather
-// the Metachunk it is a part of will be deallocated when it's
-// associated class loader is collected.
-
-class Metablock VALUE_OBJ_CLASS_SPEC {
-  friend class VMStructs;
- private:
-  // Used to align the allocation (see below).
-  union block_t {
-    void* _data[3];
-    struct header_t {
-      size_t _word_size;
-      Metablock* _next;
-      Metablock* _prev;
-    } _header;
-  } _block;
-  static size_t _min_block_byte_size;
-  static size_t _overhead;
-
-  typedef union block_t Block;
-  typedef struct header_t Header;
-  const Block* block() const { return &_block; }
-  const Block::header_t* header() const { return &(block()->_header); }
- public:
-
-  static Metablock* initialize(MetaWord* p, size_t word_size);
-
-  // This places the body of the block at a 2 word boundary
-  // because every block starts on a 2 word boundary.  Work out
-  // how to make the body on a 2 word boundary if the block
-  // starts on a arbitrary boundary.  JJJ
-
-  size_t word_size() const  { return header()->_word_size; }
-  void set_word_size(size_t v) { _block._header._word_size = v; }
-  size_t size() const volatile { return _block._header._word_size; }
-  void set_size(size_t v) { _block._header._word_size = v; }
-  Metablock* next() const { return header()->_next; }
-  void set_next(Metablock* v) { _block._header._next = v; }
-  Metablock* prev() const { return header()->_prev; }
-  void set_prev(Metablock* v) { _block._header._prev = v; }
-
-  static size_t min_block_byte_size() { return _min_block_byte_size; }
-  static size_t overhead() { return _overhead; }
-
-  bool is_free()                 { return header()->_word_size != 0; }
-  void clear_next()              { set_next(NULL); }
-  void link_prev(Metablock* ptr) { set_prev(ptr); }
-  uintptr_t* end()              { return ((uintptr_t*) this) + size(); }
-  bool cantCoalesce() const     { return false; }
-  void link_next(Metablock* ptr) { set_next(ptr); }
-  void link_after(Metablock* ptr){
-    link_next(ptr);
-    if (ptr != NULL) ptr->link_prev(this);
-  }
-
-  // Should not be needed in a free list of Metablocks
-  void markNotFree()            { ShouldNotReachHere(); }
-
-  // Debug support
-#ifdef ASSERT
-  void* prev_addr() const { return (void*)&_block._header._prev; }
-  void* next_addr() const { return (void*)&_block._header._next; }
-  void* size_addr() const { return (void*)&_block._header._word_size; }
-#endif
-  bool verify_chunk_in_free_list(Metablock* tc) const { return true; }
-  bool verify_par_locked() { return true; }
-
-  void assert_is_mangled() const {/* Don't check "\*/}
-};
-#endif // SHARE_VM_MEMORY_METABLOCK_HPP
--- a/src/share/vm/memory/metachunk.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/memory/metachunk.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,42 +29,39 @@
 #include "utilities/debug.hpp"
 
 class VirtualSpaceNode;
-//
-// Future modification
-//
-// The Metachunk can conceivable be replaced by the Chunk in
-// allocation.hpp.  Note that the latter Chunk is the space for
-// allocation (allocations from the chunk are out of the space in
-// the Chunk after the header for the Chunk) where as Metachunks
-// point to space in a VirtualSpace.  To replace Metachunks with
-// Chunks, change Chunks so that they can be allocated out of a VirtualSpace.
 
 const size_t metadata_chunk_initialize = 0xf7f7f7f7;
 
-size_t Metachunk::_overhead =
-  Chunk::aligned_overhead_size(sizeof(Metachunk)) / BytesPerWord;
+size_t Metachunk::object_alignment() {
+  // Must align pointers and sizes to 8,
+  // so that 64 bit types get correctly aligned.
+  const size_t alignment = 8;
+
+  // Make sure that the Klass alignment also agrees.
+  STATIC_ASSERT(alignment == (size_t)KlassAlignmentInBytes);
+
+  return alignment;
+}
+
+size_t Metachunk::overhead() {
+  return align_size_up(sizeof(Metachunk), object_alignment()) / BytesPerWord;
+}
 
 // Metachunk methods
 
 Metachunk::Metachunk(size_t word_size,
-                     VirtualSpaceNode* container) :
-    _word_size(word_size),
-    _bottom(NULL),
-    _end(NULL),
+                     VirtualSpaceNode* container)
+    : Metabase<Metachunk>(word_size),
     _top(NULL),
-    _next(NULL),
-    _prev(NULL),
     _container(container)
 {
-  _bottom = (MetaWord*)this;
-  _top = (MetaWord*)this + _overhead;
-  _end = (MetaWord*)this + word_size;
+  _top = initial_top();
 #ifdef ASSERT
-  set_is_free(false);
+  set_is_tagged_free(false);
   size_t data_word_size = pointer_delta(end(),
-                                        top(),
+                                        _top,
                                         sizeof(MetaWord));
-  Copy::fill_to_words((HeapWord*) top(),
+  Copy::fill_to_words((HeapWord*)_top,
                       data_word_size,
                       metadata_chunk_initialize);
 #endif
@@ -82,22 +79,18 @@
 
 // _bottom points to the start of the chunk including the overhead.
 size_t Metachunk::used_word_size() const {
-  return pointer_delta(_top, _bottom, sizeof(MetaWord));
+  return pointer_delta(_top, bottom(), sizeof(MetaWord));
 }
 
 size_t Metachunk::free_word_size() const {
-  return pointer_delta(_end, _top, sizeof(MetaWord));
-}
-
-size_t Metachunk::capacity_word_size() const {
-  return pointer_delta(_end, _bottom, sizeof(MetaWord));
+  return pointer_delta(end(), _top, sizeof(MetaWord));
 }
 
 void Metachunk::print_on(outputStream* st) const {
   st->print_cr("Metachunk:"
                " bottom " PTR_FORMAT " top " PTR_FORMAT
                " end " PTR_FORMAT " size " SIZE_FORMAT,
-               bottom(), top(), end(), word_size());
+               bottom(), _top, end(), word_size());
   if (Verbose) {
     st->print_cr("    used " SIZE_FORMAT " free " SIZE_FORMAT,
                  used_word_size(), free_word_size());
@@ -109,8 +102,8 @@
   // Mangle the payload of the chunk and not the links that
   // maintain list of chunks.
   HeapWord* start = (HeapWord*)(bottom() + overhead());
-  size_t word_size = capacity_word_size() - overhead();
-  Copy::fill_to_words(start, word_size, metadata_chunk_initialize);
+  size_t size = word_size() - overhead();
+  Copy::fill_to_words(start, size, metadata_chunk_initialize);
 }
 #endif // PRODUCT
 
@@ -118,9 +111,68 @@
 #ifdef ASSERT
   // Cannot walk through the blocks unless the blocks have
   // headers with sizes.
-  assert(_bottom <= _top &&
-         _top <= _end,
+  assert(bottom() <= _top &&
+         _top <= (MetaWord*)end(),
          "Chunk has been smashed");
 #endif
   return;
 }
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+
+class TestMetachunk {
+ public:
+  static void test() {
+    size_t size = 2 * 1024 * 1024;
+    void* memory = malloc(size);
+    assert(memory != NULL, "Failed to malloc 2MB");
+
+    Metachunk* metachunk = ::new (memory) Metachunk(size / BytesPerWord, NULL);
+
+    assert(metachunk->bottom() == (MetaWord*)metachunk, "assert");
+    assert(metachunk->end() == (uintptr_t*)metachunk + metachunk->size(), "assert");
+
+    // Check sizes
+    assert(metachunk->size() == metachunk->word_size(), "assert");
+    assert(metachunk->word_size() == pointer_delta(metachunk->end(), metachunk->bottom(),
+        sizeof(MetaWord*)), "assert");
+
+    // Check usage
+    assert(metachunk->used_word_size() == metachunk->overhead(), "assert");
+    assert(metachunk->free_word_size() == metachunk->word_size() - metachunk->used_word_size(), "assert");
+    assert(metachunk->top() == metachunk->initial_top(), "assert");
+    assert(metachunk->is_empty(), "assert");
+
+    // Allocate
+    size_t alloc_size = 64; // Words
+    assert(is_size_aligned(alloc_size, Metachunk::object_alignment()), "assert");
+
+    MetaWord* mem = metachunk->allocate(alloc_size);
+
+    // Check post alloc
+    assert(mem == metachunk->initial_top(), "assert");
+    assert(mem + alloc_size == metachunk->top(), "assert");
+    assert(metachunk->used_word_size() == metachunk->overhead() + alloc_size, "assert");
+    assert(metachunk->free_word_size() == metachunk->word_size() - metachunk->used_word_size(), "assert");
+    assert(!metachunk->is_empty(), "assert");
+
+    // Clear chunk
+    metachunk->reset_empty();
+
+    // Check post clear
+    assert(metachunk->used_word_size() == metachunk->overhead(), "assert");
+    assert(metachunk->free_word_size() == metachunk->word_size() - metachunk->used_word_size(), "assert");
+    assert(metachunk->top() == metachunk->initial_top(), "assert");
+    assert(metachunk->is_empty(), "assert");
+
+    free(memory);
+  }
+};
+
+void TestMetachunk_test() {
+  TestMetachunk::test();
+}
+
+#endif
--- a/src/share/vm/memory/metachunk.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/memory/metachunk.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,89 +24,44 @@
 #ifndef SHARE_VM_MEMORY_METACHUNK_HPP
 #define SHARE_VM_MEMORY_METACHUNK_HPP
 
-//  Metachunk - Quantum of allocation from a Virtualspace
-//    Metachunks are reused (when freed are put on a global freelist) and
-//    have no permanent association to a SpaceManager.
-
-//            +--------------+ <- end
-//            |              |          --+       ---+
-//            |              |            | free     |
-//            |              |            |          |
-//            |              |            |          | capacity
-//            |              |            |          |
-//            |              | <- top   --+          |
-//            |              |           ---+        |
-//            |              |              | used   |
-//            |              |              |        |
-//            |              |              |        |
-//            +--------------+ <- bottom ---+     ---+
+#include "memory/allocation.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
 
 class VirtualSpaceNode;
 
-class Metachunk VALUE_OBJ_CLASS_SPEC {
-  // link to support lists of chunks
-  Metachunk* _next;
-  Metachunk* _prev;
-  VirtualSpaceNode* _container;
-
-  MetaWord* _bottom;
-  MetaWord* _end;
-  MetaWord* _top;
+// Super class of Metablock and Metachunk to allow them to
+// be put on the FreeList and in the BinaryTreeDictionary.
+template <class T>
+class Metabase VALUE_OBJ_CLASS_SPEC {
   size_t _word_size;
-  // Used in a guarantee() so included in the Product builds
-  // even through it is only for debugging.
-  bool _is_free;
+  T*     _next;
+  T*     _prev;
 
-  // Metachunks are allocated out of a MetadataVirtualSpace and
-  // and use some of its space to describe itself (plus alignment
-  // considerations).  Metadata is allocated in the rest of the chunk.
-  // This size is the overhead of maintaining the Metachunk within
-  // the space.
-  static size_t _overhead;
+ protected:
+  Metabase(size_t word_size) : _word_size(word_size), _next(NULL), _prev(NULL) {}
 
  public:
-  Metachunk(size_t word_size , VirtualSpaceNode* container);
-
-  // Used to add a Metachunk to a list of Metachunks
-  void set_next(Metachunk* v) { _next = v; assert(v != this, "Boom");}
-  void set_prev(Metachunk* v) { _prev = v; assert(v != this, "Boom");}
-  void set_container(VirtualSpaceNode* v) { _container = v; }
-
-  MetaWord* allocate(size_t word_size);
+  T* next() const         { return _next; }
+  T* prev() const         { return _prev; }
+  void set_next(T* v)     { _next = v; assert(v != this, "Boom");}
+  void set_prev(T* v)     { _prev = v; assert(v != this, "Boom");}
+  void clear_next()       { set_next(NULL); }
+  void clear_prev()       { set_prev(NULL); }
 
-  // Accessors
-  Metachunk* next() const { return _next; }
-  Metachunk* prev() const { return _prev; }
-  VirtualSpaceNode* container() const { return _container; }
-  MetaWord* bottom() const { return _bottom; }
-  MetaWord* end() const { return _end; }
-  MetaWord* top() const { return _top; }
-  size_t word_size() const { return _word_size; }
   size_t size() const volatile { return _word_size; }
   void set_size(size_t v) { _word_size = v; }
-  bool is_free() { return _is_free; }
-  void set_is_free(bool v) { _is_free = v; }
-  static size_t overhead() { return _overhead; }
-  void clear_next()              { set_next(NULL); }
-  void link_prev(Metachunk* ptr) { set_prev(ptr); }
-  uintptr_t* end()              { return ((uintptr_t*) this) + size(); }
-  bool cantCoalesce() const     { return false; }
-  void link_next(Metachunk* ptr) { set_next(ptr); }
-  void link_after(Metachunk* ptr){
+
+  void link_next(T* ptr)  { set_next(ptr); }
+  void link_prev(T* ptr)  { set_prev(ptr); }
+  void link_after(T* ptr) {
     link_next(ptr);
-    if (ptr != NULL) ptr->link_prev(this);
+    if (ptr != NULL) ptr->link_prev((T*)this);
   }
 
-  // Reset top to bottom so chunk can be reused.
-  void reset_empty() { _top = (_bottom + _overhead); _next = NULL; _prev = NULL; }
-  bool is_empty() { return _top == (_bottom + _overhead); }
+  uintptr_t* end() const        { return ((uintptr_t*) this) + size(); }
 
-  // used (has been allocated)
-  // free (available for future allocations)
-  // capacity (total size of chunk)
-  size_t used_word_size() const;
-  size_t free_word_size() const;
-  size_t capacity_word_size()const;
+  bool cantCoalesce() const     { return false; }
 
   // Debug support
 #ifdef ASSERT
@@ -114,14 +69,99 @@
   void* next_addr() const { return (void*)&_next; }
   void* size_addr() const { return (void*)&_word_size; }
 #endif
-  bool verify_chunk_in_free_list(Metachunk* tc) const { return true; }
+  bool verify_chunk_in_free_list(T* tc) const { return true; }
   bool verify_par_locked() { return true; }
 
   void assert_is_mangled() const {/* Don't check "\*/}
 
+  bool is_free()                 { return true; }
+};
+
+//  Metachunk - Quantum of allocation from a Virtualspace
+//    Metachunks are reused (when freed are put on a global freelist) and
+//    have no permanent association to a SpaceManager.
+
+//            +--------------+ <- end    --+       --+
+//            |              |             |         |
+//            |              |             | free    |
+//            |              |             |         |
+//            |              |             |         | size | capacity
+//            |              |             |         |
+//            |              | <- top   -- +         |
+//            |              |             |         |
+//            |              |             | used    |
+//            |              |             |         |
+//            |              |             |         |
+//            +--------------+ <- bottom --+       --+
+
+class Metachunk : public Metabase<Metachunk> {
+  friend class TestMetachunk;
+  // The VirtualSpaceNode containing this chunk.
+  VirtualSpaceNode* _container;
+
+  // Current allocation top.
+  MetaWord* _top;
+
+  DEBUG_ONLY(bool _is_tagged_free;)
+
+  MetaWord* initial_top() const { return (MetaWord*)this + overhead(); }
+  MetaWord* top() const         { return _top; }
+
+ public:
+  // Metachunks are allocated out of a MetadataVirtualSpace
+  // and use some of its space to describe itself (plus alignment
+  // considerations).  Metadata is allocated in the rest of the chunk.
+  // This size is the overhead of maintaining the Metachunk within
+  // the space.
+
+  // Alignment of each allocation in the chunks.
+  static size_t object_alignment();
+
+  // Size of the Metachunk header, including alignment.
+  static size_t overhead();
+
+  Metachunk(size_t word_size, VirtualSpaceNode* container);
+
+  MetaWord* allocate(size_t word_size);
+
+  VirtualSpaceNode* container() const { return _container; }
+
+  MetaWord* bottom() const { return (MetaWord*) this; }
+
+  // Reset top to bottom so chunk can be reused.
+  void reset_empty() { _top = initial_top(); clear_next(); clear_prev(); }
+  bool is_empty() { return _top == initial_top(); }
+
+  // used (has been allocated)
+  // free (available for future allocations)
+  size_t word_size() const { return size(); }
+  size_t used_word_size() const;
+  size_t free_word_size() const;
+
+#ifdef ASSERT
+  bool is_tagged_free() { return _is_tagged_free; }
+  void set_is_tagged_free(bool v) { _is_tagged_free = v; }
+#endif
+
   NOT_PRODUCT(void mangle();)
 
   void print_on(outputStream* st) const;
   void verify();
 };
+
+// Metablock is the unit of allocation from a Chunk.
+//
+// A Metablock may be reused by its SpaceManager but is never moved between
+// SpaceManagers.  There is no explicit link to the Metachunk
+// from which it was allocated.  A Metablock may be deallocated and
+// put on a freelist, but the space is never freed; rather, the
+// Metachunk it is a part of will be deallocated when its
+// associated class loader is collected.
+
+class Metablock : public Metabase<Metablock> {
+  friend class VMStructs;
+ public:
+  Metablock(size_t word_size) : Metabase<Metablock>(word_size) {}
+};
+
 #endif  // SHARE_VM_MEMORY_METACHUNK_HPP
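
The Metabase refactoring is the classic curiously-recurring-template (CRTP) intrusive-list idiom: the base takes the derived type as its template parameter, so next()/prev() come back correctly typed in both subclasses without casts. A reduced sketch (simplified stand-ins, not the real classes):

#include <cstddef>

template <class T>
class Base {
  T* _next;
  T* _prev;
 protected:
  Base() : _next(NULL), _prev(NULL) {}
 public:
  T* next() const         { return _next; }
  void set_next(T* v)     { _next = v; }
  void link_after(T* ptr) { set_next(ptr); }  // simplified linkage
};

class Block : public Base<Block> {};  // stands in for Metablock
class Chunk : public Base<Chunk> {};  // stands in for Metachunk

int main() {
  Block a, b;
  a.link_after(&b);
  Block* n = a.next();  // typed Block*, no downcast needed
  return n == &b ? 0 : 1;
}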
--- a/src/share/vm/memory/metadataFactory.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/memory/metadataFactory.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -65,6 +65,7 @@
   static void free_array(ClassLoaderData* loader_data, Array<T>* data) {
     if (data != NULL) {
       assert(loader_data != NULL, "shouldn't pass null");
+      assert(!data->is_shared(), "cannot deallocate array in shared spaces");
       int size = data->size();
       if (DumpSharedSpaces) {
         loader_data->ro_metaspace()->deallocate((MetaWord*)data, size, false);
@@ -83,6 +84,7 @@
       // Call metadata's deallocate function which will call deallocate fields
       assert(!DumpSharedSpaces, "cannot deallocate metadata when dumping CDS archive");
       assert(!md->on_stack(), "can't deallocate things on stack");
+      assert(!md->is_shared(), "cannot deallocate if in shared spaces");
       md->deallocate_contents(loader_data);
       loader_data->metaspace_non_null()->deallocate((MetaWord*)md, size, md->is_klass());
     }
--- a/src/share/vm/memory/metaspace.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/memory/metaspace.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -23,39 +23,40 @@
  */
 #include "precompiled.hpp"
 #include "gc_interface/collectedHeap.hpp"
+#include "memory/allocation.hpp"
 #include "memory/binaryTreeDictionary.hpp"
 #include "memory/freeList.hpp"
 #include "memory/collectorPolicy.hpp"
 #include "memory/filemap.hpp"
 #include "memory/freeList.hpp"
-#include "memory/metablock.hpp"
+#include "memory/gcLocker.hpp"
 #include "memory/metachunk.hpp"
 #include "memory/metaspace.hpp"
 #include "memory/metaspaceShared.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
+#include "runtime/atomic.inline.hpp"
 #include "runtime/globals.hpp"
+#include "runtime/init.hpp"
 #include "runtime/java.hpp"
 #include "runtime/mutex.hpp"
 #include "runtime/orderAccess.hpp"
 #include "services/memTracker.hpp"
+#include "services/memoryService.hpp"
 #include "utilities/copy.hpp"
 #include "utilities/debug.hpp"
 
 typedef BinaryTreeDictionary<Metablock, FreeList> BlockTreeDictionary;
 typedef BinaryTreeDictionary<Metachunk, FreeList> ChunkTreeDictionary;
-// Define this macro to enable slow integrity checking of
-// the free chunk lists
+
+// Set this constant to enable slow integrity checking of the free chunk lists
 const bool metaspace_slow_verify = false;
 
-// Parameters for stress mode testing
-const uint metadata_deallocate_a_lot_block = 10;
-const uint metadata_deallocate_a_lock_chunk = 3;
-size_t const allocation_from_dictionary_limit = 64 * K;
+size_t const allocation_from_dictionary_limit = 4 * K;
 
 MetaWord* last_allocated = 0;
 
-size_t Metaspace::_class_metaspace_size;
+size_t Metaspace::_compressed_class_space_size;
 
 // Used in declarations in SpaceManager and ChunkManager
 enum ChunkIndex {
@@ -74,8 +75,7 @@
   ClassSmallChunk = 256,
   SmallChunk = 512,
   ClassMediumChunk = 4 * K,
-  MediumChunk = 8 * K,
-  HumongousChunkGranularity = 8
+  MediumChunk = 8 * K
 };
 
 static ChunkIndex next_chunk_index(ChunkIndex i) {
@@ -83,35 +83,15 @@
   return (ChunkIndex) (i+1);
 }
 
-// Originally _capacity_until_GC was set to MetaspaceSize here but
-// the default MetaspaceSize before argument processing was being
-// used which was not the desired value.  See the code
-// in should_expand() to see how the initialization is handled
-// now.
-size_t MetaspaceGC::_capacity_until_GC = 0;
-bool MetaspaceGC::_expand_after_GC = false;
+volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
 uint MetaspaceGC::_shrink_factor = 0;
 bool MetaspaceGC::_should_concurrent_collect = false;
 
-// Blocks of space for metadata are allocated out of Metachunks.
-//
-// Metachunk are allocated out of MetadataVirtualspaces and once
-// allocated there is no explicit link between a Metachunk and
-// the MetadataVirtualspaces from which it was allocated.
-//
-// Each SpaceManager maintains a
-// list of the chunks it is using and the current chunk.  The current
-// chunk is the chunk from which allocations are done.  Space freed in
-// a chunk is placed on the free list of blocks (BlockFreelist) and
-// reused from there.
-
 typedef class FreeList<Metachunk> ChunkList;
 
 // Manages the global free lists of chunks.
-// Has three lists of free chunks, and a total size and
-// count that includes all three
-
-class ChunkManager VALUE_OBJ_CLASS_SPEC {
+class ChunkManager : public CHeapObj<mtInternal> {
+  friend class TestVirtualSpaceNodeTest;
 
   // Free list of chunks of different sizes.
   //   SpecializedChunk
@@ -120,7 +100,6 @@
   //   HumongousChunk
   ChunkList _free_chunks[NumberOfFreeLists];
 
-
   //   HumongousChunk
   ChunkTreeDictionary _humongous_dictionary;
 
@@ -158,11 +137,15 @@
 
  public:
 
-  ChunkManager() : _free_chunks_total(0), _free_chunks_count(0) {}
+  ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
+      : _free_chunks_total(0), _free_chunks_count(0) {
+    _free_chunks[SpecializedIndex].set_size(specialized_size);
+    _free_chunks[SmallIndex].set_size(small_size);
+    _free_chunks[MediumIndex].set_size(medium_size);
+  }
 
   // add or delete (return) a chunk to the global freelist.
   Metachunk* chunk_freelist_allocate(size_t word_size);
-  void chunk_freelist_deallocate(Metachunk* chunk);
 
   // Map a size to a list index assuming that there are lists
   // for special, small, medium, and humongous chunks.
@@ -177,8 +160,8 @@
   void return_chunks(ChunkIndex index, Metachunk* chunks);
 
   // Total of the space in the free chunks list
-  size_t free_chunks_total();
-  size_t free_chunks_total_in_bytes();
+  size_t free_chunks_total_words();
+  size_t free_chunks_total_bytes();
 
   // Number of chunks in the free chunks list
   size_t free_chunks_count();
@@ -196,9 +179,7 @@
   // Returns the list for the given chunk word size.
   ChunkList* find_free_chunks_list(size_t word_size);
 
-  // Add and remove from a list by size.  Selects
-  // list based on size of chunk.
-  void free_chunks_put(Metachunk* chuck);
+  // Remove from a list by size.  Selects list based on size of chunk.
   Metachunk* free_chunks_get(size_t chunk_word_size);
 
   // Debug support
@@ -219,14 +200,17 @@
   void locked_print_free_chunks(outputStream* st);
   void locked_print_sum_free_chunks(outputStream* st);
 
-  void print_on(outputStream* st);
+  void print_on(outputStream* st) const;
 };
 
 // Used to manage the free list of Metablocks (a block corresponds
 // to the allocation of a quantum of metadata).
 class BlockFreelist VALUE_OBJ_CLASS_SPEC {
   BlockTreeDictionary* _dictionary;
-  static Metablock* initialize_free_chunk(MetaWord* p, size_t word_size);
+
+  // Only allocate and split from freelist if the size of the allocation
+  // is at least 1/4th the size of the available block.
+  const static int WasteMultiplier = 4;
 
   // Accessors
   BlockTreeDictionary* dictionary() const { return _dictionary; }
@@ -250,6 +234,7 @@
   void print_on(outputStream* st) const;
 };
 
+// A VirtualSpaceList node.
 class VirtualSpaceNode : public CHeapObj<mtClass> {
   friend class VirtualSpaceList;
 
@@ -272,11 +257,8 @@
   // VirtualSpace
   Metachunk* first_chunk() { return (Metachunk*) bottom(); }
 
-  void inc_container_count();
-#ifdef ASSERT
-  uint container_count_slow();
-#endif
-
+  // Committed but unused space in the virtual space
+  size_t free_words_in_vs() const;
  public:
 
   VirtualSpaceNode(size_t byte_size);
@@ -287,6 +269,11 @@
   MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
   MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
 
+  size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
+  size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
+
+  bool is_pre_committed() const { return _virtual_space.special(); }
+
   // address of next available space in _virtual_space;
   // Accessors
   VirtualSpaceNode* next() { return _next; }
@@ -306,15 +293,16 @@
   void inc_top(size_t word_size) { _top += word_size; }
 
   uintx container_count() { return _container_count; }
+  void inc_container_count();
   void dec_container_count();
 #ifdef ASSERT
+  uint container_count_slow();
   void verify_container_count();
 #endif
 
   // used and capacity in this single entry in the list
   size_t used_words_in_vs() const;
   size_t capacity_words_in_vs() const;
-  size_t free_words_in_vs() const;
 
   bool initialize();
 
@@ -323,63 +311,101 @@
 
   // Allocate a chunk from the virtual space and return it.
   Metachunk* get_chunk_vs(size_t chunk_word_size);
-  Metachunk* get_chunk_vs_with_expand(size_t chunk_word_size);
 
   // Expands/shrinks the committed space in a virtual space.  Delegates
   // to VirtualSpace
-  bool expand_by(size_t words, bool pre_touch = false);
-  bool shrink_by(size_t words);
+  bool expand_by(size_t min_words, size_t preferred_words);
 
   // In preparation for deleting this node, remove all the chunks
   // in the node from any freelist.
   void purge(ChunkManager* chunk_manager);
 
+  // If an allocation doesn't fit in the current node a new node is created.
+  // Allocate chunks out of the remaining committed space in this node
+  // to avoid wasting that memory.
+  // This always adds up because all the chunk sizes are multiples of
+  // the smallest chunk size.
+  void retire(ChunkManager* chunk_manager);
+
 #ifdef ASSERT
   // Debug support
-  static void verify_virtual_space_total();
-  static void verify_virtual_space_count();
   void mangle();
 #endif
 
   void print_on(outputStream* st) const;
 };
 
+#define assert_is_ptr_aligned(ptr, alignment) \
+  assert(is_ptr_aligned(ptr, alignment),      \
+    err_msg(PTR_FORMAT " is not aligned to "  \
+      SIZE_FORMAT, ptr, alignment))
+
+#define assert_is_size_aligned(size, alignment) \
+  assert(is_size_aligned(size, alignment),      \
+    err_msg(SIZE_FORMAT " is not aligned to "   \
+       SIZE_FORMAT, size, alignment))
+
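
A note for readers: the two macros above wrap small power-of-two alignment predicates. A minimal standalone sketch of that check, with hypothetical helper bodies (the VM defines the real is_ptr_aligned()/is_size_aligned() elsewhere):

#include <cassert>
#include <cstddef>
#include <cstdint>

// Standalone sketch of the alignment predicates behind the two macros above.
// Assumes alignment is a power of two, as the VM's helpers do.
static bool is_size_aligned(size_t size, size_t alignment) {
  return (size & (alignment - 1)) == 0;
}
static bool is_ptr_aligned(const void* ptr, size_t alignment) {
  return is_size_aligned((size_t)(uintptr_t)ptr, alignment);
}

int main() {
  assert(is_size_aligned(2 * 1024 * 1024, 1024 * 1024));   // 2M is 1M-aligned
  assert(!is_size_aligned(1024 * 1024 + 8, 1024 * 1024));  // 1M + 8 is not
  return 0;
}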
+
+// Decide if large pages should be committed when the memory is reserved.
+static bool should_commit_large_pages_when_reserving(size_t bytes) {
+  if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
+    size_t words = bytes / BytesPerWord;
+    bool is_class = false; // We never reserve large pages for the class space.
+    if (MetaspaceGC::can_expand(words, is_class) &&
+        MetaspaceGC::allowed_expansion() >= words) {
+      return true;
+    }
+  }
+
+  return false;
+}
+
   // bytes is the size of the associated virtualspace.
-VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
-  // align up to vm allocation granularity
-  byte_size = align_size_up(byte_size, os::vm_allocation_granularity());
+VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
+  assert_is_size_aligned(bytes, Metaspace::reserve_alignment());
 
   // This allocates memory with mmap.  For DumpSharedspaces, try to reserve
   // configurable address, generally at the top of the Java heap so other
   // memory addresses don't conflict.
   if (DumpSharedSpaces) {
-    char* shared_base = (char*)SharedBaseAddress;
-    _rs = ReservedSpace(byte_size, 0, false, shared_base, 0);
+    bool large_pages = false; // No large pages when dumping the CDS archive.
+    char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());
+
+    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base, 0);
     if (_rs.is_reserved()) {
       assert(shared_base == 0 || _rs.base() == shared_base, "should match");
     } else {
       // Get a mmap region anywhere if the SharedBaseAddress fails.
-      _rs = ReservedSpace(byte_size);
+      _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
     }
     MetaspaceShared::set_shared_rs(&_rs);
   } else {
-    _rs = ReservedSpace(byte_size);
+    bool large_pages = should_commit_large_pages_when_reserving(bytes);
+
+    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
   }
 
-  MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
+  if (_rs.is_reserved()) {
+    assert(_rs.base() != NULL, "Catch if we get a NULL address");
+    assert(_rs.size() != 0, "Catch if we get a 0 size");
+    assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment());
+    assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment());
+
+    MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
+  }
 }
 
 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
   Metachunk* chunk = first_chunk();
   Metachunk* invalid_chunk = (Metachunk*) top();
   while (chunk < invalid_chunk ) {
-    assert(chunk->is_free(), "Should be marked free");
-      MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
-      chunk_manager->remove_chunk(chunk);
-      assert(chunk->next() == NULL &&
-             chunk->prev() == NULL,
-             "Was not removed from its list");
-      chunk = (Metachunk*) next;
+    assert(chunk->is_tagged_free(), "Should be tagged free");
+    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
+    chunk_manager->remove_chunk(chunk);
+    assert(chunk->next() == NULL &&
+           chunk->prev() == NULL,
+           "Was not removed from its list");
+    chunk = (Metachunk*) next;
   }
 }
 
@@ -393,7 +419,7 @@
     // Don't count the chunks on the free lists.  Those are
     // still part of the VirtualSpaceNode but not currently
     // counted.
-    if (!chunk->is_free()) {
+    if (!chunk->is_tagged_free()) {
       count++;
     }
     chunk = (Metachunk*) next;
@@ -403,8 +429,6 @@
 #endif
 
 // List of VirtualSpaces for metadata allocation.
-// It has a  _next link for singly linked list and a MemRegion
-// for total space in the VirtualSpace.
 class VirtualSpaceList : public CHeapObj<mtClass> {
   friend class VirtualSpaceNode;
 
@@ -412,21 +436,19 @@
     VirtualSpaceSize = 256 * K
   };
 
-  // Global list of virtual spaces
   // Head of the list
   VirtualSpaceNode* _virtual_space_list;
   // virtual space currently being used for allocations
   VirtualSpaceNode* _current_virtual_space;
-  // Free chunk list for all other metadata
-  ChunkManager      _chunk_manager;
-
-  // Can this virtual list allocate >1 spaces?  Also, used to determine
-  // whether to allocate unlimited small chunks in this virtual space
+
+  // Is this VirtualSpaceList used for the compressed class space
   bool _is_class;
-  bool can_grow() const { return !is_class() || !UseCompressedKlassPointers; }
-
-  // Sum of space in all virtual spaces and number of virtual spaces
-  size_t _virtual_space_total;
+
+  // Sum of reserved and committed memory in the virtual spaces
+  size_t _reserved_words;
+  size_t _committed_words;
+
+  // Number of virtual spaces
   size_t _virtual_space_count;
 
   ~VirtualSpaceList();
@@ -440,12 +462,16 @@
     _current_virtual_space = v;
   }
 
-  void link_vs(VirtualSpaceNode* new_entry, size_t vs_word_size);
+  void link_vs(VirtualSpaceNode* new_entry);
 
   // Get another virtual space and add it to the list.  This
   // is typically prompted by a failed attempt to allocate a chunk
   // and is typically followed by the allocation of a chunk.
-  bool grow_vs(size_t vs_word_size);
+  bool create_new_virtual_space(size_t vs_word_size);
+
+  // Chunk up the unused committed space in the current
+  // virtual space and add the chunks to the free list.
+  void retire_current_virtual_space();
 
  public:
   VirtualSpaceList(size_t word_size);
@@ -457,37 +483,35 @@
                            size_t grow_chunks_by_words,
                            size_t medium_chunk_bunch);
 
-  // Get the first chunk for a Metaspace.  Used for
-  // special cases such as the boot class loader, reflection
-  // class loader and anonymous class loader.
-  Metachunk* get_initialization_chunk(size_t word_size, size_t chunk_bunch);
+  bool expand_node_by(VirtualSpaceNode* node,
+                      size_t min_words,
+                      size_t preferred_words);
+
+  bool expand_by(size_t min_words,
+                 size_t preferred_words);
 
   VirtualSpaceNode* current_virtual_space() {
     return _current_virtual_space;
   }
 
-  ChunkManager* chunk_manager() { return &_chunk_manager; }
   bool is_class() const { return _is_class; }
 
-  // Allocate the first virtualspace.
-  void initialize(size_t word_size);
-
-  size_t virtual_space_total() { return _virtual_space_total; }
-
-  void inc_virtual_space_total(size_t v);
-  void dec_virtual_space_total(size_t v);
+  bool initialization_succeeded() { return _virtual_space_list != NULL; }
+
+  size_t reserved_words()  { return _reserved_words; }
+  size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
+  size_t committed_words() { return _committed_words; }
+  size_t committed_bytes() { return committed_words() * BytesPerWord; }
+
+  void inc_reserved_words(size_t v);
+  void dec_reserved_words(size_t v);
+  void inc_committed_words(size_t v);
+  void dec_committed_words(size_t v);
   void inc_virtual_space_count();
   void dec_virtual_space_count();
 
   // Unlink empty VirtualSpaceNodes and free it.
-  void purge();
-
-  // Used and capacity in the entire list of virtual spaces.
-  // These are global values shared by all Metaspaces
-  size_t capacity_words_sum();
-  size_t capacity_bytes_sum() { return capacity_words_sum() * BytesPerWord; }
-  size_t used_words_sum();
-  size_t used_bytes_sum() { return used_words_sum() * BytesPerWord; }
+  void purge(ChunkManager* chunk_manager);
 
   bool contains(const void *ptr);
 
@@ -515,44 +539,16 @@
 
 class Metadebug : AllStatic {
   // Debugging support for Metaspaces
-  static int _deallocate_block_a_lot_count;
-  static int _deallocate_chunk_a_lot_count;
   static int _allocation_fail_alot_count;
 
  public:
-  static int deallocate_block_a_lot_count() {
-    return _deallocate_block_a_lot_count;
-  }
-  static void set_deallocate_block_a_lot_count(int v) {
-    _deallocate_block_a_lot_count = v;
-  }
-  static void inc_deallocate_block_a_lot_count() {
-    _deallocate_block_a_lot_count++;
-  }
-  static int deallocate_chunk_a_lot_count() {
-    return _deallocate_chunk_a_lot_count;
-  }
-  static void reset_deallocate_chunk_a_lot_count() {
-    _deallocate_chunk_a_lot_count = 1;
-  }
-  static void inc_deallocate_chunk_a_lot_count() {
-    _deallocate_chunk_a_lot_count++;
-  }
 
   static void init_allocation_fail_alot_count();
 #ifdef ASSERT
   static bool test_metadata_failure();
 #endif
-
-  static void deallocate_chunk_a_lot(SpaceManager* sm,
-                                     size_t chunk_word_size);
-  static void deallocate_block_a_lot(SpaceManager* sm,
-                                     size_t chunk_word_size);
-
 };
 
-int Metadebug::_deallocate_block_a_lot_count = 0;
-int Metadebug::_deallocate_chunk_a_lot_count = 0;
 int Metadebug::_allocation_fail_alot_count = 0;
 
 //  SpaceManager - used by Metaspace to handle allocations
@@ -568,18 +564,12 @@
   // Type of metadata allocated.
   Metaspace::MetadataType _mdtype;
 
-  // Chunk related size
-  size_t _medium_chunk_bunch;
-
   // List of chunks in use by this SpaceManager.  Allocations
   // are done from the current chunk.  The list is used for deallocating
   // chunks when the SpaceManager is freed.
   Metachunk* _chunks_in_use[NumberOfInUseLists];
   Metachunk* _current_chunk;
 
-  // Virtual space where allocation comes from.
-  VirtualSpaceList* _vs_list;
-
   // Number of small chunks to allocate to a manager
   // If class space manager, small chunks are unlimited
   static uint const _small_chunk_limit;
@@ -612,7 +602,9 @@
   }
 
   Metaspace::MetadataType mdtype() { return _mdtype; }
-  VirtualSpaceList* vs_list() const    { return _vs_list; }
+
+  VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
+  ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }
 
   Metachunk* current_chunk() const { return _current_chunk; }
   void set_current_chunk(Metachunk* v) {
@@ -623,6 +615,7 @@
 
   // Add chunk to the list of chunks in use
   void add_chunk(Metachunk* v, bool make_current);
+  void retire_current_chunk();
 
   Mutex* lock() const { return _lock; }
 
@@ -633,19 +626,22 @@
 
  public:
   SpaceManager(Metaspace::MetadataType mdtype,
-               Mutex* lock,
-               VirtualSpaceList* vs_list);
+               Mutex* lock);
   ~SpaceManager();
 
   enum ChunkMultiples {
     MediumChunkMultiple = 4
   };
 
+  bool is_class() { return _mdtype == Metaspace::ClassType; }
+
   // Accessors
-  size_t specialized_chunk_size() { return SpecializedChunk; }
-  size_t small_chunk_size() { return (size_t) vs_list()->is_class() ? ClassSmallChunk : SmallChunk; }
-  size_t medium_chunk_size() { return (size_t) vs_list()->is_class() ? ClassMediumChunk : MediumChunk; }
-  size_t medium_chunk_bunch() { return medium_chunk_size() * MediumChunkMultiple; }
+  size_t specialized_chunk_size() { return (size_t) is_class() ? ClassSpecializedChunk : SpecializedChunk; }
+  size_t small_chunk_size()       { return (size_t) is_class() ? ClassSmallChunk : SmallChunk; }
+  size_t medium_chunk_size()      { return (size_t) is_class() ? ClassMediumChunk : MediumChunk; }
+  size_t medium_chunk_bunch()     { return medium_chunk_size() * MediumChunkMultiple; }
+
+  size_t smallest_chunk_size()  { return specialized_chunk_size(); }
 
   size_t allocated_blocks_words() const { return _allocated_blocks_words; }
   size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
@@ -703,6 +699,9 @@
   // and allocates from that chunk.
   MetaWord* grow_and_allocate(size_t word_size);
 
+  // Notify memory usage to MemoryService.
+  void track_metaspace_memory_usage();
+
   // debugging support.
 
   void dump(outputStream* const out) const;
@@ -717,16 +716,11 @@
 #endif
 
   size_t get_raw_word_size(size_t word_size) {
-    // If only the dictionary is going to be used (i.e., no
-    // indexed free list), then there is a minimum size requirement.
-    // MinChunkSize is a placeholder for the real minimum size JJJ
     size_t byte_size = word_size * BytesPerWord;
 
-    size_t byte_size_with_overhead = byte_size + Metablock::overhead();
-
-    size_t raw_bytes_size = MAX2(byte_size_with_overhead,
-                                 Metablock::min_block_byte_size());
-    raw_bytes_size = ARENA_ALIGN(raw_bytes_size);
+    size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
+    raw_bytes_size = align_size_up(raw_bytes_size, Metachunk::object_alignment());
+
     size_t raw_word_size = raw_bytes_size / BytesPerWord;
     assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
 
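
The new raw-size computation is easy to check by hand. A standalone sketch under assumed values (a 16-byte Metablock header and 8-byte object alignment; the real values come from sizeof(Metablock) and Metachunk::object_alignment()):

#include <algorithm>
#include <cassert>
#include <cstddef>

// Sketch of get_raw_word_size(): pad the request to at least a Metablock
// header, then round up to the chunk object alignment.
static size_t align_up(size_t v, size_t a) { return (v + a - 1) & ~(a - 1); }

size_t raw_word_size(size_t word_size, size_t bytes_per_word = 8) {
  size_t byte_size = word_size * bytes_per_word;
  size_t raw_bytes = std::max(byte_size, (size_t)16);  // sizeof(Metablock) stand-in
  raw_bytes = align_up(raw_bytes, 8);                  // object_alignment() stand-in
  return raw_bytes / bytes_per_word;
}

int main() {
  assert(raw_word_size(1) == 2);  // 8 bytes padded up to the 16-byte header
  assert(raw_word_size(3) == 3);  // 24 bytes, already aligned
  return 0;
}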
@@ -749,7 +743,7 @@
   _container_count++;
   assert(_container_count == container_count_slow(),
          err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT
-                 "container_count_slow() " SIZE_FORMAT,
+                 " container_count_slow() " SIZE_FORMAT,
                  _container_count, container_count_slow()));
 }
 
@@ -762,7 +756,7 @@
 void VirtualSpaceNode::verify_container_count() {
   assert(_container_count == container_count_slow(),
     err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT
-            "container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
+            " container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
 }
 #endif
 
@@ -779,17 +773,8 @@
   }
 }
 
-Metablock* BlockFreelist::initialize_free_chunk(MetaWord* p, size_t word_size) {
-  Metablock* block = (Metablock*) p;
-  block->set_word_size(word_size);
-  block->set_prev(NULL);
-  block->set_next(NULL);
-
-  return block;
-}
-
 void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
-  Metablock* free_chunk = initialize_free_chunk(p, word_size);
+  Metablock* free_chunk = ::new (p) Metablock(word_size);
   if (dictionary() == NULL) {
    _dictionary = new BlockTreeDictionary();
   }
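
The replacement uses placement new to build the free-list header directly in the freed memory. A minimal sketch of the idiom, with FreeBlock as a hypothetical stand-in for Metablock:

#include <new>
#include <cstddef>

// The freed memory itself becomes the free-list node by constructing a
// header object in place.
struct FreeBlock {
  size_t     _word_size;
  FreeBlock* _next;
  explicit FreeBlock(size_t word_size) : _word_size(word_size), _next(NULL) {}
};

int main() {
  void* p = ::operator new(sizeof(FreeBlock));  // stands in for the freed block
  FreeBlock* b = ::new (p) FreeBlock(16);       // construct the header in place
  bool ok = (b->_word_size == 16);
  ::operator delete(p);
  return ok ? 0 : 1;
}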
@@ -807,12 +792,25 @@
   }
 
   Metablock* free_block =
-    dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::exactly);
+    dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::atLeast);
   if (free_block == NULL) {
     return NULL;
   }
 
-  return (MetaWord*) free_block;
+  const size_t block_size = free_block->size();
+  if (block_size > WasteMultiplier * word_size) {
+    return_block((MetaWord*)free_block, block_size);
+    return NULL;
+  }
+
+  MetaWord* new_block = (MetaWord*)free_block;
+  assert(block_size >= word_size, "Incorrect size of block from freelist");
+  const size_t unused = block_size - word_size;
+  if (unused >= TreeChunk<Metablock, FreeList>::min_size()) {
+    return_block(new_block + word_size, unused);
+  }
+
+  return new_block;
 }
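
The new get_block() accepts an atLeast match but caps the waste. An illustrative restatement with concrete numbers (kMinRemainderWords is a made-up stand-in for TreeChunk<Metablock, FreeList>::min_size()):

#include <cstddef>

const size_t kWasteMultiplier   = 4;  // mirrors WasteMultiplier above
const size_t kMinRemainderWords = 4;  // hypothetical splittable-tail minimum

// Returns how many words of a candidate block of block_size words are kept by
// an allocation of word_size words; 0 means the block is rejected outright.
size_t words_consumed(size_t block_size, size_t word_size) {
  if (block_size > kWasteMultiplier * word_size) {
    return 0;                 // too wasteful: block goes back, lookup fails
  }
  size_t unused = block_size - word_size;
  if (unused >= kMinRemainderWords) {
    return word_size;         // tail is split off and returned to the freelist
  }
  return block_size;          // tail too small to track: it rides along as waste
}

int main() {
  // A 100-word request against a 500-word block is rejected (500 > 4 * 100);
  // against a 300-word block it succeeds and the 200-word tail goes back.
  return (words_consumed(500, 100) == 0 && words_consumed(300, 100) == 100) ? 0 : 1;
}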
 
 void BlockFreelist::print_on(outputStream* st) const {
@@ -853,11 +851,17 @@
   MetaWord* chunk_limit = top();
   assert(chunk_limit != NULL, "Not safe to call this method");
 
+  // The virtual spaces are always expanded by the
+  // commit granularity to enforce the following condition.
+  // Without this the is_available check will not work correctly.
+  assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
+      "The committed memory doesn't match the expanded memory.");
+
   if (!is_available(chunk_word_size)) {
     if (TraceMetadataChunkAllocation) {
-      tty->print("VirtualSpaceNode::take_from_committed() not available %d words ", chunk_word_size);
+      gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available %d words ", chunk_word_size);
       // Dump some information about the virtual space that is nearly full
-      print_on(tty);
+      print_on(gclog_or_tty);
     }
     return NULL;
   }
@@ -872,26 +876,24 @@
 
 
 // Expand the virtual space (commit more of the reserved space)
-bool VirtualSpaceNode::expand_by(size_t words, bool pre_touch) {
-  size_t bytes = words * BytesPerWord;
-  bool result =  virtual_space()->expand_by(bytes, pre_touch);
-  if (TraceMetavirtualspaceAllocation && !result) {
-    gclog_or_tty->print_cr("VirtualSpaceNode::expand_by() failed "
-                           "for byte size " SIZE_FORMAT, bytes);
-    virtual_space()->print();
+bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
+  size_t min_bytes = min_words * BytesPerWord;
+  size_t preferred_bytes = preferred_words * BytesPerWord;
+
+  size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
+
+  if (uncommitted < min_bytes) {
+    return false;
   }
+
+  size_t commit = MIN2(preferred_bytes, uncommitted);
+  bool result = virtual_space()->expand_by(commit, false);
+
+  assert(result, "Failed to commit memory");
+
   return result;
 }
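
The min/preferred contract can be summarized in a few lines. A sketch under the assumption that the caller supplies the reserved-but-uncommitted byte count:

#include <algorithm>
#include <cstddef>

// Fail fast if even the minimum cannot be committed in this node; otherwise
// commit as much of the preferred amount as the remaining reservation allows.
bool choose_commit_size(size_t min_bytes, size_t preferred_bytes,
                        size_t uncommitted_bytes, size_t* commit_bytes) {
  if (uncommitted_bytes < min_bytes) {
    return false;                                        // caller must get a new node
  }
  *commit_bytes = std::min(preferred_bytes, uncommitted_bytes);
  return true;
}

int main() {
  size_t commit = 0;
  bool ok = choose_commit_size(64, 1024, 512, &commit);  // wants 1024, only 512 left
  return (ok && commit == 512) ? 0 : 1;
}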
 
-// Shrink the virtual space (commit more of the reserved space)
-bool VirtualSpaceNode::shrink_by(size_t words) {
-  size_t bytes = words * BytesPerWord;
-  virtual_space()->shrink_by(bytes);
-  return true;
-}
-
-// Add another chunk to the chunk list.
-
 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
   assert_lock_strong(SpaceManager::expand_lock());
   Metachunk* result = take_from_committed(chunk_word_size);
@@ -901,35 +903,29 @@
   return result;
 }
 
-Metachunk* VirtualSpaceNode::get_chunk_vs_with_expand(size_t chunk_word_size) {
-  assert_lock_strong(SpaceManager::expand_lock());
-
-  Metachunk* new_chunk = get_chunk_vs(chunk_word_size);
-
-  if (new_chunk == NULL) {
-    // Only a small part of the virtualspace is committed when first
-    // allocated so committing more here can be expected.
-    size_t page_size_words = os::vm_page_size() / BytesPerWord;
-    size_t aligned_expand_vs_by_words = align_size_up(chunk_word_size,
-                                                    page_size_words);
-    expand_by(aligned_expand_vs_by_words, false);
-    new_chunk = get_chunk_vs(chunk_word_size);
-  }
-  return new_chunk;
-}
-
 bool VirtualSpaceNode::initialize() {
 
   if (!_rs.is_reserved()) {
     return false;
   }
 
-  // An allocation out of this Virtualspace that is larger
-  // than an initial commit size can waste that initial committed
-  // space.
-  size_t committed_byte_size = 0;
-  bool result = virtual_space()->initialize(_rs, committed_byte_size);
+  // These are necessary restrictions to make sure that the virtual space always
+  // grows in steps of Metaspace::commit_alignment(). If both base and size are
+  // aligned, only the middle alignment of the VirtualSpace is used.
+  assert_is_ptr_aligned(_rs.base(), Metaspace::commit_alignment());
+  assert_is_size_aligned(_rs.size(), Metaspace::commit_alignment());
+
+  // ReservedSpaces marked as special will have the entire memory
+  // pre-committed. Setting a committed size will make sure that
+  // committed_size and actual_committed_size agree.
+  size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
+
+  bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
+                                            Metaspace::commit_alignment());
   if (result) {
+    assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
+        "Checking that the pre-committed memory was registered by the VirtualSpace");
+
     set_top((MetaWord*)virtual_space()->low());
     set_reserved(MemRegion((HeapWord*)_rs.base(),
                  (HeapWord*)(_rs.base() + _rs.size())));
@@ -977,13 +973,32 @@
   }
 }
 
-void VirtualSpaceList::inc_virtual_space_total(size_t v) {
+void VirtualSpaceList::inc_reserved_words(size_t v) {
   assert_lock_strong(SpaceManager::expand_lock());
-  _virtual_space_total = _virtual_space_total + v;
+  _reserved_words = _reserved_words + v;
+}
+void VirtualSpaceList::dec_reserved_words(size_t v) {
+  assert_lock_strong(SpaceManager::expand_lock());
+  _reserved_words = _reserved_words - v;
 }
-void VirtualSpaceList::dec_virtual_space_total(size_t v) {
+
+#define assert_committed_below_limit()                             \
+  assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize,      \
+      err_msg("Too much committed memory. Committed: " SIZE_FORMAT \
+              " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
+          MetaspaceAux::committed_bytes(), MaxMetaspaceSize));
+
+void VirtualSpaceList::inc_committed_words(size_t v) {
   assert_lock_strong(SpaceManager::expand_lock());
-  _virtual_space_total = _virtual_space_total - v;
+  _committed_words = _committed_words + v;
+
+  assert_committed_below_limit();
+}
+void VirtualSpaceList::dec_committed_words(size_t v) {
+  assert_lock_strong(SpaceManager::expand_lock());
+  _committed_words = _committed_words - v;
+
+  assert_committed_below_limit();
 }
 
 void VirtualSpaceList::inc_virtual_space_count() {
@@ -1005,13 +1020,13 @@
   }
 
   // Chunk is being removed from the chunks free list.
-  dec_free_chunks_total(chunk->capacity_word_size());
+  dec_free_chunks_total(chunk->word_size());
 }
 
 // Walk the list of VirtualSpaceNodes and delete
 // nodes with a 0 container_count.  Remove Metachunks in
 // the node from their respective freelists.
-void VirtualSpaceList::purge() {
+void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
   assert_lock_strong(SpaceManager::expand_lock());
   // Don't use a VirtualSpaceListIterator because this
   // list is being changed and a straightforward use of an iterator is not safe.
@@ -1026,15 +1041,16 @@
     if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
       // Unlink it from the list
       if (prev_vsl == vsl) {
-        // This is the case of the current note being the first note.
-        assert(vsl == virtual_space_list(), "Expected to be the first note");
+        // This is the case of the current node being the first node.
+        assert(vsl == virtual_space_list(), "Expected to be the first node");
         set_virtual_space_list(vsl->next());
       } else {
         prev_vsl->set_next(vsl->next());
       }
 
-      vsl->purge(chunk_manager());
-      dec_virtual_space_total(vsl->reserved()->word_size());
+      vsl->purge(chunk_manager);
+      dec_reserved_words(vsl->reserved_words());
+      dec_committed_words(vsl->committed_words());
       dec_virtual_space_count();
       purged_vsl = vsl;
       delete vsl;
@@ -1054,68 +1070,61 @@
 #endif
 }
 
-size_t VirtualSpaceList::used_words_sum() {
-  size_t allocated_by_vs = 0;
-  VirtualSpaceListIterator iter(virtual_space_list());
-  while (iter.repeat()) {
-    VirtualSpaceNode* vsl = iter.get_next();
-    // Sum used region [bottom, top) in each virtualspace
-    allocated_by_vs += vsl->used_words_in_vs();
-  }
-  assert(allocated_by_vs >= chunk_manager()->free_chunks_total(),
-    err_msg("Total in free chunks " SIZE_FORMAT
-            " greater than total from virtual_spaces " SIZE_FORMAT,
-            allocated_by_vs, chunk_manager()->free_chunks_total()));
-  size_t used =
-    allocated_by_vs - chunk_manager()->free_chunks_total();
-  return used;
+void VirtualSpaceList::retire_current_virtual_space() {
+  assert_lock_strong(SpaceManager::expand_lock());
+
+  VirtualSpaceNode* vsn = current_virtual_space();
+
+  ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
+                                  Metaspace::chunk_manager_metadata();
+
+  vsn->retire(cm);
 }
 
-// Space available in all MetadataVirtualspaces allocated
-// for metadata.  This is the upper limit on the capacity
-// of chunks allocated out of all the MetadataVirtualspaces.
-size_t VirtualSpaceList::capacity_words_sum() {
-  size_t capacity = 0;
-  VirtualSpaceListIterator iter(virtual_space_list());
-  while (iter.repeat()) {
-    VirtualSpaceNode* vsl = iter.get_next();
-    capacity += vsl->capacity_words_in_vs();
+void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
+  for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
+    ChunkIndex index = (ChunkIndex)i;
+    size_t chunk_size = chunk_manager->free_chunks(index)->size();
+
+    while (free_words_in_vs() >= chunk_size) {
+      DEBUG_ONLY(verify_container_count();)
+      Metachunk* chunk = get_chunk_vs(chunk_size);
+      assert(chunk != NULL, "allocation should have been successful");
+
+      chunk_manager->return_chunks(index, chunk);
+      chunk_manager->inc_free_chunks_total(chunk_size);
+      DEBUG_ONLY(verify_container_count();)
+    }
   }
-  return capacity;
+  assert(free_words_in_vs() == 0, "should be empty now");
 }
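
The retire() loop carves the largest chunks first and relies on the multiple-of-smallest invariant to leave nothing behind. A standalone sketch with made-up word counts:

#include <cstddef>

// Walk the chunk sizes from largest to smallest and carve chunks while they
// still fit. Every size is a multiple of the smallest, so the leftover hits 0.
size_t retire_words(size_t free_words) {
  const size_t chunk_sizes[] = { 64, 32, 8 };   // "medium", "small", "specialized"
  for (int i = 0; i < 3; i++) {
    while (free_words >= chunk_sizes[i]) {
      free_words -= chunk_sizes[i];             // stands in for get_chunk_vs() + return_chunks()
    }
  }
  return free_words;                            // 0 when the invariant holds
}

int main() {
  return retire_words(104) == 0 ? 0 : 1;        // 104 = 64 + 32 + 8
}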
 
-VirtualSpaceList::VirtualSpaceList(size_t word_size ) :
+VirtualSpaceList::VirtualSpaceList(size_t word_size) :
                                    _is_class(false),
                                    _virtual_space_list(NULL),
                                    _current_virtual_space(NULL),
-                                   _virtual_space_total(0),
+                                   _reserved_words(0),
+                                   _committed_words(0),
                                    _virtual_space_count(0) {
   MutexLockerEx cl(SpaceManager::expand_lock(),
                    Mutex::_no_safepoint_check_flag);
-  bool initialization_succeeded = grow_vs(word_size);
-
-  _chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk);
-  _chunk_manager.free_chunks(SmallIndex)->set_size(SmallChunk);
-  _chunk_manager.free_chunks(MediumIndex)->set_size(MediumChunk);
-  assert(initialization_succeeded,
-    " VirtualSpaceList initialization should not fail");
+  create_new_virtual_space(word_size);
 }
 
 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
                                    _is_class(true),
                                    _virtual_space_list(NULL),
                                    _current_virtual_space(NULL),
-                                   _virtual_space_total(0),
+                                   _reserved_words(0),
+                                   _committed_words(0),
                                    _virtual_space_count(0) {
   MutexLockerEx cl(SpaceManager::expand_lock(),
                    Mutex::_no_safepoint_check_flag);
   VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
   bool succeeded = class_entry->initialize();
-  _chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk);
-  _chunk_manager.free_chunks(SmallIndex)->set_size(ClassSmallChunk);
-  _chunk_manager.free_chunks(MediumIndex)->set_size(ClassMediumChunk);
-  assert(succeeded, " VirtualSpaceList initialization should not fail");
-  link_vs(class_entry, rs.size()/BytesPerWord);
+  if (succeeded) {
+    link_vs(class_entry);
+  }
 }
 
 size_t VirtualSpaceList::free_bytes() {
@@ -1123,14 +1132,24 @@
 }
 
 // Allocate another meta virtual space and add it to the list.
-bool VirtualSpaceList::grow_vs(size_t vs_word_size) {
+bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
   assert_lock_strong(SpaceManager::expand_lock());
-  if (vs_word_size == 0) {
+
+  if (is_class()) {
+    assert(false, "We currently don't support more than one VirtualSpace for"
+                  " the compressed class space. The initialization of the"
+                  " CCS uses another code path and should not hit this path.");
     return false;
   }
+
+  if (vs_word_size == 0) {
+    assert(false, "vs_word_size should always be at least _reserve_alignment large.");
+    return false;
+  }
+
   // Reserve the space
   size_t vs_byte_size = vs_word_size * BytesPerWord;
-  assert(vs_byte_size % os::vm_page_size() == 0, "Not aligned");
+  assert_is_size_aligned(vs_byte_size, Metaspace::reserve_alignment());
 
   // Allocate the meta virtual space and initialize it.
   VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
@@ -1138,93 +1157,124 @@
     delete new_entry;
     return false;
   } else {
+    assert(new_entry->reserved_words() == vs_word_size,
+        "Reserved memory size differs from requested memory size");
     // ensure lock-free iteration sees fully initialized node
     OrderAccess::storestore();
-    link_vs(new_entry, vs_word_size);
+    link_vs(new_entry);
     return true;
   }
 }
 
-void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry, size_t vs_word_size) {
+void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
   if (virtual_space_list() == NULL) {
       set_virtual_space_list(new_entry);
   } else {
     current_virtual_space()->set_next(new_entry);
   }
   set_current_virtual_space(new_entry);
-  inc_virtual_space_total(vs_word_size);
+  inc_reserved_words(new_entry->reserved_words());
+  inc_committed_words(new_entry->committed_words());
   inc_virtual_space_count();
 #ifdef ASSERT
   new_entry->mangle();
 #endif
   if (TraceMetavirtualspaceAllocation && Verbose) {
     VirtualSpaceNode* vsl = current_virtual_space();
-    vsl->print_on(tty);
+    vsl->print_on(gclog_or_tty);
+  }
+}
+
+bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
+                                      size_t min_words,
+                                      size_t preferred_words) {
+  size_t before = node->committed_words();
+
+  bool result = node->expand_by(min_words, preferred_words);
+
+  size_t after = node->committed_words();
+
+  // after and before can be the same if the memory was pre-committed.
+  assert(after >= before, "Inconsistency");
+  inc_committed_words(after - before);
+
+  return result;
+}
+
+bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
+  assert_is_size_aligned(min_words,       Metaspace::commit_alignment_words());
+  assert_is_size_aligned(preferred_words, Metaspace::commit_alignment_words());
+  assert(min_words <= preferred_words, "Invalid arguments");
+
+  if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
+    return false;
   }
+
+  size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
+  if (allowed_expansion_words < min_words) {
+    return false;
+  }
+
+  size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);
+
+  // Commit more memory from the current virtual space.
+  bool vs_expanded = expand_node_by(current_virtual_space(),
+                                    min_words,
+                                    max_expansion_words);
+  if (vs_expanded) {
+    return true;
+  }
+  retire_current_virtual_space();
+
+  // Get another virtual space.
+  size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
+  grow_vs_words = align_size_up(grow_vs_words, Metaspace::reserve_alignment_words());
+
+  if (create_new_virtual_space(grow_vs_words)) {
+    if (current_virtual_space()->is_pre_committed()) {
+      // The memory was pre-committed, so we are done here.
+      assert(min_words <= current_virtual_space()->committed_words(),
+          "The new VirtualSpace was pre-committed, so it"
+          "should be large enough to fit the alloc request.");
+      return true;
+    }
+
+    return expand_node_by(current_virtual_space(),
+                          min_words,
+                          max_expansion_words);
+  }
+
+  return false;
 }
 
 Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
                                            size_t grow_chunks_by_words,
                                            size_t medium_chunk_bunch) {
 
-  // Get a chunk from the chunk freelist
-  Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
+  // Allocate a chunk out of the current virtual space.
+  Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
 
   if (next != NULL) {
-    next->container()->inc_container_count();
-  } else {
-    // Allocate a chunk out of the current virtual space.
-    next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
+    return next;
   }
 
-  if (next == NULL) {
-    // Not enough room in current virtual space.  Try to commit
-    // more space.
-    size_t expand_vs_by_words = MAX2(medium_chunk_bunch,
-                                     grow_chunks_by_words);
-    size_t page_size_words = os::vm_page_size() / BytesPerWord;
-    size_t aligned_expand_vs_by_words = align_size_up(expand_vs_by_words,
-                                                        page_size_words);
-    bool vs_expanded =
-      current_virtual_space()->expand_by(aligned_expand_vs_by_words, false);
-    if (!vs_expanded) {
-      // Should the capacity of the metaspaces be expanded for
-      // this allocation?  If it's the virtual space for classes and is
-      // being used for CompressedHeaders, don't allocate a new virtualspace.
-      if (can_grow() && MetaspaceGC::should_expand(this, word_size)) {
-        // Get another virtual space.
-          size_t grow_vs_words =
-            MAX2((size_t)VirtualSpaceSize, aligned_expand_vs_by_words);
-        if (grow_vs(grow_vs_words)) {
-          // Got it.  It's on the list now.  Get a chunk from it.
-          next = current_virtual_space()->get_chunk_vs_with_expand(grow_chunks_by_words);
-        }
-      } else {
-        // Allocation will fail and induce a GC
-        if (TraceMetadataChunkAllocation && Verbose) {
-          gclog_or_tty->print_cr("VirtualSpaceList::get_new_chunk():"
-            " Fail instead of expand the metaspace");
-        }
-      }
-    } else {
-      // The virtual space expanded, get a new chunk
-      next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
-      assert(next != NULL, "Just expanded, should succeed");
-    }
+  // The expand amount is currently only determined by the requested sizes
+  // and not by how much committed memory is left in the current virtual space.
+
+  size_t min_word_size       = align_size_up(grow_chunks_by_words, Metaspace::commit_alignment_words());
+  size_t preferred_word_size = align_size_up(medium_chunk_bunch,   Metaspace::commit_alignment_words());
+  if (min_word_size >= preferred_word_size) {
+    // Can happen when humongous chunks are allocated.
+    preferred_word_size = min_word_size;
   }
 
-  assert(next == NULL || (next->next() == NULL && next->prev() == NULL),
-         "New chunk is still on some list");
-  return next;
-}
-
-Metachunk* VirtualSpaceList::get_initialization_chunk(size_t chunk_word_size,
-                                                      size_t chunk_bunch) {
-  // Get a chunk from the chunk freelist
-  Metachunk* new_chunk = get_new_chunk(chunk_word_size,
-                                       chunk_word_size,
-                                       chunk_bunch);
-  return new_chunk;
+  bool expanded = expand_by(min_word_size, preferred_word_size);
+  if (expanded) {
+    next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
+    assert(next != NULL, "The allocation was expected to succeed after the expansion");
+  }
+
+  return next;
 }
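
How the expansion request is derived here can be restated compactly. A sketch with a made-up 16-word commit granularity:

#include <cstddef>

// Both the required and the preferred growth are rounded up to the commit
// granularity; a humongous request can push the minimum above the preferred.
static size_t align_up(size_t v, size_t a) { return (v + a - 1) / a * a; }

void expansion_request(size_t grow_chunks_by_words, size_t medium_chunk_bunch,
                       size_t* min_words, size_t* preferred_words) {
  const size_t commit_granularity_words = 16;  // made-up value
  *min_words       = align_up(grow_chunks_by_words, commit_granularity_words);
  *preferred_words = align_up(medium_chunk_bunch, commit_granularity_words);
  if (*min_words >= *preferred_words) {
    *preferred_words = *min_words;  // humongous chunk: the minimum dominates
  }
}

int main() {
  size_t min_w = 0, pref_w = 0;
  expansion_request(1000, 256, &min_w, &pref_w);   // humongous request
  return (min_w == 1008 && pref_w == 1008) ? 0 : 1;
}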
 
 void VirtualSpaceList::print_on(outputStream* st) const {
@@ -1273,95 +1323,96 @@
 // Calculate the amount to increase the high water mark (HWM).
 // Increase by a minimum amount (MinMetaspaceExpansion) so that
 // another expansion is not requested too soon.  If that is not
-// enough to satisfy the allocation (i.e. big enough for a word_size
-// allocation), increase by MaxMetaspaceExpansion.  If that is still
-// not enough, expand by the size of the allocation (word_size) plus
-// some.
-size_t MetaspaceGC::delta_capacity_until_GC(size_t word_size) {
-  size_t before_inc = MetaspaceGC::capacity_until_GC();
-  size_t min_delta_words = MinMetaspaceExpansion / BytesPerWord;
-  size_t max_delta_words = MaxMetaspaceExpansion / BytesPerWord;
-  size_t page_size_words = os::vm_page_size() / BytesPerWord;
-  size_t size_delta_words = align_size_up(word_size, page_size_words);
-  size_t delta_words = MAX2(size_delta_words, min_delta_words);
-  if (delta_words > min_delta_words) {
+// enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
+// If that is still not enough, expand by the size of the allocation
+// plus some.
+size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
+  size_t min_delta = MinMetaspaceExpansion;
+  size_t max_delta = MaxMetaspaceExpansion;
+  size_t delta = align_size_up(bytes, Metaspace::commit_alignment());
+
+  if (delta <= min_delta) {
+    delta = min_delta;
+  } else if (delta <= max_delta) {
     // Don't want to hit the high water mark on the next
     // allocation so make the delta greater than just enough
     // for this allocation.
-    delta_words = MAX2(delta_words, max_delta_words);
-    if (delta_words > max_delta_words) {
-      // This allocation is large but the next ones are probably not
-      // so increase by the minimum.
-      delta_words = delta_words + min_delta_words;
-    }
+    delta = max_delta;
+  } else {
+    // This allocation is large but the next ones are probably not
+    // so increase by the minimum.
+    delta = delta + min_delta;
   }
-  return delta_words;
+
+  assert_is_size_aligned(delta, Metaspace::commit_alignment());
+
+  return delta;
+}
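
The three-branch delta policy is easiest to see with numbers. A standalone restatement under assumed flag values (MinMetaspaceExpansion = 256 KB, MaxMetaspaceExpansion = 4 MB, commit alignment = 64 KB; the real values are VM flags):

#include <cassert>
#include <cstddef>

const size_t K = 1024;
const size_t MinExp = 256 * K, MaxExp = 4 * K * K, Align = 64 * K;

static size_t align_up(size_t v, size_t a) { return (v + a - 1) & ~(a - 1); }

size_t delta_capacity_until_gc(size_t bytes) {
  size_t delta = align_up(bytes, Align);
  if (delta <= MinExp)      delta = MinExp;          // small request: grow by the floor
  else if (delta <= MaxExp) delta = MaxExp;          // medium: overshoot to delay the next HWM hit
  else                      delta = delta + MinExp;  // huge one-off: request size plus some
  return delta;
}

int main() {
  assert(delta_capacity_until_gc(10 * K) == MinExp);              // 10 KB -> 256 KB
  assert(delta_capacity_until_gc(K * K) == MaxExp);               // 1 MB  -> 4 MB
  assert(delta_capacity_until_gc(8 * K * K) == 8 * K * K + MinExp); // 8 MB -> 8 MB + 256 KB
  return 0;
}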
+
+size_t MetaspaceGC::capacity_until_GC() {
+  size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
+  assert(value >= MetaspaceSize, "Not initialized properly?");
+  return value;
 }
 
-bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) {
-
-  // If the user wants a limit, impose one.
-  // The reason for someone using this flag is to limit reserved space.  So
-  // for non-class virtual space, compare against virtual spaces that are reserved.
-  // For class virtual space, we only compare against the committed space, not
-  // reserved space, because this is a larger space prereserved for compressed
-  // class pointers.
-  if (!FLAG_IS_DEFAULT(MaxMetaspaceSize)) {
-    size_t real_allocated = Metaspace::space_list()->virtual_space_total() +
-              MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType);
-    if (real_allocated >= MaxMetaspaceSize) {
+size_t MetaspaceGC::inc_capacity_until_GC(size_t v) {
+  assert_is_size_aligned(v, Metaspace::commit_alignment());
+
+  return (size_t)Atomic::add_ptr(v, &_capacity_until_GC);
+}
+
+size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
+  assert_is_size_aligned(v, Metaspace::commit_alignment());
+
+  return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
+}
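
The HWM is now read with an acquire load and moved with atomic adds instead of a guarded plain store. The same pattern sketched with std::atomic (the VM uses OrderAccess/Atomic, not <atomic>):

#include <atomic>
#include <cstddef>

static std::atomic<size_t> capacity_until_gc(0);

size_t read_hwm()        { return capacity_until_gc.load(std::memory_order_acquire); }
size_t inc_hwm(size_t v) { return capacity_until_gc.fetch_add(v) + v; }  // returns new value
size_t dec_hwm(size_t v) { return capacity_until_gc.fetch_sub(v) - v; }  // returns new value

int main() {
  inc_hwm(1024);
  dec_hwm(256);
  return read_hwm() == 768 ? 0 : 1;
}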
+
+bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
+  // Check if the compressed class space is full.
+  if (is_class && Metaspace::using_class_space()) {
+    size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
+    if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
       return false;
     }
   }
 
-  // Class virtual space should always be expanded.  Call GC for the other
-  // metadata virtual space.
-  if (Metaspace::using_class_space() &&
-      (vsl == Metaspace::class_space_list())) return true;
-
-  // If this is part of an allocation after a GC, expand
-  // unconditionally.
-  if (MetaspaceGC::expand_after_GC()) {
-    return true;
+  // Check if the user has imposed a limit on the metaspace memory.
+  size_t committed_bytes = MetaspaceAux::committed_bytes();
+  if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
+    return false;
   }
 
-
-  // If the capacity is below the minimum capacity, allow the
-  // expansion.  Also set the high-water-mark (capacity_until_GC)
-  // to that minimum capacity so that a GC will not be induced
-  // until that minimum capacity is exceeded.
-  size_t committed_capacity_bytes = MetaspaceAux::allocated_capacity_bytes();
-  size_t metaspace_size_bytes = MetaspaceSize;
-  if (committed_capacity_bytes < metaspace_size_bytes ||
-      capacity_until_GC() == 0) {
-    set_capacity_until_GC(metaspace_size_bytes);
-    return true;
-  } else {
-    if (committed_capacity_bytes < capacity_until_GC()) {
-      return true;
-    } else {
-      if (TraceMetadataChunkAllocation && Verbose) {
-        gclog_or_tty->print_cr("  allocation request size " SIZE_FORMAT
-                        "  capacity_until_GC " SIZE_FORMAT
-                        "  allocated_capacity_bytes " SIZE_FORMAT,
-                        word_size,
-                        capacity_until_GC(),
-                        MetaspaceAux::allocated_capacity_bytes());
-      }
-      return false;
-    }
+  return true;
+}
+
+size_t MetaspaceGC::allowed_expansion() {
+  size_t committed_bytes = MetaspaceAux::committed_bytes();
+
+  size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
+
+  // Always grant expansion if we are initializing the JVM,
+  // or if the GC_locker is preventing GCs.
+  if (!is_init_completed() || GC_locker::is_active_and_needs_gc()) {
+    return left_until_max / BytesPerWord;
   }
+
+  size_t capacity_until_gc = capacity_until_GC();
+
+  if (capacity_until_gc <= committed_bytes) {
+    return 0;
+  }
+
+  size_t left_until_GC = capacity_until_gc - committed_bytes;
+  size_t left_to_commit = MIN2(left_until_GC, left_until_max);
+
+  return left_to_commit / BytesPerWord;
 }
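
allowed_expansion() grants the smaller of the two remaining budgets. A worked example with made-up byte values:

#include <algorithm>
#include <cassert>
#include <cstddef>

// The grant is the smaller of "room below the GC high-water mark" and
// "room below MaxMetaspaceSize", converted to words.
size_t allowed_expansion_words(size_t committed, size_t capacity_until_gc,
                               size_t max_metaspace, size_t bytes_per_word) {
  size_t left_until_max = max_metaspace - committed;
  if (capacity_until_gc <= committed) {
    return 0;  // already at/over the HWM: force a GC before growing
  }
  size_t left_until_gc = capacity_until_gc - committed;
  return std::min(left_until_gc, left_until_max) / bytes_per_word;
}

int main() {
  // committed 30 MB, HWM 40 MB, cap 32 MB -> only 2 MB (the cap binds).
  assert(allowed_expansion_words(30u << 20, 40u << 20, 32u << 20, 8) == (2u << 20) / 8);
  return 0;
}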
 
-
-
 void MetaspaceGC::compute_new_size() {
   assert(_shrink_factor <= 100, "invalid shrink factor");
   uint current_shrink_factor = _shrink_factor;
   _shrink_factor = 0;
 
-  // Until a faster way of calculating the "used" quantity is implemented,
-  // use "capacity".
   const size_t used_after_gc = MetaspaceAux::allocated_capacity_bytes();
   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
 
@@ -1393,9 +1444,10 @@
     // If we have less capacity below the metaspace HWM, then
     // increment the HWM.
     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
+    expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment());
     // Don't expand unless it's significant
     if (expand_bytes >= MinMetaspaceExpansion) {
-      MetaspaceGC::set_capacity_until_GC(capacity_until_GC + expand_bytes);
+      MetaspaceGC::inc_capacity_until_GC(expand_bytes);
     }
     if (PrintGCDetails && Verbose) {
       size_t new_capacity_until_GC = capacity_until_GC;
@@ -1452,6 +1504,9 @@
       // on the third call, and 100% by the fourth call.  But if we recompute
       // size without shrinking, it goes back to 0%.
       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
+
+      shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment());
+
       assert(shrink_bytes <= max_shrink_bytes,
         err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
           shrink_bytes, max_shrink_bytes));
@@ -1483,60 +1538,12 @@
   // Don't shrink unless it's significant
   if (shrink_bytes >= MinMetaspaceExpansion &&
       ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
-    MetaspaceGC::set_capacity_until_GC(capacity_until_GC - shrink_bytes);
+    MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
   }
 }
 
 // Metadebug methods
 
-void Metadebug::deallocate_chunk_a_lot(SpaceManager* sm,
-                                       size_t chunk_word_size){
-#ifdef ASSERT
-  VirtualSpaceList* vsl = sm->vs_list();
-  if (MetaDataDeallocateALot &&
-      Metadebug::deallocate_chunk_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) {
-    Metadebug::reset_deallocate_chunk_a_lot_count();
-    for (uint i = 0; i < metadata_deallocate_a_lock_chunk; i++) {
-      Metachunk* dummy_chunk = vsl->current_virtual_space()->take_from_committed(chunk_word_size);
-      if (dummy_chunk == NULL) {
-        break;
-      }
-      vsl->chunk_manager()->chunk_freelist_deallocate(dummy_chunk);
-
-      if (TraceMetadataChunkAllocation && Verbose) {
-        gclog_or_tty->print("Metadebug::deallocate_chunk_a_lot: %d) ",
-                               sm->sum_count_in_chunks_in_use());
-        dummy_chunk->print_on(gclog_or_tty);
-        gclog_or_tty->print_cr("  Free chunks total %d  count %d",
-                               vsl->chunk_manager()->free_chunks_total(),
-                               vsl->chunk_manager()->free_chunks_count());
-      }
-    }
-  } else {
-    Metadebug::inc_deallocate_chunk_a_lot_count();
-  }
-#endif
-}
-
-void Metadebug::deallocate_block_a_lot(SpaceManager* sm,
-                                       size_t raw_word_size){
-#ifdef ASSERT
-  if (MetaDataDeallocateALot &&
-        Metadebug::deallocate_block_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) {
-    Metadebug::set_deallocate_block_a_lot_count(0);
-    for (uint i = 0; i < metadata_deallocate_a_lot_block; i++) {
-      MetaWord* dummy_block = sm->allocate_work(raw_word_size);
-      if (dummy_block == 0) {
-        break;
-      }
-      sm->deallocate(dummy_block, raw_word_size);
-    }
-  } else {
-    Metadebug::inc_deallocate_block_a_lot_count();
-  }
-#endif
-}
-
 void Metadebug::init_allocation_fail_alot_count() {
   if (MetadataAllocationFailALot) {
     _allocation_fail_alot_count =
@@ -1565,12 +1572,12 @@
 
 // ChunkManager methods
 
-size_t ChunkManager::free_chunks_total() {
+size_t ChunkManager::free_chunks_total_words() {
   return _free_chunks_total;
 }
 
-size_t ChunkManager::free_chunks_total_in_bytes() {
-  return free_chunks_total() * BytesPerWord;
+size_t ChunkManager::free_chunks_total_bytes() {
+  return free_chunks_total_words() * BytesPerWord;
 }
 
 size_t ChunkManager::free_chunks_count() {
@@ -1680,31 +1687,6 @@
   return free_chunks(index);
 }
 
-void ChunkManager::free_chunks_put(Metachunk* chunk) {
-  assert_lock_strong(SpaceManager::expand_lock());
-  ChunkList* free_list = find_free_chunks_list(chunk->word_size());
-  chunk->set_next(free_list->head());
-  free_list->set_head(chunk);
-  // chunk is being returned to the chunk free list
-  inc_free_chunks_total(chunk->capacity_word_size());
-  slow_locked_verify();
-}
-
-void ChunkManager::chunk_freelist_deallocate(Metachunk* chunk) {
-  // The deallocation of a chunk originates in the freelist
-  // manangement code for a Metaspace and does not hold the
-  // lock.
-  assert(chunk != NULL, "Deallocating NULL");
-  assert_lock_strong(SpaceManager::expand_lock());
-  slow_locked_verify();
-  if (TraceMetadataChunkAllocation) {
-    tty->print_cr("ChunkManager::chunk_freelist_deallocate: chunk "
-                  PTR_FORMAT "  size " SIZE_FORMAT,
-                  chunk, chunk->word_size());
-  }
-  free_chunks_put(chunk);
-}
-
 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
   assert_lock_strong(SpaceManager::expand_lock());
 
@@ -1716,7 +1698,6 @@
     assert(free_list != NULL, "Sanity check");
 
     chunk = free_list->head();
-    debug_only(Metachunk* debug_head = chunk;)
 
     if (chunk == NULL) {
       return NULL;
@@ -1725,42 +1706,42 @@
     // Remove the chunk as the head of the list.
     free_list->remove_chunk(chunk);
 
-    // Chunk is being removed from the chunks free list.
-    dec_free_chunks_total(chunk->capacity_word_size());
-
     if (TraceMetadataChunkAllocation && Verbose) {
-      tty->print_cr("ChunkManager::free_chunks_get: free_list "
-                    PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
-                    free_list, chunk, chunk->word_size());
+      gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list "
+                             PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
+                             free_list, chunk, chunk->word_size());
     }
   } else {
     chunk = humongous_dictionary()->get_chunk(
       word_size,
       FreeBlockDictionary<Metachunk>::atLeast);
 
-    if (chunk != NULL) {
-      if (TraceMetadataHumongousAllocation) {
-        size_t waste = chunk->word_size() - word_size;
-        tty->print_cr("Free list allocate humongous chunk size " SIZE_FORMAT
-                      " for requested size " SIZE_FORMAT
-                      " waste " SIZE_FORMAT,
-                      chunk->word_size(), word_size, waste);
-      }
-      // Chunk is being removed from the chunks free list.
-      dec_free_chunks_total(chunk->capacity_word_size());
-    } else {
+    if (chunk == NULL) {
       return NULL;
     }
+
+    if (TraceMetadataHumongousAllocation) {
+      size_t waste = chunk->word_size() - word_size;
+      gclog_or_tty->print_cr("Free list allocate humongous chunk size "
+                             SIZE_FORMAT " for requested size " SIZE_FORMAT
+                             " waste " SIZE_FORMAT,
+                             chunk->word_size(), word_size, waste);
+    }
   }
 
+  // Chunk is being removed from the chunks free list.
+  dec_free_chunks_total(chunk->word_size());
+
   // Remove it from the links to this freelist
   chunk->set_next(NULL);
   chunk->set_prev(NULL);
 #ifdef ASSERT
   // Chunk is no longer on any freelist. Setting it to false makes
   // container_count_slow() work.
-  chunk->set_is_free(false);
+  chunk->set_is_tagged_free(false);
 #endif
+  chunk->container()->inc_container_count();
+
   slow_locked_verify();
   return chunk;
 }
@@ -1786,18 +1767,18 @@
     } else {
       list_count = humongous_dictionary()->total_count();
     }
-    tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk "
-               PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
-               this, chunk, chunk->word_size(), list_count);
-    locked_print_free_chunks(tty);
+    gclog_or_tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk "
+                        PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
+                        this, chunk, chunk->word_size(), list_count);
+    locked_print_free_chunks(gclog_or_tty);
   }
 
   return chunk;
 }
 
-void ChunkManager::print_on(outputStream* out) {
+void ChunkManager::print_on(outputStream* out) const {
   if (PrintFLSStatistics != 0) {
-    humongous_dictionary()->report_statistics();
+    const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics();
   }
 }
 
@@ -1889,7 +1870,7 @@
     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
       Metachunk* chunk = chunks_in_use(i);
       while (chunk != NULL) {
-        sum += chunk->capacity_word_size();
+        sum += chunk->word_size();
         chunk = chunk->next();
       }
     }
@@ -1944,8 +1925,8 @@
     }
   }
 
-  vs_list()->chunk_manager()->locked_print_free_chunks(st);
-  vs_list()->chunk_manager()->locked_print_sum_free_chunks(st);
+  chunk_manager()->locked_print_free_chunks(st);
+  chunk_manager()->locked_print_sum_free_chunks(st);
 }
 
 size_t SpaceManager::calc_chunk_size(size_t word_size) {
@@ -1965,12 +1946,12 @@
     chunk_word_size = medium_chunk_size();
   }
 
-  // Might still need a humongous chunk.  Enforce an
-  // eight word granularity to facilitate reuse (some
-  // wastage but better chance of reuse).
+  // Might still need a humongous chunk.  Enforce
+  // humongous allocation sizes to be aligned up to
+  // the smallest chunk size.
   size_t if_humongous_sized_chunk =
     align_size_up(word_size + Metachunk::overhead(),
-                  HumongousChunkGranularity);
+                  smallest_chunk_size());
   chunk_word_size =
     MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
 
@@ -1991,6 +1972,15 @@
   return chunk_word_size;
 }
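
The humongous sizing rule rounds the request plus overhead up to the smallest chunk size. A worked example with hypothetical word counts:

#include <cassert>
#include <cstddef>

static size_t align_up(size_t v, size_t a) { return (v + a - 1) / a * a; }

size_t humongous_chunk_words(size_t word_size,
                             size_t overhead = 2,          // Metachunk::overhead() stand-in
                             size_t smallest_chunk = 128) { // specialized chunk stand-in
  return align_up(word_size + overhead, smallest_chunk);
}

int main() {
  assert(humongous_chunk_words(1000) == 1024);  // 1002 rounds up to 8 * 128
  return 0;
}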
 
+void SpaceManager::track_metaspace_memory_usage() {
+  if (is_init_completed()) {
+    if (is_class()) {
+      MemoryService::track_compressed_class_memory_usage();
+    }
+    MemoryService::track_metaspace_memory_usage();
+  }
+}
+
 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
   assert(vs_list()->current_virtual_space() != NULL,
          "Should have been set");
@@ -2016,15 +2006,20 @@
   size_t grow_chunks_by_words = calc_chunk_size(word_size);
   Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words);
 
+  MetaWord* mem = NULL;
+
   // If a chunk was available, add it to the in-use chunk list
   // and do an allocation from it.
   if (next != NULL) {
-    Metadebug::deallocate_chunk_a_lot(this, grow_chunks_by_words);
     // Add to this manager's list of chunks in use.
     add_chunk(next, false);
-    return next->allocate(word_size);
+    mem = next->allocate(word_size);
   }
-  return NULL;
+
+  // Track metaspace memory usage statistic.
+  track_metaspace_memory_usage();
+
+  return mem;
 }
 
 void SpaceManager::print_on(outputStream* st) const {
@@ -2049,9 +2044,7 @@
 }
 
 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
-                           Mutex* lock,
-                           VirtualSpaceList* vs_list) :
-  _vs_list(vs_list),
+                           Mutex* lock) :
   _mdtype(mdtype),
   _allocated_blocks_words(0),
   _allocated_chunks_words(0),
@@ -2121,7 +2114,7 @@
     // Capture the next link before it is changed
     // by the call to return_chunk_at_head();
     Metachunk* next = cur->next();
-    cur->set_is_free(true);
+    DEBUG_ONLY(cur->set_is_tagged_free(true);)
     list->return_chunk_at_head(cur);
     cur = next;
   }
@@ -2137,9 +2130,7 @@
   MutexLockerEx fcl(SpaceManager::expand_lock(),
                     Mutex::_no_safepoint_check_flag);
 
-  ChunkManager* chunk_manager = vs_list()->chunk_manager();
-
-  chunk_manager->slow_locked_verify();
+  chunk_manager()->slow_locked_verify();
 
   dec_total_from_size_metrics();
 
@@ -2153,8 +2144,8 @@
 
   // Have to update before the chunks_in_use lists are emptied
   // below.
-  chunk_manager->inc_free_chunks_total(allocated_chunks_words(),
-                                       sum_count_in_chunks_in_use());
+  chunk_manager()->inc_free_chunks_total(allocated_chunks_words(),
+                                         sum_count_in_chunks_in_use());
 
   // Add all the chunks in use by this space manager
   // to the global list of free chunks.
@@ -2169,11 +2160,11 @@
                              chunk_size_name(i));
     }
     Metachunk* chunks = chunks_in_use(i);
-    chunk_manager->return_chunks(i, chunks);
+    chunk_manager()->return_chunks(i, chunks);
     set_chunks_in_use(i, NULL);
     if (TraceMetadataChunkAllocation && Verbose) {
       gclog_or_tty->print_cr("updated freelist count %d %s",
-                             chunk_manager->free_chunks(i)->count(),
+                             chunk_manager()->free_chunks(i)->count(),
                              chunk_size_name(i));
     }
     assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
@@ -2195,7 +2186,7 @@
 
   while (humongous_chunks != NULL) {
 #ifdef ASSERT
-    humongous_chunks->set_is_free(true);
+    humongous_chunks->set_is_tagged_free(true);
 #endif
     if (TraceMetadataChunkAllocation && Verbose) {
       gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ",
@@ -2204,22 +2195,22 @@
     }
     assert(humongous_chunks->word_size() == (size_t)
            align_size_up(humongous_chunks->word_size(),
-                             HumongousChunkGranularity),
+                             smallest_chunk_size()),
            err_msg("Humongous chunk size is wrong: word size " SIZE_FORMAT
                    " granularity %d",
-                   humongous_chunks->word_size(), HumongousChunkGranularity));
+                   humongous_chunks->word_size(), smallest_chunk_size()));
     Metachunk* next_humongous_chunks = humongous_chunks->next();
     humongous_chunks->container()->dec_container_count();
-    chunk_manager->humongous_dictionary()->return_chunk(humongous_chunks);
+    chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks);
     humongous_chunks = next_humongous_chunks;
   }
   if (TraceMetadataChunkAllocation && Verbose) {
     gclog_or_tty->print_cr("");
     gclog_or_tty->print_cr("updated dictionary count %d %s",
-                     chunk_manager->humongous_dictionary()->total_count(),
+                     chunk_manager()->humongous_dictionary()->total_count(),
                      chunk_size_name(HumongousIndex));
   }
-  chunk_manager->slow_locked_verify();
+  chunk_manager()->slow_locked_verify();
 }
 
 const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
@@ -2278,6 +2269,7 @@
   ChunkIndex index = ChunkManager::list_index(new_chunk->word_size());
 
   if (index != HumongousIndex) {
+    retire_current_chunk();
     set_current_chunk(new_chunk);
     new_chunk->set_next(chunks_in_use(index));
     set_chunks_in_use(index, new_chunk);
@@ -2307,23 +2299,35 @@
     gclog_or_tty->print("SpaceManager::add_chunk: %d) ",
                         sum_count_in_chunks_in_use());
     new_chunk->print_on(gclog_or_tty);
-    if (vs_list() != NULL) {
-      vs_list()->chunk_manager()->locked_print_free_chunks(tty);
+    chunk_manager()->locked_print_free_chunks(gclog_or_tty);
+  }
+}
+
+void SpaceManager::retire_current_chunk() {
+  if (current_chunk() != NULL) {
+    size_t remaining_words = current_chunk()->free_word_size();
+    if (remaining_words >= TreeChunk<Metablock, FreeList>::min_size()) {
+      block_freelists()->return_block(current_chunk()->allocate(remaining_words), remaining_words);
+      inc_used_metrics(remaining_words);
     }
   }
 }
 
 Metachunk* SpaceManager::get_new_chunk(size_t word_size,
                                        size_t grow_chunks_by_words) {
-
-  Metachunk* next = vs_list()->get_new_chunk(word_size,
-                                             grow_chunks_by_words,
-                                             medium_chunk_bunch());
-
-  if (TraceMetadataHumongousAllocation &&
+  // Get a chunk from the chunk freelist
+  Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
+
+  if (next == NULL) {
+    next = vs_list()->get_new_chunk(word_size,
+                                    grow_chunks_by_words,
+                                    medium_chunk_bunch());
+  }
+
+  if (TraceMetadataHumongousAllocation && next != NULL &&
       SpaceManager::is_humongous(next->word_size())) {
-    gclog_or_tty->print_cr("  new humongous chunk word size " PTR_FORMAT,
-                           next->word_size());
+    gclog_or_tty->print_cr("  new humongous chunk word size "
+                           PTR_FORMAT, next->word_size());
   }
 
   return next;
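
The reordered get_new_chunk() above is the core of this change: chunks
released when a class loader dies now land on a global ChunkManager free
list and are recycled before any new virtual space is carved. A minimal
sketch of that allocation order, with ChunkPool and BackingStore as
hypothetical stand-ins for ChunkManager and VirtualSpaceList:

    #include <cstddef>

    struct Chunk { size_t words; Chunk* next; };

    struct ChunkPool {                     // plays the role of ChunkManager
      Chunk* _free;
      ChunkPool() : _free(NULL) {}
      Chunk* take(size_t words) {
        for (Chunk** p = &_free; *p != NULL; p = &(*p)->next) {
          if ((*p)->words >= words) {      // first fit is enough for a sketch
            Chunk* c = *p;
            *p = c->next;
            return c;                      // reuse a chunk freed earlier
          }
        }
        return NULL;                       // miss: caller must carve new space
      }
    };

    struct BackingStore {                  // plays the role of VirtualSpaceList
      Chunk* carve(size_t words) {
        Chunk* c = new Chunk();
        c->words = words;
        c->next  = NULL;
        return c;
      }
    };

    Chunk* get_chunk(ChunkPool& pool, BackingStore& vs, size_t words) {
      Chunk* c = pool.take(words);         // 1) recycle from the free list
      if (c == NULL) {
        c = vs.carve(words);               // 2) only then grow reserved space
      }
      return c;
    }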
@@ -2346,7 +2350,6 @@
   if (p == NULL) {
     p = allocate_work(raw_word_size);
   }
-  Metadebug::deallocate_block_a_lot(this, raw_word_size);
 
   return p;
 }
@@ -2371,6 +2374,7 @@
     inc_used_metrics(word_size);
     return current_chunk()->allocate(word_size); // caller handles null result
   }
+
   if (current_chunk() != NULL) {
     result = current_chunk()->allocate(word_size);
   }
@@ -2378,7 +2382,8 @@
   if (result == NULL) {
     result = grow_and_allocate(word_size);
   }
-  if (result != 0) {
+
+  if (result != NULL) {
     inc_used_metrics(word_size);
     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
            "Head of the list is being allocated");
@@ -2441,16 +2446,17 @@
          curr = curr->next()) {
       out->print("%d) ", i++);
       curr->print_on(out);
-      if (TraceMetadataChunkAllocation && Verbose) {
-        block_freelists()->print_on(out);
-      }
       curr_total += curr->word_size();
       used += curr->used_word_size();
-      capacity += curr->capacity_word_size();
+      capacity += curr->word_size();
       waste += curr->free_word_size() + curr->overhead();
     }
   }
 
+  if (TraceMetadataChunkAllocation && Verbose) {
+    block_freelists()->print_on(out);
+  }
+
   size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
   // Free space isn't wasted.
   waste -= free;
@@ -2538,13 +2544,13 @@
   return used * BytesPerWord;
 }
 
-size_t MetaspaceAux::free_in_bytes(Metaspace::MetadataType mdtype) {
+size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) {
   size_t free = 0;
   ClassLoaderDataGraphMetaspaceIterator iter;
   while (iter.repeat()) {
     Metaspace* msp = iter.get_next();
     if (msp != NULL) {
-      free += msp->free_words(mdtype);
+      free += msp->free_words_slow(mdtype);
     }
   }
   return free * BytesPerWord;
@@ -2567,34 +2573,55 @@
   return capacity * BytesPerWord;
 }
 
-size_t MetaspaceAux::reserved_in_bytes(Metaspace::MetadataType mdtype) {
-  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
-  return list == NULL ? 0 : list->virtual_space_total();
+size_t MetaspaceAux::capacity_bytes_slow() {
+#ifdef PRODUCT
+  // Use allocated_capacity_bytes() in PRODUCT instead of this function.
+  guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
+#endif
+  size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
+  size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
+  assert(allocated_capacity_bytes() == class_capacity + non_class_capacity,
+      err_msg("bad accounting: allocated_capacity_bytes() " SIZE_FORMAT
+        " class_capacity + non_class_capacity " SIZE_FORMAT
+        " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
+        allocated_capacity_bytes(), class_capacity + non_class_capacity,
+        class_capacity, non_class_capacity));
+
+  return class_capacity + non_class_capacity;
 }
 
-size_t MetaspaceAux::min_chunk_size() { return Metaspace::first_chunk_word_size(); }
-
-size_t MetaspaceAux::free_chunks_total(Metaspace::MetadataType mdtype) {
+size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) {
+  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
+  return list == NULL ? 0 : list->reserved_bytes();
+}
+
+size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) {
   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
-  if (list == NULL) {
+  return list == NULL ? 0 : list->committed_bytes();
+}
+
+size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
+
+size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) {
+  ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
+  if (chunk_manager == NULL) {
     return 0;
   }
-  ChunkManager* chunk = list->chunk_manager();
-  chunk->slow_verify();
-  return chunk->free_chunks_total();
+  chunk_manager->slow_verify();
+  return chunk_manager->free_chunks_total_words();
 }
 
-size_t MetaspaceAux::free_chunks_total_in_bytes(Metaspace::MetadataType mdtype) {
-  return free_chunks_total(mdtype) * BytesPerWord;
+size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
+  return free_chunks_total_words(mdtype) * BytesPerWord;
 }
 
-size_t MetaspaceAux::free_chunks_total() {
-  return free_chunks_total(Metaspace::ClassType) +
-         free_chunks_total(Metaspace::NonClassType);
+size_t MetaspaceAux::free_chunks_total_words() {
+  return free_chunks_total_words(Metaspace::ClassType) +
+         free_chunks_total_words(Metaspace::NonClassType);
 }
 
-size_t MetaspaceAux::free_chunks_total_in_bytes() {
-  return free_chunks_total() * BytesPerWord;
+size_t MetaspaceAux::free_chunks_total_bytes() {
+  return free_chunks_total_words() * BytesPerWord;
 }
 
 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
@@ -2605,14 +2632,14 @@
                         "("  SIZE_FORMAT ")",
                         prev_metadata_used,
                         allocated_used_bytes(),
-                        reserved_in_bytes());
+                        reserved_bytes());
   } else {
     gclog_or_tty->print(" "  SIZE_FORMAT "K"
                         "->" SIZE_FORMAT "K"
                         "("  SIZE_FORMAT "K)",
-                        prev_metadata_used / K,
-                        allocated_used_bytes() / K,
-                        reserved_in_bytes()/ K);
+                        prev_metadata_used/K,
+                        allocated_used_bytes()/K,
+                        reserved_bytes()/K);
   }
 
   gclog_or_tty->print("]");
@@ -2622,35 +2649,37 @@
 void MetaspaceAux::print_on(outputStream* out) {
   Metaspace::MetadataType nct = Metaspace::NonClassType;
 
-  out->print_cr(" Metaspace total "
-                SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
-                " reserved " SIZE_FORMAT "K",
-                allocated_capacity_bytes()/K, allocated_used_bytes()/K, reserved_in_bytes()/K);
-
-  out->print_cr("  data space     "
-                SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
-                " reserved " SIZE_FORMAT "K",
-                allocated_capacity_bytes(nct)/K,
-                allocated_used_bytes(nct)/K,
-                reserved_in_bytes(nct)/K);
+  out->print_cr(" Metaspace       "
+                "used "      SIZE_FORMAT "K, "
+                "capacity "  SIZE_FORMAT "K, "
+                "committed " SIZE_FORMAT "K, "
+                "reserved "  SIZE_FORMAT "K",
+                allocated_used_bytes()/K,
+                allocated_capacity_bytes()/K,
+                committed_bytes()/K,
+                reserved_bytes()/K);
+
   if (Metaspace::using_class_space()) {
     Metaspace::MetadataType ct = Metaspace::ClassType;
     out->print_cr("  class space    "
-                  SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
-                  " reserved " SIZE_FORMAT "K",
+                  "used "      SIZE_FORMAT "K, "
+                  "capacity "  SIZE_FORMAT "K, "
+                  "committed " SIZE_FORMAT "K, "
+                  "reserved "  SIZE_FORMAT "K",
+                  allocated_used_bytes(ct)/K,
                   allocated_capacity_bytes(ct)/K,
-                  allocated_used_bytes(ct)/K,
-                  reserved_in_bytes(ct)/K);
+                  committed_bytes(ct)/K,
+                  reserved_bytes(ct)/K);
   }
 }
 
 // Print information for class space and data space separately.
 // This is almost the same as above.
 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
-  size_t free_chunks_capacity_bytes = free_chunks_total_in_bytes(mdtype);
+  size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
   size_t capacity_bytes = capacity_bytes_slow(mdtype);
   size_t used_bytes = used_bytes_slow(mdtype);
-  size_t free_bytes = free_in_bytes(mdtype);
+  size_t free_bytes = free_bytes_slow(mdtype);
   size_t used_and_free = used_bytes + free_bytes +
                            free_chunks_capacity_bytes;
   out->print_cr("  Chunk accounting: used in chunks " SIZE_FORMAT
@@ -2732,9 +2761,9 @@
 }
 
 void MetaspaceAux::verify_free_chunks() {
-  Metaspace::space_list()->chunk_manager()->verify();
+  Metaspace::chunk_manager_metadata()->verify();
   if (Metaspace::using_class_space()) {
-    Metaspace::class_space_list()->chunk_manager()->verify();
+    Metaspace::chunk_manager_class()->verify();
   }
 }
 
@@ -2791,6 +2820,9 @@
 size_t Metaspace::_first_chunk_word_size = 0;
 size_t Metaspace::_first_class_chunk_word_size = 0;
 
+size_t Metaspace::_commit_alignment = 0;
+size_t Metaspace::_reserve_alignment = 0;
+
 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
   initialize(lock, type);
 }
@@ -2805,9 +2837,14 @@
 VirtualSpaceList* Metaspace::_space_list = NULL;
 VirtualSpaceList* Metaspace::_class_space_list = NULL;
 
+ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
+ChunkManager* Metaspace::_chunk_manager_class = NULL;
+
 #define VIRTUALSPACEMULTIPLIER 2
 
 #ifdef _LP64
+static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
+
 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
   // Figure out the narrow_klass_base and the narrow_klass_shift.  The
   // narrow_klass_base is the lower of the metaspace base and the cds base
@@ -2817,14 +2854,22 @@
   address higher_address;
   if (UseSharedSpaces) {
     higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
-                          (address)(metaspace_base + class_metaspace_size()));
+                          (address)(metaspace_base + compressed_class_space_size()));
     lower_base = MIN2(metaspace_base, cds_base);
   } else {
-    higher_address = metaspace_base + class_metaspace_size();
+    higher_address = metaspace_base + compressed_class_space_size();
     lower_base = metaspace_base;
+
+    uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
+    // If compressed class space fits in lower 32G, we don't need a base.
+    if (higher_address <= (address)klass_encoding_max) {
+      lower_base = 0; // effectively lower base is zero.
+    }
   }
+
   Universe::set_narrow_klass_base(lower_base);
-  if ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint) {
+
+  if ((uint64_t)(higher_address - lower_base) < UnscaledClassSpaceMax) {
     Universe::set_narrow_klass_shift(0);
   } else {
     assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces");
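
Concretely, the zero-base decision above plays out like this (a worked
example assuming LogKlassAlignmentInBytes == 3, so klass_encoding_max is
4G << 3 == 32G):

    metaspace_base = 27G, compressed class space = 1G:
      higher_address = 28G <= 32G           -> lower_base = 0 (zero-based)
      28G - 0 >= UnscaledClassSpaceMax (4G) -> narrow_klass_shift = 3

    metaspace_base = 2G, compressed class space = 1G:
      higher_address = 3G <= 32G            -> lower_base = 0
      3G - 0 < 4G                           -> narrow_klass_shift = 0 (unscaled)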
@@ -2836,48 +2881,57 @@
 // to work with compressed klass pointers.
 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
   assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
-  assert(UseCompressedKlassPointers, "Only use with CompressedKlassPtrs");
+  assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
   address lower_base = MIN2((address)metaspace_base, cds_base);
   address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
-                                (address)(metaspace_base + class_metaspace_size()));
-  return ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint);
+                                (address)(metaspace_base + compressed_class_space_size()));
+  return ((uint64_t)(higher_address - lower_base) < UnscaledClassSpaceMax);
 }
 
 // Try to allocate the metaspace at the requested addr.
 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
   assert(using_class_space(), "called improperly");
-  assert(UseCompressedKlassPointers, "Only use with CompressedKlassPtrs");
-  assert(class_metaspace_size() < KlassEncodingMetaspaceMax,
+  assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
+  assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
          "Metaspace size is too big");
-
-  ReservedSpace metaspace_rs = ReservedSpace(class_metaspace_size(),
-                                             os::vm_allocation_granularity(),
-                                             false, requested_addr, 0);
+  assert_is_ptr_aligned(requested_addr, _reserve_alignment);
+  assert_is_ptr_aligned(cds_base, _reserve_alignment);
+  assert_is_size_aligned(compressed_class_space_size(), _reserve_alignment);
+
+  // Don't use large pages for the class space.
+  bool large_pages = false;
+
+  ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
+                                             _reserve_alignment,
+                                             large_pages,
+                                             requested_addr, 0);
   if (!metaspace_rs.is_reserved()) {
     if (UseSharedSpaces) {
+      size_t increment = align_size_up(1*G, _reserve_alignment);
+
       // Keep trying to allocate the metaspace, increasing the requested_addr
       // by 1GB each time, until we reach an address that will no longer allow
       // use of CDS with compressed klass pointers.
       char *addr = requested_addr;
-      while (!metaspace_rs.is_reserved() && (addr + 1*G > addr) &&
-             can_use_cds_with_metaspace_addr(addr + 1*G, cds_base)) {
-        addr = addr + 1*G;
-        metaspace_rs = ReservedSpace(class_metaspace_size(),
-                                     os::vm_allocation_granularity(), false, addr, 0);
+      while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
+             can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
+        addr = addr + increment;
+        metaspace_rs = ReservedSpace(compressed_class_space_size(),
+                                     _reserve_alignment, large_pages, addr, 0);
       }
     }
 
     // If no successful allocation then try to allocate the space anywhere.  If
     // that fails then OOM doom.  At this point we cannot try allocating the
-    // metaspace as if UseCompressedKlassPointers is off because too much
-    // initialization has happened that depends on UseCompressedKlassPointers.
-    // So, UseCompressedKlassPointers cannot be turned off at this point.
+    // metaspace as if UseCompressedClassPointers is off because too much
+    // initialization has happened that depends on UseCompressedClassPointers.
+    // So, UseCompressedClassPointers cannot be turned off at this point.
     if (!metaspace_rs.is_reserved()) {
-      metaspace_rs = ReservedSpace(class_metaspace_size(),
-                                   os::vm_allocation_granularity(), false);
+      metaspace_rs = ReservedSpace(compressed_class_space_size(),
+                                   _reserve_alignment, large_pages);
       if (!metaspace_rs.is_reserved()) {
         vm_exit_during_initialization(err_msg("Could not allocate metaspace: %d bytes",
-                                              class_metaspace_size()));
+                                              compressed_class_space_size()));
       }
     }
   }
@@ -2899,59 +2953,120 @@
   if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {
     gclog_or_tty->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: " SIZE_FORMAT,
                             Universe::narrow_klass_base(), Universe::narrow_klass_shift());
-    gclog_or_tty->print_cr("Metaspace Size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT,
-                           class_metaspace_size(), metaspace_rs.base(), requested_addr);
+    gclog_or_tty->print_cr("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT,
+                           compressed_class_space_size(), metaspace_rs.base(), requested_addr);
   }
 }
 
-// For UseCompressedKlassPointers the class space is reserved above the top of
+// For UseCompressedClassPointers the class space is reserved above the top of
 // the Java heap.  The argument passed in is at the base of the compressed space.
 void Metaspace::initialize_class_space(ReservedSpace rs) {
   // The reserved space size may be bigger because of alignment, esp with UseLargePages
-  assert(rs.size() >= ClassMetaspaceSize,
-         err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), ClassMetaspaceSize));
+  assert(rs.size() >= CompressedClassSpaceSize,
+         err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), CompressedClassSpaceSize));
   assert(using_class_space(), "Must be using class space");
   _class_space_list = new VirtualSpaceList(rs);
+  _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk);
+
+  if (!_class_space_list->initialization_succeeded()) {
+    vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
+  }
 }
 
 #endif
 
+// Align down; if the result would be 0, return 'alignment' instead.
+static size_t restricted_align_down(size_t size, size_t alignment) {
+  return MAX2(alignment, align_size_down_(size, alignment));
+}
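
For illustration, the clamp behaves like this with a hypothetical 1M
alignment (example values, not VM defaults):

    restricted_align_down(10*M + 123, 1*M) == 10*M  // plain align-down
    restricted_align_down(512*K,      1*M) ==  1*M  // would be 0, clamped up
    restricted_align_down(3*M,        1*M) ==  3*M  // already aligned

The clamp guarantees that a too-small, user-supplied flag value is raised
to one alignment unit rather than silently rounded down to zero.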
+
+void Metaspace::ergo_initialize() {
+  if (DumpSharedSpaces) {
+    // Using large pages when dumping the shared archive is currently not implemented.
+    FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
+  }
+
+  size_t page_size = os::vm_page_size();
+  if (UseLargePages && UseLargePagesInMetaspace) {
+    page_size = os::large_page_size();
+  }
+
+  _commit_alignment  = page_size;
+  _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
+
+  // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this would
+  // clobber the record of whether MaxMetaspaceSize was set on the command line.
+  // This information is needed later to conform to the specification of the
+  // java.lang.management.MemoryUsage API.
+  //
+  // Ideally, we would be able to set the default value of MaxMetaspaceSize in
+  // globals.hpp to the aligned value, but this is not possible, since the
+  // alignment depends on other flags being parsed.
+  MaxMetaspaceSize = restricted_align_down(MaxMetaspaceSize, _reserve_alignment);
+
+  if (MetaspaceSize > MaxMetaspaceSize) {
+    MetaspaceSize = MaxMetaspaceSize;
+  }
+
+  MetaspaceSize = restricted_align_down(MetaspaceSize, _commit_alignment);
+
+  assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
+
+  if (MetaspaceSize < 256*K) {
+    vm_exit_during_initialization("Too small initial Metaspace size");
+  }
+
+  MinMetaspaceExpansion = restricted_align_down(MinMetaspaceExpansion, _commit_alignment);
+  MaxMetaspaceExpansion = restricted_align_down(MaxMetaspaceExpansion, _commit_alignment);
+
+  CompressedClassSpaceSize = restricted_align_down(CompressedClassSpaceSize, _reserve_alignment);
+  set_compressed_class_space_size(CompressedClassSpaceSize);
+}
+
 void Metaspace::global_initialize() {
   // Initialize the alignment for shared spaces.
   int max_alignment = os::vm_page_size();
   size_t cds_total = 0;
 
-  set_class_metaspace_size(align_size_up(ClassMetaspaceSize,
-                                         os::vm_allocation_granularity()));
-
   MetaspaceShared::set_max_alignment(max_alignment);
 
   if (DumpSharedSpaces) {
-    SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment);
+    SharedReadOnlySize  = align_size_up(SharedReadOnlySize,  max_alignment);
     SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
-    SharedMiscDataSize  = align_size_up(SharedMiscDataSize, max_alignment);
-    SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize, max_alignment);
+    SharedMiscDataSize  = align_size_up(SharedMiscDataSize,  max_alignment);
+    SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize,  max_alignment);
 
     // Initialize with the sum of the shared space sizes.  The read-only
     // and read write metaspace chunks will be allocated out of this and the
     // remainder is the misc code and data chunks.
     cds_total = FileMapInfo::shared_spaces_size();
+    cds_total = align_size_up(cds_total, _reserve_alignment);
     _space_list = new VirtualSpaceList(cds_total/wordSize);
+    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
+
+    if (!_space_list->initialization_succeeded()) {
+      vm_exit_during_initialization("Unable to dump shared archive.", NULL);
+    }
 
 #ifdef _LP64
+    if (cds_total + compressed_class_space_size() > UnscaledClassSpaceMax) {
+      vm_exit_during_initialization("Unable to dump shared archive.",
+          err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space ("
+                  SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed "
+                  "klass limit: " SIZE_FORMAT, cds_total, compressed_class_space_size(),
+                  cds_total + compressed_class_space_size(), UnscaledClassSpaceMax));
+    }
+
     // Set the compressed klass pointer base so that decoding of these pointers works
     // properly when creating the shared archive.
-    assert(UseCompressedOops && UseCompressedKlassPointers,
-      "UseCompressedOops and UseCompressedKlassPointers must be set");
+    assert(UseCompressedOops && UseCompressedClassPointers,
+      "UseCompressedOops and UseCompressedClassPointers must be set");
     Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
     if (TraceMetavirtualspaceAllocation && Verbose) {
       gclog_or_tty->print_cr("Setting_narrow_klass_base to Address: " PTR_FORMAT,
                              _space_list->current_virtual_space()->bottom());
     }
 
-    // Set the shift to zero.
-    assert(class_metaspace_size() < (uint64_t)(max_juint) - cds_total,
-           "CDS region is too large");
     Universe::set_narrow_klass_shift(0);
 #endif
 
@@ -2970,22 +3085,25 @@
       // Map in spaces now also
       if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
         FileMapInfo::set_current_info(mapinfo);
+        cds_total = FileMapInfo::shared_spaces_size();
+        cds_address = (address)mapinfo->region_base(0);
       } else {
         assert(!mapinfo->is_open() && !UseSharedSpaces,
                "archive file not closed or shared spaces not disabled.");
       }
-      cds_total = FileMapInfo::shared_spaces_size();
-      cds_address = (address)mapinfo->region_base(0);
     }
 
 #ifdef _LP64
-    // If UseCompressedKlassPointers is set then allocate the metaspace area
+    // If UseCompressedClassPointers is set then allocate the metaspace area
     // above the heap and above the CDS area (if it exists).
     if (using_class_space()) {
       if (UseSharedSpaces) {
-        allocate_metaspace_compressed_klass_ptrs((char *)(cds_address + cds_total), cds_address);
+        char* cds_end = (char*)(cds_address + cds_total);
+        cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment);
+        allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
       } else {
-        allocate_metaspace_compressed_klass_ptrs((char *)CompressedKlassPointersBase, 0);
+        char* base = (char*)align_ptr_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
+        allocate_metaspace_compressed_klass_ptrs(base, 0);
       }
     }
 #endif
@@ -2997,22 +3115,45 @@
     // on the medium chunk list.   The next chunk will be small and progress
     // from there.  This size calculated by -version.
     _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
-                                       (ClassMetaspaceSize/BytesPerWord)*2);
+                                       (CompressedClassSpaceSize/BytesPerWord)*2);
     _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
     // Arbitrarily set the initial virtual space to a multiple
     // of the boot class loader size.
-    size_t word_size = VIRTUALSPACEMULTIPLIER * first_chunk_word_size();
+    size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
+    word_size = align_size_up(word_size, Metaspace::reserve_alignment_words());
+
     // Initialize the list of virtual spaces.
     _space_list = new VirtualSpaceList(word_size);
+    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
+
+    if (!_space_list->initialization_succeeded()) {
+      vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
+    }
   }
+
+  MetaspaceGC::initialize();
+}
+
+Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype,
+                                               size_t chunk_word_size,
+                                               size_t chunk_bunch) {
+  // Get a chunk from the chunk freelist
+  Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
+  if (chunk != NULL) {
+    return chunk;
+  }
+
+  return get_space_list(mdtype)->get_new_chunk(chunk_word_size, chunk_word_size, chunk_bunch);
 }
 
 void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
 
   assert(space_list() != NULL,
     "Metadata VirtualSpaceList has not been initialized");
-
-  _vsm = new SpaceManager(NonClassType, lock, space_list());
+  assert(chunk_manager_metadata() != NULL,
+    "Metadata ChunkManager has not been initialized");
+
+  _vsm = new SpaceManager(NonClassType, lock);
   if (_vsm == NULL) {
     return;
   }
@@ -3021,11 +3162,13 @@
   vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size);
 
   if (using_class_space()) {
     assert(class_space_list() != NULL,
       "Class VirtualSpaceList has not been initialized");
+    assert(chunk_manager_class() != NULL,
+      "Class ChunkManager has not been initialized");
 
     // Allocate SpaceManager for classes.
-    _class_vsm = new SpaceManager(ClassType, lock, class_space_list());
+    _class_vsm = new SpaceManager(ClassType, lock);
     if (_class_vsm == NULL) {
       return;
     }
@@ -3034,9 +3177,9 @@
   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
 
   // Allocate chunk for metadata objects
-  Metachunk* new_chunk =
-     space_list()->get_initialization_chunk(word_size,
-                                            vsm()->medium_chunk_bunch());
+  Metachunk* new_chunk = get_initialization_chunk(NonClassType,
+                                                  word_size,
+                                                  vsm()->medium_chunk_bunch());
   assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks");
   if (new_chunk != NULL) {
     // Add to this manager's list of chunks in use and current_chunk().
@@ -3045,9 +3188,9 @@
 
   // Allocate chunk for class metadata objects
   if (using_class_space()) {
-    Metachunk* class_chunk =
-       class_space_list()->get_initialization_chunk(class_word_size,
-                                                    class_vsm()->medium_chunk_bunch());
+    Metachunk* class_chunk = get_initialization_chunk(ClassType,
+                                                      class_word_size,
+                                                      class_vsm()->medium_chunk_bunch());
     if (class_chunk != NULL) {
       class_vsm()->add_chunk(class_chunk, true);
     }
@@ -3064,8 +3207,8 @@
 
 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
   // DumpSharedSpaces doesn't use class metadata area (yet)
-  // Also, don't use class_vsm() unless UseCompressedKlassPointers is true.
-  if (mdtype == ClassType && using_class_space()) {
+  // Also, don't use class_vsm() unless UseCompressedClassPointers is true.
+  if (is_class_space_allocation(mdtype)) {
     return  class_vsm()->allocate(word_size);
   } else {
     return  vsm()->allocate(word_size);
@@ -3073,19 +3216,18 @@
 }
 
 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
-  MetaWord* result;
-  MetaspaceGC::set_expand_after_GC(true);
-  size_t before_inc = MetaspaceGC::capacity_until_GC();
-  size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size) * BytesPerWord;
-  MetaspaceGC::inc_capacity_until_GC(delta_bytes);
+  size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
+  assert(delta_bytes > 0, "Must be");
+
+  size_t after_inc = MetaspaceGC::inc_capacity_until_GC(delta_bytes);
+  size_t before_inc = after_inc - delta_bytes;
+
   if (PrintGCDetails && Verbose) {
     gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
-      " to " SIZE_FORMAT, before_inc, MetaspaceGC::capacity_until_GC());
+        " to " SIZE_FORMAT, before_inc, after_inc);
   }
 
-  result = allocate(word_size, mdtype);
-
-  return result;
+  return allocate(word_size, mdtype);
 }
 
 // Space allocated in the Metaspace.  This may
@@ -3103,7 +3245,7 @@
   }
 }
 
-size_t Metaspace::free_words(MetadataType mdtype) const {
+size_t Metaspace::free_words_slow(MetadataType mdtype) const {
   if (mdtype == ClassType) {
     return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
   } else {
@@ -3167,69 +3309,108 @@
   }
 }
 
-Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
+
+MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
                               bool read_only, MetaspaceObj::Type type, TRAPS) {
   if (HAS_PENDING_EXCEPTION) {
     assert(false, "Should not allocate with exception pending");
     return NULL;  // caller does a CHECK_NULL too
   }
 
-  MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
-
-  // SSS: Should we align the allocations and make sure the sizes are aligned.
-  MetaWord* result = NULL;
-
   assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
         "ClassLoaderData::the_null_class_loader_data() should have been used.");
+
   // Allocate in metaspaces without taking out a lock, because it deadlocks
   // with the SymbolTable_lock.  Dumping is single threaded for now.  We'll have
   // to revisit this for application class data sharing.
   if (DumpSharedSpaces) {
     assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
     Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
-    result = space->allocate(word_size, NonClassType);
+    MetaWord* result = space->allocate(word_size, NonClassType);
     if (result == NULL) {
       report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
-    } else {
-      space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
     }
-    return Metablock::initialize(result, word_size);
+
+    space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
+
+    // Zero initialize.
+    Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);
+
+    return result;
   }
 
-  result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
+  MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
+
+  // Try to allocate metadata.
+  MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
+
+  if (result == NULL) {
+    // Allocation failed.
+    if (is_init_completed()) {
+      // Only start a GC if the bootstrapping has completed.
+
+      // Try to clean out some memory and retry.
+      result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
+          loader_data, word_size, mdtype);
+    }
+  }
 
   if (result == NULL) {
-    // Try to clean out some memory and retry.
-    result =
-      Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
-        loader_data, word_size, mdtype);
-
-    // If result is still null, we are out of memory.
-    if (result == NULL) {
-      if (Verbose && TraceMetadataChunkAllocation) {
-        gclog_or_tty->print_cr("Metaspace allocation failed for size "
-          SIZE_FORMAT, word_size);
-        if (loader_data->metaspace_or_null() != NULL) loader_data->dump(gclog_or_tty);
-        MetaspaceAux::dump(gclog_or_tty);
-      }
-      // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
-      const char* space_string = (mdtype == ClassType) ? "Class Metadata space" :
-                                                         "Metadata space";
-      report_java_out_of_memory(space_string);
-
-      if (JvmtiExport::should_post_resource_exhausted()) {
-        JvmtiExport::post_resource_exhausted(
-            JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
-            space_string);
-      }
-      if (mdtype == ClassType) {
-        THROW_OOP_0(Universe::out_of_memory_error_class_metaspace());
-      } else {
-        THROW_OOP_0(Universe::out_of_memory_error_metaspace());
-      }
+    report_metadata_oome(loader_data, word_size, mdtype, CHECK_NULL);
+  }
+
+  // Zero initialize.
+  Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);
+
+  return result;
+}
+
+size_t Metaspace::class_chunk_size(size_t word_size) {
+  assert(using_class_space(), "Has to use class space");
+  return class_vsm()->calc_chunk_size(word_size);
+}
+
+void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetadataType mdtype, TRAPS) {
+  // If result is still null, we are out of memory.
+  if (Verbose && TraceMetadataChunkAllocation) {
+    gclog_or_tty->print_cr("Metaspace allocation failed for size "
+        SIZE_FORMAT, word_size);
+    if (loader_data->metaspace_or_null() != NULL) {
+      loader_data->dump(gclog_or_tty);
     }
+    MetaspaceAux::dump(gclog_or_tty);
   }
-  return Metablock::initialize(result, word_size);
+
+  bool out_of_compressed_class_space = false;
+  if (is_class_space_allocation(mdtype)) {
+    Metaspace* metaspace = loader_data->metaspace_non_null();
+    out_of_compressed_class_space =
+      MetaspaceAux::committed_bytes(Metaspace::ClassType) +
+      (metaspace->class_chunk_size(word_size) * BytesPerWord) >
+      CompressedClassSpaceSize;
+  }
+
+  // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
+  const char* space_string = out_of_compressed_class_space ?
+    "Compressed class space" : "Metaspace";
+
+  report_java_out_of_memory(space_string);
+
+  if (JvmtiExport::should_post_resource_exhausted()) {
+    JvmtiExport::post_resource_exhausted(
+        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
+        space_string);
+  }
+
+  if (!is_init_completed()) {
+    vm_exit_during_initialization("OutOfMemoryError", space_string);
+  }
+
+  if (out_of_compressed_class_space) {
+    THROW_OOP(Universe::out_of_memory_error_class_metaspace());
+  } else {
+    THROW_OOP(Universe::out_of_memory_error_metaspace());
+  }
 }
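
The predicate above separates the two OOM kinds by asking whether the
failed class-space request would push committed class space past the cap.
A worked example with illustrative numbers (64-bit, so BytesPerWord == 8):

    CompressedClassSpaceSize              = 128M
    committed_bytes(Metaspace::ClassType) = 127M
    class_chunk_size(word_size) * 8       =   2M
    127M + 2M > 128M  -> "Compressed class space" is reported, and
    Universe::out_of_memory_error_class_metaspace() is thrown instead of
    the generic Metaspace error.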
 
 void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
@@ -3264,12 +3445,16 @@
   }
 }
 
+void Metaspace::purge(MetadataType mdtype) {
+  get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
+}
+
 void Metaspace::purge() {
   MutexLockerEx cl(SpaceManager::expand_lock(),
                    Mutex::_no_safepoint_check_flag);
-  space_list()->purge();
+  purge(NonClassType);
   if (using_class_space()) {
-    class_space_list()->purge();
+    purge(ClassType);
   }
 }
 
@@ -3311,3 +3496,160 @@
     class_vsm()->dump(out);
   }
 }
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+
+class TestMetaspaceAuxTest : AllStatic {
+ public:
+  static void test_reserved() {
+    size_t reserved = MetaspaceAux::reserved_bytes();
+
+    assert(reserved > 0, "assert");
+
+    size_t committed  = MetaspaceAux::committed_bytes();
+    assert(committed <= reserved, "assert");
+
+    size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
+    assert(reserved_metadata > 0, "assert");
+    assert(reserved_metadata <= reserved, "assert");
+
+    if (UseCompressedClassPointers) {
+      size_t reserved_class    = MetaspaceAux::reserved_bytes(Metaspace::ClassType);
+      assert(reserved_class > 0, "assert");
+      assert(reserved_class < reserved, "assert");
+    }
+  }
+
+  static void test_committed() {
+    size_t committed = MetaspaceAux::committed_bytes();
+
+    assert(committed > 0, "assert");
+
+    size_t reserved  = MetaspaceAux::reserved_bytes();
+    assert(committed <= reserved, "assert");
+
+    size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType);
+    assert(committed_metadata > 0, "assert");
+    assert(committed_metadata <= committed, "assert");
+
+    if (UseCompressedClassPointers) {
+      size_t committed_class    = MetaspaceAux::committed_bytes(Metaspace::ClassType);
+      assert(committed_class > 0, "assert");
+      assert(committed_class < committed, "assert");
+    }
+  }
+
+  static void test_virtual_space_list_large_chunk() {
+    VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
+    MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
+    // Allocate a size larger than VirtualSpaceSize (256k) and add one page to
+    // make it _not_ be vm_allocation_granularity aligned on Windows.
+    size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
+    large_size += (os::vm_page_size()/BytesPerWord);
+    vs_list->get_new_chunk(large_size, large_size, 0);
+  }
+
+  static void test() {
+    test_reserved();
+    test_committed();
+    test_virtual_space_list_large_chunk();
+  }
+};
+
+void TestMetaspaceAux_test() {
+  TestMetaspaceAuxTest::test();
+}
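
In non-product builds of this era such entry points are run from the
internal-VM-tests harness behind -XX:+ExecuteInternalVMTests, roughly
along these lines (a sketch of the call site, which lives outside this
file):

    // Sketch only: the harness macro prints the test name, then calls it.
    #define run_unit_test(unit_test_function_call)              \
      tty->print_cr("Running test: " #unit_test_function_call); \
      unit_test_function_call;

    void execute_internal_vm_tests() {
      if (ExecuteInternalVMTests) {
        run_unit_test(TestMetaspaceAux_test());
        run_unit_test(TestVirtualSpaceNode_test());
      }
    }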
+
+class TestVirtualSpaceNodeTest {
+  static void chunk_up(size_t words_left, size_t& num_medium_chunks,
+                                          size_t& num_small_chunks,
+                                          size_t& num_specialized_chunks) {
+    num_medium_chunks = words_left / MediumChunk;
+    words_left = words_left % MediumChunk;
+
+    num_small_chunks = words_left / SmallChunk;
+    words_left = words_left % SmallChunk;
+    // how many specialized chunks can we get?
+    num_specialized_chunks = words_left / SpecializedChunk;
+    assert(words_left % SpecializedChunk == 0, "should be nothing left");
+  }
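
To make the arithmetic in chunk_up() concrete, here is the "4 pages of
VSN" scenario below worked through, assuming 4K pages, 8-byte words, and
the non-class chunk sizes used in this file (SpecializedChunk = 128,
SmallChunk = 512, MediumChunk = 8K words):

    page_chunks = 4 * 4096 / 8     = 2048 words committed
    words_left  = 2048 - 512 - 128 = 1408 words to retire
    1408 / 8192 = 0 medium chunks,      remainder 1408
    1408 /  512 = 2 small chunks,       remainder  384
     384 /  128 = 3 specialized chunks, remainder    0
    -> retire() should hand the ChunkManager 2 + 3 = 5 free chunks, which
       is exactly what the cm.sum_free_chunks_count() assert verifies.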
+
+ public:
+  static void test() {
+    MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
+    const size_t vsn_test_size_words = MediumChunk  * 4;
+    const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;
+
+    // The chunk sizes must be multiples of each other, or this will fail
+    STATIC_ASSERT(MediumChunk % SmallChunk == 0);
+    STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);
+
+    { // No committed memory in VSN
+      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
+      VirtualSpaceNode vsn(vsn_test_size_bytes);
+      vsn.initialize();
+      vsn.retire(&cm);
+      assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
+    }
+
+    { // All of VSN is committed, half is used by chunks
+      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
+      VirtualSpaceNode vsn(vsn_test_size_bytes);
+      vsn.initialize();
+      vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
+      vsn.get_chunk_vs(MediumChunk);
+      vsn.get_chunk_vs(MediumChunk);
+      vsn.retire(&cm);
+      assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
+      assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
+    }
+
+    { // 4 pages of VSN is committed, some is used by chunks
+      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
+      VirtualSpaceNode vsn(vsn_test_size_bytes);
+      const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
+      assert(page_chunks < MediumChunk, "Test expects medium chunks to be at least 4*page_size");
+      vsn.initialize();
+      vsn.expand_by(page_chunks, page_chunks);
+      vsn.get_chunk_vs(SmallChunk);
+      vsn.get_chunk_vs(SpecializedChunk);
+      vsn.retire(&cm);
+
+      // committed - used = words left to retire
+      const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;
+
+      size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
+      chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
+
+      assert(num_medium_chunks == 0, "should not get any medium chunks");
+      assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should match the computed chunk counts");
+      assert(cm.sum_free_chunks() == words_left, "sizes should add up");
+    }
+
+    { // Half of VSN is committed, a humongous chunk is used
+      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
+      VirtualSpaceNode vsn(vsn_test_size_bytes);
+      vsn.initialize();
+      vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
+      vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
+      vsn.retire(&cm);
+
+      const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
+      size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
+      chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
+
+      assert(num_medium_chunks == 0, "should not get any medium chunks");
+      assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should match the computed chunk counts");
+      assert(cm.sum_free_chunks() == words_left, "sizes should add up");
+    }
+  }
+};
+
+void TestVirtualSpaceNode_test() {
+  TestVirtualSpaceNodeTest::test();
+}
+
+#endif
--- a/src/share/vm/memory/metaspace.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/memory/metaspace.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -56,12 +56,15 @@
 //                       +-------------------+
 //
 
+class ChunkManager;
 class ClassLoaderData;
 class Metablock;
+class Metachunk;
 class MetaWord;
 class Mutex;
 class outputStream;
 class SpaceManager;
+class VirtualSpaceList;
 
 // Metaspaces each have a  SpaceManager and allocations
 // are done by the SpaceManager.  Allocations are done
@@ -76,8 +79,6 @@
 // allocate() method returns a block for use as a
 // quantum of metadata.
 
-class VirtualSpaceList;
-
 class Metaspace : public CHeapObj<mtClass> {
   friend class VMStructs;
   friend class SpaceManager;
@@ -86,9 +87,10 @@
   friend class MetaspaceAux;
 
  public:
-  enum MetadataType {ClassType = 0,
-                     NonClassType = ClassType + 1,
-                     MetadataTypeCount = ClassType + 2
+  enum MetadataType {
+    ClassType,
+    NonClassType,
+    MetadataTypeCount
   };
   enum MetaspaceType {
     StandardMetaspaceType,
@@ -102,22 +104,32 @@
  private:
   void initialize(Mutex* lock, MetaspaceType type);
 
+  // Get the first chunk for a Metaspace.  Used for
+  // special cases such as the boot class loader, reflection
+  // class loader and anonymous class loader.
+  Metachunk* get_initialization_chunk(MetadataType mdtype,
+                                      size_t chunk_word_size,
+                                      size_t chunk_bunch);
+
   // Align up the word size to the allocation word size
   static size_t align_word_size_up(size_t);
 
   // Aligned size of the metaspace.
-  static size_t _class_metaspace_size;
+  static size_t _compressed_class_space_size;
 
-  static size_t class_metaspace_size() {
-    return _class_metaspace_size;
+  static size_t compressed_class_space_size() {
+    return _compressed_class_space_size;
   }
-  static void set_class_metaspace_size(size_t metaspace_size) {
-    _class_metaspace_size = metaspace_size;
+  static void set_compressed_class_space_size(size_t size) {
+    _compressed_class_space_size = size;
   }
 
   static size_t _first_chunk_word_size;
   static size_t _first_class_chunk_word_size;
 
+  static size_t _commit_alignment;
+  static size_t _reserve_alignment;
+
   SpaceManager* _vsm;
   SpaceManager* vsm() const { return _vsm; }
 
@@ -127,13 +139,16 @@
   // Allocate space for metadata of type mdtype. This is space
   // within a Metachunk and is used by
   //   allocate(ClassLoaderData*, size_t, bool, MetadataType, TRAPS)
-  // which returns a Metablock.
   MetaWord* allocate(size_t word_size, MetadataType mdtype);
 
   // Virtual Space lists for both classes and other metadata
   static VirtualSpaceList* _space_list;
   static VirtualSpaceList* _class_space_list;
 
+  static ChunkManager* _chunk_manager_metadata;
+  static ChunkManager* _chunk_manager_class;
+
+ public:
   static VirtualSpaceList* space_list()       { return _space_list; }
   static VirtualSpaceList* class_space_list() { return _class_space_list; }
   static VirtualSpaceList* get_space_list(MetadataType mdtype) {
@@ -141,6 +156,14 @@
     return mdtype == ClassType ? class_space_list() : space_list();
   }
 
+  static ChunkManager* chunk_manager_metadata() { return _chunk_manager_metadata; }
+  static ChunkManager* chunk_manager_class()    { return _chunk_manager_class; }
+  static ChunkManager* get_chunk_manager(MetadataType mdtype) {
+    assert(mdtype != MetadataTypeCount, "MetadataTypeCount can't be used as mdtype");
+    return mdtype == ClassType ? chunk_manager_class() : chunk_manager_metadata();
+  }
+
+ private:
   // This is used by DumpSharedSpaces only, where only _vsm is used. So we will
   // maintain a single list for now.
   void record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size);
@@ -169,28 +192,34 @@
   AllocRecord * _alloc_record_head;
   AllocRecord * _alloc_record_tail;
 
+  size_t class_chunk_size(size_t word_size);
+
  public:
 
   Metaspace(Mutex* lock, MetaspaceType type);
   ~Metaspace();
 
-  // Initialize globals for Metaspace
+  static void ergo_initialize();
   static void global_initialize();
 
   static size_t first_chunk_word_size() { return _first_chunk_word_size; }
   static size_t first_class_chunk_word_size() { return _first_class_chunk_word_size; }
 
+  static size_t reserve_alignment()       { return _reserve_alignment; }
+  static size_t reserve_alignment_words() { return _reserve_alignment / BytesPerWord; }
+  static size_t commit_alignment()        { return _commit_alignment; }
+  static size_t commit_alignment_words()  { return _commit_alignment / BytesPerWord; }
+
   char*  bottom() const;
   size_t used_words_slow(MetadataType mdtype) const;
-  size_t free_words(MetadataType mdtype) const;
+  size_t free_words_slow(MetadataType mdtype) const;
   size_t capacity_words_slow(MetadataType mdtype) const;
-  size_t waste_words(MetadataType mdtype) const;
 
   size_t used_bytes_slow(MetadataType mdtype) const;
   size_t capacity_bytes_slow(MetadataType mdtype) const;
 
-  static Metablock* allocate(ClassLoaderData* loader_data, size_t word_size,
-                             bool read_only, MetaspaceObj::Type type, TRAPS);
+  static MetaWord* allocate(ClassLoaderData* loader_data, size_t word_size,
+                            bool read_only, MetaspaceObj::Type type, TRAPS);
   void deallocate(MetaWord* ptr, size_t byte_size, bool is_class);
 
   MetaWord* expand_and_allocate(size_t size,
@@ -200,8 +229,12 @@
   void dump(outputStream* const out) const;
 
   // Free empty virtualspaces
+  static void purge(MetadataType mdtype);
   static void purge();
 
+  static void report_metadata_oome(ClassLoaderData* loader_data, size_t word_size,
+                                   MetadataType mdtype, TRAPS);
+
   void print_on(outputStream* st) const;
   // Debugging support
   void verify();
@@ -213,27 +246,26 @@
 
   void iterate(AllocRecordClosure *closure);
 
-  // Return TRUE only if UseCompressedKlassPointers is True and DumpSharedSpaces is False.
+  // Return TRUE only if UseCompressedClassPointers is True and DumpSharedSpaces is False.
   static bool using_class_space() {
-    return NOT_LP64(false) LP64_ONLY(UseCompressedKlassPointers && !DumpSharedSpaces);
+    return NOT_LP64(false) LP64_ONLY(UseCompressedClassPointers && !DumpSharedSpaces);
+  }
+
+  static bool is_class_space_allocation(MetadataType mdType) {
+    return mdType == ClassType && using_class_space();
   }
 
 };
 
 class MetaspaceAux : AllStatic {
-  static size_t free_chunks_total(Metaspace::MetadataType mdtype);
-
- public:
-  // Statistics for class space and data space in metaspace.
+  static size_t free_chunks_total_words(Metaspace::MetadataType mdtype);
 
   // These methods iterate over the classloader data graph
   // for the given Metaspace type.  These are slow.
   static size_t used_bytes_slow(Metaspace::MetadataType mdtype);
-  static size_t free_in_bytes(Metaspace::MetadataType mdtype);
+  static size_t free_bytes_slow(Metaspace::MetadataType mdtype);
   static size_t capacity_bytes_slow(Metaspace::MetadataType mdtype);
-
-  // Iterates over the virtual space list.
-  static size_t reserved_in_bytes(Metaspace::MetadataType mdtype);
+  static size_t capacity_bytes_slow();
 
   // Running sum of space in all Metachunks that has been
   // allocated to a Metaspace.  This is used instead of
@@ -263,17 +295,16 @@
   }
 
   // Used by MetaspaceCounters
-  static size_t free_chunks_total();
-  static size_t free_chunks_total_in_bytes();
-  static size_t free_chunks_total_in_bytes(Metaspace::MetadataType mdtype);
+  static size_t free_chunks_total_words();
+  static size_t free_chunks_total_bytes();
+  static size_t free_chunks_total_bytes(Metaspace::MetadataType mdtype);
 
   static size_t allocated_capacity_words(Metaspace::MetadataType mdtype) {
     return _allocated_capacity_words[mdtype];
   }
   static size_t allocated_capacity_words() {
-    return _allocated_capacity_words[Metaspace::NonClassType] +
-           (Metaspace::using_class_space() ?
-           _allocated_capacity_words[Metaspace::ClassType] : 0);
+    return allocated_capacity_words(Metaspace::NonClassType) +
+           allocated_capacity_words(Metaspace::ClassType);
   }
   static size_t allocated_capacity_bytes(Metaspace::MetadataType mdtype) {
     return allocated_capacity_words(mdtype) * BytesPerWord;
@@ -286,9 +317,8 @@
     return _allocated_used_words[mdtype];
   }
   static size_t allocated_used_words() {
-    return _allocated_used_words[Metaspace::NonClassType] +
-           (Metaspace::using_class_space() ?
-           _allocated_used_words[Metaspace::ClassType] : 0);
+    return allocated_used_words(Metaspace::NonClassType) +
+           allocated_used_words(Metaspace::ClassType);
   }
   static size_t allocated_used_bytes(Metaspace::MetadataType mdtype) {
     return allocated_used_words(mdtype) * BytesPerWord;
@@ -300,31 +330,22 @@
   static size_t free_bytes();
   static size_t free_bytes(Metaspace::MetadataType mdtype);
 
-  // Total capacity in all Metaspaces
-  static size_t capacity_bytes_slow() {
-#ifdef PRODUCT
-    // Use allocated_capacity_bytes() in PRODUCT instead of this function.
-    guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
-#endif
-    size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
-    size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
-    assert(allocated_capacity_bytes() == class_capacity + non_class_capacity,
-           err_msg("bad accounting: allocated_capacity_bytes() " SIZE_FORMAT
-             " class_capacity + non_class_capacity " SIZE_FORMAT
-             " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
-             allocated_capacity_bytes(), class_capacity + non_class_capacity,
-             class_capacity, non_class_capacity));
-
-    return class_capacity + non_class_capacity;
+  static size_t reserved_bytes(Metaspace::MetadataType mdtype);
+  static size_t reserved_bytes() {
+    return reserved_bytes(Metaspace::ClassType) +
+           reserved_bytes(Metaspace::NonClassType);
   }
 
-  // Total space reserved in all Metaspaces
-  static size_t reserved_in_bytes() {
-    return reserved_in_bytes(Metaspace::ClassType) +
-           reserved_in_bytes(Metaspace::NonClassType);
+  static size_t committed_bytes(Metaspace::MetadataType mdtype);
+  static size_t committed_bytes() {
+    return committed_bytes(Metaspace::ClassType) +
+           committed_bytes(Metaspace::NonClassType);
   }
 
-  static size_t min_chunk_size();
+  static size_t min_chunk_size_words();
+  static size_t min_chunk_size_bytes() {
+    return min_chunk_size_words() * BytesPerWord;
+  }
 
   // Print change in used metadata.
   static void print_metaspace_change(size_t prev_metadata_used);
@@ -348,17 +369,10 @@
 
 class MetaspaceGC : AllStatic {
 
-  // The current high-water-mark for inducing a GC.  When
-  // the capacity of all space in the virtual lists reaches this value,
-  // a GC is induced and the value is increased.  This should be changed
-  // to the space actually used for allocations to avoid affects of
-  // fragmentation losses to partially used chunks.  Size is in words.
-  static size_t _capacity_until_GC;
-
-  // After a GC is done any allocation that fails should try to expand
-  // the capacity of the Metaspaces.  This flag is set during attempts
-  // to allocate in the VMGCOperation that does the GC.
-  static bool _expand_after_GC;
+  // The current high-water-mark for inducing a GC.
+  // When committed memory of all metaspaces reaches this value,
+  // a GC is induced and the value is increased. Size is in bytes.
+  static volatile intptr_t _capacity_until_GC;
 
   // For a CMS collection, signal that a concurrent collection should
   // be started.
@@ -366,20 +380,16 @@
 
   static uint _shrink_factor;
 
-  static void set_capacity_until_GC(size_t v) { _capacity_until_GC = v; }
-
   static size_t shrink_factor() { return _shrink_factor; }
   void set_shrink_factor(uint v) { _shrink_factor = v; }
 
  public:
 
-  static size_t capacity_until_GC() { return _capacity_until_GC; }
-  static void inc_capacity_until_GC(size_t v) { _capacity_until_GC += v; }
-  static void dec_capacity_until_GC(size_t v) {
-    _capacity_until_GC = _capacity_until_GC > v ? _capacity_until_GC - v : 0;
-  }
-  static bool expand_after_GC()           { return _expand_after_GC; }
-  static void set_expand_after_GC(bool v) { _expand_after_GC = v; }
+  static void initialize() { _capacity_until_GC = MetaspaceSize; }
+
+  static size_t capacity_until_GC();
+  static size_t inc_capacity_until_GC(size_t v);
+  static size_t dec_capacity_until_GC(size_t v);
 
   static bool should_concurrent_collect() { return _should_concurrent_collect; }
   static void set_should_concurrent_collect(bool v) {
@@ -387,11 +397,14 @@
   }
 
   // The amount to increase the high-water-mark (_capacity_until_GC)
-  static size_t delta_capacity_until_GC(size_t word_size);
+  static size_t delta_capacity_until_GC(size_t bytes);
 
-  // It is expected that this will be called when the current capacity
-  // has been used and a GC should be considered.
-  static bool should_expand(VirtualSpaceList* vsl, size_t word_size);
+  // Tells whether we can expand metaspace without hitting the set limits.
+  static bool can_expand(size_t words, bool is_class);
+
+  // Returns amount that we can expand without hitting a GC,
+  // measured in words.
+  static size_t allowed_expansion();
 
   // Calculate the new high-water mark at which to induce
   // a GC.
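
Since _capacity_until_GC is now a volatile intptr_t shared between allocating
threads, the new inc/dec accessors are presumably implemented with atomic
arithmetic rather than the old unsynchronized +=. A minimal sketch of one
plausible shape, assuming HotSpot's Atomic::add_ptr primitive (the real bodies
live in metaspace.cpp and are not part of this hunk):

    size_t MetaspaceGC::capacity_until_GC() {
      return (size_t)_capacity_until_GC;
    }
    size_t MetaspaceGC::inc_capacity_until_GC(size_t v) {
      // Atomic::add_ptr returns the updated value, so the caller can
      // learn the new high-water-mark without a second read.
      return (size_t)Atomic::add_ptr((intptr_t)v, &_capacity_until_GC);
    }
    size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
      return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
    }
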
--- a/src/share/vm/memory/metaspaceCounters.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/memory/metaspaceCounters.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -65,26 +65,25 @@
 
 MetaspacePerfCounters* MetaspaceCounters::_perf_counters = NULL;
 
-size_t MetaspaceCounters::calculate_capacity() {
-  // The total capacity is the sum of
-  //   1) capacity of Metachunks in use by all Metaspaces
-  //   2) unused space at the end of each Metachunk
-  //   3) space in the freelist
-  size_t total_capacity = MetaspaceAux::allocated_capacity_bytes()
-    + MetaspaceAux::free_bytes() + MetaspaceAux::free_chunks_total_in_bytes();
-  return total_capacity;
+size_t MetaspaceCounters::used() {
+  return MetaspaceAux::allocated_used_bytes();
+}
+
+size_t MetaspaceCounters::capacity() {
+  return MetaspaceAux::committed_bytes();
+}
+
+size_t MetaspaceCounters::max_capacity() {
+  return MetaspaceAux::reserved_bytes();
 }
 
 void MetaspaceCounters::initialize_performance_counters() {
   if (UsePerfData) {
     assert(_perf_counters == NULL, "Should only be initialized once");
 
-    size_t min_capacity = MetaspaceAux::min_chunk_size();
-    size_t capacity = calculate_capacity();
-    size_t max_capacity = MetaspaceAux::reserved_in_bytes();
-    size_t used = MetaspaceAux::allocated_used_bytes();
-
-    _perf_counters = new MetaspacePerfCounters("metaspace", min_capacity, capacity, max_capacity, used);
+    size_t min_capacity = 0;
+    _perf_counters = new MetaspacePerfCounters("metaspace", min_capacity,
+                                               capacity(), max_capacity(), used());
   }
 }
 
@@ -92,31 +91,29 @@
   if (UsePerfData) {
     assert(_perf_counters != NULL, "Should be initialized");
 
-    size_t capacity = calculate_capacity();
-    size_t max_capacity = MetaspaceAux::reserved_in_bytes();
-    size_t used = MetaspaceAux::allocated_used_bytes();
-
-    _perf_counters->update(capacity, max_capacity, used);
+    _perf_counters->update(capacity(), max_capacity(), used());
   }
 }
 
 MetaspacePerfCounters* CompressedClassSpaceCounters::_perf_counters = NULL;
 
-size_t CompressedClassSpaceCounters::calculate_capacity() {
-    return MetaspaceAux::allocated_capacity_bytes(_class_type) +
-           MetaspaceAux::free_bytes(_class_type) +
-           MetaspaceAux::free_chunks_total_in_bytes(_class_type);
+size_t CompressedClassSpaceCounters::used() {
+  return MetaspaceAux::allocated_used_bytes(Metaspace::ClassType);
+}
+
+size_t CompressedClassSpaceCounters::capacity() {
+  return MetaspaceAux::committed_bytes(Metaspace::ClassType);
+}
+
+size_t CompressedClassSpaceCounters::max_capacity() {
+  return MetaspaceAux::reserved_bytes(Metaspace::ClassType);
 }
 
 void CompressedClassSpaceCounters::update_performance_counters() {
-  if (UsePerfData && UseCompressedKlassPointers) {
+  if (UsePerfData && UseCompressedClassPointers) {
     assert(_perf_counters != NULL, "Should be initialized");
 
-    size_t capacity = calculate_capacity();
-    size_t max_capacity = MetaspaceAux::reserved_in_bytes(_class_type);
-    size_t used = MetaspaceAux::allocated_used_bytes(_class_type);
-
-    _perf_counters->update(capacity, max_capacity, used);
+    _perf_counters->update(capacity(), max_capacity(), used());
   }
 }
 
@@ -125,13 +122,10 @@
     assert(_perf_counters == NULL, "Should only be initialized once");
     const char* ns = "compressedclassspace";
 
-    if (UseCompressedKlassPointers) {
-      size_t min_capacity = MetaspaceAux::min_chunk_size();
-      size_t capacity = calculate_capacity();
-      size_t max_capacity = MetaspaceAux::reserved_in_bytes(_class_type);
-      size_t used = MetaspaceAux::allocated_used_bytes(_class_type);
-
-      _perf_counters = new MetaspacePerfCounters(ns, min_capacity, capacity, max_capacity, used);
+    if (UseCompressedClassPointers) {
+      size_t min_capacity = 0;
+      _perf_counters = new MetaspacePerfCounters(ns, min_capacity, capacity(),
+                                                 max_capacity(), used());
     } else {
       _perf_counters = new MetaspacePerfCounters(ns, 0, 0, 0, 0);
     }
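
With this rework the three published quantities map one-to-one onto
MetaspaceAux accounting, so they should always be ordered: bytes in use never
exceed committed bytes, and committed bytes never exceed the reserved address
range. A hedged debug-only check one could imagine (not part of this change):

    #ifdef ASSERT
    static void verify_counter_ordering() {
      assert(MetaspaceAux::allocated_used_bytes() <= MetaspaceAux::committed_bytes(),
             "used must not exceed committed");
      assert(MetaspaceAux::committed_bytes() <= MetaspaceAux::reserved_bytes(),
             "committed must not exceed reserved");
    }
    #endif
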
--- a/src/share/vm/memory/metaspaceCounters.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/memory/metaspaceCounters.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -25,13 +25,15 @@
 #ifndef SHARE_VM_MEMORY_METASPACECOUNTERS_HPP
 #define SHARE_VM_MEMORY_METASPACECOUNTERS_HPP
 
-#include "memory/metaspace.hpp"
+#include "memory/allocation.hpp"
 
 class MetaspacePerfCounters;
 
 class MetaspaceCounters: public AllStatic {
   static MetaspacePerfCounters* _perf_counters;
-  static size_t calculate_capacity();
+  static size_t used();
+  static size_t capacity();
+  static size_t max_capacity();
 
  public:
   static void initialize_performance_counters();
@@ -40,8 +42,9 @@
 
 class CompressedClassSpaceCounters: public AllStatic {
   static MetaspacePerfCounters* _perf_counters;
-  static size_t calculate_capacity();
-  static const Metaspace::MetadataType _class_type = Metaspace::ClassType;
+  static size_t used();
+  static size_t capacity();
+  static size_t max_capacity();
 
  public:
   static void initialize_performance_counters();
--- a/src/share/vm/memory/metaspaceShared.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/memory/metaspaceShared.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -103,9 +103,10 @@
     if (k->oop_is_instance()) {
       InstanceKlass* ik = InstanceKlass::cast(k);
       for (int i = 0; i < ik->methods()->length(); i++) {
-        ResourceMark rm;
         Method* m = ik->methods()->at(i);
-        (new Fingerprinter(m))->fingerprint();
+        Fingerprinter fp(m);
+        // The side effect of this call sets the method's fingerprint field.
+        fp.fingerprint();
       }
     }
   }
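
The Fingerprinter is now a plain stack object instead of being new'ed in the
resource area, which is why the per-iteration ResourceMark could be dropped:
the object never escapes the loop body. The pattern, sketched (illustrative
only):

    {
      Fingerprinter fp(m);  // stack-allocated, destroyed at the end of the scope
      fp.fingerprint();     // called only for its side effect on the Method
    }
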
--- a/src/share/vm/memory/referenceProcessor.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/memory/referenceProcessor.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -367,7 +367,7 @@
       next_d = java_lang_ref_Reference::discovered(obj);
       if (TraceReferenceGC && PrintGCDetails) {
         gclog_or_tty->print_cr("        obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT,
-                               obj, next_d);
+                               (void *)obj, (void *)next_d);
       }
       assert(java_lang_ref_Reference::next(obj) == NULL,
              "Reference not active; should not be discovered");
@@ -392,7 +392,7 @@
       next_d = java_lang_ref_Reference::discovered(obj);
       if (TraceReferenceGC && PrintGCDetails) {
         gclog_or_tty->print_cr("        obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT,
-                               obj, next_d);
+                               (void *)obj, (void *)next_d);
       }
       assert(java_lang_ref_Reference::next(obj) == NULL,
              "The reference should not be enqueued");
@@ -562,7 +562,7 @@
         !policy->should_clear_reference(iter.obj(), _soft_ref_timestamp_clock)) {
       if (TraceReferenceGC) {
         gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s"  ") by policy",
-                               iter.obj(), iter.obj()->klass()->internal_name());
+                               (void *)iter.obj(), iter.obj()->klass()->internal_name());
       }
       // Remove Reference object from list
       iter.remove();
@@ -601,7 +601,7 @@
     if (iter.is_referent_alive()) {
       if (TraceReferenceGC) {
         gclog_or_tty->print_cr("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)",
-                               iter.obj(), iter.obj()->klass()->internal_name());
+                               (void *)iter.obj(), iter.obj()->klass()->internal_name());
       }
       // The referent is reachable after all.
       // Remove Reference object from list.
@@ -687,7 +687,7 @@
     if (TraceReferenceGC) {
       gclog_or_tty->print_cr("Adding %sreference (" INTPTR_FORMAT ": %s) as pending",
                              clear_referent ? "cleared " : "",
-                             iter.obj(), iter.obj()->klass()->internal_name());
+                             (void *)iter.obj(), iter.obj()->klass()->internal_name());
     }
     assert(iter.obj()->is_oop(UseConcMarkSweepGC), "Adding a bad reference");
     iter.next();
@@ -1003,7 +1003,7 @@
           gclog_or_tty->print_cr("clean_up_discovered_list: Dropping Reference: "
             INTPTR_FORMAT " with next field: " INTPTR_FORMAT
             " and referent: " INTPTR_FORMAT,
-            iter.obj(), next, iter.referent());
+            (void *)iter.obj(), (void *)next, (void *)iter.referent());
         }
       )
       // Remove Reference object from list
@@ -1103,14 +1103,14 @@
 
     if (TraceReferenceGC) {
       gclog_or_tty->print_cr("Discovered reference (mt) (" INTPTR_FORMAT ": %s)",
-                             obj, obj->klass()->internal_name());
+                             (void *)obj, obj->klass()->internal_name());
     }
   } else {
     // If retest was non NULL, another thread beat us to it:
     // The reference has already been discovered...
     if (TraceReferenceGC) {
       gclog_or_tty->print_cr("Already discovered reference (" INTPTR_FORMAT ": %s)",
-                             obj, obj->klass()->internal_name());
+                             (void *)obj, obj->klass()->internal_name());
     }
   }
 }
@@ -1125,7 +1125,7 @@
   assert(da ? referent->is_oop() : referent->is_oop_or_null(),
          err_msg("Bad referent " INTPTR_FORMAT " found in Reference "
                  INTPTR_FORMAT " during %satomic discovery ",
-                 (intptr_t)referent, (intptr_t)obj, da ? "" : "non-"));
+                 (void *)referent, (void *)obj, da ? "" : "non-"));
 }
 #endif
 
@@ -1205,7 +1205,7 @@
     // The reference has already been discovered...
     if (TraceReferenceGC) {
       gclog_or_tty->print_cr("Already discovered reference (" INTPTR_FORMAT ": %s)",
-                             obj, obj->klass()->internal_name());
+                             (void *)obj, obj->klass()->internal_name());
     }
     if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
       // assumes that an object is not processed twice;
@@ -1273,7 +1273,7 @@
 
     if (TraceReferenceGC) {
       gclog_or_tty->print_cr("Discovered reference (" INTPTR_FORMAT ": %s)",
-                                obj, obj->klass()->internal_name());
+                                (void *)obj, obj->klass()->internal_name());
     }
   }
   assert(obj->is_oop(), "Discovered a bad reference");
@@ -1372,7 +1372,7 @@
       // active; we need to trace and mark its cohort.
       if (TraceReferenceGC) {
         gclog_or_tty->print_cr("Precleaning Reference (" INTPTR_FORMAT ": %s)",
-                               iter.obj(), iter.obj()->klass()->internal_name());
+                               (void *)iter.obj(), iter.obj()->klass()->internal_name());
       }
       // Remove Reference object from list
       iter.remove();
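
All of these print sites pass an oop to a varargs printer through
INTPTR_FORMAT; in builds where oop is a real C++ class (CHECK_UNHANDLED_OOPS)
it no longer converts implicitly, so the value must be cast explicitly first.
The pattern, sketched:

    // cast the oop to a plain pointer before handing it to *printf-style code
    gclog_or_tty->print_cr("obj " INTPTR_FORMAT, (void *)obj);
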
--- a/src/share/vm/memory/universe.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/memory/universe.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -602,7 +602,7 @@
   }
 }
 
-static intptr_t non_oop_bits = 0;
+intptr_t Universe::_non_oop_bits = 0;
 
 void* Universe::non_oop_word() {
   // Neither the high bits nor the low bits of this value is allowed
@@ -616,11 +616,11 @@
   // Using the OS-supplied non-memory-address word (usually 0 or -1)
   // will take care of the high bits, however many there are.
 
-  if (non_oop_bits == 0) {
-    non_oop_bits = (intptr_t)os::non_memory_address_word() | 1;
+  if (_non_oop_bits == 0) {
+    _non_oop_bits = (intptr_t)os::non_memory_address_word() | 1;
   }
 
-  return (void*)non_oop_bits;
+  return (void*)_non_oop_bits;
 }
 
 jint universe_init() {
@@ -677,13 +677,13 @@
 // HeapBased - Use compressed oops with heap base + encoding.
 
 // 4Gb
-static const uint64_t NarrowOopHeapMax = (uint64_t(max_juint) + 1);
+static const uint64_t UnscaledOopHeapMax = (uint64_t(max_juint) + 1);
 // 32Gb
-// OopEncodingHeapMax == NarrowOopHeapMax << LogMinObjAlignmentInBytes;
+// OopEncodingHeapMax == UnscaledOopHeapMax << LogMinObjAlignmentInBytes;
 
 char* Universe::preferred_heap_base(size_t heap_size, size_t alignment, NARROW_OOP_MODE mode) {
   assert(is_size_aligned((size_t)OopEncodingHeapMax, alignment), "Must be");
-  assert(is_size_aligned((size_t)NarrowOopHeapMax, alignment), "Must be");
+  assert(is_size_aligned((size_t)UnscaledOopHeapMax, alignment), "Must be");
   assert(is_size_aligned(heap_size, alignment), "Must be");
 
   uintx heap_base_min_address_aligned = align_size_up(HeapBaseMinAddress, alignment);
@@ -702,20 +702,40 @@
     // If the total size is small enough to allow UnscaledNarrowOop then
     // just use UnscaledNarrowOop.
     } else if ((total_size <= OopEncodingHeapMax) && (mode != HeapBasedNarrowOop)) {
-      if ((total_size <= NarrowOopHeapMax) && (mode == UnscaledNarrowOop) &&
+      if ((total_size <= UnscaledOopHeapMax) && (mode == UnscaledNarrowOop) &&
           (Universe::narrow_oop_shift() == 0)) {
         // Use 32-bits oops without encoding and
         // place heap's top on the 4Gb boundary
-        base = (NarrowOopHeapMax - heap_size);
+        base = (UnscaledOopHeapMax - heap_size);
       } else {
         // Can't reserve with NarrowOopShift == 0
         Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
+
         if (mode == UnscaledNarrowOop ||
-            mode == ZeroBasedNarrowOop && total_size <= NarrowOopHeapMax) {
+            mode == ZeroBasedNarrowOop && total_size <= UnscaledOopHeapMax) {
+
           // Use zero based compressed oops with encoding and
           // place heap's top on the 32Gb boundary in case
           // total_size > 4Gb or failed to reserve below 4Gb.
-          base = (OopEncodingHeapMax - heap_size);
+          uint64_t heap_top = OopEncodingHeapMax;
+
+          // For small heaps, save some space for compressed class pointer
+          // space so it can be decoded with no base.
+          if (UseCompressedClassPointers && !UseSharedSpaces &&
+              OopEncodingHeapMax <= 32*G) {
+
+            uint64_t class_space = align_size_up(CompressedClassSpaceSize, alignment);
+            assert(is_size_aligned((size_t)OopEncodingHeapMax-class_space,
+                   alignment), "difference must be aligned too");
+            uint64_t new_top = OopEncodingHeapMax-class_space;
+
+            if (total_size <= new_top) {
+              heap_top = new_top;
+            }
+          }
+
+          // Align base to the adjusted top of the heap
+          base = heap_top - heap_size;
         }
       }
     } else {
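
A worked example of the new carve-out, with values assumed for illustration
(8-byte object alignment, so OopEncodingHeapMax is 32G):

    //   CompressedClassSpaceSize = 1G  -> class_space = 1G (already aligned)
    //   new_top   = 32G - 1G = 31G
    //   heap_size = 26G and total_size <= 31G -> heap_top = 31G
    //   base      = 31G - 26G = 5G
    // The [31G, 32G) window stays free so the compressed class space can
    // be reserved there and decoded with no base, using only the oop shift.
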
@@ -737,7 +757,7 @@
       // Set to a non-NULL value so the ReservedSpace ctor computes
       // the correct no-access prefix.
       // The final value will be set in initialize_heap() below.
-      Universe::set_narrow_oop_base((address)NarrowOopHeapMax);
+      Universe::set_narrow_oop_base((address)UnscaledOopHeapMax);
 #if defined(_WIN64) || defined(AIX)
       if (UseLargePages) {
         // Cannot allocate guard pages for implicit checks in indexed
@@ -838,7 +858,7 @@
         Universe::set_narrow_oop_use_implicit_null_checks(true);
       }
 #endif //  _WIN64
-      if((uint64_t)Universe::heap()->reserved_region().end() > NarrowOopHeapMax) {
+      if((uint64_t)Universe::heap()->reserved_region().end() > UnscaledOopHeapMax) {
         // Can't reserve heap below 4Gb.
         Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
       } else {
@@ -877,13 +897,16 @@
 
 // Reserve the Java heap, which is now the same for all GCs.
 ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
+  assert(alignment <= Arguments::conservative_max_heap_alignment(),
+      err_msg("actual alignment "SIZE_FORMAT" must be within maximum heap alignment "SIZE_FORMAT,
+          alignment, Arguments::conservative_max_heap_alignment()));
   size_t total_reserved = align_size_up(heap_size, alignment);
   assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
       "heap size is too big for compressed oops");
 
   bool use_large_pages = UseLargePages && is_size_aligned(alignment, os::large_page_size());
   assert(!UseLargePages
-      || UseParallelOldGC
+      || UseParallelGC
       || use_large_pages, "Wrong alignment to use large pages");
 
   char* addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::UnscaledNarrowOop);
@@ -1031,9 +1054,9 @@
     Handle msg = java_lang_String::create_from_str("Java heap space", CHECK_false);
     java_lang_Throwable::set_message(Universe::_out_of_memory_error_java_heap, msg());
 
-    msg = java_lang_String::create_from_str("Metadata space", CHECK_false);
+    msg = java_lang_String::create_from_str("Metaspace", CHECK_false);
     java_lang_Throwable::set_message(Universe::_out_of_memory_error_metaspace, msg());
-    msg = java_lang_String::create_from_str("Class Metadata space", CHECK_false);
+    msg = java_lang_String::create_from_str("Compressed class space", CHECK_false);
     java_lang_Throwable::set_message(Universe::_out_of_memory_error_class_metaspace, msg());
 
     msg = java_lang_String::create_from_str("Requested array size exceeds VM limit", CHECK_false);
--- a/src/share/vm/memory/universe.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/memory/universe.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -179,9 +179,11 @@
   // The particular choice of collected heap.
   static CollectedHeap* _collectedHeap;
 
+  static intptr_t _non_oop_bits;
+
   // For UseCompressedOops.
   static struct NarrowPtrStruct _narrow_oop;
-  // For UseCompressedKlassPointers.
+  // For UseCompressedClassPointers.
   static struct NarrowPtrStruct _narrow_klass;
   static address _narrow_ptrs_base;
 
@@ -229,7 +231,7 @@
     _narrow_oop._base    = base;
   }
   static void     set_narrow_klass_base(address base) {
-    assert(UseCompressedKlassPointers, "no compressed klass ptrs?");
+    assert(UseCompressedClassPointers, "no compressed klass ptrs?");
     _narrow_klass._base   = base;
   }
   static void     set_narrow_oop_use_implicit_null_checks(bool use) {
@@ -353,7 +355,7 @@
   static int      narrow_oop_shift()                      { return  _narrow_oop._shift; }
   static bool     narrow_oop_use_implicit_null_checks()   { return  _narrow_oop._use_implicit_null_checks; }
 
-  // For UseCompressedKlassPointers
+  // For UseCompressedClassPointers
   static address  narrow_klass_base()                     { return  _narrow_klass._base; }
   static bool  is_narrow_klass_base(void* addr)           { return (narrow_klass_base() == (address)addr); }
   static int      narrow_klass_shift()                    { return  _narrow_klass._shift; }
--- a/src/share/vm/oops/arrayOop.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/oops/arrayOop.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -65,7 +65,7 @@
   // declared nonstatic fields in arrayOopDesc if not compressed, otherwise
   // it occupies the second half of the _klass field in oopDesc.
   static int length_offset_in_bytes() {
-    return UseCompressedKlassPointers ? klass_gap_offset_in_bytes() :
+    return UseCompressedClassPointers ? klass_gap_offset_in_bytes() :
                                sizeof(arrayOopDesc);
   }
 
--- a/src/share/vm/oops/constantPool.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/oops/constantPool.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -40,7 +40,6 @@
 #include "runtime/init.hpp"
 #include "runtime/javaCalls.hpp"
 #include "runtime/signature.hpp"
-#include "runtime/synchronizer.hpp"
 #include "runtime/vframe.hpp"
 
 ConstantPool* ConstantPool::allocate(ClassLoaderData* loader_data, int length, TRAPS) {
@@ -70,6 +69,7 @@
 
   // only set to non-zero if constant pool is merged by RedefineClasses
   set_version(0);
+  set_lock(new Monitor(Monitor::nonleaf + 2, "A constant pool lock"));
 
   // initialize tag array
   int length = tags->length();
@@ -95,6 +95,9 @@
 void ConstantPool::release_C_heap_structures() {
   // walk constant pool and decrement symbol reference counts
   unreference_symbols();
+
+  delete _lock;
+  set_lock(NULL);
 }
 
 objArrayOop ConstantPool::resolved_references() const {
@@ -108,16 +111,16 @@
 void ConstantPool::initialize_resolved_references(ClassLoaderData* loader_data,
                                                   intStack reference_map,
                                                   int constant_pool_map_length,
-                                                   TRAPS) {
+                                                  TRAPS) {
   // Initialized the resolved object cache.
   int map_length = reference_map.length();
   if (map_length > 0) {
     // Only need mapping back to constant pool entries.  The map isn't used for
-    // invokedynamic resolved_reference entries.  The constant pool cache index
-    // has the mapping back to both the constant pool and to the resolved
-    // reference index.
+    // invokedynamic resolved_reference entries.  For invokedynamic entries,
+    // the constant pool cache index has the mapping back to both the constant
+    // pool and to the resolved reference index.
     if (constant_pool_map_length > 0) {
-      Array<u2>* om = MetadataFactory::new_array<u2>(loader_data, map_length, CHECK);
+      Array<u2>* om = MetadataFactory::new_array<u2>(loader_data, constant_pool_map_length, CHECK);
 
       for (int i = 0; i < constant_pool_map_length; i++) {
         int x = reference_map.at(i);
@@ -151,6 +154,9 @@
       ClassLoaderData* loader_data = pool_holder()->class_loader_data();
       set_resolved_references(loader_data->add_handle(refs_handle));
     }
+
+    // Also need to recreate the mutex.  Make sure this matches the constructor
+    set_lock(new Monitor(Monitor::nonleaf + 2, "A constant pool lock"));
   }
 }
 
@@ -161,37 +167,14 @@
   set_resolved_reference_length(
     resolved_references() != NULL ? resolved_references()->length() : 0);
   set_resolved_references(NULL);
-}
-
-oop ConstantPool::lock() {
-  if (_pool_holder) {
-    // We re-use the _pool_holder's init_lock to reduce footprint.
-    // Notes on deadlocks:
-    // [1] This lock is a Java oop, so it can be recursively locked by
-    //     the same thread without self-deadlocks.
-    // [2] Deadlock will happen if there is circular dependency between
-    //     the <clinit> of two Java classes. However, in this case,
-    //     the deadlock would have happened long before we reach
-    //     ConstantPool::lock(), so reusing init_lock does not
-    //     increase the possibility of deadlock.
-    return _pool_holder->init_lock();
-  } else {
-    return NULL;
-  }
+  set_lock(NULL);
 }
 
 int ConstantPool::cp_to_object_index(int cp_index) {
   // this is harder; don't do this so much.
-  for (int i = 0; i< reference_map()->length(); i++) {
-    if (reference_map()->at(i) == cp_index) return i;
-    // Zero entry is divider between constant pool indices for strings,
-    // method handles and method types. After that the index is a constant
-    // pool cache index for invokedynamic.  Stop when zero (which can never
-    // be a constant pool index)
-    if (reference_map()->at(i) == 0) break;
-  }
-  // We might not find the index.
-  return _no_index_sentinel;
+  int i = reference_map()->find(cp_index);
+  // We might not find the index for a jsr292 call.
+  return (i < 0) ? _no_index_sentinel : i;
 }
 
 Klass* ConstantPool::klass_at_impl(constantPoolHandle this_oop, int which, TRAPS) {
@@ -218,9 +201,7 @@
 
   Symbol* name = NULL;
   Handle       loader;
-  {
-    oop cplock = this_oop->lock();
-    ObjectLocker ol(cplock , THREAD, cplock != NULL);
+  {  MonitorLockerEx ml(this_oop->lock());
 
     if (this_oop->tag_at(which).is_unresolved_klass()) {
       if (this_oop->tag_at(which).is_unresolved_klass_in_error()) {
@@ -267,8 +248,7 @@
 
       bool throw_orig_error = false;
       {
-        oop cplock = this_oop->lock();
-        ObjectLocker ol(cplock, THREAD, cplock != NULL);
+        MonitorLockerEx ml(this_oop->lock());
 
         // some other thread has beaten us and has resolved the class.
         if (this_oop->tag_at(which).is_klass()) {
@@ -336,8 +316,7 @@
       }
       return k();
     } else {
-      oop cplock = this_oop->lock();
-      ObjectLocker ol(cplock, THREAD, cplock != NULL);
+      MonitorLockerEx ml(this_oop->lock());
       // Only updated constant pool - if it is resolved.
       do_resolve = this_oop->tag_at(which).is_unresolved_klass();
       if (do_resolve) {
@@ -396,32 +375,6 @@
 }
 
 
-// This is an interface for the compiler that allows accessing non-resolved entries
-// in the constant pool - but still performs the validations tests. Must be used
-// in a pre-parse of the compiler - to determine what it can do and not do.
-// Note: We cannot update the ConstantPool from the vm_thread.
-Klass* ConstantPool::klass_ref_at_if_loaded_check(constantPoolHandle this_oop, int index, TRAPS) {
-  int which = this_oop->klass_ref_index_at(index);
-  CPSlot entry = this_oop->slot_at(which);
-  if (entry.is_resolved()) {
-    assert(entry.get_klass()->is_klass(), "must be");
-    return entry.get_klass();
-  } else {
-    assert(entry.is_unresolved(), "must be either symbol or klass");
-    Symbol*  name  = entry.get_symbol();
-    oop loader = this_oop->pool_holder()->class_loader();
-    oop protection_domain = this_oop->pool_holder()->protection_domain();
-    Handle h_loader(THREAD, loader);
-    Handle h_prot  (THREAD, protection_domain);
-    KlassHandle k(THREAD, SystemDictionary::find(name, h_loader, h_prot, THREAD));
-
-    // Do access check for klasses
-    if( k.not_null() ) verify_constant_pool_resolve(this_oop, k, CHECK_NULL);
-    return k();
-  }
-}
-
-
 Method* ConstantPool::method_at_if_loaded(constantPoolHandle cpool,
                                                    int which) {
   if (cpool->cache() == NULL)  return NULL;  // nothing to load yet
@@ -633,8 +586,7 @@
                                      int tag, TRAPS) {
   ResourceMark rm;
   Symbol* error = PENDING_EXCEPTION->klass()->name();
-  oop cplock = this_oop->lock();
-  ObjectLocker ol(cplock, THREAD, cplock != NULL);  // lock cpool to change tag.
+  MonitorLockerEx ml(this_oop->lock());  // lock cpool to change tag.
 
   int error_tag = (tag == JVM_CONSTANT_MethodHandle) ?
            JVM_CONSTANT_MethodHandleInError : JVM_CONSTANT_MethodTypeInError;
@@ -795,8 +747,7 @@
   if (cache_index >= 0) {
     // Cache the oop here also.
     Handle result_handle(THREAD, result_oop);
-    oop cplock = this_oop->lock();
-    ObjectLocker ol(cplock, THREAD, cplock != NULL);  // don't know if we really need this
+    MonitorLockerEx ml(this_oop->lock());  // don't know if we really need this
     oop result = this_oop->resolved_references()->obj_at(cache_index);
     // Benign race condition:  resolved_references may already be filled in while we were trying to lock.
     // The important thing here is that all threads pick up the same result.
@@ -866,8 +817,7 @@
   // If the string has already been interned, this entry will be non-null
   oop str = this_oop->resolved_references()->obj_at(obj_index);
   if (str != NULL) return str;
-
-      Symbol* sym = this_oop->unresolved_string_at(which);
+  Symbol* sym = this_oop->unresolved_string_at(which);
   str = StringTable::intern(sym, CHECK_(NULL));
   this_oop->string_at_put(which, obj_index, str);
   assert(java_lang_String::is_instance(str), "must be string");
@@ -903,18 +853,9 @@
 bool ConstantPool::compare_entry_to(int index1, constantPoolHandle cp2,
        int index2, TRAPS) {
 
-  jbyte t1 = tag_at(index1).value();
-  jbyte t2 = cp2->tag_at(index2).value();
-
-
-  // JVM_CONSTANT_UnresolvedClassInError is equal to JVM_CONSTANT_UnresolvedClass
-  // when comparing
-  if (t1 == JVM_CONSTANT_UnresolvedClassInError) {
-    t1 = JVM_CONSTANT_UnresolvedClass;
-  }
-  if (t2 == JVM_CONSTANT_UnresolvedClassInError) {
-    t2 = JVM_CONSTANT_UnresolvedClass;
-  }
+  // The error tags are equivalent to non-error tags when comparing
+  jbyte t1 = tag_at(index1).non_error_value();
+  jbyte t2 = cp2->tag_at(index2).non_error_value();
 
   if (t1 != t2) {
     // Not the same entry type so there is nothing else to check. Note
@@ -1035,8 +976,8 @@
 
   case JVM_CONSTANT_MethodType:
   {
-    int k1 = method_type_index_at(index1);
-    int k2 = cp2->method_type_index_at(index2);
+    int k1 = method_type_index_at_error_ok(index1);
+    int k2 = cp2->method_type_index_at_error_ok(index2);
     bool match = compare_entry_to(k1, cp2, k2, CHECK_false);
     if (match) {
       return true;
@@ -1045,11 +986,11 @@
 
   case JVM_CONSTANT_MethodHandle:
   {
-    int k1 = method_handle_ref_kind_at(index1);
-    int k2 = cp2->method_handle_ref_kind_at(index2);
+    int k1 = method_handle_ref_kind_at_error_ok(index1);
+    int k2 = cp2->method_handle_ref_kind_at_error_ok(index2);
     if (k1 == k2) {
-      int i1 = method_handle_index_at(index1);
-      int i2 = cp2->method_handle_index_at(index2);
+      int i1 = method_handle_index_at_error_ok(index1);
+      int i2 = cp2->method_handle_index_at_error_ok(index2);
       bool match = compare_entry_to(i1, cp2, i2, CHECK_false);
       if (match) {
         return true;
@@ -1363,14 +1304,6 @@
     }
   } break;
 
-  case JVM_CONSTANT_UnresolvedClassInError:
-  {
-    Symbol* k = from_cp->unresolved_klass_at(from_i);
-    to_cp->unresolved_klass_at_put(to_i, k);
-    to_cp->tag_at_put(to_i, JVM_CONSTANT_UnresolvedClassInError);
-  } break;
-
-
   case JVM_CONSTANT_String:
   {
     Symbol* s = from_cp->unresolved_string_at(from_i);
@@ -1386,15 +1319,17 @@
   } break;
 
   case JVM_CONSTANT_MethodType:
+  case JVM_CONSTANT_MethodTypeInError:
   {
-    jint k = from_cp->method_type_index_at(from_i);
+    jint k = from_cp->method_type_index_at_error_ok(from_i);
     to_cp->method_type_index_at_put(to_i, k);
   } break;
 
   case JVM_CONSTANT_MethodHandle:
+  case JVM_CONSTANT_MethodHandleInError:
   {
-    int k1 = from_cp->method_handle_ref_kind_at(from_i);
-    int k2 = from_cp->method_handle_index_at(from_i);
+    int k1 = from_cp->method_handle_ref_kind_at_error_ok(from_i);
+    int k2 = from_cp->method_handle_index_at_error_ok(from_i);
     to_cp->method_handle_index_at_put(to_i, k1, k2);
   } break;
 
@@ -1645,9 +1580,11 @@
     case JVM_CONSTANT_UnresolvedClassInError:
     case JVM_CONSTANT_StringIndex:
     case JVM_CONSTANT_MethodType:
+    case JVM_CONSTANT_MethodTypeInError:
       return 3;
 
     case JVM_CONSTANT_MethodHandle:
+    case JVM_CONSTANT_MethodHandleInError:
       return 4; //tag, ref_kind, ref_index
 
     case JVM_CONSTANT_Integer:
@@ -1828,8 +1765,8 @@
       case JVM_CONSTANT_MethodHandle:
       case JVM_CONSTANT_MethodHandleInError: {
         *bytes = JVM_CONSTANT_MethodHandle;
-        int kind = method_handle_ref_kind_at(idx);
-        idx1 = method_handle_index_at(idx);
+        int kind = method_handle_ref_kind_at_error_ok(idx);
+        idx1 = method_handle_index_at_error_ok(idx);
         *(bytes+1) = (unsigned char) kind;
         Bytes::put_Java_u2((address) (bytes+2), idx1);
         DBG(printf("JVM_CONSTANT_MethodHandle: %d %hd", kind, idx1));
@@ -1838,7 +1775,7 @@
       case JVM_CONSTANT_MethodType:
       case JVM_CONSTANT_MethodTypeInError: {
         *bytes = JVM_CONSTANT_MethodType;
-        idx1 = method_type_index_at(idx);
+        idx1 = method_type_index_at_error_ok(idx);
         Bytes::put_Java_u2((address) (bytes+1), idx1);
         DBG(printf("JVM_CONSTANT_MethodType: %hd", idx1));
         break;
@@ -1950,7 +1887,7 @@
     st->print_cr(" - holder: " INTPTR_FORMAT, pool_holder());
   }
   st->print_cr(" - cache: " INTPTR_FORMAT, cache());
-  st->print_cr(" - resolved_references: " INTPTR_FORMAT, resolved_references());
+  st->print_cr(" - resolved_references: " INTPTR_FORMAT, (void *)resolved_references());
   st->print_cr(" - reference_map: " INTPTR_FORMAT, reference_map());
 
   for (int index = 1; index < length(); index++) {      // Index 0 is unused
@@ -2026,12 +1963,12 @@
       break;
     case JVM_CONSTANT_MethodHandle :
     case JVM_CONSTANT_MethodHandleInError :
-      st->print("ref_kind=%d", method_handle_ref_kind_at(index));
-      st->print(" ref_index=%d", method_handle_index_at(index));
+      st->print("ref_kind=%d", method_handle_ref_kind_at_error_ok(index));
+      st->print(" ref_index=%d", method_handle_index_at_error_ok(index));
       break;
     case JVM_CONSTANT_MethodType :
     case JVM_CONSTANT_MethodTypeInError :
-      st->print("signature_index=%d", method_type_index_at(index));
+      st->print("signature_index=%d", method_type_index_at_error_ok(index));
       break;
     case JVM_CONSTANT_InvokeDynamic :
       {
--- a/src/share/vm/oops/constantPool.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/oops/constantPool.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -111,6 +111,7 @@
     int                _version;
   } _saved;
 
+  Monitor*             _lock;
 
   void set_tags(Array<u1>* tags)               { _tags = tags; }
   void tag_at_put(int which, jbyte t)          { tags()->at_put(which, t); }
@@ -231,7 +232,6 @@
   static int cache_offset_in_bytes()        { return offset_of(ConstantPool, _cache); }
   static int pool_holder_offset_in_bytes()  { return offset_of(ConstantPool, _pool_holder); }
   static int resolved_references_offset_in_bytes() { return offset_of(ConstantPool, _resolved_references); }
-  static int reference_map_offset_in_bytes() { return offset_of(ConstantPool, _reference_map); }
 
   // Storing constants
 
@@ -475,18 +475,42 @@
     return *int_at_addr(which);
   }
 
+ private:
+  int method_handle_ref_kind_at(int which, bool error_ok) {
+    assert(tag_at(which).is_method_handle() ||
+           (error_ok && tag_at(which).is_method_handle_in_error()), "Corrupted constant pool");
+    return extract_low_short_from_int(*int_at_addr(which));  // mask out unwanted ref_index bits
+  }
+  int method_handle_index_at(int which, bool error_ok) {
+    assert(tag_at(which).is_method_handle() ||
+           (error_ok && tag_at(which).is_method_handle_in_error()), "Corrupted constant pool");
+    return extract_high_short_from_int(*int_at_addr(which));  // shift out unwanted ref_kind bits
+  }
+  int method_type_index_at(int which, bool error_ok) {
+    assert(tag_at(which).is_method_type() ||
+           (error_ok && tag_at(which).is_method_type_in_error()), "Corrupted constant pool");
+    return *int_at_addr(which);
+  }
+ public:
   int method_handle_ref_kind_at(int which) {
-    assert(tag_at(which).is_method_handle(), "Corrupted constant pool");
-    return extract_low_short_from_int(*int_at_addr(which));  // mask out unwanted ref_index bits
+    return method_handle_ref_kind_at(which, false);
+  }
+  int method_handle_ref_kind_at_error_ok(int which) {
+    return method_handle_ref_kind_at(which, true);
   }
   int method_handle_index_at(int which) {
-    assert(tag_at(which).is_method_handle(), "Corrupted constant pool");
-    return extract_high_short_from_int(*int_at_addr(which));  // shift out unwanted ref_kind bits
+    return method_handle_index_at(which, false);
+  }
+  int method_handle_index_at_error_ok(int which) {
+    return method_handle_index_at(which, true);
   }
   int method_type_index_at(int which) {
-    assert(tag_at(which).is_method_type(), "Corrupted constant pool");
-    return *int_at_addr(which);
+    return method_type_index_at(which, false);
   }
+  int method_type_index_at_error_ok(int which) {
+    return method_type_index_at(which, true);
+  }
+
   // Derived queries:
   Symbol* method_handle_name_ref_at(int which) {
     int member = method_handle_index_at(which);
@@ -730,8 +754,6 @@
   static oop         method_type_at_if_loaded      (constantPoolHandle this_oop, int which);
   static Klass*            klass_at_if_loaded      (constantPoolHandle this_oop, int which);
   static Klass*        klass_ref_at_if_loaded      (constantPoolHandle this_oop, int which);
-  // Same as above - but does LinkResolving.
-  static Klass*        klass_ref_at_if_loaded_check(constantPoolHandle this_oop, int which, TRAPS);
 
   // Routines currently used for annotations (only called by jvm.cpp) but which might be used in the
   // future by other Java code. These take constant pool indices rather than
@@ -822,17 +844,8 @@
 
   void set_resolved_reference_length(int length) { _saved._resolved_reference_length = length; }
   int  resolved_reference_length() const  { return _saved._resolved_reference_length; }
-
-  // lock() may return null -- constant pool updates may happen before this lock is
-  // initialized, because the _pool_holder has not been fully initialized and
-  // has not been registered into the system dictionary. In this case, no other
-  // thread can be modifying this constantpool, so no synchronization is
-  // necessary.
-  //
-  // Use cplock() like this:
-  //    oop cplock = cp->lock();
-  //    ObjectLocker ol(cplock , THREAD, cplock != NULL);
-  oop lock();
+  void set_lock(Monitor* lock)            { _lock = lock; }
+  Monitor* lock()                         { return _lock; }
 
   // Decrease ref counts of symbols that are in the constant pool
   // when the holder class is unloaded
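
Call sites switch from the ObjectLocker-on-oop idiom to the standard HotSpot
native-lock pattern: MonitorLockerEx acquires the Monitor in its constructor
and releases it when the scope ends, so the previous NULL-lock special case
disappears. The pattern, as used at the call sites in this changeset:

    {
      MonitorLockerEx ml(cp->lock());  // blocks until the monitor is acquired
      // ... read or update tags / resolved references ...
    }                                  // released by ml's destructor
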
--- a/src/share/vm/oops/cpCache.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/oops/cpCache.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -140,9 +140,10 @@
             err_msg("size must not change: parameter_size=%d, value=%d", parameter_size(), value));
 }
 
-void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code,
-                                        methodHandle method,
-                                        int vtable_index) {
+void ConstantPoolCacheEntry::set_direct_or_vtable_call(Bytecodes::Code invoke_code,
+                                                       methodHandle method,
+                                                       int vtable_index) {
+  bool is_vtable_call = (vtable_index >= 0);  // FIXME: split this method on this boolean
   assert(method->interpreter_entry() != NULL, "should have been set at this point");
   assert(!method->is_obsolete(),  "attempt to write obsolete method to cpCache");
 
@@ -160,7 +161,8 @@
       // ...and fall through as if we were handling invokevirtual:
     case Bytecodes::_invokevirtual:
       {
-        if (method->can_be_statically_bound()) {
+        if (!is_vtable_call) {
+          assert(method->can_be_statically_bound(), "");
           // set_f2_as_vfinal_method checks if is_vfinal flag is true.
           set_method_flags(as_TosState(method->result_type()),
                            (                             1      << is_vfinal_shift) |
@@ -169,6 +171,7 @@
                            method()->size_of_parameters());
           set_f2_as_vfinal_method(method());
         } else {
+          assert(!method->can_be_statically_bound(), "");
           assert(vtable_index >= 0, "valid index");
           assert(!method->is_final_method(), "sanity");
           set_method_flags(as_TosState(method->result_type()),
@@ -182,6 +185,7 @@
 
     case Bytecodes::_invokespecial:
     case Bytecodes::_invokestatic:
+      assert(!is_vtable_call, "");
       // Note:  Read and preserve the value of the is_vfinal flag on any
       // invokevirtual bytecode shared with this constant pool cache entry.
       // It is cheap and safe to consult is_vfinal() at all times.
@@ -232,8 +236,22 @@
   NOT_PRODUCT(verify(tty));
 }
 
+void ConstantPoolCacheEntry::set_direct_call(Bytecodes::Code invoke_code, methodHandle method) {
+  int index = Method::nonvirtual_vtable_index;
+  // index < 0; FIXME: inline and customize set_direct_or_vtable_call
+  set_direct_or_vtable_call(invoke_code, method, index);
+}
 
-void ConstantPoolCacheEntry::set_interface_call(methodHandle method, int index) {
+void ConstantPoolCacheEntry::set_vtable_call(Bytecodes::Code invoke_code, methodHandle method, int index) {
+  // either the method is a miranda or its holder should accept the given index
+  assert(method->method_holder()->is_interface() || method->method_holder()->verify_vtable_index(index), "");
+  // index >= 0; FIXME: inline and customize set_direct_or_vtable_call
+  set_direct_or_vtable_call(invoke_code, method, index);
+}
+
+void ConstantPoolCacheEntry::set_itable_call(Bytecodes::Code invoke_code, methodHandle method, int index) {
+  assert(method->method_holder()->verify_itable_index(index), "");
+  assert(invoke_code == Bytecodes::_invokeinterface, "");
   InstanceKlass* interf = method->method_holder();
   assert(interf->is_interface(), "must be an interface");
   assert(!method->is_final_method(), "interfaces do not have final methods; cannot link to one here");
@@ -266,8 +284,7 @@
   // the lock, so that when the losing writer returns, he can use the linked
   // cache entry.
 
-  oop cplock = cpool->lock();
-  ObjectLocker ol(cplock, Thread::current(), cplock != NULL);
+  MonitorLockerEx ml(cpool->lock());
   if (!is_f1_null()) {
     return;
   }
@@ -288,8 +305,8 @@
   if (TraceInvokeDynamic) {
     tty->print_cr("set_method_handle bc=%d appendix="PTR_FORMAT"%s method_type="PTR_FORMAT"%s method="PTR_FORMAT" ",
                   invoke_code,
-                  (intptr_t)appendix(),    (has_appendix    ? "" : " (unused)"),
-                  (intptr_t)method_type(), (has_method_type ? "" : " (unused)"),
+                  (void *)appendix(),    (has_appendix    ? "" : " (unused)"),
+                  (void *)method_type(), (has_method_type ? "" : " (unused)"),
                   (intptr_t)adapter());
     adapter->print();
     if (has_appendix)  appendix()->print();
--- a/src/share/vm/oops/cpCache.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/oops/cpCache.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -219,15 +219,29 @@
     Klass*          root_klass                   // needed by the GC to dirty the klass
   );
 
-  void set_method(                               // sets entry to resolved method entry
+ private:
+  void set_direct_or_vtable_call(
     Bytecodes::Code invoke_code,                 // the bytecode used for invoking the method
     methodHandle    method,                      // the method/prototype if any (NULL, otherwise)
     int             vtable_index                 // the vtable index if any, else negative
   );
 
-  void set_interface_call(
-    methodHandle method,                         // Resolved method
-    int index                                    // Method index into interface
+ public:
+  void set_direct_call(                          // sets entry to exact concrete method entry
+    Bytecodes::Code invoke_code,                 // the bytecode used for invoking the method
+    methodHandle    method                       // the method to call
+  );
+
+  void set_vtable_call(                          // sets entry to vtable index
+    Bytecodes::Code invoke_code,                 // the bytecode used for invoking the method
+    methodHandle    method,                      // resolved method which declares the vtable index
+    int             vtable_index                 // the vtable index
+  );
+
+  void set_itable_call(
+    Bytecodes::Code invoke_code,                 // the bytecode used; must be invokeinterface
+    methodHandle method,                         // the resolved interface method
+    int itable_index                             // index into itable for the method
   );
 
   void set_method_handle(
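
A resolver is then expected to pick whichever of the three entry points
matches the dispatch it decided on; roughly (a hedged sketch, with
resolved_method, vtable_index, and itable_index standing in for the caller's
state — the real call sites are in the interpreter runtime, not shown here):

    if (invoke_code == Bytecodes::_invokeinterface) {
      entry->set_itable_call(invoke_code, resolved_method, itable_index);
    } else if (resolved_method->can_be_statically_bound()) {
      entry->set_direct_call(invoke_code, resolved_method);  // vfinal/static/special
    } else {
      entry->set_vtable_call(invoke_code, resolved_method, vtable_index);
    }
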
--- a/src/share/vm/oops/fieldInfo.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/oops/fieldInfo.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -240,6 +240,14 @@
     return (access_flags() & JVM_ACC_FIELD_INTERNAL) != 0;
   }
 
+  bool is_stable() const {
+    return (access_flags() & JVM_ACC_FIELD_STABLE) != 0;
+  }
+  void set_stable(bool z) {
+    if (z) _shorts[access_flags_offset] |=  JVM_ACC_FIELD_STABLE;
+    else   _shorts[access_flags_offset] &= ~JVM_ACC_FIELD_STABLE;
+  }
+
   Symbol* lookup_symbol(int symbol_index) const {
     assert(is_internal(), "only internal fields");
     return vmSymbols::symbol_at((vmSymbols::SID)symbol_index);
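
JVM_ACC_FIELD_STABLE is an internal access-flag bit (presumably set for fields
carrying the JDK-internal @Stable annotation); the accessor pair simply masks
the bit in and out of the packed shorts. Usage, sketched:

    fi->set_stable(true);
    assert(fi->is_stable(), "bit should now be set");
    fi->set_stable(false);
    assert(!fi->is_stable(), "bit should now be clear");
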
--- a/src/share/vm/oops/fieldStreams.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/oops/fieldStreams.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
 
 #include "oops/instanceKlass.hpp"
 #include "oops/fieldInfo.hpp"
+#include "runtime/fieldDescriptor.hpp"
 
 // This is the base class for iteration over the fields array
 // describing the declared fields in the class.  Several subclasses
@@ -43,8 +44,10 @@
   int                 _index;
   int                 _limit;
   int                 _generic_signature_slot;
+  fieldDescriptor     _fd_buf;
 
   FieldInfo* field() const { return FieldInfo::from_field_array(_fields, _index); }
+  InstanceKlass* field_holder() const { return _constants->pool_holder(); }
 
   int init_generic_signature_start_slot() {
     int length = _fields->length();
@@ -102,6 +105,7 @@
     _index = 0;
     _limit = klass->java_fields_count();
     init_generic_signature_start_slot();
+    assert(klass == field_holder(), "");
   }
   FieldStreamBase(instanceKlassHandle klass) {
     _fields = klass->fields();
@@ -109,6 +113,7 @@
     _index = 0;
     _limit = klass->java_fields_count();
     init_generic_signature_start_slot();
+    assert(klass == field_holder(), "");
   }
 
   // accessors
@@ -180,6 +185,12 @@
     return field()->contended_group();
   }
 
+  // bridge to a heavier API:
+  fieldDescriptor& field_descriptor() const {
+    fieldDescriptor& field = const_cast<fieldDescriptor&>(_fd_buf);
+    field.reinitialize(field_holder(), _index);
+    return field;
+  }
 };
 
 // Iterate over only the internal fields
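
Note that the returned fieldDescriptor aliases the stream's internal _fd_buf,
so it stays valid only until the next call to next() or field_descriptor().
Typical use, mirroring the instanceKlass.cpp call sites updated later in this
changeset:

    for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
      if (fs.access_flags().is_static()) {
        fieldDescriptor& fd = fs.field_descriptor();  // reuses the stream's buffer
        cl->do_field(&fd);
      }
    }
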
--- a/src/share/vm/oops/instanceKlass.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/oops/instanceKlass.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -106,7 +106,7 @@
       len = name->utf8_length();                                 \
     }                                                            \
     HS_DTRACE_PROBE4(hotspot, class__initialization__##type,     \
-      data, len, (clss)->class_loader(), thread_type);           \
+      data, len, SOLARIS_ONLY((void *))(clss)->class_loader(), thread_type);           \
   }
 
 #define DTRACE_CLASSINIT_PROBE_WAIT(type, clss, thread_type, wait) \
@@ -119,7 +119,7 @@
       len = name->utf8_length();                                 \
     }                                                            \
     HS_DTRACE_PROBE5(hotspot, class__initialization__##type,     \
-      data, len, (clss)->class_loader(), thread_type, wait);     \
+      data, len, SOLARIS_ONLY((void *))(clss)->class_loader(), thread_type, wait);     \
   }
 #else /* USDT2 */
 
@@ -238,6 +238,13 @@
   }
 }
 
+// create a new array of vtable_indices for default methods
+Array<int>* InstanceKlass::create_new_default_vtable_indices(int len, TRAPS) {
+  Array<int>* vtable_indices = MetadataFactory::new_array<int>(class_loader_data(), len, CHECK_NULL);
+  assert(default_vtable_indices() == NULL, "only create once");
+  set_default_vtable_indices(vtable_indices);
+  return vtable_indices;
+}
 
 InstanceKlass::InstanceKlass(int vtable_len,
                              int itable_len,
@@ -263,6 +270,8 @@
   set_array_klasses(NULL);
   set_methods(NULL);
   set_method_ordering(NULL);
+  set_default_methods(NULL);
+  set_default_vtable_indices(NULL);
   set_local_interfaces(NULL);
   set_transitive_interfaces(NULL);
   init_implementor();
@@ -286,7 +295,6 @@
   init_previous_versions();
   set_generic_signature_index(0);
   release_set_methods_jmethod_ids(NULL);
-  release_set_methods_cached_itable_indices(NULL);
   set_annotations(NULL);
   set_jvmti_cached_class_field_map(NULL);
   set_initial_method_idnum(0);
@@ -312,7 +320,8 @@
 
 void InstanceKlass::deallocate_methods(ClassLoaderData* loader_data,
                                        Array<Method*>* methods) {
-  if (methods != NULL && methods != Universe::the_empty_method_array()) {
+  if (methods != NULL && methods != Universe::the_empty_method_array() &&
+      !methods->is_shared()) {
     for (int i = 0; i < methods->length(); i++) {
       Method* method = methods->at(i);
       if (method == NULL) continue;  // maybe null if error processing
@@ -336,13 +345,14 @@
     // check that the interfaces don't come from super class
     Array<Klass*>* sti = (super_klass == NULL) ? NULL :
                     InstanceKlass::cast(super_klass)->transitive_interfaces();
-    if (ti != sti) {
+    if (ti != sti && ti != NULL && !ti->is_shared()) {
       MetadataFactory::free_array<Klass*>(loader_data, ti);
     }
   }
 
   // local interfaces can be empty
-  if (local_interfaces != Universe::the_empty_klass_array()) {
+  if (local_interfaces != Universe::the_empty_klass_array() &&
+      local_interfaces != NULL && !local_interfaces->is_shared()) {
     MetadataFactory::free_array<Klass*>(loader_data, local_interfaces);
   }
 }
@@ -372,16 +382,37 @@
   deallocate_methods(loader_data, methods());
   set_methods(NULL);
 
-  if (method_ordering() != Universe::the_empty_int_array()) {
+  if (method_ordering() != NULL &&
+      method_ordering() != Universe::the_empty_int_array() &&
+      !method_ordering()->is_shared()) {
     MetadataFactory::free_array<int>(loader_data, method_ordering());
   }
   set_method_ordering(NULL);
 
+  // default methods can be empty
+  if (default_methods() != NULL &&
+      default_methods() != Universe::the_empty_method_array() &&
+      !default_methods()->is_shared()) {
+    MetadataFactory::free_array<Method*>(loader_data, default_methods());
+  }
+  // Do NOT deallocate the default methods themselves; they are owned by the superinterfaces.
+  set_default_methods(NULL);
+
+  // default methods vtable indices can be empty
+  if (default_vtable_indices() != NULL &&
+      !default_vtable_indices()->is_shared()) {
+    MetadataFactory::free_array<int>(loader_data, default_vtable_indices());
+  }
+  set_default_vtable_indices(NULL);
+
   // This array is in Klass, but remove it with the InstanceKlass since
   // this place would be the only caller and it can share memory with transitive
   // interfaces.
-  if (secondary_supers() != Universe::the_empty_klass_array() &&
-      secondary_supers() != transitive_interfaces()) {
+  if (secondary_supers() != NULL &&
+      secondary_supers() != Universe::the_empty_klass_array() &&
+      secondary_supers() != transitive_interfaces() &&
+      !secondary_supers()->is_shared()) {
     MetadataFactory::free_array<Klass*>(loader_data, secondary_supers());
   }
   set_secondary_supers(NULL);
@@ -390,24 +421,32 @@
   set_transitive_interfaces(NULL);
   set_local_interfaces(NULL);
 
-  MetadataFactory::free_array<jushort>(loader_data, fields());
+  if (fields() != NULL && !fields()->is_shared()) {
+    MetadataFactory::free_array<jushort>(loader_data, fields());
+  }
   set_fields(NULL, 0);
 
   // If a method from a redefined class is using this constant pool, don't
   // delete it, yet.  The new class's previous version will point to this.
   if (constants() != NULL) {
     assert (!constants()->on_stack(), "shouldn't be called if anything is onstack");
-    MetadataFactory::free_metadata(loader_data, constants());
+    if (!constants()->is_shared()) {
+      MetadataFactory::free_metadata(loader_data, constants());
+    }
     set_constants(NULL);
   }
 
-  if (inner_classes() != Universe::the_empty_short_array()) {
+  if (inner_classes() != NULL &&
+      inner_classes() != Universe::the_empty_short_array() &&
+      !inner_classes()->is_shared()) {
     MetadataFactory::free_array<jushort>(loader_data, inner_classes());
   }
   set_inner_classes(NULL);
 
-  // We should deallocate the Annotations instance
-  MetadataFactory::free_metadata(loader_data, annotations());
+  // We should deallocate the Annotations instance if it's not in shared spaces.
+  if (annotations() != NULL && !annotations()->is_shared()) {
+    MetadataFactory::free_metadata(loader_data, annotations());
+  }
   set_annotations(NULL);
 }
 
@@ -457,15 +496,29 @@
   return java_lang_Class::signers(java_mirror());
 }
 
-volatile oop InstanceKlass::init_lock() const {
+oop InstanceKlass::init_lock() const {
   // return the init lock from the mirror
-  return java_lang_Class::init_lock(java_mirror());
+  oop lock = java_lang_Class::init_lock(java_mirror());
+  assert((oop)lock != NULL || !is_not_initialized(), // initialized or in_error state
+         "only fully initialized state can have a null lock");
+  return lock;
+}
+
+// Set the initialization lock to null so the object can be GC'ed.  Any threads
+// racing to get this lock will see a null lock and will not lock.
+// That's okay because they all check for the initialized state after getting
+// the lock and return.
+void InstanceKlass::fence_and_clear_init_lock() {
+  // make sure previous stores are all done, notably the init_state.
+  OrderAccess::storestore();
+  java_lang_Class::set_init_lock(java_mirror(), NULL);
+  assert(!is_not_initialized(), "class must be initialized now");
 }
 
 void InstanceKlass::eager_initialize_impl(instanceKlassHandle this_oop) {
   EXCEPTION_MARK;
-  volatile oop init_lock = this_oop->init_lock();
-  ObjectLocker ol(init_lock, THREAD);
+  oop init_lock = this_oop->init_lock();
+  ObjectLocker ol(init_lock, THREAD, init_lock != NULL);
 
   // abort if someone beat us to the initialization
   if (!this_oop->is_not_initialized()) return;  // note: not equivalent to is_initialized()
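
The ordering argument: the writer publishes init_state first, issues a
storestore barrier, and only then clears the lock, so any reader that observes
a NULL lock is guaranteed to also observe the fully_initialized state. The
reader side, sketched from the pattern used throughout this file:

    oop init_lock = this_oop->init_lock();                  // may be NULL once cleared
    ObjectLocker ol(init_lock, THREAD, init_lock != NULL);  // locking is a no-op on NULL
    if (!this_oop->is_not_initialized()) return;            // racing threads re-check state
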
@@ -484,6 +537,7 @@
   } else {
     // linking successful, mark class as initialized
     this_oop->set_init_state (fully_initialized);
+    this_oop->fence_and_clear_init_lock();
     // trace
     if (TraceClassInitialization) {
       ResourceMark rm(THREAD);
@@ -609,8 +663,8 @@
 
   // verification & rewriting
   {
-    volatile oop init_lock = this_oop->init_lock();
-    ObjectLocker ol(init_lock, THREAD);
+    oop init_lock = this_oop->init_lock();
+    ObjectLocker ol(init_lock, THREAD, init_lock != NULL);
     // rewritten will have been set if loader constraint error found
     // on an earlier link attempt
     // don't verify or rewrite if already rewritten
@@ -732,8 +786,8 @@
   // refer to the JVM book page 47 for description of steps
   // Step 1
   {
-    volatile oop init_lock = this_oop->init_lock();
-    ObjectLocker ol(init_lock, THREAD);
+    oop init_lock = this_oop->init_lock();
+    ObjectLocker ol(init_lock, THREAD, init_lock != NULL);
 
     Thread *self = THREAD; // it's passed the current thread
 
@@ -880,9 +934,10 @@
 }
 
 void InstanceKlass::set_initialization_state_and_notify_impl(instanceKlassHandle this_oop, ClassState state, TRAPS) {
-  volatile oop init_lock = this_oop->init_lock();
-  ObjectLocker ol(init_lock, THREAD);
+  oop init_lock = this_oop->init_lock();
+  ObjectLocker ol(init_lock, THREAD, init_lock != NULL);
   this_oop->set_init_state(state);
+  this_oop->fence_and_clear_init_lock();
   ol.notify_all(CHECK);
 }
 
@@ -1149,7 +1204,7 @@
     Symbol* f_name = fs.name();
     Symbol* f_sig  = fs.signature();
     if (f_name == name && f_sig == sig) {
-      fd->initialize(const_cast<InstanceKlass*>(this), fs.index());
+      fd->reinitialize(const_cast<InstanceKlass*>(this), fs.index());
       return true;
     }
   }
@@ -1218,7 +1273,7 @@
 bool InstanceKlass::find_local_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const {
   for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
     if (fs.offset() == offset) {
-      fd->initialize(const_cast<InstanceKlass*>(this), fs.index());
+      fd->reinitialize(const_cast<InstanceKlass*>(this), fs.index());
       if (fd->is_static() == is_static) return true;
     }
   }
@@ -1251,8 +1306,7 @@
 void InstanceKlass::do_local_static_fields(FieldClosure* cl) {
   for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
     if (fs.access_flags().is_static()) {
-      fieldDescriptor fd;
-      fd.initialize(this, fs.index());
+      fieldDescriptor& fd = fs.field_descriptor();
       cl->do_field(&fd);
     }
   }
@@ -1268,8 +1322,7 @@
 void InstanceKlass::do_local_static_fields_impl(instanceKlassHandle this_oop, void f(fieldDescriptor* fd, TRAPS), TRAPS) {
   for (JavaFieldStream fs(this_oop()); !fs.done(); fs.next()) {
     if (fs.access_flags().is_static()) {
-      fieldDescriptor fd;
-      fd.initialize(this_oop(), fs.index());
+      fieldDescriptor& fd = fs.field_descriptor();
       f(&fd, CHECK);
     }
   }
@@ -1291,7 +1344,7 @@
   int* fields_sorted = NEW_C_HEAP_ARRAY(int, 2*(length+1), mtClass);
   int j = 0;
   for (int i = 0; i < length; i += 1) {
-    fd.initialize(this, i);
+    fd.reinitialize(this, i);
     if (!fd.is_static()) {
       fields_sorted[j + 0] = fd.offset();
       fields_sorted[j + 1] = i;
@@ -1303,7 +1356,7 @@
     // _sort_Fn is defined in growableArray.hpp.
     qsort(fields_sorted, length/2, 2*sizeof(int), (_sort_Fn)compare_fields_by_offset);
     for (int i = 0; i < length; i += 2) {
-      fd.initialize(this, fields_sorted[i + 1]);
+      fd.reinitialize(this, fields_sorted[i + 1]);
       assert(!fd.is_static() && fd.offset() == fields_sorted[i], "only nonstatic fields");
       cl->do_field(&fd);
     }
@@ -1357,32 +1410,44 @@
   return -1;
 }
 
+// find_method looks up the name/signature in the local methods array
 Method* InstanceKlass::find_method(Symbol* name, Symbol* signature) const {
   return InstanceKlass::find_method(methods(), name, signature);
 }
 
+// find_method looks up the name/signature in the local methods array
 Method* InstanceKlass::find_method(
     Array<Method*>* methods, Symbol* name, Symbol* signature) {
+  int hit = find_method_index(methods, name, signature);
+  return hit >= 0 ? methods->at(hit) : NULL;
+}
+
+// Used directly for default_methods to find the index into the
+// default_vtable_indices, and indirectly by find_method.
+// find_method_index looks in the local methods array and returns the index
+// of the matching name/signature, or -1 if no match is found.
+int InstanceKlass::find_method_index(
+    Array<Method*>* methods, Symbol* name, Symbol* signature) {
   int hit = binary_search(methods, name);
   if (hit != -1) {
     Method* m = methods->at(hit);
     // Do linear search to find matching signature.  First, quick check
     // for common case
-    if (m->signature() == signature) return m;
+    if (m->signature() == signature) return hit;
     // search downwards through overloaded methods
     int i;
     for (i = hit - 1; i >= 0; --i) {
         Method* m = methods->at(i);
         assert(m->is_method(), "must be method");
         if (m->name() != name) break;
-        if (m->signature() == signature) return m;
+        if (m->signature() == signature) return i;
     }
     // search upwards
     for (i = hit + 1; i < methods->length(); ++i) {
         Method* m = methods->at(i);
         assert(m->is_method(), "must be method");
         if (m->name() != name) break;
-        if (m->signature() == signature) return m;
+        if (m->signature() == signature) return i;
     }
     // not found
 #ifdef ASSERT
@@ -1390,9 +1455,8 @@
     assert(index == -1, err_msg("binary search should have found entry %d", index));
 #endif
   }
-  return NULL;
+  return -1;
 }
-
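
Editor's note: find_method is now a thin wrapper over find_method_index. A
hedged sketch of the contract, assuming the methods array is sorted by name
(which binary_search requires):

    // Equivalent to find_method(methods, name, signature):
    int idx = InstanceKlass::find_method_index(methods, name, signature);
    Method* m = (idx >= 0) ? methods->at(idx) : NULL;
    // Default-method callers keep idx itself and use it to index
    // the parallel default_vtable_indices array.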
 int InstanceKlass::find_method_by_name(Symbol* name, int* end) {
   return find_method_by_name(methods(), name, end);
 }
@@ -1411,6 +1475,7 @@
   return -1;
 }
 
+// lookup_method searches both the local methods array and all superclasses' methods arrays
 Method* InstanceKlass::uncached_lookup_method(Symbol* name, Symbol* signature) const {
   Klass* klass = const_cast<InstanceKlass*>(this);
   while (klass != NULL) {
@@ -1421,7 +1486,24 @@
   return NULL;
 }
 
+// lookup a method in the default methods list then in all transitive interfaces
+// Do NOT return private or static methods
+Method* InstanceKlass::lookup_method_in_ordered_interfaces(Symbol* name,
+                                                         Symbol* signature) const {
+  Method* m = NULL;
+  if (default_methods() != NULL) {
+    m = find_method(default_methods(), name, signature);
+  }
+  // Look up interfaces
+  if (m == NULL) {
+    m = lookup_method_in_all_interfaces(name, signature);
+  }
+  return m;
+}
+
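
Editor's note: the lookup order here is significant for default method
resolution. An illustrative caller, using only names from this diff:

    // 1) resolved defaults recorded on this class,
    // 2) then the transitive superinterfaces.
    Method* m = ik->lookup_method_in_ordered_interfaces(name, signature);
    // m is never a private or static interface method; the interface
    // walk below filters on is_public() && !is_static().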
 // lookup a method in all the interfaces that this class implements
+// Do NOT return private or static methods; these are new in JDK8 and are not
+// externally visible.  They should only be found via the initial InterfaceMethodRef.
 Method* InstanceKlass::lookup_method_in_all_interfaces(Symbol* name,
                                                          Symbol* signature) const {
   Array<Klass*>* all_ifs = transitive_interfaces();
@@ -1430,7 +1512,7 @@
   for (int i = 0; i < num_ifs; i++) {
     ik = InstanceKlass::cast(all_ifs->at(i));
     Method* m = ik->lookup_method(name, signature);
-    if (m != NULL) {
+    if (m != NULL && m->is_public() && !m->is_static()) {
       return m;
     }
   }
@@ -1686,87 +1768,6 @@
 }
 
 
-// Cache an itable index
-void InstanceKlass::set_cached_itable_index(size_t idnum, int index) {
-  int* indices = methods_cached_itable_indices_acquire();
-  int* to_dealloc_indices = NULL;
-
-  // We use a double-check locking idiom here because this cache is
-  // performance sensitive. In the normal system, this cache only
-  // transitions from NULL to non-NULL which is safe because we use
-  // release_set_methods_cached_itable_indices() to advertise the
-  // new cache. A partially constructed cache should never be seen
-  // by a racing thread. Cache reads and writes proceed without a
-  // lock, but creation of the cache itself requires no leaks so a
-  // lock is generally acquired in that case.
-  //
-  // If the RedefineClasses() API has been used, then this cache can
-  // grow and we'll have transitions from non-NULL to bigger non-NULL.
-  // Cache creation requires no leaks and we require safety between all
-  // cache accesses and freeing of the old cache so a lock is generally
-  // acquired when the RedefineClasses() API has been used.
-
-  if (indices == NULL || idnum_can_increment()) {
-    // we need a cache or the cache can grow
-    MutexLocker ml(JNICachedItableIndex_lock);
-    // reacquire the cache to see if another thread already did the work
-    indices = methods_cached_itable_indices_acquire();
-    size_t length = 0;
-    // cache size is stored in element[0], other elements offset by one
-    if (indices == NULL || (length = (size_t)indices[0]) <= idnum) {
-      size_t size = MAX2(idnum+1, (size_t)idnum_allocated_count());
-      int* new_indices = NEW_C_HEAP_ARRAY(int, size+1, mtClass);
-      new_indices[0] = (int)size;
-      // copy any existing entries
-      size_t i;
-      for (i = 0; i < length; i++) {
-        new_indices[i+1] = indices[i+1];
-      }
-      // Set all the rest to -1
-      for (i = length; i < size; i++) {
-        new_indices[i+1] = -1;
-      }
-      if (indices != NULL) {
-        // We have an old cache to delete so save it for after we
-        // drop the lock.
-        to_dealloc_indices = indices;
-      }
-      release_set_methods_cached_itable_indices(indices = new_indices);
-    }
-
-    if (idnum_can_increment()) {
-      // this cache can grow so we have to write to it safely
-      indices[idnum+1] = index;
-    }
-  } else {
-    CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
-  }
-
-  if (!idnum_can_increment()) {
-    // The cache cannot grow and this JNI itable index value does not
-    // have to be unique like a jmethodID. If there is a race to set it,
-    // it doesn't matter.
-    indices[idnum+1] = index;
-  }
-
-  if (to_dealloc_indices != NULL) {
-    // we allocated a new cache so free the old one
-    FreeHeap(to_dealloc_indices);
-  }
-}
-
-
-// Retrieve a cached itable index
-int InstanceKlass::cached_itable_index(size_t idnum) {
-  int* indices = methods_cached_itable_indices_acquire();
-  if (indices != NULL && ((size_t)indices[0]) > idnum) {
-     // indices exist and are long enough, retrieve possible cached
-    return indices[idnum+1];
-  }
-  return -1;
-}
-
-
 //
 // Walk the list of dependent nmethods searching for nmethods which
 // are dependent on the changes that were passed in and mark them for
@@ -2326,12 +2327,6 @@
     }
   }
 
-  int* indices = methods_cached_itable_indices_acquire();
-  if (indices != (int*)NULL) {
-    release_set_methods_cached_itable_indices(NULL);
-    FreeHeap(indices);
-  }
-
   // release dependencies
   nmethodBucket* b = _dependencies;
   _dependencies = NULL;
@@ -2393,20 +2388,43 @@
 }
 
 address InstanceKlass::static_field_addr(int offset) {
-  return (address)(offset + InstanceMirrorKlass::offset_of_static_fields() + (intptr_t)java_mirror());
+  return (address)(offset + InstanceMirrorKlass::offset_of_static_fields() + cast_from_oop<intptr_t>(java_mirror()));
 }
 
 
 const char* InstanceKlass::signature_name() const {
+  int hash_len = 0;
+  char hash_buf[40];
+
+  // If this is an anonymous class, append a hash to make the name unique
+  if (is_anonymous()) {
+    assert(EnableInvokeDynamic, "EnableInvokeDynamic was not set.");
+    intptr_t hash = (java_mirror() != NULL) ? java_mirror()->identity_hash() : 0;
+    sprintf(hash_buf, "/" UINTX_FORMAT, (uintx)hash);
+    hash_len = (int)strlen(hash_buf);
+  }
+
+  // Get the internal name as a c string
   const char* src = (const char*) (name()->as_C_string());
   const int src_length = (int)strlen(src);
-  char* dest = NEW_RESOURCE_ARRAY(char, src_length + 3);
-  int src_index = 0;
+
+  char* dest = NEW_RESOURCE_ARRAY(char, src_length + hash_len + 3);
+
+  // Add L as type indicator
   int dest_index = 0;
   dest[dest_index++] = 'L';
-  while (src_index < src_length) {
+
+  // Add the actual class name
+  for (int src_index = 0; src_index < src_length; ) {
     dest[dest_index++] = src[src_index++];
   }
+
+  // If we have a hash, append it
+  for (int hash_index = 0; hash_index < hash_len; ) {
+    dest[dest_index++] = hash_buf[hash_index++];
+  }
+
+  // Add the semicolon and the NULL
   dest[dest_index++] = ';';
   dest[dest_index] = '\0';
   return dest;
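
Editor's note: for reference, a hedged illustration of the strings
signature_name() now produces (the class name and hash value below are made up):

    //   regular class:    "Ljava/lang/String;"
    //   anonymous class:  "Lcom/example/AnonHost/1582797393;"
    //                      i.e. name + "/" + identity hash of the mirror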
@@ -2636,6 +2654,42 @@
   return m;
 }
 
+
+#if INCLUDE_JVMTI
+// Update default_methods for RedefineClasses for methods that are
+// not yet in the vtable, due to concurrent subclass definition and
+// superinterface redefinition.
+// Note: those already in the vtable should have been updated via adjust_method_entries.
+void InstanceKlass::adjust_default_methods(Method** old_methods, Method** new_methods,
+                                           int methods_length, bool* trace_name_printed) {
+  // search the default_methods for uses of either obsolete or EMCP methods
+  if (default_methods() != NULL) {
+    for (int j = 0; j < methods_length; j++) {
+      Method* old_method = old_methods[j];
+      Method* new_method = new_methods[j];
+
+      for (int index = 0; index < default_methods()->length(); index++) {
+        if (default_methods()->at(index) == old_method) {
+          default_methods()->at_put(index, new_method);
+          if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
+            if (!(*trace_name_printed)) {
+              // RC_TRACE_MESG macro has an embedded ResourceMark
+              RC_TRACE_MESG(("adjust: klassname=%s default methods from name=%s",
+                             external_name(),
+                             old_method->method_holder()->external_name()));
+              *trace_name_printed = true;
+            }
+            RC_TRACE(0x00100000, ("default method update: %s(%s) ",
+                                  new_method->name()->as_C_string(),
+                                  new_method->signature()->as_C_string()));
+          }
+        }
+      }
+    }
+  }
+}
+#endif // INCLUDE_JVMTI
+
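
Editor's note: a hedged sketch of the expected call shape from the
RedefineClasses code; the variable names here are illustrative, not part of
this diff:

    // old_methods[j] and new_methods[j] are parallel arrays of the
    // redefined superinterface's methods.
    bool trace_name_printed = false;  // shared across the adjust_* calls
    ik->adjust_default_methods(old_methods, new_methods,
                               methods_length, &trace_name_printed);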
 // On-stack replacement stuff
 void InstanceKlass::add_osr_nmethod(nmethod* n) {
   // only one compilation can be active
@@ -2782,6 +2836,18 @@
   "allocated", "loaded", "linked", "being_initialized", "fully_initialized", "initialization_error"
 };
 
+static void print_vtable(intptr_t* start, int len, outputStream* st) {
+  for (int i = 0; i < len; i++) {
+    intptr_t e = start[i];
+    st->print("%d : " INTPTR_FORMAT, i, e);
+    if (e != 0 && ((Metadata*)e)->is_metaspace_object()) {
+      st->print(" ");
+      ((Metadata*)e)->print_value_on(st);
+    }
+    st->cr();
+  }
+}
+
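
Editor's note: approximate output of print_vtable under -XX:+Verbose; the
addresses and the exact print_value_on format are illustrative only:

    0 : 0x00007f3a4c0120a8 {method} 'hashCode' '()I' in 'java/lang/Object'
    1 : 0x00007f3a4c012150 {method} 'equals' '(Ljava/lang/Object;)Z' in 'java/lang/Object'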
 void InstanceKlass::print_on(outputStream* st) const {
   assert(is_klass(), "must be klass");
   Klass::print_on(st);
@@ -2816,13 +2882,23 @@
 
   st->print(BULLET"arrays:            "); array_klasses()->print_value_on_maybe_null(st); st->cr();
   st->print(BULLET"methods:           "); methods()->print_value_on(st);                  st->cr();
-  if (Verbose) {
+  if (Verbose || WizardMode) {
     Array<Method*>* method_array = methods();
-    for(int i = 0; i < method_array->length(); i++) {
+    for (int i = 0; i < method_array->length(); i++) {
       st->print("%d : ", i); method_array->at(i)->print_value(); st->cr();
     }
   }
-  st->print(BULLET"method ordering:   "); method_ordering()->print_value_on(st);       st->cr();
+  st->print(BULLET"method ordering:   "); method_ordering()->print_value_on(st);      st->cr();
+  st->print(BULLET"default_methods:   "); default_methods()->print_value_on(st);      st->cr();
+  if (Verbose && default_methods() != NULL) {
+    Array<Method*>* method_array = default_methods();
+    for (int i = 0; i < method_array->length(); i++) {
+      st->print("%d : ", i); method_array->at(i)->print_value(); st->cr();
+    }
+  }
+  if (default_vtable_indices() != NULL) {
+    st->print(BULLET"default vtable indices:   "); default_vtable_indices()->print_value_on(st);       st->cr();
+  }
   st->print(BULLET"local interfaces:  "); local_interfaces()->print_value_on(st);      st->cr();
   st->print(BULLET"trans. interfaces: "); transitive_interfaces()->print_value_on(st); st->cr();
   st->print(BULLET"constants:         "); constants()->print_value_on(st);         st->cr();
@@ -2847,24 +2923,17 @@
   st->print(BULLET"field annotations:       "); fields_annotations()->print_value_on(st); st->cr();
   st->print(BULLET"field type annotations:  "); fields_type_annotations()->print_value_on(st); st->cr();
   {
-    ResourceMark rm;
-    // PreviousVersionInfo objects returned via PreviousVersionWalker
-    // contain a GrowableArray of handles. We have to clean up the
-    // GrowableArray _after_ the PreviousVersionWalker destructor
-    // has destroyed the handles.
-    {
-      bool have_pv = false;
-      PreviousVersionWalker pvw((InstanceKlass*)this);
-      for (PreviousVersionInfo * pv_info = pvw.next_previous_version();
-           pv_info != NULL; pv_info = pvw.next_previous_version()) {
-        if (!have_pv)
-          st->print(BULLET"previous version:  ");
-        have_pv = true;
-        pv_info->prev_constant_pool_handle()()->print_value_on(st);
-      }
-      if (have_pv)  st->cr();
-    } // pvw is cleaned up
-  } // rm is cleaned up
+    bool have_pv = false;
+    PreviousVersionWalker pvw(Thread::current(), (InstanceKlass*)this);
+    for (PreviousVersionNode * pv_node = pvw.next_previous_version();
+         pv_node != NULL; pv_node = pvw.next_previous_version()) {
+      if (!have_pv)
+        st->print(BULLET"previous version:  ");
+      have_pv = true;
+      pv_node->prev_constant_pool()->print_value_on(st);
+    }
+    if (have_pv) st->cr();
+  } // pvw is cleaned up
 
   if (generic_signature() != NULL) {
     st->print(BULLET"generic signature: ");
@@ -2874,7 +2943,9 @@
   st->print(BULLET"inner classes:     "); inner_classes()->print_value_on(st);     st->cr();
   st->print(BULLET"java mirror:       "); java_mirror()->print_value_on(st);       st->cr();
   st->print(BULLET"vtable length      %d  (start addr: " INTPTR_FORMAT ")", vtable_length(), start_of_vtable());  st->cr();
+  if (vtable_length() > 0 && (Verbose || WizardMode))  print_vtable(start_of_vtable(), vtable_length(), st);
   st->print(BULLET"itable length      %d (start addr: " INTPTR_FORMAT ")", itable_length(), start_of_itable()); st->cr();
+  if (itable_length() > 0 && (Verbose || WizardMode))  print_vtable(start_of_itable(), itable_length(), st);
   st->print_cr(BULLET"---- static fields (%d words):", static_field_size());
   FieldPrinter print_static_field(st);
   ((InstanceKlass*)this)->do_local_static_fields(&print_static_field);
@@ -2896,6 +2967,7 @@
 
 void InstanceKlass::print_value_on(outputStream* st) const {
   assert(is_klass(), "must be klass");
+  if (Verbose || WizardMode)  access_flags().print_on(st);
   name()->print_value_on(st);
 }
 
@@ -3179,6 +3251,19 @@
     }
   }
 
+  // Verify default methods
+  if (default_methods() != NULL) {
+    Array<Method*>* methods = this->default_methods();
+    for (int j = 0; j < methods->length(); j++) {
+      guarantee(methods->at(j)->is_method(), "non-method in methods array");
+    }
+    for (int j = 0; j < methods->length() - 1; j++) {
+      Method* m1 = methods->at(j);
+      Method* m2 = methods->at(j + 1);
+      guarantee(m1->name()->fast_compare(m2->name()) <= 0, "methods not sorted correctly");
+    }
+  }
+
   // Verify JNI static field identifiers
   if (jni_ids() != NULL) {
     jni_ids()->verify(this);
@@ -3392,34 +3477,34 @@
   Array<Method*>* old_methods = ikh->methods();
 
   if (cp_ref->on_stack()) {
-  PreviousVersionNode * pv_node = NULL;
-  if (emcp_method_count == 0) {
+    PreviousVersionNode * pv_node = NULL;
+    if (emcp_method_count == 0) {
       // non-shared ConstantPool gets a reference
-      pv_node = new PreviousVersionNode(cp_ref, !cp_ref->is_shared(), NULL);
-    RC_TRACE(0x00000400,
-        ("add: all methods are obsolete; flushing any EMCP refs"));
-  } else {
-    int local_count = 0;
+      pv_node = new PreviousVersionNode(cp_ref, NULL);
+      RC_TRACE(0x00000400,
+          ("add: all methods are obsolete; flushing any EMCP refs"));
+    } else {
+      int local_count = 0;
       GrowableArray<Method*>* method_refs = new (ResourceObj::C_HEAP, mtClass)
-        GrowableArray<Method*>(emcp_method_count, true);
-    for (int i = 0; i < old_methods->length(); i++) {
-      if (emcp_methods->at(i)) {
-          // this old method is EMCP. Save it only if it's on the stack
-          Method* old_method = old_methods->at(i);
-          if (old_method->on_stack()) {
-            method_refs->append(old_method);
+          GrowableArray<Method*>(emcp_method_count, true);
+      for (int i = 0; i < old_methods->length(); i++) {
+        if (emcp_methods->at(i)) {
+            // this old method is EMCP. Save it only if it's on the stack
+            Method* old_method = old_methods->at(i);
+            if (old_method->on_stack()) {
+              method_refs->append(old_method);
+            }
+          if (++local_count >= emcp_method_count) {
+            // no more EMCP methods so bail out now
+            break;
           }
-        if (++local_count >= emcp_method_count) {
-          // no more EMCP methods so bail out now
-          break;
         }
       }
-    }
       // non-shared ConstantPool gets a reference
-      pv_node = new PreviousVersionNode(cp_ref, !cp_ref->is_shared(), method_refs);
+      pv_node = new PreviousVersionNode(cp_ref, method_refs);
     }
     // append new previous version.
-  _previous_versions->append(pv_node);
+    _previous_versions->append(pv_node);
   }
 
   // Since the caller is the VMThread and we are at a safepoint, this
@@ -3520,6 +3605,8 @@
         return m;
       }
     }
+    // None found, return null for the caller to handle.
+    return NULL;
   }
   return m;
 }
@@ -3536,10 +3623,9 @@
 // Construct a PreviousVersionNode entry for the array hung off
 // the InstanceKlass.
 PreviousVersionNode::PreviousVersionNode(ConstantPool* prev_constant_pool,
-  bool prev_cp_is_weak, GrowableArray<Method*>* prev_EMCP_methods) {
+  GrowableArray<Method*>* prev_EMCP_methods) {
 
   _prev_constant_pool = prev_constant_pool;
-  _prev_cp_is_weak = prev_cp_is_weak;
   _prev_EMCP_methods = prev_EMCP_methods;
 }
 
@@ -3555,99 +3641,38 @@
   }
 }
 
-
-// Construct a PreviousVersionInfo entry
-PreviousVersionInfo::PreviousVersionInfo(PreviousVersionNode *pv_node) {
-  _prev_constant_pool_handle = constantPoolHandle();  // NULL handle
-  _prev_EMCP_method_handles = NULL;
-
-  ConstantPool* cp = pv_node->prev_constant_pool();
-  assert(cp != NULL, "constant pool ref was unexpectedly cleared");
-  if (cp == NULL) {
-    return;  // robustness
-  }
-
-  // make the ConstantPool* safe to return
-  _prev_constant_pool_handle = constantPoolHandle(cp);
-
-  GrowableArray<Method*>* method_refs = pv_node->prev_EMCP_methods();
-  if (method_refs == NULL) {
-    // the InstanceKlass did not have any EMCP methods
-    return;
-  }
-
-  _prev_EMCP_method_handles = new GrowableArray<methodHandle>(10);
-
-  int n_methods = method_refs->length();
-  for (int i = 0; i < n_methods; i++) {
-    Method* method = method_refs->at(i);
-    assert (method != NULL, "method has been cleared");
-    if (method == NULL) {
-      continue;  // robustness
-    }
-    // make the Method* safe to return
-    _prev_EMCP_method_handles->append(methodHandle(method));
-  }
-}
-
-
-// Destroy a PreviousVersionInfo
-PreviousVersionInfo::~PreviousVersionInfo() {
-  // Since _prev_EMCP_method_handles is not C-heap allocated, we
-  // don't have to delete it.
-}
-
-
 // Construct a helper for walking the previous versions array
-PreviousVersionWalker::PreviousVersionWalker(InstanceKlass *ik) {
+PreviousVersionWalker::PreviousVersionWalker(Thread* thread, InstanceKlass *ik) {
+  _thread = thread;
   _previous_versions = ik->previous_versions();
   _current_index = 0;
-  // _hm needs no initialization
   _current_p = NULL;
-}
-
-
-// Destroy a PreviousVersionWalker
-PreviousVersionWalker::~PreviousVersionWalker() {
-  // Delete the current info just in case the caller didn't walk to
-  // the end of the previous versions list. No harm if _current_p is
-  // already NULL.
-  delete _current_p;
-
-  // When _hm is destroyed, all the Handles returned in
-  // PreviousVersionInfo objects will be destroyed.
-  // Also, after this destructor is finished it will be
-  // safe to delete the GrowableArray allocated in the
-  // PreviousVersionInfo objects.
+  _current_constant_pool_handle = constantPoolHandle(thread, ik->constants());
 }
 
 
 // Return the interesting information for the next previous version
 // of the klass. Returns NULL if there are no more previous versions.
-PreviousVersionInfo* PreviousVersionWalker::next_previous_version() {
+PreviousVersionNode* PreviousVersionWalker::next_previous_version() {
   if (_previous_versions == NULL) {
     // no previous versions so nothing to return
     return NULL;
   }
 
-  delete _current_p;  // cleanup the previous info for the caller
-  _current_p = NULL;  // reset to NULL so we don't delete same object twice
+  _current_p = NULL;  // reset to NULL
+  _current_constant_pool_handle = NULL;
 
   int length = _previous_versions->length();
 
   while (_current_index < length) {
     PreviousVersionNode * pv_node = _previous_versions->at(_current_index++);
-    PreviousVersionInfo * pv_info = new (ResourceObj::C_HEAP, mtClass)
-                                          PreviousVersionInfo(pv_node);
-
-    constantPoolHandle cp_h = pv_info->prev_constant_pool_handle();
-    assert (!cp_h.is_null(), "null cp found in previous version");
-
-    // The caller will need to delete pv_info when they are done with it.
-    _current_p = pv_info;
-    return pv_info;
+
+    // Save a handle to the constant pool for this previous version,
+    // which keeps all the methods from being deallocated.
+    _current_constant_pool_handle = constantPoolHandle(_thread, pv_node->prev_constant_pool());
+    _current_p = pv_node;
+    return pv_node;
   }
 
-  // all of the underlying nodes' info has been deleted
   return NULL;
 } // end next_previous_version()
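
Editor's note: with PreviousVersionInfo gone, walking previous versions needs
no handle-cleanup dance. A usage sketch matching the rewritten print_on() above:

    PreviousVersionWalker pvw(Thread::current(), ik);
    for (PreviousVersionNode* pv_node = pvw.next_previous_version();
         pv_node != NULL; pv_node = pvw.next_previous_version()) {
      pv_node->prev_constant_pool()->print_value_on(st);
    }
    // The walker's constantPoolHandle keeps the current version's
    // methods from being deallocated while we look at them.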
--- a/src/share/vm/oops/instanceKlass.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/oops/instanceKlass.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -245,7 +245,6 @@
   MemberNameTable* _member_names;        // Member names
   JNIid*          _jni_ids;              // First JNI identifier for static fields in this class
   jmethodID*      _methods_jmethod_ids;  // jmethodIDs corresponding to method_idnum, or NULL if none
-  int*            _methods_cached_itable_indices;  // itable_index cache for JNI invoke corresponding to methods idnum, or NULL
   nmethodBucket*  _dependencies;         // list of dependent nmethods
   nmethod*        _osr_nmethods_head;    // Head of list of on-stack replacement nmethods for this class
   BreakpointInfo* _breakpoints;          // bpt lists, managed by Method*
@@ -270,12 +269,18 @@
 
   // Method array.
   Array<Method*>* _methods;
+  // Default Method Array, concrete methods inherited from interfaces
+  Array<Method*>* _default_methods;
   // Interface (Klass*s) this class declares locally to implement.
   Array<Klass*>* _local_interfaces;
   // Interface (Klass*s) this class implements transitively.
   Array<Klass*>* _transitive_interfaces;
   // Int array containing the original order of method in the class file (for JVMTI).
   Array<int>*     _method_ordering;
+  // Int array containing the vtable_indices for default_methods
+  // offset matches _default_methods offset
+  Array<int>*     _default_vtable_indices;
+
   // Instance and static variable information, starts with 6-tuples of shorts
   // [access, name index, sig index, initval index, low_offset, high_offset]
   // for all fields, followed by the generic signature data at the end of
@@ -357,6 +362,15 @@
   void set_method_ordering(Array<int>* m) { _method_ordering = m; }
   void copy_method_ordering(intArray* m, TRAPS);
 
+  // default_methods
+  Array<Method*>* default_methods() const  { return _default_methods; }
+  void set_default_methods(Array<Method*>* a) { _default_methods = a; }
+
+  // default method vtable_indices
+  Array<int>* default_vtable_indices() const { return _default_vtable_indices; }
+  void set_default_vtable_indices(Array<int>* v) { _default_vtable_indices = v; }
+  Array<int>* create_new_default_vtable_indices(int len, TRAPS);
+
   // interfaces
   Array<Klass*>* local_interfaces() const          { return _local_interfaces; }
   void set_local_interfaces(Array<Klass*>* a)      {
@@ -502,12 +516,18 @@
   Method* find_method(Symbol* name, Symbol* signature) const;
   static Method* find_method(Array<Method*>* methods, Symbol* name, Symbol* signature);
 
+  // find a local method index in default_methods (returns -1 if not found)
+  static int find_method_index(Array<Method*>* methods, Symbol* name, Symbol* signature);
+
   // lookup operation (returns NULL if not found)
   Method* uncached_lookup_method(Symbol* name, Symbol* signature) const;
 
   // lookup a method in all the interfaces that this class implements
   // (returns NULL if not found)
   Method* lookup_method_in_all_interfaces(Symbol* name, Symbol* signature) const;
+  // lookup a method in local defaults then in all interfaces
+  // (returns NULL if not found)
+  Method* lookup_method_in_ordered_interfaces(Symbol* name, Symbol* signature) const;
 
   // Find method indices by name.  If a method with the specified name is
   // found the index to the first method is returned, and 'end' is filled in
@@ -690,10 +710,6 @@
                 size_t *length_p, jmethodID* id_p);
   jmethodID jmethod_id_or_null(Method* method);
 
-  // cached itable index support
-  void set_cached_itable_index(size_t idnum, int index);
-  int cached_itable_index(size_t idnum);
-
   // annotations support
   Annotations* annotations() const          { return _annotations; }
   void set_annotations(Annotations* anno)   { _annotations = anno; }
@@ -915,6 +931,11 @@
   klassItable* itable() const;        // return new klassItable wrapper
   Method* method_at_itable(Klass* holder, int index, TRAPS);
 
+#if INCLUDE_JVMTI
+  void adjust_default_methods(Method** old_methods, Method** new_methods,
+                              int methods_length, bool* trace_name_printed);
+#endif // INCLUDE_JVMTI
+
   // Garbage collection
   void oop_follow_contents(oop obj);
   int  oop_adjust_pointers(oop obj);
@@ -994,19 +1015,15 @@
   void release_set_methods_jmethod_ids(jmethodID* jmeths)
          { OrderAccess::release_store_ptr(&_methods_jmethod_ids, jmeths); }
 
-  int* methods_cached_itable_indices_acquire() const
-         { return (int*)OrderAccess::load_ptr_acquire(&_methods_cached_itable_indices); }
-  void release_set_methods_cached_itable_indices(int* indices)
-         { OrderAccess::release_store_ptr(&_methods_cached_itable_indices, indices); }
-
   // Lock during initialization
 public:
   // Lock for (1) initialization; (2) access to the ConstantPool of this class.
   // Must be one per class and it has to be a VM internal object so java code
   // cannot lock it (like the mirror).
   // It has to be an object not a Mutex because it's held through java calls.
-  volatile oop init_lock() const;
+  oop init_lock() const;
 private:
+  void fence_and_clear_init_lock();
 
   // Static methods that are used to implement member methods where an exposed this pointer
   // is needed due to possible GCs
@@ -1136,21 +1153,11 @@
 
 
 // A collection point for interesting information about the previous
-// version(s) of an InstanceKlass. This class uses weak references to
-// the information so that the information may be collected as needed
-// by the system. If the information is shared, then a regular
-// reference must be used because a weak reference would be seen as
-// collectible. A GrowableArray of PreviousVersionNodes is attached
-// to the InstanceKlass as needed. See PreviousVersionWalker below.
+// version(s) of an InstanceKlass.  A GrowableArray of PreviousVersionNodes
+// is attached to the InstanceKlass as needed. See PreviousVersionWalker below.
 class PreviousVersionNode : public CHeapObj<mtClass> {
  private:
-  // A shared ConstantPool is never collected so we'll always have
-  // a reference to it so we can update items in the cache. We'll
-  // have a weak reference to a non-shared ConstantPool until all
-  // of the methods (EMCP or obsolete) have been collected; the
-  // non-shared ConstantPool becomes collectible at that point.
-  ConstantPool*    _prev_constant_pool;  // regular or weak reference
-  bool    _prev_cp_is_weak;     // true if not a shared ConstantPool
+  ConstantPool*    _prev_constant_pool;
 
   // If the previous version of the InstanceKlass doesn't have any
   // EMCP methods, then _prev_EMCP_methods will be NULL. If all the
@@ -1159,8 +1166,8 @@
   GrowableArray<Method*>* _prev_EMCP_methods;
 
 public:
-  PreviousVersionNode(ConstantPool* prev_constant_pool, bool prev_cp_is_weak,
-    GrowableArray<Method*>* prev_EMCP_methods);
+  PreviousVersionNode(ConstantPool* prev_constant_pool,
+                      GrowableArray<Method*>* prev_EMCP_methods);
   ~PreviousVersionNode();
   ConstantPool* prev_constant_pool() const {
     return _prev_constant_pool;
@@ -1171,59 +1178,26 @@
 };
 
 
-// A Handle-ized version of PreviousVersionNode.
-class PreviousVersionInfo : public ResourceObj {
- private:
-  constantPoolHandle   _prev_constant_pool_handle;
-  // If the previous version of the InstanceKlass doesn't have any
-  // EMCP methods, then _prev_EMCP_methods will be NULL. Since the
-  // methods cannot be collected while we hold a handle,
-  // _prev_EMCP_methods should never have a length of zero.
-  GrowableArray<methodHandle>* _prev_EMCP_method_handles;
-
-public:
-  PreviousVersionInfo(PreviousVersionNode *pv_node);
-  ~PreviousVersionInfo();
-  constantPoolHandle prev_constant_pool_handle() const {
-    return _prev_constant_pool_handle;
-  }
-  GrowableArray<methodHandle>* prev_EMCP_method_handles() const {
-    return _prev_EMCP_method_handles;
-  }
-};
-
-
-// Helper object for walking previous versions. This helper cleans up
-// the Handles that it allocates when the helper object is destroyed.
-// The PreviousVersionInfo object returned by next_previous_version()
-// is only valid until a subsequent call to next_previous_version() or
-// the helper object is destroyed.
+// Helper object for walking previous versions.
 class PreviousVersionWalker : public StackObj {
  private:
+  Thread*                               _thread;
   GrowableArray<PreviousVersionNode *>* _previous_versions;
   int                                   _current_index;
-  // Fields for cleaning up when we are done walking the previous versions:
-  // A HandleMark for the PreviousVersionInfo handles:
-  HandleMark                            _hm;
+
+  // A pointer to the current previous version node.
+  PreviousVersionNode*                  _current_p;
 
-  // It would be nice to have a ResourceMark field in this helper also,
-  // but the ResourceMark code says to be careful to delete handles held
-  // in GrowableArrays _before_ deleting the GrowableArray. Since we
-  // can't guarantee the order in which the fields are destroyed, we
-  // have to let the creator of the PreviousVersionWalker object do
-  // the right thing. Also, adding a ResourceMark here causes an
-  // include loop.
-
-  // A pointer to the current info object so we can handle the deletes.
-  PreviousVersionInfo *                 _current_p;
+  // The constant pool handle keeps all the methods in this class from being
+  // deallocated from the metaspace during class unloading.
+  constantPoolHandle                    _current_constant_pool_handle;
 
  public:
-  PreviousVersionWalker(InstanceKlass *ik);
-  ~PreviousVersionWalker();
+  PreviousVersionWalker(Thread* thread, InstanceKlass *ik);
 
   // Return the interesting information for the next previous version
   // of the klass. Returns NULL if there are no more previous versions.
-  PreviousVersionInfo* next_previous_version();
+  PreviousVersionNode* next_previous_version();
 };
 
 
--- a/src/share/vm/oops/instanceMirrorKlass.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/oops/instanceMirrorKlass.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -66,7 +66,7 @@
   // Static field offset is an offset into the Heap, should be converted by
   // based on UseCompressedOop for traversal
   static HeapWord* start_of_static_fields(oop obj) {
-    return (HeapWord*)((intptr_t)obj + offset_of_static_fields());
+    return (HeapWord*)(cast_from_oop<intptr_t>(obj) + offset_of_static_fields());
   }
 
   static void init_offset_of_static_fields() {
--- a/src/share/vm/oops/instanceOop.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/oops/instanceOop.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -37,9 +37,9 @@
 
   // If compressed, the offset of the fields of the instance may not be aligned.
   static int base_offset_in_bytes() {
-    // offset computation code breaks if UseCompressedKlassPointers
+    // offset computation code breaks if UseCompressedClassPointers
     // only is true
-    return (UseCompressedOops && UseCompressedKlassPointers) ?
+    return (UseCompressedOops && UseCompressedClassPointers) ?
              klass_gap_offset_in_bytes() :
              sizeof(instanceOopDesc);
   }
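
Editor's note: a hedged numeric illustration on a typical 64-bit VM (offsets
assume the usual 8-byte mark word and 4-byte compressed klass pointer; they
are not guaranteed by this diff):

    // UseCompressedOops && UseCompressedClassPointers:
    //   base_offset_in_bytes() == klass_gap_offset_in_bytes() == 12
    //   (the first instance field reuses the 4-byte klass gap)
    // otherwise:
    //   base_offset_in_bytes() == sizeof(instanceOopDesc) == 16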
--- a/src/share/vm/oops/instanceRefKlass.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/oops/instanceRefKlass.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -51,7 +51,7 @@
   T heap_oop = oopDesc::load_heap_oop(referent_addr);
   debug_only(
     if(TraceReferenceGC && PrintGCDetails) {
-      gclog_or_tty->print_cr("InstanceRefKlass::oop_follow_contents " INTPTR_FORMAT, obj);
+      gclog_or_tty->print_cr("InstanceRefKlass::oop_follow_contents " INTPTR_FORMAT, (void *)obj);
     }
   )
   if (!oopDesc::is_null(heap_oop)) {
@@ -62,7 +62,7 @@
       ref->InstanceKlass::oop_follow_contents(obj);
       debug_only(
         if(TraceReferenceGC && PrintGCDetails) {
-          gclog_or_tty->print_cr("       Non NULL enqueued " INTPTR_FORMAT, obj);
+          gclog_or_tty->print_cr("       Non NULL enqueued " INTPTR_FORMAT, (void *)obj);
         }
       )
       return;
@@ -70,7 +70,7 @@
       // treat referent as normal oop
       debug_only(
         if(TraceReferenceGC && PrintGCDetails) {
-          gclog_or_tty->print_cr("       Non NULL normal " INTPTR_FORMAT, obj);
+          gclog_or_tty->print_cr("       Non NULL normal " INTPTR_FORMAT, (void *)obj);
         }
       )
       MarkSweep::mark_and_push(referent_addr);
@@ -130,7 +130,7 @@
   T heap_oop = oopDesc::load_heap_oop(referent_addr);
   debug_only(
     if(TraceReferenceGC && PrintGCDetails) {
-      gclog_or_tty->print_cr("InstanceRefKlass::oop_follow_contents " INTPTR_FORMAT, obj);
+      gclog_or_tty->print_cr("InstanceRefKlass::oop_follow_contents " INTPTR_FORMAT, (void *)obj);
     }
   )
   if (!oopDesc::is_null(heap_oop)) {
@@ -142,7 +142,7 @@
       ref->InstanceKlass::oop_follow_contents(cm, obj);
       debug_only(
         if(TraceReferenceGC && PrintGCDetails) {
-          gclog_or_tty->print_cr("       Non NULL enqueued " INTPTR_FORMAT, obj);
+          gclog_or_tty->print_cr("       Non NULL enqueued " INTPTR_FORMAT, (void *)obj);
         }
       )
       return;
@@ -150,7 +150,7 @@
       // treat referent as normal oop
       debug_only(
         if(TraceReferenceGC && PrintGCDetails) {
-          gclog_or_tty->print_cr("       Non NULL normal " INTPTR_FORMAT, obj);
+          gclog_or_tty->print_cr("       Non NULL normal " INTPTR_FORMAT, (void *)obj);
         }
       )
       PSParallelCompact::mark_and_push(cm, referent_addr);
--- a/src/share/vm/oops/klass.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/oops/klass.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -139,7 +139,7 @@
   return NULL;
 }
 
-void* Klass::operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, TRAPS) {
+void* Klass::operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, TRAPS) throw() {
   return Metaspace::allocate(loader_data, word_size, /*read_only*/false,
                              MetaspaceObj::ClassType, CHECK_NULL);
 }
@@ -674,13 +674,23 @@
 
 #ifndef PRODUCT
 
-void Klass::verify_vtable_index(int i) {
+bool Klass::verify_vtable_index(int i) {
   if (oop_is_instance()) {
-    assert(i>=0 && i<((InstanceKlass*)this)->vtable_length()/vtableEntry::size(), "index out of bounds");
+    int limit = ((InstanceKlass*)this)->vtable_length()/vtableEntry::size();
+    assert(i >= 0 && i < limit, err_msg("index %d out of bounds %d", i, limit));
   } else {
     assert(oop_is_array(), "Must be");
-    assert(i>=0 && i<((ArrayKlass*)this)->vtable_length()/vtableEntry::size(), "index out of bounds");
+    int limit = ((ArrayKlass*)this)->vtable_length()/vtableEntry::size();
+    assert(i >= 0 && i < limit, err_msg("index %d out of bounds %d", i, limit));
   }
+  return true;
+}
+
+bool Klass::verify_itable_index(int i) {
+  assert(oop_is_instance(), "");
+  int method_count = klassItable::method_count_for_interface(this);
+  assert(i >= 0 && i < method_count, "index out of bounds");
+  return true;
 }
 
 #endif
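
Editor's note: the void-to-bool change lets call sites fold the whole check
into an assert, e.g. (hypothetical caller):

    assert(klass->verify_vtable_index(i), "bad vtable index");
    assert(klass->verify_itable_index(j), "bad itable index");
    // In product builds the assert disappears, and the verify_*
    // functions themselves only exist under #ifndef PRODUCT.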
--- a/src/share/vm/oops/klass.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/oops/klass.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -179,7 +179,7 @@
   // Constructor
   Klass();
 
-  void* operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, TRAPS);
+  void* operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, TRAPS) throw();
 
  public:
   bool is_klass() const volatile { return true; }
@@ -699,7 +699,8 @@
   void verify(bool check_dictionary = true) { verify_on(tty, check_dictionary); }
 
 #ifndef PRODUCT
-  void verify_vtable_index(int index);
+  bool verify_vtable_index(int index);
+  bool verify_itable_index(int index);
 #endif
 
   virtual void oop_verify_on(oop obj, outputStream* st);
--- a/src/share/vm/oops/klassVtable.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/oops/klassVtable.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -47,11 +47,12 @@
 
 
 // this function computes the vtable size (including the size needed for miranda
-// methods) and the number of miranda methods in this class
+// methods) and the number of miranda methods in this class.
 // Note on Miranda methods: Let's say there is a class C that implements
-// interface I.  Let's say there is a method m in I that neither C nor any
-// of its super classes implement (i.e there is no method of any access, with
-// the same name and signature as m), then m is a Miranda method which is
+// interface I, and none of C's superclasses implements I.
+// Let's say there is an abstract method m in I that neither C
+// nor any of its super classes implement (i.e there is no method of any access,
+// with the same name and signature as m), then m is a Miranda method which is
 // entered as a public abstract method in C's vtable.  From then on it should
 // be treated as any other public method in C for method override purposes.
 void klassVtable::compute_vtable_size_and_num_mirandas(
@@ -82,7 +83,7 @@
 
   GrowableArray<Method*> new_mirandas(20);
   // compute the number of mirandas methods that must be added to the end
-  get_mirandas(&new_mirandas, all_mirandas, super, methods, local_interfaces);
+  get_mirandas(&new_mirandas, all_mirandas, super, methods, NULL, local_interfaces);
   *num_new_mirandas = new_mirandas.length();
 
   vtable_length += *num_new_mirandas * vtableEntry::size();
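
Editor's note: a hedged Java-level illustration of the miranda rule described
in the comment above (not part of the diff):

    // interface I { void m(); }               // abstract, no default body
    // class C implements I { /* no m() */ }   // no superclass defines m either
    // => m is a miranda: C's vtable gets a public abstract entry for I.m,
    //    overridable like any other public method of C.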
@@ -111,10 +112,13 @@
 }
 
 int klassVtable::index_of(Method* m, int len) const {
-  assert(m->vtable_index() >= 0, "do not ask this of non-vtable methods");
+  assert(m->has_vtable_index(), "do not ask this of non-vtable methods");
   return m->vtable_index();
 }
 
+// Copy super class's vtable to the first part (prefix) of this class's vtable,
+// and return the number of entries copied.  Expects that 'super' is the Java
+// super class (arrays can have "array" super classes that must be skipped).
 int klassVtable::initialize_from_super(KlassHandle super) {
   if (super.is_null()) {
     return 0;
@@ -139,14 +143,14 @@
   }
 }
 
-// Revised lookup semantics   introduced 1.3 (Kestral beta)
+//
+// Revised lookup semantics introduced in 1.3 (Kestrel beta)
 void klassVtable::initialize_vtable(bool checkconstraints, TRAPS) {
 
   // Note:  Arrays can have intermediate array supers.  Use java_super to skip them.
   KlassHandle super (THREAD, klass()->java_super());
   int nofNewEntries = 0;
 
-
   if (PrintVtables && !klass()->oop_is_array()) {
     ResourceMark rm(THREAD);
     tty->print_cr("Initializing: %s", _klass->name()->as_C_string());
@@ -174,13 +178,15 @@
     int len = methods->length();
     int initialized = super_vtable_len;
 
-    // update_inherited_vtable can stop for gc - ensure using handles
+    // Check each of this class's methods against super;
+    // if override, replace in copy of super vtable, otherwise append to end
     for (int i = 0; i < len; i++) {
+      // update_inherited_vtable can stop for gc - ensure using handles
       HandleMark hm(THREAD);
       assert(methods->at(i)->is_method(), "must be a Method*");
       methodHandle mh(THREAD, methods->at(i));
 
-      bool needs_new_entry = update_inherited_vtable(ik(), mh, super_vtable_len, checkconstraints, CHECK);
+      bool needs_new_entry = update_inherited_vtable(ik(), mh, super_vtable_len, -1, checkconstraints, CHECK);
 
       if (needs_new_entry) {
         put_method_at(mh(), initialized);
@@ -189,11 +195,39 @@
       }
     }
 
-    // add miranda methods; it will also update the value of initialized
-    fill_in_mirandas(&initialized);
+    // update vtable with default_methods
+    Array<Method*>* default_methods = ik()->default_methods();
+    if (default_methods != NULL) {
+      len = default_methods->length();
+      if (len > 0) {
+        Array<int>* def_vtable_indices = NULL;
+        if ((def_vtable_indices = ik()->default_vtable_indices()) == NULL) {
+          def_vtable_indices = ik()->create_new_default_vtable_indices(len, CHECK);
+        } else {
+          assert(def_vtable_indices->length() == len, "reinit vtable len?");
+        }
+        for (int i = 0; i < len; i++) {
+          HandleMark hm(THREAD);
+          assert(default_methods->at(i)->is_method(), "must be a Method*");
+          methodHandle mh(THREAD, default_methods->at(i));
+
+          bool needs_new_entry = update_inherited_vtable(ik(), mh, super_vtable_len, i, checkconstraints, CHECK);
+
+          // needs new entry
+          if (needs_new_entry) {
+            put_method_at(mh(), initialized);
+            def_vtable_indices->at_put(i, initialized); //set vtable index
+            initialized++;
+          }
+        }
+      }
+    }
+
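Editor's note: to make the bookkeeping concrete, a hedged sketch of the
mapping this loop establishes (names from this diff):

    Method* dm   = ik->default_methods()->at(i);
    int     slot = ik->default_vtable_indices()->at(i);  // dm's vtable slot in ik
    // dm->vtable_index() is deliberately left untouched: for interface
    // methods that field doubles as the itable index.
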
+    // add miranda methods; fill_in_mirandas returns the updated value of 'initialized'
+    initialized = fill_in_mirandas(initialized);
 
     // In class hierarchies where the accessibility is not increasing (i.e., going from private ->
-    // package_private -> publicprotected), the vtable might actually be smaller than our initial
+    // package_private -> public/protected), the vtable might actually be smaller than our initial
     // calculation.
     assert(initialized <= _length, "vtable initialization failed");
     for(;initialized < _length; initialized++) {
@@ -224,14 +258,19 @@
 #ifndef PRODUCT
         if (PrintVtables && Verbose) {
           ResourceMark rm(THREAD);
+          char* sig = target_method()->name_and_sig_as_C_string();
           tty->print("transitive overriding superclass %s with %s::%s index %d, original flags: ",
            supersuperklass->internal_name(),
-           _klass->internal_name(), (target_method() != NULL) ?
-           target_method()->name()->as_C_string() : "<NULL>", vtable_index);
+           _klass->internal_name(), sig, vtable_index);
            super_method->access_flags().print_on(tty);
+           if (super_method->is_default_method()) {
+             tty->print("default");
+           }
            tty->print("overriders flags: ");
            target_method->access_flags().print_on(tty);
-           tty->cr();
+           if (target_method->is_default_method()) {
+             tty->print("default");
+           }
         }
 #endif /*PRODUCT*/
         break; // return found superk
@@ -248,36 +287,54 @@
   return superk;
 }
 
-// Methods that are "effectively" final don't need vtable entries.
-bool method_is_effectively_final(
-    AccessFlags klass_flags, methodHandle target) {
-  return target->is_final() || klass_flags.is_final() && !target->is_overpass();
-}
-
 // Update child's copy of super vtable for overrides
-// OR return true if a new vtable entry is required
+// OR return true if a new vtable entry is required.
 // Only called for InstanceKlass's, i.e. not for arrays
 // If that changed, could not use _klass as handle for klass
-bool klassVtable::update_inherited_vtable(InstanceKlass* klass, methodHandle target_method, int super_vtable_len,
-                  bool checkconstraints, TRAPS) {
+bool klassVtable::update_inherited_vtable(InstanceKlass* klass, methodHandle target_method,
+                                          int super_vtable_len, int default_index,
+                                          bool checkconstraints, TRAPS) {
   ResourceMark rm;
   bool allocate_new = true;
   assert(klass->oop_is_instance(), "must be InstanceKlass");
 
-  // Initialize the method's vtable index to "nonvirtual".
-  // If we allocate a vtable entry, we will update it to a non-negative number.
-  target_method()->set_vtable_index(Method::nonvirtual_vtable_index);
+  Array<int>* def_vtable_indices = NULL;
+  bool is_default = false;
+  // Default methods are concrete methods in superinterfaces which are added
+  // to the vtable with their real method_holder.
+  // Since vtable and itable indices share the same storage, don't touch
+  // the default method's real vtable/itable index.
+  // default_vtable_indices stores the vtable value relative to this inheritor.
+  if (default_index >= 0) {
+    is_default = true;
+    def_vtable_indices = klass->default_vtable_indices();
+    assert(def_vtable_indices != NULL, "def vtable alloc?");
+    assert(default_index <= def_vtable_indices->length(), "def vtable len?");
+  } else {
+    assert(klass == target_method()->method_holder(), "caller resp.");
+    // Initialize the method's vtable index to "nonvirtual".
+    // If we allocate a vtable entry, we will update it to a non-negative number.
+    target_method()->set_vtable_index(Method::nonvirtual_vtable_index);
+  }
 
   // Static and <init> methods are never in the vtable
   if (target_method()->is_static() || target_method()->name() ==  vmSymbols::object_initializer_name()) {
     return false;
   }
 
-  if (method_is_effectively_final(klass->access_flags(), target_method)) {
+  if (target_method->is_final_method(klass->access_flags())) {
     // a final method never needs a new entry; final methods can be statically
     // resolved and they have to be present in the vtable only if they override
     // a super's method, in which case they re-use its entry
     allocate_new = false;
+  } else if (klass->is_interface()) {
+    allocate_new = false;  // see note below in needs_new_vtable_entry
+    // An interface never allocates new vtable slots, only inherits old ones.
+    // This method will either be assigned its own itable index later,
+    // or be assigned an inherited vtable index in the loop below.
+    // default methods store their vtable indices in the inheritors default_vtable_indices
+    assert(default_index == -1, "interfaces don't store resolved default methods");
+    target_method()->set_vtable_index(Method::pending_itable_index);
   }
 
   // we need a new entry if there is no superclass
@@ -285,9 +342,10 @@
     return allocate_new;
   }
 
-  // private methods always have a new entry in the vtable
+  // private methods in classes always have a new entry in the vtable
   // specification interpretation since classic has
   // private methods not overriding
+  // JDK8 adds private methods in interfaces which require invokespecial
   if (target_method()->is_private()) {
     return allocate_new;
   }
@@ -299,8 +357,15 @@
 
   Symbol* name = target_method()->name();
   Symbol* signature = target_method()->signature();
-  Handle target_loader(THREAD, _klass()->class_loader());
-  Symbol*  target_classname = _klass->name();
+
+  KlassHandle target_klass(THREAD, target_method()->method_holder());
+  if (target_klass == NULL) {
+    target_klass = _klass;
+  }
+
+  Handle target_loader(THREAD, target_klass->class_loader());
+
+  Symbol* target_classname = target_klass->name();
   for(int i = 0; i < super_vtable_len; i++) {
     Method* super_method = method_at(i);
     // Check if method name matches
@@ -309,10 +374,14 @@
       // get super_klass for method_holder for the found method
       InstanceKlass* super_klass =  super_method->method_holder();
 
-      if ((super_klass->is_override(super_method, target_loader, target_classname, THREAD)) ||
-      ((klass->major_version() >= VTABLE_TRANSITIVE_OVERRIDE_VERSION)
-        && ((super_klass = find_transitive_override(super_klass, target_method, i, target_loader,
-             target_classname, THREAD)) != (InstanceKlass*)NULL))) {
+      if (is_default
+          || ((super_klass->is_override(super_method, target_loader, target_classname, THREAD))
+          || ((klass->major_version() >= VTABLE_TRANSITIVE_OVERRIDE_VERSION)
+          && ((super_klass = find_transitive_override(super_klass,
+                             target_method, i, target_loader,
+                             target_classname, THREAD))
+                             != (InstanceKlass*)NULL))))
+        {
         // overriding, so no new entry
         allocate_new = false;
 
@@ -339,7 +408,7 @@
                 "%s used in the signature";
               char* sig = target_method()->name_and_sig_as_C_string();
               const char* loader1 = SystemDictionary::loader_name(target_loader());
-              char* current = _klass->name()->as_C_string();
+              char* current = target_klass->name()->as_C_string();
               const char* loader2 = SystemDictionary::loader_name(super_loader());
               char* failed_type_name = failed_type_symbol->as_C_string();
               size_t buflen = strlen(msg) + strlen(sig) + strlen(loader1) +
@@ -352,16 +421,39 @@
           }
        }
 
-        put_method_at(target_method(), i);
-        target_method()->set_vtable_index(i);
+       put_method_at(target_method(), i);
+       if (!is_default) {
+         target_method()->set_vtable_index(i);
+       } else {
+         if (def_vtable_indices != NULL) {
+           def_vtable_indices->at_put(default_index, i);
+         }
+         assert(super_method->is_default_method() || super_method->is_overpass()
+                || super_method->is_abstract(), "default override error");
+       }
+
+
 #ifndef PRODUCT
         if (PrintVtables && Verbose) {
+          ResourceMark rm(THREAD);
+          char* sig = target_method()->name_and_sig_as_C_string();
           tty->print("overriding with %s::%s index %d, original flags: ",
-           _klass->internal_name(), (target_method() != NULL) ?
-           target_method()->name()->as_C_string() : "<NULL>", i);
+           target_klass->internal_name(), sig, i);
            super_method->access_flags().print_on(tty);
+           if (super_method->is_default_method()) {
+             tty->print("default");
+           }
+           if (super_method->is_overpass()) {
+             tty->print("overpass");
+           }
            tty->print("overriders flags: ");
            target_method->access_flags().print_on(tty);
+           if (target_method->is_default_method()) {
+             tty->print("default");
+           }
+           if (target_method->is_overpass()) {
+             tty->print("overpass");
+           }
            tty->cr();
         }
 #endif /*PRODUCT*/
@@ -370,12 +462,25 @@
         // but not override another. Once we override one, not need new
 #ifndef PRODUCT
         if (PrintVtables && Verbose) {
+          ResourceMark rm(THREAD);
+          char* sig = target_method()->name_and_sig_as_C_string();
           tty->print("NOT overriding with %s::%s index %d, original flags: ",
-           _klass->internal_name(), (target_method() != NULL) ?
-           target_method()->name()->as_C_string() : "<NULL>", i);
+           target_klass->internal_name(), sig, i);
            super_method->access_flags().print_on(tty);
+           if (super_method->is_default_method()) {
+             tty->print("default");
+           }
+           if (super_method->is_overpass()) {
+             tty->print("overpass");
+           }
            tty->print("overriders flags: ");
            target_method->access_flags().print_on(tty);
+           if (target_method->is_default_method()) {
+             tty->print("default");
+           }
+           if (target_method->is_overpass()) {
+             tty->print("overpass");
+           }
            tty->cr();
         }
 #endif /*PRODUCT*/
@@ -411,8 +516,14 @@
                                          Symbol* classname,
                                          AccessFlags class_flags,
                                          TRAPS) {
+  if (class_flags.is_interface()) {
+    // Interfaces do not use vtables, so there is no point to assigning
+    // a vtable index to any of their methods.  If we refrain from doing this,
+    // we can use Method::_vtable_index to hold the itable index
+    return false;
+  }
 
-  if (method_is_effectively_final(class_flags, target_method) ||
+  if (target_method->is_final_method(class_flags) ||
       // a final method never needs a new entry; final methods can be statically
       // resolved and they have to be present in the vtable only if they override
       // a super's method, in which case they re-use its entry
@@ -424,14 +535,23 @@
     return false;
   }
 
+  // Concrete interface methods do not need new entries, they override
+  // abstract method entries using default inheritance rules
+  if (target_method()->method_holder() != NULL &&
+      target_method()->method_holder()->is_interface() &&
+      !target_method()->is_abstract()) {
+    return false;
+  }
+
   // we need a new entry if there is no superclass
   if (super == NULL) {
     return true;
   }
 
-  // private methods always have a new entry in the vtable
+  // private methods in classes always have a new entry in the vtable
   // specification interpretation since classic has
   // private methods not overriding
+  // JDK8 adds private methods in interfaces, which require invokespecial.
   if (target_method()->is_private()) {
     return true;
   }
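
Taken together, these hunks give needs_new_vtable_entry an early-out shape: interface holders never get vtable indices, final methods bind statically, concrete (default) interface methods reuse inherited entries, and only then do the super/private checks apply. A minimal standalone sketch of that decision order (illustrative names and flags, not HotSpot's real types):

    // Simplified model of the checks above; every field is an assumption
    // standing in for the corresponding HotSpot query.
    struct MethodTraits {
      bool holder_is_interface;   // class_flags.is_interface()
      bool is_final;              // target_method->is_final_method(class_flags)
      bool concrete_iface_method; // default method: interface holder, !is_abstract()
      bool has_super;             // super != NULL
      bool is_private;
    };

    bool needs_new_vtable_entry_sketch(const MethodTraits& m) {
      if (m.holder_is_interface)   return false; // itable, not vtable
      if (m.is_final)              return false; // statically resolved
      if (m.concrete_iface_method) return false; // overrides an abstract entry
      if (!m.has_super)            return true;  // nothing inherited to reuse
      if (m.is_private)            return true;  // private never overrides
      return false; // placeholder for the remaining override search
    }
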
@@ -500,51 +620,70 @@
   return Method::invalid_vtable_index;
 }
 
-// check if an entry is miranda
+// check if an entry at an index is miranda
+// requires that method m at entry be declared ("held") by an interface.
 bool klassVtable::is_miranda_entry_at(int i) {
   Method* m = method_at(i);
   Klass* method_holder = m->method_holder();
   InstanceKlass *mhk = InstanceKlass::cast(method_holder);
 
-  // miranda methods are interface methods in a class's vtable
+  // miranda methods are public abstract instance interface methods in a class's vtable
   if (mhk->is_interface()) {
     assert(m->is_public(), "should be public");
     assert(ik()->implements_interface(method_holder) , "this class should implement the interface");
-    assert(is_miranda(m, ik()->methods(), ik()->super()), "should be a miranda_method");
+    assert(is_miranda(m, ik()->methods(), ik()->default_methods(), ik()->super()), "should be a miranda_method");
     return true;
   }
   return false;
 }
 
-// check if a method is a miranda method, given a class's methods table and it's super
+// check if a method is a miranda method, given a class's methods table,
+// its default_methods table and its super.
+// "miranda" means not static, not defined by this class.
+// private methods in interfaces do not belong in the miranda list.
 // the caller must make sure that the method belongs to an interface implemented by the class
-bool klassVtable::is_miranda(Method* m, Array<Method*>* class_methods, Klass* super) {
-  if (m->is_static()) {
+// Miranda methods only include public interface instance methods
+// Not private methods, not static methods, not default methods (default == concrete, non-abstract).
+bool klassVtable::is_miranda(Method* m, Array<Method*>* class_methods,
+                             Array<Method*>* default_methods, Klass* super) {
+  if (m->is_static() || m->is_private()) {
     return false;
   }
   Symbol* name = m->name();
   Symbol* signature = m->signature();
   if (InstanceKlass::find_method(class_methods, name, signature) == NULL) {
     // did not find it in the method table of the current class
-    if (super == NULL) {
-      // super doesn't exist
-      return true;
-    }
+    if ((default_methods == NULL) ||
+        InstanceKlass::find_method(default_methods, name, signature) == NULL) {
+      if (super == NULL) {
+        // super doesn't exist
+        return true;
+      }
 
-    Method* mo = InstanceKlass::cast(super)->lookup_method(name, signature);
-    if (mo == NULL || mo->access_flags().is_private() ) {
-      // super class hierarchy does not implement it or protection is different
-      return true;
+      Method* mo = InstanceKlass::cast(super)->lookup_method(name, signature);
+      if (mo == NULL || mo->access_flags().is_private() ) {
+        // super class hierarchy does not implement it or protection is different
+        return true;
+      }
     }
   }
 
   return false;
 }
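
Read concretely, the new predicate says: a method is a miranda for this class only if it is a non-static, non-private interface method that is matched neither by the class's own methods, nor by its default-methods table, nor by a non-private implementation somewhere in the superclass chain. A hypothetical standalone model of that test, keying methods by "name+signature" strings instead of Method*:

    #include <set>
    #include <string>

    bool is_miranda_sketch(const std::string& name_sig,
                           bool is_static, bool is_private,
                           const std::set<std::string>& class_methods,
                           const std::set<std::string>& default_methods,
                           const std::set<std::string>& super_non_private) {
      if (is_static || is_private)         return false; // never mirandas
      if (class_methods.count(name_sig))   return false; // declared locally
      if (default_methods.count(name_sig)) return false; // satisfied by a default
      return super_non_private.count(name_sig) == 0;     // no super binding
    }
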
 
+// Scans current_interface_methods for miranda methods that do not
+// already appear in new_mirandas or in default methods, and are also not defined-and-non-private
+// in super (superclass).  These mirandas are added to all_mirandas if it is
+// not null; in addition, those that are not duplicates of miranda methods
+// inherited by super from its interfaces are added to new_mirandas.
+// Thus, new_mirandas will be the set of mirandas that this class introduces,
+// all_mirandas will be the set of all mirandas applicable to this class
+// including all defined in superclasses.
 void klassVtable::add_new_mirandas_to_lists(
     GrowableArray<Method*>* new_mirandas, GrowableArray<Method*>* all_mirandas,
     Array<Method*>* current_interface_methods, Array<Method*>* class_methods,
-    Klass* super) {
+    Array<Method*>* default_methods, Klass* super) {
+
   // iterate thru the current interface's methods to see if any of them is a miranda
   int num_methods = current_interface_methods->length();
   for (int i = 0; i < num_methods; i++) {
@@ -562,7 +701,7 @@
     }
 
     if (!is_duplicate) { // we don't want duplicate miranda entries in the vtable
-      if (is_miranda(im, class_methods, super)) { // is it a miranda at all?
+      if (is_miranda(im, class_methods, default_methods, super)) { // is it a miranda at all?
         InstanceKlass *sk = InstanceKlass::cast(super);
         // check if it is a duplicate of a super's miranda
         if (sk->lookup_method_in_all_interfaces(im->name(), im->signature()) == NULL) {
@@ -579,6 +718,7 @@
 void klassVtable::get_mirandas(GrowableArray<Method*>* new_mirandas,
                                GrowableArray<Method*>* all_mirandas,
                                Klass* super, Array<Method*>* class_methods,
+                               Array<Method*>* default_methods,
                                Array<Klass*>* local_interfaces) {
   assert((new_mirandas->length() == 0) , "current mirandas must be 0");
 
@@ -587,34 +727,75 @@
   for (int i = 0; i < num_local_ifs; i++) {
     InstanceKlass *ik = InstanceKlass::cast(local_interfaces->at(i));
     add_new_mirandas_to_lists(new_mirandas, all_mirandas,
-                              ik->methods(), class_methods, super);
+                              ik->methods(), class_methods,
+                              default_methods, super);
     // iterate thru each local's super interfaces
     Array<Klass*>* super_ifs = ik->transitive_interfaces();
     int num_super_ifs = super_ifs->length();
     for (int j = 0; j < num_super_ifs; j++) {
       InstanceKlass *sik = InstanceKlass::cast(super_ifs->at(j));
       add_new_mirandas_to_lists(new_mirandas, all_mirandas,
-                                sik->methods(), class_methods, super);
+                                sik->methods(), class_methods,
+                                default_methods, super);
     }
   }
 }
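
Because get_mirandas scans each local interface and then every one of its transitive super-interfaces, a candidate declared only in a distant super-interface still gets considered. A reduced sketch of the traversal shape (hypothetical container types):

    #include <vector>

    struct IfaceSketch {
      std::vector<const IfaceSketch*> transitive_supers;
    };

    // scan() stands in for add_new_mirandas_to_lists on one interface's methods.
    template <typename Fn>
    void get_mirandas_traversal_sketch(const std::vector<const IfaceSketch*>& locals,
                                       Fn scan) {
      for (const IfaceSketch* ik : locals) {
        scan(ik);                                    // the local interface itself
        for (const IfaceSketch* sik : ik->transitive_supers) {
          scan(sik);                                 // each transitive super-interface
        }
      }
    }
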
 
-// fill in mirandas
-void klassVtable::fill_in_mirandas(int* initialized) {
+// Discover miranda methods ("miranda" = "interface abstract, no binding"),
+// and append them into the vtable starting at index initialized,
+// return the new value of initialized.
+int klassVtable::fill_in_mirandas(int initialized) {
   GrowableArray<Method*> mirandas(20);
   get_mirandas(&mirandas, NULL, ik()->super(), ik()->methods(),
-               ik()->local_interfaces());
+               ik()->default_methods(), ik()->local_interfaces());
   for (int i = 0; i < mirandas.length(); i++) {
-    put_method_at(mirandas.at(i), *initialized);
-    ++(*initialized);
+    if (PrintVtables && Verbose) {
+      Method* meth = mirandas.at(i);
+      ResourceMark rm(Thread::current());
+      if (meth != NULL) {
+        char* sig = meth->name_and_sig_as_C_string();
+        tty->print("fill in mirandas with %s index %d, flags: ",
+          sig, initialized);
+        meth->access_flags().print_on(tty);
+        if (meth->is_default_method()) {
+          tty->print("default");
+        }
+        tty->cr();
+      }
+    }
+    put_method_at(mirandas.at(i), initialized);
+    ++initialized;
   }
+  return initialized;
 }
 
+// Copy this class's vtable to the vtable beginning at start.
+// Used to copy superclass vtable to prefix of subclass's vtable.
 void klassVtable::copy_vtable_to(vtableEntry* start) {
   Copy::disjoint_words((HeapWord*)table(), (HeapWord*)start, _length * vtableEntry::size());
 }
 
 #if INCLUDE_JVMTI
+bool klassVtable::adjust_default_method(int vtable_index, Method* old_method, Method* new_method) {
+  // If old_method is default, find this vtable index in default_vtable_indices
+  // and replace that method in the _default_methods list
+  bool updated = false;
+
+  Array<Method*>* default_methods = ik()->default_methods();
+  if (default_methods != NULL) {
+    int len = default_methods->length();
+    for (int idx = 0; idx < len; idx++) {
+      if (vtable_index == ik()->default_vtable_indices()->at(idx)) {
+        if (default_methods->at(idx) == old_method) {
+          default_methods->at_put(idx, new_method);
+          updated = true;
+        }
+        break;
+      }
+    }
+  }
+  return updated;
+}
 void klassVtable::adjust_method_entries(Method** old_methods, Method** new_methods,
                                         int methods_length, bool * trace_name_printed) {
   // search the vtable for uses of either obsolete or EMCP methods
@@ -630,18 +811,26 @@
     for (int index = 0; index < length(); index++) {
       if (unchecked_method_at(index) == old_method) {
         put_method_at(new_method, index);
+          // For default methods, need to update the _default_methods array
+          // which can only have one method entry for a given signature
+          bool updated_default = false;
+          if (old_method->is_default_method()) {
+            updated_default = adjust_default_method(index, old_method, new_method);
+          }
 
         if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
           if (!(*trace_name_printed)) {
             // RC_TRACE_MESG macro has an embedded ResourceMark
-            RC_TRACE_MESG(("adjust: name=%s",
+            RC_TRACE_MESG(("adjust: klassname=%s for methods from name=%s",
+                           klass()->external_name(),
                            old_method->method_holder()->external_name()));
             *trace_name_printed = true;
           }
           // RC_TRACE macro has an embedded ResourceMark
-          RC_TRACE(0x00100000, ("vtable method update: %s(%s)",
+          RC_TRACE(0x00100000, ("vtable method update: %s(%s), updated default = %s",
                                 new_method->name()->as_C_string(),
-                                new_method->signature()->as_C_string()));
+                                new_method->signature()->as_C_string(),
+                                updated_default ? "true" : "false"));
         }
         // cannot 'break' here; see for-loop comment above.
       }
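
The picture behind adjust_default_method is two parallel arrays: entry j of _default_methods corresponds to vtable slot default_vtable_indices[j], so a redefined default method is swapped in only at the entry whose recorded vtable index matches. A hypothetical sketch with ints standing in for Method*:

    #include <vector>

    bool adjust_default_method_sketch(int vtable_index, int old_m, int new_m,
                                      std::vector<int>& default_methods,
                                      const std::vector<int>& default_vtable_indices) {
      for (size_t idx = 0; idx < default_methods.size(); idx++) {
        if (default_vtable_indices[idx] == vtable_index) {
          if (default_methods[idx] == old_m) {
            default_methods[idx] = new_m;   // install the redefined method
            return true;
          }
          break;  // at most one default-method entry per vtable index
        }
      }
      return false;
    }
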
@@ -668,6 +857,12 @@
     if (m != NULL) {
       tty->print("      (%5d)  ", i);
       m->access_flags().print_on(tty);
+      if (m->is_default_method()) {
+        tty->print("default");
+      }
+      if (m->is_overpass()) {
+        tty->print("overpass");
+      }
       tty->print(" --  ");
       m->print_name(tty);
       tty->cr();
@@ -723,6 +918,12 @@
 
 // Initialization
 void klassItable::initialize_itable(bool checkconstraints, TRAPS) {
+  if (_klass->is_interface()) {
+    // This needs to go after vtable indices are assigned but
+    // before implementors need to know the number of itable indices.
+    assign_itable_indices_for_interface(_klass());
+  }
+
   // Cannot be set up during bootstrapping; interfaces don't have
   // itables, and klasses with only one entry have empty itables
   if (Universe::is_bootstrapping() ||
@@ -754,45 +955,89 @@
 }
 
 
+inline bool interface_method_needs_itable_index(Method* m) {
+  if (m->is_static())           return false;   // e.g., Stream.empty
+  if (m->is_initializer())      return false;   // <init> or <clinit>
+  // If an interface redeclares a method from java.lang.Object,
+  // it should already have a vtable index, don't touch it.
+  // e.g., CharSequence.toString (from initialize_vtable)
+  // if (m->has_vtable_index())  return false; // NO!
+  return true;
+}
+
+int klassItable::assign_itable_indices_for_interface(Klass* klass) {
+  // an interface does not have an itable, but its methods need to be numbered
+  if (TraceItables) tty->print_cr("%3d: Initializing itable for interface %s", ++initialize_count,
+                                  klass->name()->as_C_string());
+  Array<Method*>* methods = InstanceKlass::cast(klass)->methods();
+  int nof_methods = methods->length();
+  int ime_num = 0;
+  for (int i = 0; i < nof_methods; i++) {
+    Method* m = methods->at(i);
+    if (interface_method_needs_itable_index(m)) {
+      assert(!m->is_final_method(), "no final interface methods");
+      // If m is already assigned a vtable index, do not disturb it.
+      if (!m->has_vtable_index()) {
+        assert(m->vtable_index() == Method::pending_itable_index, "set by initialize_vtable");
+        m->set_itable_index(ime_num);
+        // Progress to next itable entry
+        ime_num++;
+      }
+    }
+  }
+  assert(ime_num == method_count_for_interface(klass), "proper sizing");
+  return ime_num;
+}
+
+int klassItable::method_count_for_interface(Klass* interf) {
+  assert(interf->oop_is_instance(), "must be");
+  assert(interf->is_interface(), "must be");
+  Array<Method*>* methods = InstanceKlass::cast(interf)->methods();
+  int nof_methods = methods->length();
+  while (nof_methods > 0) {
+    Method* m = methods->at(nof_methods-1);
+    if (m->has_itable_index()) {
+      int length = m->itable_index() + 1;
+#ifdef ASSERT
+      while (nof_methods > 0) {
+        m = methods->at(--nof_methods);
+        assert(!m->has_itable_index() || m->itable_index() < length, "");
+      }
+#endif //ASSERT
+      return length;  // return the rightmost itable index, plus one
+    }
+    nof_methods -= 1;
+  }
+  // no methods have itable indices
+  return 0;
+}
+
+
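
Since assign_itable_indices_for_interface hands out indices in ascending method order, the rightmost method carrying an index also carries the largest one, and the method count is that index plus one; the ASSERT loop re-walks the earlier methods to confirm monotonicity. A standalone model of the sizing scan (with -1 as an assumed "no index" marker):

    #include <vector>

    int method_count_for_interface_sketch(const std::vector<int>& itable_indices) {
      for (int i = (int)itable_indices.size() - 1; i >= 0; i--) {
        if (itable_indices[i] >= 0) {
          return itable_indices[i] + 1;  // rightmost assigned index, plus one
        }
      }
      return 0;  // no instance method needed an itable slot
    }
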
 void klassItable::initialize_itable_for_interface(int method_table_offset, KlassHandle interf_h, bool checkconstraints, TRAPS) {
   Array<Method*>* methods = InstanceKlass::cast(interf_h())->methods();
   int nof_methods = methods->length();
   HandleMark hm;
-  KlassHandle klass = _klass;
   assert(nof_methods > 0, "at least one method must exist for interface to be in vtable");
   Handle interface_loader (THREAD, InstanceKlass::cast(interf_h())->class_loader());
-  int ime_num = 0;
 
-  // Skip first Method* if it is a class initializer
-  int i = methods->at(0)->is_static_initializer() ? 1 : 0;
-
-  // m, method_name, method_signature, klass reset each loop so they
-  // don't need preserving across check_signature_loaders call
-  // methods needs a handle in case of gc from check_signature_loaders
-  for(; i < nof_methods; i++) {
+  int ime_count = method_count_for_interface(interf_h());
+  for (int i = 0; i < nof_methods; i++) {
     Method* m = methods->at(i);
-    Symbol* method_name = m->name();
-    Symbol* method_signature = m->signature();
-
-    // This is same code as in Linkresolver::lookup_instance_method_in_klasses
-    Method* target = klass->uncached_lookup_method(method_name, method_signature);
-    while (target != NULL && target->is_static()) {
-      // continue with recursive lookup through the superclass
-      Klass* super = target->method_holder()->super();
-      target = (super == NULL) ? (Method*)NULL : super->uncached_lookup_method(method_name, method_signature);
+    methodHandle target;
+    if (m->has_itable_index()) {
+      LinkResolver::lookup_instance_method_in_klasses(target, _klass, m->name(), m->signature(), CHECK);
     }
     if (target == NULL || !target->is_public() || target->is_abstract()) {
       // Entry does not resolve. Leave it empty.
     } else {
       // Entry did resolve, check loader constraints before initializing
       // if checkconstraints requested
-      methodHandle  target_h (THREAD, target); // preserve across gc
       if (checkconstraints) {
         Handle method_holder_loader (THREAD, target->method_holder()->class_loader());
         if (method_holder_loader() != interface_loader()) {
           ResourceMark rm(THREAD);
           Symbol* failed_type_symbol =
-            SystemDictionary::check_signature_loaders(method_signature,
+            SystemDictionary::check_signature_loaders(m->signature(),
                                                       method_holder_loader,
                                                       interface_loader,
                                                       true, CHECK);
@@ -803,9 +1048,9 @@
               "and the class loader (instance of %s) for interface "
               "%s have different Class objects for the type %s "
               "used in the signature";
-            char* sig = target_h()->name_and_sig_as_C_string();
+            char* sig = target()->name_and_sig_as_C_string();
             const char* loader1 = SystemDictionary::loader_name(method_holder_loader());
-            char* current = klass->name()->as_C_string();
+            char* current = _klass->name()->as_C_string();
             const char* loader2 = SystemDictionary::loader_name(interface_loader());
             char* iface = InstanceKlass::cast(interf_h())->name()->as_C_string();
             char* failed_type_name = failed_type_symbol->as_C_string();
@@ -821,10 +1066,25 @@
       }
 
       // ime may have moved during GC so recalculate address
-      itableOffsetEntry::method_entry(_klass(), method_table_offset)[ime_num].initialize(target_h());
+      int ime_num = m->itable_index();
+      assert(ime_num < ime_count, "oob");
+      itableOffsetEntry::method_entry(_klass(), method_table_offset)[ime_num].initialize(target());
+      if (TraceItables && Verbose) {
+        ResourceMark rm(THREAD);
+        if (target() != NULL) {
+          char* sig = target()->name_and_sig_as_C_string();
+          tty->print("interface: %s, ime_num: %d, target: %s, method_holder: %s ",
+                    interf_h()->internal_name(), ime_num, sig,
+                    target()->method_holder()->internal_name());
+          tty->print("target_method flags: ");
+          target()->access_flags().print_on(tty);
+          if (target()->is_default_method()) {
+            tty->print("default");
+          }
+          tty->cr();
+        }
+      }
     }
-    // Progress to next entry
-    ime_num++;
   }
 }
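
Keying each write by m->itable_index() instead of the old running counter matters: methods that never received an index (statics, initializers) are skipped without shifting later entries, so slot positions stay stable however the resolution loop iterates. A reduced sketch (hypothetical types; 0 stands for an unresolved target):

    #include <vector>

    struct SlotSketch { int target = 0; };  // stands in for itableMethodEntry

    void fill_itable_sketch(const std::vector<int>& itable_indices,   // -1 = none
                            const std::vector<int>& resolved_targets, // 0 = empty
                            std::vector<SlotSketch>& slots) {
      for (size_t i = 0; i < itable_indices.size(); i++) {
        int ime_num = itable_indices[i];
        if (ime_num >= 0 && resolved_targets[i] != 0) {
          slots[ime_num].target = resolved_targets[i];  // position, not order
        }
      }
    }
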
 
@@ -897,6 +1157,9 @@
     if (m != NULL) {
       tty->print("      (%5d)  ", i);
       m->access_flags().print_on(tty);
+      if (m->is_default_method()) {
+        tty->print("default");
+      }
       tty->print(" --  ");
       m->print_name(tty);
       tty->cr();
@@ -913,20 +1176,22 @@
   virtual void doit(Klass* intf, int method_count) = 0;
 };
 
-// Visit all interfaces with at-least one method (excluding <clinit>)
+// Visit all interfaces with at least one itable method
 void visit_all_interfaces(Array<Klass*>* transitive_intf, InterfaceVisiterClosure *blk) {
   // Handle array argument
   for(int i = 0; i < transitive_intf->length(); i++) {
     Klass* intf = transitive_intf->at(i);
     assert(intf->is_interface(), "sanity check");
 
-    // Find no. of methods excluding a <clinit>
-    int method_count = InstanceKlass::cast(intf)->methods()->length();
-    if (method_count > 0) {
-      Method* m = InstanceKlass::cast(intf)->methods()->at(0);
-      assert(m != NULL && m->is_method(), "sanity check");
-      if (m->name() == vmSymbols::object_initializer_name()) {
-        method_count--;
+    // Find no. of itable methods
+    int method_count = 0;
+    // method_count = klassItable::method_count_for_interface(intf);
+    Array<Method*>* methods = InstanceKlass::cast(intf)->methods();
+    if (methods->length() > 0) {
+      for (int i = methods->length(); --i >= 0; ) {
+        if (interface_method_needs_itable_index(methods->at(i))) {
+          method_count++;
+        }
       }
     }
 
@@ -1024,40 +1289,26 @@
 }
 
 
-// m must be a method in an interface
-int klassItable::compute_itable_index(Method* m) {
-  InstanceKlass* intf = m->method_holder();
-  assert(intf->is_interface(), "sanity check");
-  Array<Method*>* methods = intf->methods();
-  int index = 0;
-  while(methods->at(index) != m) {
-    index++;
-    assert(index < methods->length(), "should find index for resolve_invoke");
-  }
-  // Adjust for <clinit>, which is left out of table if first method
-  if (methods->length() > 0 && methods->at(0)->is_static_initializer()) {
-    index--;
-  }
-  return index;
-}
-
-
-// inverse to compute_itable_index
+// inverse to itable_index
 Method* klassItable::method_for_itable_index(Klass* intf, int itable_index) {
   assert(InstanceKlass::cast(intf)->is_interface(), "sanity check");
+  assert(intf->verify_itable_index(itable_index), "");
   Array<Method*>* methods = InstanceKlass::cast(intf)->methods();
 
+  if (itable_index < 0 || itable_index >= method_count_for_interface(intf))
+    return NULL;                // help caller defend against bad indices
+
   int index = itable_index;
-  // Adjust for <clinit>, which is left out of table if first method
-  if (methods->length() > 0 && methods->at(0)->is_static_initializer()) {
-    index++;
+  Method* m = methods->at(index);
+  int index2 = -1;
+  while (!m->has_itable_index() ||
+         (index2 = m->itable_index()) != itable_index) {
+    assert(index2 < itable_index, "monotonic");
+    if (++index == methods->length())
+      return NULL;
+    m = methods->at(index);
   }
-
-  if (itable_index < 0 || index >= methods->length())
-    return NULL;                // help caller defend against bad indexes
-
-  Method* m = methods->at(index);
-  assert(compute_itable_index(m) == itable_index, "correct inverse");
+  assert(m->itable_index() == itable_index, "correct inverse");
 
   return m;
 }
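
With indices attached to methods rather than derived from array position, the inverse lookup can no longer be plain arithmetic; but because some methods are skipped during assignment, a method's position is always >= its itable index, so the scan can start at methods->at(itable_index) and walk forward. A standalone model, reusing the assumed -1 = "no index" encoding from the sizing sketch:

    #include <vector>

    int method_for_itable_index_sketch(const std::vector<int>& itable_indices,
                                       int itable_index) {
      if (itable_index < 0) return -1;
      for (size_t pos = (size_t)itable_index; pos < itable_indices.size(); pos++) {
        if (itable_indices[pos] == itable_index) {
          return (int)pos;                 // position of the matching method
        }
      }
      return -1;  // defend against bad indices, as the real code does
    }
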
--- a/src/share/vm/oops/klassVtable.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/oops/klassVtable.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -97,6 +97,7 @@
   // trace_name_printed is set to true if the current call has
   // printed the klass name so that other routines in the adjust_*
   // group don't print the klass name.
+  bool adjust_default_method(int vtable_index, Method* old_method, Method* new_method);
   void adjust_method_entries(Method** old_methods, Method** new_methods,
                              int methods_length, bool * trace_name_printed);
   bool check_no_old_or_obsolete_entries();
@@ -118,24 +119,28 @@
   void put_method_at(Method* m, int index);
   static bool needs_new_vtable_entry(methodHandle m, Klass* super, Handle classloader, Symbol* classname, AccessFlags access_flags, TRAPS);
 
-  bool update_inherited_vtable(InstanceKlass* klass, methodHandle target_method, int super_vtable_len, bool checkconstraints, TRAPS);
+  bool update_inherited_vtable(InstanceKlass* klass, methodHandle target_method, int super_vtable_len, int default_index, bool checkconstraints, TRAPS);
  InstanceKlass* find_transitive_override(InstanceKlass* initialsuper, methodHandle target_method, int vtable_index,
                                          Handle target_loader, Symbol* target_classname, Thread* THREAD);
 
   // support for miranda methods
   bool is_miranda_entry_at(int i);
-  void fill_in_mirandas(int* initialized);
-  static bool is_miranda(Method* m, Array<Method*>* class_methods, Klass* super);
+  int fill_in_mirandas(int initialized);
+  static bool is_miranda(Method* m, Array<Method*>* class_methods,
+                         Array<Method*>* default_methods, Klass* super);
   static void add_new_mirandas_to_lists(
       GrowableArray<Method*>* new_mirandas,
       GrowableArray<Method*>* all_mirandas,
-      Array<Method*>* current_interface_methods, Array<Method*>* class_methods,
+      Array<Method*>* current_interface_methods,
+      Array<Method*>* class_methods,
+      Array<Method*>* default_methods,
       Klass* super);
   static void get_mirandas(
       GrowableArray<Method*>* new_mirandas,
       GrowableArray<Method*>* all_mirandas, Klass* super,
-      Array<Method*>* class_methods, Array<Klass*>* local_interfaces);
-
+      Array<Method*>* class_methods,
+      Array<Method*>* default_methods,
+      Array<Klass*>* local_interfaces);
   void verify_against(outputStream* st, klassVtable* vt, int index);
   inline InstanceKlass* ik() const;
 };
@@ -150,6 +155,8 @@
 //      from_compiled_code_entry_point -> nmethod entry point
 //      from_interpreter_entry_point   -> i2cadapter
 class vtableEntry VALUE_OBJ_CLASS_SPEC {
+  friend class VMStructs;
+
  public:
   // size in words
   static int size() {
@@ -288,12 +295,12 @@
 #endif // INCLUDE_JVMTI
 
   // Setup of itable
+  static int assign_itable_indices_for_interface(Klass* klass);
+  static int method_count_for_interface(Klass* klass);
   static int compute_itable_size(Array<Klass*>* transitive_interfaces);
   static void setup_itable_offset_table(instanceKlassHandle klass);
 
   // Resolving of method to index
-  static int compute_itable_index(Method* m);
-  // ...and back again:
   static Method* method_for_itable_index(Klass* klass, int itable_index);
 
   // Debugging/Statistics
--- a/src/share/vm/oops/method.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/oops/method.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -509,24 +509,44 @@
   return _access_flags.has_loops();
 }
 
+bool Method::is_final_method(AccessFlags class_access_flags) const {
+  // or "does_not_require_vtable_entry"
+  // default method or overpass can occur, is not final (reuses vtable entry)
+  // private methods get vtable entries for backward class compatibility.
+  if (is_overpass() || is_default_method())  return false;
+  return is_final() || class_access_flags.is_final();
+}
 
 bool Method::is_final_method() const {
-  // %%% Should return true for private methods also,
-  // since there is no way to override them.
-  return is_final() || method_holder()->is_final();
+  return is_final_method(method_holder()->access_flags());
 }
 
-
-bool Method::is_strict_method() const {
-  return is_strict();
+bool Method::is_default_method() const {
+  if (method_holder() != NULL &&
+      method_holder()->is_interface() &&
+      !is_abstract()) {
+    return true;
+  } else {
+    return false;
+  }
 }
 
-
-bool Method::can_be_statically_bound() const {
-  if (is_final_method())  return true;
+bool Method::can_be_statically_bound(AccessFlags class_access_flags) const {
+  if (is_final_method(class_access_flags))  return true;
+#ifdef ASSERT
+  ResourceMark rm;
+  bool is_nonv = (vtable_index() == nonvirtual_vtable_index);
+  if (class_access_flags.is_interface()) {
+      assert(is_nonv == is_static(), err_msg("is_nonv=%s", name_and_sig_as_C_string()));
+  }
+#endif
+  assert(valid_vtable_index() || valid_itable_index(), "method must be linked before we ask this question");
   return vtable_index() == nonvirtual_vtable_index;
 }
 
+bool Method::can_be_statically_bound() const {
+  return can_be_statically_bound(method_holder()->access_flags());
+}
 
 bool Method::is_accessor() const {
   if (code_size() != 5) return false;
@@ -720,11 +740,22 @@
   }
 }
 
+bool Method::is_always_compilable() const {
+  // Generated adapters must be compiled
+  if (is_method_handle_intrinsic() && is_synthetic()) {
+    assert(!is_not_c1_compilable(), "sanity check");
+    assert(!is_not_c2_compilable(), "sanity check");
+    return true;
+  }
+
+  return false;
+}
+
 bool Method::is_not_compilable(int comp_level) const {
   if (number_of_breakpoints() > 0)
     return true;
-  if (is_method_handle_intrinsic())
-    return !is_synthetic();  // the generated adapters must be compiled
+  if (is_always_compilable())
+    return false;
   if (comp_level == CompLevel_any)
     return is_not_c1_compilable() || is_not_c2_compilable();
   if (is_c1_compile(comp_level))
@@ -736,6 +767,10 @@
 
 // call this when compiler finds that this method is not compilable
 void Method::set_not_compilable(int comp_level, bool report, const char* reason) {
+  if (is_always_compilable()) {
+    // Don't mark a method which should be always compilable
+    return;
+  }
   print_made_not_compilable(comp_level, /*is_osr*/ false, report, reason);
   if (comp_level == CompLevel_all) {
     set_not_c1_compilable();
@@ -879,16 +914,6 @@
 // This function must not hit a safepoint!
 address Method::verified_code_entry() {
   debug_only(No_Safepoint_Verifier nsv;)
-  nmethod *code = (nmethod *)OrderAccess::load_ptr_acquire(&_code);
-  if (code == NULL && UseCodeCacheFlushing) {
-    nmethod *saved_code = CodeCache::reanimate_saved_code(this);
-    if (saved_code != NULL) {
-      methodHandle method(this);
-      assert( ! saved_code->is_osr_method(), "should not get here for osr" );
-      set_code( method, saved_code );
-    }
-  }
-
   assert(_from_compiled_entry != NULL, "must be set");
   return _from_compiled_entry;
 }
@@ -952,7 +977,7 @@
 
   assert(ik->is_subclass_of(method_holder()), "should be subklass");
   assert(ik->vtable() != NULL, "vtable should exist");
-  if (vtable_index() == nonvirtual_vtable_index) {
+  if (!has_vtable_index()) {
     return false;
   } else {
     Method* vt_m = ik->method_at_vtable(vtable_index());
@@ -1359,7 +1384,8 @@
 }
 
 // This is only done during class loading, so it is OK to assume method_idnum matches the methods() array
-void Method::sort_methods(Array<Method*>* methods, bool idempotent) {
+// default_methods also uses this without the ordering for fast find_method
+void Method::sort_methods(Array<Method*>* methods, bool idempotent, bool set_idnums) {
   int length = methods->length();
   if (length > 1) {
     {
@@ -1367,14 +1393,15 @@
       QuickSort::sort<Method*>(methods->data(), length, method_comparator, idempotent);
     }
     // Reset method ordering
-    for (int i = 0; i < length; i++) {
-      Method* m = methods->at(i);
-      m->set_method_idnum(i);
+    if (set_idnums) {
+      for (int i = 0; i < length; i++) {
+        Method* m = methods->at(i);
+        m->set_method_idnum(i);
+      }
     }
   }
 }
 
-
 //-----------------------------------------------------------------------------------
 // Non-product code unless JVM/TI needs it
 
@@ -1488,7 +1515,10 @@
       return bp->orig_bytecode();
     }
   }
-  ShouldNotReachHere();
+  {
+    ResourceMark rm;
+    fatal(err_msg("no original bytecode found in %s at bci %d", name_and_sig_as_C_string(), bci));
+  }
   return Bytecodes::_shouldnotreachhere;
 }
 
@@ -1944,7 +1974,7 @@
 
 void Method::print_value_on(outputStream* st) const {
   assert(is_method(), "must be method");
-  st->print_cr(internal_name());
+  st->print(internal_name());
   print_address_on(st);
   st->print(" ");
   name()->print_value_on(st);
@@ -1952,6 +1982,7 @@
   signature()->print_value_on(st);
   st->print(" in ");
   method_holder()->print_value_on(st);
+  if (WizardMode) st->print("#%d", _vtable_index);
   if (WizardMode) st->print("[%d,%d]", size_of_parameters(), max_locals());
   if (WizardMode && code() != NULL) st->print(" ((nmethod*)%p)", code());
 }
--- a/src/share/vm/oops/method.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/oops/method.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -448,16 +448,22 @@
   enum VtableIndexFlag {
     // Valid vtable indexes are non-negative (>= 0).
     // These few negative values are used as sentinels.
-    highest_unused_vtable_index_value = -5,
+    itable_index_max        = -10, // first itable index, growing downward
+    pending_itable_index    = -9,  // itable index will be assigned
     invalid_vtable_index    = -4,  // distinct from any valid vtable index
     garbage_vtable_index    = -3,  // not yet linked; no vtable layout yet
     nonvirtual_vtable_index = -2   // there is no need for vtable dispatch
     // 6330203 Note:  Do not use -1, which was overloaded with many meanings.
   };
   DEBUG_ONLY(bool valid_vtable_index() const     { return _vtable_index >= nonvirtual_vtable_index; })
-  int  vtable_index() const                      { assert(valid_vtable_index(), "");
-                                                   return _vtable_index; }
+  bool has_vtable_index() const                  { return _vtable_index >= 0; }
+  int  vtable_index() const                      { return _vtable_index; }
   void set_vtable_index(int index)               { _vtable_index = index; }
+  DEBUG_ONLY(bool valid_itable_index() const     { return _vtable_index <= pending_itable_index; })
+  bool has_itable_index() const                  { return _vtable_index <= itable_index_max; }
+  int  itable_index() const                      { assert(valid_itable_index(), "");
+                                                   return itable_index_max - _vtable_index; }
+  void set_itable_index(int index)               { _vtable_index = itable_index_max - index; assert(valid_itable_index(), ""); }
 
   // interpreter entry
   address interpreter_entry() const              { return _i2i_entry; }
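
The sentinel scheme packs both index kinds into the single _vtable_index field: non-negative values are vtable indices, and itable index i is stored as itable_index_max - i, so 0, 1, 2 map to -10, -11, -12, growing downward without colliding with the other sentinels. A worked round-trip check (constant copied from the enum above; function names are illustrative):

    #include <cassert>

    enum { itable_index_max = -10 };  // from Method::VtableIndexFlag

    int encode_itable_index(int i) { return itable_index_max - i; } // set_itable_index
    int decode_itable_index(int v) { return itable_index_max - v; } // itable_index()

    int main() {
      assert(encode_itable_index(0) == -10);
      assert(encode_itable_index(2) == -12);
      assert(decode_itable_index(encode_itable_index(7)) == 7);
      // has_vtable_index(): value >= 0; has_itable_index(): value <= -10
      return 0;
    }
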
@@ -560,10 +566,12 @@
 
   // checks method and its method holder
   bool is_final_method() const;
-  bool is_strict_method() const;
+  bool is_final_method(AccessFlags class_access_flags) const;
+  bool is_default_method() const;
 
   // true if method needs no dynamic dispatch (final and/or no vtable entry)
   bool can_be_statically_bound() const;
+  bool can_be_statically_bound(AccessFlags class_access_flags) const;
 
   // returns true if the method has any backward branches.
   bool has_loops() {
@@ -740,10 +748,6 @@
   // so handles are not used to avoid deadlock.
   jmethodID find_jmethod_id_or_null()               { return method_holder()->jmethod_id_or_null(this); }
 
-  // JNI static invoke cached itable index accessors
-  int cached_itable_index()                         { return method_holder()->cached_itable_index(method_idnum()); }
-  void set_cached_itable_index(int index)           { method_holder()->set_cached_itable_index(method_idnum(), index); }
-
   // Support for inlining of intrinsic methods
   vmIntrinsics::ID intrinsic_id() const          { return (vmIntrinsics::ID) _intrinsic_id;           }
   void     set_intrinsic_id(vmIntrinsics::ID id) {                           _intrinsic_id = (u1) id; }
@@ -796,10 +800,12 @@
   void set_not_osr_compilable_quietly(int comp_level = CompLevel_all) {
     set_not_osr_compilable(comp_level, false);
   }
+  bool is_always_compilable() const;
 
  private:
   void print_made_not_compilable(int comp_level, bool is_osr, bool report, const char* reason);
 
+ public:
   MethodCounters* get_method_counters(TRAPS) {
     if (_method_counters == NULL) {
       build_method_counters(this, CHECK_AND_CLEAR_NULL);
@@ -807,7 +813,6 @@
     return _method_counters;
   }
 
- public:
   bool   is_not_c1_compilable() const         { return access_flags().is_not_c1_compilable();  }
   void  set_not_c1_compilable()               {       _access_flags.set_not_c1_compilable();   }
   void clear_not_c1_compilable()              {       _access_flags.clear_not_c1_compilable(); }
@@ -842,7 +847,7 @@
 #endif
 
   // Helper routine used for method sorting
-  static void sort_methods(Array<Method*>* methods, bool idempotent = false);
+  static void sort_methods(Array<Method*>* methods, bool idempotent = false, bool set_idnums = true);
 
   // Deallocation function for redefine classes or if an error occurs
   void deallocate_contents(ClassLoaderData* loader_data);
--- a/src/share/vm/oops/methodData.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/oops/methodData.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -41,7 +41,7 @@
 
 // Some types of data layouts need a length field.
 bool DataLayout::needs_array_len(u1 tag) {
-  return (tag == multi_branch_data_tag) || (tag == arg_info_data_tag);
+  return (tag == multi_branch_data_tag) || (tag == arg_info_data_tag) || (tag == parameters_type_data_tag);
 }
 
 // Perform generic initialization of the data.  More specific
@@ -56,6 +56,11 @@
   if (needs_array_len(tag)) {
     set_cell_at(ArrayData::array_len_off_set, cell_count - 1); // -1 for header.
   }
+  if (tag == call_type_data_tag) {
+    CallTypeData::initialize(this, cell_count);
+  } else if (tag == virtual_call_type_data_tag) {
+    VirtualCallTypeData::initialize(this, cell_count);
+  }
 }
 
 void DataLayout::clean_weak_klass_links(BoolObjectClosure* cl) {
@@ -76,7 +81,7 @@
 }
 
 #ifndef PRODUCT
-void ProfileData::print_shared(outputStream* st, const char* name) {
+void ProfileData::print_shared(outputStream* st, const char* name) const {
   st->print("bci: %d", bci());
   st->fill_to(tab_width_one);
   st->print("%s", name);
@@ -91,8 +96,8 @@
     st->print("flags(%d) ", flags);
 }
 
-void ProfileData::tab(outputStream* st) {
-  st->fill_to(tab_width_two);
+void ProfileData::tab(outputStream* st, bool first) const {
+  st->fill_to(first ? tab_width_one : tab_width_two);
 }
 #endif // !PRODUCT
 
@@ -104,7 +109,7 @@
 
 
 #ifndef PRODUCT
-void BitData::print_data_on(outputStream* st) {
+void BitData::print_data_on(outputStream* st) const {
   print_shared(st, "BitData");
 }
 #endif // !PRODUCT
@@ -115,7 +120,7 @@
 // A CounterData corresponds to a simple counter.
 
 #ifndef PRODUCT
-void CounterData::print_data_on(outputStream* st) {
+void CounterData::print_data_on(outputStream* st) const {
   print_shared(st, "CounterData");
   st->print_cr("count(%u)", count());
 }
@@ -145,12 +150,217 @@
 }
 
 #ifndef PRODUCT
-void JumpData::print_data_on(outputStream* st) {
+void JumpData::print_data_on(outputStream* st) const {
   print_shared(st, "JumpData");
   st->print_cr("taken(%u) displacement(%d)", taken(), displacement());
 }
 #endif // !PRODUCT
 
+int TypeStackSlotEntries::compute_cell_count(Symbol* signature, bool include_receiver, int max) {
+  // Parameter profiling includes the receiver
+  int args_count = include_receiver ? 1 : 0;
+  ResourceMark rm;
+  SignatureStream ss(signature);
+  args_count += ss.reference_parameter_count();
+  args_count = MIN2(args_count, max);
+  return args_count * per_arg_cell_count;
+}
+
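
As a worked example of this arithmetic: a signature such as (ILjava/lang/String;[I)V has two reference parameters (the String and the int array), so with include_receiver == false and two cells per argument (one for the stack slot, one for the type; the assumed value of per_arg_cell_count) the call reserves min(2, max) * 2 = 4 cells. A hedged standalone version:

    #include <algorithm>
    #include <cassert>

    const int per_arg_cell_count_sketch = 2;  // stack-slot cell + type cell (assumed)

    int compute_cell_count_sketch(int reference_parameter_count,
                                  bool include_receiver, int max) {
      int args_count = include_receiver ? 1 : 0;
      args_count += reference_parameter_count;
      args_count = std::min(args_count, max);
      return args_count * per_arg_cell_count_sketch;
    }

    int main() {
      // (ILjava/lang/String;[I)V: two reference parameters, no receiver entry
      assert(compute_cell_count_sketch(2, false, 8) == 4);
      return 0;
    }
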
+int TypeEntriesAtCall::compute_cell_count(BytecodeStream* stream) {
+  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
+  assert(TypeStackSlotEntries::per_arg_count() > ReturnTypeEntry::static_cell_count(), "code to test for arguments/results broken");
+  Bytecode_invoke inv(stream->method(), stream->bci());
+  int args_cell = 0;
+  if (arguments_profiling_enabled()) {
+    args_cell = TypeStackSlotEntries::compute_cell_count(inv.signature(), false, TypeProfileArgsLimit);
+  }
+  int ret_cell = 0;
+  if (return_profiling_enabled() && (inv.result_type() == T_OBJECT || inv.result_type() == T_ARRAY)) {
+    ret_cell = ReturnTypeEntry::static_cell_count();
+  }
+  int header_cell = 0;
+  if (args_cell + ret_cell > 0) {
+    header_cell = header_cell_count();
+  }
+
+  return header_cell + args_cell + ret_cell;
+}
+
+class ArgumentOffsetComputer : public SignatureInfo {
+private:
+  int _max;
+  GrowableArray<int> _offsets;
+
+  void set(int size, BasicType type) { _size += size; }
+  void do_object(int begin, int end) {
+    if (_offsets.length() < _max) {
+      _offsets.push(_size);
+    }
+    SignatureInfo::do_object(begin, end);
+  }
+  void do_array (int begin, int end) {
+    if (_offsets.length() < _max) {
+      _offsets.push(_size);
+    }
+    SignatureInfo::do_array(begin, end);
+  }
+
+public:
+  ArgumentOffsetComputer(Symbol* signature, int max)
+    : SignatureInfo(signature), _max(max), _offsets(Thread::current(), max) {
+  }
+
+  int total() { lazy_iterate_parameters(); return _size; }
+
+  int off_at(int i) const { return _offsets.at(i); }
+};
+
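
ArgumentOffsetComputer records, for each reference parameter, the cumulative stack-slot offset of everything before it, with longs and doubles counting as two slots. For (JLjava/lang/String;I[I)V the recorded offsets are 2 (the String, after the two-slot long) and 4 (the array, after the int). A simplified re-implementation of just that walk, not the SignatureInfo machinery:

    #include <cassert>
    #include <string>
    #include <vector>

    std::vector<int> reference_offsets_sketch(const std::string& sig) {
      std::vector<int> offsets;
      int slot = 0;
      for (size_t i = 1; i < sig.size() && sig[i] != ')'; i++) {
        char c = sig[i];
        if (c == 'L' || c == '[') {
          offsets.push_back(slot);            // a reference arg starts here
          while (sig[i] == '[') i++;          // skip array dimensions
          if (sig[i] == 'L') { while (sig[i] != ';') i++; }
          slot += 1;                          // a reference uses one slot
        } else {
          slot += (c == 'J' || c == 'D') ? 2 : 1;  // long/double use two slots
        }
      }
      return offsets;
    }

    int main() {
      std::vector<int> off = reference_offsets_sketch("(JLjava/lang/String;I[I)V");
      assert(off.size() == 2 && off[0] == 2 && off[1] == 4);
      return 0;
    }
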
+void TypeStackSlotEntries::post_initialize(Symbol* signature, bool has_receiver, bool include_receiver) {
+  ResourceMark rm;
+  int start = 0;
+  // Parameter profiling includes the receiver
+  if (include_receiver && has_receiver) {
+    set_stack_slot(0, 0);
+    set_type(0, type_none());
+    start += 1;
+  }
+  ArgumentOffsetComputer aos(signature, _number_of_entries-start);
+  aos.total();
+  for (int i = start; i < _number_of_entries; i++) {
+    set_stack_slot(i, aos.off_at(i-start) + (has_receiver ? 1 : 0));
+    set_type(i, type_none());
+  }
+}
+
+void CallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
+  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
+  Bytecode_invoke inv(stream->method(), stream->bci());
+
+  SignatureStream ss(inv.signature());
+  if (has_arguments()) {
+#ifdef ASSERT
+    ResourceMark rm;
+    int count = MIN2(ss.reference_parameter_count(), (int)TypeProfileArgsLimit);
+    assert(count > 0, "room for args type but none found?");
+    check_number_of_arguments(count);
+#endif
+    _args.post_initialize(inv.signature(), inv.has_receiver(), false);
+  }
+
+  if (has_return()) {
+    assert(inv.result_type() == T_OBJECT || inv.result_type() == T_ARRAY, "room for a ret type but doesn't return obj?");
+    _ret.post_initialize();
+  }
+}
+
+void VirtualCallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
+  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
+  Bytecode_invoke inv(stream->method(), stream->bci());
+
+  if (has_arguments()) {
+#ifdef ASSERT
+    ResourceMark rm;
+    SignatureStream ss(inv.signature());
+    int count = MIN2(ss.reference_parameter_count(), (int)TypeProfileArgsLimit);
+    assert(count > 0, "room for args type but none found?");
+    check_number_of_arguments(count);
+#endif
+    _args.post_initialize(inv.signature(), inv.has_receiver(), false);
+  }
+
+  if (has_return()) {
+    assert(inv.result_type() == T_OBJECT || inv.result_type() == T_ARRAY, "room for a ret type but doesn't return obj?");
+    _ret.post_initialize();
+  }
+}
+
+bool TypeEntries::is_loader_alive(BoolObjectClosure* is_alive_cl, intptr_t p) {
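+  // Note: despite the name, this returns true when the recorded klass's
+  // loader is no longer alive, so callers use it to clear dead entries.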
+  return !is_type_none(p) &&
+    !((Klass*)klass_part(p))->is_loader_alive(is_alive_cl);
+}
+
+void TypeStackSlotEntries::clean_weak_klass_links(BoolObjectClosure* is_alive_cl) {
+  for (int i = 0; i < _number_of_entries; i++) {
+    intptr_t p = type(i);
+    if (is_loader_alive(is_alive_cl, p)) {
+      set_type(i, type_none());
+    }
+  }
+}
+
+void ReturnTypeEntry::clean_weak_klass_links(BoolObjectClosure* is_alive_cl) {
+  intptr_t p = type();
+  if (is_loader_alive(is_alive_cl, p)) {
+    set_type(type_none());
+  }
+}
+
+bool TypeEntriesAtCall::return_profiling_enabled() {
+  return MethodData::profile_return();
+}
+
+bool TypeEntriesAtCall::arguments_profiling_enabled() {
+  return MethodData::profile_arguments();
+}
+
+#ifndef PRODUCT
+void TypeEntries::print_klass(outputStream* st, intptr_t k) {
+  if (is_type_none(k)) {
+    st->print("none");
+  } else if (is_type_unknown(k)) {
+    st->print("unknown");
+  } else {
+    valid_klass(k)->print_value_on(st);
+  }
+  if (was_null_seen(k)) {
+    st->print(" (null seen)");
+  }
+}
+
+void TypeStackSlotEntries::print_data_on(outputStream* st) const {
+  for (int i = 0; i < _number_of_entries; i++) {
+    _pd->tab(st);
+    st->print("%d: stack(%u) ", i, stack_slot(i));
+    print_klass(st, type(i));
+    st->cr();
+  }
+}
+
+void ReturnTypeEntry::print_data_on(outputStream* st) const {
+  _pd->tab(st);
+  print_klass(st, type());
+  st->cr();
+}
+
+void CallTypeData::print_data_on(outputStream* st) const {
+  CounterData::print_data_on(st);
+  if (has_arguments()) {
+    tab(st, true);
+    st->print("argument types");
+    _args.print_data_on(st);
+  }
+  if (has_return()) {
+    tab(st, true);
+    st->print("return type");
+    _ret.print_data_on(st);
+  }
+}
+
+void VirtualCallTypeData::print_data_on(outputStream* st) const {
+  VirtualCallData::print_data_on(st);
+  if (has_arguments()) {
+    tab(st, true);
+    st->print("argument types");
+    _args.print_data_on(st);
+  }
+  if (has_return()) {
+    tab(st, true);
+    st->print("return type");
+    _ret.print_data_on(st);
+  }
+}
+#endif
+
 // ==================================================================
 // ReceiverTypeData
 //
@@ -169,7 +379,7 @@
 }
 
 #ifndef PRODUCT
-void ReceiverTypeData::print_receiver_data_on(outputStream* st) {
+void ReceiverTypeData::print_receiver_data_on(outputStream* st) const {
   uint row;
   int entries = 0;
   for (row = 0; row < row_limit(); row++) {
@@ -190,11 +400,11 @@
     }
   }
 }
-void ReceiverTypeData::print_data_on(outputStream* st) {
+void ReceiverTypeData::print_data_on(outputStream* st) const {
   print_shared(st, "ReceiverTypeData");
   print_receiver_data_on(st);
 }
-void VirtualCallData::print_data_on(outputStream* st) {
+void VirtualCallData::print_data_on(outputStream* st) const {
   print_shared(st, "VirtualCallData");
   print_receiver_data_on(st);
 }
@@ -251,7 +461,7 @@
 #endif // CC_INTERP
 
 #ifndef PRODUCT
-void RetData::print_data_on(outputStream* st) {
+void RetData::print_data_on(outputStream* st) const {
   print_shared(st, "RetData");
   uint row;
   int entries = 0;
@@ -286,7 +496,7 @@
 }
 
 #ifndef PRODUCT
-void BranchData::print_data_on(outputStream* st) {
+void BranchData::print_data_on(outputStream* st) const {
   print_shared(st, "BranchData");
   st->print_cr("taken(%u) displacement(%d)",
                taken(), displacement());
@@ -360,7 +570,7 @@
 }
 
 #ifndef PRODUCT
-void MultiBranchData::print_data_on(outputStream* st) {
+void MultiBranchData::print_data_on(outputStream* st) const {
   print_shared(st, "MultiBranchData");
   st->print_cr("default_count(%u) displacement(%d)",
                default_count(), default_displacement());
@@ -374,7 +584,7 @@
 #endif
 
 #ifndef PRODUCT
-void ArgInfoData::print_data_on(outputStream* st) {
+void ArgInfoData::print_data_on(outputStream* st) const {
   print_shared(st, "ArgInfoData");
   int nargs = number_of_args();
   for (int i = 0; i < nargs; i++) {
@@ -384,6 +594,34 @@
 }
 
 #endif
+
+int ParametersTypeData::compute_cell_count(Method* m) {
+  if (!MethodData::profile_parameters_for_method(m)) {
+    return 0;
+  }
+  int max = TypeProfileParmsLimit == -1 ? INT_MAX : TypeProfileParmsLimit;
+  int obj_args = TypeStackSlotEntries::compute_cell_count(m->signature(), !m->is_static(), max);
+  if (obj_args > 0) {
+    return obj_args + 1; // 1 cell for array len
+  }
+  return 0;
+}
+
+void ParametersTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
+  _parameters.post_initialize(mdo->method()->signature(), !mdo->method()->is_static(), true);
+}
+
+bool ParametersTypeData::profiling_enabled() {
+  return MethodData::profile_parameters();
+}
+
+#ifndef PRODUCT
+void ParametersTypeData::print_data_on(outputStream* st) const {
+  st->print("parameter types");
+  _parameters.print_data_on(st);
+}
+#endif
+
 // ==================================================================
 // MethodData*
 //
@@ -412,7 +650,11 @@
     }
   case Bytecodes::_invokespecial:
   case Bytecodes::_invokestatic:
-    return CounterData::static_cell_count();
+    if (MethodData::profile_arguments() || MethodData::profile_return()) {
+      return variable_cell_count;
+    } else {
+      return CounterData::static_cell_count();
+    }
   case Bytecodes::_goto:
   case Bytecodes::_goto_w:
   case Bytecodes::_jsr:
@@ -420,9 +662,17 @@
     return JumpData::static_cell_count();
   case Bytecodes::_invokevirtual:
   case Bytecodes::_invokeinterface:
-    return VirtualCallData::static_cell_count();
+    if (MethodData::profile_arguments() || MethodData::profile_return()) {
+      return variable_cell_count;
+    } else {
+      return VirtualCallData::static_cell_count();
+    }
   case Bytecodes::_invokedynamic:
-    return CounterData::static_cell_count();
+    if (MethodData::profile_arguments() || MethodData::profile_return()) {
+      return variable_cell_count;
+    } else {
+      return CounterData::static_cell_count();
+    }
   case Bytecodes::_ret:
     return RetData::static_cell_count();
   case Bytecodes::_ifeq:
@@ -458,7 +708,36 @@
     return 0;
   }
   if (cell_count == variable_cell_count) {
-    cell_count = MultiBranchData::compute_cell_count(stream);
+    switch (stream->code()) {
+    case Bytecodes::_lookupswitch:
+    case Bytecodes::_tableswitch:
+      cell_count = MultiBranchData::compute_cell_count(stream);
+      break;
+    case Bytecodes::_invokespecial:
+    case Bytecodes::_invokestatic:
+    case Bytecodes::_invokedynamic:
+      assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
+      if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
+          profile_return_for_invoke(stream->method(), stream->bci())) {
+        cell_count = CallTypeData::compute_cell_count(stream);
+      } else {
+        cell_count = CounterData::static_cell_count();
+      }
+      break;
+    case Bytecodes::_invokevirtual:
+    case Bytecodes::_invokeinterface: {
+      assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
+      if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
+          profile_return_for_invoke(stream->method(), stream->bci())) {
+        cell_count = VirtualCallTypeData::compute_cell_count(stream);
+      } else {
+        cell_count = VirtualCallData::static_cell_count();
+      }
+      break;
+    }
+    default:
+      fatal("unexpected bytecode for var length profile data");
+    }
   }
   // Note:  cell_count might be zero, meaning that there is just
   //        a DataLayout header, with no extra cells.
@@ -504,6 +783,13 @@
   // Add a cell to record information about modified arguments.
   int arg_size = method->size_of_parameters();
   object_size += DataLayout::compute_size_in_bytes(arg_size+1);
+
+  // Reserve room for an area of the MDO dedicated to profiling of
+  // parameters
+  int args_cell = ParametersTypeData::compute_cell_count(method());
+  if (args_cell > 0) {
+    object_size += DataLayout::compute_size_in_bytes(args_cell);
+  }
   return object_size;
 }
 
@@ -539,10 +825,21 @@
     }
     break;
   case Bytecodes::_invokespecial:
-  case Bytecodes::_invokestatic:
-    cell_count = CounterData::static_cell_count();
-    tag = DataLayout::counter_data_tag;
+  case Bytecodes::_invokestatic: {
+    int counter_data_cell_count = CounterData::static_cell_count();
+    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
+        profile_return_for_invoke(stream->method(), stream->bci())) {
+      cell_count = CallTypeData::compute_cell_count(stream);
+    } else {
+      cell_count = counter_data_cell_count;
+    }
+    if (cell_count > counter_data_cell_count) {
+      tag = DataLayout::call_type_data_tag;
+    } else {
+      tag = DataLayout::counter_data_tag;
+    }
     break;
+  }
   case Bytecodes::_goto:
   case Bytecodes::_goto_w:
   case Bytecodes::_jsr:
@@ -551,15 +848,37 @@
     tag = DataLayout::jump_data_tag;
     break;
   case Bytecodes::_invokevirtual:
-  case Bytecodes::_invokeinterface:
-    cell_count = VirtualCallData::static_cell_count();
-    tag = DataLayout::virtual_call_data_tag;
+  case Bytecodes::_invokeinterface: {
+    int virtual_call_data_cell_count = VirtualCallData::static_cell_count();
+    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
+        profile_return_for_invoke(stream->method(), stream->bci())) {
+      cell_count = VirtualCallTypeData::compute_cell_count(stream);
+    } else {
+      cell_count = virtual_call_data_cell_count;
+    }
+    if (cell_count > virtual_call_data_cell_count) {
+      tag = DataLayout::virtual_call_type_data_tag;
+    } else {
+      tag = DataLayout::virtual_call_data_tag;
+    }
     break;
-  case Bytecodes::_invokedynamic:
+  }
+  case Bytecodes::_invokedynamic: {
     // %%% should make a type profile for any invokedynamic that takes a ref argument
-    cell_count = CounterData::static_cell_count();
-    tag = DataLayout::counter_data_tag;
+    int counter_data_cell_count = CounterData::static_cell_count();
+    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
+        profile_return_for_invoke(stream->method(), stream->bci())) {
+      cell_count = CallTypeData::compute_cell_count(stream);
+    } else {
+      cell_count = counter_data_cell_count;
+    }
+    if (cell_count > counter_data_cell_count) {
+      tag = DataLayout::call_type_data_tag;
+    } else {
+      tag = DataLayout::counter_data_tag;
+    }
     break;
+  }
   case Bytecodes::_ret:
     cell_count = RetData::static_cell_count();
     tag = DataLayout::ret_data_tag;
@@ -590,6 +909,11 @@
     break;
   }
   assert(tag == DataLayout::multi_branch_data_tag ||
+         ((MethodData::profile_arguments() || MethodData::profile_return()) &&
+          (tag == DataLayout::call_type_data_tag ||
+           tag == DataLayout::counter_data_tag ||
+           tag == DataLayout::virtual_call_type_data_tag ||
+           tag == DataLayout::virtual_call_data_tag)) ||
          cell_count == bytecode_cell_count(c), "cell counts must agree");
   if (cell_count >= 0) {
     assert(tag != DataLayout::no_tag, "bad tag");
@@ -636,6 +960,12 @@
     return new MultiBranchData(this);
   case DataLayout::arg_info_data_tag:
     return new ArgInfoData(this);
+  case DataLayout::call_type_data_tag:
+    return new CallTypeData(this);
+  case DataLayout::virtual_call_type_data_tag:
+    return new VirtualCallTypeData(this);
+  case DataLayout::parameters_type_data_tag:
+    return new ParametersTypeData(this);
   };
 }
 
@@ -657,6 +987,9 @@
     stream->next();
     data->post_initialize(stream, this);
   }
+  if (_parameters_type_data_di != -1) {
+    parameters_type_data()->post_initialize(NULL, this);
+  }
 }
 
 // Initialize the MethodData* corresponding to a given method.
@@ -696,7 +1029,23 @@
   int arg_size = method->size_of_parameters();
   dp->initialize(DataLayout::arg_info_data_tag, 0, arg_size+1);
 
-  object_size += extra_size + DataLayout::compute_size_in_bytes(arg_size+1);
+  int arg_data_size = DataLayout::compute_size_in_bytes(arg_size+1);
+  object_size += extra_size + arg_data_size;
+
+  int args_cell = ParametersTypeData::compute_cell_count(method());
+  // If we are profiling parameters, we reserve an area near the end
+  // of the MDO after the slots for bytecodes (because there's no bci
+  // for method entry so they don't fit with the framework for the
+  // profiling of bytecodes). We store the offset within the MDO of
+  // this area (or -1 if no parameter is profiled)
+  if (args_cell > 0) {
+    object_size += DataLayout::compute_size_in_bytes(args_cell);
+    _parameters_type_data_di = data_size + extra_size + arg_data_size;
+    DataLayout *dp = data_layout_at(data_size + extra_size + arg_data_size);
+    dp->initialize(DataLayout::parameters_type_data_tag, 0, args_cell);
+  } else {
+    _parameters_type_data_di = -1;
+  }
 
   // Set an initial hint. Don't use set_hint_di() because
   // first_di() may be out of bounds if data_size is 0.
@@ -855,6 +1204,9 @@
 void MethodData::print_data_on(outputStream* st) const {
   ResourceMark rm;
   ProfileData* data = first_data();
+  if (_parameters_type_data_di != -1) {
+    parameters_type_data()->print_data_on(st);
+  }
   for ( ; is_valid(data); data = next_data(data)) {
     st->print("%d", dp_to_di(data->dp()));
     st->fill_to(6);
@@ -903,3 +1255,99 @@
   NEEDS_CLEANUP;
   // not yet implemented.
 }
+
+bool MethodData::profile_jsr292(methodHandle m, int bci) {
+  if (m->is_compiled_lambda_form()) {
+    return true;
+  }
+
+  Bytecode_invoke inv(m , bci);
+  return inv.is_invokedynamic() || inv.is_invokehandle();
+}
+
+int MethodData::profile_arguments_flag() {
+  return TypeProfileLevel % 10;
+}
+
+bool MethodData::profile_arguments() {
+  return profile_arguments_flag() > no_type_profile && profile_arguments_flag() <= type_profile_all;
+}
+
+bool MethodData::profile_arguments_jsr292_only() {
+  return profile_arguments_flag() == type_profile_jsr292;
+}
+
+bool MethodData::profile_all_arguments() {
+  return profile_arguments_flag() == type_profile_all;
+}
+
+bool MethodData::profile_arguments_for_invoke(methodHandle m, int bci) {
+  if (!profile_arguments()) {
+    return false;
+  }
+
+  if (profile_all_arguments()) {
+    return true;
+  }
+
+  assert(profile_arguments_jsr292_only(), "inconsistent");
+  return profile_jsr292(m, bci);
+}
+
+int MethodData::profile_return_flag() {
+  return (TypeProfileLevel % 100) / 10;
+}
+
+bool MethodData::profile_return() {
+  return profile_return_flag() > no_type_profile && profile_return_flag() <= type_profile_all;
+}
+
+bool MethodData::profile_return_jsr292_only() {
+  return profile_return_flag() == type_profile_jsr292;
+}
+
+bool MethodData::profile_all_return() {
+  return profile_return_flag() == type_profile_all;
+}
+
+bool MethodData::profile_return_for_invoke(methodHandle m, int bci) {
+  if (!profile_return()) {
+    return false;
+  }
+
+  if (profile_all_return()) {
+    return true;
+  }
+
+  assert(profile_return_jsr292_only(), "inconsistent");
+  return profile_jsr292(m, bci);
+}
+
+int MethodData::profile_parameters_flag() {
+  return TypeProfileLevel / 100;
+}
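+
+// Example with a hypothetical flag value: -XX:TypeProfileLevel=122 decodes
+// digit by digit as
+//   profile_arguments_flag()  == 122 % 10         == 2  (type_profile_all)
+//   profile_return_flag()     == (122 % 100) / 10 == 2  (type_profile_all)
+//   profile_parameters_flag() == 122 / 100        == 1  (type_profile_jsr292)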
+
+bool MethodData::profile_parameters() {
+  return profile_parameters_flag() > no_type_profile && profile_parameters_flag() <= type_profile_all;
+}
+
+bool MethodData::profile_parameters_jsr292_only() {
+  return profile_parameters_flag() == type_profile_jsr292;
+}
+
+bool MethodData::profile_all_parameters() {
+  return profile_parameters_flag() == type_profile_all;
+}
+
+bool MethodData::profile_parameters_for_method(methodHandle m) {
+  if (!profile_parameters()) {
+    return false;
+  }
+
+  if (profile_all_parameters()) {
+    return true;
+  }
+
+  assert(profile_parameters_jsr292_only(), "inconsistent");
+  return m->is_compiled_lambda_form();
+}
--- a/src/share/vm/oops/methodData.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/oops/methodData.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -72,6 +72,8 @@
 //
 // Overlay for generic profiling data.
 class DataLayout VALUE_OBJ_CLASS_SPEC {
+  friend class VMStructs;
+
 private:
   // Every data layout begins with a header.  This header
   // contains a tag, which is used to indicate the size/layout
@@ -115,7 +117,10 @@
     ret_data_tag,
     branch_data_tag,
     multi_branch_data_tag,
-    arg_info_data_tag
+    arg_info_data_tag,
+    call_type_data_tag,
+    virtual_call_type_data_tag,
+    parameters_type_data_tag
   };
 
   enum {
@@ -163,7 +168,7 @@
   // occurred, and the MDO shows N occurrences of X, we make the
   // simplifying assumption that all N occurrences can be blamed
   // on that BCI.
-  int trap_state() {
+  int trap_state() const {
     return ((_header._struct._flags >> trap_shift) & trap_mask);
   }
 
@@ -173,11 +178,11 @@
     _header._struct._flags = (new_state << trap_shift) | old_flags;
   }
 
-  u1 flags() {
+  u1 flags() const {
     return _header._struct._flags;
   }
 
-  u2 bci() {
+  u2 bci() const {
     return _header._struct._bci;
   }
 
@@ -196,7 +201,7 @@
   void release_set_cell_at(int index, intptr_t value) {
     OrderAccess::release_store_ptr(&_cells[index], value);
   }
-  intptr_t cell_at(int index) {
+  intptr_t cell_at(int index) const {
     return _cells[index];
   }
 
@@ -204,7 +209,7 @@
     assert(flag_number < flag_limit, "oob");
     _header._struct._flags |= (0x1 << flag_number);
   }
-  bool flag_at(int flag_number) {
+  bool flag_at(int flag_number) const {
     assert(flag_number < flag_limit, "oob");
     return (_header._struct._flags & (0x1 << flag_number)) != 0;
   }
@@ -257,19 +262,24 @@
 class     CounterData;
 class       ReceiverTypeData;
 class         VirtualCallData;
+class           VirtualCallTypeData;
 class       RetData;
+class       CallTypeData;
 class   JumpData;
 class     BranchData;
 class   ArrayData;
 class     MultiBranchData;
 class     ArgInfoData;
-
+class     ParametersTypeData;
 
 // ProfileData
 //
 // A ProfileData object is created to refer to a section of profiling
 // data in a structured way.
 class ProfileData : public ResourceObj {
+  friend class TypeEntries;
+  friend class ReturnTypeEntry;
+  friend class TypeStackSlotEntries;
 private:
 #ifndef PRODUCT
   enum {
@@ -283,6 +293,7 @@
 
 protected:
   DataLayout* data() { return _data; }
+  const DataLayout* data() const { return _data; }
 
   enum {
     cell_size = DataLayout::cell_size
@@ -290,7 +301,7 @@
 
 public:
   // How many cells are in this?
-  virtual int cell_count() {
+  virtual int cell_count() const {
     ShouldNotReachHere();
     return -1;
   }
@@ -310,7 +321,7 @@
     assert(0 <= index && index < cell_count(), "oob");
     data()->release_set_cell_at(index, value);
   }
-  intptr_t intptr_at(int index) {
+  intptr_t intptr_at(int index) const {
     assert(0 <= index && index < cell_count(), "oob");
     return data()->cell_at(index);
   }
@@ -320,7 +331,7 @@
   void release_set_uint_at(int index, uint value) {
     release_set_intptr_at(index, (intptr_t) value);
   }
-  uint uint_at(int index) {
+  uint uint_at(int index) const {
     return (uint)intptr_at(index);
   }
   void set_int_at(int index, int value) {
@@ -329,23 +340,23 @@
   void release_set_int_at(int index, int value) {
     release_set_intptr_at(index, (intptr_t) value);
   }
-  int int_at(int index) {
+  int int_at(int index) const {
     return (int)intptr_at(index);
   }
-  int int_at_unchecked(int index) {
+  int int_at_unchecked(int index) const {
     return (int)data()->cell_at(index);
   }
   void set_oop_at(int index, oop value) {
-    set_intptr_at(index, (intptr_t) value);
+    set_intptr_at(index, cast_from_oop<intptr_t>(value));
   }
-  oop oop_at(int index) {
-    return (oop)intptr_at(index);
+  oop oop_at(int index) const {
+    return cast_to_oop(intptr_at(index));
   }
 
   void set_flag_at(int flag_number) {
     data()->set_flag_at(flag_number);
   }
-  bool flag_at(int flag_number) {
+  bool flag_at(int flag_number) const {
     return data()->flag_at(flag_number);
   }
 
@@ -400,7 +411,7 @@
   // Constructor for invalid ProfileData.
   ProfileData();
 
-  u2 bci() {
+  u2 bci() const {
     return data()->bci();
   }
 
@@ -408,7 +419,7 @@
     return (address)_data;
   }
 
-  int trap_state() {
+  int trap_state() const {
     return data()->trap_state();
   }
   void set_trap_state(int new_state) {
@@ -416,58 +427,73 @@
   }
 
   // Type checking
-  virtual bool is_BitData()         { return false; }
-  virtual bool is_CounterData()     { return false; }
-  virtual bool is_JumpData()        { return false; }
-  virtual bool is_ReceiverTypeData(){ return false; }
-  virtual bool is_VirtualCallData() { return false; }
-  virtual bool is_RetData()         { return false; }
-  virtual bool is_BranchData()      { return false; }
-  virtual bool is_ArrayData()       { return false; }
-  virtual bool is_MultiBranchData() { return false; }
-  virtual bool is_ArgInfoData()     { return false; }
+  virtual bool is_BitData()         const { return false; }
+  virtual bool is_CounterData()     const { return false; }
+  virtual bool is_JumpData()        const { return false; }
+  virtual bool is_ReceiverTypeData()const { return false; }
+  virtual bool is_VirtualCallData() const { return false; }
+  virtual bool is_RetData()         const { return false; }
+  virtual bool is_BranchData()      const { return false; }
+  virtual bool is_ArrayData()       const { return false; }
+  virtual bool is_MultiBranchData() const { return false; }
+  virtual bool is_ArgInfoData()     const { return false; }
+  virtual bool is_CallTypeData()    const { return false; }
+  virtual bool is_VirtualCallTypeData()const { return false; }
+  virtual bool is_ParametersTypeData() const { return false; }
 
 
-  BitData* as_BitData() {
+  BitData* as_BitData() const {
     assert(is_BitData(), "wrong type");
     return is_BitData()         ? (BitData*)        this : NULL;
   }
-  CounterData* as_CounterData() {
+  CounterData* as_CounterData() const {
     assert(is_CounterData(), "wrong type");
     return is_CounterData()     ? (CounterData*)    this : NULL;
   }
-  JumpData* as_JumpData() {
+  JumpData* as_JumpData() const {
     assert(is_JumpData(), "wrong type");
     return is_JumpData()        ? (JumpData*)       this : NULL;
   }
-  ReceiverTypeData* as_ReceiverTypeData() {
+  ReceiverTypeData* as_ReceiverTypeData() const {
     assert(is_ReceiverTypeData(), "wrong type");
     return is_ReceiverTypeData() ? (ReceiverTypeData*)this : NULL;
   }
-  VirtualCallData* as_VirtualCallData() {
+  VirtualCallData* as_VirtualCallData() const {
     assert(is_VirtualCallData(), "wrong type");
     return is_VirtualCallData() ? (VirtualCallData*)this : NULL;
   }
-  RetData* as_RetData() {
+  RetData* as_RetData() const {
     assert(is_RetData(), "wrong type");
     return is_RetData()         ? (RetData*)        this : NULL;
   }
-  BranchData* as_BranchData() {
+  BranchData* as_BranchData() const {
     assert(is_BranchData(), "wrong type");
     return is_BranchData()      ? (BranchData*)     this : NULL;
   }
-  ArrayData* as_ArrayData() {
+  ArrayData* as_ArrayData() const {
     assert(is_ArrayData(), "wrong type");
     return is_ArrayData()       ? (ArrayData*)      this : NULL;
   }
-  MultiBranchData* as_MultiBranchData() {
+  MultiBranchData* as_MultiBranchData() const {
     assert(is_MultiBranchData(), "wrong type");
     return is_MultiBranchData() ? (MultiBranchData*)this : NULL;
   }
-  ArgInfoData* as_ArgInfoData() {
+  ArgInfoData* as_ArgInfoData() const {
     assert(is_ArgInfoData(), "wrong type");
     return is_ArgInfoData() ? (ArgInfoData*)this : NULL;
   }
+  CallTypeData* as_CallTypeData() const {
+    assert(is_CallTypeData(), "wrong type");
+    return is_CallTypeData() ? (CallTypeData*)this : NULL;
+  }
+  VirtualCallTypeData* as_VirtualCallTypeData() const {
+    assert(is_VirtualCallTypeData(), "wrong type");
+    return is_VirtualCallTypeData() ? (VirtualCallTypeData*)this : NULL;
+  }
+  ParametersTypeData* as_ParametersTypeData() const {
+    assert(is_ParametersTypeData(), "wrong type");
+    return is_ParametersTypeData() ? (ParametersTypeData*)this : NULL;
+  }
 
 
   // Subclass specific initialization
@@ -481,15 +507,15 @@
   // an oop in a ProfileData to the ci equivalent. Generally speaking,
   // most ProfileData don't require any translation, so we provide the null
   // translation here, and the required translators are in the ci subclasses.
-  virtual void translate_from(ProfileData* data) {}
+  virtual void translate_from(const ProfileData* data) {}
 
-  virtual void print_data_on(outputStream* st) {
+  virtual void print_data_on(outputStream* st) const {
     ShouldNotReachHere();
   }
 
 #ifndef PRODUCT
-  void print_shared(outputStream* st, const char* name);
-  void tab(outputStream* st);
+  void print_shared(outputStream* st, const char* name) const;
+  void tab(outputStream* st, bool first = false) const;
 #endif
 };
 
@@ -508,13 +534,13 @@
   BitData(DataLayout* layout) : ProfileData(layout) {
   }
 
-  virtual bool is_BitData() { return true; }
+  virtual bool is_BitData() const { return true; }
 
   static int static_cell_count() {
     return bit_cell_count;
   }
 
-  virtual int cell_count() {
+  virtual int cell_count() const {
     return static_cell_count();
   }
 
@@ -550,7 +576,7 @@
 #endif // CC_INTERP
 
 #ifndef PRODUCT
-  void print_data_on(outputStream* st);
+  void print_data_on(outputStream* st) const;
 #endif
 };
 
@@ -566,18 +592,18 @@
 public:
   CounterData(DataLayout* layout) : BitData(layout) {}
 
-  virtual bool is_CounterData() { return true; }
+  virtual bool is_CounterData() const { return true; }
 
   static int static_cell_count() {
     return counter_cell_count;
   }
 
-  virtual int cell_count() {
+  virtual int cell_count() const {
     return static_cell_count();
   }
 
   // Direct accessor
-  uint count() {
+  uint count() const {
     return uint_at(count_off);
   }
 
@@ -613,7 +639,7 @@
 #endif // CC_INTERP
 
 #ifndef PRODUCT
-  void print_data_on(outputStream* st);
+  void print_data_on(outputStream* st) const;
 #endif
 };
 
@@ -641,18 +667,18 @@
       layout->tag() == DataLayout::branch_data_tag, "wrong type");
   }
 
-  virtual bool is_JumpData() { return true; }
+  virtual bool is_JumpData() const { return true; }
 
   static int static_cell_count() {
     return jump_cell_count;
   }
 
-  virtual int cell_count() {
+  virtual int cell_count() const {
     return static_cell_count();
   }
 
   // Direct accessor
-  uint taken() {
+  uint taken() const {
     return uint_at(taken_off_set);
   }
 
@@ -669,7 +695,7 @@
     return cnt;
   }
 
-  int displacement() {
+  int displacement() const {
     return int_at(displacement_off_set);
   }
 
@@ -700,7 +726,417 @@
   void post_initialize(BytecodeStream* stream, MethodData* mdo);
 
 #ifndef PRODUCT
-  void print_data_on(outputStream* st);
+  void print_data_on(outputStream* st) const;
+#endif
+};
+
+// Entries in a ProfileData object to record types: an entry can either
+// be none (no profile yet), unknown (conflicting profile data) or a
+// klass if a single one is seen. Whether a null reference was seen is
+// also recorded. No counter is associated with the type and a single
+// type is tracked (unlike VirtualCallData).
+class TypeEntries {
+
+public:
+
+  // A single cell is used to record information for a type:
+  // - the cell is initialized to 0
+  // - when a type is discovered it is stored in the cell
+  // - bit 0 of the cell is used to record whether a null reference
+  // was encountered or not
+  // - bit 1 is set to record a conflict in the type information
+
+  enum {
+    null_seen = 1,
+    type_mask = ~null_seen,
+    type_unknown = 2,
+    status_bits = null_seen | type_unknown,
+    type_klass_mask = ~status_bits
+  };
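+
+  // Worked example, assuming an 8-byte-aligned Klass* at address 0x7f00a8:
+  //   type_none()                      == 0x000000  (nothing recorded yet)
+  //   with_status((Klass*)0x7f00a8, 0) == 0x7f00a8  (one klass, no null seen)
+  //   0x7f00a8 | null_seen             == 0x7f00a9  (same klass, null seen)
+  //   type_unknown | null_seen         == 0x000003  (conflict, null seen)
+  // klass_part() masks bits 0 and 1 off again to recover the Klass*.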
+
+  // what to initialize a cell to
+  static intptr_t type_none() {
+    return 0;
+  }
+
+  // null seen = bit 0 set?
+  static bool was_null_seen(intptr_t v) {
+    return (v & null_seen) != 0;
+  }
+
+  // conflicting type information = bit 1 set?
+  static bool is_type_unknown(intptr_t v) {
+    return (v & type_unknown) != 0;
+  }
+
+  // no type information yet = all bits cleared, ignoring bit 0?
+  static bool is_type_none(intptr_t v) {
+    return (v & type_mask) == 0;
+  }
+
+  // recorded type: the cell without bits 0 and 1
+  static intptr_t klass_part(intptr_t v) {
+    intptr_t r = v & type_klass_mask;
+    assert (r != 0, "invalid");
+    return r;
+  }
+
+  // type recorded
+  static Klass* valid_klass(intptr_t k) {
+    if (!is_type_none(k) &&
+        !is_type_unknown(k)) {
+      return (Klass*)klass_part(k);
+    } else {
+      return NULL;
+    }
+  }
+
+  static intptr_t with_status(intptr_t k, intptr_t in) {
+    return k | (in & status_bits);
+  }
+
+  static intptr_t with_status(Klass* k, intptr_t in) {
+    return with_status((intptr_t)k, in);
+  }
+
+#ifndef PRODUCT
+  static void print_klass(outputStream* st, intptr_t k);
+#endif
+
+  // GC support
+  static bool is_loader_alive(BoolObjectClosure* is_alive_cl, intptr_t p);
+
+protected:
+  // ProfileData object these entries are part of
+  ProfileData* _pd;
+  // offset within the ProfileData object where the entries start
+  const int _base_off;
+
+  TypeEntries(int base_off)
+    : _base_off(base_off), _pd(NULL) {}
+
+  void set_intptr_at(int index, intptr_t value) {
+    _pd->set_intptr_at(index, value);
+  }
+
+  intptr_t intptr_at(int index) const {
+    return _pd->intptr_at(index);
+  }
+
+public:
+  void set_profile_data(ProfileData* pd) {
+    _pd = pd;
+  }
+};
+
+// Type entries used for arguments passed at a call and parameters on
+// method entry. 2 cells per entry: one for the type encoded as in
+// TypeEntries and one initialized with the stack slot where the
+// profiled object is to be found so that the interpreter can locate
+// it quickly.
+class TypeStackSlotEntries : public TypeEntries {
+
+private:
+  enum {
+    stack_slot_entry,
+    type_entry,
+    per_arg_cell_count
+  };
+
+  // offset of cell for stack slot for entry i within ProfileData object
+  int stack_slot_offset(int i) const {
+    return _base_off + stack_slot_local_offset(i);
+  }
+
+protected:
+  const int _number_of_entries;
+
+  // offset of cell for type for entry i within ProfileData object
+  int type_offset(int i) const {
+    return _base_off + type_local_offset(i);
+  }
+
+public:
+
+  TypeStackSlotEntries(int base_off, int nb_entries)
+    : TypeEntries(base_off), _number_of_entries(nb_entries) {}
+
+  static int compute_cell_count(Symbol* signature, bool include_receiver, int max);
+
+  void post_initialize(Symbol* signature, bool has_receiver, bool include_receiver);
+
+  // offset of cell for stack slot for entry i within this block of cells for a TypeStackSlotEntries
+  static int stack_slot_local_offset(int i) {
+    return i * per_arg_cell_count + stack_slot_entry;
+  }
+
+  // offset of cell for type for entry i within this block of cells for a TypeStackSlotEntries
+  static int type_local_offset(int i) {
+    return i * per_arg_cell_count + type_entry;
+  }
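+
+  // Layout example: with per_arg_cell_count == 2, entry 0 occupies cells
+  // {0: stack slot, 1: type} and entry 1 occupies cells {2, 3}, all
+  // relative to _base_off within the enclosing ProfileData.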
+
+  // stack slot for entry i
+  uint stack_slot(int i) const {
+    assert(i >= 0 && i < _number_of_entries, "oob");
+    return _pd->uint_at(stack_slot_offset(i));
+  }
+
+  // set stack slot for entry i
+  void set_stack_slot(int i, uint num) {
+    assert(i >= 0 && i < _number_of_entries, "oob");
+    _pd->set_uint_at(stack_slot_offset(i), num);
+  }
+
+  // type for entry i
+  intptr_t type(int i) const {
+    assert(i >= 0 && i < _number_of_entries, "oob");
+    return _pd->intptr_at(type_offset(i));
+  }
+
+  // set type for entry i
+  void set_type(int i, intptr_t k) {
+    assert(i >= 0 && i < _number_of_entries, "oob");
+    _pd->set_intptr_at(type_offset(i), k);
+  }
+
+  static ByteSize per_arg_size() {
+    return in_ByteSize(per_arg_cell_count * DataLayout::cell_size);
+  }
+
+  static int per_arg_count() {
+    return per_arg_cell_count;
+  }
+
+  // GC support
+  void clean_weak_klass_links(BoolObjectClosure* is_alive_closure);
+
+#ifndef PRODUCT
+  void print_data_on(outputStream* st) const;
+#endif
+};
+
+// Type entry used for the return value of a call. A single cell records
+// the type.
+class ReturnTypeEntry : public TypeEntries {
+
+private:
+  enum {
+    cell_count = 1
+  };
+
+public:
+  ReturnTypeEntry(int base_off)
+    : TypeEntries(base_off) {}
+
+  void post_initialize() {
+    set_type(type_none());
+  }
+
+  intptr_t type() const {
+    return _pd->intptr_at(_base_off);
+  }
+
+  void set_type(intptr_t k) {
+    _pd->set_intptr_at(_base_off, k);
+  }
+
+  static int static_cell_count() {
+    return cell_count;
+  }
+
+  static ByteSize size() {
+    return in_ByteSize(cell_count * DataLayout::cell_size);
+  }
+
+  ByteSize type_offset() {
+    return DataLayout::cell_offset(_base_off);
+  }
+
+  // GC support
+  void clean_weak_klass_links(BoolObjectClosure* is_alive_closure);
+
+#ifndef PRODUCT
+  void print_data_on(outputStream* st) const;
+#endif
+};
+
+// Entries to collect type information at a call: contains arguments
+// (TypeStackSlotEntries), a return type (ReturnTypeEntry) and a
+// number of cells. Because the number of cells for the return type is
+// smaller than the number of cells for the type of an argument, the
+// number of cells is used to tell how many arguments are profiled and
+// whether a return value is profiled. See has_arguments() and
+// has_return().
+class TypeEntriesAtCall {
+private:
+  static int stack_slot_local_offset(int i) {
+    return header_cell_count() + TypeStackSlotEntries::stack_slot_local_offset(i);
+  }
+
+  static int argument_type_local_offset(int i) {
+    return header_cell_count() + TypeStackSlotEntries::type_local_offset(i);
+  }
+
+public:
+
+  static int header_cell_count() {
+    return 1;
+  }
+
+  static int cell_count_local_offset() {
+    return 0;
+  }
+
+  static int compute_cell_count(BytecodeStream* stream);
+
+  static void initialize(DataLayout* dl, int base, int cell_count) {
+    int off = base + cell_count_local_offset();
+    dl->set_cell_at(off, cell_count - base - header_cell_count());
+  }
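+
+  // A sketch with hypothetical numbers: for base == 1 and cell_count == 7,
+  // the header cell at offset base + 0 is set to 7 - 1 - 1 == 5, i.e. the
+  // number of cells that follow the header.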
+
+  static bool arguments_profiling_enabled();
+  static bool return_profiling_enabled();
+
+  // Code generation support
+  static ByteSize cell_count_offset() {
+    return in_ByteSize(cell_count_local_offset() * DataLayout::cell_size);
+  }
+
+  static ByteSize args_data_offset() {
+    return in_ByteSize(header_cell_count() * DataLayout::cell_size);
+  }
+
+  static ByteSize stack_slot_offset(int i) {
+    return in_ByteSize(stack_slot_local_offset(i) * DataLayout::cell_size);
+  }
+
+  static ByteSize argument_type_offset(int i) {
+    return in_ByteSize(argument_type_local_offset(i) * DataLayout::cell_size);
+  }
+};
+
+// CallTypeData
+//
+// A CallTypeData is used to access profiling information about a
+// non-virtual call for which we collect type information about
+// arguments and return value.
+class CallTypeData : public CounterData {
+private:
+  // entries for arguments if any
+  TypeStackSlotEntries _args;
+  // entry for return type if any
+  ReturnTypeEntry _ret;
+
+  int cell_count_global_offset() const {
+    return CounterData::static_cell_count() + TypeEntriesAtCall::cell_count_local_offset();
+  }
+
+  // number of cells not counting the header
+  int cell_count_no_header() const {
+    return uint_at(cell_count_global_offset());
+  }
+
+  void check_number_of_arguments(int total) {
+    assert(number_of_arguments() == total, "should be set in DataLayout::initialize");
+  }
+
+public:
+  CallTypeData(DataLayout* layout) :
+    CounterData(layout),
+    _args(CounterData::static_cell_count()+TypeEntriesAtCall::header_cell_count(), number_of_arguments()),
+    _ret(cell_count() - ReturnTypeEntry::static_cell_count())
+  {
+    assert(layout->tag() == DataLayout::call_type_data_tag, "wrong type");
+    // Some compilers (VC++) don't want this passed in member initialization list
+    _args.set_profile_data(this);
+    _ret.set_profile_data(this);
+  }
+
+  const TypeStackSlotEntries* args() const {
+    assert(has_arguments(), "no profiling of arguments");
+    return &_args;
+  }
+
+  const ReturnTypeEntry* ret() const {
+    assert(has_return(), "no profiling of return value");
+    return &_ret;
+  }
+
+  virtual bool is_CallTypeData() const { return true; }
+
+  static int static_cell_count() {
+    return -1;
+  }
+
+  static int compute_cell_count(BytecodeStream* stream) {
+    return CounterData::static_cell_count() + TypeEntriesAtCall::compute_cell_count(stream);
+  }
+
+  static void initialize(DataLayout* dl, int cell_count) {
+    TypeEntriesAtCall::initialize(dl, CounterData::static_cell_count(), cell_count);
+  }
+
+  virtual void post_initialize(BytecodeStream* stream, MethodData* mdo);
+
+  virtual int cell_count() const {
+    return CounterData::static_cell_count() +
+      TypeEntriesAtCall::header_cell_count() +
+      int_at_unchecked(cell_count_global_offset());
+  }
+
+  int number_of_arguments() const {
+    return cell_count_no_header() / TypeStackSlotEntries::per_arg_count();
+  }
+
+  void set_argument_type(int i, Klass* k) {
+    assert(has_arguments(), "no arguments!");
+    intptr_t current = _args.type(i);
+    _args.set_type(i, TypeEntries::with_status(k, current));
+  }
+
+  void set_return_type(Klass* k) {
+    assert(has_return(), "no return!");
+    intptr_t current = _ret.type();
+    _ret.set_type(TypeEntries::with_status(k, current));
+  }
+
+  // An entry for a return value takes less space than an entry for an
+  // argument, so if the number of cells exceeds the number of cells
+  // needed for an argument, this object contains type information for
+  // at least one argument.
+  bool has_arguments() const {
+    bool res = cell_count_no_header() >= TypeStackSlotEntries::per_arg_count();
+    assert (!res || TypeEntriesAtCall::arguments_profiling_enabled(), "no profiling of arguments");
+    return res;
+  }
+
+  // An entry for a return value takes less space than an entry for an
+  // argument, so if the remainder of the number of cells divided by
+  // the number of cells for an argument is non-zero, a return value
+  // is profiled in this object.
+  bool has_return() const {
+    bool res = (cell_count_no_header() % TypeStackSlotEntries::per_arg_count()) != 0;
+    assert (!res || TypeEntriesAtCall::return_profiling_enabled(), "no profiling of return values");
+    return res;
+  }
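+
+  // Worked example (hypothetical): with two profiled arguments and a
+  // profiled return value, cell_count_no_header() == 2 * 2 + 1 == 5, so
+  // number_of_arguments() == 5 / 2 == 2, has_arguments() holds (5 >= 2)
+  // and has_return() holds (5 % 2 == 1).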
+
+  // Code generation support
+  static ByteSize args_data_offset() {
+    return cell_offset(CounterData::static_cell_count()) + TypeEntriesAtCall::args_data_offset();
+  }
+
+  // GC support
+  virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure) {
+    if (has_arguments()) {
+      _args.clean_weak_klass_links(is_alive_closure);
+    }
+    if (has_return()) {
+      _ret.clean_weak_klass_links(is_alive_closure);
+    }
+  }
+
+#ifndef PRODUCT
+  virtual void print_data_on(outputStream* st) const;
 #endif
 };
 
@@ -721,16 +1157,17 @@
 public:
   ReceiverTypeData(DataLayout* layout) : CounterData(layout) {
     assert(layout->tag() == DataLayout::receiver_type_data_tag ||
-           layout->tag() == DataLayout::virtual_call_data_tag, "wrong type");
+           layout->tag() == DataLayout::virtual_call_data_tag ||
+           layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type");
   }
 
-  virtual bool is_ReceiverTypeData() { return true; }
+  virtual bool is_ReceiverTypeData() const { return true; }
 
   static int static_cell_count() {
     return counter_cell_count + (uint) TypeProfileWidth * receiver_type_row_cell_count;
   }
 
-  virtual int cell_count() {
+  virtual int cell_count() const {
     return static_cell_count();
   }
 
@@ -745,7 +1182,7 @@
     return count0_offset + row * receiver_type_row_cell_count;
   }
 
-  Klass* receiver(uint row) {
+  Klass* receiver(uint row) const {
     assert(row < row_limit(), "oob");
 
     Klass* recv = (Klass*)intptr_at(receiver_cell_index(row));
@@ -758,7 +1195,7 @@
     set_intptr_at(receiver_cell_index(row), (uintptr_t)k);
   }
 
-  uint receiver_count(uint row) {
+  uint receiver_count(uint row) const {
     assert(row < row_limit(), "oob");
     return uint_at(receiver_count_cell_index(row));
   }
@@ -843,8 +1280,8 @@
 #endif // CC_INTERP
 
 #ifndef PRODUCT
-  void print_receiver_data_on(outputStream* st);
-  void print_data_on(outputStream* st);
+  void print_receiver_data_on(outputStream* st) const;
+  void print_data_on(outputStream* st) const;
 #endif
 };
 
@@ -855,10 +1292,11 @@
 class VirtualCallData : public ReceiverTypeData {
 public:
   VirtualCallData(DataLayout* layout) : ReceiverTypeData(layout) {
-    assert(layout->tag() == DataLayout::virtual_call_data_tag, "wrong type");
+    assert(layout->tag() == DataLayout::virtual_call_data_tag ||
+           layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type");
   }
 
-  virtual bool is_VirtualCallData() { return true; }
+  virtual bool is_VirtualCallData() const { return true; }
 
   static int static_cell_count() {
     // At this point we could add more profile state, e.g., for arguments.
@@ -866,7 +1304,7 @@
     return ReceiverTypeData::static_cell_count();
   }
 
-  virtual int cell_count() {
+  virtual int cell_count() const {
     return static_cell_count();
   }
 
@@ -886,7 +1324,133 @@
 #endif // CC_INTERP
 
 #ifndef PRODUCT
-  void print_data_on(outputStream* st);
+  void print_data_on(outputStream* st) const;
+#endif
+};
+
+// VirtualCallTypeData
+//
+// A VirtualCallTypeData is used to access profiling information about
+// a virtual call for which we collect type information about
+// arguments and return value.
+class VirtualCallTypeData : public VirtualCallData {
+private:
+  // entries for arguments if any
+  TypeStackSlotEntries _args;
+  // entry for return type if any
+  ReturnTypeEntry _ret;
+
+  int cell_count_global_offset() const {
+    return VirtualCallData::static_cell_count() + TypeEntriesAtCall::cell_count_local_offset();
+  }
+
+  // number of cells not counting the header
+  int cell_count_no_header() const {
+    return uint_at(cell_count_global_offset());
+  }
+
+  void check_number_of_arguments(int total) {
+    assert(number_of_arguments() == total, "should be set in DataLayout::initialize");
+  }
+
+public:
+  VirtualCallTypeData(DataLayout* layout) :
+    VirtualCallData(layout),
+    _args(VirtualCallData::static_cell_count()+TypeEntriesAtCall::header_cell_count(), number_of_arguments()),
+    _ret(cell_count() - ReturnTypeEntry::static_cell_count())
+  {
+    assert(layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type");
+    // Some compilers (VC++) don't want this passed in member initialization list
+    _args.set_profile_data(this);
+    _ret.set_profile_data(this);
+  }
+
+  const TypeStackSlotEntries* args() const {
+    assert(has_arguments(), "no profiling of arguments");
+    return &_args;
+  }
+
+  const ReturnTypeEntry* ret() const {
+    assert(has_return(), "no profiling of return value");
+    return &_ret;
+  }
+
+  virtual bool is_VirtualCallTypeData() const { return true; }
+
+  static int static_cell_count() {
+    return -1;
+  }
+
+  static int compute_cell_count(BytecodeStream* stream) {
+    return VirtualCallData::static_cell_count() + TypeEntriesAtCall::compute_cell_count(stream);
+  }
+
+  static void initialize(DataLayout* dl, int cell_count) {
+    TypeEntriesAtCall::initialize(dl, VirtualCallData::static_cell_count(), cell_count);
+  }
+
+  virtual void post_initialize(BytecodeStream* stream, MethodData* mdo);
+
+  virtual int cell_count() const {
+    return VirtualCallData::static_cell_count() +
+      TypeEntriesAtCall::header_cell_count() +
+      int_at_unchecked(cell_count_global_offset());
+  }
+
+  int number_of_arguments() const {
+    return cell_count_no_header() / TypeStackSlotEntries::per_arg_count();
+  }
+
+  void set_argument_type(int i, Klass* k) {
+    assert(has_arguments(), "no arguments!");
+    intptr_t current = _args.type(i);
+    _args.set_type(i, TypeEntries::with_status(k, current));
+  }
+
+  void set_return_type(Klass* k) {
+    assert(has_return(), "no return!");
+    intptr_t current = _ret.type();
+    _ret.set_type(TypeEntries::with_status(k, current));
+  }
+
+  // An entry for a return value takes less space than an entry for an
+  // argument, so if the remainder of the number of cells divided by
+  // the number of cells for an argument is non-zero, a return value
+  // is profiled in this object.
+  bool has_return() const {
+    bool res = (cell_count_no_header() % TypeStackSlotEntries::per_arg_count()) != 0;
+    assert (!res || TypeEntriesAtCall::return_profiling_enabled(), "no profiling of return values");
+    return res;
+  }
+
+  // An entry for a return value takes less space than an entry for an
+  // argument, so if the number of cells exceeds the number of cells
+  // needed for an argument, this object contains type information for
+  // at least one argument.
+  bool has_arguments() const {
+    bool res = cell_count_no_header() >= TypeStackSlotEntries::per_arg_count();
+    assert (!res || TypeEntriesAtCall::arguments_profiling_enabled(), "no profiling of arguments");
+    return res;
+  }
+
+  // Code generation support
+  static ByteSize args_data_offset() {
+    return cell_offset(VirtualCallData::static_cell_count()) + TypeEntriesAtCall::args_data_offset();
+  }
+
+  // GC support
+  virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure) {
+    ReceiverTypeData::clean_weak_klass_links(is_alive_closure);
+    if (has_arguments()) {
+      _args.clean_weak_klass_links(is_alive_closure);
+    }
+    if (has_return()) {
+      _ret.clean_weak_klass_links(is_alive_closure);
+    }
+  }
+
+#ifndef PRODUCT
+  virtual void print_data_on(outputStream* st) const;
 #endif
 };
 
@@ -929,7 +1493,7 @@
     assert(layout->tag() == DataLayout::ret_data_tag, "wrong type");
   }
 
-  virtual bool is_RetData() { return true; }
+  virtual bool is_RetData() const { return true; }
 
   enum {
     no_bci = -1 // value of bci when bci1/2 are not in use.
@@ -939,7 +1503,7 @@
     return counter_cell_count + (uint) BciProfileWidth * ret_row_cell_count;
   }
 
-  virtual int cell_count() {
+  virtual int cell_count() const {
     return static_cell_count();
   }
 
@@ -957,13 +1521,13 @@
   }
 
   // Direct accessors
-  int bci(uint row) {
+  int bci(uint row) const {
     return int_at(bci_cell_index(row));
   }
-  uint bci_count(uint row) {
+  uint bci_count(uint row) const {
     return uint_at(bci_count_cell_index(row));
   }
-  int bci_displacement(uint row) {
+  int bci_displacement(uint row) const {
     return int_at(bci_displacement_cell_index(row));
   }
 
@@ -989,7 +1553,7 @@
   void post_initialize(BytecodeStream* stream, MethodData* mdo);
 
 #ifndef PRODUCT
-  void print_data_on(outputStream* st);
+  void print_data_on(outputStream* st) const;
 #endif
 };
 
@@ -1014,18 +1578,18 @@
     assert(layout->tag() == DataLayout::branch_data_tag, "wrong type");
   }
 
-  virtual bool is_BranchData() { return true; }
+  virtual bool is_BranchData() const { return true; }
 
   static int static_cell_count() {
     return branch_cell_count;
   }
 
-  virtual int cell_count() {
+  virtual int cell_count() const {
     return static_cell_count();
   }
 
   // Direct accessor
-  uint not_taken() {
+  uint not_taken() const {
     return uint_at(not_taken_off_set);
   }
 
@@ -1067,7 +1631,7 @@
   void post_initialize(BytecodeStream* stream, MethodData* mdo);
 
 #ifndef PRODUCT
-  void print_data_on(outputStream* st);
+  void print_data_on(outputStream* st) const;
 #endif
 };
 
@@ -1085,15 +1649,15 @@
     array_start_off_set
   };
 
-  uint array_uint_at(int index) {
+  uint array_uint_at(int index) const {
     int aindex = index + array_start_off_set;
     return uint_at(aindex);
   }
-  int array_int_at(int index) {
+  int array_int_at(int index) const {
     int aindex = index + array_start_off_set;
     return int_at(aindex);
   }
-  oop array_oop_at(int index) {
+  oop array_oop_at(int index) const {
     int aindex = index + array_start_off_set;
     return oop_at(aindex);
   }
@@ -1124,17 +1688,17 @@
 public:
   ArrayData(DataLayout* layout) : ProfileData(layout) {}
 
-  virtual bool is_ArrayData() { return true; }
+  virtual bool is_ArrayData() const { return true; }
 
   static int static_cell_count() {
     return -1;
   }
 
-  int array_len() {
+  int array_len() const {
     return int_at_unchecked(array_len_off_set);
   }
 
-  virtual int cell_count() {
+  virtual int cell_count() const {
     return array_len() + 1;
   }
 
@@ -1181,29 +1745,29 @@
     assert(layout->tag() == DataLayout::multi_branch_data_tag, "wrong type");
   }
 
-  virtual bool is_MultiBranchData() { return true; }
+  virtual bool is_MultiBranchData() const { return true; }
 
   static int compute_cell_count(BytecodeStream* stream);
 
-  int number_of_cases() {
+  int number_of_cases() const {
     int alen = array_len() - 2; // get rid of default case here.
     assert(alen % per_case_cell_count == 0, "must be even");
     return (alen / per_case_cell_count);
   }
 
-  uint default_count() {
+  uint default_count() const {
     return array_uint_at(default_count_off_set);
   }
-  int default_displacement() {
+  int default_displacement() const {
     return array_int_at(default_disaplacement_off_set);
   }
 
-  uint count_at(int index) {
+  uint count_at(int index) const {
     return array_uint_at(case_array_start +
                          index * per_case_cell_count +
                          relative_count_off_set);
   }
-  int displacement_at(int index) {
+  int displacement_at(int index) const {
     return array_int_at(case_array_start +
                         index * per_case_cell_count +
                         relative_displacement_off_set);
@@ -1260,7 +1824,7 @@
   void post_initialize(BytecodeStream* stream, MethodData* mdo);
 
 #ifndef PRODUCT
-  void print_data_on(outputStream* st);
+  void print_data_on(outputStream* st) const;
 #endif
 };
 
@@ -1271,14 +1835,14 @@
     assert(layout->tag() == DataLayout::arg_info_data_tag, "wrong type");
   }
 
-  virtual bool is_ArgInfoData() { return true; }
+  virtual bool is_ArgInfoData() const { return true; }
 
 
-  int number_of_args() {
+  int number_of_args() const {
     return array_len();
   }
 
-  uint arg_modified(int arg) {
+  uint arg_modified(int arg) const {
     return array_uint_at(arg);
   }
 
@@ -1287,10 +1851,79 @@
   }
 
 #ifndef PRODUCT
-  void print_data_on(outputStream* st);
+  void print_data_on(outputStream* st) const;
 #endif
 };
 
+// ParametersTypeData
+//
+// A ParametersTypeData is used to access profiling information about
+// types of parameters to a method.
+class ParametersTypeData : public ArrayData {
+
+private:
+  TypeStackSlotEntries _parameters;
+
+  static int stack_slot_local_offset(int i) {
+    assert_profiling_enabled();
+    return array_start_off_set + TypeStackSlotEntries::stack_slot_local_offset(i);
+  }
+
+  static int type_local_offset(int i) {
+    assert_profiling_enabled();
+    return array_start_off_set + TypeStackSlotEntries::type_local_offset(i);
+  }
+
+  static bool profiling_enabled();
+  static void assert_profiling_enabled() {
+    assert(profiling_enabled(), "method parameters profiling should be on");
+  }
+
+public:
+  ParametersTypeData(DataLayout* layout) : ArrayData(layout), _parameters(1, number_of_parameters()) {
+    assert(layout->tag() == DataLayout::parameters_type_data_tag, "wrong type");
+    // Some compilers (VC++) don't want this passed in member initialization list
+    _parameters.set_profile_data(this);
+  }
+
+  static int compute_cell_count(Method* m);
+
+  virtual bool is_ParametersTypeData() const { return true; }
+
+  virtual void post_initialize(BytecodeStream* stream, MethodData* mdo);
+
+  int number_of_parameters() const {
+    return array_len() / TypeStackSlotEntries::per_arg_count();
+  }
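+
+  // Example (hypothetical): a method profiling three parameters has
+  // array_len() == 3 * TypeStackSlotEntries::per_arg_count() == 6 entry
+  // cells, so number_of_parameters() == 6 / 2 == 3.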
+
+  const TypeStackSlotEntries* parameters() const { return &_parameters; }
+
+  uint stack_slot(int i) const {
+    return _parameters.stack_slot(i);
+  }
+
+  void set_type(int i, Klass* k) {
+    intptr_t current = _parameters.type(i);
+    _parameters.set_type(i, TypeEntries::with_status((intptr_t)k, current));
+  }
+
+  virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure) {
+    _parameters.clean_weak_klass_links(is_alive_closure);
+  }
+
+#ifndef PRODUCT
+  virtual void print_data_on(outputStream* st) const;
+#endif
+
+  static ByteSize stack_slot_offset(int i) {
+    return cell_offset(stack_slot_local_offset(i));
+  }
+
+  static ByteSize type_offset(int i) {
+    return cell_offset(type_local_offset(i));
+  }
+};
+
 // MethodData*
 //
 // A MethodData* holds information which has been collected about
@@ -1405,6 +2038,10 @@
   // Size of _data array in bytes.  (Excludes header and extra_data fields.)
   int _data_size;
 
+  // data index for the area dedicated to parameters. -1 if no
+  // parameter profiling.
+  int _parameters_type_data_di;
+
   // Beginning of the data entries
   intptr_t _data[1];
 
@@ -1460,6 +2097,24 @@
   // return the argument info cell
   ArgInfoData *arg_info();
 
+  enum {
+    no_type_profile = 0,
+    type_profile_jsr292 = 1,
+    type_profile_all = 2
+  };
+
+  static bool profile_jsr292(methodHandle m, int bci);
+  static int profile_arguments_flag();
+  static bool profile_arguments_jsr292_only();
+  static bool profile_all_arguments();
+  static bool profile_arguments_for_invoke(methodHandle m, int bci);
+  static int profile_return_flag();
+  static bool profile_all_return();
+  static bool profile_return_for_invoke(methodHandle m, int bci);
+  static int profile_parameters_flag();
+  static bool profile_parameters_jsr292_only();
+  static bool profile_all_parameters();
+
 public:
   static int header_size() {
     return sizeof(MethodData)/wordSize;
@@ -1665,6 +2320,16 @@
     }
   }
 
+  // Return pointer to area dedicated to parameters in MDO
+  ParametersTypeData* parameters_type_data() const {
+    return _parameters_type_data_di != -1 ? data_layout_at(_parameters_type_data_di)->data_in()->as_ParametersTypeData() : NULL;
+  }
+
+  int parameters_type_data_di() const {
+    assert(_parameters_type_data_di != -1, "no args type data");
+    return _parameters_type_data_di;
+  }
+
   // Support for code generation
   static ByteSize data_offset() {
     return byte_offset_of(MethodData, _data[0]);
@@ -1677,6 +2342,10 @@
     return byte_offset_of(MethodData, _backedge_counter);
   }
 
+  static ByteSize parameters_type_data_di_offset() {
+    return byte_offset_of(MethodData, _parameters_type_data_di);
+  }
+
   // Deallocation support - no pointer fields to deallocate
   void deallocate_contents(ClassLoaderData* loader_data) {}
 
@@ -1699,6 +2368,12 @@
   // verification
   void verify_on(outputStream* st);
   void verify_data_on(outputStream* st);
+
+  static bool profile_parameters_for_method(methodHandle m);
+  static bool profile_arguments();
+  static bool profile_return();
+  static bool profile_parameters();
+  static bool profile_return_jsr292_only();
 };
 
 #endif // SHARE_VM_OOPS_METHODDATAOOP_HPP
--- a/src/share/vm/oops/oop.inline.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/oops/oop.inline.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -69,7 +69,7 @@
 }
 
 inline Klass* oopDesc::klass() const {
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     return Klass::decode_klass_not_null(_metadata._compressed_klass);
   } else {
     return _metadata._klass;
@@ -78,7 +78,7 @@
 
 inline Klass* oopDesc::klass_or_null() const volatile {
   // can be NULL in CMS
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     return Klass::decode_klass(_metadata._compressed_klass);
   } else {
     return _metadata._klass;
@@ -86,19 +86,19 @@
 }
 
 inline int oopDesc::klass_gap_offset_in_bytes() {
-  assert(UseCompressedKlassPointers, "only applicable to compressed klass pointers");
+  assert(UseCompressedClassPointers, "only applicable to compressed klass pointers");
   return oopDesc::klass_offset_in_bytes() + sizeof(narrowKlass);
 }
 
 inline Klass** oopDesc::klass_addr() {
   // Only used internally and with CMS and will not work with
   // UseCompressedOops
-  assert(!UseCompressedKlassPointers, "only supported with uncompressed klass pointers");
+  assert(!UseCompressedClassPointers, "only supported with uncompressed klass pointers");
   return (Klass**) &_metadata._klass;
 }
 
 inline narrowKlass* oopDesc::compressed_klass_addr() {
-  assert(UseCompressedKlassPointers, "only called by compressed klass pointers");
+  assert(UseCompressedClassPointers, "only called by compressed klass pointers");
   return &_metadata._compressed_klass;
 }
 
@@ -106,7 +106,7 @@
   // since klasses are promoted no store check is needed
   assert(Universe::is_bootstrapping() || k != NULL, "must be a real Klass*");
   assert(Universe::is_bootstrapping() || k->is_klass(), "not a Klass*");
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     *compressed_klass_addr() = Klass::encode_klass_not_null(k);
   } else {
     *klass_addr() = k;
@@ -118,7 +118,7 @@
 }
 
 inline void oopDesc::set_klass_gap(int v) {
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes()) = v;
   }
 }
@@ -126,7 +126,7 @@
 inline void oopDesc::set_klass_to_list_ptr(oop k) {
   // This is only to be used during GC, for from-space objects, so no
   // barrier is needed.
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     _metadata._compressed_klass = (narrowKlass)encode_heap_oop(k);  // may be null (parnew overflow handling)
   } else {
     _metadata._klass = (Klass*)(address)k;
@@ -135,7 +135,7 @@
 
 inline oop oopDesc::list_ptr_from_klass() {
   // This is only to be used during GC, for from-space objects.
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     return decode_heap_oop((narrowOop)_metadata._compressed_klass);
   } else {
     // Special case for GC
@@ -183,7 +183,7 @@
 // in inner GC loops so these are separated.
 
 inline bool check_obj_alignment(oop obj) {
-  return (intptr_t)obj % MinObjAlignmentInBytes == 0;
+  return cast_from_oop<intptr_t>(obj) % MinObjAlignmentInBytes == 0;
 }
 
 inline narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
--- a/src/share/vm/oops/oopsHierarchy.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/oops/oopsHierarchy.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -55,11 +55,16 @@
 // to and from the underlying oopDesc pointer type.
 //
 // Because oop and its subclasses <type>Oop are class types, arbitrary
-// conversions are not accepted by the compiler, and you may get a message
-// about overloading ambiguity (between long and int is common when converting
-// from a constant in 64 bit mode), or unable to convert from type to 'oop'.
-// Applying a cast to one of these conversion operators first will get to the
-// underlying oopDesc* type if appropriate.
+// conversions are not accepted by the compiler.  Applying a cast to
+// an oop will cause the best-matched conversion operator to be
+// invoked, returning the underlying oopDesc* type if appropriate.
+// No copy constructors, explicit user conversions or operators of
+// numerical type should be defined within the oop class. Most C++
+// compilers will issue a compile-time error concerning the overloading
+// ambiguity between operators of numerical and pointer types. If
+// a conversion between an oop and a numerical type is needed,
+// use the inline template methods, cast_*_oop, defined below.
+//
 // Converting NULL to oop to Handle implicitly is no longer accepted by the
 // compiler because there are too many steps in the conversion.  Use Handle()
 // instead, which generates less code anyway.
@@ -83,12 +88,9 @@
   void raw_set_obj(const void* p)     { _o = (oopDesc*)p; }
 
   oop()                               { set_obj(NULL); }
+  oop(const oop& o)                   { set_obj(o.obj()); }
   oop(const volatile oop& o)          { set_obj(o.obj()); }
   oop(const void* p)                  { set_obj(p); }
-  oop(intptr_t i)                     { set_obj((void *)i); }
-#ifdef _LP64
-  oop(int i)                          { set_obj((void *)i); }
-#endif
   ~oop()                              {
     if (CheckUnhandledOops) unregister_oop();
   }
@@ -101,8 +103,6 @@
   bool operator==(void *p) const      { return obj() == p; }
   bool operator!=(const volatile oop o) const  { return obj() != o.obj(); }
   bool operator!=(void *p) const      { return obj() != p; }
-  bool operator==(intptr_t p) const   { return obj() == (oopDesc*)p; }
-  bool operator!=(intptr_t p) const   { return obj() != (oopDesc*)p; }
 
   bool operator<(oop o) const         { return obj() < o.obj(); }
   bool operator>(oop o) const         { return obj() > o.obj(); }
@@ -110,8 +110,18 @@
   bool operator>=(oop o) const        { return obj() >= o.obj(); }
   bool operator!() const              { return !obj(); }
 
-  // Cast
+  // Assignment
+  oop& operator=(const oop& o)                            { _o = o.obj(); return *this; }
+#ifndef SOLARIS
+  volatile oop& operator=(const oop& o) volatile          { _o = o.obj(); return *this; }
+#endif
+  volatile oop& operator=(const volatile oop& o) volatile { _o = o.obj(); return *this; }
+
+  // Explicit user conversions
   operator void* () const             { return (void *)obj(); }
+#ifndef SOLARIS
+  operator void* () const volatile    { return (void *)obj(); }
+#endif
   operator HeapWord* () const         { return (HeapWord*)obj(); }
   operator oopDesc* () const          { return obj(); }
   operator intptr_t* () const         { return (intptr_t*)obj(); }
@@ -119,7 +129,6 @@
   operator markOop () const           { return markOop(obj()); }
 
   operator address   () const         { return (address)obj(); }
-  operator intptr_t () const volatile { return (intptr_t)obj(); }
 
   // from javaCalls.cpp
   operator jobject () const           { return (jobject)obj(); }
@@ -141,12 +150,26 @@
    class type##Oop : public oop {                                          \
      public:                                                               \
        type##Oop() : oop() {}                                              \
+       type##Oop(const oop& o) : oop(o) {}                                 \
        type##Oop(const volatile oop& o) : oop(o) {}                        \
        type##Oop(const void* p) : oop(p) {}                                \
        operator type##OopDesc* () const { return (type##OopDesc*)obj(); }  \
        type##OopDesc* operator->() const {                                 \
             return (type##OopDesc*)obj();                                  \
        }                                                                   \
+       type##Oop& operator=(const type##Oop& o) {                          \
+            oop::operator=(o);                                             \
+            return *this;                                                  \
+       }                                                                   \
+       NOT_SOLARIS(                                                        \
+       volatile type##Oop& operator=(const type##Oop& o) volatile {        \
+            (void)const_cast<oop&>(oop::operator=(o));                     \
+            return *this;                                                  \
+       })                                                                  \
+       volatile type##Oop& operator=(const volatile type##Oop& o) volatile {\
+            (void)const_cast<oop&>(oop::operator=(o));                     \
+            return *this;                                                  \
+       }                                                                   \
    };
 
 DEF_OOP(instance);
@@ -156,6 +179,16 @@
 
 #endif // CHECK_UNHANDLED_OOPS
 
+// For CHECK_UNHANDLED_OOPS, it is ambiguous C++ behavior to have the oop
+// structure contain explicit user-defined conversions of both numerical
+// and pointer types. Define inline methods to provide the numerical conversions.
+template <class T> inline oop cast_to_oop(T value) {
+  return (oop)(CHECK_UNHANDLED_OOPS_ONLY((void *))(value));
+}
+template <class T> inline T cast_from_oop(oop o) {
+  return (T)(CHECK_UNHANDLED_OOPS_ONLY((void*))o);
+}
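+
+// Usage sketch (hypothetical caller; mirrors the check_obj_alignment
+// change in oop.inline.hpp earlier in this diff):
+//   oop o = ...;
+//   intptr_t bits = cast_from_oop<intptr_t>(o);  // numerical view of the oop
+//   oop back = cast_to_oop(bits);                // and back to an oop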
+
 // The metadata hierarchy is separate from the oop hierarchy
 
 //      class MetaspaceObj
--- a/src/share/vm/oops/symbol.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/oops/symbol.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -41,19 +41,19 @@
   }
 }
 
-void* Symbol::operator new(size_t sz, int len, TRAPS) {
+void* Symbol::operator new(size_t sz, int len, TRAPS) throw() {
   int alloc_size = size(len)*HeapWordSize;
   address res = (address) AllocateHeap(alloc_size, mtSymbol);
   return res;
 }
 
-void* Symbol::operator new(size_t sz, int len, Arena* arena, TRAPS) {
+void* Symbol::operator new(size_t sz, int len, Arena* arena, TRAPS) throw() {
   int alloc_size = size(len)*HeapWordSize;
   address res = (address)arena->Amalloc(alloc_size);
   return res;
 }
 
-void* Symbol::operator new(size_t sz, int len, ClassLoaderData* loader_data, TRAPS) {
+void* Symbol::operator new(size_t sz, int len, ClassLoaderData* loader_data, TRAPS) throw() {
   address res;
   int alloc_size = size(len)*HeapWordSize;
   res = (address) Metaspace::allocate(loader_data, size(len), true,
--- a/src/share/vm/oops/symbol.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/oops/symbol.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -45,7 +45,7 @@
 // in the SymbolTable bucket (the _literal field in HashtableEntry)
 // that points to the Symbol.  All other stores of a Symbol*
 // to a field of a persistent variable (e.g., the _name field in
-// FieldAccessInfo or _ptr in a CPSlot) is reference counted.
+// fieldDescriptor or _ptr in a CPSlot) is reference counted.
 //
 // 1) The lookup of a "name" in the SymbolTable either creates a Symbol F for
 // "name" and returns a pointer to F or finds a pre-existing Symbol F for
@@ -136,9 +136,9 @@
   }
 
   Symbol(const u1* name, int length, int refcount);
-  void* operator new(size_t size, int len, TRAPS);
-  void* operator new(size_t size, int len, Arena* arena, TRAPS);
-  void* operator new(size_t size, int len, ClassLoaderData* loader_data, TRAPS);
+  void* operator new(size_t size, int len, TRAPS) throw();
+  void* operator new(size_t size, int len, Arena* arena, TRAPS) throw();
+  void* operator new(size_t size, int len, ClassLoaderData* loader_data, TRAPS) throw();
 
   void  operator delete(void* p);
 
--- a/src/share/vm/opto/block.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/block.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -112,9 +112,9 @@
 // exceeds OptoLoopAlignment.
 uint Block::compute_first_inst_size(uint& sum_size, uint inst_cnt,
                                     PhaseRegAlloc* ra) {
-  uint last_inst = _nodes.size();
+  uint last_inst = number_of_nodes();
   for( uint j = 0; j < last_inst && inst_cnt > 0; j++ ) {
-    uint inst_size = _nodes[j]->size(ra);
+    uint inst_size = get_node(j)->size(ra);
     if( inst_size > 0 ) {
       inst_cnt--;
       uint sz = sum_size + inst_size;
@@ -131,8 +131,8 @@
 }
 
 uint Block::find_node( const Node *n ) const {
-  for( uint i = 0; i < _nodes.size(); i++ ) {
-    if( _nodes[i] == n )
+  for( uint i = 0; i < number_of_nodes(); i++ ) {
+    if( get_node(i) == n )
       return i;
   }
   ShouldNotReachHere();
@@ -141,7 +141,7 @@
 
 // Find and remove n from block list
 void Block::find_remove( const Node *n ) {
-  _nodes.remove(find_node(n));
+  remove_node(find_node(n));
 }
 
 // Return empty status of a block.  Empty blocks contain only the head, other
@@ -154,10 +154,10 @@
   }
 
   int success_result = completely_empty;
-  int end_idx = _nodes.size()-1;
+  int end_idx = number_of_nodes() - 1;
 
   // Check for ending goto
-  if ((end_idx > 0) && (_nodes[end_idx]->is_MachGoto())) {
+  if ((end_idx > 0) && (get_node(end_idx)->is_MachGoto())) {
     success_result = empty_with_goto;
     end_idx--;
   }
@@ -170,7 +170,7 @@
   // Ideal nodes are allowable in empty blocks: skip them  Only MachNodes
   // turn directly into code, because only MachNodes have non-trivial
   // emit() functions.
-  while ((end_idx > 0) && !_nodes[end_idx]->is_Mach()) {
+  while ((end_idx > 0) && !get_node(end_idx)->is_Mach()) {
     end_idx--;
   }
 
@@ -209,15 +209,15 @@
 
 // True if block is low enough frequency or guarded by a test which
 // mostly does not go here.
-bool Block::is_uncommon(PhaseCFG* cfg) const {
+bool PhaseCFG::is_uncommon(const Block* block) {
   // Initial blocks must never be moved, so are never uncommon.
-  if (head()->is_Root() || head()->is_Start())  return false;
+  if (block->head()->is_Root() || block->head()->is_Start())  return false;
 
   // Check for way-low freq
-  if( _freq < BLOCK_FREQUENCY(0.00001f) ) return true;
+  if( block->_freq < BLOCK_FREQUENCY(0.00001f) ) return true;
 
   // Look for code shape indicating uncommon_trap or slow path
-  if (has_uncommon_code()) return true;
+  if (block->has_uncommon_code()) return true;
 
   const float epsilon = 0.05f;
   const float guard_factor = PROB_UNLIKELY_MAG(4) / (1.f - epsilon);
@@ -225,8 +225,8 @@
   uint freq_preds = 0;
   uint uncommon_for_freq_preds = 0;
 
-  for( uint i=1; i<num_preds(); i++ ) {
-    Block* guard = cfg->get_block_for_node(pred(i));
+  for( uint i = 1; i < block->num_preds(); i++ ) {
+    Block* guard = get_block_for_node(block->pred(i));
     // Check to see if this block follows its guard 1 time out of 10000
     // or less.
     //
@@ -244,14 +244,14 @@
       uncommon_preds++;
     } else {
       freq_preds++;
-      if( _freq < guard->_freq * guard_factor ) {
+      if( block->_freq < guard->_freq * guard_factor ) {
         uncommon_for_freq_preds++;
       }
     }
   }
-  if( num_preds() > 1 &&
+  if( block->num_preds() > 1 &&
       // The block is uncommon if all preds are uncommon or
-      (uncommon_preds == (num_preds()-1) ||
+      (uncommon_preds == (block->num_preds()-1) ||
       // it is uncommon for all frequent preds.
        uncommon_for_freq_preds == freq_preds) ) {
     return true;
@@ -344,8 +344,8 @@
 
 void Block::dump(const PhaseCFG* cfg) const {
   dump_head(cfg);
-  for (uint i=0; i< _nodes.size(); i++) {
-    _nodes[i]->dump();
+  for (uint i = 0; i < number_of_nodes(); i++) {
+    get_node(i)->dump();
   }
   tty->print("\n");
 }
@@ -434,7 +434,7 @@
       map_node_to_block(p, bb);
       map_node_to_block(x, bb);
       if( x != p ) {                // Only for root is x == p
-        bb->_nodes.push((Node*)x);
+        bb->push_node((Node*)x);
       }
       // Now handle predecessors
       ++sum;                        // Count 1 for self block
@@ -469,11 +469,11 @@
         assert( x != proj, "" );
         // Map basic block of projection
         map_node_to_block(proj, pb);
-        pb->_nodes.push(proj);
+        pb->push_node(proj);
       }
       // Insert self as a child of my predecessor block
       pb->_succs.map(pb->_num_succs++, get_block_for_node(np));
-      assert( pb->_nodes[ pb->_nodes.size() - pb->_num_succs ]->is_block_proj(),
+      assert( pb->get_node(pb->number_of_nodes() - pb->_num_succs)->is_block_proj(),
               "too many control users, not a CFG?" );
     }
   }
@@ -495,7 +495,7 @@
   // surrounding blocks.
   float freq = in->_freq * in->succ_prob(succ_no);
   // get ProjNode corresponding to the succ_no'th successor of the in block
-  ProjNode* proj = in->_nodes[in->_nodes.size() - in->_num_succs + succ_no]->as_Proj();
+  ProjNode* proj = in->get_node(in->number_of_nodes() - in->_num_succs + succ_no)->as_Proj();
   // create region for basic block
   RegionNode* region = new (C) RegionNode(2);
   region->init_req(1, proj);
@@ -507,7 +507,7 @@
   Node* gto = _goto->clone(); // get a new goto node
   gto->set_req(0, region);
   // add it to the basic block
-  block->_nodes.push(gto);
+  block->push_node(gto);
   map_node_to_block(gto, block);
   C->regalloc()->set_bad(gto->_idx);
   // hook up successor block
@@ -527,9 +527,9 @@
 // Does this block end in a multiway branch that cannot have the default case
 // flipped for another case?
 static bool no_flip_branch( Block *b ) {
-  int branch_idx = b->_nodes.size() - b->_num_succs-1;
+  int branch_idx = b->number_of_nodes() - b->_num_succs-1;
   if( branch_idx < 1 ) return false;
-  Node *bra = b->_nodes[branch_idx];
+  Node *bra = b->get_node(branch_idx);
   if( bra->is_Catch() )
     return true;
   if( bra->is_Mach() ) {
@@ -550,16 +550,16 @@
 void PhaseCFG::convert_NeverBranch_to_Goto(Block *b) {
   // Find true target
   int end_idx = b->end_idx();
-  int idx = b->_nodes[end_idx+1]->as_Proj()->_con;
+  int idx = b->get_node(end_idx+1)->as_Proj()->_con;
   Block *succ = b->_succs[idx];
   Node* gto = _goto->clone(); // get a new goto node
   gto->set_req(0, b->head());
-  Node *bp = b->_nodes[end_idx];
-  b->_nodes.map(end_idx,gto); // Slam over NeverBranch
+  Node *bp = b->get_node(end_idx);
+  b->map_node(gto, end_idx); // Slam over NeverBranch
   map_node_to_block(gto, b);
   C->regalloc()->set_bad(gto->_idx);
-  b->_nodes.pop();              // Yank projections
-  b->_nodes.pop();              // Yank projections
+  b->pop_node();              // Yank projections
+  b->pop_node();              // Yank projections
   b->_succs.map(0,succ);        // Map only successor
   b->_num_succs = 1;
   // remap successor's predecessors if necessary
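
The conversion above is plain node-list surgery: overwrite the NeverBranch in place with the goto, then pop its two trailing projections. The same pattern over a plain vector, as a sketch (illustrative `Node`/`Blk`; reading the surviving successor out of the projection's `_con` is omitted):

    #include <cassert>
    #include <cstdio>
    #include <vector>

    struct Node { const char* name; };

    struct Blk {
        std::vector<Node*> nodes;   // block-local schedule: [..., branch, proj0, proj1]

        void convert_branch_to_goto(size_t end_idx, Node* gto) {
            assert(end_idx + 3 == nodes.size() && "branch must be followed by its two projections");
            nodes[end_idx] = gto;   // slam over the old branch
            nodes.pop_back();       // yank projection
            nodes.pop_back();       // yank projection
        }
    };

    int main() {
        Node br{"NeverBranch"}, p0{"proj0"}, p1{"proj1"}, gto{"Goto"};
        Blk b{{&br, &p0, &p1}};
        b.convert_branch_to_goto(0, &gto);
        std::printf("%s %zu\n", b.nodes.back()->name, b.nodes.size());  // Goto 1
    }
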
@@ -575,8 +575,8 @@
   // Scan through block, yanking dead path from
   // all regions and phis.
   dead->head()->del_req(j);
-  for( int k = 1; dead->_nodes[k]->is_Phi(); k++ )
-    dead->_nodes[k]->del_req(j);
+  for( int k = 1; dead->get_node(k)->is_Phi(); k++ )
+    dead->get_node(k)->del_req(j);
 }
 
 // Helper function to move block bx to the slot following b_index. Return
@@ -620,7 +620,7 @@
   if (e != Block::not_empty) {
     if (e == Block::empty_with_goto) {
       // Remove the goto, but leave the block.
-      b->_nodes.pop();
+      b->pop_node();
     }
     // Mark this block as a connector block, which will cause it to be
     // ignored in certain functions such as non_connector_successor().
@@ -663,13 +663,13 @@
     // to give a fake exit path to infinite loops.  At this late stage they
     // need to turn into Goto's so that when you enter the infinite loop you
     // indeed hang.
-    if (block->_nodes[block->end_idx()]->Opcode() == Op_NeverBranch) {
+    if (block->get_node(block->end_idx())->Opcode() == Op_NeverBranch) {
       convert_NeverBranch_to_Goto(block);
     }
 
     // Look for uncommon blocks and move to end.
     if (!C->do_freq_based_layout()) {
-      if (block->is_uncommon(this)) {
+      if (is_uncommon(block)) {
         move_to_end(block, i);
         last--;                   // No longer check for being uncommon!
         if (no_flip_branch(block)) { // Fall-thru case must follow?
@@ -720,9 +720,9 @@
     // exchange the true and false targets.
     if (no_flip_branch(block)) {
       // Find fall through case - if must fall into its target
-      int branch_idx = block->_nodes.size() - block->_num_succs;
+      int branch_idx = block->number_of_nodes() - block->_num_succs;
       for (uint j2 = 0; j2 < block->_num_succs; j2++) {
-        const ProjNode* p = block->_nodes[branch_idx + j2]->as_Proj();
+        const ProjNode* p = block->get_node(branch_idx + j2)->as_Proj();
         if (p->_con == 0) {
           // successor j2 is fall through case
           if (block->non_connector_successor(j2) != bnext) {
@@ -743,14 +743,14 @@
 
       // Remove all CatchProjs
       for (uint j = 0; j < block->_num_succs; j++) {
-        block->_nodes.pop();
+        block->pop_node();
       }
 
     } else if (block->_num_succs == 1) {
       // Block ends in a Goto?
       if (bnext == bs0) {
         // We fall into next block; remove the Goto
-        block->_nodes.pop();
+        block->pop_node();
       }
 
     } else if(block->_num_succs == 2) { // Block ends in a If?
@@ -759,9 +759,9 @@
       //       be projections (in any order), the 3rd last node must be
       //       the IfNode (we have excluded other 2-way exits such as
       //       CatchNodes already).
-      MachNode* iff   = block->_nodes[block->_nodes.size() - 3]->as_Mach();
-      ProjNode* proj0 = block->_nodes[block->_nodes.size() - 2]->as_Proj();
-      ProjNode* proj1 = block->_nodes[block->_nodes.size() - 1]->as_Proj();
+      MachNode* iff   = block->get_node(block->number_of_nodes() - 3)->as_Mach();
+      ProjNode* proj0 = block->get_node(block->number_of_nodes() - 2)->as_Proj();
+      ProjNode* proj1 = block->get_node(block->number_of_nodes() - 1)->as_Proj();
 
       // Assert that proj0 and succs[0] match up. Similarly for proj1 and succs[1].
       assert(proj0->raw_out(0) == block->_succs[0]->head(), "Mismatch successor 0");
@@ -833,8 +833,8 @@
         iff->as_MachIf()->negate();
       }
 
-      block->_nodes.pop();          // Remove IfFalse & IfTrue projections
-      block->_nodes.pop();
+      block->pop_node();          // Remove IfFalse & IfTrue projections
+      block->pop_node();
 
     } else {
       // Multi-exit block, e.g. a switch statement
@@ -895,13 +895,13 @@
   // Verify sane CFG
   for (uint i = 0; i < number_of_blocks(); i++) {
     Block* block = get_block(i);
-    uint cnt = block->_nodes.size();
+    uint cnt = block->number_of_nodes();
     uint j;
     for (j = 0; j < cnt; j++)  {
-      Node *n = block->_nodes[j];
+      Node *n = block->get_node(j);
       assert(get_block_for_node(n) == block, "");
       if (j >= 1 && n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_CreateEx) {
-        assert(j == 1 || block->_nodes[j-1]->is_Phi(), "CreateEx must be first instruction in block");
+        assert(j == 1 || block->get_node(j-1)->is_Phi(), "CreateEx must be first instruction in block");
       }
       for (uint k = 0; k < n->req(); k++) {
         Node *def = n->in(k);
@@ -930,14 +930,14 @@
     }
 
     j = block->end_idx();
-    Node* bp = (Node*)block->_nodes[block->_nodes.size() - 1]->is_block_proj();
+    Node* bp = (Node*)block->get_node(block->number_of_nodes() - 1)->is_block_proj();
     assert(bp, "last instruction must be a block proj");
-    assert(bp == block->_nodes[j], "wrong number of successors for this block");
+    assert(bp == block->get_node(j), "wrong number of successors for this block");
     if (bp->is_Catch()) {
-      while (block->_nodes[--j]->is_MachProj()) {
+      while (block->get_node(--j)->is_MachProj()) {
         ;
       }
-      assert(block->_nodes[j]->is_MachCall(), "CatchProj must follow call");
+      assert(block->get_node(j)->is_MachCall(), "CatchProj must follow call");
     } else if (bp->is_Mach() && bp->as_Mach()->ideal_Opcode() == Op_If) {
       assert(block->_num_succs == 2, "Conditional branch must have two targets");
     }
@@ -1440,9 +1440,9 @@
           Block *bnext = next(b);
           Block *bs0 = b->non_connector_successor(0);
 
-          MachNode *iff = b->_nodes[b->_nodes.size()-3]->as_Mach();
-          ProjNode *proj0 = b->_nodes[b->_nodes.size()-2]->as_Proj();
-          ProjNode *proj1 = b->_nodes[b->_nodes.size()-1]->as_Proj();
+          MachNode *iff = b->get_node(b->number_of_nodes() - 3)->as_Mach();
+          ProjNode *proj0 = b->get_node(b->number_of_nodes() - 2)->as_Proj();
+          ProjNode *proj1 = b->get_node(b->number_of_nodes() - 1)->as_Proj();
 
           if (bnext == bs0) {
             // Fall-thru case in succs[0], should be in succs[1]
@@ -1454,8 +1454,8 @@
             b->_succs.map( 1, tbs0 );
 
             // Flip projections to match targets
-            b->_nodes.map(b->_nodes.size()-2, proj1);
-            b->_nodes.map(b->_nodes.size()-1, proj0);
+            b->map_node(proj1, b->number_of_nodes() - 2);
+            b->map_node(proj0, b->number_of_nodes() - 1);
           }
         }
       }
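
When the fall-through target ends up in succs[0], the fix is symmetric: swap the two successors and the last two projection slots so they stay paired (HotSpot additionally negates the condition via `negate()`, omitted here). As a sketch with illustrative types:

    #include <cassert>
    #include <utility>
    #include <vector>

    struct Node { int con; };   // projection; con selects the branch arm

    struct Blk {
        std::vector<Node*> nodes;   // [..., If, proj0, proj1]
        std::vector<Blk*>  succs;   // succs[i] pairs with the proj at size-2+i

        // Make the fall-through case land in succs[1] by swapping both the
        // successor slots and the matching projection slots.
        void flip_branch() {
            assert(nodes.size() >= 3 && succs.size() == 2);
            std::swap(succs[0], succs[1]);
            std::swap(nodes[nodes.size() - 2], nodes[nodes.size() - 1]);
        }
    };

    int main() {
        Node iff{-1}, p0{0}, p1{1};
        Blk t0, t1;
        Blk b{{&iff, &p0, &p1}, {&t0, &t1}};
        b.flip_branch();   // projections and successors remain paired
        return 0;
    }
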
--- a/src/share/vm/opto/block.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/block.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -105,15 +105,53 @@
 // any optimization pass.  They are created late in the game.
 class Block : public CFGElement {
   friend class VMStructs;
- public:
+
+private:
   // Nodes in this block, in order
   Node_List _nodes;
 
+public:
+
+  // Get the node at index 'at_index'; if 'at_index' is out of bounds, return NULL
+  Node* get_node(uint at_index) const {
+    return _nodes[at_index];
+  }
+
+  // Get the number of nodes in this block
+  uint number_of_nodes() const {
+    return _nodes.size();
+  }
+
+  // Map a node 'node' to index 'to_index' in the block; if the index is out of bounds, the size of the node list is increased
+  void map_node(Node* node, uint to_index) {
+    _nodes.map(to_index, node);
+  }
+
+  // Insert a node 'node' at index 'at_index', moving all nodes at a higher index up by one step; if 'at_index' is out of bounds, we crash
+  void insert_node(Node* node, uint at_index) {
+    _nodes.insert(at_index, node);
+  }
+
+  // Remove a node at index 'at_index'
+  void remove_node(uint at_index) {
+    _nodes.remove(at_index);
+  }
+
+  // Push a node 'node' onto the node list
+  void push_node(Node* node) {
+    _nodes.push(node);
+  }
+
+  // Pop the last node off the node list
+  Node* pop_node() {
+    return _nodes.pop();
+  }
+
   // Basic blocks have a Node which defines Control for all Nodes pinned in
   // this block.  This Node is a RegionNode.  Exception-causing Nodes
   // (division, subroutines) and Phi functions are always pinned.  Later,
   // every Node will get pinned to some block.
-  Node *head() const { return _nodes[0]; }
+  Node *head() const { return get_node(0); }
 
   // CAUTION: num_preds() is ONE based, so that predecessor numbers match
   // input edges to Regions and Phis.
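
This hunk is the heart of the cleanup: `_nodes` goes private and every caller uses an intention-revealing accessor, so the backing container can later change without touching clients. The shape of the API, as a standalone sketch over std::vector (bounds behavior is simplified; HotSpot's Node_List differs in detail):

    #include <cstdio>
    #include <cstddef>
    #include <vector>

    struct Node;

    class Blk {
        std::vector<Node*> _nodes;   // nodes in schedule order, head first
    public:
        Node*  get_node(size_t at_index) const { return _nodes.at(at_index); }
        size_t number_of_nodes() const         { return _nodes.size(); }
        void   map_node(Node* n, size_t i) {     // grows the list if needed
            if (i >= _nodes.size()) _nodes.resize(i + 1);
            _nodes[i] = n;
        }
        void   insert_node(Node* n, size_t i)  { _nodes.insert(_nodes.begin() + i, n); }
        void   remove_node(size_t i)           { _nodes.erase(_nodes.begin() + i); }
        void   push_node(Node* n)              { _nodes.push_back(n); }
        Node*  pop_node() {
            Node* n = _nodes.back();
            _nodes.pop_back();
            return n;
        }
    };

    int main() {
        Blk b;
        b.push_node(nullptr);
        std::printf("%zu\n", b.number_of_nodes());   // 1
    }
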
@@ -274,29 +312,12 @@
 
   // Add an instruction to an existing block.  It must go after the head
   // instruction and before the end instruction.
-  void add_inst( Node *n ) { _nodes.insert(end_idx(),n); }
+  void add_inst( Node *n ) { insert_node(n, end_idx()); }
   // Find node in block
   uint find_node( const Node *n ) const;
   // Find and remove n from block list
   void find_remove( const Node *n );
 
-  // helper function that adds caller save registers to MachProjNode
-  void add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe);
-  // Schedule a call next in the block
-  uint sched_call(Matcher &matcher, PhaseCFG* cfg, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call);
-
-  // Perform basic-block local scheduling
-  Node *select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot);
-  void set_next_call( Node *n, VectorSet &next_call, PhaseCFG* cfg);
-  void needed_for_next_call(Node *this_call, VectorSet &next_call, PhaseCFG* cfg);
-  bool schedule_local(PhaseCFG *cfg, Matcher &m, GrowableArray<int> &ready_cnt, VectorSet &next_call);
-  // Cleanup if any code lands between a Call and his Catch
-  void call_catch_cleanup(PhaseCFG* cfg, Compile *C);
-  // Detect implicit-null-check opportunities.  Basically, find NULL checks
-  // with suitable memory ops nearby.  Use the memory op to do the NULL check.
-  // I can generate a memory op if there is not one nearby.
-  void implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowed_reasons);
-
   // Return the empty status of a block
   enum { not_empty, empty_with_goto, completely_empty };
   int is_Empty() const;
@@ -328,10 +349,6 @@
   // Examine block's code shape to predict if it is not commonly executed.
   bool has_uncommon_code() const;
 
-  // Use frequency calculations and code shape to predict if the block
-  // is uncommon.
-  bool is_uncommon(PhaseCFG* cfg) const;
-
 #ifndef PRODUCT
   // Debugging print of basic block
   void dump_bidx(const Block* orig, outputStream* st = tty) const;
@@ -414,6 +431,27 @@
   // to late. Helper for schedule_late.
   Block* hoist_to_cheaper_block(Block* LCA, Block* early, Node* self);
 
+  bool schedule_local(Block* block, GrowableArray<int>& ready_cnt, VectorSet& next_call);
+  void set_next_call(Block* block, Node* n, VectorSet& next_call);
+  void needed_for_next_call(Block* block, Node* this_call, VectorSet& next_call);
+
+  // Perform basic-block local scheduling
+  Node* select(Block* block, Node_List& worklist, GrowableArray<int>& ready_cnt, VectorSet& next_call, uint sched_slot);
+
+  // Schedule a call next in the block
+  uint sched_call(Block* block, uint node_cnt, Node_List& worklist, GrowableArray<int>& ready_cnt, MachCallNode* mcall, VectorSet& next_call);
+
+  // Cleanup if any code lands between a Call and its Catch
+  void call_catch_cleanup(Block* block);
+
+  Node* catch_cleanup_find_cloned_def(Block* use_blk, Node* def, Block* def_blk, int n_clone_idx);
+  void  catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, int n_clone_idx);
+
+  // Detect implicit-null-check opportunities.  Basically, find NULL checks
+  // with suitable memory ops nearby.  Use the memory op to do the NULL check.
+  // I can generate a memory op if there is not one nearby.
+  void implicit_null_check(Block* block, Node *proj, Node *val, int allowed_reasons);
+
   // Perform a Depth First Search (DFS).
   // Setup 'vertex' as DFS to vertex mapping.
   // Setup 'semi' as vertex to DFS mapping.
@@ -530,6 +568,10 @@
     return (_node_to_block_mapping.lookup(node->_idx) != NULL);
   }
 
+  // Use frequency calculations and code shape to predict if the block
+  // is uncommon.
+  bool is_uncommon(const Block* block);
+
 #ifdef ASSERT
   Unique_Node_List _raw_oops;
 #endif
@@ -550,7 +592,7 @@
 
   // Insert a node into a block at index and map the node to the block
   void insert(Block *b, uint idx, Node *n) {
-    b->_nodes.insert( idx, n );
+    b->insert_node(n, idx);
     map_node_to_block(n, b);
   }
 
--- a/src/share/vm/opto/buildOopMap.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/buildOopMap.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -121,8 +121,8 @@
 // Given reaching-defs for this block start, compute it for this block end
 void OopFlow::compute_reach( PhaseRegAlloc *regalloc, int max_reg, Dict *safehash ) {
 
-  for( uint i=0; i<_b->_nodes.size(); i++ ) {
-    Node *n = _b->_nodes[i];
+  for( uint i=0; i<_b->number_of_nodes(); i++ ) {
+    Node *n = _b->get_node(i);
 
     if( n->jvms() ) {           // Build an OopMap here?
       JVMState *jvms = n->jvms();
@@ -447,8 +447,8 @@
       }
 
       // Now walk tmp_live up the block backwards, computing live
-      for( int k=b->_nodes.size()-1; k>=0; k-- ) {
-        Node *n = b->_nodes[k];
+      for( int k=b->number_of_nodes()-1; k>=0; k-- ) {
+        Node *n = b->get_node(k);
         // KILL def'd bits
         int first = regalloc->get_reg_first(n);
         int second = regalloc->get_reg_second(n);
@@ -544,12 +544,12 @@
     for (i = 1; i < cfg->number_of_blocks(); i++) {
       Block* block = cfg->get_block(i);
       uint j;
-      for (j = 1; j < block->_nodes.size(); j++) {
-        if (block->_nodes[j]->jvms() && (*safehash)[block->_nodes[j]] == NULL) {
+      for (j = 1; j < block->number_of_nodes(); j++) {
+        if (block->get_node(j)->jvms() && (*safehash)[block->get_node(j)] == NULL) {
            break;
         }
       }
-      if (j < block->_nodes.size()) {
+      if (j < block->number_of_nodes()) {
         break;
       }
     }
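
The loop near the top of this hunk ("walk tmp_live up the block backwards") is a standard backward liveness pass: starting from the live-out set, each instruction first kills the registers it defines and then makes its inputs live. A minimal sketch over a bitset (register numbering and the `Insn` shape are illustrative):

    #include <bitset>
    #include <cstdio>
    #include <vector>

    const size_t MAX_REG = 64;

    struct Insn {
        int def;                 // register defined, or -1 for none
        std::vector<int> uses;   // registers read
    };

    // Take live-out, walk the block backwards, return live-in.
    static std::bitset<MAX_REG> live_in(const std::vector<Insn>& block,
                                        std::bitset<MAX_REG> live) {
        for (size_t k = block.size(); k-- > 0; ) {
            const Insn& n = block[k];
            if (n.def >= 0) live.reset(n.def);   // KILL def'd bits
            for (int u : n.uses) live.set(u);    // GEN use'd bits
        }
        return live;
    }

    int main() {
        std::vector<Insn> block = { {0, {1}}, {-1, {0, 2}} };  // r0 = f(r1); use(r0, r2)
        std::printf("%zu\n", live_in(block, {}).count());      // 2: r1 and r2 live in
    }
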
--- a/src/share/vm/opto/bytecodeInfo.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/bytecodeInfo.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -123,7 +123,7 @@
   // Allows targeted inlining
   if(callee_method->should_inline()) {
     *wci_result = *(WarmCallInfo::always_hot());
-    if (PrintInlining && Verbose) {
+    if (C->print_inlining() && Verbose) {
       CompileTask::print_inline_indent(inline_level());
       tty->print_cr("Inlined method is hot: ");
     }
@@ -137,7 +137,7 @@
   if(callee_method->interpreter_throwout_count() > InlineThrowCount &&
      size < InlineThrowMaxSize ) {
     wci_result->set_profit(wci_result->profit() * 100);
-    if (PrintInlining && Verbose) {
+    if (C->print_inlining() && Verbose) {
       CompileTask::print_inline_indent(inline_level());
       tty->print_cr("Inlined method with many throws (throws=%d):", callee_method->interpreter_throwout_count());
     }
@@ -197,6 +197,7 @@
 // negative filter: should callee NOT be inlined?
 bool InlineTree::should_not_inline(ciMethod *callee_method,
                                    ciMethod* caller_method,
+                                   JVMState* jvms,
                                    WarmCallInfo* wci_result) {
 
   const char* fail_msg = NULL;
@@ -226,7 +227,7 @@
     // don't inline exception code unless the top method belongs to an
     // exception class
     if (callee_method->holder()->is_subclass_of(C->env()->Throwable_klass())) {
-      ciMethod* top_method = caller_jvms() ? caller_jvms()->of_depth(1)->method() : method();
+      ciMethod* top_method = jvms->caller() != NULL ? jvms->caller()->of_depth(1)->method() : method();
       if (!top_method->holder()->is_subclass_of(C->env()->Throwable_klass())) {
         wci_result->set_profit(wci_result->profit() * 0.1);
       }
@@ -328,7 +329,7 @@
 // return true if ok
 // Relocated from "InliningClosure::try_to_inline"
 bool InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_method,
-                               int caller_bci, ciCallProfile& profile,
+                               int caller_bci, JVMState* jvms, ciCallProfile& profile,
                                WarmCallInfo* wci_result, bool& should_delay) {
 
    // Old algorithm had funny accumulating BC-size counters
@@ -346,7 +347,7 @@
                      wci_result)) {
     return false;
   }
-  if (should_not_inline(callee_method, caller_method, wci_result)) {
+  if (should_not_inline(callee_method, caller_method, jvms, wci_result)) {
     return false;
   }
 
@@ -397,24 +398,35 @@
   }
 
   // detect direct and indirect recursive inlining
-  if (!callee_method->is_compiled_lambda_form()) {
+  {
     // count the current method and the callee
-    int inline_level = (method() == callee_method) ? 1 : 0;
-    if (inline_level > MaxRecursiveInlineLevel) {
-      set_msg("recursively inlining too deep");
-      return false;
+    const bool is_compiled_lambda_form = callee_method->is_compiled_lambda_form();
+    int inline_level = 0;
+    if (!is_compiled_lambda_form) {
+      if (method() == callee_method) {
+        inline_level++;
+      }
     }
     // count callers of current method and callee
-    JVMState* jvms = caller_jvms();
-    while (jvms != NULL && jvms->has_method()) {
-      if (jvms->method() == callee_method) {
-        inline_level++;
-        if (inline_level > MaxRecursiveInlineLevel) {
-          set_msg("recursively inlining too deep");
-          return false;
+    Node* callee_argument0 = is_compiled_lambda_form ? jvms->map()->argument(jvms, 0)->uncast() : NULL;
+    for (JVMState* j = jvms->caller(); j != NULL && j->has_method(); j = j->caller()) {
+      if (j->method() == callee_method) {
+        if (is_compiled_lambda_form) {
+          // Since compiled lambda forms are heavily reused, we allow recursive inlining.  If it is
+          // truly a recursion (using the same "receiver"), we limit inlining, since otherwise we
+          // can easily blow the compiler stack.
+          Node* caller_argument0 = j->map()->argument(j, 0)->uncast();
+          if (caller_argument0 == callee_argument0) {
+            inline_level++;
+          }
+        } else {
+          inline_level++;
         }
       }
-      jvms = jvms->caller();
+    }
+    if (inline_level > MaxRecursiveInlineLevel) {
+      set_msg("recursive inlining is too deep");
+      return false;
     }
   }
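
The rewritten check walks the JVMState caller chain once, counting frames that already execute the candidate callee; for compiled lambda forms only frames with the identical receiver count, since those methods are shared across unrelated call sites and reuse must not be mistaken for recursion. A standalone sketch of the walk (the `Frame` type is illustrative; receiver identity stands in for `argument(jvms, 0)->uncast()`):

    #include <cstdio>

    struct Frame {
        const void*  method;     // identity of the method executing in this frame
        const void*  receiver;   // first argument, if any
        const Frame* caller;     // next-older frame, or nullptr
    };

    // Count frames in the chain that are 'callee'. For a heavily shared
    // method (like a compiled lambda form), require the same receiver too.
    static int recursion_depth(const Frame* jvms, const void* callee,
                               bool shared_method, const void* callee_receiver) {
        int depth = 0;
        if (!shared_method && jvms->method == callee) depth++;   // current frame
        for (const Frame* j = jvms->caller; j != nullptr; j = j->caller) {
            if (j->method != callee) continue;
            if (!shared_method || j->receiver == callee_receiver) depth++;
        }
        return depth;
    }

    int main() {
        int m = 0;   // hypothetical method-identity token
        Frame outer{&m, nullptr, nullptr};
        Frame inner{&m, nullptr, &outer};
        std::printf("%d\n", recursion_depth(&inner, &m, false, nullptr));  // 2
    }
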
 
@@ -491,7 +503,7 @@
       C->log()->inline_fail(inline_msg);
     }
   }
-  if (PrintInlining) {
+  if (C->print_inlining()) {
     C->print_inlining(callee_method, inline_level(), caller_bci, inline_msg);
     if (callee_method == NULL) tty->print(" callee not monotonic or profiled");
     if (Verbose && callee_method) {
@@ -536,11 +548,11 @@
   // Check if inlining policy says no.
   WarmCallInfo wci = *(initial_wci);
   bool success = try_to_inline(callee_method, caller_method, caller_bci,
-                               profile, &wci, should_delay);
+                               jvms, profile, &wci, should_delay);
 
 #ifndef PRODUCT
   if (UseOldInlining && InlineWarmCalls
-      && (PrintOpto || PrintOptoInlining || PrintInlining)) {
+      && (PrintOpto || C->print_inlining())) {
     bool cold = wci.is_cold();
     bool hot  = !cold && wci.is_hot();
     bool old_cold = !success;
@@ -617,7 +629,7 @@
              callee_method->is_compiled_lambda_form()) {
       max_inline_level_adjust += 1;  // don't count method handle calls from java.lang.invoke implem
     }
-    if (max_inline_level_adjust != 0 && PrintInlining && (Verbose || WizardMode)) {
+    if (max_inline_level_adjust != 0 && C->print_inlining() && (Verbose || WizardMode)) {
       CompileTask::print_inline_indent(inline_level());
       tty->print_cr(" \\-> discounting inline depth");
     }
--- a/src/share/vm/opto/c2_globals.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/c2_globals.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -427,7 +427,7 @@
   product(bool, UseDivMod, true,                                            \
           "Use combined DivMod instruction if available")                   \
                                                                             \
-  product(intx, MinJumpTableSize, 18,                                       \
+  product_pd(intx, MinJumpTableSize,                                        \
           "Minimum number of targets in a generated jump table")            \
                                                                             \
   product(intx, MaxJumpTableSize, 65000,                                    \
@@ -454,6 +454,9 @@
   product(bool, EliminateAutoBox, true,                                     \
           "Control optimizations for autobox elimination")                  \
                                                                             \
+  experimental(bool, UseImplicitStableValues, false,                        \
+          "Mark well-known stable fields as such (e.g. String.value)")      \
+                                                                            \
   product(intx, AutoBoxCacheMax, 128,                                       \
           "Sets max value cached by the java.lang.Integer autobox cache")   \
                                                                             \
@@ -639,7 +642,15 @@
                                                                             \
   diagnostic(bool, OptimizeExpensiveOps, true,                              \
           "Find best control for expensive operations")                     \
-
+                                                                            \
+  product(bool, UseMathExactIntrinsics, true,                               \
+          "Enables intrinsification of various java.lang.Math functions")   \
+                                                                            \
+  experimental(bool, ReplaceInParentMaps, false,                            \
+          "Propagate type improvements in callers of inlinee if possible")  \
+                                                                            \
+  experimental(bool, UseTypeSpeculation, false,                             \
+          "Speculatively propagate types from profiles")
 
 C2_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG)
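
`MinJumpTableSize` switches from `product` (one shared default) to `product_pd` (each platform supplies its own value). A toy version of the flag-macro idiom, with a made-up `PD_` constant standing in for the per-platform definition:

    #include <cstdio>

    #define PD_MinJumpTableSize 10   // would come from a platform globals file

    // product flags carry an inline default; product_pd flags take theirs
    // from a platform-defined constant.
    #define MY_FLAGS(product, product_pd)                                    \
      product(int, MaxJumpTableSize, 65000, "Maximum jump table targets")    \
      product_pd(int, MinJumpTableSize,     "Minimum jump table targets")

    #define DECLARE_PRODUCT(type, name, value, doc)  type name = value;
    #define DECLARE_PRODUCT_PD(type, name, doc)      type name = PD_##name;

    MY_FLAGS(DECLARE_PRODUCT, DECLARE_PRODUCT_PD)

    int main() {
        std::printf("%d %d\n", MinJumpTableSize, MaxJumpTableSize);  // 10 65000
    }
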
 
--- a/src/share/vm/opto/c2compiler.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/c2compiler.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -47,9 +47,6 @@
 # include "adfiles/ad_ppc_64.hpp"
 #endif
 
-
-volatile int C2Compiler::_runtimes = uninitialized;
-
 // register information defined by ADLC
 extern const char register_save_policy[];
 extern const int  register_save_type[];
@@ -60,7 +57,7 @@
 const char* C2Compiler::retry_no_escape_analysis() {
   return "retry without escape analysis";
 }
-void C2Compiler::initialize_runtime() {
+bool C2Compiler::init_c2_runtime() {
 
   // Check assumptions used while running ADLC
   Compile::adlc_verification();
@@ -93,41 +90,31 @@
 
   CompilerThread* thread = CompilerThread::current();
 
-  HandleMark  handle_mark(thread);
-
-  OptoRuntime::generate(thread->env());
-
+  HandleMark handle_mark(thread);
+  return OptoRuntime::generate(thread->env());
 }
 
 
 void C2Compiler::initialize() {
-
-  // This method can only be called once per C2Compiler object
   // The first compiler thread that gets here will initialize the
-  // small amount of global state (and runtime stubs) that c2 needs.
+  // small amount of global state (and runtime stubs) that C2 needs.
 
   // There is a possible race once at startup, and then we're fine.
 
   // Note that this is being called from a compiler thread not the
   // main startup thread.
-
-  if (_runtimes != initialized) {
-    initialize_runtimes( initialize_runtime, &_runtimes);
+  if (should_perform_init()) {
+    bool successful = C2Compiler::init_c2_runtime();
+    int new_state = (successful) ? initialized : failed;
+    set_state(new_state);
   }
-
-  // Mark this compiler object as ready to roll
-  mark_initialized();
 }
 
-void C2Compiler::compile_method(ciEnv* env,
-                                ciMethod* target,
-                                int entry_bci) {
-  if (!is_initialized()) {
-    initialize();
-  }
+void C2Compiler::compile_method(ciEnv* env, ciMethod* target, int entry_bci) {
+  assert(is_initialized(), "Compiler thread must be initialized");
+
   bool subsume_loads = SubsumeLoads;
-  bool do_escape_analysis = DoEscapeAnalysis &&
-    !env->jvmti_can_access_local_variables();
+  bool do_escape_analysis = DoEscapeAnalysis && !env->jvmti_can_access_local_variables();
   bool eliminate_boxing = EliminateAutoBox;
   while (!env->failing()) {
     // Attempt to compile while subsuming loads into machine instructions.
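
The initialization rework drops the private `_runtimes` state in favor of the shared `should_perform_init()`/`set_state()` protocol: exactly one compiler thread claims the work, generates the runtime stubs, and publishes success or failure. A sketch of that claim-then-publish pattern with std::atomic (state names are illustrative):

    #include <atomic>
    #include <cstdio>

    enum State { uninitialized, initializing, initialized, failed };
    static std::atomic<int> g_state{uninitialized};

    // The first thread to flip uninitialized -> initializing does the work.
    static bool should_perform_init() {
        int expected = uninitialized;
        return g_state.compare_exchange_strong(expected, initializing);
    }
    static void set_state(int s) { g_state.store(s); }

    static bool init_runtime_stubs() { return true; }   // stand-in for stub generation

    static void initialize() {
        if (should_perform_init()) {
            bool successful = init_runtime_stubs();
            set_state(successful ? initialized : failed);
        }
    }

    int main() {
        initialize();
        std::printf("state=%d\n", g_state.load());   // 2 (initialized)
    }
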
--- a/src/share/vm/opto/c2compiler.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/c2compiler.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -28,24 +28,17 @@
 #include "compiler/abstractCompiler.hpp"
 
 class C2Compiler : public AbstractCompiler {
-private:
-
-  static void initialize_runtime();
+ private:
+  static bool init_c2_runtime();
 
 public:
   // Name
   const char *name() { return "C2"; }
 
-  static volatile int _runtimes;
-
 #ifdef TIERED
   virtual bool is_c2() { return true; };
 #endif // TIERED
 
-  // Customization
-  bool needs_adapters         () { return true; }
-  bool needs_stubs            () { return true; }
-
   void initialize();
 
   // Compilation entry point for methods
--- a/src/share/vm/opto/callGenerator.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/callGenerator.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -63,12 +63,12 @@
   }
 
   virtual bool      is_parse() const           { return true; }
-  virtual JVMState* generate(JVMState* jvms);
+  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
   int is_osr() { return _is_osr; }
 
 };
 
-JVMState* ParseGenerator::generate(JVMState* jvms) {
+JVMState* ParseGenerator::generate(JVMState* jvms, Parse* parent_parser) {
   Compile* C = Compile::current();
 
   if (is_osr()) {
@@ -80,7 +80,7 @@
     return NULL;  // bailing out of the compile; do not try to parse
   }
 
-  Parse parser(jvms, method(), _expected_uses);
+  Parse parser(jvms, method(), _expected_uses, parent_parser);
   // Grab signature for matching/allocation
 #ifdef ASSERT
   if (parser.tf() != (parser.depth() == 1 ? C->tf() : tf())) {
@@ -119,12 +119,12 @@
       _separate_io_proj(separate_io_proj)
   {
   }
-  virtual JVMState* generate(JVMState* jvms);
+  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
 
   CallStaticJavaNode* call_node() const { return _call_node; }
 };
 
-JVMState* DirectCallGenerator::generate(JVMState* jvms) {
+JVMState* DirectCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
   GraphKit kit(jvms);
   bool is_static = method()->is_static();
   address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
@@ -171,10 +171,10 @@
            vtable_index >= 0, "either invalid or usable");
   }
   virtual bool      is_virtual() const          { return true; }
-  virtual JVMState* generate(JVMState* jvms);
+  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
 };
 
-JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
+JVMState* VirtualCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
   GraphKit kit(jvms);
   Node* receiver = kit.argument(0);
 
@@ -276,7 +276,7 @@
   // Convert the CallStaticJava into an inline
   virtual void do_late_inline();
 
-  virtual JVMState* generate(JVMState* jvms) {
+  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
     Compile *C = Compile::current();
     C->print_inlining_skip(this);
 
@@ -290,7 +290,7 @@
     // that the late inlining logic can distinguish between fall
     // through and exceptional uses of the memory and io projections
     // as is done for allocations and macro expansion.
-    return DirectCallGenerator::generate(jvms);
+    return DirectCallGenerator::generate(jvms, parent_parser);
   }
 
   virtual void print_inlining_late(const char* msg) {
@@ -389,7 +389,7 @@
   }
 
   // Now perform the inlining using the synthesized JVMState
-  JVMState* new_jvms = _inline_cg->generate(jvms);
+  JVMState* new_jvms = _inline_cg->generate(jvms, NULL);
   if (new_jvms == NULL)  return;  // no change
   if (C->failing())      return;
 
@@ -429,8 +429,8 @@
 
   virtual bool is_mh_late_inline() const { return true; }
 
-  virtual JVMState* generate(JVMState* jvms) {
-    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);
+  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
+    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms, parent_parser);
     if (_input_not_const) {
       // inlining won't be possible so no need to enqueue right now.
       call_node()->set_generator(this);
@@ -477,15 +477,17 @@
   LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
     LateInlineCallGenerator(method, inline_cg) {}
 
-  virtual JVMState* generate(JVMState* jvms) {
+  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
     Compile *C = Compile::current();
     C->print_inlining_skip(this);
 
     C->add_string_late_inline(this);
 
+    JVMState* new_jvms = DirectCallGenerator::generate(jvms, parent_parser);
+    JVMState* new_jvms =  DirectCallGenerator::generate(jvms, parent_parser);
     return new_jvms;
   }
+
+  virtual bool is_string_late_inline() const { return true; }
 };
 
 CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
@@ -498,13 +500,13 @@
   LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
     LateInlineCallGenerator(method, inline_cg) {}
 
-  virtual JVMState* generate(JVMState* jvms) {
+  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
     Compile *C = Compile::current();
     C->print_inlining_skip(this);
 
     C->add_boxing_late_inline(this);
 
+    JVMState* new_jvms = DirectCallGenerator::generate(jvms, parent_parser);
+    JVMState* new_jvms =  DirectCallGenerator::generate(jvms, parent_parser);
     return new_jvms;
   }
 };
@@ -540,7 +542,7 @@
   virtual bool      is_virtual() const          { return _is_virtual; }
   virtual bool      is_deferred() const         { return true; }
 
-  virtual JVMState* generate(JVMState* jvms);
+  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
 };
 
 
@@ -550,12 +552,12 @@
   return new WarmCallGenerator(ci, if_cold, if_hot);
 }
 
-JVMState* WarmCallGenerator::generate(JVMState* jvms) {
+JVMState* WarmCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
   Compile* C = Compile::current();
   if (C->log() != NULL) {
     C->log()->elem("warm_call bci='%d'", jvms->bci());
   }
-  jvms = _if_cold->generate(jvms);
+  jvms = _if_cold->generate(jvms, parent_parser);
   if (jvms != NULL) {
     Node* m = jvms->map()->control();
     if (m->is_CatchProj()) m = m->in(0);  else m = C->top();
@@ -616,7 +618,7 @@
   virtual bool      is_inline()    const    { return _if_hit->is_inline(); }
   virtual bool      is_deferred()  const    { return _if_hit->is_deferred(); }
 
-  virtual JVMState* generate(JVMState* jvms);
+  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
 };
 
 
@@ -628,7 +630,7 @@
 }
 
 
-JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
+JVMState* PredictedCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
   GraphKit kit(jvms);
   PhaseGVN& gvn = kit.gvn();
   // We need an explicit receiver null_check before checking its type.
@@ -656,7 +658,7 @@
   { PreserveJVMState pjvms(&kit);
     kit.set_control(slow_ctl);
     if (!kit.stopped()) {
-      slow_jvms = _if_missed->generate(kit.sync_jvms());
+      slow_jvms = _if_missed->generate(kit.sync_jvms(), parent_parser);
       if (kit.failing())
         return NULL;  // might happen because of NodeCountInliningCutoff
       assert(slow_jvms != NULL, "must be");
@@ -677,12 +679,12 @@
   kit.replace_in_map(receiver, exact_receiver);
 
   // Make the hot call:
-  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
+  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms(), parent_parser);
   if (new_jvms == NULL) {
     // Inline failed, so make a direct call.
     assert(_if_hit->is_inline(), "must have been a failed inline");
     CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
-    new_jvms = cg->generate(kit.sync_jvms());
+    new_jvms = cg->generate(kit.sync_jvms(), parent_parser);
   }
   kit.add_exception_states_from(new_jvms);
   kit.set_jvms(new_jvms);
@@ -773,7 +775,7 @@
         ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
         guarantee(!target->is_method_handle_intrinsic(), "should not happen");  // XXX remove
         const int vtable_index = Method::invalid_vtable_index;
-        CallGenerator* cg = C->call_generator(target, vtable_index, false, jvms, true, PROB_ALWAYS, true, true);
+        CallGenerator* cg = C->call_generator(target, vtable_index, false, jvms, true, PROB_ALWAYS, NULL, true, true);
         assert(!cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
         if (cg != NULL && cg->is_inline())
           return cg;
@@ -829,6 +831,7 @@
         int  vtable_index       = Method::invalid_vtable_index;
         bool call_does_dispatch = false;
 
+        ciKlass* speculative_receiver_type = NULL;
         if (is_virtual_or_interface) {
           ciInstanceKlass* klass = target->holder();
           Node*             receiver_node = kit.argument(0);
@@ -837,9 +840,12 @@
           target = C->optimize_virtual_call(caller, jvms->bci(), klass, target, receiver_type,
                                             is_virtual,
                                             call_does_dispatch, vtable_index);  // out-parameters
+          // We lack profiling at this call but type speculation may
+          // provide us with a type
+          speculative_receiver_type = receiver_type->speculative_type();
         }
 
-        CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms, true, PROB_ALWAYS, true, true);
+        CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms, true, PROB_ALWAYS, speculative_receiver_type, true, true);
         assert(!cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
         if (cg != NULL && cg->is_inline())
           return cg;
@@ -874,7 +880,7 @@
   virtual bool      is_inlined()   const    { return true; }
   virtual bool      is_intrinsic() const    { return true; }
 
-  virtual JVMState* generate(JVMState* jvms);
+  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
 };
 
 
@@ -884,7 +890,7 @@
 }
 
 
-JVMState* PredictedIntrinsicGenerator::generate(JVMState* jvms) {
+JVMState* PredictedIntrinsicGenerator::generate(JVMState* jvms, Parse* parent_parser) {
   GraphKit kit(jvms);
   PhaseGVN& gvn = kit.gvn();
 
@@ -904,7 +910,7 @@
     PreserveJVMState pjvms(&kit);
     kit.set_control(slow_ctl);
     if (!kit.stopped()) {
-      slow_jvms = _cg->generate(kit.sync_jvms());
+      slow_jvms = _cg->generate(kit.sync_jvms(), parent_parser);
       if (kit.failing())
         return NULL;  // might happen because of NodeCountInliningCutoff
       assert(slow_jvms != NULL, "must be");
@@ -922,12 +928,12 @@
   }
 
   // Generate intrinsic code:
-  JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
+  JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms(), parent_parser);
   if (new_jvms == NULL) {
     // Intrinsic failed, so use slow code or make a direct call.
     if (slow_map == NULL) {
       CallGenerator* cg = CallGenerator::for_direct_call(method());
-      new_jvms = cg->generate(kit.sync_jvms());
+      new_jvms = cg->generate(kit.sync_jvms(), parent_parser);
     } else {
       kit.set_jvms(slow_jvms);
       return kit.transfer_exceptions_into_jvms();
@@ -997,7 +1003,7 @@
   virtual bool      is_virtual() const          { ShouldNotReachHere(); return false; }
   virtual bool      is_trap() const             { return true; }
 
-  virtual JVMState* generate(JVMState* jvms);
+  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
 };
 
 
@@ -1009,7 +1015,7 @@
 }
 
 
-JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
+JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
   GraphKit kit(jvms);
   // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver).
   int nargs = method()->arg_size();
--- a/src/share/vm/opto/callGenerator.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/callGenerator.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,8 @@
 #include "opto/type.hpp"
 #include "runtime/deoptimization.hpp"
 
+class Parse;
+
 //---------------------------CallGenerator-------------------------------------
 // The subclasses of this class handle generation of ideal nodes for
 // call sites and method entry points.
@@ -65,11 +67,14 @@
   virtual bool      is_predicted() const        { return false; }
   // is_trap: Does not return to the caller.  (E.g., uncommon trap.)
   virtual bool      is_trap() const             { return false; }
+  // does_virtual_dispatch: Should try inlining as a normal method first.
+  virtual bool      does_virtual_dispatch() const     { return false; }
 
   // is_late_inline: supports conversion of call into an inline
   virtual bool      is_late_inline() const      { return false; }
   // same but for method handle calls
   virtual bool      is_mh_late_inline() const   { return false; }
+  virtual bool      is_string_late_inline() const { return false; }
 
   // for method handle calls: have we tried inlinining the call already?
   virtual bool      already_attempted() const   { ShouldNotReachHere(); return false; }
@@ -106,7 +111,7 @@
   //
   // If the result is NULL, it means that this CallGenerator was unable
   // to handle the given call, and another CallGenerator should be consulted.
-  virtual JVMState* generate(JVMState* jvms) = 0;
+  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) = 0;
 
   // How to generate a call site that is inlined:
   static CallGenerator* for_inline(ciMethod* m, float expected_uses = -1);
@@ -159,8 +164,9 @@
   virtual void print_inlining_late(const char* msg) { ShouldNotReachHere(); }
 
   static void print_inlining(Compile* C, ciMethod* callee, int inline_level, int bci, const char* msg) {
-    if (PrintInlining)
+    if (C->print_inlining()) {
       C->print_inlining(callee, inline_level, bci, msg);
+    }
   }
 };
 
@@ -260,7 +266,7 @@
   // Because WarmInfo objects live over the entire lifetime of the
   // Compile object, they are allocated into the comp_arena, which
   // does not get resource marked or reset during the compile process
-  void *operator new( size_t x, Compile* C ) { return C->comp_arena()->Amalloc(x); }
+  void *operator new( size_t x, Compile* C ) throw() { return C->comp_arena()->Amalloc(x); }
   void operator delete( void * ) { } // fast deallocation
 
   static WarmCallInfo* always_hot();
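
Mechanically, this file threads one new context argument (`Parse* parent_parser`) through every `generate()` override; wrapping generators just forward it to their delegate. The shape of that refactor, as a sketch with illustrative class names:

    #include <cstdio>

    struct JVMState;
    struct Parse;   // opaque parsing context threaded through the chain

    struct Generator {
        virtual ~Generator() {}
        // Every override now receives the parsing context as well.
        virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) = 0;
    };

    struct DirectGen : Generator {
        JVMState* generate(JVMState* jvms, Parse*) override { return jvms; }
    };

    // A wrapping generator forwards the context unchanged to its delegate.
    struct WrappingGen : Generator {
        Generator* inner;
        explicit WrappingGen(Generator* g) : inner(g) {}
        JVMState* generate(JVMState* jvms, Parse* parent_parser) override {
            return inner->generate(jvms, parent_parser);
        }
    };

    int main() {
        DirectGen d;
        WrappingGen w(&d);
        std::printf("%p\n", (void*)w.generate(nullptr, nullptr));
    }
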
--- a/src/share/vm/opto/callnode.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/callnode.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -458,7 +458,7 @@
       st->print("={");
       uint nf = spobj->n_fields();
       if (nf > 0) {
-        uint first_ind = spobj->first_index();
+        uint first_ind = spobj->first_index(mcall->jvms());
         Node* fld_node = mcall->in(first_ind);
         ciField* cifield;
         if (iklass != NULL) {
@@ -1063,7 +1063,6 @@
   int scloff = jvms->scloff();
   int endoff = jvms->endoff();
   assert(endoff == (int)req(), "no other states or debug info after me");
-  assert(jvms->scl_size() == 0, "parsed code should not have scalar objects");
   Node* top = Compile::current()->top();
   for (uint i = 0; i < grow_by; i++) {
     ins_req(monoff, top);
@@ -1079,32 +1078,31 @@
   const int MonitorEdges = 2;
   assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
   assert(req() == jvms()->endoff(), "correct sizing");
-  assert((jvms()->scl_size() == 0), "parsed code should not have scalar objects");
   int nextmon = jvms()->scloff();
   if (GenerateSynchronizationCode) {
-    add_req(lock->box_node());
-    add_req(lock->obj_node());
+    ins_req(nextmon,   lock->box_node());
+    ins_req(nextmon+1, lock->obj_node());
   } else {
     Node* top = Compile::current()->top();
-    add_req(top);
-    add_req(top);
+    ins_req(nextmon, top);
+    ins_req(nextmon, top);
   }
-  jvms()->set_scloff(nextmon+MonitorEdges);
+  jvms()->set_scloff(nextmon + MonitorEdges);
   jvms()->set_endoff(req());
 }
 
 void SafePointNode::pop_monitor() {
   // Delete last monitor from debug info
-  assert((jvms()->scl_size() == 0), "parsed code should not have scalar objects");
   debug_only(int num_before_pop = jvms()->nof_monitors());
-  const int MonitorEdges = (1<<JVMState::logMonitorEdges);
+  const int MonitorEdges = 2;
+  assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
   int scloff = jvms()->scloff();
   int endoff = jvms()->endoff();
   int new_scloff = scloff - MonitorEdges;
   int new_endoff = endoff - MonitorEdges;
   jvms()->set_scloff(new_scloff);
   jvms()->set_endoff(new_endoff);
-  while (scloff > new_scloff)  del_req(--scloff);
+  while (scloff > new_scloff)  del_req_ordered(--scloff);
   assert(jvms()->nof_monitors() == num_before_pop-1, "");
 }
 
@@ -1169,13 +1167,12 @@
 }
 
 SafePointScalarObjectNode*
-SafePointScalarObjectNode::clone(int jvms_adj, Dict* sosn_map) const {
+SafePointScalarObjectNode::clone(Dict* sosn_map) const {
   void* cached = (*sosn_map)[(void*)this];
   if (cached != NULL) {
     return (SafePointScalarObjectNode*)cached;
   }
   SafePointScalarObjectNode* res = (SafePointScalarObjectNode*)Node::clone();
-  res->_first_index += jvms_adj;
   sosn_map->Insert((void*)this, (void*)res);
   return res;
 }
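
push_monitor/pop_monitor now insert and delete the two monitor edges in front of the scalar-object section (`ins_req`/`del_req_ordered`) instead of appending, which is why the old "parsed code should not have scalar objects" asserts could go: monitor and scalar-object edges can now coexist, with scloff/endoff tracking the boundary. A sketch of the layout invariant (ints stand in for edge Nodes):

    #include <cassert>
    #include <cstdio>
    #include <vector>

    // Debug-info inputs laid out as [... monitors ...)[... scalar objects ...):
    // scloff marks where scalar-object edges begin; endoff is one past the end.
    struct DebugEdges {
        std::vector<int> req;
        size_t scloff = 0, endoff = 0;

        // Insert the monitor's two edges *before* the scalar-object section,
        // then advance the boundary; appending would interleave the sections.
        void push_monitor(int box, int obj) {
            req.insert(req.begin() + scloff, {box, obj});
            scloff += 2;
            endoff  = req.size();
        }

        void pop_monitor() {
            assert(scloff >= 2 && "no monitor to pop");
            req.erase(req.begin() + (scloff - 2), req.begin() + scloff);
            scloff -= 2;
            endoff  = req.size();
        }
    };

    int main() {
        DebugEdges e;
        e.push_monitor(1, 2);
        e.req.push_back(99);   // a scalar-object edge after the monitors
        e.endoff = e.req.size();
        e.push_monitor(3, 4);  // lands before the scalar-object edge
        e.pop_monitor();
        std::printf("%zu\n", e.req.size());   // 3
    }
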
--- a/src/share/vm/opto/callnode.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/callnode.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -216,7 +216,7 @@
   // Because JVMState objects live over the entire lifetime of the
   // Compile object, they are allocated into the comp_arena, which
   // does not get resource marked or reset during the compile process
-  void *operator new( size_t x, Compile* C ) { return C->comp_arena()->Amalloc(x); }
+  void *operator new( size_t x, Compile* C ) throw() { return C->comp_arena()->Amalloc(x); }
   void operator delete( void * ) { } // fast deallocation
 
   // Create a new JVMState, ready for abstract interpretation.
@@ -449,14 +449,17 @@
 // at a safepoint.
 
 class SafePointScalarObjectNode: public TypeNode {
-  uint _first_index; // First input edge index of a SafePoint node where
+  uint _first_index; // Relative index of the first input edge of a SafePoint node where
                      // states of the scalarized object fields are collected.
+                     // It is relative to the last (youngest) jvms->_scloff.
   uint _n_fields;    // Number of non-static fields of the scalarized object.
   DEBUG_ONLY(AllocateNode* _alloc;)
 
   virtual uint hash() const ; // { return NO_HASH; }
   virtual uint cmp( const Node &n ) const;
 
+  uint first_index() const { return _first_index; }
+
 public:
   SafePointScalarObjectNode(const TypeOopPtr* tp,
 #ifdef ASSERT
@@ -469,7 +472,10 @@
   virtual const RegMask &out_RegMask() const;
   virtual uint           match_edge(uint idx) const;
 
-  uint first_index() const { return _first_index; }
+  uint first_index(JVMState* jvms) const {
+    assert(jvms != NULL, "missed JVMS");
+    return jvms->scloff() + _first_index;
+  }
   uint n_fields()    const { return _n_fields; }
 
 #ifdef ASSERT
@@ -485,7 +491,7 @@
   // corresponds appropriately to "this" in "new_call".  Assumes that
   // "sosn_map" is a map, specific to the translation of "s" to "new_call",
   // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
-  SafePointScalarObjectNode* clone(int jvms_adj, Dict* sosn_map) const;
+  SafePointScalarObjectNode* clone(Dict* sosn_map) const;
 
 #ifndef PRODUCT
   virtual void              dump_spec(outputStream *st) const;
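
Storing `_first_index` relative to the youngest `scloff` is what makes the monitor insertion above safe: when monitor edges shift the scalar-object section, the relative offset stays valid, and the absolute input-edge index is recomputed on demand. The arithmetic, as a sketch:

    #include <cassert>
    #include <cstdio>

    struct JVMStateLike { unsigned scloff; };   // start of scalar-object edges

    struct ScalarObj {
        unsigned first_index_rel;   // relative to jvms->scloff
        unsigned first_index(const JVMStateLike* jvms) const {
            assert(jvms != nullptr && "missed JVMS");
            return jvms->scloff + first_index_rel;   // absolute edge index
        }
    };

    int main() {
        JVMStateLike jvms{12};
        ScalarObj so{3};
        std::printf("%u\n", so.first_index(&jvms));  // 15
        jvms.scloff += 2;                            // a monitor was pushed
        std::printf("%u\n", so.first_index(&jvms));  // 17, no fix-up needed
    }
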
--- a/src/share/vm/opto/cfgnode.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/cfgnode.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1932,7 +1932,7 @@
 #ifdef _LP64
   // Push DecodeN/DecodeNKlass down through phi.
   // The rest of phi graph will transform by split EncodeP node though phis up.
-  if ((UseCompressedOops || UseCompressedKlassPointers) && can_reshape && progress == NULL) {
+  if ((UseCompressedOops || UseCompressedClassPointers) && can_reshape && progress == NULL) {
     bool may_push = true;
     bool has_decodeN = false;
     bool is_decodeN = false;
--- a/src/share/vm/opto/chaitin.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/chaitin.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -122,40 +122,23 @@
   return score;
 }
 
-LRG_List::LRG_List( uint max ) : _cnt(max), _max(max), _lidxs(NEW_RESOURCE_ARRAY(uint,max)) {
-  memset( _lidxs, 0, sizeof(uint)*max );
-}
-
-void LRG_List::extend( uint nidx, uint lidx ) {
-  _nesting.check();
-  if( nidx >= _max ) {
-    uint size = 16;
-    while( size <= nidx ) size <<=1;
-    _lidxs = REALLOC_RESOURCE_ARRAY( uint, _lidxs, _max, size );
-    _max = size;
-  }
-  while( _cnt <= nidx )
-    _lidxs[_cnt++] = 0;
-  _lidxs[nidx] = lidx;
-}
-
 #define NUMBUCKS 3
 
 // Straight out of Tarjan's union-find algorithm
 uint LiveRangeMap::find_compress(uint lrg) {
   uint cur = lrg;
-  uint next = _uf_map[cur];
+  uint next = _uf_map.at(cur);
   while (next != cur) { // Scan chain of equivalences
     assert( next < cur, "always union smaller");
     cur = next; // until find a fixed-point
-    next = _uf_map[cur];
+    next = _uf_map.at(cur);
   }
 
   // Core of union-find algorithm: update chain of
   // equivalences to be equal to the root.
   while (lrg != next) {
-    uint tmp = _uf_map[lrg];
-    _uf_map.map(lrg, next);
+    uint tmp = _uf_map.at(lrg);
+    _uf_map.at_put(lrg, next);
     lrg = tmp;
   }
   return lrg;
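
find_compress is textbook Tarjan union-find: chase parent links to the root, then rewrite every node on the path to point directly at it, so later lookups are amortized near-constant. A standalone version over a plain array:

    #include <cassert>
    #include <cstdio>
    #include <vector>

    // Parent-pointer forest; uf[i] == i marks a root.
    static unsigned find_compress(std::vector<unsigned>& uf, unsigned lrg) {
        unsigned cur = lrg, next = uf[cur];
        while (next != cur) {                 // scan chain of equivalences
            assert(next < cur && "always union smaller");
            cur  = next;                      // until we reach a fixed point
            next = uf[cur];
        }
        while (lrg != next) {                 // compress the whole path to the root
            unsigned tmp = uf[lrg];
            uf[lrg] = next;
            lrg = tmp;
        }
        return next;
    }

    int main() {
        std::vector<unsigned> uf = {0, 0, 1, 2};    // chain 3 -> 2 -> 1 -> 0
        std::printf("%u\n", find_compress(uf, 3));  // 0; uf[3] and uf[2] now point at 0
    }
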
@@ -165,10 +148,10 @@
 void LiveRangeMap::reset_uf_map(uint max_lrg_id) {
   _max_lrg_id= max_lrg_id;
   // Force the Union-Find mapping to be at least this large
-  _uf_map.extend(_max_lrg_id, 0);
+  _uf_map.at_put_grow(_max_lrg_id, 0);
   // Initialize it to be the ID mapping.
   for (uint i = 0; i < _max_lrg_id; ++i) {
-    _uf_map.map(i, i);
+    _uf_map.at_put(i, i);
   }
 }
 
@@ -176,12 +159,12 @@
 // the Union-Find mapping after this call.
 void LiveRangeMap::compress_uf_map_for_nodes() {
   // For all Nodes, compress mapping
-  uint unique = _names.Size();
+  uint unique = _names.length();
   for (uint i = 0; i < unique; ++i) {
-    uint lrg = _names[i];
+    uint lrg = _names.at(i);
     uint compressed_lrg = find(lrg);
     if (lrg != compressed_lrg) {
-      _names.map(i, compressed_lrg);
+      _names.at_put(i, compressed_lrg);
     }
   }
 }
@@ -198,11 +181,11 @@
     return lrg;
   }
 
-  uint next = _uf_map[lrg];
+  uint next = _uf_map.at(lrg);
   while (next != lrg) { // Scan chain of equivalences
     assert(next < lrg, "always union smaller");
     lrg = next; // until find a fixed-point
-    next = _uf_map[lrg];
+    next = _uf_map.at(lrg);
   }
   return next;
 }
@@ -215,7 +198,7 @@
        NULL
 #endif
        )
-  , _lrg_map(unique)
+  , _lrg_map(Thread::current()->resource_area(), unique)
   , _live(0)
   , _spilled_once(Thread::current()->resource_area())
   , _spilled_twice(Thread::current()->resource_area())
@@ -301,7 +284,7 @@
       // Copy kill projections after the cloned node
       Node* kills = proj->clone();
       kills->set_req(0, copy);
-      b->_nodes.insert(idx++, kills);
+      b->insert_node(kills, idx++);
       _cfg.map_node_to_block(kills, b);
       new_lrg(kills, max_lrg_id++);
     }
@@ -682,16 +665,17 @@
   uint lr_counter = 1;
   for( uint i = 0; i < _cfg.number_of_blocks(); i++ ) {
     Block* block = _cfg.get_block(i);
-    uint cnt = block->_nodes.size();
+    uint cnt = block->number_of_nodes();
 
     // Handle all the normal Nodes in the block
     for( uint j = 0; j < cnt; j++ ) {
-      Node *n = block->_nodes[j];
+      Node *n = block->get_node(j);
       // Pre-color to the zero live range, or pick virtual register
       const RegMask &rm = n->out_RegMask();
       _lrg_map.map(n->_idx, rm.is_NotEmpty() ? lr_counter++ : 0);
     }
   }
+
   // Reset the Union-Find mapping to be identity
   _lrg_map.reset_uf_map(lr_counter);
 }
@@ -710,8 +694,8 @@
     Block* block = _cfg.get_block(i);
 
     // For all instructions
-    for (uint j = 1; j < block->_nodes.size(); j++) {
-      Node* n = block->_nodes[j];
+    for (uint j = 1; j < block->number_of_nodes(); j++) {
+      Node* n = block->get_node(j);
       uint input_edge_start =1; // Skip control most nodes
       if (n->is_Mach()) {
         input_edge_start = n->as_Mach()->oper_input_base();
@@ -1604,7 +1588,7 @@
     // For all instructions in block
     uint last_inst = block->end_idx();
     for (uint j = 1; j <= last_inst; j++) {
-      Node* n = block->_nodes[j];
+      Node* n = block->get_node(j);
 
       // Dead instruction???
       assert( n->outcnt() != 0 ||// Nothing dead after post alloc
@@ -1641,7 +1625,7 @@
             assert( cisc->oper_input_base() == 2, "Only adding one edge");
             cisc->ins_req(1,src);         // Requires a memory edge
           }
-          block->_nodes.map(j,cisc);          // Insert into basic block
+          block->map_node(cisc, j);          // Insert into basic block
           n->subsume_by(cisc, C); // Correct graph
           //
           ++_used_cisc_instructions;
@@ -1698,7 +1682,7 @@
       // (where top() node is placed).
       base->init_req(0, _cfg.get_root_node());
       Block *startb = _cfg.get_block_for_node(C->top());
-      startb->_nodes.insert(startb->find_node(C->top()), base );
+      startb->insert_node(base, startb->find_node(C->top()));
       _cfg.map_node_to_block(base, startb);
       assert(_lrg_map.live_range_id(base) == 0, "should not have LRG yet");
     }
@@ -1743,9 +1727,9 @@
   // Search the current block for an existing base-Phi
   Block *b = _cfg.get_block_for_node(derived);
   for( i = 1; i <= b->end_idx(); i++ ) {// Search for matching Phi
-    Node *phi = b->_nodes[i];
+    Node *phi = b->get_node(i);
     if( !phi->is_Phi() ) {      // Found end of Phis with no match?
-      b->_nodes.insert( i, base ); // Must insert created Phi here as base
+      b->insert_node(base, i); // Must insert created Phi here as base
       _cfg.map_node_to_block(base, b);
       new_lrg(base,maxlrg++);
       break;
@@ -1786,7 +1770,7 @@
     IndexSet liveout(_live->live(block));
 
     for (uint j = block->end_idx() + 1; j > 1; j--) {
-      Node* n = block->_nodes[j - 1];
+      Node* n = block->get_node(j - 1);
 
       // Pre-split compares of loop-phis.  Loop-phis form a cycle we would
       // like to see in the same register.  Compare uses the loop-phi and so
@@ -1979,8 +1963,8 @@
   b->dump_head(&_cfg);
 
   // For all instructions
-  for( uint j = 0; j < b->_nodes.size(); j++ )
-    dump(b->_nodes[j]);
+  for( uint j = 0; j < b->number_of_nodes(); j++ )
+    dump(b->get_node(j));
   // Print live-out info at end of block
   if( _live ) {
     tty->print("Liveout: ");
@@ -2271,8 +2255,8 @@
     int dump_once = 0;
 
     // For all instructions
-    for( uint j = 0; j < block->_nodes.size(); j++ ) {
-      Node *n = block->_nodes[j];
+    for( uint j = 0; j < block->number_of_nodes(); j++ ) {
+      Node *n = block->get_node(j);
       if (_lrg_map.find_const(n) == lidx) {
         if (!dump_once++) {
           tty->cr();
--- a/src/share/vm/opto/chaitin.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/chaitin.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -52,6 +52,7 @@
 class LRG : public ResourceObj {
   friend class VMStructs;
 public:
+  static const uint AllStack_size = 0xFFFFF; // This mask size indicates that the mask of this LRG supports stack positions
   enum { SPILL_REG=29999 };     // Register number of a spilled LRG
 
   double _cost;                 // 2 for loads/1 for stores times block freq
@@ -80,14 +81,21 @@
 private:
   uint _eff_degree;             // Effective degree: Sum of neighbors _num_regs
 public:
-  int degree() const { assert( _degree_valid, "" ); return _eff_degree; }
+  int degree() const { assert( _degree_valid , "" ); return _eff_degree; }
   // Degree starts not valid and any change to the IFG neighbor
   // set makes it not valid.
-  void set_degree( uint degree ) { _eff_degree = degree; debug_only(_degree_valid = 1;) }
+  void set_degree( uint degree ) {
+    _eff_degree = degree;
+    debug_only(_degree_valid = 1;)
+    assert(!_mask.is_AllStack() || (_mask.is_AllStack() && lo_degree()), "_eff_degree can't be bigger than AllStack_size - _num_regs if the mask supports stack registers");
+  }
   // Made a change that hammered degree
   void invalid_degree() { debug_only(_degree_valid=0;) }
   // Incrementally modify degree.  If it was correct, it should remain correct
-  void inc_degree( uint mod ) { _eff_degree += mod; }
+  void inc_degree( uint mod ) {
+    _eff_degree += mod;
+    assert(!_mask.is_AllStack() || (_mask.is_AllStack() && lo_degree()), "_eff_degree can't be bigger than AllStack_size - _num_regs if the mask supports stack registers");
+  }
   // Compute the degree between 2 live ranges
   int compute_degree( LRG &l ) const;
 
@@ -95,9 +103,9 @@
   RegMask _mask;                // Allowed registers for this LRG
   uint _mask_size;              // cache of _mask.Size();
 public:
-  int compute_mask_size() const { return _mask.is_AllStack() ? 65535 : _mask.Size(); }
+  int compute_mask_size() const { return _mask.is_AllStack() ? AllStack_size : _mask.Size(); }
   void set_mask_size( int size ) {
-    assert((size == 65535) || (size == (int)_mask.Size()), "");
+    assert((size == (int)AllStack_size) || (size == (int)_mask.Size()), "");
     _mask_size = size;
 #ifdef ASSERT
     _msize_valid=1;
@@ -283,8 +291,8 @@
 
   // Straight out of Tarjan's union-find algorithm
   uint find_compress(const Node *node) {
-    uint lrg_id = find_compress(_names[node->_idx]);
-    _names.map(node->_idx, lrg_id);
+    uint lrg_id = find_compress(_names.at(node->_idx));
+    _names.at_put(node->_idx, lrg_id);
     return lrg_id;
   }
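
For reference, find_compress above is textbook union-find path compression: every id visited on the way to the root live range is remapped to point directly at it. A minimal standalone sketch of the same idea (illustrative C++ only, not HotSpot code; assumes a dense id space like the lidx map):

    #include <vector>

    // Path-compressing find over a dense id space (illustrative sketch).
    // parent[i] == i marks a root; each lookup rewrites the whole path
    // to point at the root, just as find_compress rewrites _names.
    struct UnionFindSketch {
      std::vector<unsigned> parent;
      explicit UnionFindSketch(unsigned n) : parent(n) {
        for (unsigned i = 0; i < n; i++) parent[i] = i;
      }
      unsigned find_compress(unsigned i) {
        if (parent[i] != i) {
          parent[i] = find_compress(parent[i]);  // compress the path
        }
        return parent[i];
      }
    };
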
 
@@ -305,40 +313,40 @@
   }
 
   uint size() const {
-    return _names.Size();
+    return _names.length();
   }
 
   uint live_range_id(uint idx) const {
-    return _names[idx];
+    return _names.at(idx);
   }
 
   uint live_range_id(const Node *node) const {
-    return _names[node->_idx];
+    return _names.at(node->_idx);
   }
 
   uint uf_live_range_id(uint lrg_id) const {
-    return _uf_map[lrg_id];
+    return _uf_map.at(lrg_id);
   }
 
   void map(uint idx, uint lrg_id) {
-    _names.map(idx, lrg_id);
+    _names.at_put(idx, lrg_id);
   }
 
   void uf_map(uint dst_lrg_id, uint src_lrg_id) {
-    _uf_map.map(dst_lrg_id, src_lrg_id);
+    _uf_map.at_put(dst_lrg_id, src_lrg_id);
   }
 
   void extend(uint idx, uint lrg_id) {
-    _names.extend(idx, lrg_id);
+    _names.at_put_grow(idx, lrg_id);
   }
 
   void uf_extend(uint dst_lrg_id, uint src_lrg_id) {
-    _uf_map.extend(dst_lrg_id, src_lrg_id);
+    _uf_map.at_put_grow(dst_lrg_id, src_lrg_id);
   }
 
-  LiveRangeMap(uint unique)
-  : _names(unique)
-  , _uf_map(unique)
+  LiveRangeMap(Arena* arena, uint unique)
+  : _names(arena, unique, unique, 0)
+  , _uf_map(arena, unique, unique, 0)
   , _max_lrg_id(0) {}
 
   uint find_id( const Node *n ) {
@@ -355,14 +363,14 @@
   void compress_uf_map_for_nodes();
 
   uint find(uint lidx) {
-    uint uf_lidx = _uf_map[lidx];
+    uint uf_lidx = _uf_map.at(lidx);
     return (uf_lidx == lidx) ? uf_lidx : find_compress(lidx);
   }
 
   // Convert a Node into a Live Range Index - a lidx
   uint find(const Node *node) {
     uint lidx = live_range_id(node);
-    uint uf_lidx = _uf_map[lidx];
+    uint uf_lidx = _uf_map.at(lidx);
     return (uf_lidx == lidx) ? uf_lidx : find_compress(node);
   }
 
@@ -371,10 +379,10 @@
 
   // Like Find above, but no path compress, so bad asymptotic behavior
   uint find_const(const Node *node) const {
-    if(node->_idx >= _names.Size()) {
+    if(node->_idx >= (uint)_names.length()) {
       return 0; // not mapped, usual for debug dump
     }
-    return find_const(_names[node->_idx]);
+    return find_const(_names.at(node->_idx));
   }
 };
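
The LiveRangeMap conversion above swaps the old map/extend/Size vector API for GrowableArray's at/at_put/at_put_grow/length. A rough stand-in for the at_put_grow semantics the conversion relies on (simplified sketch; the real template is arena-allocated and lives in utilities/growableArray.hpp):

    #include <vector>

    // Simplified sketch of GrowableArray-style at_put_grow: writing past
    // the current length grows the array, filling the gap with a default
    // value, which is why extend(idx, lrg_id) maps onto it directly.
    template <typename T>
    struct GrowableSketch {
      std::vector<T> data;
      T fill;
      GrowableSketch(int len, T f) : data(len, f), fill(f) {}
      int length() const { return (int)data.size(); }
      T at(int i) const { return data.at(i); }
      void at_put(int i, T v) { data.at(i) = v; }
      void at_put_grow(int i, T v) {
        if (i >= (int)data.size()) data.resize(i + 1, fill);
        data[i] = v;
      }
    };
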
 
--- a/src/share/vm/opto/classes.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/classes.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -32,6 +32,7 @@
 #include "opto/loopnode.hpp"
 #include "opto/machnode.hpp"
 #include "opto/memnode.hpp"
+#include "opto/mathexactnode.hpp"
 #include "opto/mulnode.hpp"
 #include "opto/multnode.hpp"
 #include "opto/node.hpp"
--- a/src/share/vm/opto/classes.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/classes.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -29,6 +29,8 @@
 macro(AbsF)
 macro(AbsI)
 macro(AddD)
+macro(AddExactI)
+macro(AddExactL)
 macro(AddF)
 macro(AddI)
 macro(AddL)
@@ -133,6 +135,7 @@
 macro(ExpD)
 macro(FastLock)
 macro(FastUnlock)
+macro(FlagsProj)
 macro(Goto)
 macro(Halt)
 macro(If)
@@ -167,6 +170,9 @@
 macro(LoopLimit)
 macro(Mach)
 macro(MachProj)
+macro(MathExact)
+macro(MathExactI)
+macro(MathExactL)
 macro(MaxI)
 macro(MemBarAcquire)
 macro(MemBarAcquireLock)
@@ -186,12 +192,16 @@
 macro(MoveL2D)
 macro(MoveD2L)
 macro(MulD)
+macro(MulExactI)
+macro(MulExactL)
 macro(MulF)
 macro(MulHiL)
 macro(MulI)
 macro(MulL)
 macro(Multi)
 macro(NegD)
+macro(NegExactI)
+macro(NegExactL)
 macro(NegF)
 macro(NeverBranch)
 macro(Opaque1)
@@ -241,6 +251,8 @@
 macro(StrEquals)
 macro(StrIndexOf)
 macro(SubD)
+macro(SubExactI)
+macro(SubExactL)
 macro(SubF)
 macro(SubI)
 macro(SubL)
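
The new AddExact*/SubExact*/MulExact*/NegExact* opcodes back the JDK 8 java.lang.Math exact-arithmetic intrinsics: each node produces a result plus an overflow flag (the FlagsProj added above) that steers execution to a throwing path. A sketch of the intended semantics for the 32-bit add case, in plain C++ rather than ideal-graph nodes:

    #include <climits>
    #include <stdexcept>

    // Overflow-checked 32-bit add (illustrative sketch of what an
    // AddExactI node computes): the sum, plus a check that routes to a
    // throwing/deoptimizing branch when the exact result does not fit.
    static int add_exact_i(int a, int b) {
      long long r = (long long)a + (long long)b;  // widen: sum is exact
      if (r < INT_MIN || r > INT_MAX) {
        throw std::overflow_error("integer overflow");  // guarded path
      }
      return (int)r;
    }
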
--- a/src/share/vm/opto/coalesce.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/coalesce.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -54,9 +54,9 @@
     for( j=0; j<b->_num_succs; j++ )
       tty->print("B%d ",b->_succs[j]->_pre_order);
     tty->print(" IDom: B%d/#%d\n", b->_idom ? b->_idom->_pre_order : 0, b->_dom_depth);
-    uint cnt = b->_nodes.size();
+    uint cnt = b->number_of_nodes();
     for( j=0; j<cnt; j++ ) {
-      Node *n = b->_nodes[j];
+      Node *n = b->get_node(j);
       dump( n );
       tty->print("\t%s\t",n->Name());
 
@@ -152,7 +152,7 @@
   // after the last use.  Last use is really first-use on a backwards scan.
   uint i = b->end_idx()-1;
   while(1) {
-    Node *n = b->_nodes[i];
+    Node *n = b->get_node(i);
     // Check for end of virtual copies; this is also the end of the
     // parallel renaming effort.
     if (n->_idx < _unique) {
@@ -174,7 +174,7 @@
   // the last kill.  Thus it is the first kill on a backwards scan.
   i = b->end_idx()-1;
   while (1) {
-    Node *n = b->_nodes[i];
+    Node *n = b->get_node(i);
     // Check for end of virtual copies; this is also the end of the
     // parallel renaming effort.
     if (n->_idx < _unique) {
@@ -200,13 +200,13 @@
     tmp ->set_req(idx,copy->in(idx));
     copy->set_req(idx,tmp);
     // Save source in temp early, before source is killed
-    b->_nodes.insert(kill_src_idx,tmp);
+    b->insert_node(tmp, kill_src_idx);
     _phc._cfg.map_node_to_block(tmp, b);
     last_use_idx++;
   }
 
   // Insert just after last use
-  b->_nodes.insert(last_use_idx+1,copy);
+  b->insert_node(copy, last_use_idx + 1);
 }
 
 void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
@@ -237,8 +237,8 @@
     Block *b = _phc._cfg.get_block(i);
     uint cnt = b->num_preds();  // Number of inputs to the Phi
 
-    for( uint l = 1; l<b->_nodes.size(); l++ ) {
-      Node *n = b->_nodes[l];
+    for( uint l = 1; l<b->number_of_nodes(); l++ ) {
+      Node *n = b->get_node(l);
 
       // Do not use removed-copies, use copied value instead
       uint ncnt = n->req();
@@ -260,7 +260,7 @@
         if (_phc._lrg_map.find(n) == _phc._lrg_map.find(def)) {
           n->replace_by(def);
           n->set_req(cidx,NULL);
-          b->_nodes.remove(l);
+          b->remove_node(l);
           l--;
           continue;
         }
@@ -321,13 +321,13 @@
                m->as_Mach()->rematerialize()) {
               copy = m->clone();
               // Insert the copy in the basic block, just before us
-              b->_nodes.insert(l++, copy);
+              b->insert_node(copy, l++);
               l += _phc.clone_projs(b, l, m, copy, _phc._lrg_map);
             } else {
               const RegMask *rm = C->matcher()->idealreg2spillmask[m->ideal_reg()];
               copy = new (C) MachSpillCopyNode(m, *rm, *rm);
               // Insert the copy in the basic block, just before us
-              b->_nodes.insert(l++, copy);
+              b->insert_node(copy, l++);
             }
             // Insert the copy in the use-def chain
             n->set_req(idx, copy);
@@ -339,7 +339,7 @@
         } // End of is two-adr
 
         // Insert a copy at a debug use for a lrg which has high frequency
-        if (b->_freq < OPTO_DEBUG_SPLIT_FREQ || b->is_uncommon(&_phc._cfg)) {
+        if (b->_freq < OPTO_DEBUG_SPLIT_FREQ || _phc._cfg.is_uncommon(b)) {
           // Walk the debug inputs to the node and check for lrg freq
           JVMState* jvms = n->jvms();
           uint debug_start = jvms ? jvms->debug_start() : 999999;
@@ -376,7 +376,7 @@
               // Insert the copy in the use-def chain
               n->set_req(inpidx, copy );
               // Insert the copy in the basic block, just before us
-              b->_nodes.insert( l++, copy );
+              b->insert_node(copy,  l++);
               // Extend ("register allocate") the names array for the copy.
               uint max_lrg_id = _phc._lrg_map.max_lrg_id();
               _phc.new_lrg(copy, max_lrg_id);
@@ -431,8 +431,8 @@
     }
 
     // Visit all the Phis in successor block
-    for( uint k = 1; k<bs->_nodes.size(); k++ ) {
-      Node *n = bs->_nodes[k];
+    for( uint k = 1; k<bs->number_of_nodes(); k++ ) {
+      Node *n = bs->get_node(k);
       if( !n->is_Phi() ) break;
       combine_these_two( n, n->in(j) );
     }
@@ -442,7 +442,7 @@
   // Check _this_ block for 2-address instructions and copies.
   uint cnt = b->end_idx();
   for( i = 1; i<cnt; i++ ) {
-    Node *n = b->_nodes[i];
+    Node *n = b->get_node(i);
     uint idx;
     // 2-address instructions have a virtual Copy matching their input
     // to their output
@@ -490,10 +490,10 @@
   dst_copy->set_req( didx, src_def );
   // Add copy to free list
   // _phc.free_spillcopy(b->_nodes[bindex]);
-  assert( b->_nodes[bindex] == dst_copy, "" );
+  assert( b->get_node(bindex) == dst_copy, "" );
   dst_copy->replace_by( dst_copy->in(didx) );
   dst_copy->set_req( didx, NULL);
-  b->_nodes.remove(bindex);
+  b->remove_node(bindex);
   if( bindex < b->_ihrp_index ) b->_ihrp_index--;
   if( bindex < b->_fhrp_index ) b->_fhrp_index--;
 
@@ -523,8 +523,8 @@
       bindex2 = b2->end_idx()-1;
     }
     // Get prior instruction
-    assert(bindex2 < b2->_nodes.size(), "index out of bounds");
-    Node *x = b2->_nodes[bindex2];
+    assert(bindex2 < b2->number_of_nodes(), "index out of bounds");
+    Node *x = b2->get_node(bindex2);
     if( x == prev_copy ) {      // Previous copy in copy chain?
       if( prev_copy == src_copy)// Found end of chain and all interferences
         break;                  // So break out of loop
@@ -769,14 +769,14 @@
 // Conservative (but pessimistic) copy coalescing of a single block
 void PhaseConservativeCoalesce::coalesce( Block *b ) {
   // Bail out on infrequent blocks
-  if (b->is_uncommon(&_phc._cfg)) {
+  if (_phc._cfg.is_uncommon(b)) {
     return;
   }
   // Check this block for copies.
   for( uint i = 1; i<b->end_idx(); i++ ) {
     // Check for actual copies on inputs.  Coalesce a copy into its
     // input if use and copy's input are compatible.
-    Node *copy1 = b->_nodes[i];
+    Node *copy1 = b->get_node(i);
     uint idx1 = copy1->is_Copy();
     if( !idx1 ) continue;       // Not a copy
 
--- a/src/share/vm/opto/coalesce.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/coalesce.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -29,7 +29,6 @@
 
 class LoopTree;
 class LRG;
-class LRG_List;
 class Matcher;
 class PhaseIFG;
 class PhaseCFG;
--- a/src/share/vm/opto/compile.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/compile.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -47,6 +47,7 @@
 #include "opto/machnode.hpp"
 #include "opto/macro.hpp"
 #include "opto/matcher.hpp"
+#include "opto/mathexactnode.hpp"
 #include "opto/memnode.hpp"
 #include "opto/mulnode.hpp"
 #include "opto/node.hpp"
@@ -657,7 +658,8 @@
                   _inlining_progress(false),
                   _inlining_incrementally(false),
                   _print_inlining_list(NULL),
-                  _print_inlining(0) {
+                  _print_inlining_idx(0),
+                  _preserve_jvm_state(0) {
   C = this;
 
   CompileWrapper cw(this);
@@ -682,6 +684,8 @@
   set_print_assembly(print_opto_assembly);
   set_parsed_irreducible_loop(false);
 #endif
+  set_print_inlining(PrintInlining || method()->has_option("PrintInlining") NOT_PRODUCT( || PrintOptoInlining));
+  set_print_intrinsics(PrintIntrinsics || method()->has_option("PrintIntrinsics"));
 
   if (ProfileTraps) {
     // Make sure the method being compiled gets its own MDO,
@@ -713,7 +717,7 @@
   PhaseGVN gvn(node_arena(), estimated_size);
   set_initial_gvn(&gvn);
 
-  if (PrintInlining  || PrintIntrinsics NOT_PRODUCT( || PrintOptoInlining)) {
+  if (print_inlining() || print_intrinsics()) {
     _print_inlining_list = new (comp_arena())GrowableArray<PrintInliningBuffer>(comp_arena(), 1, 1, PrintInliningBuffer());
   }
   { // Scope for timing the parser
@@ -763,7 +767,7 @@
       return;
     }
     JVMState* jvms = build_start_state(start(), tf());
-    if ((jvms = cg->generate(jvms)) == NULL) {
+    if ((jvms = cg->generate(jvms, NULL)) == NULL) {
       record_method_not_compilable("method parse failed");
       return;
     }
@@ -940,7 +944,8 @@
     _inlining_progress(false),
     _inlining_incrementally(false),
     _print_inlining_list(NULL),
-    _print_inlining(0) {
+    _print_inlining_idx(0),
+    _preserve_jvm_state(0) {
   C = this;
 
 #ifndef PRODUCT
@@ -1300,6 +1305,10 @@
 
   // Array pointers need some flattening
   const TypeAryPtr *ta = tj->isa_aryptr();
+  if (ta && ta->is_stable()) {
+    // Erase stability property for alias analysis.
+    tj = ta = ta->cast_to_stable(false);
+  }
   if( ta && is_known_inst ) {
     if ( offset != Type::OffsetBot &&
          offset > arrayOopDesc::length_offset_in_bytes() ) {
@@ -1354,7 +1363,7 @@
     // During the 2nd round of IterGVN, NotNull castings are removed.
     // Make sure the Bottom and NotNull variants alias the same.
     // Also, make sure exact and non-exact variants alias the same.
-    if( ptr == TypePtr::NotNull || ta->klass_is_exact() ) {
+    if (ptr == TypePtr::NotNull || ta->klass_is_exact() || ta->speculative() != NULL) {
       tj = ta = TypeAryPtr::make(TypePtr::BotPTR,ta->ary(),ta->klass(),false,offset);
     }
   }
@@ -1379,6 +1388,9 @@
       // Also, make sure exact and non-exact variants alias the same.
       tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset);
     }
+    if (to->speculative() != NULL) {
+      tj = to = TypeInstPtr::make(to->ptr(),to->klass(),to->klass_is_exact(),to->const_oop(),to->offset(), to->instance_id());
+    }
     // Canonicalize the holder of this field
     if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) {
       // First handle header references such as a LoadKlassNode, even if the
@@ -1500,6 +1512,7 @@
   _index = i;
   _adr_type = at;
   _field = NULL;
+  _element = NULL;
   _is_rewritable = true; // default
   const TypeOopPtr *atoop = (at != NULL) ? at->isa_oopptr() : NULL;
   if (atoop != NULL && atoop->is_known_instance()) {
@@ -1618,6 +1631,16 @@
           && flat->is_instptr()->klass() == env()->Class_klass())
         alias_type(idx)->set_rewritable(false);
     }
+    if (flat->isa_aryptr()) {
+#ifdef ASSERT
+      const int header_size_min  = arrayOopDesc::base_offset_in_bytes(T_BYTE);
+      // (T_BYTE has the weakest alignment and size restrictions...)
+      assert(flat->offset() < header_size_min, "array body reference must be OffsetBot");
+#endif
+      if (flat->offset() == TypePtr::OffsetBot) {
+        alias_type(idx)->set_element(flat->is_aryptr()->elem());
+      }
+    }
     if (flat->isa_klassptr()) {
       if (flat->offset() == in_bytes(Klass::super_check_offset_offset()))
         alias_type(idx)->set_rewritable(false);
@@ -1680,7 +1703,7 @@
   else
     t = TypeOopPtr::make_from_klass_raw(field->holder());
   AliasType* atp = alias_type(t->add_offset(field->offset_in_bytes()), field);
-  assert(field->is_final() == !atp->is_rewritable(), "must get the rewritable bits correct");
+  assert((field->is_final() || field->is_stable()) == !atp->is_rewritable(), "must get the rewritable bits correct");
   return atp;
 }
 
@@ -1996,6 +2019,12 @@
     if (failing())  return;
   }
 
+  // Remove the speculative part of types and clean up the graph from
+  // the extra CastPP nodes whose only purpose is to carry them. Do
+  // that early so that optimizations are not disrupted by the extra
+  // CastPP nodes.
+  remove_speculative_types(igvn);
+
   // No more new expensive nodes will be added to the list from here
   // so keep only the actual candidates for optimizations.
   cleanup_expensive_nodes(igvn);
@@ -2261,7 +2290,7 @@
     if (block->is_connector() && !Verbose) {
       continue;
     }
-    n = block->_nodes[0];
+    n = block->head();
     if (pcs && n->_idx < pc_limit) {
       tty->print("%3.3x   ", pcs[n->_idx]);
     } else {
@@ -2276,12 +2305,12 @@
 
     // For all instructions
     Node *delay = NULL;
-    for (uint j = 0; j < block->_nodes.size(); j++) {
+    for (uint j = 0; j < block->number_of_nodes(); j++) {
       if (VMThread::should_terminate()) {
         cut_short = true;
         break;
       }
-      n = block->_nodes[j];
+      n = block->get_node(j);
       if (valid_bundle_info(n)) {
         Bundle* bundle = node_bundling(n);
         if (bundle->used_in_unconditional_delay()) {
@@ -2634,7 +2663,7 @@
             addp->in(AddPNode::Base) == n->in(AddPNode::Base),
             "Base pointers must match" );
 #ifdef _LP64
-    if ((UseCompressedOops || UseCompressedKlassPointers) &&
+    if ((UseCompressedOops || UseCompressedClassPointers) &&
         addp->Opcode() == Op_ConP &&
         addp == n->in(AddPNode::Base) &&
         n->in(AddPNode::Offset)->is_Con()) {
@@ -2972,6 +3001,37 @@
       n->set_req(MemBarNode::Precedent, top());
     }
     break;
+    // Must set a control edge on all nodes that produce a FlagsProj
+    // so they can't escape the block that consumes the flags.
+    // Must also set the non-throwing branch as the control
+    // for all nodes that depend on the result, unless the node
+    // already has a control that isn't the control of the
+    // flag producer.
+  case Op_FlagsProj:
+    {
+      MathExactNode* math = (MathExactNode*)  n->in(0);
+      Node* ctrl = math->control_node();
+      Node* non_throwing = math->non_throwing_branch();
+      math->set_req(0, ctrl);
+
+      Node* result = math->result_node();
+      if (result != NULL) {
+        for (DUIterator_Fast jmax, j = result->fast_outs(jmax); j < jmax; j++) {
+          Node* out = result->fast_out(j);
+          // Phi nodes shouldn't be moved. They would only match below if they
+          // had the same control as the MathExactNode. The only time that
+          // would happen is if the Phi is also an input to the MathExact
+          if (!out->is_Phi()) {
+            if (out->in(0) == NULL) {
+              out->set_req(0, non_throwing);
+            } else if (out->in(0) == ctrl) {
+              out->set_req(0, non_throwing);
+            }
+          }
+        }
+      }
+    }
+    break;
   default:
     assert( !n->is_Call(), "" );
     assert( !n->is_Mem(), "" );
@@ -3021,7 +3081,7 @@
 
   // Skip next transformation if compressed oops are not used.
   if ((UseCompressedOops && !Matcher::gen_narrow_oop_implicit_null_checks()) ||
-      (!UseCompressedOops && !UseCompressedKlassPointers))
+      (!UseCompressedOops && !UseCompressedClassPointers))
     return;
 
   // Go over safepoints nodes to skip DecodeN/DecodeNKlass nodes for debug edges.
@@ -3599,7 +3659,7 @@
 }
 
 void Compile::dump_inlining() {
-  if (PrintInlining || PrintIntrinsics NOT_PRODUCT( || PrintOptoInlining)) {
+  if (print_inlining() || print_intrinsics()) {
     // Print inlining message for candidates that we couldn't inline
     // for lack of space or non constant receiver
     for (int i = 0; i < _late_inlines.length(); i++) {
@@ -3623,7 +3683,7 @@
       }
     }
     for (int i = 0; i < _print_inlining_list->length(); i++) {
-      tty->print(_print_inlining_list->at(i).ss()->as_string());
+      tty->print(_print_inlining_list->adr_at(i)->ss()->as_string());
     }
   }
 }
@@ -3751,6 +3811,45 @@
   }
 }
 
+/**
+ * Remove the speculative part of types and clean up the graph
+ */
+void Compile::remove_speculative_types(PhaseIterGVN &igvn) {
+  if (UseTypeSpeculation) {
+    Unique_Node_List worklist;
+    worklist.push(root());
+    int modified = 0;
+    // Go over all type nodes that carry a speculative type, drop the
+    // speculative part of the type and enqueue the node for an igvn
+    // which may optimize it out.
+    for (uint next = 0; next < worklist.size(); ++next) {
+      Node *n  = worklist.at(next);
+      if (n->is_Type() && n->as_Type()->type()->isa_oopptr() != NULL &&
+          n->as_Type()->type()->is_oopptr()->speculative() != NULL) {
+        TypeNode* tn = n->as_Type();
+        const TypeOopPtr* t = tn->type()->is_oopptr();
+        bool in_hash = igvn.hash_delete(n);
+        assert(in_hash, "node should be in igvn hash table");
+        tn->set_type(t->remove_speculative());
+        igvn.hash_insert(n);
+        igvn._worklist.push(n); // give it a chance to go away
+        modified++;
+      }
+      uint max = n->len();
+      for( uint i = 0; i < max; ++i ) {
+        Node *m = n->in(i);
+        if (not_a_node(m))  continue;
+        worklist.push(m);
+      }
+    }
+    // Drop the speculative part of all types in the igvn's type table
+    igvn.remove_speculative_types();
+    if (modified > 0) {
+      igvn.optimize();
+    }
+  }
+}
+
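
remove_speculative_types above is a grow-while-iterating worklist walk from the root, with Unique_Node_List guaranteeing each node is pushed at most once. A self-contained sketch of that traversal shape (toy node type, not HotSpot's):

    #include <cstddef>
    #include <set>
    #include <vector>

    struct NodeSketch { std::vector<NodeSketch*> in; };  // toy: inputs only

    // Visit every node reachable from root exactly once, growing the
    // worklist while iterating over it, as Unique_Node_List does.
    static void walk_unique(NodeSketch* root) {
      std::vector<NodeSketch*> worklist;
      std::set<NodeSketch*> seen;
      worklist.push_back(root);
      seen.insert(root);
      for (size_t next = 0; next < worklist.size(); ++next) {
        NodeSketch* n = worklist[next];
        // ... process n here, e.g. drop the speculative part of its type ...
        for (size_t i = 0; i < n->in.size(); ++i) {
          NodeSketch* m = n->in[i];
          if (m != NULL && seen.insert(m).second) {
            worklist.push_back(m);
          }
        }
      }
    }
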
 // Auxiliary method to support randomized stressing/fuzzing.
 //
 // This method can be called an arbitrary number of times, with current count
--- a/src/share/vm/opto/compile.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/compile.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -72,6 +72,7 @@
 class StartNode;
 class SafePointNode;
 class JVMState;
+class Type;
 class TypeData;
 class TypePtr;
 class TypeOopPtr;
@@ -119,6 +120,7 @@
     int             _index;         // unique index, used with MergeMemNode
     const TypePtr*  _adr_type;      // normalized address type
     ciField*        _field;         // relevant instance field, or null if none
+    const Type*     _element;       // relevant array element type, or null if none
     bool            _is_rewritable; // false if the memory is write-once only
     int             _general_index; // if this is type is an instance, the general
                                     // type that this is an instance of
@@ -129,6 +131,7 @@
     int             index()         const { return _index; }
     const TypePtr*  adr_type()      const { return _adr_type; }
     ciField*        field()         const { return _field; }
+    const Type*     element()       const { return _element; }
     bool            is_rewritable() const { return _is_rewritable; }
     bool            is_volatile()   const { return (_field ? _field->is_volatile() : false); }
     int             general_index() const { return (_general_index != 0) ? _general_index : _index; }
@@ -137,7 +140,14 @@
     void set_field(ciField* f) {
       assert(!_field,"");
       _field = f;
-      if (f->is_final())  _is_rewritable = false;
+      if (f->is_final() || f->is_stable()) {
+        // In the case of @Stable, multiple writes are possible but may be assumed to be no-ops.
+        _is_rewritable = false;
+      }
+    }
+    void set_element(const Type* e) {
+      assert(_element == NULL, "");
+      _element = e;
     }
 
     void print_on(outputStream* st) PRODUCT_RETURN;
@@ -302,6 +312,8 @@
   bool                  _do_method_data_update; // True if we generate code to update MethodData*s
   int                   _AliasLevel;            // Locally-adjusted version of AliasLevel flag.
   bool                  _print_assembly;        // True if we should dump assembly code for this compilation
+  bool                  _print_inlining;        // True if we should print inlining for this compilation
+  bool                  _print_intrinsics;      // True if we should print intrinsics for this compilation
 #ifndef PRODUCT
   bool                  _trace_opto_output;
   bool                  _parsed_irreducible_loop; // True if ciTypeFlow detected irreducible loops during parsing
@@ -404,7 +416,7 @@
   };
 
   GrowableArray<PrintInliningBuffer>* _print_inlining_list;
-  int _print_inlining;
+  int _print_inlining_idx;
 
   // Only keep nodes in the expensive node list that need to be optimized
   void cleanup_expensive_nodes(PhaseIterGVN &igvn);
@@ -412,28 +424,33 @@
   static int cmp_expensive_nodes(Node** n1, Node** n2);
   // Expensive nodes list already sorted?
   bool expensive_nodes_sorted() const;
+  // Remove the speculative part of types and clean up the graph
+  void remove_speculative_types(PhaseIterGVN &igvn);
+
+  // Are we within a PreserveJVMState block?
+  int _preserve_jvm_state;
 
  public:
 
   outputStream* print_inlining_stream() const {
-    return _print_inlining_list->at(_print_inlining).ss();
+    return _print_inlining_list->adr_at(_print_inlining_idx)->ss();
   }
 
   void print_inlining_skip(CallGenerator* cg) {
-    if (PrintInlining) {
-      _print_inlining_list->at(_print_inlining).set_cg(cg);
-      _print_inlining++;
-      _print_inlining_list->insert_before(_print_inlining, PrintInliningBuffer());
+    if (_print_inlining) {
+      _print_inlining_list->adr_at(_print_inlining_idx)->set_cg(cg);
+      _print_inlining_idx++;
+      _print_inlining_list->insert_before(_print_inlining_idx, PrintInliningBuffer());
     }
   }
 
   void print_inlining_insert(CallGenerator* cg) {
-    if (PrintInlining) {
+    if (_print_inlining) {
       for (int i = 0; i < _print_inlining_list->length(); i++) {
-        if (_print_inlining_list->at(i).cg() == cg) {
+        if (_print_inlining_list->adr_at(i)->cg() == cg) {
           _print_inlining_list->insert_before(i+1, PrintInliningBuffer());
-          _print_inlining = i+1;
-          _print_inlining_list->at(i).set_cg(NULL);
+          _print_inlining_idx = i+1;
+          _print_inlining_list->adr_at(i)->set_cg(NULL);
           return;
         }
       }
@@ -562,6 +579,10 @@
   int               AliasLevel() const          { return _AliasLevel; }
   bool              print_assembly() const       { return _print_assembly; }
   void          set_print_assembly(bool z)       { _print_assembly = z; }
+  bool              print_inlining() const       { return _print_inlining; }
+  void          set_print_inlining(bool z)       { _print_inlining = z; }
+  bool              print_intrinsics() const     { return _print_intrinsics; }
+  void          set_print_intrinsics(bool z)     { _print_intrinsics = z; }
   // check the CompilerOracle for special behaviours for this compile
   bool          method_has_option(const char * option) {
     return method() != NULL && method()->has_option(option);
@@ -804,7 +825,9 @@
 
   // Decide how to build a call.
   // The profile factor is a discount to apply to this site's interp. profile.
-  CallGenerator*    call_generator(ciMethod* call_method, int vtable_index, bool call_does_dispatch, JVMState* jvms, bool allow_inline, float profile_factor, bool allow_intrinsics = true, bool delayed_forbidden = false);
+  CallGenerator*    call_generator(ciMethod* call_method, int vtable_index, bool call_does_dispatch,
+                                   JVMState* jvms, bool allow_inline, float profile_factor, ciKlass* speculative_receiver_type = NULL,
+                                   bool allow_intrinsics = true, bool delayed_forbidden = false);
   bool should_delay_inlining(ciMethod* call_method, JVMState* jvms) {
     return should_delay_string_inlining(call_method, jvms) ||
            should_delay_boxing_inlining(call_method, jvms);
@@ -1140,6 +1163,21 @@
 
   // Auxiliary method for randomized fuzzing/stressing
   static bool randomized_select(int count);
+
+  // enter a PreserveJVMState block
+  void inc_preserve_jvm_state() {
+    _preserve_jvm_state++;
+  }
+
+  // exit a PreserveJVMState block
+  void dec_preserve_jvm_state() {
+    _preserve_jvm_state--;
+    assert(_preserve_jvm_state >= 0, "_preserve_jvm_state shouldn't be negative");
+  }
+
+  bool has_preserve_jvm_state() const {
+    return _preserve_jvm_state > 0;
+  }
 };
 
 #endif // SHARE_VM_OPTO_COMPILE_HPP
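
The _preserve_jvm_state counter is maintained RAII-style by PreserveJVMState's constructor and destructor (see the graphKit.cpp hunk further down), so has_preserve_jvm_state() is true exactly while such a block is live, even across early returns. In miniature (illustrative names only):

    // RAII guard analogous to PreserveJVMState: the decrement always
    // runs on scope exit, keeping inc/dec_preserve_jvm_state balanced.
    struct PreserveSketch {
      int& _count;
      explicit PreserveSketch(int& c) : _count(c) { ++_count; }
      ~PreserveSketch() { --_count; }
    };
    // usage: { PreserveSketch guard(count); /* count is raised here */ }
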
--- a/src/share/vm/opto/connode.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/connode.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -630,7 +630,7 @@
   if (t == Type::TOP) return Type::TOP;
   assert (t != TypePtr::NULL_PTR, "null klass?");
 
-  assert(UseCompressedKlassPointers && t->isa_klassptr(), "only klass ptr here");
+  assert(UseCompressedClassPointers && t->isa_klassptr(), "only klass ptr here");
   return t->make_narrowklass();
 }
 
--- a/src/share/vm/opto/doCall.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/doCall.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -41,9 +41,9 @@
 #include "runtime/sharedRuntime.hpp"
 
 void trace_type_profile(Compile* C, ciMethod *method, int depth, int bci, ciMethod *prof_method, ciKlass *prof_klass, int site_count, int receiver_count) {
-  if (TraceTypeProfile || PrintInlining NOT_PRODUCT(|| PrintOptoInlining)) {
+  if (TraceTypeProfile || C->print_inlining()) {
     outputStream* out = tty;
-    if (!PrintInlining) {
+    if (!C->print_inlining()) {
       if (NOT_PRODUCT(!PrintOpto &&) !PrintCompilation) {
         method->print_short_name();
         tty->cr();
@@ -63,7 +63,8 @@
 
 CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool call_does_dispatch,
                                        JVMState* jvms, bool allow_inline,
-                                       float prof_factor, bool allow_intrinsics, bool delayed_forbidden) {
+                                       float prof_factor, ciKlass* speculative_receiver_type,
+                                       bool allow_intrinsics, bool delayed_forbidden) {
   ciMethod*       caller   = jvms->method();
   int             bci      = jvms->bci();
   Bytecodes::Code bytecode = caller->java_code_at_bci(bci);
@@ -110,18 +111,28 @@
   // then we return it as the inlined version of the call.
   // We do this before the strict f.p. check below because the
   // intrinsics handle strict f.p. correctly.
+  CallGenerator* cg_intrinsic = NULL;
   if (allow_inline && allow_intrinsics) {
     CallGenerator* cg = find_intrinsic(callee, call_does_dispatch);
     if (cg != NULL) {
       if (cg->is_predicted()) {
         // Code without intrinsic but, hopefully, inlined.
         CallGenerator* inline_cg = this->call_generator(callee,
-              vtable_index, call_does_dispatch, jvms, allow_inline, prof_factor, false);
+              vtable_index, call_does_dispatch, jvms, allow_inline, prof_factor, speculative_receiver_type, false);
         if (inline_cg != NULL) {
           cg = CallGenerator::for_predicted_intrinsic(cg, inline_cg);
         }
       }
-      return cg;
+
+      // If the intrinsic does the virtual dispatch, we try to use the type profile
+      // first, and hopefully inline it as the regular virtual call below.
+      // We will retry the intrinsic if nothing else has claimed it afterwards.
+      if (cg->does_virtual_dispatch()) {
+        cg_intrinsic = cg;
+        cg = NULL;
+      } else {
+        return cg;
+      }
     }
   }
 
@@ -202,8 +213,24 @@
       // The major receiver's count >= TypeProfileMajorReceiverPercent of site_count.
       bool have_major_receiver = (100.*profile.receiver_prob(0) >= (float)TypeProfileMajorReceiverPercent);
       ciMethod* receiver_method = NULL;
-      if (have_major_receiver || profile.morphism() == 1 ||
-          (profile.morphism() == 2 && UseBimorphicInlining)) {
+
+      int morphism = profile.morphism();
+      if (speculative_receiver_type != NULL) {
+        // We have a speculative type, we should be able to resolve
+        // the call. We do that before looking at the profiling at
+        // this invoke because it may lead to bimorphic inlining which
+        // a speculative type should help us avoid.
+        receiver_method = callee->resolve_invoke(jvms->method()->holder(),
+                                                 speculative_receiver_type);
+        if (receiver_method == NULL) {
+          speculative_receiver_type = NULL;
+        } else {
+          morphism = 1;
+        }
+      }
+      if (receiver_method == NULL &&
+          (have_major_receiver || morphism == 1 ||
+           (morphism == 2 && UseBimorphicInlining))) {
         // receiver_method = profile.method();
         // Profiles do not suggest methods now.  Look it up in the major receiver.
         receiver_method = callee->resolve_invoke(jvms->method()->holder(),
@@ -217,7 +244,7 @@
           // Look up second receiver.
           CallGenerator* next_hit_cg = NULL;
           ciMethod* next_receiver_method = NULL;
-          if (profile.morphism() == 2 && UseBimorphicInlining) {
+          if (morphism == 2 && UseBimorphicInlining) {
             next_receiver_method = callee->resolve_invoke(jvms->method()->holder(),
                                                                profile.receiver(1));
             if (next_receiver_method != NULL) {
@@ -232,11 +259,10 @@
             }
           }
           CallGenerator* miss_cg;
-          Deoptimization::DeoptReason reason = (profile.morphism() == 2) ?
+          Deoptimization::DeoptReason reason = morphism == 2 ?
                                     Deoptimization::Reason_bimorphic :
                                     Deoptimization::Reason_class_check;
-          if (( profile.morphism() == 1 ||
-               (profile.morphism() == 2 && next_hit_cg != NULL) ) &&
+          if ((morphism == 1 || (morphism == 2 && next_hit_cg != NULL)) &&
               !too_many_traps(jvms->method(), jvms->bci(), reason)
              ) {
             // Generate uncommon trap for class check failure path
@@ -250,6 +276,7 @@
           }
           if (miss_cg != NULL) {
             if (next_hit_cg != NULL) {
+              assert(speculative_receiver_type == NULL, "shouldn't end up here if we used speculation");
               trace_type_profile(C, jvms->method(), jvms->depth() - 1, jvms->bci(), next_receiver_method, profile.receiver(1), site_count, profile.receiver_count(1));
               // We don't need to record dependency on a receiver here and below.
               // Whenever we inline, the dependency is added by Parse::Parse().
@@ -257,7 +284,9 @@
             }
             if (miss_cg != NULL) {
               trace_type_profile(C, jvms->method(), jvms->depth() - 1, jvms->bci(), receiver_method, profile.receiver(0), site_count, receiver_count);
-              CallGenerator* cg = CallGenerator::for_predicted_call(profile.receiver(0), miss_cg, hit_cg, profile.receiver_prob(0));
+              ciKlass* k = speculative_receiver_type != NULL ? speculative_receiver_type : profile.receiver(0);
+              float hit_prob = speculative_receiver_type != NULL ? 1.0 : profile.receiver_prob(0);
+              CallGenerator* cg = CallGenerator::for_predicted_call(k, miss_cg, hit_cg, hit_prob);
               if (cg != NULL)  return cg;
             }
           }
@@ -266,6 +295,13 @@
     }
   }
 
+  // Nothing claimed the intrinsic; we go with straightforward inlining
+  // of the already discovered intrinsic.
+  if (allow_inline && allow_intrinsics && cg_intrinsic != NULL) {
+    assert(cg_intrinsic->does_virtual_dispatch(), "sanity");
+    return cg_intrinsic;
+  }
+
   // There was no special inlining tactic, or it bailed out.
   // Use a more generic tactic, like a simple call.
   if (call_does_dispatch) {
@@ -429,13 +465,16 @@
   int       vtable_index       = Method::invalid_vtable_index;
   bool      call_does_dispatch = false;
 
+  // Speculative type of the receiver if any
+  ciKlass* speculative_receiver_type = NULL;
   if (is_virtual_or_interface) {
-    Node*             receiver_node = stack(sp() - nargs);
+    Node* receiver_node             = stack(sp() - nargs);
     const TypeOopPtr* receiver_type = _gvn.type(receiver_node)->isa_oopptr();
     // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
     callee = C->optimize_virtual_call(method(), bci(), klass, orig_callee, receiver_type,
                                       is_virtual,
                                       call_does_dispatch, vtable_index);  // out-parameters
+    speculative_receiver_type = receiver_type != NULL ? receiver_type->speculative_type() : NULL;
   }
 
   // Note:  It's OK to try to inline a virtual call.
@@ -451,7 +490,7 @@
   // Decide call tactic.
   // This call checks with CHA, the interpreter profile, intrinsics table, etc.
   // It decides whether inlining is desirable or not.
-  CallGenerator* cg = C->call_generator(callee, vtable_index, call_does_dispatch, jvms, try_inline, prof_factor());
+  CallGenerator* cg = C->call_generator(callee, vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), speculative_receiver_type);
 
   // NOTE:  Don't use orig_callee and callee after this point!  Use cg->method() instead.
   orig_callee = callee = NULL;
@@ -460,6 +499,10 @@
   // Round double arguments before call
   round_double_arguments(cg->method());
 
+  // Feed profiling data for arguments to the type system so it can
+  // propagate it as speculative types
+  record_profiled_arguments_for_speculation(cg->method(), bc());
+
 #ifndef PRODUCT
   // bump global counters for calls
   count_compiled_calls(/*at_method_entry*/ false, cg->is_inline());
@@ -474,11 +517,18 @@
   // save across call, for a subsequent cast_not_null.
   Node* receiver = has_receiver ? argument(0) : NULL;
 
+  // The extra CheckCastPP for speculative types messes with PhaseStringOpts
+  if (receiver != NULL && !call_does_dispatch && !cg->is_string_late_inline()) {
+    // Feed profiling data for a single receiver to the type system so
+    // it can propagate it as a speculative type
+    receiver = record_profiled_receiver_for_speculation(receiver);
+  }
+
   // Bump method data counters (We profile *before* the call is made
   // because exceptions don't return to the call site.)
   profile_call(receiver);
 
-  JVMState* new_jvms = cg->generate(jvms);
+  JVMState* new_jvms = cg->generate(jvms, this);
   if (new_jvms == NULL) {
     // When inlining attempt fails (e.g., too many arguments),
     // it may contaminate the current compile state, making it
@@ -491,8 +541,8 @@
     // the call site, perhaps because it did not match a pattern the
     // intrinsic was expecting to optimize. Should always be possible to
     // get a normal java call that may inline in that case
-    cg = C->call_generator(cg->method(), vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), /* allow_intrinsics= */ false);
-    if ((new_jvms = cg->generate(jvms)) == NULL) {
+    cg = C->call_generator(cg->method(), vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), speculative_receiver_type, /* allow_intrinsics= */ false);
+    if ((new_jvms = cg->generate(jvms, this)) == NULL) {
       guarantee(failing(), "call failed to generate:  calls should work");
       return;
     }
@@ -590,6 +640,16 @@
       null_assert(peek());
       set_bci(iter().cur_bci()); // put it back
     }
+    BasicType ct = ctype->basic_type();
+    if (ct == T_OBJECT || ct == T_ARRAY) {
+      ciKlass* better_type = method()->return_profiled_type(bci());
+      if (UseTypeSpeculation && better_type != NULL) {
+        // If profiling reports a single type for the return value,
+        // feed it to the type system so it can propagate it as a
+        // speculative type
+        record_profile_for_speculation(stack(sp()-1), better_type);
+      }
+    }
   }
 
   // Restart record of parsing work after possible inlining of call
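
The retry logic added above amounts to: an intrinsic that still performs a virtual dispatch is parked so the type profile gets first shot at devirtualizing the call, and if nothing better claims it, the parked intrinsic is used after all. A toy rendering of that decision (hypothetical types, not the real CallGenerator API):

    #include <cstddef>

    struct CGSketch { bool does_virtual_dispatch; };  // stand-in type

    // Park a virtual-dispatching intrinsic, prefer a profile-based
    // generator, and fall back to the parked intrinsic if none appears.
    static CGSketch* pick_call_generator(CGSketch* intrinsic,
                                         CGSketch* profiled) {
      CGSketch* parked = NULL;
      if (intrinsic != NULL) {
        if (!intrinsic->does_virtual_dispatch) return intrinsic;  // claim now
        parked = intrinsic;                                       // retry later
      }
      if (profiled != NULL) return profiled;  // profile claimed the call
      return parked;                          // nothing better: use intrinsic
    }
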
--- a/src/share/vm/opto/domgraph.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/domgraph.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -211,21 +211,21 @@
 uint Block_Stack::most_frequent_successor( Block *b ) {
   uint freq_idx = 0;
   int eidx = b->end_idx();
-  Node *n = b->_nodes[eidx];
+  Node *n = b->get_node(eidx);
   int op = n->is_Mach() ? n->as_Mach()->ideal_Opcode() : n->Opcode();
   switch( op ) {
   case Op_CountedLoopEnd:
   case Op_If: {               // Split frequency amongst children
     float prob = n->as_MachIf()->_prob;
     // Is succ[0] the TRUE branch or the FALSE branch?
-    if( b->_nodes[eidx+1]->Opcode() == Op_IfFalse )
+    if( b->get_node(eidx+1)->Opcode() == Op_IfFalse )
       prob = 1.0f - prob;
     freq_idx = prob < PROB_FAIR;      // freq=1 for succ[0] < 0.5 prob
     break;
   }
   case Op_Catch:                // Split frequency amongst children
     for( freq_idx = 0; freq_idx < b->_num_succs; freq_idx++ )
-      if( b->_nodes[eidx+1+freq_idx]->as_CatchProj()->_con == CatchProjNode::fall_through_index )
+      if( b->get_node(eidx+1+freq_idx)->as_CatchProj()->_con == CatchProjNode::fall_through_index )
         break;
     // Handle case of no fall-thru (e.g., check-cast MUST throw an exception)
     if( freq_idx == b->_num_succs ) freq_idx = 0;
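
In the Op_If case above, _prob is the probability of the true branch; when succ[0] is the IfFalse projection the probability is inverted so that it describes succ[0], and freq_idx then names the likelier successor (PROB_FAIR is 0.5). The same computation as a standalone helper (illustrative only):

    // freq_idx for a two-way branch: after normalizing prob to describe
    // succ[0], pick succ[1] whenever succ[0] is the less likely edge.
    static unsigned hotter_successor(float prob, bool succ0_is_iffalse) {
      if (succ0_is_iffalse) prob = 1.0f - prob;
      return (prob < 0.5f) ? 1u : 0u;  // freq_idx = prob < PROB_FAIR
    }
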
--- a/src/share/vm/opto/escape.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/escape.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -780,6 +780,7 @@
       }
     } else {  // Allocate instance
       if (cik->is_subclass_of(_compile->env()->Thread_klass()) ||
+          cik->is_subclass_of(_compile->env()->Reference_klass()) ||
          !cik->is_instance_klass() || // StressReflectiveCode
           cik->as_instance_klass()->has_finalizer()) {
         es = PointsToNode::GlobalEscape;
--- a/src/share/vm/opto/gcm.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/gcm.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -106,12 +106,12 @@
     uint j = 0;
     if (pb->_num_succs != 1) {  // More than 1 successor?
       // Search for successor
-      uint max = pb->_nodes.size();
+      uint max = pb->number_of_nodes();
       assert( max > 1, "" );
       uint start = max - pb->_num_succs;
       // Find which output path belongs to projection
       for (j = start; j < max; j++) {
-        if( pb->_nodes[j] == in0 )
+        if( pb->get_node(j) == in0 )
           break;
       }
       assert( j < max, "must find" );
@@ -1031,8 +1031,8 @@
   Block* least       = LCA;
   double least_freq  = least->_freq;
   uint target        = get_latency_for_node(self);
-  uint start_latency = get_latency_for_node(LCA->_nodes[0]);
-  uint end_latency   = get_latency_for_node(LCA->_nodes[LCA->end_idx()]);
+  uint start_latency = get_latency_for_node(LCA->head());
+  uint end_latency   = get_latency_for_node(LCA->get_node(LCA->end_idx()));
   bool in_latency    = (target <= start_latency);
   const Block* root_block = get_block_for_node(_root);
 
@@ -1053,9 +1053,9 @@
     self->dump();
     tty->print_cr("#   B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
       LCA->_pre_order,
-      LCA->_nodes[0]->_idx,
+      LCA->head()->_idx,
       start_latency,
-      LCA->_nodes[LCA->end_idx()]->_idx,
+      LCA->get_node(LCA->end_idx())->_idx,
       end_latency,
       least_freq);
   }
@@ -1078,14 +1078,14 @@
     if (mach && LCA == root_block)
       break;
 
-    uint start_lat = get_latency_for_node(LCA->_nodes[0]);
+    uint start_lat = get_latency_for_node(LCA->head());
     uint end_idx   = LCA->end_idx();
-    uint end_lat   = get_latency_for_node(LCA->_nodes[end_idx]);
+    uint end_lat   = get_latency_for_node(LCA->get_node(end_idx));
     double LCA_freq = LCA->_freq;
 #ifndef PRODUCT
     if (trace_opto_pipelining()) {
       tty->print_cr("#   B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
-        LCA->_pre_order, LCA->_nodes[0]->_idx, start_lat, end_idx, end_lat, LCA_freq);
+        LCA->_pre_order, LCA->head()->_idx, start_lat, end_idx, end_lat, LCA_freq);
     }
 #endif
     cand_cnt++;
@@ -1346,7 +1346,7 @@
       Node* proj = _matcher._null_check_tests[i];
       Node* val  = _matcher._null_check_tests[i + 1];
       Block* block = get_block_for_node(proj);
-      block->implicit_null_check(this, proj, val, allowed_reasons);
+      implicit_null_check(block, proj, val, allowed_reasons);
       // The implicit_null_check will only perform the transformation
       // if the null branch is truly uncommon, *and* it leads to an
       // uncommon trap.  Combined with the too_many_traps guards
@@ -1367,7 +1367,7 @@
   visited.Clear();
   for (uint i = 0; i < number_of_blocks(); i++) {
     Block* block = get_block(i);
-    if (!block->schedule_local(this, _matcher, ready_cnt, visited)) {
+    if (!schedule_local(block, ready_cnt, visited)) {
       if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
         C->record_method_not_compilable("local schedule failed");
       }
@@ -1379,7 +1379,7 @@
   // clone the instructions on all paths below the Catch.
   for (uint i = 0; i < number_of_blocks(); i++) {
     Block* block = get_block(i);
-    block->call_catch_cleanup(this, C);
+    call_catch_cleanup(block);
   }
 
 #ifndef PRODUCT
@@ -1730,7 +1730,7 @@
 // Determine the probability of reaching successor 'i' from the receiver block.
 float Block::succ_prob(uint i) {
   int eidx = end_idx();
-  Node *n = _nodes[eidx];  // Get ending Node
+  Node *n = get_node(eidx);  // Get ending Node
 
   int op = n->Opcode();
   if (n->is_Mach()) {
@@ -1765,7 +1765,7 @@
     float prob  = n->as_MachIf()->_prob;
     assert(prob >= 0.0 && prob <= 1.0, "out of range probability");
     // If succ[i] is the FALSE branch, invert path info
-    if( _nodes[i + eidx + 1]->Opcode() == Op_IfFalse ) {
+    if( get_node(i + eidx + 1)->Opcode() == Op_IfFalse ) {
       return 1.0f - prob; // not taken
     } else {
       return prob; // taken
@@ -1777,7 +1777,7 @@
     return 1.0f/_num_succs;
 
   case Op_Catch: {
-    const CatchProjNode *ci = _nodes[i + eidx + 1]->as_CatchProj();
+    const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
     if (ci->_con == CatchProjNode::fall_through_index) {
       // Fall-thru path gets the lion's share.
       return 1.0f - PROB_UNLIKELY_MAG(5)*_num_succs;
@@ -1814,7 +1814,7 @@
 // Return the number of fall-through candidates for a block
 int Block::num_fall_throughs() {
   int eidx = end_idx();
-  Node *n = _nodes[eidx];  // Get ending Node
+  Node *n = get_node(eidx);  // Get ending Node
 
   int op = n->Opcode();
   if (n->is_Mach()) {
@@ -1838,7 +1838,7 @@
 
   case Op_Catch: {
     for (uint i = 0; i < _num_succs; i++) {
-      const CatchProjNode *ci = _nodes[i + eidx + 1]->as_CatchProj();
+      const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
       if (ci->_con == CatchProjNode::fall_through_index) {
         return 1;
       }
@@ -1866,14 +1866,14 @@
 // Return true if a specific successor could be fall-through target.
 bool Block::succ_fall_through(uint i) {
   int eidx = end_idx();
-  Node *n = _nodes[eidx];  // Get ending Node
+  Node *n = get_node(eidx);  // Get ending Node
 
   int op = n->Opcode();
   if (n->is_Mach()) {
     if (n->is_MachNullCheck()) {
       // In theory, either side can fall-thru; for simplicity's sake,
       // let's say only the false branch can now.
-      return _nodes[i + eidx + 1]->Opcode() == Op_IfFalse;
+      return get_node(i + eidx + 1)->Opcode() == Op_IfFalse;
     }
     op = n->as_Mach()->ideal_Opcode();
   }
@@ -1887,7 +1887,7 @@
     return true;
 
   case Op_Catch: {
-    const CatchProjNode *ci = _nodes[i + eidx + 1]->as_CatchProj();
+    const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
     return ci->_con == CatchProjNode::fall_through_index;
   }
 
@@ -1911,7 +1911,7 @@
 // Update the probability of a two-branch to be uncommon
 void Block::update_uncommon_branch(Block* ub) {
   int eidx = end_idx();
-  Node *n = _nodes[eidx];  // Get ending Node
+  Node *n = get_node(eidx);  // Get ending Node
 
   int op = n->as_Mach()->ideal_Opcode();
 
@@ -1927,7 +1927,7 @@
 
   // If ub is the true path, make the probability small, else
   // ub is the false path, and make the probability large
-  bool invert = (_nodes[s + eidx + 1]->Opcode() == Op_IfFalse);
+  bool invert = (get_node(s + eidx + 1)->Opcode() == Op_IfFalse);
 
   // Get existing probability
   float p = n->as_MachIf()->_prob;
--- a/src/share/vm/opto/generateOptoStub.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/generateOptoStub.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -61,6 +61,7 @@
   JVMState* jvms = new (C) JVMState(0);
   jvms->set_bci(InvocationEntryBci);
   jvms->set_monoff(max_map);
+  jvms->set_scloff(max_map);
   jvms->set_endoff(max_map);
   {
     SafePointNode *map = new (C) SafePointNode( max_map, jvms );
--- a/src/share/vm/opto/graphKit.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/graphKit.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -639,6 +639,7 @@
   _map    = kit->map();   // preserve the map
   _sp     = kit->sp();
   kit->set_map(clone_map ? kit->clone_map() : NULL);
+  Compile::current()->inc_preserve_jvm_state();
 #ifdef ASSERT
   _bci    = kit->bci();
   Parse* parser = kit->is_Parse();
@@ -656,6 +657,7 @@
 #endif
   kit->set_map(_map);
   kit->set_sp(_sp);
+  Compile::current()->dec_preserve_jvm_state();
 }
 
 
@@ -1373,17 +1375,70 @@
 
 //--------------------------replace_in_map-------------------------------------
 void GraphKit::replace_in_map(Node* old, Node* neww) {
-  this->map()->replace_edge(old, neww);
+  if (old == neww) {
+    return;
+  }
+
+  map()->replace_edge(old, neww);
 
   // Note: This operation potentially replaces any edge
   // on the map.  This includes locals, stack, and monitors
   // of the current (innermost) JVM state.
 
-  // We can consider replacing in caller maps.
-  // The idea would be that an inlined function's null checks
-  // can be shared with the entire inlining tree.
-  // The expense of doing this is that the PreserveJVMState class
-  // would have to preserve caller states too, with a deep copy.
+  if (!ReplaceInParentMaps) {
+    return;
+  }
+
+  // PreserveJVMState doesn't do a deep copy so we can't modify
+  // parents
+  if (Compile::current()->has_preserve_jvm_state()) {
+    return;
+  }
+
+  Parse* parser = is_Parse();
+  bool progress = true;
+  Node* ctrl = map()->in(0);
+  // Follow the chain of parsers and see whether the update can be
+  // done in the map of callers. We can do the replace for a caller if
+  // the current control post-dominates the control of a caller.
+  while (parser != NULL && parser->caller() != NULL && progress) {
+    progress = false;
+    Node* parent_map = parser->caller()->map();
+    assert(parser->exits().map()->jvms()->depth() == parser->caller()->depth(), "map mismatch");
+
+    Node* parent_ctrl = parent_map->in(0);
+
+    while (parent_ctrl->is_Region()) {
+      Node* n = parent_ctrl->as_Region()->is_copy();
+      if (n == NULL) {
+        break;
+      }
+      parent_ctrl = n;
+    }
+
+    for (;;) {
+      if (ctrl == parent_ctrl) {
+        // update the map of the exits, which is the one that will be
+        // used when compilation resumes after inlining
+        parser->exits().map()->replace_edge(old, neww);
+        progress = true;
+        break;
+      }
+      if (ctrl->is_Proj() && ctrl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none)) {
+        ctrl = ctrl->in(0)->in(0);
+      } else if (ctrl->is_Region()) {
+        Node* n = ctrl->as_Region()->is_copy();
+        if (n == NULL) {
+          break;
+        }
+        ctrl = n;
+      } else {
+        break;
+      }
+    }
+
+    parser = parser->parent_parser();
+  }
 }
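
The walk above climbs the control chain through trivial "copy" regions (and uncommon-trap If patterns) until it either meets the caller's control, in which case the caller's map can safely be updated too, or hits a merge it cannot see through. A toy version of the copy-skipping part (simplified sketch that ignores the uncommon-trap projection case):

    #include <cstddef>

    // A copy region has a single real input and can be seen through.
    struct CtrlSketch {
      CtrlSketch* copy_of;  // non-NULL iff this control is a skippable copy
    };

    // Climb from ctrl toward the caller's control; success means the
    // replace_edge may be propagated into the caller's map as well.
    static bool reaches_parent_ctrl(CtrlSketch* ctrl, CtrlSketch* parent) {
      while (true) {
        if (ctrl == parent) return true;          // reached caller's control
        if (ctrl->copy_of == NULL) return false;  // opaque merge: give up
        ctrl = ctrl->copy_of;                     // skip the trivial copy
      }
    }
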
 
 
@@ -1501,6 +1556,25 @@
   }
 }
 
+bool GraphKit::can_move_pre_barrier() const {
+  BarrierSet* bs = Universe::heap()->barrier_set();
+  switch (bs->kind()) {
+    case BarrierSet::G1SATBCT:
+    case BarrierSet::G1SATBCTLogging:
+      return true; // Can move it if no safepoint
+
+    case BarrierSet::CardTableModRef:
+    case BarrierSet::CardTableExtension:
+    case BarrierSet::ModRef:
+      return true; // There is no pre-barrier
+
+    case BarrierSet::Other:
+    default      :
+      ShouldNotReachHere();
+  }
+  return false;
+}
+
 void GraphKit::post_barrier(Node* ctl,
                             Node* store,
                             Node* obj,
@@ -2024,6 +2098,104 @@
   }
 }
 
+/**
+ * Record profiling data exact_kls for Node n with the type system so
+ * that it can propagate it (speculation)
+ *
+ * @param n          node that the type applies to
+ * @param exact_kls  type from profiling
+ *
+ * @return           node with improved type
+ */
+Node* GraphKit::record_profile_for_speculation(Node* n, ciKlass* exact_kls) {
+  const TypeOopPtr* current_type = _gvn.type(n)->isa_oopptr();
+  assert(UseTypeSpeculation, "type speculation must be on");
+  if (exact_kls != NULL &&
+      // nothing to improve if type is already exact
+      (current_type == NULL ||
+       (!current_type->klass_is_exact() &&
+        (current_type->speculative() == NULL ||
+         !current_type->speculative()->klass_is_exact())))) {
+    const TypeKlassPtr* tklass = TypeKlassPtr::make(exact_kls);
+    const TypeOopPtr* xtype = tklass->as_instance_type();
+    assert(xtype->klass_is_exact(), "Should be exact");
+
+    // Build a type with a speculative type (what we think we know
+    // about the type but will need a guard when we use it)
+    const TypeOopPtr* spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::OffsetBot, TypeOopPtr::InstanceBot, xtype);
+    // We're changing the type, so we need a new cast node to carry the
+    // new type. The new type depends on the control: what profiling
+    // tells us is only valid from here as far as we can tell.
+    Node* cast = new(C) CastPPNode(n, spec_type);
+    cast->init_req(0, control());
+    cast = _gvn.transform(cast);
+    replace_in_map(n, cast);
+    n = cast;
+  }
+  return n;
+}
+
+/**
+ * Record profiling data from receiver profiling at an invoke with the
+ * type system so that it can propagate it (speculation)
+ *
+ * @param n  receiver node
+ *
+ * @return           node with improved type
+ */
+Node* GraphKit::record_profiled_receiver_for_speculation(Node* n) {
+  if (!UseTypeSpeculation) {
+    return n;
+  }
+  ciKlass* exact_kls = profile_has_unique_klass();
+  return record_profile_for_speculation(n, exact_kls);
+}
+
+/**
+ * Record profiling data from argument profiling at an invoke with the
+ * type system so that it can propagate it (speculation)
+ *
+ * @param dest_method  target method for the call
+ * @param bc           what invoke bytecode is this?
+ */
+void GraphKit::record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc) {
+  if (!UseTypeSpeculation) {
+    return;
+  }
+  const TypeFunc* tf    = TypeFunc::make(dest_method);
+  int             nargs = tf->_domain->_cnt - TypeFunc::Parms;
+  int skip = Bytecodes::has_receiver(bc) ? 1 : 0;
+  for (int j = skip, i = 0; j < nargs && i < TypeProfileArgsLimit; j++) {
+    const Type *targ = tf->_domain->field_at(j + TypeFunc::Parms);
+    if (targ->basic_type() == T_OBJECT || targ->basic_type() == T_ARRAY) {
+      ciKlass* better_type = method()->argument_profiled_type(bci(), i);
+      if (better_type != NULL) {
+        record_profile_for_speculation(argument(j), better_type);
+      }
+      i++;
+    }
+  }
+}
+
+/**
+ * Record profiling data from parameter profiling at an invoke with
+ * the type system so that it can propagate it (speculation)
+ */
+void GraphKit::record_profiled_parameters_for_speculation() {
+  if (!UseTypeSpeculation) {
+    return;
+  }
+  for (int i = 0, j = 0; i < method()->arg_size() ; i++) {
+    if (_gvn.type(local(i))->isa_oopptr()) {
+      ciKlass* better_type = method()->parameter_profiled_type(j);
+      if (better_type != NULL) {
+        record_profile_for_speculation(local(i), better_type);
+      }
+      j++;
+    }
+  }
+}
+
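
The loop in record_profiled_arguments_for_speculation above walks the declared argument types, skips the receiver slot for instance-call bytecodes, and consumes one profile slot per object or array argument, up to TypeProfileArgsLimit. Its shape in isolation (toy basic-type enum, not HotSpot's):

    #include <vector>

    enum BTSketch { BT_INT, BT_OBJECT, BT_ARRAY, BT_DOUBLE };  // toy types

    // j indexes declared arguments, i indexes profile slots; only
    // object/array arguments consume a profile slot.
    static void for_each_profiled_arg(const std::vector<BTSketch>& sig,
                                      bool has_receiver, int profile_limit) {
      int skip = has_receiver ? 1 : 0;
      for (int j = skip, i = 0; j < (int)sig.size() && i < profile_limit; j++) {
        if (sig[j] == BT_OBJECT || sig[j] == BT_ARRAY) {
          // record_profile_for_speculation(argument(j), profiled_type(i));
          i++;
        }
      }
    }
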
 void GraphKit::round_double_result(ciMethod* dest_method) {
   // A non-strict method may return a double value which has an extended
   // exponent, but this must not be visible in a caller which is 'strict'
@@ -2103,7 +2275,7 @@
 // Null check oop.  Set null-path control into Region in slot 3.
 // Make a cast-not-nullness use the other not-null control.  Return cast.
 Node* GraphKit::null_check_oop(Node* value, Node* *null_control,
-                               bool never_see_null) {
+                               bool never_see_null, bool safe_for_replace) {
   // Initial NULL check taken path
   (*null_control) = top();
   Node* cast = null_check_common(value, T_OBJECT, false, null_control);
@@ -2121,6 +2293,9 @@
                   Deoptimization::Action_make_not_entrant);
     (*null_control) = top();    // NULL path is dead
   }
+  if ((*null_control) == top() && safe_for_replace) {
+    replace_in_map(value, cast);
+  }
 
   // Cast away null-ness on the result
   return cast;
@@ -2558,10 +2733,10 @@
 // If the profile has seen exactly one type, narrow to exactly that type.
 // Subsequent type checks will always fold up.
 Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj,
-                                             ciProfileData* data,
-                                             ciKlass* require_klass) {
+                                             ciKlass* require_klass,
+                                            ciKlass* spec_klass,
+                                             bool safe_for_replace) {
   if (!UseTypeProfile || !TypeProfileCasts) return NULL;
-  if (data == NULL)  return NULL;
 
   // Make sure we haven't already deoptimized from this tactic.
   if (too_many_traps(Deoptimization::Reason_class_check))
@@ -2569,15 +2744,15 @@
 
   // (No, this isn't a call, but it's enough like a virtual call
   // to use the same ciMethod accessor to get the profile info...)
-  ciCallProfile profile = method()->call_profile_at_bci(bci());
-  if (profile.count() >= 0 &&         // no cast failures here
-      profile.has_receiver(0) &&
-      profile.morphism() == 1) {
-    ciKlass* exact_kls = profile.receiver(0);
+  // If we have a speculative type, use it instead of profiling (which
+  // may not help us).
+  ciKlass* exact_kls = spec_klass == NULL ? profile_has_unique_klass() : spec_klass;
+  if (exact_kls != NULL) { // no cast failures here
     if (require_klass == NULL ||
         static_subtype_check(require_klass, exact_kls) == SSC_always_true) {
-      // If we narrow the type to match what the type profile sees,
-      // we can then remove the rest of the cast.
+      // If we narrow the type to match what the type profile sees or
+      // the speculative type, we can then remove the rest of the
+      // cast.
       // This is a win, even if the exact_kls is very specific,
       // because downstream operations, such as method calls,
       // will often benefit from the sharper type.
@@ -2589,7 +2764,9 @@
         uncommon_trap(Deoptimization::Reason_class_check,
                       Deoptimization::Action_maybe_recompile);
       }
-      replace_in_map(not_null_obj, exact_obj);
+      if (safe_for_replace) {
+        replace_in_map(not_null_obj, exact_obj);
+      }
       return exact_obj;
     }
     // assert(ssc == SSC_always_true)... except maybe the profile lied to us.
@@ -2598,11 +2775,59 @@
   return NULL;
 }
 
+/**
+ * Cast obj to type and emit guard unless we had too many traps here
+ * already
+ *
+ * @param obj       node being cast
+ * @param type      type to cast the node to
+ * @param not_null  true if we know the node cannot be null
+ */
+Node* GraphKit::maybe_cast_profiled_obj(Node* obj,
+                                        ciKlass* type,
+                                        bool not_null) {
+  // type == NULL if profiling tells us this object is always null
+  if (type != NULL) {
+    if (!too_many_traps(Deoptimization::Reason_null_check) &&
+        !too_many_traps(Deoptimization::Reason_class_check)) {
+      Node* not_null_obj = NULL;
+      // not_null is true if we know the object is not null and
+      // there's no need for a null check
+      if (!not_null) {
+        Node* null_ctl = top();
+        not_null_obj = null_check_oop(obj, &null_ctl, true, true);
+        assert(null_ctl->is_top(), "no null control here");
+      } else {
+        not_null_obj = obj;
+      }
+
+      Node* exact_obj = not_null_obj;
+      ciKlass* exact_kls = type;
+      Node* slow_ctl  = type_check_receiver(exact_obj, exact_kls, 1.0,
+                                            &exact_obj);
+      {
+        PreserveJVMState pjvms(this);
+        set_control(slow_ctl);
+        uncommon_trap(Deoptimization::Reason_class_check,
+                      Deoptimization::Action_maybe_recompile);
+      }
+      replace_in_map(not_null_obj, exact_obj);
+      obj = exact_obj;
+    }
+  } else {
+    if (!too_many_traps(Deoptimization::Reason_null_assert)) {
+      Node* exact_obj = null_assert(obj);
+      replace_in_map(obj, exact_obj);
+      obj = exact_obj;
+    }
+  }
+  return obj;
+}
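
// Hedged usage sketch: a parser holding a speculative type for obj can
// strengthen it with this guarded cast, deoptimizing on a mismatch.
// Hypothetical call site:
//
//   const TypeOopPtr* t = _gvn.type(obj)->is_oopptr();
//   ciKlass* spec = t->speculative_type();
//   if (spec != NULL) {
//     obj = maybe_cast_profiled_obj(obj, spec, /*not_null=*/ false);
//   }
//
// Afterwards obj carries the exact speculative type, unless the trap
// counters show this tactic has already failed too often at this site.
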
 
 //-------------------------------gen_instanceof--------------------------------
 // Generate an instance-of idiom.  Used by both the instance-of bytecode
 // and the reflective instance-of call.
-Node* GraphKit::gen_instanceof(Node* obj, Node* superklass) {
+Node* GraphKit::gen_instanceof(Node* obj, Node* superklass, bool safe_for_replace) {
   kill_dead_locals();           // Benefit all the uncommon traps
   assert( !stopped(), "dead parse path should be checked in callers" );
   assert(!TypePtr::NULL_PTR->higher_equal(_gvn.type(superklass)->is_klassptr()),
@@ -2623,7 +2848,7 @@
 
   // Null check; get casted pointer; set region slot 3
   Node* null_ctl = top();
-  Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null);
+  Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace);
 
   // If not_null_obj is dead, only null-path is taken
   if (stopped()) {              // Doing instance-of on a NULL?
@@ -2640,14 +2865,37 @@
     phi   ->del_req(_null_path);
   }
 
-  if (ProfileDynamicTypes && data != NULL) {
-    Node* cast_obj = maybe_cast_profiled_receiver(not_null_obj, data, NULL);
-    if (stopped()) {            // Profile disagrees with this path.
-      set_control(null_ctl);    // Null is the only remaining possibility.
-      return intcon(0);
+  // Do we know the type check always succeeds?
+  bool known_statically = false;
+  if (_gvn.type(superklass)->singleton()) {
+    ciKlass* superk = _gvn.type(superklass)->is_klassptr()->klass();
+    ciKlass* subk = _gvn.type(obj)->is_oopptr()->klass();
+    if (subk != NULL && subk->is_loaded()) {
+      int static_res = static_subtype_check(superk, subk);
+      known_statically = (static_res == SSC_always_true || static_res == SSC_always_false);
     }
-    if (cast_obj != NULL)
-      not_null_obj = cast_obj;
+  }
+
+  if (known_statically && UseTypeSpeculation) {
+    // If we know the type check always succeeds then we don't use the
+    // profiling data at this bytecode. Don't lose it; feed it to the
+    // type system as a speculative type.
+    not_null_obj = record_profiled_receiver_for_speculation(not_null_obj);
+  } else {
+    const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
+    // We may not have profiling here, or it may not help us. If we
+    // have a speculative type, use it to perform an exact cast.
+    ciKlass* spec_obj_type = obj_type->speculative_type();
+    if (spec_obj_type != NULL || (ProfileDynamicTypes && data != NULL)) {
+      Node* cast_obj = maybe_cast_profiled_receiver(not_null_obj, NULL, spec_obj_type, safe_for_replace);
+      if (stopped()) {            // Profile disagrees with this path.
+        set_control(null_ctl);    // Null is the only remaining possibility.
+        return intcon(0);
+      }
+      if (cast_obj != NULL) {
+        not_null_obj = cast_obj;
+      }
+    }
   }
 
   // Load the object's klass
@@ -2694,7 +2942,10 @@
     if (objtp != NULL && objtp->klass() != NULL) {
       switch (static_subtype_check(tk->klass(), objtp->klass())) {
       case SSC_always_true:
-        return obj;
+        // If we know the type check always succeeds then we don't use
+        // the profiling data at this bytecode. Don't lose it; feed it
+        // to the type system as a speculative type.
+        return record_profiled_receiver_for_speculation(obj);
       case SSC_always_false:
         // It needs a null check because a null will *pass* the cast check.
         // A non-null value will always produce an exception.
@@ -2704,11 +2955,13 @@
   }
 
   ciProfileData* data = NULL;
+  bool safe_for_replace = false;
   if (failure_control == NULL) {        // use MDO in regular case only
     assert(java_bc() == Bytecodes::_aastore ||
            java_bc() == Bytecodes::_checkcast,
            "interpreter profiles type checks only for these BCs");
     data = method()->method_data()->bci_to_data(bci());
+    safe_for_replace = true;
   }
 
   // Make the merge point
@@ -2723,7 +2976,7 @@
 
   // Null check; get casted pointer; set region slot 3
   Node* null_ctl = top();
-  Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null);
+  Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace);
 
   // If not_null_obj is dead, only null-path is taken
   if (stopped()) {              // Doing instance-of on a NULL?
@@ -2741,12 +2994,17 @@
   }
 
   Node* cast_obj = NULL;
-  if (data != NULL &&
-      // Counter has never been decremented (due to cast failure).
-      // ...This is a reasonable thing to expect.  It is true of
-      // all casts inserted by javac to implement generic types.
-      data->as_CounterData()->count() >= 0) {
-    cast_obj = maybe_cast_profiled_receiver(not_null_obj, data, tk->klass());
+  const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
+  // We may not have profiling here, or it may not help us. If we have
+  // a speculative type, use it to perform an exact cast.
+  ciKlass* spec_obj_type = obj_type->speculative_type();
+  if (spec_obj_type != NULL ||
+      (data != NULL &&
+       // Counter has never been decremented (due to cast failure).
+       // ...This is a reasonable thing to expect.  It is true of
+       // all casts inserted by javac to implement generic types.
+       data->as_CounterData()->count() >= 0)) {
+    cast_obj = maybe_cast_profiled_receiver(not_null_obj, tk->klass(), spec_obj_type, safe_for_replace);
     if (cast_obj != NULL) {
       if (failure_control != NULL) // failure is now impossible
         (*failure_control) = top();
@@ -3551,6 +3809,8 @@
   } else {
     // In this case both val_type and alias_idx are unused.
     assert(pre_val != NULL, "must be loaded already");
+    // Nothing to be done if pre_val is null.
+    if (pre_val->bottom_type() == TypePtr::NULL_PTR) return;
     assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here");
   }
   assert(bt == T_OBJECT, "or we shouldn't be here");
@@ -3587,7 +3847,7 @@
   Node* marking = __ load(__ ctrl(), marking_adr, TypeInt::INT, active_type, Compile::AliasIdxRaw);
 
   // if (!marking)
-  __ if_then(marking, BoolTest::ne, zero); {
+  __ if_then(marking, BoolTest::ne, zero, unlikely); {
     BasicType index_bt = TypeX_X->basic_type();
     assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading G1 PtrQueue::_index with wrong size.");
     Node* index   = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);
@@ -3595,7 +3855,7 @@
     if (do_load) {
       // load original value
       // alias_idx correct??
-      pre_val = __ load(no_ctrl, adr, val_type, bt, alias_idx);
+      pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx);
     }
 
     // if (pre_val != NULL)
@@ -3692,7 +3952,8 @@
   Node* no_base = __ top();
   float likely  = PROB_LIKELY(0.999);
   float unlikely  = PROB_UNLIKELY(0.999);
-  Node* zero = __ ConI(0);
+  Node* young_card = __ ConI((jint)G1SATBCardTableModRefBS::g1_young_card_val());
+  Node* dirty_card = __ ConI((jint)CardTableModRefBS::dirty_card_val());
   Node* zeroX = __ ConX(0);
 
   // Get the alias_index for raw card-mark memory
@@ -3748,8 +4009,16 @@
         // load the original value of the card
         Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
 
-        __ if_then(card_val, BoolTest::ne, zero); {
-          g1_mark_card(ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
+        __ if_then(card_val, BoolTest::ne, young_card); {
+          sync_kit(ideal);
+          // Use Op_MemBarVolatile to achieve the effect of a StoreLoad barrier.
+          insert_mem_bar(Op_MemBarVolatile, oop_store);
+          __ sync_kit(this);
+
+          Node* card_val_reload = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
+          __ if_then(card_val_reload, BoolTest::ne, dirty_card); {
+            g1_mark_card(ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
+          } __ end_if();
         } __ end_if();
       } __ end_if();
     } __ end_if();
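
// Hedged pseudo-code for the card filter built above (the IdealKit graph,
// flattened; control flow is schematic, not VM code):
//
//   if (card_val != g1_young_card_val()) {  // stores into young regions
//                                           // need no post-barrier work
//     StoreLoad();                          // Op_MemBarVolatile: order the
//                                           // oop store vs. the card reload
//     if (*card_adr != dirty_card_val()) {
//       *card_adr = dirty;                  // g1_mark_card: dirty the card
//       enqueue(card_adr);                  // and log it for refinement
//     }
//   }
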
@@ -3804,8 +4073,13 @@
                                                    TypeAry::make(TypeInt::CHAR,TypeInt::POS),
                                                    ciTypeArrayKlass::make(T_CHAR), true, 0);
   int value_field_idx = C->get_alias_index(value_field_type);
-  return make_load(ctrl, basic_plus_adr(str, str, value_offset),
-                   value_type, T_OBJECT, value_field_idx);
+  Node* load = make_load(ctrl, basic_plus_adr(str, str, value_offset),
+                         value_type, T_OBJECT, value_field_idx);
+  // String.value field is known to be @Stable.
+  if (UseImplicitStableValues) {
+    load = cast_array_to_stable(load, value_type);
+  }
+  return load;
 }
 
 void GraphKit::store_String_offset(Node* ctrl, Node* str, Node* value) {
@@ -3823,12 +4097,9 @@
   const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                      false, NULL, 0);
   const TypePtr* value_field_type = string_type->add_offset(value_offset);
-  const TypeAryPtr*  value_type = TypeAryPtr::make(TypePtr::NotNull,
-                                                   TypeAry::make(TypeInt::CHAR,TypeInt::POS),
-                                                   ciTypeArrayKlass::make(T_CHAR), true, 0);
-  int value_field_idx = C->get_alias_index(value_field_type);
-  store_to_memory(ctrl, basic_plus_adr(str, value_offset),
-                  value, T_OBJECT, value_field_idx);
+
+  store_oop_to_object(ctrl, str, basic_plus_adr(str, value_offset), value_field_type,
+                      value, TypeAryPtr::CHARS, T_OBJECT);
 }
 
 void GraphKit::store_String_length(Node* ctrl, Node* str, Node* value) {
@@ -3840,3 +4111,9 @@
   store_to_memory(ctrl, basic_plus_adr(str, count_offset),
                   value, T_INT, count_field_idx);
 }
+
+Node* GraphKit::cast_array_to_stable(Node* ary, const TypeAryPtr* ary_type) {
+  // Reify the property as a CastPP node in the Ideal graph to comply with
+  // the monotonicity assumption of CCP analysis.
+  return _gvn.transform(new(C) CastPPNode(ary, ary_type->cast_to_stable(true)));
+}
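
// Hedged note on cast_array_to_stable: a stable array type lets later
// phases fold loads of already-initialized elements to constants without
// violating CCP's monotonicity. Usage sketch, mirroring load_String_value
// above:
//
//   Node* chars = make_load(ctrl, adr, value_type, T_OBJECT, value_field_idx);
//   if (UseImplicitStableValues) {
//     chars = cast_array_to_stable(chars, value_type);  // @Stable contents
//   }
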
--- a/src/share/vm/opto/graphKit.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/graphKit.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -378,16 +378,41 @@
   // Return a cast-not-null node which depends on the not-null control.
   // If never_see_null, use an uncommon trap (*null_control sees a top).
   // The cast is not valid along the null path; keep a copy of the original.
+  // If safe_for_replace, then we can replace the value with the cast
+  // in the parsing map (the cast is guaranteed to dominate the map)
   Node* null_check_oop(Node* value, Node* *null_control,
-                       bool never_see_null = false);
+                       bool never_see_null = false, bool safe_for_replace = false);
 
   // Check the null_seen bit.
   bool seems_never_null(Node* obj, ciProfileData* data);
 
+  // Check for a unique receiver class at a call site
+  ciKlass* profile_has_unique_klass() {
+    ciCallProfile profile = method()->call_profile_at_bci(bci());
+    if (profile.count() >= 0 &&         // no cast failures here
+        profile.has_receiver(0) &&
+        profile.morphism() == 1) {
+      return profile.receiver(0);
+    }
+    return NULL;
+  }
+
+  // Record a type from profiling with the type system
+  Node* record_profile_for_speculation(Node* n, ciKlass* exact_kls);
+  Node* record_profiled_receiver_for_speculation(Node* n);
+  void record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc);
+  void record_profiled_parameters_for_speculation();
+
   // Use the type profile to narrow an object type.
   Node* maybe_cast_profiled_receiver(Node* not_null_obj,
-                                     ciProfileData* data,
-                                     ciKlass* require_klass);
+                                     ciKlass* require_klass,
+                                     ciKlass* spec_klass,
+                                     bool safe_for_replace);
+
+  // Cast obj to type and emit guard unless we had too many traps here already
+  Node* maybe_cast_profiled_obj(Node* obj,
+                                ciKlass* type,
+                                bool not_null = false);
 
   // Cast obj to not-null on this path
   Node* cast_not_null(Node* obj, bool do_replace_in_map = true);
@@ -695,6 +720,10 @@
   void write_barrier_post(Node *store, Node* obj,
                           Node* adr,  uint adr_idx, Node* val, bool use_precise);
 
+  // Allow reordering of the pre-barrier with the oop store and/or post-barrier.
+  // Used for load_store operations which load the old value.
+  bool can_move_pre_barrier() const;
+
   // G1 pre/post barriers
   void g1_write_barrier_pre(bool do_load,
                             Node* obj,
@@ -769,7 +798,7 @@
 
   // Generate an instance-of idiom.  Used by both the instance-of bytecode
   // and the reflective instance-of call.
-  Node* gen_instanceof( Node *subobj, Node* superkls );
+  Node* gen_instanceof(Node *subobj, Node* superkls, bool safe_for_replace = false);
 
   // Generate a check-cast idiom.  Used by both the check-cast bytecode
   // and the array-store bytecode
@@ -832,6 +861,9 @@
   // Insert a loop predicate into the graph
   void add_predicate(int nargs = 0);
   void add_predicate_impl(Deoptimization::DeoptReason reason, int nargs);
+
+  // Produce a new array node of stable type
+  Node* cast_array_to_stable(Node* ary, const TypeAryPtr* ary_type);
 };
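
// Hedged call-site sketch for the widened gen_instanceof signature (the
// default argument preserves the old behavior):
//
//   Node* res = gen_instanceof(obj, superklass, /*safe_for_replace=*/ true);
//
// Passing true is only sound when obj dominates the current parse map,
// because the embedded null_check_oop may then rewrite the map entry.
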
 
 // Helper class to support building of control flow branches. Upon
--- a/src/share/vm/opto/idealGraphPrinter.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/idealGraphPrinter.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -616,7 +616,11 @@
       buffer[0] = 0;
       _chaitin->dump_register(node, buffer);
       print_prop("reg", buffer);
-      print_prop("lrg", _chaitin->_lrg_map.live_range_id(node));
+      uint lrg_id = 0;
+      if (node->_idx < _chaitin->_lrg_map.size()) {
+        lrg_id = _chaitin->_lrg_map.live_range_id(node);
+      }
+      print_prop("lrg", lrg_id);
     }
 
     node->_in_dump_cnt--;
@@ -639,8 +643,8 @@
     // reachable but are in the CFG so add them here.
     for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
       Block* block = C->cfg()->get_block(i);
-      for (uint s = 0; s < block->_nodes.size(); s++) {
-        nodeStack.push(block->_nodes[s]);
+      for (uint s = 0; s < block->number_of_nodes(); s++) {
+        nodeStack.push(block->get_node(s));
       }
     }
   }
@@ -713,9 +717,9 @@
       tail(SUCCESSORS_ELEMENT);
 
       head(NODES_ELEMENT);
-      for (uint s = 0; s < block->_nodes.size(); s++) {
+      for (uint s = 0; s < block->number_of_nodes(); s++) {
         begin_elem(NODE_ELEMENT);
-        print_attr(NODE_ID_PROPERTY, get_node_id(block->_nodes[s]));
+        print_attr(NODE_ID_PROPERTY, get_node_id(block->get_node(s)));
         end_elem();
       }
       tail(NODES_ELEMENT);
--- a/src/share/vm/opto/ifg.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/ifg.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -319,7 +319,7 @@
     // value is then removed from the live-ness set and its inputs are
     // added to the live-ness set.
     for (uint j = block->end_idx() + 1; j > 1; j--) {
-      Node* n = block->_nodes[j - 1];
+      Node* n = block->get_node(j - 1);
 
       // Get value being defined
       uint r = _lrg_map.live_range_id(n);
@@ -456,7 +456,7 @@
     // Compute first nonphi node index
     uint first_inst;
     for (first_inst = 1; first_inst < last_inst; first_inst++) {
-      if (!block->_nodes[first_inst]->is_Phi()) {
+      if (!block->get_node(first_inst)->is_Phi()) {
         break;
       }
     }
@@ -464,15 +464,15 @@
     // Spills could be inserted before CreateEx node which should be
     // first instruction in block after Phis. Move CreateEx up.
     for (uint insidx = first_inst; insidx < last_inst; insidx++) {
-      Node *ex = block->_nodes[insidx];
+      Node *ex = block->get_node(insidx);
       if (ex->is_SpillCopy()) {
         continue;
       }
       if (insidx > first_inst && ex->is_Mach() && ex->as_Mach()->ideal_Opcode() == Op_CreateEx) {
         // If the CreateEx isn't above all the MachSpillCopies
         // then move it to the top.
-        block->_nodes.remove(insidx);
-        block->_nodes.insert(first_inst, ex);
+        block->remove_node(insidx);
+        block->insert_node(ex, first_inst);
       }
       // Stop once a CreateEx or any other node is found
       break;
@@ -523,7 +523,7 @@
     // to the live-ness set.
     uint j;
     for (j = last_inst + 1; j > 1; j--) {
-      Node* n = block->_nodes[j - 1];
+      Node* n = block->get_node(j - 1);
 
       // Get value being defined
       uint r = _lrg_map.live_range_id(n);
@@ -541,7 +541,7 @@
           if( !n->is_Proj() ||
               // Could also be a flags-projection of a dead ADD or such.
               (_lrg_map.live_range_id(def) && !liveout.member(_lrg_map.live_range_id(def)))) {
-            block->_nodes.remove(j - 1);
+            block->remove_node(j - 1);
             if (lrgs(r)._def == n) {
               lrgs(r)._def = 0;
             }
@@ -605,7 +605,7 @@
             // (j - 1) is index for current instruction 'n'
             Node *m = n;
             for (uint i = j; i <= last_inst && m->is_SpillCopy(); ++i) {
-              m = block->_nodes[i];
+              m = block->get_node(i);
             }
             if (m == single_use) {
               lrgs(r)._area = 0.0;
@@ -677,7 +677,7 @@
             } else {            // Common case: size 1 bound removal
               if( lrg.mask().Member(r_reg) ) {
                 lrg.Remove(r_reg);
-                lrg.set_mask_size(lrg.mask().is_AllStack() ? 65535:old_size-1);
+                lrg.set_mask_size(lrg.mask().is_AllStack() ? LRG::AllStack_size : old_size - 1);
               }
             }
             // If 'l' goes completely dry, it must spill.
@@ -772,20 +772,20 @@
 
     // Compute high pressure index; avoid landing in the middle of projnodes
     j = hrp_index[0];
-    if (j < block->_nodes.size() && j < block->end_idx() + 1) {
-      Node* cur = block->_nodes[j];
+    if (j < block->number_of_nodes() && j < block->end_idx() + 1) {
+      Node* cur = block->get_node(j);
       while (cur->is_Proj() || (cur->is_MachNullCheck()) || cur->is_Catch()) {
         j--;
-        cur = block->_nodes[j];
+        cur = block->get_node(j);
       }
     }
     block->_ihrp_index = j;
     j = hrp_index[1];
-    if (j < block->_nodes.size() && j < block->end_idx() + 1) {
-      Node* cur = block->_nodes[j];
+    if (j < block->number_of_nodes() && j < block->end_idx() + 1) {
+      Node* cur = block->get_node(j);
       while (cur->is_Proj() || (cur->is_MachNullCheck()) || cur->is_Catch()) {
         j--;
-        cur = block->_nodes[j];
+        cur = block->get_node(j);
       }
     }
     block->_fhrp_index = j;
--- a/src/share/vm/opto/ifnode.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/ifnode.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -76,6 +76,7 @@
   if( !i1->is_Bool() ) return NULL;
   BoolNode *b = i1->as_Bool();
   Node *cmp = b->in(1);
+  if( cmp->is_FlagsProj() ) return NULL;
   if( !cmp->is_Cmp() ) return NULL;
   i1 = cmp->in(1);
   if( i1 == NULL || !i1->is_Phi() ) return NULL;
@@ -688,6 +689,7 @@
         ctrl->in(0)->in(1)->is_Bool() &&
         ctrl->in(0)->in(1)->in(1)->Opcode() == Op_CmpI &&
         ctrl->in(0)->in(1)->in(1)->in(2)->is_Con() &&
+        ctrl->in(0)->in(1)->in(1)->in(2) != phase->C->top() &&
         ctrl->in(0)->in(1)->in(1)->in(1) == n) {
       IfNode* dom_iff = ctrl->in(0)->as_If();
       Node* otherproj = dom_iff->proj_out(!ctrl->as_Proj()->_con);
@@ -1017,7 +1019,7 @@
   // be skipped. For example, range check predicate has two checks
   // for lower and upper bounds.
   ProjNode* unc_proj = proj_out(1 - prev_dom->as_Proj()->_con)->as_Proj();
-  if (PhaseIdealLoop::is_uncommon_trap_proj(unc_proj, Deoptimization::Reason_predicate))
+  if (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate))
     prev_dom = idom;
 
   // Now walk the current IfNode's projections.
--- a/src/share/vm/opto/lcm.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/lcm.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -61,14 +61,14 @@
 // The proj is the control projection for the not-null case.
 // The val is the pointer being checked for nullness or
 // decodeHeapOop_not_null node if it did not fold into address.
-void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowed_reasons) {
+void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allowed_reasons) {
   // Assume that if a null check is needed for offset 0 then it is always needed
   // Intel solaris doesn't support any null checks yet and no
   // mechanism exists (yet) to set the switches at an os_cpu level
   if( !ImplicitNullChecks || MacroAssembler::needs_explicit_null_check(0)) return;
 
   // Make sure the ptr-is-null path appears to be uncommon!
-  float f = end()->as_MachIf()->_prob;
+  float f = block->end()->as_MachIf()->_prob;
   if( proj->Opcode() == Op_IfTrue ) f = 1.0f - f;
   if( f > PROB_UNLIKELY_MAG(4) ) return;
 
@@ -78,13 +78,13 @@
   // Get the successor block for if the test ptr is non-null
   Block* not_null_block;  // this one goes with the proj
   Block* null_block;
-  if (_nodes[_nodes.size()-1] == proj) {
-    null_block     = _succs[0];
-    not_null_block = _succs[1];
+  if (block->get_node(block->number_of_nodes()-1) == proj) {
+    null_block     = block->_succs[0];
+    not_null_block = block->_succs[1];
   } else {
-    assert(_nodes[_nodes.size()-2] == proj, "proj is one or the other");
-    not_null_block = _succs[0];
-    null_block     = _succs[1];
+    assert(block->get_node(block->number_of_nodes()-2) == proj, "proj is one or the other");
+    not_null_block = block->_succs[0];
+    null_block     = block->_succs[1];
   }
   while (null_block->is_Empty() == Block::empty_with_goto) {
     null_block     = null_block->_succs[0];
@@ -96,8 +96,8 @@
   // detect failure of this optimization, as in 6366351.)
   {
     bool found_trap = false;
-    for (uint i1 = 0; i1 < null_block->_nodes.size(); i1++) {
-      Node* nn = null_block->_nodes[i1];
+    for (uint i1 = 0; i1 < null_block->number_of_nodes(); i1++) {
+      Node* nn = null_block->get_node(i1);
       if (nn->is_MachCall() &&
           nn->as_MachCall()->entry_point() == SharedRuntime::uncommon_trap_blob()->entry_point()) {
         const Type* trtype = nn->in(TypeFunc::Parms)->bottom_type();
@@ -240,20 +240,20 @@
     }
 
     // Check ctrl input to see if the null-check dominates the memory op
-    Block *cb = cfg->get_block_for_node(mach);
+    Block *cb = get_block_for_node(mach);
     cb = cb->_idom;             // Always hoist at least 1 block
     if( !was_store ) {          // Stores can be hoisted only one block
-      while( cb->_dom_depth > (_dom_depth + 1))
+      while( cb->_dom_depth > (block->_dom_depth + 1))
         cb = cb->_idom;         // Hoist loads as far as we want
       // The non-null-block should dominate the memory op, too. Live
       // range spilling will insert a spill in the non-null-block if it
       // needs to spill the memory op for an implicit null check.
-      if (cb->_dom_depth == (_dom_depth + 1)) {
+      if (cb->_dom_depth == (block->_dom_depth + 1)) {
         if (cb != not_null_block) continue;
         cb = cb->_idom;
       }
     }
-    if( cb != this ) continue;
+    if( cb != block ) continue;
 
     // Found a memory user; see if it can be hoisted to check-block
     uint vidx = 0;              // Capture index of value into memop
@@ -265,8 +265,8 @@
         if( is_decoden ) continue;
       }
       // Block of memory-op input
-      Block *inb = cfg->get_block_for_node(mach->in(j));
-      Block *b = this;          // Start from nul check
+      Block *inb = get_block_for_node(mach->in(j));
+      Block *b = block;          // Start from null check
       while( b != inb && b->_dom_depth > inb->_dom_depth )
         b = b->_idom;           // search upwards for input
       // See if input dominates null check
@@ -275,28 +275,28 @@
     }
     if( j > 0 )
       continue;
-    Block *mb = cfg->get_block_for_node(mach);
+    Block *mb = get_block_for_node(mach);
     // Hoisting stores requires more checks for the anti-dependence case.
     // Give up hoisting if we have to move the store past any load.
     if( was_store ) {
       Block *b = mb;            // Start searching here for a local load
       // mach use (faulting) trying to hoist
       // n might be blocker to hoisting
-      while( b != this ) {
+      while( b != block ) {
         uint k;
-        for( k = 1; k < b->_nodes.size(); k++ ) {
-          Node *n = b->_nodes[k];
+        for( k = 1; k < b->number_of_nodes(); k++ ) {
+          Node *n = b->get_node(k);
           if( n->needs_anti_dependence_check() &&
               n->in(LoadNode::Memory) == mach->in(StoreNode::Memory) )
             break;              // Found anti-dependent load
         }
-        if( k < b->_nodes.size() )
+        if( k < b->number_of_nodes() )
           break;                // Found anti-dependent load
         // Make sure control does not do a merge (would have to check allpaths)
         if( b->num_preds() != 2 ) break;
-        b = cfg->get_block_for_node(b->pred(1)); // Move up to predecessor block
+        b = get_block_for_node(b->pred(1)); // Move up to predecessor block
       }
-      if( b != this ) continue;
+      if( b != block ) continue;
     }
 
     // Make sure this memory op is not already being used for a NullCheck
@@ -306,7 +306,7 @@
 
     // Found a candidate!  Pick one with least dom depth - the highest
     // in the dom tree should be closest to the null check.
-    if (best == NULL || cfg->get_block_for_node(mach)->_dom_depth < cfg->get_block_for_node(best)->_dom_depth) {
+    if (best == NULL || get_block_for_node(mach)->_dom_depth < get_block_for_node(best)->_dom_depth) {
       best = mach;
       bidx = vidx;
     }
@@ -322,46 +322,45 @@
 
   if( is_decoden ) {
     // Check if we need to hoist decodeHeapOop_not_null first.
-    Block *valb = cfg->get_block_for_node(val);
-    if( this != valb && this->_dom_depth < valb->_dom_depth ) {
+    Block *valb = get_block_for_node(val);
+    if( block != valb && block->_dom_depth < valb->_dom_depth ) {
       // Hoist it up to the end of the test block.
       valb->find_remove(val);
-      this->add_inst(val);
-      cfg->map_node_to_block(val, this);
+      block->add_inst(val);
+      map_node_to_block(val, block);
       // DecodeN on x86 may kill flags. Check for flag-killing projections
       // that also need to be hoisted.
       for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) {
         Node* n = val->fast_out(j);
         if( n->is_MachProj() ) {
-          cfg->get_block_for_node(n)->find_remove(n);
-          this->add_inst(n);
-          cfg->map_node_to_block(n, this);
+          get_block_for_node(n)->find_remove(n);
+          block->add_inst(n);
+          map_node_to_block(n, block);
         }
       }
     }
   }
   // Hoist the memory candidate up to the end of the test block.
-  Block *old_block = cfg->get_block_for_node(best);
+  Block *old_block = get_block_for_node(best);
   old_block->find_remove(best);
-  add_inst(best);
-  cfg->map_node_to_block(best, this);
+  block->add_inst(best);
+  map_node_to_block(best, block);
 
   // Move the control dependence
-  if (best->in(0) && best->in(0) == old_block->_nodes[0])
-    best->set_req(0, _nodes[0]);
+  if (best->in(0) && best->in(0) == old_block->head())
+    best->set_req(0, block->head());
 
   // Check for flag-killing projections that also need to be hoisted
   // Should be DU safe because no edge updates.
   for (DUIterator_Fast jmax, j = best->fast_outs(jmax); j < jmax; j++) {
     Node* n = best->fast_out(j);
     if( n->is_MachProj() ) {
-      cfg->get_block_for_node(n)->find_remove(n);
-      add_inst(n);
-      cfg->map_node_to_block(n, this);
+      get_block_for_node(n)->find_remove(n);
+      block->add_inst(n);
+      map_node_to_block(n, block);
     }
   }
 
-  Compile *C = cfg->C;
   // proj==Op_True --> ne test; proj==Op_False --> eq test.
   // One of two graph shapes got matched:
   //   (IfTrue  (If (Bool NE (CmpP ptr NULL))))
@@ -371,10 +370,10 @@
   // We need to flip the projections to keep the same semantics.
   if( proj->Opcode() == Op_IfTrue ) {
     // Swap order of projections in basic block to swap branch targets
-    Node *tmp1 = _nodes[end_idx()+1];
-    Node *tmp2 = _nodes[end_idx()+2];
-    _nodes.map(end_idx()+1, tmp2);
-    _nodes.map(end_idx()+2, tmp1);
+    Node *tmp1 = block->get_node(block->end_idx()+1);
+    Node *tmp2 = block->get_node(block->end_idx()+2);
+    block->map_node(tmp2, block->end_idx()+1);
+    block->map_node(tmp1, block->end_idx()+2);
     Node *tmp = new (C) Node(C->top()); // Use not NULL input
     tmp1->replace_by(tmp);
     tmp2->replace_by(tmp1);
@@ -387,8 +386,8 @@
   // it as well.
   Node *old_tst = proj->in(0);
   MachNode *nul_chk = new (C) MachNullCheckNode(old_tst->in(0),best,bidx);
-  _nodes.map(end_idx(),nul_chk);
-  cfg->map_node_to_block(nul_chk, this);
+  block->map_node(nul_chk, block->end_idx());
+  map_node_to_block(nul_chk, block);
   // Redirect users of old_test to nul_chk
   for (DUIterator_Last i2min, i2 = old_tst->last_outs(i2min); i2 >= i2min; --i2)
     old_tst->last_out(i2)->set_req(0, nul_chk);
@@ -396,8 +395,8 @@
   for (uint i3 = 0; i3 < old_tst->req(); i3++)
     old_tst->set_req(i3, NULL);
 
-  cfg->latency_from_uses(nul_chk);
-  cfg->latency_from_uses(best);
+  latency_from_uses(nul_chk);
+  latency_from_uses(best);
 }
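
// Hedged sketch of the transformation implicit_null_check performs: an
// explicit test-and-branch
//
//   if (ptr == NULL) goto null_path;   // rarely taken
//   val = *ptr;
//
// becomes a faulting memory op guarded by a MachNullCheck,
//
//   val = *ptr;              // hoisted into the test block; faults on NULL
//   MachNullCheck(load)      // projections: fall-through / null path
//
// with the signal handler routing the faulting case to the null block.
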
 
 
@@ -411,7 +410,7 @@
 // remaining cases (most), choose the instruction with the greatest latency
 // (that is, the greatest number of pseudo-cycles required to the end of the
 // routine). If there is a tie, choose the instruction with the most inputs.
-Node *Block::select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot) {
+Node* PhaseCFG::select(Block* block, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot) {
 
   // If only a single entry on the stack, use it
   uint cnt = worklist.size();
@@ -445,7 +444,7 @@
     }
 
     // Final call in a block must be adjacent to 'catch'
-    Node *e = end();
+    Node *e = block->end();
     if( e->is_Catch() && e->in(0)->in(0) == n )
       continue;
 
@@ -471,7 +470,14 @@
         Node* use = n->fast_out(j);
 
         // The use is a conditional branch, make them adjacent
-        if (use->is_MachIf() && cfg->get_block_for_node(use) == this) {
+        if (use->is_MachIf() && get_block_for_node(use) == block) {
+          found_machif = true;
+          break;
+        }
+
+        // For nodes that produce a FlagsProj, make the node adjacent to the
+        // use of the FlagsProj
+        if (use->is_FlagsProj() && get_block_for_node(use) == block) {
           found_machif = true;
           break;
         }
@@ -504,7 +510,7 @@
       n_choice = 1;
     }
 
-    uint n_latency = cfg->get_latency_for_node(n);
+    uint n_latency = get_latency_for_node(n);
     uint n_score   = n->req();   // Many inputs get high score to break ties
 
     // Keep best latency found
@@ -532,13 +538,13 @@
 
 
 //------------------------------set_next_call----------------------------------
-void Block::set_next_call( Node *n, VectorSet &next_call, PhaseCFG* cfg) {
+void PhaseCFG::set_next_call(Block* block, Node* n, VectorSet& next_call) {
   if( next_call.test_set(n->_idx) ) return;
   for( uint i=0; i<n->len(); i++ ) {
     Node *m = n->in(i);
     if( !m ) continue;  // must see all nodes in block that precede call
-    if (cfg->get_block_for_node(m) == this) {
-      set_next_call(m, next_call, cfg);
+    if (get_block_for_node(m) == block) {
+      set_next_call(block, m, next_call);
     }
   }
 }
@@ -549,24 +555,26 @@
 // next subroutine call get priority - basically it moves things NOT needed
 // for the next call till after the call.  This prevents me from trying to
 // carry lots of stuff live across a call.
-void Block::needed_for_next_call(Node *this_call, VectorSet &next_call, PhaseCFG* cfg) {
+void PhaseCFG::needed_for_next_call(Block* block, Node* this_call, VectorSet& next_call) {
   // Find the next control-defining Node in this block
   Node* call = NULL;
   for (DUIterator_Fast imax, i = this_call->fast_outs(imax); i < imax; i++) {
     Node* m = this_call->fast_out(i);
-    if(cfg->get_block_for_node(m) == this && // Local-block user
+    if (get_block_for_node(m) == block && // Local-block user
         m != this_call &&       // Not self-start node
-        m->is_MachCall() )
+        m->is_MachCall()) {
       call = m;
       break;
+    }
   }
   if (call == NULL)  return;    // No next call (e.g., block end is near)
   // Set next-call for all inputs to this call
-  set_next_call(call, next_call, cfg);
+  set_next_call(block, call, next_call);
 }
 
 //------------------------------add_call_kills-------------------------------------
-void Block::add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe) {
+// Helper function that adds caller-save registers to the MachProjNode
+static void add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe) {
   // Fill in the kill mask for the call
   for( OptoReg::Name r = OptoReg::Name(0); r < _last_Mach_Reg; r=OptoReg::add(r,1) ) {
     if( !regs.Member(r) ) {     // Not already defined by the call
@@ -582,7 +590,7 @@
 
 
 //------------------------------sched_call-------------------------------------
-uint Block::sched_call( Matcher &matcher, PhaseCFG* cfg, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call ) {
+uint PhaseCFG::sched_call(Block* block, uint node_cnt, Node_List& worklist, GrowableArray<int>& ready_cnt, MachCallNode* mcall, VectorSet& next_call) {
   RegMask regs;
 
   // Schedule all the users of the call right now.  All the users are
@@ -595,18 +603,18 @@
     ready_cnt.at_put(n->_idx, n_cnt);
     assert( n_cnt == 0, "" );
     // Schedule next to call
-    _nodes.map(node_cnt++, n);
+    block->map_node(n, node_cnt++);
     // Collect defined registers
     regs.OR(n->out_RegMask());
     // Check for scheduling the next control-definer
     if( n->bottom_type() == Type::CONTROL )
       // Warm up next pile of heuristic bits
-      needed_for_next_call(n, next_call, cfg);
+      needed_for_next_call(block, n, next_call);
 
     // Children of projections are now all ready
     for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
       Node* m = n->fast_out(j); // Get user
-      if(cfg->get_block_for_node(m) != this) {
+      if(get_block_for_node(m) != block) {
         continue;
       }
       if( m->is_Phi() ) continue;
@@ -620,14 +628,14 @@
 
   // Act as if the call defines the Frame Pointer.
   // Certainly the FP is alive and well after the call.
-  regs.Insert(matcher.c_frame_pointer());
+  regs.Insert(_matcher.c_frame_pointer());
 
   // Set all registers killed and not already defined by the call.
   uint r_cnt = mcall->tf()->range()->cnt();
   int op = mcall->ideal_Opcode();
-  MachProjNode *proj = new (matcher.C) MachProjNode( mcall, r_cnt+1, RegMask::Empty, MachProjNode::fat_proj );
-  cfg->map_node_to_block(proj, this);
-  _nodes.insert(node_cnt++, proj);
+  MachProjNode *proj = new (C) MachProjNode( mcall, r_cnt+1, RegMask::Empty, MachProjNode::fat_proj );
+  map_node_to_block(proj, block);
+  block->insert_node(proj, node_cnt++);
 
   // Select the right register save policy.
   const char * save_policy;
@@ -636,13 +644,13 @@
     case Op_CallLeaf:
     case Op_CallLeafNoFP:
       // Calling C code so use C calling convention
-      save_policy = matcher._c_reg_save_policy;
+      save_policy = _matcher._c_reg_save_policy;
       break;
 
     case Op_CallStaticJava:
     case Op_CallDynamicJava:
       // Calling Java code so use Java calling convention
-      save_policy = matcher._register_save_policy;
+      save_policy = _matcher._register_save_policy;
       break;
 
     default:
@@ -677,44 +685,46 @@
 
 //------------------------------schedule_local---------------------------------
 // Topological sort within a block.  Someday become a real scheduler.
-bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &ready_cnt, VectorSet &next_call) {
+bool PhaseCFG::schedule_local(Block* block, GrowableArray<int>& ready_cnt, VectorSet& next_call) {
   // Already "sorted" are the block start Node (as the first entry), and
   // the block-ending Node and any trailing control projections.  We leave
   // these alone.  PhiNodes and ParmNodes are made to follow the block start
   // Node.  Everything else gets topo-sorted.
 
 #ifndef PRODUCT
-    if (cfg->trace_opto_pipelining()) {
-      tty->print_cr("# --- schedule_local B%d, before: ---", _pre_order);
-      for (uint i = 0;i < _nodes.size();i++) {
+    if (trace_opto_pipelining()) {
+      tty->print_cr("# --- schedule_local B%d, before: ---", block->_pre_order);
+      for (uint i = 0;i < block->number_of_nodes(); i++) {
         tty->print("# ");
-        _nodes[i]->fast_dump();
+        block->get_node(i)->fast_dump();
       }
       tty->print_cr("#");
     }
 #endif
 
   // RootNode is already sorted
-  if( _nodes.size() == 1 ) return true;
+  if (block->number_of_nodes() == 1) {
+    return true;
+  }
 
   // Move PhiNodes and ParmNodes from 1 to cnt up to the start
-  uint node_cnt = end_idx();
+  uint node_cnt = block->end_idx();
   uint phi_cnt = 1;
   uint i;
   for( i = 1; i<node_cnt; i++ ) { // Scan for Phi
-    Node *n = _nodes[i];
+    Node *n = block->get_node(i);
     if( n->is_Phi() ||          // Found a PhiNode or ParmNode
-        (n->is_Proj()  && n->in(0) == head()) ) {
+        (n->is_Proj()  && n->in(0) == block->head()) ) {
       // Move guy at 'phi_cnt' to the end; makes a hole at phi_cnt
-      _nodes.map(i,_nodes[phi_cnt]);
-      _nodes.map(phi_cnt++,n);  // swap Phi/Parm up front
+      block->map_node(block->get_node(phi_cnt), i);
+      block->map_node(n, phi_cnt++);  // swap Phi/Parm up front
     } else {                    // All others
       // Count block-local inputs to 'n'
       uint cnt = n->len();      // Input count
       uint local = 0;
       for( uint j=0; j<cnt; j++ ) {
         Node *m = n->in(j);
-        if( m && cfg->get_block_for_node(m) == this && !m->is_top() )
+        if( m && get_block_for_node(m) == block && !m->is_top() )
           local++;              // One more block-local input
       }
       ready_cnt.at_put(n->_idx, local); // Count em up
@@ -726,7 +736,7 @@
           for (uint prec = n->req(); prec < n->len(); prec++) {
             Node* oop_store = n->in(prec);
             if (oop_store != NULL) {
-              assert(cfg->get_block_for_node(oop_store)->_dom_depth <= this->_dom_depth, "oop_store must dominate card-mark");
+              assert(get_block_for_node(oop_store)->_dom_depth <= block->_dom_depth, "oop_store must dominate card-mark");
             }
           }
         }
@@ -750,16 +760,16 @@
       }
     }
   }
-  for(uint i2=i; i2<_nodes.size(); i2++ ) // Trailing guys get zapped count
-    ready_cnt.at_put(_nodes[i2]->_idx, 0);
+  for(uint i2=i; i2< block->number_of_nodes(); i2++ ) // Trailing guys get zapped count
+    ready_cnt.at_put(block->get_node(i2)->_idx, 0);
 
   // All the prescheduled guys do not hold back internal nodes
   uint i3;
   for(i3 = 0; i3<phi_cnt; i3++ ) {  // For all pre-scheduled
-    Node *n = _nodes[i3];       // Get pre-scheduled
+    Node *n = block->get_node(i3);       // Get pre-scheduled
     for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
       Node* m = n->fast_out(j);
-      if (cfg->get_block_for_node(m) == this) { // Local-block user
+      if (get_block_for_node(m) == block) { // Local-block user
         int m_cnt = ready_cnt.at(m->_idx)-1;
         ready_cnt.at_put(m->_idx, m_cnt);   // Fix ready count
       }
@@ -770,7 +780,7 @@
   // Make a worklist
   Node_List worklist;
   for(uint i4=i3; i4<node_cnt; i4++ ) {    // Put ready guys on worklist
-    Node *m = _nodes[i4];
+    Node *m = block->get_node(i4);
     if( !ready_cnt.at(m->_idx) ) {   // Zero ready count?
       if (m->is_iteratively_computed()) {
         // Push induction variable increments last to allow other uses
@@ -792,15 +802,15 @@
   }
 
   // Warm up the 'next_call' heuristic bits
-  needed_for_next_call(_nodes[0], next_call, cfg);
+  needed_for_next_call(block, block->head(), next_call);
 
 #ifndef PRODUCT
-    if (cfg->trace_opto_pipelining()) {
-      for (uint j=0; j<_nodes.size(); j++) {
-        Node     *n = _nodes[j];
+    if (trace_opto_pipelining()) {
+      for (uint j=0; j< block->number_of_nodes(); j++) {
+        Node     *n = block->get_node(j);
         int     idx = n->_idx;
         tty->print("#   ready cnt:%3d  ", ready_cnt.at(idx));
-        tty->print("latency:%3d  ", cfg->get_latency_for_node(n));
+        tty->print("latency:%3d  ", get_latency_for_node(n));
         tty->print("%4d: %s\n", idx, n->Name());
       }
     }
@@ -811,7 +821,7 @@
   while( worklist.size() ) {    // Worklist is not ready
 
 #ifndef PRODUCT
-    if (cfg->trace_opto_pipelining()) {
+    if (trace_opto_pipelining()) {
       tty->print("#   ready list:");
       for( uint i=0; i<worklist.size(); i++ ) { // Inspect entire worklist
         Node *n = worklist[i];      // Get Node on worklist
@@ -822,13 +832,13 @@
 #endif
 
     // Select and pop a ready guy from worklist
-    Node* n = select(cfg, worklist, ready_cnt, next_call, phi_cnt);
-    _nodes.map(phi_cnt++,n);    // Schedule him next
+    Node* n = select(block, worklist, ready_cnt, next_call, phi_cnt);
+    block->map_node(n, phi_cnt++);    // Schedule him next
 
 #ifndef PRODUCT
-    if (cfg->trace_opto_pipelining()) {
+    if (trace_opto_pipelining()) {
       tty->print("#    select %d: %s", n->_idx, n->Name());
-      tty->print(", latency:%d", cfg->get_latency_for_node(n));
+      tty->print(", latency:%d", get_latency_for_node(n));
       n->dump();
       if (Verbose) {
         tty->print("#   ready list:");
@@ -843,26 +853,26 @@
 #endif
     if( n->is_MachCall() ) {
       MachCallNode *mcall = n->as_MachCall();
-      phi_cnt = sched_call(matcher, cfg, phi_cnt, worklist, ready_cnt, mcall, next_call);
+      phi_cnt = sched_call(block, phi_cnt, worklist, ready_cnt, mcall, next_call);
       continue;
     }
 
     if (n->is_Mach() && n->as_Mach()->has_call()) {
       RegMask regs;
-      regs.Insert(matcher.c_frame_pointer());
+      regs.Insert(_matcher.c_frame_pointer());
       regs.OR(n->out_RegMask());
 
-      MachProjNode *proj = new (matcher.C) MachProjNode( n, 1, RegMask::Empty, MachProjNode::fat_proj );
-      cfg->map_node_to_block(proj, this);
-      _nodes.insert(phi_cnt++, proj);
+      MachProjNode *proj = new (C) MachProjNode( n, 1, RegMask::Empty, MachProjNode::fat_proj );
+      map_node_to_block(proj, block);
+      block->insert_node(proj, phi_cnt++);
 
-      add_call_kills(proj, regs, matcher._c_reg_save_policy, false);
+      add_call_kills(proj, regs, _matcher._c_reg_save_policy, false);
     }
 
     // Children are now all ready
     for (DUIterator_Fast i5max, i5 = n->fast_outs(i5max); i5 < i5max; i5++) {
       Node* m = n->fast_out(i5); // Get user
-      if (cfg->get_block_for_node(m) != this) {
+      if (get_block_for_node(m) != block) {
         continue;
       }
       if( m->is_Phi() ) continue;
@@ -877,9 +887,8 @@
     }
   }
 
-  if( phi_cnt != end_idx() ) {
+  if( phi_cnt != block->end_idx() ) {
     // did not schedule all.  Retry, Bailout, or Die
-    Compile* C = matcher.C;
     if (C->subsume_loads() == true && !C->failing()) {
       // Retry with subsume_loads == false
       // If this is the first failure, the sentinel string will "stick"
@@ -891,12 +900,12 @@
   }
 
 #ifndef PRODUCT
-  if (cfg->trace_opto_pipelining()) {
+  if (trace_opto_pipelining()) {
     tty->print_cr("#");
     tty->print_cr("# after schedule_local");
-    for (uint i = 0;i < _nodes.size();i++) {
+    for (uint i = 0;i < block->number_of_nodes();i++) {
       tty->print("# ");
-      _nodes[i]->fast_dump();
+      block->get_node(i)->fast_dump();
     }
     tty->cr();
   }
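
// Hedged summary of the list-scheduling loop above, in schematic form:
//
//   while (!worklist.empty()) {
//     Node* n = select(block, worklist, ready_cnt, next_call, phi_cnt);
//     block->map_node(n, phi_cnt++);          // emit in final order
//     for (each user m of n inside block) {   // release ready successors
//       if (--ready_cnt[m] == 0) worklist.push(m);
//     }
//   }
//
// select() prefers the greatest latency-to-block-end and breaks ties by
// input count; calls additionally get their kill projections via sched_call.
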
@@ -922,7 +931,7 @@
 }
 
 //------------------------------catch_cleanup_find_cloned_def------------------
-static Node *catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def_blk, PhaseCFG* cfg, int n_clone_idx) {
+Node* PhaseCFG::catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def_blk, int n_clone_idx) {
   assert( use_blk != def_blk, "Inter-block cleanup only");
 
   // The use is some block below the Catch.  Find and return the clone of the def
@@ -948,14 +957,14 @@
     // PhiNode, the PhiNode uses from the def and IT's uses need fixup.
     Node_Array inputs = new Node_List(Thread::current()->resource_area());
     for(uint k = 1; k < use_blk->num_preds(); k++) {
-      Block* block = cfg->get_block_for_node(use_blk->pred(k));
-      inputs.map(k, catch_cleanup_find_cloned_def(block, def, def_blk, cfg, n_clone_idx));
+      Block* block = get_block_for_node(use_blk->pred(k));
+      inputs.map(k, catch_cleanup_find_cloned_def(block, def, def_blk, n_clone_idx));
     }
 
     // Check to see if the use_blk already has an identical phi inserted.
     // If it exists, it will be at the first position since all uses of a
     // def are processed together.
-    Node *phi = use_blk->_nodes[1];
+    Node *phi = use_blk->get_node(1);
     if( phi->is_Phi() ) {
       fixup = phi;
       for (uint k = 1; k < use_blk->num_preds(); k++) {
@@ -970,8 +979,8 @@
     // If an existing PhiNode was not found, make a new one.
     if (fixup == NULL) {
       Node *new_phi = PhiNode::make(use_blk->head(), def);
-      use_blk->_nodes.insert(1, new_phi);
-      cfg->map_node_to_block(new_phi, use_blk);
+      use_blk->insert_node(new_phi, 1);
+      map_node_to_block(new_phi, use_blk);
       for (uint k = 1; k < use_blk->num_preds(); k++) {
         new_phi->set_req(k, inputs[k]);
       }
@@ -980,7 +989,7 @@
 
   } else {
     // Found the use just below the Catch.  Make it use the clone.
-    fixup = use_blk->_nodes[n_clone_idx];
+    fixup = use_blk->get_node(n_clone_idx);
   }
 
   return fixup;
@@ -1000,36 +1009,36 @@
   for( uint k = 0; k < blk->_num_succs; k++ ) {
     // Get clone in each successor block
     Block *sb = blk->_succs[k];
-    Node *clone = sb->_nodes[offset_idx+1];
+    Node *clone = sb->get_node(offset_idx+1);
     assert( clone->Opcode() == use->Opcode(), "" );
 
     // Make use-clone reference the def-clone
-    catch_cleanup_fix_all_inputs(clone, def, sb->_nodes[n_clone_idx]);
+    catch_cleanup_fix_all_inputs(clone, def, sb->get_node(n_clone_idx));
   }
 }
 
 //------------------------------catch_cleanup_inter_block---------------------
 // Fix all input edges in use that reference "def".  The use is in a different
 // block than the def.
-static void catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, PhaseCFG* cfg, int n_clone_idx) {
+void PhaseCFG::catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, int n_clone_idx) {
   if( !use_blk ) return;        // Can happen if the use is a precedence edge
 
-  Node *new_def = catch_cleanup_find_cloned_def(use_blk, def, def_blk, cfg, n_clone_idx);
+  Node *new_def = catch_cleanup_find_cloned_def(use_blk, def, def_blk, n_clone_idx);
   catch_cleanup_fix_all_inputs(use, def, new_def);
 }
 
 //------------------------------call_catch_cleanup-----------------------------
 // If we inserted any instructions between a Call and his CatchNode,
 // clone the instructions on all paths below the Catch.
-void Block::call_catch_cleanup(PhaseCFG* cfg, Compile* C) {
+void PhaseCFG::call_catch_cleanup(Block* block) {
 
   // End of region to clone
-  uint end = end_idx();
-  if( !_nodes[end]->is_Catch() ) return;
+  uint end = block->end_idx();
+  if( !block->get_node(end)->is_Catch() ) return;
   // Start of region to clone
   uint beg = end;
-  while(!_nodes[beg-1]->is_MachProj() ||
-        !_nodes[beg-1]->in(0)->is_MachCall() ) {
+  while(!block->get_node(beg-1)->is_MachProj() ||
+        !block->get_node(beg-1)->in(0)->is_MachCall() ) {
     beg--;
     assert(beg > 0,"Catch cleanup walking beyond block boundary");
   }
@@ -1038,15 +1047,15 @@
 
   // Clone along all Catch output paths.  Clone area between the 'beg' and
   // 'end' indices.
-  for( uint i = 0; i < _num_succs; i++ ) {
-    Block *sb = _succs[i];
+  for( uint i = 0; i < block->_num_succs; i++ ) {
+    Block *sb = block->_succs[i];
     // Clone the entire area; ignoring the edge fixup for now.
     for( uint j = end; j > beg; j-- ) {
       // It is safe here to clone a node with anti_dependence
       // since clones dominate on each path.
-      Node *clone = _nodes[j-1]->clone();
-      sb->_nodes.insert( 1, clone );
-      cfg->map_node_to_block(clone, sb);
+      Node *clone = block->get_node(j-1)->clone();
+      sb->insert_node(clone, 1);
+      map_node_to_block(clone, sb);
     }
   }
 
@@ -1054,7 +1063,7 @@
   // Fixup edges.  Check the def-use info per cloned Node
   for(uint i2 = beg; i2 < end; i2++ ) {
     uint n_clone_idx = i2-beg+1; // Index of clone of n in each successor block
-    Node *n = _nodes[i2];        // Node that got cloned
+    Node *n = block->get_node(i2);        // Node that got cloned
     // Need DU safe iterator because of edge manipulation in calls.
     Unique_Node_List *out = new Unique_Node_List(Thread::current()->resource_area());
     for (DUIterator_Fast j1max, j1 = n->fast_outs(j1max); j1 < j1max; j1++) {
@@ -1063,19 +1072,19 @@
     uint max = out->size();
     for (uint j = 0; j < max; j++) {// For all users
       Node *use = out->pop();
-      Block *buse = cfg->get_block_for_node(use);
+      Block *buse = get_block_for_node(use);
       if( use->is_Phi() ) {
         for( uint k = 1; k < use->req(); k++ )
           if( use->in(k) == n ) {
-            Block* block = cfg->get_block_for_node(buse->pred(k));
-            Node *fixup = catch_cleanup_find_cloned_def(block, n, this, cfg, n_clone_idx);
+            Block* b = get_block_for_node(buse->pred(k));
+            Node *fixup = catch_cleanup_find_cloned_def(b, n, block, n_clone_idx);
             use->set_req(k, fixup);
           }
       } else {
-        if (this == buse) {
-          catch_cleanup_intra_block(use, n, this, beg, n_clone_idx);
+        if (block == buse) {
+          catch_cleanup_intra_block(use, n, block, beg, n_clone_idx);
         } else {
-          catch_cleanup_inter_block(use, buse, n, this, cfg, n_clone_idx);
+          catch_cleanup_inter_block(use, buse, n, block, n_clone_idx);
         }
       }
     } // End for all users
@@ -1084,30 +1093,30 @@
 
   // Remove the now-dead cloned ops
   for(uint i3 = beg; i3 < end; i3++ ) {
-    _nodes[beg]->disconnect_inputs(NULL, C);
-    _nodes.remove(beg);
+    block->get_node(beg)->disconnect_inputs(NULL, C);
+    block->remove_node(beg);
   }
 
   // If the successor blocks have a CreateEx node, move it back to the top
-  for(uint i4 = 0; i4 < _num_succs; i4++ ) {
-    Block *sb = _succs[i4];
+  for(uint i4 = 0; i4 < block->_num_succs; i4++ ) {
+    Block *sb = block->_succs[i4];
     uint new_cnt = end - beg;
     // Remove any newly created, but dead, nodes.
     for( uint j = new_cnt; j > 0; j-- ) {
-      Node *n = sb->_nodes[j];
+      Node *n = sb->get_node(j);
       if (n->outcnt() == 0 &&
           (!n->is_Proj() || n->as_Proj()->in(0)->outcnt() == 1) ){
         n->disconnect_inputs(NULL, C);
-        sb->_nodes.remove(j);
+        sb->remove_node(j);
         new_cnt--;
       }
     }
     // If any newly created nodes remain, move the CreateEx node to the top
     if (new_cnt > 0) {
-      Node *cex = sb->_nodes[1+new_cnt];
+      Node *cex = sb->get_node(1+new_cnt);
       if( cex->is_Mach() && cex->as_Mach()->ideal_Opcode() == Op_CreateEx ) {
-        sb->_nodes.remove(1+new_cnt);
-        sb->_nodes.insert(1,cex);
+        sb->remove_node(1+new_cnt);
+        sb->insert_node(cex, 1);
       }
     }
   }
--- a/src/share/vm/opto/library_call.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/library_call.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -32,6 +32,7 @@
 #include "opto/callGenerator.hpp"
 #include "opto/cfgnode.hpp"
 #include "opto/idealKit.hpp"
+#include "opto/mathexactnode.hpp"
 #include "opto/mulnode.hpp"
 #include "opto/parse.hpp"
 #include "opto/runtime.hpp"
@@ -46,20 +47,23 @@
  private:
   bool             _is_virtual;
   bool             _is_predicted;
+  bool             _does_virtual_dispatch;
   vmIntrinsics::ID _intrinsic_id;
 
  public:
-  LibraryIntrinsic(ciMethod* m, bool is_virtual, bool is_predicted, vmIntrinsics::ID id)
+  LibraryIntrinsic(ciMethod* m, bool is_virtual, bool is_predicted, bool does_virtual_dispatch, vmIntrinsics::ID id)
     : InlineCallGenerator(m),
       _is_virtual(is_virtual),
       _is_predicted(is_predicted),
+      _does_virtual_dispatch(does_virtual_dispatch),
       _intrinsic_id(id)
   {
   }
   virtual bool is_intrinsic() const { return true; }
   virtual bool is_virtual()   const { return _is_virtual; }
   virtual bool is_predicted()   const { return _is_predicted; }
-  virtual JVMState* generate(JVMState* jvms);
+  virtual bool does_virtual_dispatch()   const { return _does_virtual_dispatch; }
+  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
   virtual Node* generate_predicate(JVMState* jvms);
   vmIntrinsics::ID intrinsic_id() const { return _intrinsic_id; }
 };
@@ -199,6 +203,15 @@
   bool inline_math_native(vmIntrinsics::ID id);
   bool inline_trig(vmIntrinsics::ID id);
   bool inline_math(vmIntrinsics::ID id);
+  void inline_math_mathExact(Node* math);
+  bool inline_math_addExactI(bool is_increment);
+  bool inline_math_addExactL(bool is_increment);
+  bool inline_math_multiplyExactI();
+  bool inline_math_multiplyExactL();
+  bool inline_math_negateExactI();
+  bool inline_math_negateExactL();
+  bool inline_math_subtractExactI(bool is_decrement);
+  bool inline_math_subtractExactL(bool is_decrement);
   bool inline_exp();
   bool inline_pow();
   void finish_pow_exp(Node* result, Node* x, Node* y, const TypeFunc* call_type, address funcAddr, const char* funcName);
@@ -352,6 +365,7 @@
   }
 
   bool is_predicted = false;
+  bool does_virtual_dispatch = false;
 
   switch (id) {
   case vmIntrinsics::_compareTo:
@@ -378,8 +392,10 @@
     break;
   case vmIntrinsics::_hashCode:
     if (!InlineObjectHash)  return NULL;
+    does_virtual_dispatch = true;
     break;
   case vmIntrinsics::_clone:
+    does_virtual_dispatch = true;
   case vmIntrinsics::_copyOf:
   case vmIntrinsics::_copyOfRange:
     if (!InlineObjectCopy)  return NULL;
@@ -498,6 +514,35 @@
     if (!UseCRC32Intrinsics) return NULL;
     break;
 
+  case vmIntrinsics::_incrementExactI:
+  case vmIntrinsics::_addExactI:
+    if (!Matcher::match_rule_supported(Op_AddExactI) || !UseMathExactIntrinsics) return NULL;
+    break;
+  case vmIntrinsics::_incrementExactL:
+  case vmIntrinsics::_addExactL:
+    if (!Matcher::match_rule_supported(Op_AddExactL) || !UseMathExactIntrinsics) return NULL;
+    break;
+  case vmIntrinsics::_decrementExactI:
+  case vmIntrinsics::_subtractExactI:
+    if (!Matcher::match_rule_supported(Op_SubExactI) || !UseMathExactIntrinsics) return NULL;
+    break;
+  case vmIntrinsics::_decrementExactL:
+  case vmIntrinsics::_subtractExactL:
+    if (!Matcher::match_rule_supported(Op_SubExactL) || !UseMathExactIntrinsics) return NULL;
+    break;
+  case vmIntrinsics::_negateExactI:
+    if (!Matcher::match_rule_supported(Op_NegExactI) || !UseMathExactIntrinsics) return NULL;
+    break;
+  case vmIntrinsics::_negateExactL:
+    if (!Matcher::match_rule_supported(Op_NegExactL) || !UseMathExactIntrinsics) return NULL;
+    break;
+  case vmIntrinsics::_multiplyExactI:
+    if (!Matcher::match_rule_supported(Op_MulExactI) || !UseMathExactIntrinsics) return NULL;
+    break;
+  case vmIntrinsics::_multiplyExactL:
+    if (!Matcher::match_rule_supported(Op_MulExactL) || !UseMathExactIntrinsics) return NULL;
+    break;
+
  default:
     assert(id <= vmIntrinsics::LAST_COMPILER_INLINE, "caller responsibility");
     assert(id != vmIntrinsics::_Object_init && id != vmIntrinsics::_invoke, "enum out of order?");
@@ -529,7 +574,7 @@
     if (!InlineUnsafeOps)  return NULL;
   }
 
-  return new LibraryIntrinsic(m, is_virtual, is_predicted, (vmIntrinsics::ID) id);
+  return new LibraryIntrinsic(m, is_virtual, is_predicted, does_virtual_dispatch, (vmIntrinsics::ID) id);
 }
 
 //----------------------register_library_intrinsics-----------------------
@@ -538,12 +583,12 @@
   // Nothing to do here.
 }
 
-JVMState* LibraryIntrinsic::generate(JVMState* jvms) {
+JVMState* LibraryIntrinsic::generate(JVMState* jvms, Parse* parent_parser) {
   LibraryCallKit kit(jvms, this);
   Compile* C = kit.C;
   int nodes = C->unique();
 #ifndef PRODUCT
-  if ((PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) && Verbose) {
+  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
     char buf[1000];
     const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
     tty->print_cr("Intrinsic %s", str);
@@ -554,7 +599,7 @@
 
   // Try to inline the intrinsic.
   if (kit.try_to_inline()) {
-    if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
+    if (C->print_intrinsics() || C->print_inlining()) {
       C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
     }
     C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
@@ -570,7 +615,7 @@
   }
 
   // The intrinsic bailed out
-  if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
+  if (C->print_intrinsics() || C->print_inlining()) {
     if (jvms->has_method()) {
       // Not a root compile.
       const char* msg = is_virtual() ? "failed to inline (intrinsic, virtual)" : "failed to inline (intrinsic)";
@@ -592,7 +637,7 @@
   int nodes = C->unique();
 #ifndef PRODUCT
   assert(is_predicted(), "sanity");
-  if ((PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) && Verbose) {
+  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
     char buf[1000];
     const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
     tty->print_cr("Predicate for intrinsic %s", str);
@@ -603,7 +648,7 @@
 
   Node* slow_ctl = kit.try_to_predicate();
   if (!kit.failing()) {
-    if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
+    if (C->print_intrinsics() || C->print_inlining()) {
       C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
     }
     C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
@@ -617,7 +662,7 @@
   }
 
   // The intrinsic bailed out
-  if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
+  if (C->print_intrinsics() || C->print_inlining()) {
     if (jvms->has_method()) {
       // Not a root compile.
       const char* msg = "failed to generate predicate for intrinsic";
@@ -668,6 +713,19 @@
   case vmIntrinsics::_min:
   case vmIntrinsics::_max:                      return inline_min_max(intrinsic_id());
 
+  case vmIntrinsics::_addExactI:                return inline_math_addExactI(false /* add */);
+  case vmIntrinsics::_addExactL:                return inline_math_addExactL(false /* add */);
+  case vmIntrinsics::_decrementExactI:          return inline_math_subtractExactI(true /* decrement */);
+  case vmIntrinsics::_decrementExactL:          return inline_math_subtractExactL(true /* decrement */);
+  case vmIntrinsics::_incrementExactI:          return inline_math_addExactI(true /* increment */);
+  case vmIntrinsics::_incrementExactL:          return inline_math_addExactL(true /* increment */);
+  case vmIntrinsics::_multiplyExactI:           return inline_math_multiplyExactI();
+  case vmIntrinsics::_multiplyExactL:           return inline_math_multiplyExactL();
+  case vmIntrinsics::_negateExactI:             return inline_math_negateExactI();
+  case vmIntrinsics::_negateExactL:             return inline_math_negateExactL();
+  case vmIntrinsics::_subtractExactI:           return inline_math_subtractExactI(false /* subtract */);
+  case vmIntrinsics::_subtractExactL:           return inline_math_subtractExactL(false /* subtract */);
+
   case vmIntrinsics::_arraycopy:                return inline_arraycopy();
 
   case vmIntrinsics::_compareTo:                return inline_string_compareTo();
@@ -1280,6 +1338,11 @@
   const TypeAry* target_array_type = TypeAry::make(TypeInt::CHAR, TypeInt::make(0, target_length, Type::WidenMin));
   const TypeAryPtr* target_type = TypeAryPtr::make(TypePtr::BotPTR, target_array_type, target_array->klass(), true, Type::OffsetBot);
 
+  // String.value field is known to be @Stable.
+  if (UseImplicitStableValues) {
+    target = cast_array_to_stable(target, target_type);
+  }
+
   IdealKit kit(this, false, true);
 #define __ kit.
   Node* zero             = __ ConI(0);
@@ -1906,6 +1969,139 @@
   return true;
 }
 
+void LibraryCallKit::inline_math_mathExact(Node* math) {
+  // If we didn't get the expected opcode, it means we have optimized
+  // the node to something else and don't need the exception edge.
+  if (!math->is_MathExact()) {
+    set_result(math);
+    return;
+  }
+
+  Node* result = _gvn.transform( new(C) ProjNode(math, MathExactNode::result_proj_node));
+  Node* flags = _gvn.transform( new(C) FlagsProjNode(math, MathExactNode::flags_proj_node));
+
+  Node* bol = _gvn.transform( new (C) BoolNode(flags, BoolTest::overflow) );
+  IfNode* check = create_and_map_if(control(), bol, PROB_UNLIKELY_MAG(3), COUNT_UNKNOWN);
+  Node* fast_path = _gvn.transform( new (C) IfFalseNode(check));
+  Node* slow_path = _gvn.transform( new (C) IfTrueNode(check) );
+
+  {
+    PreserveJVMState pjvms(this);
+    PreserveReexecuteState preexecs(this);
+    jvms()->set_should_reexecute(true);
+
+    set_control(slow_path);
+    set_i_o(i_o());
+
+    uncommon_trap(Deoptimization::Reason_intrinsic,
+                  Deoptimization::Action_none);
+  }
+
+  set_control(fast_path);
+  set_result(result);
+}
+
+bool LibraryCallKit::inline_math_addExactI(bool is_increment) {
+  Node* arg1 = argument(0);
+  Node* arg2 = NULL;
+
+  if (is_increment) {
+    arg2 = intcon(1);
+  } else {
+    arg2 = argument(1);
+  }
+
+  Node* add = _gvn.transform( new(C) AddExactINode(NULL, arg1, arg2) );
+  inline_math_mathExact(add);
+  return true;
+}
+
+bool LibraryCallKit::inline_math_addExactL(bool is_increment) {
+  Node* arg1 = argument(0); // type long
+  // argument(1) == TOP
+  Node* arg2 = NULL;
+
+  if (is_increment) {
+    arg2 = longcon(1);
+  } else {
+    arg2 = argument(2); // type long
+    // argument(3) == TOP
+  }
+
+  Node* add = _gvn.transform(new(C) AddExactLNode(NULL, arg1, arg2));
+  inline_math_mathExact(add);
+  return true;
+}
+
+bool LibraryCallKit::inline_math_subtractExactI(bool is_decrement) {
+  Node* arg1 = argument(0);
+  Node* arg2 = NULL;
+
+  if (is_decrement) {
+    arg2 = intcon(1);
+  } else {
+    arg2 = argument(1);
+  }
+
+  Node* sub = _gvn.transform(new(C) SubExactINode(NULL, arg1, arg2));
+  inline_math_mathExact(sub);
+  return true;
+}
+
+bool LibraryCallKit::inline_math_subtractExactL(bool is_decrement) {
+  Node* arg1 = argument(0); // type long
+  // argument(1) == TOP
+  Node* arg2 = NULL;
+
+  if (is_decrement) {
+    arg2 = longcon(1);
+  } else {
+    arg2 = argument(2); // type long
+    // argument(3) == TOP
+  }
+
+  Node* sub = _gvn.transform(new(C) SubExactLNode(NULL, arg1, arg2));
+  inline_math_mathExact(sub);
+  return true;
+}
+
+bool LibraryCallKit::inline_math_negateExactI() {
+  Node* arg1 = argument(0);
+
+  Node* neg = _gvn.transform(new(C) NegExactINode(NULL, arg1));
+  inline_math_mathExact(neg);
+  return true;
+}
+
+bool LibraryCallKit::inline_math_negateExactL() {
+  Node* arg1 = argument(0);
+  // argument(1) == TOP
+
+  Node* neg = _gvn.transform(new(C) NegExactLNode(NULL, arg1));
+  inline_math_mathExact(neg);
+  return true;
+}
+
+bool LibraryCallKit::inline_math_multiplyExactI() {
+  Node* arg1 = argument(0);
+  Node* arg2 = argument(1);
+
+  Node* mul = _gvn.transform(new(C) MulExactINode(NULL, arg1, arg2));
+  inline_math_mathExact(mul);
+  return true;
+}
+
+bool LibraryCallKit::inline_math_multiplyExactL() {
+  Node* arg1 = argument(0);
+  // argument(1) == TOP
+  Node* arg2 = argument(2);
+  // argument(3) == TOP
+
+  Node* mul = _gvn.transform(new(C) MulExactLNode(NULL, arg1, arg2));
+  inline_math_mathExact(mul);
+  return true;
+}
+
 Node*
 LibraryCallKit::generate_min_max(vmIntrinsics::ID id, Node* x0, Node* y0) {
   // These are the candidate return value:
@@ -2294,7 +2490,7 @@
     const TypeOopPtr* tjp = TypeOopPtr::make_from_klass(sharpened_klass);
 
 #ifndef PRODUCT
-    if (PrintIntrinsics || PrintInlining || PrintOptoInlining) {
+    if (C->print_intrinsics() || C->print_inlining()) {
       tty->print("  from base type: ");  adr_type->dump();
       tty->print("  sharpened value: ");  tjp->dump();
     }
@@ -2756,10 +2952,28 @@
       newval = _gvn.makecon(TypePtr::NULL_PTR);
 
     // Reference stores need a store barrier.
-    pre_barrier(true /* do_load*/,
-                control(), base, adr, alias_idx, newval, value_type->make_oopptr(),
-                NULL /* pre_val*/,
-                T_OBJECT);
+    if (kind == LS_xchg) {
+      // If the pre-barrier must execute before the oop store, the old value requires do_load here.
+      if (!can_move_pre_barrier()) {
+        pre_barrier(true /* do_load*/,
+                    control(), base, adr, alias_idx, newval, value_type->make_oopptr(),
+                    NULL /* pre_val*/,
+                    T_OBJECT);
+      } // Else move pre_barrier to use load_store value, see below.
+    } else if (kind == LS_cmpxchg) {
+      // Same as for newval above:
+      if (_gvn.type(oldval) == TypePtr::NULL_PTR) {
+        oldval = _gvn.makecon(TypePtr::NULL_PTR);
+      }
+      // The only known value which might get overwritten is oldval.
+      pre_barrier(false /* do_load */,
+                  control(), NULL, NULL, max_juint, NULL, NULL,
+                  oldval /* pre_val */,
+                  T_OBJECT);
+    } else {
+      ShouldNotReachHere();
+    }
+
 #ifdef _LP64
     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
       Node *newval_enc = _gvn.transform(new (C) EncodePNode(newval, newval->bottom_type()->make_narrowoop()));
@@ -2795,16 +3009,27 @@
   Node* proj = _gvn.transform(new (C) SCMemProjNode(load_store));
   set_memory(proj, alias_idx);
 
+  if (type == T_OBJECT && kind == LS_xchg) {
+#ifdef _LP64
+    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
+      load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->get_ptr_type()));
+    }
+#endif
+    if (can_move_pre_barrier()) {
+      // Don't need to load pre_val. The old value is returned by load_store.
+      // The pre_barrier can execute after the xchg as long as no safepoint
+      // gets inserted between them.
+      pre_barrier(false /* do_load */,
+                  control(), NULL, NULL, max_juint, NULL, NULL,
+                  load_store /* pre_val */,
+                  T_OBJECT);
+    }
+  }
+
   // Add the trailing membar surrounding the access
   insert_mem_bar(Op_MemBarCPUOrder);
   insert_mem_bar(Op_MemBarAcquire);
 
-#ifdef _LP64
-  if (type == T_OBJECT && adr->bottom_type()->is_ptr_to_narrowoop() && kind == LS_xchg) {
-    load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->get_ptr_type()));
-  }
-#endif
-
   assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
   set_result(load_store);
   return true;
@@ -3226,7 +3451,7 @@
   if (mirror_con == NULL)  return false;  // cannot happen?
 
 #ifndef PRODUCT
-  if (PrintIntrinsics || PrintInlining || PrintOptoInlining) {
+  if (C->print_intrinsics() || C->print_inlining()) {
     ciType* k = mirror_con->java_mirror_type();
     if (k) {
       tty->print("Inlining %s on constant Class ", vmIntrinsics::name_at(intrinsic_id()));
@@ -3260,6 +3485,7 @@
   // If kls is null, we have a primitive mirror.
   phi->init_req(_prim_path, prim_return_value);
   if (stopped()) { set_result(region, phi); return true; }
+  bool safe_for_replace = (region->in(_prim_path) == top());
 
   Node* p;  // handy temp
   Node* null_ctl;
@@ -3270,7 +3496,7 @@
   switch (id) {
   case vmIntrinsics::_isInstance:
     // nothing is an instance of a primitive type
-    query_value = gen_instanceof(obj, kls);
+    query_value = gen_instanceof(obj, kls, safe_for_replace);
     break;
 
   case vmIntrinsics::_getModifiers:
@@ -3700,6 +3926,8 @@
                                              RegionNode* slow_region) {
   ciMethod* method = callee();
   int vtable_index = method->vtable_index();
+  assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
+         err_msg_res("bad index %d", vtable_index));
   // Get the Method* out of the appropriate vtable entry.
   int entry_offset  = (InstanceKlass::vtable_start_offset() +
                      vtable_index*vtableEntry::size()) * wordSize +
@@ -3750,6 +3978,8 @@
       // so the vtable index is fixed.
       // No need to use the linkResolver to get it.
        vtable_index = method->vtable_index();
+       assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
+              err_msg_res("bad index %d", vtable_index));
     }
     slow_call = new(C) CallDynamicJavaNode(tf,
                           SharedRuntime::get_resolve_virtual_call_stub(),
@@ -3914,14 +4144,14 @@
 // caller sensitive methods.
 bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
 #ifndef PRODUCT
-  if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
     tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
   }
 #endif
 
   if (!jvms()->has_method()) {
 #ifndef PRODUCT
-    if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+    if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
       tty->print_cr("  Bailing out because intrinsic was inlined at top level");
     }
 #endif
@@ -3945,7 +4175,7 @@
       // Frame 0 and 1 must be caller sensitive (see JVM_GetCallerClass).
       if (!m->caller_sensitive()) {
 #ifndef PRODUCT
-        if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+        if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
           tty->print_cr("  Bailing out: CallerSensitive annotation expected at frame %d", n);
         }
 #endif
@@ -3961,7 +4191,7 @@
         set_result(makecon(TypeInstPtr::make(caller_mirror)));
 
 #ifndef PRODUCT
-        if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+        if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
           tty->print_cr("  Succeeded: caller = %d) %s.%s, JVMS depth = %d", n, caller_klass->name()->as_utf8(), caller_jvms->method()->name()->as_utf8(), jvms()->depth());
           tty->print_cr("  JVM state at this point:");
           for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
@@ -3977,7 +4207,7 @@
   }
 
 #ifndef PRODUCT
-  if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
     tty->print_cr("  Bailing out because caller depth exceeded inlining depth = %d", jvms()->depth());
     tty->print_cr("  JVM state at this point:");
     for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
@@ -4170,7 +4400,7 @@
   // 12 - 64-bit VM, compressed klass
   // 16 - 64-bit VM, normal klass
   if (base_off % BytesPerLong != 0) {
-    assert(UseCompressedKlassPointers, "");
+    assert(UseCompressedClassPointers, "");
     if (is_array) {
      // Exclude the length field so the copy proceeds in 8-byte words.
       base_off += sizeof(int);
@@ -4456,8 +4686,62 @@
   const Type* dest_type = dest->Value(&_gvn);
   const TypeAryPtr* top_src  = src_type->isa_aryptr();
   const TypeAryPtr* top_dest = dest_type->isa_aryptr();
-  if (top_src  == NULL || top_src->klass()  == NULL ||
-      top_dest == NULL || top_dest->klass() == NULL) {
+
+  // Do we have the type of src?
+  bool has_src = (top_src != NULL && top_src->klass() != NULL);
+  // Do we have the type of dest?
+  bool has_dest = (top_dest != NULL && top_dest->klass() != NULL);
+  // Is the type for src from speculation?
+  bool src_spec = false;
+  // Is the type for dest from speculation?
+  bool dest_spec = false;
+
+  if (!has_src || !has_dest) {
+    // We don't have sufficient type information; let's see if
+    // speculative types can help. We need types for both src
+    // and dest for this to pay off.
+
+    // Do we already have, or could we have, type information for src?
+    bool could_have_src = has_src;
+    // Do we already have, or could we have, type information for dest?
+    bool could_have_dest = has_dest;
+
+    ciKlass* src_k = NULL;
+    if (!has_src) {
+      src_k = src_type->speculative_type();
+      if (src_k != NULL && src_k->is_array_klass()) {
+        could_have_src = true;
+      }
+    }
+
+    ciKlass* dest_k = NULL;
+    if (!has_dest) {
+      dest_k = dest_type->speculative_type();
+      if (dest_k != NULL && dest_k->is_array_klass()) {
+        could_have_dest = true;
+      }
+    }
+
+    if (could_have_src && could_have_dest) {
+      // This is going to pay off so emit the required guards
+      if (!has_src) {
+        src = maybe_cast_profiled_obj(src, src_k);
+        src_type  = _gvn.type(src);
+        top_src  = src_type->isa_aryptr();
+        has_src = (top_src != NULL && top_src->klass() != NULL);
+        src_spec = true;
+      }
+      if (!has_dest) {
+        dest = maybe_cast_profiled_obj(dest, dest_k);
+        dest_type  = _gvn.type(dest);
+        top_dest  = dest_type->isa_aryptr();
+        has_dest = (top_dest != NULL && top_dest->klass() != NULL);
+        dest_spec = true;
+      }
+    }
+  }
+
+  if (!has_src || !has_dest) {
     // Conservatively insert a memory barrier on all memory slices.
     // Do not let writes into the source float below the arraycopy.
     insert_mem_bar(Op_MemBarCPUOrder);
@@ -4492,6 +4776,40 @@
     return true;
   }
 
+  if (src_elem == T_OBJECT) {
+    // If both arrays are object arrays, having the exact types
+    // for both removes the need for a subtype check at runtime
+    // before the call and may make it possible to pick a faster copy
+    // routine (one without a subtype check on every element).
+    // Do we have the exact type of src?
+    bool could_have_src = src_spec;
+    // Do we have the exact type of dest?
+    bool could_have_dest = dest_spec;
+    ciKlass* src_k = top_src->klass();
+    ciKlass* dest_k = top_dest->klass();
+    if (!src_spec) {
+      src_k = src_type->speculative_type();
+      if (src_k != NULL && src_k->is_array_klass()) {
+        could_have_src = true;
+      }
+    }
+    if (!dest_spec) {
+      dest_k = dest_type->speculative_type();
+      if (dest_k != NULL && dest_k->is_array_klass()) {
+        could_have_dest = true;
+      }
+    }
+    if (could_have_src && could_have_dest) {
+      // If we can have both exact types, emit the missing guards
+      if (could_have_src && !src_spec) {
+        src = maybe_cast_profiled_obj(src, src_k);
+      }
+      if (could_have_dest && !dest_spec) {
+        dest = maybe_cast_profiled_obj(dest, dest_k);
+      }
+    }
+  }
+
   //---------------------------------------------------------------------------
   // We will make a fast path for this call to arraycopy.
 
--- a/src/share/vm/opto/live.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/live.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -85,13 +85,13 @@
     IndexSet* def = &_defs[block->_pre_order-1];
     DEBUG_ONLY(IndexSet *def_outside = getfreeset();)
     uint i;
-    for (i = block->_nodes.size(); i > 1; i--) {
-      Node* n = block->_nodes[i-1];
+    for (i = block->number_of_nodes(); i > 1; i--) {
+      Node* n = block->get_node(i-1);
       if (n->is_Phi()) {
         break;
       }
 
-      uint r = _names[n->_idx];
+      uint r = _names.at(n->_idx);
       assert(!def_outside->member(r), "Use of external LRG overlaps the same LRG defined in this block");
       def->insert( r );
       use->remove( r );
@@ -100,7 +100,7 @@
         Node *nk = n->in(k);
         uint nkidx = nk->_idx;
         if (_cfg.get_block_for_node(nk) != block) {
-          uint u = _names[nkidx];
+          uint u = _names.at(nkidx);
           use->insert(u);
           DEBUG_ONLY(def_outside->insert(u);)
         }
@@ -112,7 +112,7 @@
 #endif
     // Remove anything defined by Phis and the block start instruction
     for (uint k = i; k > 0; k--) {
-      uint r = _names[block->_nodes[k - 1]->_idx];
+      uint r = _names.at(block->get_node(k - 1)->_idx);
       def->insert(r);
       use->remove(r);
     }
@@ -124,7 +124,7 @@
 
       // PhiNode uses go in the live-out set of prior blocks.
       for (uint k = i; k > 0; k--) {
-        add_liveout(p, _names[block->_nodes[k-1]->in(l)->_idx], first_pass);
+        add_liveout(p, _names.at(block->get_node(k-1)->in(l)->_idx), first_pass);
       }
     }
     freeset(block);
@@ -254,10 +254,10 @@
 void PhaseLive::dump( const Block *b ) const {
   tty->print("Block %d: ",b->_pre_order);
   tty->print("LiveOut: ");  _live[b->_pre_order-1].dump();
-  uint cnt = b->_nodes.size();
+  uint cnt = b->number_of_nodes();
   for( uint i=0; i<cnt; i++ ) {
-    tty->print("L%d/", _names[b->_nodes[i]->_idx] );
-    b->_nodes[i]->dump();
+    tty->print("L%d/", _names.at(b->get_node(i)->_idx));
+    b->get_node(i)->dump();
   }
   tty->print("\n");
 }
@@ -269,7 +269,7 @@
   for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
     Block* block = _cfg.get_block(i);
     for (uint j = block->end_idx() + 1; j > 1; j--) {
-      Node* n = block->_nodes[j-1];
+      Node* n = block->get_node(j-1);
       if (n->is_Phi()) {
         break;
       }
@@ -321,7 +321,7 @@
 #ifdef _LP64
                       UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_CastPP ||
                       UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_DecodeN ||
-                      UseCompressedKlassPointers && check->as_Mach()->ideal_Opcode() == Op_DecodeNKlass ||
+                      UseCompressedClassPointers && check->as_Mach()->ideal_Opcode() == Op_DecodeNKlass ||
 #endif
                       check->as_Mach()->ideal_Opcode() == Op_LoadP ||
                       check->as_Mach()->ideal_Opcode() == Op_LoadKlass)) {
--- a/src/share/vm/opto/live.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/live.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -40,27 +40,7 @@
 //------------------------------LRG_List---------------------------------------
 // Map Node indices to Live RanGe indices.
 // Array lookup in the optimized case.
-class LRG_List : public ResourceObj {
-  friend class VMStructs;
-  uint _cnt, _max;
-  uint* _lidxs;
-  ReallocMark _nesting;         // assertion check for reallocations
-public:
-  LRG_List( uint max );
-
-  uint lookup( uint nidx ) const {
-    return _lidxs[nidx];
-  }
-  uint operator[] (uint nidx) const { return lookup(nidx); }
-
-  void map( uint nidx, uint lidx ) {
-    assert( nidx < _cnt, "oob" );
-    _lidxs[nidx] = lidx;
-  }
-  void extend( uint nidx, uint lidx );
-
-  uint Size() const { return _cnt; }
-};
+typedef GrowableArray<uint> LRG_List;
 
 //------------------------------PhaseLive--------------------------------------
 // Compute live-in/live-out
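
Annotation: LRG_List shrinks from a hand-rolled resizable array to a plain typedef, and the live.cpp hunks above correspondingly switch lookups from _names[idx] to _names.at(idx). A minimal stand-in for the slice of the GrowableArray API those call sites rely on (the real template is in utilities/growableArray.hpp; this is illustrative only):

#include <cassert>
#include <vector>

template <typename E>
class GrowableArraySketch {
  std::vector<E> _data;
public:
  void append(const E& e) { _data.push_back(e); }
  // Bounds-checked lookup, the operation the converted call sites use.
  E at(int i) const { assert(i >= 0 && (size_t)i < _data.size()); return _data[(size_t)i]; }
};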
--- a/src/share/vm/opto/loopPredicate.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/loopPredicate.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -41,63 +41,6 @@
  * checks (such as null checks).
 */
 
-//-------------------------------is_uncommon_trap_proj----------------------------
-// Return true if proj is the form of "proj->[region->..]call_uct"
-bool PhaseIdealLoop::is_uncommon_trap_proj(ProjNode* proj, Deoptimization::DeoptReason reason) {
-  int path_limit = 10;
-  assert(proj, "invalid argument");
-  Node* out = proj;
-  for (int ct = 0; ct < path_limit; ct++) {
-    out = out->unique_ctrl_out();
-    if (out == NULL)
-      return false;
-    if (out->is_CallStaticJava()) {
-      int req = out->as_CallStaticJava()->uncommon_trap_request();
-      if (req != 0) {
-        Deoptimization::DeoptReason trap_reason = Deoptimization::trap_request_reason(req);
-        if (trap_reason == reason || reason == Deoptimization::Reason_none) {
-           return true;
-        }
-      }
-      return false; // don't do further after call
-    }
-    if (out->Opcode() != Op_Region)
-      return false;
-  }
-  return false;
-}
-
-//-------------------------------is_uncommon_trap_if_pattern-------------------------
-// Return true  for "if(test)-> proj -> ...
-//                          |
-//                          V
-//                      other_proj->[region->..]call_uct"
-//
-// "must_reason_predicate" means the uct reason must be Reason_predicate
-bool PhaseIdealLoop::is_uncommon_trap_if_pattern(ProjNode *proj, Deoptimization::DeoptReason reason) {
-  Node *in0 = proj->in(0);
-  if (!in0->is_If()) return false;
-  // Variation of a dead If node.
-  if (in0->outcnt() < 2)  return false;
-  IfNode* iff = in0->as_If();
-
-  // we need "If(Conv2B(Opaque1(...)))" pattern for reason_predicate
-  if (reason != Deoptimization::Reason_none) {
-    if (iff->in(1)->Opcode() != Op_Conv2B ||
-       iff->in(1)->in(1)->Opcode() != Op_Opaque1) {
-      return false;
-    }
-  }
-
-  ProjNode* other_proj = iff->proj_out(1-proj->_con)->as_Proj();
-  if (is_uncommon_trap_proj(other_proj, reason)) {
-    assert(reason == Deoptimization::Reason_none ||
-           Compile::current()->is_predicate_opaq(iff->in(1)->in(1)), "should be on the list");
-    return true;
-  }
-  return false;
-}
-
 //-------------------------------register_control-------------------------
 void PhaseIdealLoop::register_control(Node* n, IdealLoopTree *loop, Node* pred) {
   assert(n->is_CFG(), "must be control node");
@@ -147,7 +90,7 @@
 // This code is also used to clone predicates to cloned loops.
 ProjNode* PhaseIdealLoop::create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry,
                                                       Deoptimization::DeoptReason reason) {
-  assert(is_uncommon_trap_if_pattern(cont_proj, reason), "must be a uct if pattern!");
+  assert(cont_proj->is_uncommon_trap_if_pattern(reason), "must be a uct if pattern!");
   IfNode* iff = cont_proj->in(0)->as_If();
 
   ProjNode *uncommon_proj = iff->proj_out(1 - cont_proj->_con);
@@ -235,7 +178,7 @@
 ProjNode* PhaseIterGVN::create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry,
                                                     Deoptimization::DeoptReason reason) {
   assert(new_entry != 0, "only used for clone predicate");
-  assert(PhaseIdealLoop::is_uncommon_trap_if_pattern(cont_proj, reason), "must be a uct if pattern!");
+  assert(cont_proj->is_uncommon_trap_if_pattern(reason), "must be a uct if pattern!");
   IfNode* iff = cont_proj->in(0)->as_If();
 
   ProjNode *uncommon_proj = iff->proj_out(1 - cont_proj->_con);
@@ -422,7 +365,7 @@
 ProjNode* PhaseIdealLoop::find_predicate_insertion_point(Node* start_c, Deoptimization::DeoptReason reason) {
   if (start_c == NULL || !start_c->is_Proj())
     return NULL;
-  if (is_uncommon_trap_if_pattern(start_c->as_Proj(), reason)) {
+  if (start_c->as_Proj()->is_uncommon_trap_if_pattern(reason)) {
     return start_c->as_Proj();
   }
   return NULL;
@@ -773,7 +716,7 @@
     ProjNode* proj = if_proj_list.pop()->as_Proj();
     IfNode*   iff  = proj->in(0)->as_If();
 
-    if (!is_uncommon_trap_if_pattern(proj, Deoptimization::Reason_none)) {
+    if (!proj->is_uncommon_trap_if_pattern(Deoptimization::Reason_none)) {
       if (loop->is_loop_exit(iff)) {
         // stop processing the remaining projs in the list because the execution of them
         // depends on the condition of "iff" (iff->in(1)).
--- a/src/share/vm/opto/loopTransform.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/loopTransform.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -713,6 +713,10 @@
       case Op_ModL: body_size += 30; break;
       case Op_DivL: body_size += 30; break;
       case Op_MulL: body_size += 10; break;
+      case Op_FlagsProj:
+        // Can't currently handle unrolling of loops containing
+        // nodes that generate a FlagsProj
+        return false;
       case Op_StrComp:
       case Op_StrEquals:
       case Op_StrIndexOf:
@@ -776,6 +780,9 @@
         continue; // not RC
 
       Node *cmp = bol->in(1);
+      if (cmp->is_FlagsProj()) {
+        continue;
+      }
 
       Node *rc_exp = cmp->in(1);
       Node *limit = cmp->in(2);
--- a/src/share/vm/opto/loopnode.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/loopnode.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -167,7 +167,7 @@
     // expensive nodes will notice the loop and skip over it to try to
     // move the node further up.
     if (ctl->is_CountedLoop() && ctl->in(1) != NULL && ctl->in(1)->in(0) != NULL && ctl->in(1)->in(0)->is_If()) {
-      if (!is_uncommon_trap_if_pattern(ctl->in(1)->as_Proj(), Deoptimization::Reason_none)) {
+      if (!ctl->in(1)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none)) {
         break;
       }
       next = idom(ctl->in(1)->in(0));
@@ -181,7 +181,7 @@
       } else if (parent_ctl->is_CountedLoopEnd() && parent_ctl->as_CountedLoopEnd()->loopnode() != NULL) {
         next = parent_ctl->as_CountedLoopEnd()->loopnode()->init_control();
       } else if (parent_ctl->is_If()) {
-        if (!is_uncommon_trap_if_pattern(ctl->as_Proj(), Deoptimization::Reason_none)) {
+        if (!ctl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none)) {
           break;
         }
         assert(idom(ctl) == parent_ctl, "strange");
--- a/src/share/vm/opto/loopnode.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/loopnode.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -876,13 +876,6 @@
   // Return true if exp is a scaled induction var plus (or minus) constant
   bool is_scaled_iv_plus_offset(Node* exp, Node* iv, int* p_scale, Node** p_offset, int depth = 0);
 
-  // Return true if proj is for "proj->[region->..]call_uct"
-  static bool is_uncommon_trap_proj(ProjNode* proj, Deoptimization::DeoptReason reason);
-  // Return true for    "if(test)-> proj -> ...
-  //                          |
-  //                          V
-  //                      other_proj->[region->..]call_uct"
-  static bool is_uncommon_trap_if_pattern(ProjNode* proj, Deoptimization::DeoptReason reason);
   // Create a new if above the uncommon_trap_if_pattern for the predicate to be promoted
   ProjNode* create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry,
                                         Deoptimization::DeoptReason reason);
--- a/src/share/vm/opto/loopopts.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/loopopts.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -238,7 +238,7 @@
   ProjNode* dp_proj  = dp->as_Proj();
   ProjNode* unc_proj = iff->as_If()->proj_out(1 - dp_proj->_con)->as_Proj();
   if (exclude_loop_predicate &&
-      is_uncommon_trap_proj(unc_proj, Deoptimization::Reason_predicate))
+      unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate))
     return; // Let IGVN transformation change control dependence.
 
   IdealLoopTree *old_loop = get_loop(dp);
@@ -2355,7 +2355,8 @@
         opc == Op_Catch     ||
         opc == Op_CatchProj ||
         opc == Op_Jump      ||
-        opc == Op_JumpProj) {
+        opc == Op_JumpProj  ||
+        opc == Op_FlagsProj) {
 #if !defined(PRODUCT)
       if (TracePartialPeeling) {
         tty->print_cr("\nExit control too complex: lp: %d", head->_idx);
--- a/src/share/vm/opto/machnode.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/machnode.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -341,7 +341,7 @@
       return TypePtr::BOTTOM;
     }
     // %%% make offset be intptr_t
-    assert(!Universe::heap()->is_in_reserved((oop)offset), "must be a raw ptr");
+    assert(!Universe::heap()->is_in_reserved(cast_to_oop(offset)), "must be a raw ptr");
     return TypeRawPtr::BOTTOM;
   }
 
--- a/src/share/vm/opto/machnode.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/machnode.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -58,7 +58,7 @@
 class MachOper : public ResourceObj {
 public:
   // Allocate right next to the MachNodes in the same arena
-  void *operator new( size_t x, Compile* C ) { return C->node_arena()->Amalloc_D(x); }
+  void *operator new( size_t x, Compile* C ) throw() { return C->node_arena()->Amalloc_D(x); }
 
   // Opcode
   virtual uint opcode() const = 0;
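
Annotation: the substantive change here is the throw() specification on the arena operator new. Under the C++98 rules, a new-expression that calls a non-throwing allocation function must check for a NULL return and skip construction, so the annotation makes the possible-NULL contract explicit (some compilers also warn when the specifications of paired allocators disagree). A minimal sketch of the pattern, with a hypothetical ArenaSketch standing in for the compile-time node arena:

#include <cstddef>
#include <cstdlib>

struct ArenaSketch {
  void* alloc(std::size_t n) { return std::malloc(n); }  // may return NULL
};

struct ArenaObj {
  // throw() tells the compiler this allocator can return NULL; the
  // generated new-expression then null-checks before running the ctor.
  void* operator new(std::size_t size, ArenaSketch* a) throw() { return a->alloc(size); }
  void  operator delete(void* p, ArenaSketch*) { std::free(p); }  // matching placement delete
};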
--- a/src/share/vm/opto/macro.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/macro.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -72,6 +72,8 @@
   int jvms_adj  = new_dbg_start - old_dbg_start;
   assert (new_dbg_start == newcall->req(), "argument count mismatch");
 
+  // A SafePointScalarObject node may be referenced several times in debug info.
+  // Use Dict to record cloned nodes.
   Dict* sosn_map = new Dict(cmpkey,hashkey);
   for (uint i = old_dbg_start; i < oldcall->req(); i++) {
     Node* old_in = oldcall->in(i);
@@ -79,8 +81,8 @@
     if (old_in != NULL && old_in->is_SafePointScalarObject()) {
       SafePointScalarObjectNode* old_sosn = old_in->as_SafePointScalarObject();
       uint old_unique = C->unique();
-      Node* new_in = old_sosn->clone(jvms_adj, sosn_map);
-      if (old_unique != C->unique()) {
+      Node* new_in = old_sosn->clone(sosn_map);
+      if (old_unique != C->unique()) { // New node?
         new_in->set_req(0, C->root()); // reset control edge
         new_in = transform_later(new_in); // Register new node.
       }
@@ -725,7 +727,11 @@
   while (safepoints.length() > 0) {
     SafePointNode* sfpt = safepoints.pop();
     Node* mem = sfpt->memory();
-    uint first_ind = sfpt->req();
+    assert(sfpt->jvms() != NULL, "missed JVMS");
+    // Fields of scalar objs are referenced only at the end
+    // of regular debuginfo at the last (youngest) JVMS.
+    // Record relative start index.
+    uint first_ind = (sfpt->req() - sfpt->jvms()->scloff());
     SafePointScalarObjectNode* sobj = new (C) SafePointScalarObjectNode(res_type,
 #ifdef ASSERT
                                                  alloc,
@@ -799,7 +805,7 @@
           for (int i = start; i < end; i++) {
             if (sfpt_done->in(i)->is_SafePointScalarObject()) {
               SafePointScalarObjectNode* scobj = sfpt_done->in(i)->as_SafePointScalarObject();
-              if (scobj->first_index() == sfpt_done->req() &&
+              if (scobj->first_index(jvms) == sfpt_done->req() &&
                   scobj->n_fields() == (uint)nfields) {
                 assert(scobj->alloc() == alloc, "sanity");
                 sfpt_done->set_req(i, res);
@@ -2185,7 +2191,7 @@
       Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
       klass_node = transform_later( LoadKlassNode::make(_igvn, mem, k_adr, _igvn.type(k_adr)->is_ptr()) );
 #ifdef _LP64
-      if (UseCompressedKlassPointers && klass_node->is_DecodeNKlass()) {
+      if (UseCompressedClassPointers && klass_node->is_DecodeNKlass()) {
         assert(klass_node->in(1)->Opcode() == Op_LoadNKlass, "sanity");
         klass_node->in(1)->init_req(0, ctrl);
       } else
--- a/src/share/vm/opto/matcher.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/matcher.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1967,6 +1967,7 @@
       case Op_Catch:
       case Op_CatchProj:
       case Op_CProj:
+      case Op_FlagsProj:
       case Op_JumpProj:
       case Op_JProj:
       case Op_NeverBranch:
--- a/src/share/vm/opto/matcher.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/matcher.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -337,6 +337,10 @@
   // Register for MODL projection of divmodL
   static RegMask modL_proj_mask();
 
+  static const RegMask mathExactI_result_proj_mask();
+  static const RegMask mathExactL_result_proj_mask();
+  static const RegMask mathExactI_flags_proj_mask();
+
   // Use hardware DIV instruction when it is faster than
   // a code which use multiply for division by constant.
   static bool use_asm_for_ldiv_by_con( jlong divisor );
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/opto/mathexactnode.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,430 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "memory/allocation.inline.hpp"
+#include "opto/addnode.hpp"
+#include "opto/cfgnode.hpp"
+#include "opto/machnode.hpp"
+#include "opto/matcher.hpp"
+#include "opto/mathexactnode.hpp"
+#include "opto/subnode.hpp"
+
+MathExactNode::MathExactNode(Node* ctrl, Node* in1) : MultiNode(2) {
+  init_class_id(Class_MathExact);
+  init_req(0, ctrl);
+  init_req(1, in1);
+}
+
+MathExactNode::MathExactNode(Node* ctrl, Node* in1, Node* in2) : MultiNode(3) {
+  init_class_id(Class_MathExact);
+  init_req(0, ctrl);
+  init_req(1, in1);
+  init_req(2, in2);
+}
+
+BoolNode* MathExactNode::bool_node() const {
+  Node* flags = flags_node();
+  BoolNode* boolnode = flags->unique_out()->as_Bool();
+  assert(boolnode != NULL, "must have BoolNode");
+  return boolnode;
+}
+
+IfNode* MathExactNode::if_node() const {
+  BoolNode* boolnode = bool_node();
+  IfNode* ifnode = boolnode->unique_out()->as_If();
+  assert(ifnode != NULL, "must have IfNode");
+  return ifnode;
+}
+
+Node* MathExactNode::control_node() const {
+  IfNode* ifnode = if_node();
+  return ifnode->in(0);
+}
+
+Node* MathExactNode::non_throwing_branch() const {
+  IfNode* ifnode = if_node();
+  if (bool_node()->_test._test == BoolTest::overflow) {
+    return ifnode->proj_out(0);
+  }
+  return ifnode->proj_out(1);
+}
+
+// If the MathExactNode won't overflow, we have to replace the
+// FlagsProjNode and ProjNode that are generated by the MathExactNode.
+Node* MathExactNode::no_overflow(PhaseGVN* phase, Node* new_result) {
+  PhaseIterGVN* igvn = phase->is_IterGVN();
+  if (igvn) {
+    ProjNode* result = result_node();
+    ProjNode* flags = flags_node();
+
+    if (result != NULL) {
+      igvn->replace_node(result, new_result);
+    }
+
+    if (flags != NULL) {
+      BoolNode* boolnode = bool_node();
+      switch (boolnode->_test._test) {
+        case BoolTest::overflow:
+          // if the check is for overflow - never taken
+          igvn->replace_node(boolnode, phase->intcon(0));
+          break;
+        case BoolTest::no_overflow:
+          // if the check is for no overflow - always taken
+          igvn->replace_node(boolnode, phase->intcon(1));
+          break;
+        default:
+          fatal("Unexpected value of BoolTest");
+          break;
+      }
+      flags->del_req(0);
+    }
+  }
+  return new_result;
+}
+
+Node* MathExactINode::match(const ProjNode* proj, const Matcher* m) {
+  uint ideal_reg = proj->ideal_reg();
+  RegMask rm;
+  if (proj->_con == result_proj_node) {
+    rm = m->mathExactI_result_proj_mask();
+  } else {
+    assert(proj->_con == flags_proj_node, "must be result or flags");
+    assert(ideal_reg == Op_RegFlags, "sanity");
+    rm = m->mathExactI_flags_proj_mask();
+  }
+  return new (m->C) MachProjNode(this, proj->_con, rm, ideal_reg);
+}
+
+Node* MathExactLNode::match(const ProjNode* proj, const Matcher* m) {
+  uint ideal_reg = proj->ideal_reg();
+  RegMask rm;
+  if (proj->_con == result_proj_node) {
+    rm = m->mathExactL_result_proj_mask();
+  } else {
+    assert(proj->_con == flags_proj_node, "must be result or flags");
+    assert(ideal_reg == Op_RegFlags, "sanity");
+    rm = m->mathExactI_flags_proj_mask();
+  }
+  return new (m->C) MachProjNode(this, proj->_con, rm, ideal_reg);
+}
+
+Node* AddExactINode::Ideal(PhaseGVN* phase, bool can_reshape) {
+  Node* arg1 = in(1);
+  Node* arg2 = in(2);
+
+  const Type* type1 = phase->type(arg1);
+  const Type* type2 = phase->type(arg2);
+
+  if (type1 != Type::TOP && type1->singleton() &&
+      type2 != Type::TOP && type2->singleton()) {
+    jint val1 = arg1->get_int();
+    jint val2 = arg2->get_int();
+    jint result = val1 + val2;
+    // Hacker's Delight 2-12: overflow iff both arguments have the opposite sign of the result
+    if (((val1 ^ result) & (val2 ^ result)) >= 0) {
+      Node* con_result = ConINode::make(phase->C, result);
+      return no_overflow(phase, con_result);
+    }
+    return NULL;
+  }
+
+  if (type1 == TypeInt::ZERO || type2 == TypeInt::ZERO) { // (Add 0 x) == x
+    Node* add_result = new (phase->C) AddINode(arg1, arg2);
+    return no_overflow(phase, add_result);
+  }
+
+  if (type2->singleton()) {
+    return NULL; // no change - keep constant on the right
+  }
+
+  if (type1->singleton()) {
+    // Make it x + Constant - move constant to the right
+    swap_edges(1, 2);
+    return this;
+  }
+
+  if (arg2->is_Load()) {
+    return NULL; // no change - keep load on the right
+  }
+
+  if (arg1->is_Load()) {
+    // Make it x + Load - move load to the right
+    swap_edges(1, 2);
+    return this;
+  }
+
+  if (arg1->_idx > arg2->_idx) {
+    // Sort the edges
+    swap_edges(1, 2);
+    return this;
+  }
+
+  return NULL;
+}
+
+Node* AddExactLNode::Ideal(PhaseGVN* phase, bool can_reshape) {
+  Node* arg1 = in(1);
+  Node* arg2 = in(2);
+
+  const Type* type1 = phase->type(arg1);
+  const Type* type2 = phase->type(arg2);
+
+  if (type1 != Type::TOP && type1->singleton() &&
+      type2 != Type::TOP && type2->singleton()) {
+    jlong val1 = arg1->get_long();
+    jlong val2 = arg2->get_long();
+    jlong result = val1 + val2;
+    // Hacker's Delight 2-12: overflow iff both arguments have the opposite sign of the result
+    if (((val1 ^ result) & (val2 ^ result)) >= 0) {
+      Node* con_result = ConLNode::make(phase->C, result);
+      return no_overflow(phase, con_result);
+    }
+    return NULL;
+  }
+
+  if (type1 == TypeLong::ZERO || type2 == TypeLong::ZERO) { // (Add 0 x) == x
+    Node* add_result = new (phase->C) AddLNode(arg1, arg2);
+    return no_overflow(phase, add_result);
+  }
+
+  if (type2->singleton()) {
+    return NULL; // no change - keep constant on the right
+  }
+
+  if (type1->singleton()) {
+    // Make it x + Constant - move constant to the right
+    swap_edges(1, 2);
+    return this;
+  }
+
+  if (arg2->is_Load()) {
+    return NULL; // no change - keep load on the right
+  }
+
+  if (arg1->is_Load()) {
+    // Make it x + Load - move load to the right
+    swap_edges(1, 2);
+    return this;
+  }
+
+  if (arg1->_idx > arg2->_idx) {
+    // Sort the edges
+    swap_edges(1, 2);
+    return this;
+  }
+
+  return NULL;
+}
+
+Node* SubExactINode::Ideal(PhaseGVN* phase, bool can_reshape) {
+  Node* arg1 = in(1);
+  Node* arg2 = in(2);
+
+  const Type* type1 = phase->type(arg1);
+  const Type* type2 = phase->type(arg2);
+
+  if (type1 != Type::TOP && type1->singleton() &&
+      type2 != Type::TOP && type2->singleton()) {
+    jint val1 = arg1->get_int();
+    jint val2 = arg2->get_int();
+    jint result = val1 - val2;
+
+    // Hacker's Delight 2-12: overflow iff the arguments have different signs and
+    // the sign of the result differs from the sign of arg1
+    if (((val1 ^ val2) & (val1 ^ result)) >= 0) {
+      Node* con_result = ConINode::make(phase->C, result);
+      return no_overflow(phase, con_result);
+    }
+    return NULL;
+  }
+
+  if (type1 == TypeInt::ZERO || type2 == TypeInt::ZERO) {
+    // Sub with zero is the same as add with zero
+    Node* add_result = new (phase->C) AddINode(arg1, arg2);
+    return no_overflow(phase, add_result);
+  }
+
+  return NULL;
+}
+
+Node* SubExactLNode::Ideal(PhaseGVN* phase, bool can_reshape) {
+  Node* arg1 = in(1);
+  Node* arg2 = in(2);
+
+  const Type* type1 = phase->type(arg1);
+  const Type* type2 = phase->type(arg2);
+
+  if (type1 != Type::TOP && type1->singleton() &&
+      type2 != Type::TOP && type2->singleton()) {
+    jlong val1 = arg1->get_long();
+    jlong val2 = arg2->get_long();
+    jlong result = val1 - val2;
+
+    // Hacker's Delight 2-12: overflow iff the arguments have different signs and
+    // the sign of the result differs from the sign of arg1
+    if (((val1 ^ val2) & (val1 ^ result)) >= 0) {
+      Node* con_result = ConLNode::make(phase->C, result);
+      return no_overflow(phase, con_result);
+    }
+    return NULL;
+  }
+
+  if (type1 == TypeLong::ZERO || type2 == TypeLong::ZERO) {
+    // Sub with zero is the same as add with zero
+    Node* add_result = new (phase->C) AddLNode(arg1, arg2);
+    return no_overflow(phase, add_result);
+  }
+
+  return NULL;
+}
+
+Node* NegExactINode::Ideal(PhaseGVN* phase, bool can_reshape) {
+  Node *arg = in(1);
+
+  const Type* type = phase->type(arg);
+  if (type != Type::TOP && type->singleton()) {
+    jint value = arg->get_int();
+    if (value != min_jint) {
+      Node* neg_result = ConINode::make(phase->C, -value);
+      return no_overflow(phase, neg_result);
+    }
+  }
+  return NULL;
+}
+
+Node* NegExactLNode::Ideal(PhaseGVN* phase, bool can_reshape) {
+  Node *arg = in(1);
+
+  const Type* type = phase->type(arg);
+  if (type != Type::TOP && type->singleton()) {
+    jlong value = arg->get_long();
+    if (value != min_jlong) {
+      Node* neg_result = ConLNode::make(phase->C, -value);
+      return no_overflow(phase, neg_result);
+    }
+  }
+  return NULL;
+}
+
+Node* MulExactINode::Ideal(PhaseGVN* phase, bool can_reshape) {
+  Node* arg1 = in(1);
+  Node* arg2 = in(2);
+
+  const Type* type1 = phase->type(arg1);
+  const Type* type2 = phase->type(arg2);
+
+  if (type1 != Type::TOP && type1->singleton() &&
+      type2 != Type::TOP && type2->singleton()) {
+    jint val1 = arg1->get_int();
+    jint val2 = arg2->get_int();
+    jlong result = (jlong) val1 * (jlong) val2;
+    if ((jint) result == result) {
+      // no overflow
+      Node* mul_result = ConINode::make(phase->C, result);
+      return no_overflow(phase, mul_result);
+    }
+  }
+
+  if (type1 == TypeInt::ZERO || type2 == TypeInt::ZERO) {
+    return no_overflow(phase, ConINode::make(phase->C, 0));
+  }
+
+  if (type1 == TypeInt::ONE) {
+    Node* mul_result = new (phase->C) AddINode(arg2, phase->intcon(0));
+    return no_overflow(phase, mul_result);
+  }
+  if (type2 == TypeInt::ONE) {
+    Node* mul_result = new (phase->C) AddINode(arg1, phase->intcon(0));
+    return no_overflow(phase, mul_result);
+  }
+
+  if (type1 == TypeInt::MINUS_1) {
+    return new (phase->C) NegExactINode(NULL, arg2);
+  }
+
+  if (type2 == TypeInt::MINUS_1) {
+    return new (phase->C) NegExactINode(NULL, arg1);
+  }
+
+  return NULL;
+}
+
+Node* MulExactLNode::Ideal(PhaseGVN* phase, bool can_reshape) {
+  Node* arg1 = in(1);
+  Node* arg2 = in(2);
+
+  const Type* type1 = phase->type(arg1);
+  const Type* type2 = phase->type(arg2);
+
+  if (type1 != Type::TOP && type1->singleton() &&
+      type2 != Type::TOP && type2->singleton()) {
+    jlong val1 = arg1->get_long();
+    jlong val2 = arg2->get_long();
+
+    jlong result = val1 * val2;
+    jlong ax = (val1 < 0 ? -val1 : val1);
+    jlong ay = (val2 < 0 ? -val2 : val2);
+
+    bool overflow = false;
+    if ((ax | ay) & CONST64(0xFFFFFFFF00000000)) {
+      // potential overflow if any bit in the upper 32 bits is set
+      if ((val1 == min_jlong && val2 == -1) || (val2 == min_jlong && val1 == -1)) {
+        // -1 * Long.MIN_VALUE will overflow
+        overflow = true;
+      } else if (val2 != 0 && (result / val2 != val1)) {
+        overflow = true;
+      }
+    }
+
+    if (!overflow) {
+      Node* mul_result = ConLNode::make(phase->C, result);
+      return no_overflow(phase, mul_result);
+    }
+  }
+
+  if (type1 == TypeLong::ZERO || type2 == TypeLong::ZERO) {
+    return no_overflow(phase, ConLNode::make(phase->C, 0));
+  }
+
+  if (type1 == TypeLong::ONE) {
+    Node* mul_result = new (phase->C) AddLNode(arg2, phase->longcon(0));
+    return no_overflow(phase, mul_result);
+  }
+  if (type2 == TypeLong::ONE) {
+    Node* mul_result = new (phase->C) AddLNode(arg1, phase->longcon(0));
+    return no_overflow(phase, mul_result);
+  }
+
+  if (type1 == TypeLong::MINUS_1) {
+    return new (phase->C) NegExactLNode(NULL, arg2);
+  }
+
+  if (type2 == TypeLong::MINUS_1) {
+    return new (phase->C) NegExactLNode(NULL, arg1);
+  }
+
+  return NULL;
+}
+
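
Annotation: each Ideal method above constant-folds by rerunning the overflow test on the operand constants. The three tests, extracted into a standalone C++ sketch (illustrative only, mirroring the Hacker's Delight 2-12 logic in the code):

#include <cstdint>

// Add: overflow iff both operands have the opposite sign of the result.
bool add_overflows(int32_t a, int32_t b) {
  int32_t r = (int32_t)((uint32_t)a + (uint32_t)b);  // wrapping add
  return ((a ^ r) & (b ^ r)) < 0;
}

// Sub: overflow iff the operands differ in sign and the result's
// sign differs from the first operand's.
bool sub_overflows(int32_t a, int32_t b) {
  int32_t r = (int32_t)((uint32_t)a - (uint32_t)b);  // wrapping sub
  return ((a ^ b) & (a ^ r)) < 0;
}

// Mul: widen to 64 bits and check that the product still fits in 32,
// as MulExactINode::Ideal does. The long variant cannot widen, so it
// instead divides back (result / val2 != val1) and special-cases
// min_jlong * -1.
bool mul_overflows(int32_t a, int32_t b) {
  int64_t wide = (int64_t)a * (int64_t)b;
  return (int32_t)wide != wide;
}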
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/opto/mathexactnode.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_OPTO_MATHEXACTNODE_HPP
+#define SHARE_VM_OPTO_MATHEXACTNODE_HPP
+
+#include "opto/multnode.hpp"
+#include "opto/node.hpp"
+#include "opto/subnode.hpp"
+#include "opto/type.hpp"
+
+class BoolNode;
+class IfNode;
+class Node;
+
+class PhaseGVN;
+class PhaseTransform;
+
+class MathExactNode : public MultiNode {
+public:
+  MathExactNode(Node* ctrl, Node* in1);
+  MathExactNode(Node* ctrl, Node* in1, Node* in2);
+  enum {
+    result_proj_node = 0,
+    flags_proj_node = 1
+  };
+  virtual int Opcode() const;
+  virtual Node* Identity(PhaseTransform* phase) { return this; }
+  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape) { return NULL; }
+  virtual const Type* Value(PhaseTransform* phase) const { return bottom_type(); }
+  virtual uint hash() const { return Node::hash(); }
+  virtual bool is_CFG() const { return false; }
+  virtual uint ideal_reg() const { return NotAMachineReg; }
+
+  ProjNode* result_node() const { return proj_out(result_proj_node); }
+  ProjNode* flags_node() const { return proj_out(flags_proj_node); }
+  Node* control_node() const;
+  Node* non_throwing_branch() const;
+protected:
+  IfNode* if_node() const;
+  BoolNode* bool_node() const;
+  Node* no_overflow(PhaseGVN *phase, Node* new_result);
+};
+
+class MathExactINode : public MathExactNode {
+ public:
+  MathExactINode(Node* ctrl, Node* in1) : MathExactNode(ctrl, in1) {}
+  MathExactINode(Node* ctrl, Node* in1, Node* in2) : MathExactNode(ctrl, in1, in2) {}
+  virtual int Opcode() const;
+  virtual Node* match(const ProjNode* proj, const Matcher* m);
+  virtual const Type* bottom_type() const { return TypeTuple::INT_CC_PAIR; }
+};
+
+class MathExactLNode : public MathExactNode {
+public:
+  MathExactLNode(Node* ctrl, Node* in1) : MathExactNode(ctrl, in1) {}
+  MathExactLNode(Node* ctrl, Node* in1, Node* in2) : MathExactNode(ctrl, in1, in2) {}
+  virtual int Opcode() const;
+  virtual Node* match(const ProjNode* proj, const Matcher* m);
+  virtual const Type* bottom_type() const { return TypeTuple::LONG_CC_PAIR; }
+};
+
+class AddExactINode : public MathExactINode {
+public:
+  AddExactINode(Node* ctrl, Node* in1, Node* in2) : MathExactINode(ctrl, in1, in2) {}
+  virtual int Opcode() const;
+  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
+};
+
+class AddExactLNode : public MathExactLNode {
+public:
+  AddExactLNode(Node* ctrl, Node* in1, Node* in2) : MathExactLNode(ctrl, in1, in2) {}
+  virtual int Opcode() const;
+  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
+};
+
+class SubExactINode : public MathExactINode {
+public:
+  SubExactINode(Node* ctrl, Node* in1, Node* in2) : MathExactINode(ctrl, in1, in2) {}
+  virtual int Opcode() const;
+  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
+};
+
+class SubExactLNode : public MathExactLNode {
+public:
+  SubExactLNode(Node* ctrl, Node* in1, Node* in2) : MathExactLNode(ctrl, in1, in2) {}
+  virtual int Opcode() const;
+  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
+};
+
+class NegExactINode : public MathExactINode {
+public:
+  NegExactINode(Node* ctrl, Node* in1) : MathExactINode(ctrl, in1) {}
+  virtual int Opcode() const;
+  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
+};
+
+class NegExactLNode : public MathExactLNode {
+public:
+  NegExactLNode(Node* ctrl, Node* in1) : MathExactLNode(ctrl, in1) {}
+  virtual int Opcode() const;
+  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
+};
+
+class MulExactINode : public MathExactINode {
+public:
+  MulExactINode(Node* ctrl, Node* in1, Node* in2) : MathExactINode(ctrl, in1, in2) {}
+  virtual int Opcode() const;
+  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
+};
+
+class MulExactLNode : public MathExactLNode {
+public:
+  MulExactLNode(Node* ctrl, Node* in1, Node* in2) : MathExactLNode(ctrl, in1, in2) {}
+  virtual int Opcode() const;
+  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
+};
+
+class FlagsProjNode : public ProjNode {
+public:
+  FlagsProjNode(Node* src, uint con) : ProjNode(src, con) {
+    init_class_id(Class_FlagsProj);
+  }
+
+  virtual int Opcode() const;
+  virtual bool is_CFG() const { return false; }
+  virtual const Type* bottom_type() const { return TypeInt::CC; }
+  virtual uint ideal_reg() const { return Op_RegFlags; }
+};
+
+
+#endif // SHARE_VM_OPTO_MATHEXACTNODE_HPP
+
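
The header above models each Math.*Exact intrinsic as a MultiNode with two projections: slot 0 (result_proj_node) carries the arithmetic value, slot 1 (flags_proj_node) carries the condition codes that feed the overflow branch toward an uncommon trap. A standalone model of that value/flags pairing for the 32-bit add (illustration only, not HotSpot code; the real projections live in the ideal graph):

    #include <cstdint>
    #include <cstdio>

    struct ExactResult {
      int32_t value;     // what result_node() would carry
      bool    overflow;  // what flags_node() would feed to the branch
    };

    ExactResult add_exact_i(int32_t a, int32_t b) {
      int32_t r = (int32_t)((uint32_t)a + (uint32_t)b);  // wrap-around sum
      // Signed add overflows iff both inputs share a sign the result lacks.
      bool ovf = (((a ^ r) & (b ^ r)) < 0);
      ExactResult res = { r, ovf };
      return res;
    }

    int main() {
      ExactResult r = add_exact_i(0x7fffffff, 1);
      std::printf("value=%d overflow=%d\n", (int)r.value, (int)r.overflow);
      return 0;
    }
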
--- a/src/share/vm/opto/memnode.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/memnode.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -962,6 +962,19 @@
   return (uintptr_t)in(Control) + (uintptr_t)in(Memory) + (uintptr_t)in(Address);
 }
 
+static bool skip_through_membars(Compile::AliasType* atp, const TypeInstPtr* tp, bool eliminate_boxing) {
+  if ((atp != NULL) && (atp->index() >= Compile::AliasIdxRaw)) {
+    bool non_volatile = (atp->field() != NULL) && !atp->field()->is_volatile();
+    bool is_stable_ary = FoldStableValues &&
+                         (tp != NULL) && (tp->isa_aryptr() != NULL) &&
+                         tp->isa_aryptr()->is_stable();
+
+    return (eliminate_boxing && non_volatile) || is_stable_ary;
+  }
+
+  return false;
+}
+
 //---------------------------can_see_stored_value------------------------------
 // This routine exists to make sure this set of tests is done the same
 // everywhere.  We need to make a coordinated change: first LoadNode::Ideal
@@ -976,11 +989,9 @@
   const TypeInstPtr* tp = phase->type(ld_adr)->isa_instptr();
   Compile::AliasType* atp = (tp != NULL) ? phase->C->alias_type(tp) : NULL;
   // This is more general than loads from boxing objects.
-  if (phase->C->eliminate_boxing() && (atp != NULL) &&
-      (atp->index() >= Compile::AliasIdxRaw) &&
-      (atp->field() != NULL) && !atp->field()->is_volatile()) {
+  if (skip_through_membars(atp, tp, phase->C->eliminate_boxing())) {
     uint alias_idx = atp->index();
-    bool final = atp->field()->is_final();
+    bool final = !atp->is_rewritable();
     Node* result = NULL;
     Node* current = st;
     // Skip through chains of MemBarNodes checking the MergeMems for
@@ -1015,7 +1026,6 @@
     }
   }
 
-
   // Loop around twice in the case Load -> Initialize -> Store.
   // (See PhaseIterGVN::add_users_to_worklist, which knows about this case.)
   for (int trip = 0; trip <= 1; trip++) {
@@ -1577,6 +1587,40 @@
   return NULL;
 }
 
+// Try to constant-fold a stable array element.
+static const Type* fold_stable_ary_elem(const TypeAryPtr* ary, int off, BasicType loadbt) {
+  assert(ary->is_stable(), "array should be stable");
+
+  if (ary->const_oop() != NULL) {
+    // Decode the results of GraphKit::array_element_address.
+    ciArray* aobj = ary->const_oop()->as_array();
+    ciConstant con = aobj->element_value_by_offset(off);
+
+    if (con.basic_type() != T_ILLEGAL && !con.is_null_or_zero()) {
+      const Type* con_type = Type::make_from_constant(con);
+      if (con_type != NULL) {
+        if (con_type->isa_aryptr()) {
+          // Join with the array element type, in case it is also stable.
+          int dim = ary->stable_dimension();
+          con_type = con_type->is_aryptr()->cast_to_stable(true, dim-1);
+        }
+        if (loadbt == T_NARROWOOP && con_type->isa_oopptr()) {
+          con_type = con_type->make_narrowoop();
+        }
+#ifndef PRODUCT
+        if (TraceIterativeGVN) {
+          tty->print("FoldStableValues: array element [off=%d]: con_type=", off);
+          con_type->dump(); tty->cr();
+        }
+#endif //PRODUCT
+        return con_type;
+      }
+    }
+  }
+
+  return NULL;
+}
+
 //------------------------------Value-----------------------------------------
 const Type *LoadNode::Value( PhaseTransform *phase ) const {
   // Either input is TOP ==> the result is TOP
@@ -1591,8 +1635,31 @@
   Compile* C = phase->C;
 
   // Try to guess loaded type from pointer type
-  if (tp->base() == Type::AryPtr) {
-    const Type *t = tp->is_aryptr()->elem();
+  if (tp->isa_aryptr()) {
+    const TypeAryPtr* ary = tp->is_aryptr();
+    const Type *t = ary->elem();
+
+    // Determine whether the reference is beyond the header or not, by comparing
+    // the offset against the offset of the start of the array's data.
+    // Different array types begin at slightly different offsets (12 vs. 16).
+    // We choose T_BYTE as an example base type that is least restrictive
+    // as to alignment, which will therefore produce the smallest
+    // possible base offset.
+    const int min_base_off = arrayOopDesc::base_offset_in_bytes(T_BYTE);
+    const bool off_beyond_header = ((uint)off >= (uint)min_base_off);
+
+    // Try to constant-fold a stable array element.
+    if (FoldStableValues && ary->is_stable()) {
+      // Make sure the reference is not into the header
+      if (off_beyond_header && off != Type::OffsetBot) {
+        assert(adr->is_AddP() && adr->in(AddPNode::Offset)->is_Con(), "offset is a constant");
+        const Type* con_type = fold_stable_ary_elem(ary, off, memory_type());
+        if (con_type != NULL) {
+          return con_type;
+        }
+      }
+    }
+
     // Don't do this for integer types. There is only potential profit if
     // the element type t is lower than _type; that is, for int types, if _type is
     // more restrictive than t.  This only happens here if one is short and the other
@@ -1613,14 +1680,7 @@
         && Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) {
       // t might actually be lower than _type, if _type is a unique
       // concrete subclass of abstract class t.
-      // Make sure the reference is not into the header, by comparing
-      // the offset against the offset of the start of the array's data.
-      // Different array types begin at slightly different offsets (12 vs. 16).
-      // We choose T_BYTE as an example base type that is least restrictive
-      // as to alignment, which will therefore produce the smallest
-      // possible base offset.
-      const int min_base_off = arrayOopDesc::base_offset_in_bytes(T_BYTE);
-      if ((uint)off >= (uint)min_base_off) {  // is the offset beyond the header?
+      if (off_beyond_header) {  // is the offset beyond the header?
         const Type* jt = t->join(_type);
         // In any case, do not allow the join, per se, to empty out the type.
         if (jt->empty() && !t->empty()) {
@@ -1971,7 +2031,7 @@
   assert(adr_type != NULL, "expecting TypeKlassPtr");
 #ifdef _LP64
   if (adr_type->is_ptr_to_narrowklass()) {
-    assert(UseCompressedKlassPointers, "no compressed klasses");
+    assert(UseCompressedClassPointers, "no compressed klasses");
     Node* load_klass = gvn.transform(new (C) LoadNKlassNode(ctl, mem, adr, at, tk->make_narrowklass()));
     return new (C) DecodeNKlassNode(load_klass, load_klass->bottom_type()->make_ptr());
   }
@@ -2309,7 +2369,7 @@
       val = gvn.transform(new (C) EncodePNode(val, val->bottom_type()->make_narrowoop()));
       return new (C) StoreNNode(ctl, mem, adr, adr_type, val);
     } else if (adr->bottom_type()->is_ptr_to_narrowklass() ||
-               (UseCompressedKlassPointers && val->bottom_type()->isa_klassptr() &&
+               (UseCompressedClassPointers && val->bottom_type()->isa_klassptr() &&
                 adr->bottom_type()->isa_rawptr())) {
       val = gvn.transform(new (C) EncodePKlassNode(val, val->bottom_type()->make_narrowklass()));
       return new (C) StoreNKlassNode(ctl, mem, adr, adr_type, val);
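
On the FoldStableValues changes above: fold_stable_ary_elem() folds an element of a constant stable array only when the value read is not the default (the con.is_null_or_zero() test), because a @Stable location may legally change exactly once, from null/zero to its permanent value. A standalone model of that folding rule (illustration only, not HotSpot code):

    #include <cstdio>

    // Model of a @Stable array: an element may be treated as a compile-time
    // constant only after it has left its default (null/zero) state.
    struct StableArray {
      const long* data;
      int         length;
      bool can_fold(int i) const {
        return i >= 0 && i < length && data[i] != 0;  // the is_null_or_zero() test
      }
    };

    int main() {
      static const long values[3] = { 0, 42, 7 };  // slot 0 not yet initialized
      StableArray arr = { values, 3 };
      std::printf("fold[0]=%d fold[1]=%d\n",
                  (int)arr.can_fold(0), (int)arr.can_fold(1));  // 0 then 1
      return 0;
    }
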
--- a/src/share/vm/opto/multnode.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/multnode.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -24,7 +24,9 @@
 
 #include "precompiled.hpp"
 #include "opto/callnode.hpp"
+#include "opto/cfgnode.hpp"
 #include "opto/matcher.hpp"
+#include "opto/mathexactnode.hpp"
 #include "opto/multnode.hpp"
 #include "opto/opcodes.hpp"
 #include "opto/phaseX.hpp"
@@ -46,15 +48,21 @@
   assert(Opcode() != Op_If || outcnt() == 2, "bad if #1");
   for( DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++ ) {
     Node *p = fast_out(i);
-    if( !p->is_Proj() ) {
+    if (p->is_Proj()) {
+      ProjNode *proj = p->as_Proj();
+      if (proj->_con == which_proj) {
+        assert(Opcode() != Op_If || proj->Opcode() == (which_proj?Op_IfTrue:Op_IfFalse), "bad if #2");
+        return proj;
+      }
+    } else if (p->is_FlagsProj()) {
+      FlagsProjNode *proj = p->as_FlagsProj();
+      if (proj->_con == which_proj) {
+        return proj;
+      }
+    } else {
       assert(p == this && this->is_Start(), "else must be proj");
       continue;
     }
-    ProjNode *proj = p->as_Proj();
-    if( proj->_con == which_proj ) {
-      assert(Opcode() != Op_If || proj->Opcode() == (which_proj?Op_IfTrue:Op_IfFalse), "bad if #2");
-      return proj;
-    }
   }
   return NULL;
 }
@@ -143,3 +151,59 @@
 uint ProjNode::ideal_reg() const {
   return bottom_type()->ideal_reg();
 }
+
+//-------------------------------is_uncommon_trap_proj----------------------------
+// Return true if proj is of the form "proj->[region->..]call_uct"
+bool ProjNode::is_uncommon_trap_proj(Deoptimization::DeoptReason reason) {
+  int path_limit = 10;
+  Node* out = this;
+  for (int ct = 0; ct < path_limit; ct++) {
+    out = out->unique_ctrl_out();
+    if (out == NULL)
+      return false;
+    if (out->is_CallStaticJava()) {
+      int req = out->as_CallStaticJava()->uncommon_trap_request();
+      if (req != 0) {
+        Deoptimization::DeoptReason trap_reason = Deoptimization::trap_request_reason(req);
+        if (trap_reason == reason || reason == Deoptimization::Reason_none) {
+           return true;
+        }
+      }
+      return false; // don't search further past a call
+    }
+    if (out->Opcode() != Op_Region)
+      return false;
+  }
+  return false;
+}
+
+//-------------------------------is_uncommon_trap_if_pattern-------------------------
+// Return true  for "if(test)-> proj -> ...
+//                          |
+//                          V
+//                      other_proj->[region->..]call_uct"
+//
+// When "reason" is Reason_none any uncommon trap matches; otherwise its reason must match
+bool ProjNode::is_uncommon_trap_if_pattern(Deoptimization::DeoptReason reason) {
+  Node *in0 = in(0);
+  if (!in0->is_If()) return false;
+  // Variation of a dead If node.
+  if (in0->outcnt() < 2)  return false;
+  IfNode* iff = in0->as_If();
+
+  // We need the "If(Conv2B(Opaque1(...)))" pattern for a predicate reason
+  if (reason != Deoptimization::Reason_none) {
+    if (iff->in(1)->Opcode() != Op_Conv2B ||
+       iff->in(1)->in(1)->Opcode() != Op_Opaque1) {
+      return false;
+    }
+  }
+
+  ProjNode* other_proj = iff->proj_out(1-_con)->as_Proj();
+  if (other_proj->is_uncommon_trap_proj(reason)) {
+    assert(reason == Deoptimization::Reason_none ||
+           Compile::current()->is_predicate_opaq(iff->in(1)->in(1)), "should be on the list");
+    return true;
+  }
+  return false;
+}
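
The new ProjNode::is_uncommon_trap_proj() above is a bounded walk over unique control successors: it succeeds only if a CallStaticJava carrying an uncommon-trap request is reached within path_limit hops, and it gives up as soon as anything other than a Region appears on the path. A standalone model of the walk (illustration only, not HotSpot code):

    enum Kind { Region, CallUct, Other };

    struct Ctrl {
      Kind  kind;
      Ctrl* unique_ctrl_out;  // single control successor, or null
    };

    static bool is_uncommon_trap_path(Ctrl* proj, int path_limit = 10) {
      Ctrl* out = proj;
      for (int ct = 0; ct < path_limit; ct++) {
        out = out->unique_ctrl_out;
        if (out == 0)             return false;  // control fans out or ends
        if (out->kind == CallUct) return true;   // found the trap call
        if (out->kind != Region)  return false;  // anything else ends the search
      }
      return false;  // give up after path_limit hops
    }

    int main() {
      Ctrl uct  = { CallUct, 0 };
      Ctrl reg  = { Region,  &uct };
      Ctrl proj = { Other,   &reg };
      return is_uncommon_trap_path(&proj) ? 0 : 1;  // exits with 0
    }
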
--- a/src/share/vm/opto/multnode.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/multnode.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -88,6 +88,14 @@
 #ifndef PRODUCT
   virtual void dump_spec(outputStream *st) const;
 #endif
+
+  // Return true if proj is of the form "proj->[region->..]call_uct"
+  bool is_uncommon_trap_proj(Deoptimization::DeoptReason reason);
+  // Return true for    "if(test)-> proj -> ...
+  //                          |
+  //                          V
+  //                      other_proj->[region->..]call_uct"
+  bool is_uncommon_trap_if_pattern(Deoptimization::DeoptReason reason);
 };
 
 #endif // SHARE_VM_OPTO_MULTNODE_HPP
--- a/src/share/vm/opto/node.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/node.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -773,6 +773,21 @@
   _in[_cnt] = NULL;       // NULL out emptied slot
 }
 
+//------------------------------del_req_ordered--------------------------------
+// Delete the required edge and compact the edge array with preserved order
+void Node::del_req_ordered( uint idx ) {
+  assert( idx < _cnt, "oob");
+  assert( !VerifyHashTableKeys || _hash_lock == 0,
+          "remove node from hash table before modifying it");
+  // First remove corresponding def-use edge
+  Node *n = in(idx);
+  if (n != NULL) n->del_out((Node *)this);
+  if (idx < _cnt - 1) { // Not last edge ?
+    Copy::conjoint_words_to_lower((HeapWord*)&_in[idx+1], (HeapWord*)&_in[idx], ((_cnt-idx-1)*sizeof(Node*)));
+  }
+  _in[--_cnt] = NULL;   // NULL out emptied slot
+}
+
 //------------------------------ins_req----------------------------------------
 // Insert a new required input at the end
 void Node::ins_req( uint idx, Node *n ) {
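
On del_req_ordered() above: the existing del_req() fills the hole by moving the last edge into it, which is cheap but reorders the remaining inputs; the new variant shifts the tail down with a conjoint copy so relative input order survives, for callers where edge positions are meaningful. A standalone contrast of the two deletion styles (illustration only, not HotSpot code):

    #include <cstdio>

    // del_req() style: move the last edge into the hole (order not preserved).
    static void del_swap(int* in, int& cnt, int idx) { in[idx] = in[--cnt]; }

    // del_req_ordered() style: shift the tail down one slot (order preserved),
    // like the Copy::conjoint_words_to_lower() call above.
    static void del_ordered(int* in, int& cnt, int idx) {
      for (int i = idx; i < cnt - 1; i++) in[i] = in[i + 1];
      --cnt;
    }

    int main() {
      int a[4] = { 10, 20, 30, 40 }; int na = 4;
      int b[4] = { 10, 20, 30, 40 }; int nb = 4;
      del_swap(a, na, 1);     // a = 10, 40, 30
      del_ordered(b, nb, 1);  // b = 10, 30, 40
      std::printf("%d %d %d vs %d %d %d\n", a[0], a[1], a[2], b[0], b[1], b[2]);
      return 0;
    }
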
--- a/src/share/vm/opto/node.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/node.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -69,6 +69,7 @@
 class EncodePKlassNode;
 class FastLockNode;
 class FastUnlockNode;
+class FlagsProjNode;
 class IfNode;
 class IfFalseNode;
 class IfTrueNode;
@@ -99,6 +100,7 @@
 class MachSpillCopyNode;
 class MachTempNode;
 class Matcher;
+class MathExactNode;
 class MemBarNode;
 class MemBarStoreStoreNode;
 class MemNode;
@@ -211,7 +213,7 @@
 
   // New Operator that takes a Compile pointer, this will eventually
   // be the "new" New operator.
-  inline void* operator new( size_t x, Compile* C) {
+  inline void* operator new( size_t x, Compile* C) throw() {
     Node* n = (Node*)C->node_arena()->Amalloc_D(x);
 #ifdef ASSERT
     n->_in = (Node**)n; // magic cookie for assertion check
@@ -384,6 +386,7 @@
   void add_req( Node *n ); // Append a NEW required input
   void add_req_batch( Node* n, uint m ); // Append m NEW required inputs (all n).
   void del_req( uint idx ); // Delete required edge & compact
+  void del_req_ordered( uint idx ); // Delete required edge & compact with preserved order
   void ins_req( uint i, Node *n ); // Insert a NEW required input
   void set_req( uint i, Node *n ) {
     assert( is_not_dead(n), "can not use dead node");
@@ -566,6 +569,7 @@
       DEFINE_CLASS_ID(MemBar,      Multi, 3)
         DEFINE_CLASS_ID(Initialize,       MemBar, 0)
         DEFINE_CLASS_ID(MemBarStoreStore, MemBar, 1)
+      DEFINE_CLASS_ID(MathExact,   Multi, 4)
 
     DEFINE_CLASS_ID(Mach,  Node, 1)
       DEFINE_CLASS_ID(MachReturn, Mach, 0)
@@ -622,6 +626,7 @@
       DEFINE_CLASS_ID(Cmp,   Sub, 0)
         DEFINE_CLASS_ID(FastLock,   Cmp, 0)
         DEFINE_CLASS_ID(FastUnlock, Cmp, 1)
+        DEFINE_CLASS_ID(FlagsProj, Cmp, 2)
 
     DEFINE_CLASS_ID(MergeMem, Node, 7)
     DEFINE_CLASS_ID(Bool,     Node, 8)
@@ -725,6 +730,7 @@
   DEFINE_CLASS_QUERY(EncodePKlass)
   DEFINE_CLASS_QUERY(FastLock)
   DEFINE_CLASS_QUERY(FastUnlock)
+  DEFINE_CLASS_QUERY(FlagsProj)
   DEFINE_CLASS_QUERY(If)
   DEFINE_CLASS_QUERY(IfFalse)
   DEFINE_CLASS_QUERY(IfTrue)
@@ -753,6 +759,7 @@
   DEFINE_CLASS_QUERY(MachSafePoint)
   DEFINE_CLASS_QUERY(MachSpillCopy)
   DEFINE_CLASS_QUERY(MachTemp)
+  DEFINE_CLASS_QUERY(MathExact)
   DEFINE_CLASS_QUERY(Mem)
   DEFINE_CLASS_QUERY(MemBar)
   DEFINE_CLASS_QUERY(MemBarStoreStore)
--- a/src/share/vm/opto/output.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/output.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -57,7 +57,7 @@
 // Convert Nodes to instruction bits and pass off to the VM
 void Compile::Output() {
   // RootNode goes
-  assert( _cfg->get_root_block()->_nodes.size() == 0, "" );
+  assert( _cfg->get_root_block()->number_of_nodes() == 0, "" );
 
   // The number of new nodes (mostly MachNop) is proportional to
   // the number of java calls and inner loops which are aligned.
@@ -70,11 +70,11 @@
   Block *entry = _cfg->get_block(1);
   Block *broot = _cfg->get_root_block();
 
-  const StartNode *start = entry->_nodes[0]->as_Start();
+  const StartNode *start = entry->head()->as_Start();
 
   // Replace StartNode with prolog
   MachPrologNode *prolog = new (this) MachPrologNode();
-  entry->_nodes.map( 0, prolog );
+  entry->map_node(prolog, 0);
   _cfg->map_node_to_block(prolog, entry);
   _cfg->unmap_node_from_block(start); // start is no longer in any block
 
@@ -144,8 +144,8 @@
     for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
       tty->print("\nBB#%03d:\n", i);
       Block* block = _cfg->get_block(i);
-      for (uint j = 0; j < block->_nodes.size(); j++) {
-        Node* n = block->_nodes[j];
+      for (uint j = 0; j < block->number_of_nodes(); j++) {
+        Node* n = block->get_node(j);
         OptoReg::Name reg = _regalloc->get_reg_first(n);
         tty->print(" %-6s ", reg >= 0 && reg < REG_COUNT ? Matcher::regName[reg] : "");
         n->dump();
@@ -226,8 +226,8 @@
   // Insert call to zap runtime stub before every node with an oop map
   for( uint i=0; i<_cfg->number_of_blocks(); i++ ) {
     Block *b = _cfg->get_block(i);
-    for ( uint j = 0;  j < b->_nodes.size();  ++j ) {
-      Node *n = b->_nodes[j];
+    for ( uint j = 0;  j < b->number_of_nodes();  ++j ) {
+      Node *n = b->get_node(j);
 
       // Determining if we should insert a zap-a-lot node in output.
      // We do that for all nodes that have oopmap info, except for calls
@@ -256,7 +256,7 @@
         }
         if (insert) {
           Node *zap = call_zap_node(n->as_MachSafePoint(), i);
-          b->_nodes.insert( j, zap );
+          b->insert_node(zap, j);
           _cfg->map_node_to_block(zap, b);
           ++j;
         }
@@ -379,10 +379,10 @@
     DEBUG_ONLY( jmp_rule[i]   = 0; )
 
     // Sum all instruction sizes to compute block size
-    uint last_inst = block->_nodes.size();
+    uint last_inst = block->number_of_nodes();
     uint blk_size = 0;
     for (uint j = 0; j < last_inst; j++) {
-      Node* nj = block->_nodes[j];
+      Node* nj = block->get_node(j);
       // Handle machine instruction nodes
       if (nj->is_Mach()) {
         MachNode *mach = nj->as_Mach();
@@ -477,18 +477,18 @@
     for (uint i = 0; i < nblocks; i++) {
       Block* block = _cfg->get_block(i);
       int idx = jmp_nidx[i];
-      MachNode* mach = (idx == -1) ? NULL: block->_nodes[idx]->as_Mach();
+      MachNode* mach = (idx == -1) ? NULL: block->get_node(idx)->as_Mach();
       if (mach != NULL && mach->may_be_short_branch()) {
 #ifdef ASSERT
         assert(jmp_size[i] > 0 && mach->is_MachBranch(), "sanity");
         int j;
         // Find the branch; ignore trailing NOPs.
-        for (j = block->_nodes.size()-1; j>=0; j--) {
-          Node* n = block->_nodes[j];
+        for (j = block->number_of_nodes()-1; j>=0; j--) {
+          Node* n = block->get_node(j);
           if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con)
             break;
         }
-        assert(j >= 0 && j == idx && block->_nodes[j] == (Node*)mach, "sanity");
+        assert(j >= 0 && j == idx && block->get_node(j) == (Node*)mach, "sanity");
 #endif
         int br_size = jmp_size[i];
         int br_offs = blk_starts[i] + jmp_offset[i];
@@ -522,7 +522,7 @@
             diff -= nop_size;
           }
           adjust_block_start += diff;
-          block->_nodes.map(idx, replacement);
+          block->map_node(replacement, idx);
           mach->subsume_by(replacement, C);
           mach = replacement;
           progress = true;
@@ -639,7 +639,7 @@
                            new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()));
       Compile::set_sv_for_object_node(objs, sv);
 
-      uint first_ind = spobj->first_index();
+      uint first_ind = spobj->first_index(sfpt->jvms());
       for (uint i = 0; i < spobj->n_fields(); i++) {
         Node* fld_node = sfpt->in(first_ind+i);
         (void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs);
@@ -894,7 +894,7 @@
     GrowableArray<MonitorValue*> *monarray = new GrowableArray<MonitorValue*>(num_mon);
 
     // Loop over monitors and insert into array
-    for(idx = 0; idx < num_mon; idx++) {
+    for (idx = 0; idx < num_mon; idx++) {
       // Grab the node that defines this monitor
       Node* box_node = sfn->monitor_box(jvms, idx);
       Node* obj_node = sfn->monitor_obj(jvms, idx);
@@ -902,11 +902,11 @@
       // Create ScopeValue for object
       ScopeValue *scval = NULL;
 
-      if( obj_node->is_SafePointScalarObject() ) {
+      if (obj_node->is_SafePointScalarObject()) {
         SafePointScalarObjectNode* spobj = obj_node->as_SafePointScalarObject();
         scval = Compile::sv_for_node_id(objs, spobj->_idx);
         if (scval == NULL) {
-          const Type *t = obj_node->bottom_type();
+          const Type *t = spobj->bottom_type();
           ciKlass* cik = t->is_oopptr()->klass();
           assert(cik->is_instance_klass() ||
                  cik->is_array_klass(), "Not supported allocation.");
@@ -914,14 +914,14 @@
                                             new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()));
           Compile::set_sv_for_object_node(objs, sv);
 
-          uint first_ind = spobj->first_index();
+          uint first_ind = spobj->first_index(youngest_jvms);
           for (uint i = 0; i < spobj->n_fields(); i++) {
             Node* fld_node = sfn->in(first_ind+i);
             (void)FillLocArray(sv->field_values()->length(), sfn, fld_node, sv->field_values(), objs);
           }
           scval = sv;
         }
-      } else if( !obj_node->is_Con() ) {
+      } else if (!obj_node->is_Con()) {
         OptoReg::Name obj_reg = _regalloc->get_reg_first(obj_node);
         if( obj_node->bottom_type()->base() == Type::NarrowOop ) {
           scval = new_loc_value( _regalloc, obj_reg, Location::narrowoop );
@@ -1088,8 +1088,8 @@
     for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
       Block* b = _cfg->get_block(i);
 
-      for (uint j = 0; j < b->_nodes.size(); j++) {
-        Node* n = b->_nodes[j];
+      for (uint j = 0; j < b->number_of_nodes(); j++) {
+        Node* n = b->get_node(j);
 
         // If the node is a MachConstantNode evaluate the constant
         // value section.
@@ -1247,14 +1247,14 @@
     // Define the label at the beginning of the basic block
     MacroAssembler(cb).bind(blk_labels[block->_pre_order]);
 
-    uint last_inst = block->_nodes.size();
+    uint last_inst = block->number_of_nodes();
 
     // Emit block normally, except for last instruction.
     // Emit means "dump code bits into code buffer".
     for (uint j = 0; j<last_inst; j++) {
 
       // Get the node
-      Node* n = block->_nodes[j];
+      Node* n = block->get_node(j);
 
       // See if delay slots are supported
       if (valid_bundle_info(n) &&
@@ -1308,7 +1308,7 @@
           assert((padding % nop_size) == 0, "padding is not a multiple of NOP size");
           int nops_cnt = padding / nop_size;
           MachNode *nop = new (this) MachNopNode(nops_cnt);
-          block->_nodes.insert(j++, nop);
+          block->insert_node(nop, j++);
           last_inst++;
           _cfg->map_node_to_block(nop, block);
           nop->emit(*cb, _regalloc);
@@ -1394,7 +1394,7 @@
               // Insert padding between avoid_back_to_back branches.
               if (needs_padding && replacement->avoid_back_to_back()) {
                 MachNode *nop = new (this) MachNopNode();
-                block->_nodes.insert(j++, nop);
+                block->insert_node(nop, j++);
                 _cfg->map_node_to_block(nop, block);
                 last_inst++;
                 nop->emit(*cb, _regalloc);
@@ -1407,7 +1407,7 @@
               jmp_size[i]   = new_size;
               jmp_rule[i]   = mach->rule();
 #endif
-              block->_nodes.map(j, replacement);
+              block->map_node(replacement, j);
               mach->subsume_by(replacement, C);
               n    = replacement;
               mach = replacement;
@@ -1438,7 +1438,7 @@
             count++;
             uint i4;
             for (i4 = 0; i4 < last_inst; ++i4) {
-              if (block->_nodes[i4] == oop_store) {
+              if (block->get_node(i4) == oop_store) {
                 break;
               }
             }
@@ -1548,7 +1548,7 @@
       int padding = nb->alignment_padding(current_offset);
       if( padding > 0 ) {
         MachNode *nop = new (this) MachNopNode(padding / nop_size);
-        block->_nodes.insert(block->_nodes.size(), nop);
+        block->insert_node(nop, block->number_of_nodes());
         _cfg->map_node_to_block(nop, block);
         nop->emit(*cb, _regalloc);
         current_offset = cb->insts_size();
@@ -1655,8 +1655,8 @@
     int j;
 
     // Find the branch; ignore trailing NOPs.
-    for (j = block->_nodes.size() - 1; j >= 0; j--) {
-      n = block->_nodes[j];
+    for (j = block->number_of_nodes() - 1; j >= 0; j--) {
+      n = block->get_node(j);
       if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con) {
         break;
       }
@@ -1675,8 +1675,8 @@
       uint call_return = call_returns[block->_pre_order];
 #ifdef ASSERT
       assert( call_return > 0, "no call seen for this basic block" );
-      while (block->_nodes[--j]->is_MachProj()) ;
-      assert(block->_nodes[j]->is_MachCall(), "CatchProj must follow call");
+      while (block->get_node(--j)->is_MachProj()) ;
+      assert(block->get_node(j)->is_MachCall(), "CatchProj must follow call");
 #endif
      // last instruction is a CatchNode, find its CatchProjNodes
       int nof_succs = block->_num_succs;
@@ -1782,7 +1782,7 @@
   // Get the last node
   Block* block = _cfg->get_block(_cfg->number_of_blocks() - 1);
 
-  _next_node = block->_nodes[block->_nodes.size() - 1];
+  _next_node = block->get_node(block->number_of_nodes() - 1);
 }
 
 #ifndef PRODUCT
@@ -1875,7 +1875,7 @@
     // Used to allow latency 0 to force an instruction to the beginning
     // of the bb
     uint latency = 1;
-    Node *use = bb->_nodes[j];
+    Node *use = bb->get_node(j);
     uint nlen = use->len();
 
     // Walk over all the inputs
@@ -2286,7 +2286,7 @@
        (OptoReg::is_valid(_regalloc->get_reg_first(n)) || op != Op_BoxLock)) ) {
 
     // Push any trailing projections
-    if( bb->_nodes[bb->_nodes.size()-1] != n ) {
+    if( bb->get_node(bb->number_of_nodes()-1) != n ) {
       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
         Node *foi = n->fast_out(i);
         if( foi->is_Proj() )
@@ -2329,21 +2329,21 @@
   _unconditional_delay_slot = NULL;
 
 #ifdef ASSERT
-  for( uint i=0; i < bb->_nodes.size(); i++ )
-    assert( _uses[bb->_nodes[i]->_idx] == 0, "_use array not clean" );
+  for( uint i=0; i < bb->number_of_nodes(); i++ )
+    assert( _uses[bb->get_node(i)->_idx] == 0, "_use array not clean" );
 #endif
 
   // Force the _uses count to never go to zero for unschedulable pieces
   // of the block
   for( uint k = 0; k < _bb_start; k++ )
-    _uses[bb->_nodes[k]->_idx] = 1;
-  for( uint l = _bb_end; l < bb->_nodes.size(); l++ )
-    _uses[bb->_nodes[l]->_idx] = 1;
+    _uses[bb->get_node(k)->_idx] = 1;
+  for( uint l = _bb_end; l < bb->number_of_nodes(); l++ )
+    _uses[bb->get_node(l)->_idx] = 1;
 
   // Iterate backwards over the instructions in the block.  Don't count the
   // branch projections at end or the block header instructions.
   for( uint j = _bb_end-1; j >= _bb_start; j-- ) {
-    Node *n = bb->_nodes[j];
+    Node *n = bb->get_node(j);
     if( n->is_Proj() ) continue; // Projections handled another way
 
     // Account for all uses
@@ -2398,8 +2398,8 @@
 #ifndef PRODUCT
     if (_cfg->C->trace_opto_output()) {
       tty->print("#  Schedule BB#%03d (initial)\n", i);
-      for (uint j = 0; j < bb->_nodes.size(); j++) {
-        bb->_nodes[j]->dump();
+      for (uint j = 0; j < bb->number_of_nodes(); j++) {
+        bb->get_node(j)->dump();
       }
     }
 #endif
@@ -2426,10 +2426,10 @@
     }
 
     // Leave untouched the starting instruction, any Phis, a CreateEx node
-    // or Top.  bb->_nodes[_bb_start] is the first schedulable instruction.
-    _bb_end = bb->_nodes.size()-1;
+    // or Top.  bb->get_node(_bb_start) is the first schedulable instruction.
+    _bb_end = bb->number_of_nodes()-1;
     for( _bb_start=1; _bb_start <= _bb_end; _bb_start++ ) {
-      Node *n = bb->_nodes[_bb_start];
+      Node *n = bb->get_node(_bb_start);
      // Things not matched, like PhiNodes and ProjNodes, don't get scheduled.
       // Also, MachIdealNodes do not get scheduled
       if( !n->is_Mach() ) continue;     // Skip non-machine nodes
@@ -2449,19 +2449,19 @@
     // in the block), because they have delay slots we can fill.  Calls all
     // have their delay slots filled in the template expansions, so we don't
     // bother scheduling them.
-    Node *last = bb->_nodes[_bb_end];
+    Node *last = bb->get_node(_bb_end);
     // Ignore trailing NOPs.
     while (_bb_end > 0 && last->is_Mach() &&
            last->as_Mach()->ideal_Opcode() == Op_Con) {
-      last = bb->_nodes[--_bb_end];
+      last = bb->get_node(--_bb_end);
     }
     assert(!last->is_Mach() || last->as_Mach()->ideal_Opcode() != Op_Con, "");
     if( last->is_Catch() ||
        // Exclude unreachable path case when Halt node is in a separate block.
        (_bb_end > 1 && last->is_Mach() && last->as_Mach()->ideal_Opcode() == Op_Halt) ) {
       // There must be a prior call.  Skip it.
-      while( !bb->_nodes[--_bb_end]->is_MachCall() ) {
-        assert( bb->_nodes[_bb_end]->is_MachProj(), "skipping projections after expected call" );
+      while( !bb->get_node(--_bb_end)->is_MachCall() ) {
+        assert( bb->get_node(_bb_end)->is_MachProj(), "skipping projections after expected call" );
       }
     } else if( last->is_MachNullCheck() ) {
       // Backup so the last null-checked memory instruction is
@@ -2470,7 +2470,7 @@
       Node *mem = last->in(1);
       do {
         _bb_end--;
-      } while (mem != bb->_nodes[_bb_end]);
+      } while (mem != bb->get_node(_bb_end));
     } else {
       // Set _bb_end to point after last schedulable inst.
       _bb_end++;
@@ -2499,7 +2499,7 @@
     assert( _scheduled.size() == _bb_end - _bb_start, "wrong number of instructions" );
 #ifdef ASSERT
     for( uint l = _bb_start; l < _bb_end; l++ ) {
-      Node *n = bb->_nodes[l];
+      Node *n = bb->get_node(l);
       uint m;
       for( m = 0; m < _bb_end-_bb_start; m++ )
         if( _scheduled[m] == n )
@@ -2510,14 +2510,14 @@
 
     // Now copy the instructions (in reverse order) back to the block
     for ( uint k = _bb_start; k < _bb_end; k++ )
-      bb->_nodes.map(k, _scheduled[_bb_end-k-1]);
+      bb->map_node(_scheduled[_bb_end-k-1], k);
 
 #ifndef PRODUCT
     if (_cfg->C->trace_opto_output()) {
       tty->print("#  Schedule BB#%03d (final)\n", i);
       uint current = 0;
-      for (uint j = 0; j < bb->_nodes.size(); j++) {
-        Node *n = bb->_nodes[j];
+      for (uint j = 0; j < bb->number_of_nodes(); j++) {
+        Node *n = bb->get_node(j);
         if( valid_bundle_info(n) ) {
           Bundle *bundle = node_bundling(n);
           if (bundle->instr_count() > 0 || bundle->flags() > 0) {
@@ -2579,8 +2579,8 @@
   // Walk over the block backwards.  Check to make sure each DEF doesn't
   // kill a live value (other than the one it's supposed to).  Add each
   // USE to the live set.
-  for( uint i = b->_nodes.size()-1; i >= _bb_start; i-- ) {
-    Node *n = b->_nodes[i];
+  for( uint i = b->number_of_nodes()-1; i >= _bb_start; i-- ) {
+    Node *n = b->get_node(i);
     int n_op = n->Opcode();
     if( n_op == Op_MachProj && n->ideal_reg() == MachProjNode::fat_proj ) {
       // Fat-proj kills a slew of registers
@@ -2711,7 +2711,7 @@
         pinch->req() == 1 ) {   // pinch not yet in block?
       pinch->del_req(0);        // yank pointer to later-def, also set flag
       // Insert the pinch-point in the block just after the last use
-      b->_nodes.insert(b->find_node(use)+1,pinch);
+      b->insert_node(pinch, b->find_node(use) + 1);
       _bb_end++;                // Increase size scheduled region in block
     }
 
@@ -2763,10 +2763,10 @@
   // it being in the current block.
   bool fat_proj_seen = false;
   uint last_safept = _bb_end-1;
-  Node* end_node         = (_bb_end-1 >= _bb_start) ? b->_nodes[last_safept] : NULL;
+  Node* end_node         = (_bb_end-1 >= _bb_start) ? b->get_node(last_safept) : NULL;
   Node* last_safept_node = end_node;
   for( uint i = _bb_end-1; i >= _bb_start; i-- ) {
-    Node *n = b->_nodes[i];
+    Node *n = b->get_node(i);
     int is_def = n->outcnt();   // def if some uses prior to adding precedence edges
     if( n->is_MachProj() && n->ideal_reg() == MachProjNode::fat_proj ) {
       // Fat-proj kills a slew of registers
@@ -2815,7 +2815,7 @@
     // Do not allow defs of new derived values to float above GC
     // points unless the base is definitely available at the GC point.
 
-    Node *m = b->_nodes[i];
+    Node *m = b->get_node(i);
 
     // Add precedence edge from following safepoint to use of derived pointer
     if( last_safept_node != end_node &&
@@ -2832,11 +2832,11 @@
 
     if( n->jvms() ) {           // Precedence edge from derived to safept
       // Check if last_safept_node was moved by pinch-point insertion in anti_do_use()
-      if( b->_nodes[last_safept] != last_safept_node ) {
+      if( b->get_node(last_safept) != last_safept_node ) {
         last_safept = b->find_node(last_safept_node);
       }
       for( uint j=last_safept; j > i; j-- ) {
-        Node *mach = b->_nodes[j];
+        Node *mach = b->get_node(j);
         if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP )
           mach->add_prec( n );
       }
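
Most of the output.cpp churn above is mechanical: direct access to Block::_nodes is replaced by an accessor surface (number_of_nodes(), get_node(), map_node(), insert_node(), remove_node()) so the node array can become an implementation detail of Block. A minimal sketch of that accessor shape, using a std::vector stand-in for the real Node_List (illustration only, not HotSpot code):

    #include <cstddef>
    #include <vector>

    struct Node;

    class Block {
      std::vector<Node*> _nodes;  // now private to the block
    public:
      size_t number_of_nodes() const        { return _nodes.size(); }
      Node*  get_node(size_t i) const       { return _nodes[i]; }
      void   map_node(Node* n, size_t i)    { _nodes[i] = n; }  // overwrite slot
      void   insert_node(Node* n, size_t i) { _nodes.insert(_nodes.begin() + i, n); }
      void   remove_node(size_t i)          { _nodes.erase(_nodes.begin() + i); }
    };
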
--- a/src/share/vm/opto/parse.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/parse.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -73,6 +73,7 @@
   bool        try_to_inline(ciMethod* callee_method,
                             ciMethod* caller_method,
                             int caller_bci,
+                            JVMState* jvms,
                             ciCallProfile& profile,
                             WarmCallInfo* wci_result,
                             bool& should_delay);
@@ -83,6 +84,7 @@
                             WarmCallInfo* wci_result);
   bool        should_not_inline(ciMethod* callee_method,
                                 ciMethod* caller_method,
+                                JVMState* jvms,
                                 WarmCallInfo* wci_result);
   void        print_inlining(ciMethod* callee_method, int caller_bci,
                              bool success) const;
@@ -347,13 +349,15 @@
   int _est_switch_depth;        // Debugging SwitchRanges.
 #endif
 
+  // parser for the caller of the method of this object
+  Parse* const _parent;
+
  public:
   // Constructor
-  Parse(JVMState* caller, ciMethod* parse_method, float expected_uses);
+  Parse(JVMState* caller, ciMethod* parse_method, float expected_uses, Parse* parent);
 
   virtual Parse* is_Parse() const { return (Parse*)this; }
 
- public:
   // Accessors.
   JVMState*     caller()        const { return _caller; }
   float         expected_uses() const { return _expected_uses; }
@@ -405,6 +409,8 @@
     return block()->successor_for_bci(bci);
   }
 
+  Parse* parent_parser() const { return _parent; }
+
  private:
   // Create a JVMS & map for the initial state of this method.
   SafePointNode* create_entry_map();
@@ -518,7 +524,7 @@
 
   // loading from a constant field or the constant pool
   // returns false if push failed (non-perm field constants only, not ldcs)
-  bool push_constant(ciConstant con, bool require_constant = false, bool is_autobox_cache = false);
+  bool push_constant(ciConstant con, bool require_constant = false, bool is_autobox_cache = false, const Type* basic_type = NULL);
 
   // implementation of object creation bytecodes
   void emit_guard_for_new(ciInstanceKlass* klass);
@@ -601,6 +607,9 @@
   // Assumes that there is no applicable local handler.
   void throw_to_exit(SafePointNode* ex_map);
 
+  // Use speculative type to optimize CmpP node
+  Node* optimize_cmp_with_klass(Node* c);
+
  public:
 #ifndef PRODUCT
   // Handle PrintOpto, etc.
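
The new _parent field threads the chain of active parsers through inlining: the outermost Parse is constructed with a NULL parent and every inlined callee records the Parse that requested it, reachable via parent_parser(). A standalone model of walking such a chain, with is_being_parsed() as a hypothetical helper that is not part of this patch (illustration only):

    // Each parser links to the parser of its caller; null at the top level.
    struct ParserM {
      const void* method;  // stands in for the ciMethod* being parsed
      ParserM*    parent;  // what parent_parser() returns in the patch
    };

    static bool is_being_parsed(ParserM* p, const void* m) {
      for (ParserM* q = p; q != 0; q = q->parent) {
        if (q->method == m) return true;  // m is already on the parse stack
      }
      return false;
    }
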
--- a/src/share/vm/opto/parse1.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/parse1.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -381,8 +381,8 @@
 
 //------------------------------Parse------------------------------------------
 // Main parser constructor.
-Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses)
-  : _exits(caller)
+Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses, Parse* parent)
+  : _exits(caller), _parent(parent)
 {
   // Init some variables
   _caller = caller;
@@ -1102,6 +1102,10 @@
     _synch_lock = shared_lock(lock_obj);
   }
 
+  // Feed profiling data for parameters to the type system so it can
+  // propagate it as speculative types
+  record_profiled_parameters_for_speculation();
+
   if (depth() == 1) {
     increment_and_test_invocation_counter(Tier2CompileThreshold);
   }
--- a/src/share/vm/opto/parse2.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/parse2.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -268,7 +268,7 @@
     return adjoinRange(value, value, dest, table_index);
   }
 
-  void print(ciEnv* env) {
+  void print() {
     if (is_singleton())
       tty->print(" {%d}=>%d", lo(), dest());
     else if (lo() == min_jint)
@@ -471,8 +471,8 @@
   // These are the switch destinations hanging off the jumpnode
   int i = 0;
   for (SwitchRange* r = lo; r <= hi; r++) {
-    for (int j = r->lo(); j <= r->hi(); j++, i++) {
-      Node* input = _gvn.transform(new (C) JumpProjNode(jtn, i, r->dest(), j - lowval));
+    for (int64_t j = r->lo(); j <= r->hi(); j++, i++) {
+      Node* input = _gvn.transform(new (C) JumpProjNode(jtn, i, r->dest(), (int)(j - lowval)));
       {
         PreserveJVMState pjvms(this);
         set_control(input);
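
The widening of j above matters at the boundary: with a 32-bit induction variable, a SwitchRange whose hi() is max_jint makes the test "j <= r->hi()" a tautology, because j++ wraps around after the last iteration and the loop never exits. A standalone demonstration that a 64-bit j steps cleanly past INT_MAX (illustration only, not HotSpot code):

    #include <climits>
    #include <cstdint>

    // Counts the inclusive range [lo, hi] without wrap-around trouble.
    static int count_range(int32_t lo, int32_t hi) {
      int n = 0;
      for (int64_t j = lo; j <= hi; j++) n++;  // 64-bit j can exceed INT_MAX
      return n;
    }

    int main() {
      return count_range(INT_MAX - 2, INT_MAX) == 3 ? 0 : 1;  // exits with 0
    }
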
@@ -632,7 +632,7 @@
     }
     tty->print("   ");
     for( r = lo; r <= hi; r++ ) {
-      r->print(env());
+      r->print();
     }
     tty->print_cr("");
   }
@@ -1366,6 +1366,56 @@
   }
 }
 
+/**
+ * Use speculative type to optimize CmpP node: if comparison is
+ * against the low level class, cast the object to the speculative
+ * type if any. CmpP should then go away.
+ *
+ * @param c  expected CmpP node
+ * @return   result of CmpP on the object cast to its speculative type
+ *
+ */
+Node* Parse::optimize_cmp_with_klass(Node* c) {
+  // If this is transformed by the _gvn to a comparison with the low
+  // level klass then we may be able to use speculation
+  if (c->Opcode() == Op_CmpP &&
+      (c->in(1)->Opcode() == Op_LoadKlass || c->in(1)->Opcode() == Op_DecodeNKlass) &&
+      c->in(2)->is_Con()) {
+    Node* load_klass = NULL;
+    Node* decode = NULL;
+    if (c->in(1)->Opcode() == Op_DecodeNKlass) {
+      decode = c->in(1);
+      load_klass = c->in(1)->in(1);
+    } else {
+      load_klass = c->in(1);
+    }
+    if (load_klass->in(2)->is_AddP()) {
+      Node* addp = load_klass->in(2);
+      Node* obj = addp->in(AddPNode::Address);
+      const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
+      if (obj_type->speculative_type() != NULL) {
+        ciKlass* k = obj_type->speculative_type();
+        inc_sp(2);
+        obj = maybe_cast_profiled_obj(obj, k);
+        dec_sp(2);
+        // Make the CmpP use the casted obj
+        addp = basic_plus_adr(obj, addp->in(AddPNode::Offset));
+        load_klass = load_klass->clone();
+        load_klass->set_req(2, addp);
+        load_klass = _gvn.transform(load_klass);
+        if (decode != NULL) {
+          decode = decode->clone();
+          decode->set_req(1, load_klass);
+          load_klass = _gvn.transform(decode);
+        }
+        c = c->clone();
+        c->set_req(1, load_klass);
+        c = _gvn.transform(c);
+      }
+    }
+  }
+  return c;
+}
 
 //------------------------------do_one_bytecode--------------------------------
 // Parse this bytecode, and alter the Parser's JVM->Node mapping
@@ -2239,6 +2289,7 @@
     a = pop();
     b = pop();
     c = _gvn.transform( new (C) CmpPNode(b, a) );
+    c = optimize_cmp_with_klass(c);
     do_if(btest, c);
     break;
 
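
optimize_cmp_with_klass() above targets the shape CmpP(LoadKlass(AddP(obj, ...)), constant): when profiling recorded a speculative type for obj, the object is first cast to that type under a trap guard (maybe_cast_profiled_obj), after which the rebuilt klass load, and hence the compare, can constant-fold. A loose standalone analogy for why the guard makes the compare foldable; the deoptimization path is modeled as a plain false return (illustration only, not HotSpot code):

    #include <cassert>

    struct Klass { int id; };
    struct Oop   { const Klass* klass; };

    static bool cmp_klass_with_speculation(const Oop* o,
                                           const Klass* speculative,
                                           const Klass* expected) {
      if (o->klass != speculative) return false;  // guard: uncommon trap path
      return speculative == expected;             // now a compare of constants
    }

    int main() {
      Klass k1 = { 1 }, k2 = { 2 };
      Oop o = { &k1 };
      assert(cmp_klass_with_speculation(&o, &k1, &k1));
      assert(!cmp_klass_with_speculation(&o, &k1, &k2));
      return 0;
    }
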
--- a/src/share/vm/opto/parse3.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/parse3.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -147,7 +147,15 @@
 void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) {
   // Does this field have a constant value?  If so, just push the value.
   if (field->is_constant()) {
-    // final field
+    // final or stable field
+    const Type* stable_type = NULL;
+    if (FoldStableValues && field->is_stable()) {
+      stable_type = Type::get_const_type(field->type());
+      if (field->type()->is_array_klass()) {
+        int stable_dimension = field->type()->as_array_klass()->dimension();
+        stable_type = stable_type->is_aryptr()->cast_to_stable(true, stable_dimension);
+      }
+    }
     if (field->is_static()) {
       // final static field
       if (C->eliminate_boxing()) {
@@ -167,11 +175,10 @@
           }
         }
       }
-      if (push_constant(field->constant_value()))
+      if (push_constant(field->constant_value(), false, false, stable_type))
         return;
-    }
-    else {
-      // final non-static field
+    } else {
+      // final or stable non-static field
       // Treat final non-static fields of trusted classes (classes in
       // java.lang.invoke and sun.invoke packages and subpackages) as
       // compile time constants.
@@ -179,8 +186,12 @@
         const TypeOopPtr* oop_ptr = obj->bottom_type()->isa_oopptr();
         ciObject* constant_oop = oop_ptr->const_oop();
         ciConstant constant = field->constant_value_of(constant_oop);
-        if (push_constant(constant, true))
-          return;
+        if (FoldStableValues && field->is_stable() && constant.is_null_or_zero()) {
+          // fall through to field load; the field is not yet initialized
+        } else {
+          if (push_constant(constant, true, false, stable_type))
+            return;
+        }
       }
     }
   }
@@ -301,7 +312,8 @@
   // Note the presence of writes to final non-static fields, so that we
   // can insert a memory barrier later on to keep the writes from floating
   // out of the constructor.
-  if (is_field && field->is_final()) {
+  // Any method can write a @Stable field; insert memory barriers after those also.
+  if (is_field && (field->is_final() || field->is_stable())) {
     set_wrote_final(true);
     // Preserve allocation ptr to create precedent edge to it in membar
     // generated on exit from constructor.
@@ -314,35 +326,21 @@
 }
 
 
-bool Parse::push_constant(ciConstant constant, bool require_constant, bool is_autobox_cache) {
+
+bool Parse::push_constant(ciConstant constant, bool require_constant, bool is_autobox_cache, const Type* stable_type) {
+  const Type* con_type = Type::make_from_constant(constant, require_constant, is_autobox_cache);
   switch (constant.basic_type()) {
-  case T_BOOLEAN:  push( intcon(constant.as_boolean()) ); break;
-  case T_INT:      push( intcon(constant.as_int())     ); break;
-  case T_CHAR:     push( intcon(constant.as_char())    ); break;
-  case T_BYTE:     push( intcon(constant.as_byte())    ); break;
-  case T_SHORT:    push( intcon(constant.as_short())   ); break;
-  case T_FLOAT:    push( makecon(TypeF::make(constant.as_float())) );  break;
-  case T_DOUBLE:   push_pair( makecon(TypeD::make(constant.as_double())) );  break;
-  case T_LONG:     push_pair( longcon(constant.as_long()) ); break;
   case T_ARRAY:
-  case T_OBJECT: {
+  case T_OBJECT:
     // cases:
     //   can_be_constant    = (oop not scavengable || ScavengeRootsInCode != 0)
     //   should_be_constant = (oop not scavengable || ScavengeRootsInCode >= 2)
     // An oop is not scavengable if it is in the perm gen.
-    ciObject* oop_constant = constant.as_object();
-    if (oop_constant->is_null_object()) {
-      push( zerocon(T_OBJECT) );
-      break;
-    } else if (require_constant || oop_constant->should_be_constant()) {
-      push( makecon(TypeOopPtr::make_from_constant(oop_constant, require_constant, is_autobox_cache)) );
-      break;
-    } else {
-      // we cannot inline the oop, but we can use it later to narrow a type
-      return false;
-    }
-  }
-  case T_ILLEGAL: {
+    if (stable_type != NULL && con_type != NULL && con_type->isa_oopptr())
+      con_type = con_type->join(stable_type);
+    break;
+
+  case T_ILLEGAL:
     // Invalid ciConstant returned due to OutOfMemoryError in the CI
     assert(C->env()->failing(), "otherwise should not see this");
     // These always occur because of object types; we are going to
@@ -350,17 +348,16 @@
     push( zerocon(T_OBJECT) );
     return false;
   }
-  default:
-    ShouldNotReachHere();
+
+  if (con_type == NULL)
+    // we cannot inline the oop, but we can use it later to narrow a type
     return false;
-  }
 
-  // success
+  push_node(constant.basic_type(), makecon(con_type));
   return true;
 }
 
 
-
 //=============================================================================
 void Parse::do_anewarray() {
   bool will_link;
--- a/src/share/vm/opto/parseHelper.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/parseHelper.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -128,7 +128,7 @@
   }
 
   // Push the bool result back on stack
-  Node* res = gen_instanceof(peek(), makecon(TypeKlassPtr::make(klass)));
+  Node* res = gen_instanceof(peek(), makecon(TypeKlassPtr::make(klass)), true);
 
   // Pop from stack AFTER gen_instanceof because it can uncommon trap.
   pop();
@@ -343,10 +343,14 @@
 
   // Get the Method* node.
   ciMethod* m = method();
-  address counters_adr = m->ensure_method_counters();
+  MethodCounters* counters_adr = m->ensure_method_counters();
+  if (counters_adr == NULL) {
+    C->record_failure("method counters allocation failed");
+    return;
+  }
 
   Node* ctrl = control();
-  const TypePtr* adr_type = TypeRawPtr::make(counters_adr);
+  const TypePtr* adr_type = TypeRawPtr::make((address) counters_adr);
   Node *counters_node = makecon(adr_type);
   Node* adr_iic_node = basic_plus_adr(counters_node, counters_node,
     MethodCounters::interpreter_invocation_counter_offset_in_bytes());
--- a/src/share/vm/opto/phaseX.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/phaseX.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1385,6 +1385,20 @@
   }
 }
 
+/**
+ * Remove the speculative part of all types that we know of
+ */
+void PhaseIterGVN::remove_speculative_types()  {
+  assert(UseTypeSpeculation, "speculation is off");
+  for (uint i = 0; i < _types.Size(); i++)  {
+    const Type* t = _types.fast_lookup(i);
+    if (t != NULL && t->isa_oopptr()) {
+      const TypeOopPtr* to = t->is_oopptr();
+      _types.map(i, to->remove_speculative());
+    }
+  }
+}
+
 //=============================================================================
 #ifndef PRODUCT
 uint PhaseCCP::_total_invokes   = 0;
@@ -1648,10 +1662,10 @@
     bool block_not_printed = true;
 
     // and each instruction within a block
-    uint end_index = block->_nodes.size();
+    uint end_index = block->number_of_nodes();
     // block->end_idx() not valid after PhaseRegAlloc
     for( uint instruction_index = 1; instruction_index < end_index; ++instruction_index ) {
-      Node     *n = block->_nodes.at(instruction_index);
+      Node     *n = block->get_node(instruction_index);
       if( n->is_Mach() ) {
         MachNode *m = n->as_Mach();
         int deleted_count = 0;
@@ -1673,7 +1687,7 @@
             }
             // Print instructions being deleted
             for( int i = (deleted_count - 1); i >= 0; --i ) {
-              block->_nodes.at(instruction_index-i)->as_Mach()->format(_regalloc); tty->cr();
+              block->get_node(instruction_index-i)->as_Mach()->format(_regalloc); tty->cr();
             }
             tty->print_cr("replaced with");
             // Print new instruction
@@ -1687,11 +1701,11 @@
           //  the node index to live range mappings.)
           uint safe_instruction_index = (instruction_index - deleted_count);
           for( ; (instruction_index > safe_instruction_index); --instruction_index ) {
-            block->_nodes.remove( instruction_index );
+            block->remove_node( instruction_index );
           }
           // install new node after safe_instruction_index
-          block->_nodes.insert( safe_instruction_index + 1, m2 );
-          end_index = block->_nodes.size() - 1; // Recompute new block size
+          block->insert_node(m2, safe_instruction_index + 1);
+          end_index = block->number_of_nodes() - 1; // Recompute new block size
           NOT_PRODUCT( inc_peepholes(); )
         }
       }
--- a/src/share/vm/opto/phaseX.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/phaseX.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -500,6 +500,8 @@
   ProjNode* create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry,
                                         Deoptimization::DeoptReason reason);
 
+  void remove_speculative_types();
+
 #ifndef PRODUCT
 protected:
   // Sub-quadratic implementation of VerifyIterativeGVN.
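
PhaseIterGVN::remove_speculative_types() above sweeps the phase's whole type table and strips the profile-derived component from every oop type, so that once speculation has served its purpose later phases see only the static part. A standalone model of the sweep (illustration only, not HotSpot code):

    #include <cstddef>
    #include <vector>

    struct TypeM {
      int static_part;
      int speculative_part;  // 0 encodes "no speculative part"
      TypeM remove_speculative() const {
        TypeM t = { static_part, 0 };
        return t;
      }
    };

    // Walk the whole table, as the loop over _types does above.
    static void remove_speculative_types(std::vector<TypeM>& table) {
      for (size_t i = 0; i < table.size(); i++) {
        table[i] = table[i].remove_speculative();
      }
    }
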
--- a/src/share/vm/opto/postaloc.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/postaloc.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -97,7 +97,8 @@
 static bool expected_yanked_node(Node *old, Node *orig_old) {
   // This code expects only the following original nodes:
   // - a load from the constant table, which may have these data inputs:
-  //     MachConstantBase, Phi, MachTemp, MachSpillCopy
+  //     MachConstantBase, MachTemp, MachSpillCopy
+  // - Phi nodes that are considered Junk
   // - a load-constant node, which may have these data inputs:
   //     MachTemp, MachSpillCopy
   // - MachSpillCopy
@@ -112,7 +113,9 @@
     return (old == orig_old);
   } else if (old->is_MachTemp()) {
     return orig_old->is_Con();
-  } else if (old->is_Phi() || old->is_MachConstantBase()) {
+  } else if (old->is_Phi()) { // Junk phi's
+    return true;
+  } else if (old->is_MachConstantBase()) {
     return (orig_old->is_Con() && orig_old->is_MachConstant());
   }
   return false;
@@ -423,8 +426,8 @@
 
     // Count of Phis in block
     uint phi_dex;
-    for (phi_dex = 1; phi_dex < block->_nodes.size(); phi_dex++) {
-      Node* phi = block->_nodes[phi_dex];
+    for (phi_dex = 1; phi_dex < block->number_of_nodes(); phi_dex++) {
+      Node* phi = block->get_node(phi_dex);
       if (!phi->is_Phi()) {
         break;
       }
@@ -439,7 +442,7 @@
       Block* pb = _cfg.get_block_for_node(block->pred(j));
       // Remove copies along phi edges
       for (uint k = 1; k < phi_dex; k++) {
-        elide_copy(block->_nodes[k], j, block, *blk2value[pb->_pre_order], *blk2regnd[pb->_pre_order], false);
+        elide_copy(block->get_node(k), j, block, *blk2value[pb->_pre_order], *blk2regnd[pb->_pre_order], false);
       }
       if (blk2value[pb->_pre_order]) { // Have a mapping on this edge?
         // See if this predecessor's mappings have been used by everybody
@@ -510,7 +513,7 @@
     // For all Phi's
     for (j = 1; j < phi_dex; j++) {
       uint k;
-      Node *phi = block->_nodes[j];
+      Node *phi = block->get_node(j);
       uint pidx = _lrg_map.live_range_id(phi);
       OptoReg::Name preg = lrgs(_lrg_map.live_range_id(phi)).reg();
 
@@ -522,11 +525,9 @@
           u = u ? NodeSentinel : x; // Capture unique input, or NodeSentinel for 2nd input
       }
       if (u != NodeSentinel) {    // Junk Phi.  Remove
-        block->_nodes.remove(j--);
+        phi->replace_by(u);
+        j -= yank_if_dead(phi, block, &value, &regnd);
         phi_dex--;
-        _cfg.unmap_node_from_block(phi);
-        phi->replace_by(u);
-        phi->disconnect_inputs(NULL, C);
         continue;
       }
       // Note that if value[pidx] exists, then we merged no new values here
@@ -552,8 +553,8 @@
     }
 
     // For all remaining instructions
-    for (j = phi_dex; j < block->_nodes.size(); j++) {
-      Node* n = block->_nodes[j];
+    for (j = phi_dex; j < block->number_of_nodes(); j++) {
+      Node* n = block->get_node(j);
 
       if(n->outcnt() == 0 &&   // Dead?
          n != C->top() &&      // (ignore TOP, it has no du info)
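
The postaloc.cpp hunk above reworks junk-phi elimination: a Phi whose inputs, ignoring the phi itself, all resolve to one unique value merges nothing, so it is replaced by that value and handed to yank_if_dead(), which also cleans up inputs that die with it (in the code above, NodeSentinel marks "more than one distinct input"). A standalone model of the unique-input test (illustration only, not HotSpot code):

    struct PhiM {
      PhiM** in;   // in[0] is the region; data inputs start at 1
      int    req;
    };

    // Returns the unique data input, or null when the phi is a real merge.
    static PhiM* unique_input(PhiM* phi) {
      PhiM* u = 0;
      for (int i = 1; i < phi->req; i++) {
        PhiM* x = phi->in[i];
        if (x == 0 || x == phi || x == u) continue;  // skip gaps, self, repeats
        if (u != 0) return 0;                        // second distinct input
        u = x;
      }
      return u;
    }
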
--- a/src/share/vm/opto/reg_split.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/reg_split.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -51,15 +51,6 @@
 
 static const char out_of_nodes[] = "out of nodes during split";
 
-static bool contains_no_live_range_input(const Node* def) {
-  for (uint i = 1; i < def->req(); ++i) {
-    if (def->in(i) != NULL && def->in_RegMask(i).is_NotEmpty()) {
-      return false;
-    }
-  }
-  return true;
-}
-
 //------------------------------get_spillcopy_wide-----------------------------
 // Get a SpillCopy node with wide-enough masks.  Use the 'wide-mask', the
 // wide ideal-register spill-mask if possible.  If the 'wide-mask' does
@@ -112,17 +103,17 @@
 void PhaseChaitin::insert_proj( Block *b, uint i, Node *spill, uint maxlrg ) {
   // Skip intervening ProjNodes.  Do not insert between a ProjNode and
   // its definer.
-  while( i < b->_nodes.size() &&
-         (b->_nodes[i]->is_Proj() ||
-          b->_nodes[i]->is_Phi() ) )
+  while( i < b->number_of_nodes() &&
+         (b->get_node(i)->is_Proj() ||
+          b->get_node(i)->is_Phi() ) )
     i++;
 
   // Do not insert between a call and its Catch
-  if( b->_nodes[i]->is_Catch() ) {
+  if( b->get_node(i)->is_Catch() ) {
     // Put the instruction at the top of the fall-thru block.
     // Find the fall-thru projection
     while( 1 ) {
-      const CatchProjNode *cp = b->_nodes[++i]->as_CatchProj();
+      const CatchProjNode *cp = b->get_node(++i)->as_CatchProj();
       if( cp->_con == CatchProjNode::fall_through_index )
         break;
     }
@@ -131,7 +122,7 @@
     i = 1;                      // Right at start of block
   }
 
-  b->_nodes.insert(i,spill);    // Insert node in block
+  b->insert_node(spill, i);    // Insert node in block
   _cfg.map_node_to_block(spill,  b); // Update node->block mapping to reflect
   // Adjust the point where we go hi-pressure
   if( i <= b->_ihrp_index ) b->_ihrp_index++;
@@ -160,9 +151,9 @@
   // (The implicit_null_check function ensures the use is also dominated
   // by the branch-not-taken block.)
   Node *be = b->end();
-  if( be->is_MachNullCheck() && be->in(1) == def && def == b->_nodes[loc] ) {
+  if( be->is_MachNullCheck() && be->in(1) == def && def == b->get_node(loc)) {
     // Spill goes in the branch-not-taken block
-    b = b->_succs[b->_nodes[b->end_idx()+1]->Opcode() == Op_IfTrue];
+    b = b->_succs[b->get_node(b->end_idx()+1)->Opcode() == Op_IfTrue];
     loc = 0;                    // Just past the Region
   }
   assert( loc >= 0, "must insert past block head" );
@@ -326,12 +317,11 @@
   if( def->req() > 1 ) {
     for( uint i = 1; i < def->req(); i++ ) {
       Node *in = def->in(i);
-      // Check for single-def (LRG cannot redefined)
       uint lidx = _lrg_map.live_range_id(in);
-      if (lidx >= _lrg_map.max_lrg_id()) {
-        continue; // Value is a recent spill-copy
-      }
-      if (lrgs(lidx).is_singledef()) {
+      // We do not need this for live ranges that are only defined once.
+      // However, this is not true for spill copies that are added in this
+      // Split() pass, since they might get coalesced later on in this pass.
+      if (lidx < _lrg_map.max_lrg_id() && lrgs(lidx).is_singledef()) {
         continue;
       }
 
@@ -450,7 +440,7 @@
 
   // Scan block for 1st use.
   for( uint i = 1; i <= b->end_idx(); i++ ) {
-    Node *n = b->_nodes[i];
+    Node *n = b->get_node(i);
     // Ignore PHI use, these can be up or down
     if (n->is_Phi()) {
       continue;
@@ -485,7 +475,6 @@
 
   uint                 bidx, pidx, slidx, insidx, inpidx, twoidx;
   uint                 non_phi = 1, spill_cnt = 0;
-  Node               **Reachblock;
   Node                *n1, *n2, *n3;
   Node_List           *defs,*phis;
   bool                *UPblock;
@@ -568,7 +557,7 @@
 
     b  = _cfg.get_block(bidx);
     // Reaches & UP arrays for this block
-    Reachblock = Reaches[b->_pre_order];
+    Node** Reachblock = Reaches[b->_pre_order];
     UPblock    = UP[b->_pre_order];
     // Reset counter of start of non-Phi nodes in block
     non_phi = 1;
@@ -647,7 +636,7 @@
 
       // check block for appropriate phinode & update edges
       for( insidx = 1; insidx <= b->end_idx(); insidx++ ) {
-        n1 = b->_nodes[insidx];
+        n1 = b->get_node(insidx);
         // bail if this is not a phi
         phi = n1->is_Phi() ? n1->as_Phi() : NULL;
         if( phi == NULL ) {
@@ -747,7 +736,7 @@
     //----------Walk Instructions in the Block and Split----------
     // For all non-phi instructions in the block
     for( insidx = 1; insidx <= b->end_idx(); insidx++ ) {
-      Node *n = b->_nodes[insidx];
+      Node *n = b->get_node(insidx);
       // Find the defining Node's live range index
       uint defidx = _lrg_map.find_id(n);
       uint cnt = n->req();
@@ -776,7 +765,7 @@
               assert(_lrg_map.find_id(n) == _lrg_map.find_id(u), "should be the same lrg");
               n->replace_by(u); // Then replace with unique input
               n->disconnect_inputs(NULL, C);
-              b->_nodes.remove(insidx);
+              b->remove_node(insidx);
               insidx--;
               b->_ihrp_index--;
               b->_fhrp_index--;
@@ -789,12 +778,12 @@
               (b->_reg_pressure < (uint)INTPRESSURE) ||
               b->_ihrp_index > 4000000 ||
               b->_ihrp_index >= b->end_idx() ||
-              !b->_nodes[b->_ihrp_index]->is_Proj(), "" );
+              !b->get_node(b->_ihrp_index)->is_Proj(), "" );
       assert( insidx > b->_fhrp_index ||
               (b->_freg_pressure < (uint)FLOATPRESSURE) ||
               b->_fhrp_index > 4000000 ||
               b->_fhrp_index >= b->end_idx() ||
-              !b->_nodes[b->_fhrp_index]->is_Proj(), "" );
+              !b->get_node(b->_fhrp_index)->is_Proj(), "" );
 
       // ********** Handle Crossing HRP Boundry **********
       if( (insidx == b->_ihrp_index) || (insidx == b->_fhrp_index) ) {
@@ -819,7 +808,7 @@
                 // Insert point is just past last use or def in the block
                 int insert_point = insidx-1;
                 while( insert_point > 0 ) {
-                  Node *n = b->_nodes[insert_point];
+                  Node *n = b->get_node(insert_point);
                   // Hit top of block?  Quit going backwards
                   if (n->is_Phi()) {
                     break;
@@ -865,7 +854,7 @@
             }
           }  // end if LRG is UP
         }  // end for all spilling live ranges
-        assert( b->_nodes[insidx] == n, "got insidx set incorrectly" );
+        assert( b->get_node(insidx) == n, "got insidx set incorrectly" );
       }  // end if crossing HRP Boundry
 
       // If the LRG index is oob, then this is a new spillcopy, skip it.
@@ -878,7 +867,7 @@
       if (copyidx && defidx == _lrg_map.live_range_id(n->in(copyidx))) {
         n->replace_by( n->in(copyidx) );
         n->set_req( copyidx, NULL );
-        b->_nodes.remove(insidx--);
+        b->remove_node(insidx--);
         b->_ihrp_index--; // Adjust the point where we go hi-pressure
         b->_fhrp_index--;
         continue;
@@ -932,10 +921,10 @@
             // Rematerializable?  Then clone def at use site instead
             // of store/load
             if( def->rematerialize() ) {
-              int old_size = b->_nodes.size();
+              int old_size = b->number_of_nodes();
               def = split_Rematerialize( def, b, insidx, maxlrg, splits, slidx, lrg2reach, Reachblock, true );
               if( !def ) return 0; // Bail out
-              insidx += b->_nodes.size()-old_size;
+              insidx += b->number_of_nodes()-old_size;
             }
 
             MachNode *mach = n->is_Mach() ? n->as_Mach() : NULL;
@@ -1324,16 +1313,17 @@
       pidx = pred->_pre_order;
       // Grab reaching def
       Node *def = Reaches[pidx][slidx];
+      Node** Reachblock = Reaches[pidx];
       assert( def, "must have reaching def" );
       // If input up/down sense and reg-pressure DISagree
-      if (def->rematerialize() && contains_no_live_range_input(def)) {
+      if (def->rematerialize()) {
         // Place the rematerialized node above any MSCs created during
         // phi node splitting.  end_idx points at the insertion point
         // so look at the node before it.
         int insert = pred->end_idx();
         while (insert >= 1 &&
-               pred->_nodes[insert - 1]->is_SpillCopy() &&
-               _lrg_map.find(pred->_nodes[insert - 1]) >= lrgs_before_phi_split) {
+               pred->get_node(insert - 1)->is_SpillCopy() &&
+               _lrg_map.find(pred->get_node(insert - 1)) >= lrgs_before_phi_split) {
           insert--;
         }
         def = split_Rematerialize(def, pred, insert, maxlrg, splits, slidx, lrg2reach, Reachblock, false);
@@ -1402,7 +1392,7 @@
   for (bidx = 0; bidx < _cfg.number_of_blocks(); bidx++) {
     b  = _cfg.get_block(bidx);
     for (insidx = 0; insidx <= b->end_idx(); insidx++) {
-      Node *n = b->_nodes[insidx];
+      Node *n = b->get_node(insidx);
       uint defidx = _lrg_map.find(n);
       assert(defidx < _lrg_map.max_lrg_id(), "Bad live range index in Split");
       assert(defidx < maxlrg,"Bad live range index in Split");
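
The reg_split.cpp changes above are a mechanical refactor: every direct access to Block::_nodes (indexing, insert, remove, size) is routed through the get_node / insert_node / remove_node / number_of_nodes accessors. A minimal standalone sketch of that encapsulation pattern, with MockBlock and its int payload as illustrative stand-ins for the real Block and Node*:

    #include <cassert>
    #include <vector>

    // Illustrative stand-in for the Block node list, showing the accessor
    // pattern this changeset switches to instead of touching _nodes directly.
    class MockBlock {
      std::vector<int> _nodes;   // in HotSpot this is a Node_List of Node*
     public:
      unsigned number_of_nodes() const { return (unsigned)_nodes.size(); }
      int  get_node(unsigned i) const  { assert(i < _nodes.size()); return _nodes[i]; }
      void insert_node(int n, unsigned i) { _nodes.insert(_nodes.begin() + i, n); }
      void remove_node(unsigned i)        { _nodes.erase(_nodes.begin() + i); }
    };

    int main() {
      MockBlock b;
      b.insert_node(7, 0);             // b->insert_node(spill, i) in the diff
      for (unsigned j = 0; j < b.number_of_nodes(); j++)
        (void)b.get_node(j);           // b->get_node(j) replaces b->_nodes[j]
      b.remove_node(0);                // b->remove_node(i) replaces _nodes.remove(i)
      return 0;
    }
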
--- a/src/share/vm/opto/runtime.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/runtime.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -141,9 +141,10 @@
 
 
 #define gen(env, var, type_func_gen, c_func, fancy_jump, pass_tls, save_arg_regs, return_pc) \
-  var = generate_stub(env, type_func_gen, CAST_FROM_FN_PTR(address, c_func), #var, fancy_jump, pass_tls, save_arg_regs, return_pc)
+  var = generate_stub(env, type_func_gen, CAST_FROM_FN_PTR(address, c_func), #var, fancy_jump, pass_tls, save_arg_regs, return_pc); \
+  if (var == NULL) { return false; }
 
-void OptoRuntime::generate(ciEnv* env) {
+bool OptoRuntime::generate(ciEnv* env) {
 
   generate_exception_blob();
 
@@ -161,7 +162,7 @@
   gen(env, _multianewarrayN_Java           , multianewarrayN_Type         , multianewarrayN_C               ,    0 , true , false, false);
   gen(env, _g1_wb_pre_Java                 , g1_wb_pre_Type               , SharedRuntime::g1_wb_pre        ,    0 , false, false, false);
   gen(env, _g1_wb_post_Java                , g1_wb_post_Type              , SharedRuntime::g1_wb_post       ,    0 , false, false, false);
-  gen(env, _complete_monitor_locking_Java  , complete_monitor_enter_Type  , SharedRuntime::complete_monitor_locking_C      ,    0 , false, false, false);
+  gen(env, _complete_monitor_locking_Java  , complete_monitor_enter_Type  , SharedRuntime::complete_monitor_locking_C, 0, false, false, false);
   gen(env, _rethrow_Java                   , rethrow_Type                 , rethrow_C                       ,    2 , true , false, true );
 
   gen(env, _slow_arraycopy_Java            , slow_arraycopy_Type          , SharedRuntime::slow_arraycopy_C ,    0 , false, false, false);
@@ -171,7 +172,7 @@
   gen(env, _zap_dead_Java_locals_Java      , zap_dead_locals_Type         , zap_dead_Java_locals_C          ,    0 , false, true , false );
   gen(env, _zap_dead_native_locals_Java    , zap_dead_locals_Type         , zap_dead_native_locals_C        ,    0 , false, true , false );
 # endif
-
+  return true;
 }
 
 #undef gen
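
With the extra statement in the gen macro, every stub in OptoRuntime::generate now bails out on allocation failure instead of continuing with a NULL entry point. For example, the _rethrow_Java line above expands to roughly the following (expansion reconstructed by hand from the macro, not part of the changeset):

    _rethrow_Java = generate_stub(env, rethrow_Type,
                                  CAST_FROM_FN_PTR(address, rethrow_C),
                                  "_rethrow_Java", 2, true, false, true);
    if (_rethrow_Java == NULL) { return false; }  // bail out of OptoRuntime::generate
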
@@ -988,26 +989,32 @@
   address handler_address = NULL;
 
   Handle exception(thread, thread->exception_oop());
+  address pc = thread->exception_pc();
+
+  // Clear out the exception oop and pc since looking up an
+  // exception handler can cause class loading, which might throw an
+  // exception, and those fields are expected to be clear during
+  // normal bytecode execution.
+  thread->clear_exception_oop_and_pc();
 
   if (TraceExceptions) {
-    trace_exception(exception(), thread->exception_pc(), "");
+    trace_exception(exception(), pc, "");
   }
+
   // for AbortVMOnException flag
   NOT_PRODUCT(Exceptions::debug_check_abort(exception));
 
-  #ifdef ASSERT
-    if (!(exception->is_a(SystemDictionary::Throwable_klass()))) {
-      // should throw an exception here
-      ShouldNotReachHere();
-    }
-  #endif
-
+#ifdef ASSERT
+  if (!(exception->is_a(SystemDictionary::Throwable_klass()))) {
+    // should throw an exception here
+    ShouldNotReachHere();
+  }
+#endif
 
   // new exception handling: this method is entered only from adapters
   // exceptions from compiled java methods are handled in compiled code
   // using rethrow node
 
-  address pc = thread->exception_pc();
   nm = CodeCache::find_nmethod(pc);
   assert(nm != NULL, "No NMethod found");
   if (nm->is_native_method()) {
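
The hunk above hoists the read of exception_pc and clears the thread's exception fields before the handler lookup, because the lookup can trigger class loading and therefore throw while those fields must look clean. A standalone sketch of the capture-then-clear discipline, with MockThread as an illustrative stand-in for the thread's exception state:

    #include <cstddef>

    struct MockThread {                 // stand-in for JavaThread's exception fields
      void* _exception_oop;
      void* _exception_pc;
      void clear_exception_oop_and_pc() { _exception_oop = NULL; _exception_pc = NULL; }
    };

    void* find_handler(MockThread* t) {
      // 1. Capture everything the lookup needs first.
      void* exception = t->_exception_oop;
      void* pc        = t->_exception_pc;
      // 2. Clear the fields before doing work that can itself raise an
      //    exception (class loading), so they look clean if that happens.
      t->clear_exception_oop_and_pc();
      // 3. Only now run the (possibly throwing) lookup, using the copies.
      (void)exception;
      return pc;  // placeholder for the real handler lookup
    }
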
@@ -1358,7 +1365,8 @@
   tty->print(" in ");
   CodeBlob* blob = CodeCache::find_blob(exception_pc);
   if (blob->is_nmethod()) {
-    ((nmethod*)blob)->method()->print_value();
+    nmethod* nm = blob->as_nmethod_or_null();
+    nm->method()->print_value();
   } else if (blob->is_runtime_stub()) {
     tty->print("<runtime-stub>");
   } else {
--- a/src/share/vm/opto/runtime.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/runtime.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -203,8 +203,10 @@
 
   static bool is_callee_saved_register(MachRegisterNumbers reg);
 
-  // One time only generate runtime code stubs
-  static void generate(ciEnv* env);
+  // One time only generate runtime code stubs. Returns true
+  // when runtime stubs have been generated successfully and
+  // false otherwise.
+  static bool generate(ciEnv* env);
 
   // Returns the name of a stub
   static const char* stub_name(address entry);
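
Since generate can now fail, a call site has to check the result rather than assume the stubs exist. A hedged sketch of how a caller might react (the caller is not part of this changeset; the stand-in types below are illustrative):

    // Hypothetical stand-ins: the real OptoRuntime::generate and ciEnv live in the VM.
    struct ciEnv {};
    struct OptoRuntime { static bool generate(ciEnv* env) { (void)env; return true; } };

    bool init_compiler(ciEnv* env) {
      if (!OptoRuntime::generate(env)) {
        // Stub generation failed (e.g. the code cache filled up); report
        // failure instead of running with missing runtime stubs.
        return false;
      }
      return true;
    }
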
--- a/src/share/vm/opto/stringopts.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/stringopts.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -50,10 +50,11 @@
   Node*               _arguments;      // The list of arguments to be concatenated
   GrowableArray<int>  _mode;           // into a String along with a mode flag
                                        // indicating how to treat the value.
-
+  Node_List           _constructors;   // List of constructors (many in case of stacked concat)
   Node_List           _control;        // List of control nodes that will be deleted
   Node_List           _uncommon_traps; // Uncommon traps that needs to be rewritten
                                        // to restart at the initial JVMState.
+
  public:
   // Mode for converting arguments to Strings
   enum {
@@ -73,6 +74,7 @@
     _arguments->del_req(0);
   }
 
+  bool validate_mem_flow();
   bool validate_control_flow();
 
   void merge_add() {
@@ -189,6 +191,10 @@
     assert(!_control.contains(ctrl), "only push once");
     _control.push(ctrl);
   }
+  void add_constructor(Node* init) {
+    assert(!_constructors.contains(init), "only push once");
+    _constructors.push(init);
+  }
   CallStaticJavaNode* end() { return _end; }
   AllocateNode* begin() { return _begin; }
   Node* string_alloc() { return _string_alloc; }
@@ -301,6 +307,12 @@
     }
   }
   result->set_allocation(other->_begin);
+  for (uint i = 0; i < _constructors.size(); i++) {
+    result->add_constructor(_constructors.at(i));
+  }
+  for (uint i = 0; i < other->_constructors.size(); i++) {
+    result->add_constructor(other->_constructors.at(i));
+  }
   result->_multiple = true;
   return result;
 }
@@ -510,7 +522,8 @@
       sc->add_control(constructor);
       sc->add_control(alloc);
       sc->set_allocation(alloc);
-      if (sc->validate_control_flow()) {
+      sc->add_constructor(constructor);
+      if (sc->validate_control_flow() && sc->validate_mem_flow()) {
         return sc;
       } else {
         return NULL;
@@ -620,7 +633,7 @@
 #endif
 
             StringConcat* merged = sc->merge(other, arg);
-            if (merged->validate_control_flow()) {
+            if (merged->validate_control_flow() && merged->validate_mem_flow()) {
 #ifndef PRODUCT
               if (PrintOptimizeStringConcat) {
                 tty->print_cr("stacking would succeed");
@@ -708,6 +721,139 @@
 }
 
 
+bool StringConcat::validate_mem_flow() {
+  Compile* C = _stringopts->C;
+
+  for (uint i = 0; i < _control.size(); i++) {
+#ifndef PRODUCT
+    Node_List path;
+#endif
+    Node* curr = _control.at(i);
+    if (curr->is_Call() && curr != _begin) { // For all calls except the first allocation
+      // Now here's the main invariant in our case:
+      // For memory between the constructor, the appends, and toString we should
+      // only see bottom memory, produced by the previous call we know about.
+      if (!_constructors.contains(curr)) {
+        NOT_PRODUCT(path.push(curr);)
+        Node* mem = curr->in(TypeFunc::Memory);
+        assert(mem != NULL, "calls should have memory edge");
+        assert(!mem->is_Phi(), "should be handled by control flow validation");
+        NOT_PRODUCT(path.push(mem);)
+        while (mem->is_MergeMem()) {
+          for (uint i = 1; i < mem->req(); i++) {
+            if (i != Compile::AliasIdxBot && mem->in(i) != C->top()) {
+#ifndef PRODUCT
+              if (PrintOptimizeStringConcat) {
+                tty->print("fusion has incorrect memory flow (side effects) for ");
+                _begin->jvms()->dump_spec(tty); tty->cr();
+                path.dump();
+              }
+#endif
+              return false;
+            }
+          }
+          // skip through a potential MergeMem chain, linked through Bot
+          mem = mem->in(Compile::AliasIdxBot);
+          NOT_PRODUCT(path.push(mem);)
+        }
+        // now let it fall through, and see if we have a projection
+        if (mem->is_Proj()) {
+          // Should point to a previous known call
+          Node *prev = mem->in(0);
+          NOT_PRODUCT(path.push(prev);)
+          if (!prev->is_Call() || !_control.contains(prev)) {
+#ifndef PRODUCT
+            if (PrintOptimizeStringConcat) {
+              tty->print("fusion has incorrect memory flow (unknown call) for ");
+              _begin->jvms()->dump_spec(tty); tty->cr();
+              path.dump();
+            }
+#endif
+            return false;
+          }
+        } else {
+          assert(mem->is_Store() || mem->is_LoadStore(), err_msg_res("unexpected node type: %s", mem->Name()));
+#ifndef PRODUCT
+          if (PrintOptimizeStringConcat) {
+            tty->print("fusion has incorrect memory flow (unexpected source) for ");
+            _begin->jvms()->dump_spec(tty); tty->cr();
+            path.dump();
+          }
+#endif
+          return false;
+        }
+      } else {
+        // For memory that feeds into constructors it's more complicated.
+        // However the advantage is that any side effect that happens between the Allocate/Initialize and
+        // the constructor will have to be control-dependent on Initialize.
+        // So we actually don't have to do anything, since it's going to be caught by the control flow
+        // analysis.
+#ifdef ASSERT
+        // Do a quick verification of the control pattern between the constructor and the initialize node
+        assert(curr->is_Call(), "constructor should be a call");
+        // Go up the control starting from the constructor call
+        Node* ctrl = curr->in(0);
+        IfNode* iff = NULL;
+        RegionNode* copy = NULL;
+
+        while (true) {
+          // skip known check patterns
+          if (ctrl->is_Region()) {
+            if (ctrl->as_Region()->is_copy()) {
+              copy = ctrl->as_Region();
+              ctrl = copy->is_copy();
+            } else { // a cast
+              assert(ctrl->req() == 3 &&
+                     ctrl->in(1) != NULL && ctrl->in(1)->is_Proj() &&
+                     ctrl->in(2) != NULL && ctrl->in(2)->is_Proj() &&
+                     ctrl->in(1)->in(0) == ctrl->in(2)->in(0) &&
+                     ctrl->in(1)->in(0) != NULL && ctrl->in(1)->in(0)->is_If(),
+                     "must be a simple diamond");
+              Node* true_proj = ctrl->in(1)->is_IfTrue() ? ctrl->in(1) : ctrl->in(2);
+              for (SimpleDUIterator i(true_proj); i.has_next(); i.next()) {
+                Node* use = i.get();
+                assert(use == ctrl || use->is_ConstraintCast(),
+                       err_msg_res("unexpected user: %s", use->Name()));
+              }
+
+              iff = ctrl->in(1)->in(0)->as_If();
+              ctrl = iff->in(0);
+            }
+          } else if (ctrl->is_IfTrue()) { // null checks, class checks
+            iff = ctrl->in(0)->as_If();
+            assert(iff->is_If(), "must be if");
+            // Verify that the other arm is an uncommon trap
+            Node* otherproj = iff->proj_out(1 - ctrl->as_Proj()->_con);
+            CallStaticJavaNode* call = otherproj->unique_out()->isa_CallStaticJava();
+            assert(strcmp(call->_name, "uncommon_trap") == 0, "must be uncommon trap");
+            ctrl = iff->in(0);
+          } else {
+            break;
+          }
+        }
+
+        assert(ctrl->is_Proj(), "must be a projection");
+        assert(ctrl->in(0)->is_Initialize(), "should be initialize");
+        for (SimpleDUIterator i(ctrl); i.has_next(); i.next()) {
+          Node* use = i.get();
+          assert(use == copy || use == iff || use == curr || use->is_CheckCastPP() || use->is_Load(),
+                 err_msg_res("unexpected user: %s", use->Name()));
+        }
+#endif // ASSERT
+      }
+    }
+  }
+
+#ifndef PRODUCT
+  if (PrintOptimizeStringConcat) {
+    tty->print("fusion has correct memory flow for ");
+    _begin->jvms()->dump_spec(tty); tty->cr();
+    tty->cr();
+  }
+#endif
+  return true;
+}
+
 bool StringConcat::validate_control_flow() {
   // We found all the calls and arguments now lets see if it's
   // safe to transform the graph as we would expect.
@@ -753,7 +899,7 @@
     }
   }
 
-  // Skip backwards through the control checking for unexpected contro flow
+  // Skip backwards through the control checking for unexpected control flow
   Node* ptr = _end;
   bool fail = false;
   while (ptr != _begin) {
@@ -936,7 +1082,7 @@
   if (PrintOptimizeStringConcat && !fail) {
     ttyLocker ttyl;
     tty->cr();
-    tty->print("fusion would succeed (%d %d) for ", null_check_count, _uncommon_traps.size());
+    tty->print("fusion has correct control flow (%d %d) for ", null_check_count, _uncommon_traps.size());
     _begin->jvms()->dump_spec(tty); tty->cr();
     for (int i = 0; i < num_arguments(); i++) {
       argument(i)->dump();
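
The heart of the new validate_mem_flow is the walk down the bottom-memory slice: any non-top input on an alias slice other than AliasIdxBot indicates an interleaved side effect, and the walk must terminate at a projection of a call the fusion already knows. A standalone sketch of that traversal shape, with MockNode and the constants as illustrative stand-ins for the C2 node classes:

    #include <vector>

    // Illustrative stand-ins for C2 memory nodes; only the shape of the
    // MergeMem walk in validate_mem_flow is modeled here.
    struct MockNode {
      bool is_merge_mem;
      std::vector<MockNode*> in;       // in[0] reserved, alias slices from 1 on
    };

    static const int AliasIdxBot = 1;  // bottom-memory slice (value illustrative)
    static MockNode* TOP = nullptr;    // stand-in for C->top()

    // Returns the node feeding bottom memory after skipping a MergeMem
    // chain, or nullptr if some other alias slice carries a side effect.
    MockNode* walk_bottom_memory(MockNode* mem) {
      while (mem->is_merge_mem) {
        for (size_t i = 1; i < mem->in.size(); i++) {
          if ((int)i != AliasIdxBot && mem->in[i] != TOP) {
            return nullptr;            // side effect on a narrow slice: reject
          }
        }
        mem = mem->in[AliasIdxBot];    // follow the chain through Bot
      }
      return mem;                      // caller checks this is a known call's Proj
    }
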
--- a/src/share/vm/opto/subnode.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/subnode.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1064,7 +1064,7 @@
 // Print special per-node info
 #ifndef PRODUCT
 void BoolTest::dump_on(outputStream *st) const {
-  const char *msg[] = {"eq","gt","??","lt","ne","le","??","ge"};
+  const char *msg[] = {"eq","gt","of","lt","ne","le","nof","ge"};
   st->print(msg[_test]);
 }
 #endif
@@ -1126,7 +1126,7 @@
   Node *cmp = in(1);
   if( !cmp->is_Sub() ) return NULL;
   int cop = cmp->Opcode();
-  if( cop == Op_FastLock || cop == Op_FastUnlock ) return NULL;
+  if( cop == Op_FastLock || cop == Op_FastUnlock || cop == Op_FlagsProj) return NULL;
   Node *cmp1 = cmp->in(1);
   Node *cmp2 = cmp->in(2);
   if( !cmp1 ) return NULL;
--- a/src/share/vm/opto/subnode.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/subnode.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -263,16 +263,16 @@
 // We pick the values as 3 bits; the low order 2 bits we compare against the
 // condition codes, the high bit flips the sense of the result.
 struct BoolTest VALUE_OBJ_CLASS_SPEC {
-  enum mask { eq = 0, ne = 4, le = 5, ge = 7, lt = 3, gt = 1, illegal = 8 };
+  enum mask { eq = 0, ne = 4, le = 5, ge = 7, lt = 3, gt = 1, overflow = 2, no_overflow = 6, illegal = 8 };
   mask _test;
   BoolTest( mask btm ) : _test(btm) {}
   const Type *cc2logical( const Type *CC ) const;
   // Commute the test.  I use a small table lookup.  The table is created as
   // a simple char array where each element is the ASCII version of a 'mask'
   // enum from above.
-  mask commute( ) const { return mask("038147858"[_test]-'0'); }
+  mask commute( ) const { return mask("032147658"[_test]-'0'); }
   mask negate( ) const { return mask(_test^4); }
-  bool is_canonical( ) const { return (_test == BoolTest::ne || _test == BoolTest::lt || _test == BoolTest::le); }
+  bool is_canonical( ) const { return (_test == BoolTest::ne || _test == BoolTest::lt || _test == BoolTest::le || _test == BoolTest::overflow); }
 #ifndef PRODUCT
   void dump_on(outputStream *st) const;
 #endif
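
The retuned commute table can be checked by hand: the 3-bit test code indexes the string, and the digit found there is the commuted code, so the two new codes, overflow (2) and no_overflow (6), now commute to themselves where the old table sent both slots to illegal (8). A standalone check of the table and of the XOR-by-4 negation (the enum values are copied from this hunk; the rest is scaffolding):

    #include <cassert>

    // Values copied from the BoolTest::mask enum in this diff.
    enum mask { eq = 0, ne = 4, le = 5, ge = 7, lt = 3, gt = 1,
                overflow = 2, no_overflow = 6, illegal = 8 };

    mask commute(mask m) { return mask("032147658"[m] - '0'); }
    mask negate (mask m) { return mask(m ^ 4); }   // flips the high "sense" bit

    int main() {
      // Swapping comparison operands flips direction but leaves the
      // equality-like and overflow tests fixed.
      assert(commute(lt) == gt && commute(gt) == lt);
      assert(commute(le) == ge && commute(ge) == le);
      assert(commute(eq) == eq && commute(ne) == ne);
      assert(commute(overflow) == overflow && commute(no_overflow) == no_overflow);
      // Negation pairs each test with its logical complement.
      assert(negate(eq) == ne && negate(lt) == ge && negate(le) == gt);
      assert(negate(overflow) == no_overflow);
      return 0;
    }
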
--- a/src/share/vm/opto/type.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/type.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -189,6 +189,38 @@
 }
 
 
+//-----------------------make_from_constant------------------------------------
+const Type* Type::make_from_constant(ciConstant constant,
+                                     bool require_constant, bool is_autobox_cache) {
+  switch (constant.basic_type()) {
+  case T_BOOLEAN:  return TypeInt::make(constant.as_boolean());
+  case T_CHAR:     return TypeInt::make(constant.as_char());
+  case T_BYTE:     return TypeInt::make(constant.as_byte());
+  case T_SHORT:    return TypeInt::make(constant.as_short());
+  case T_INT:      return TypeInt::make(constant.as_int());
+  case T_LONG:     return TypeLong::make(constant.as_long());
+  case T_FLOAT:    return TypeF::make(constant.as_float());
+  case T_DOUBLE:   return TypeD::make(constant.as_double());
+  case T_ARRAY:
+  case T_OBJECT:
+    {
+      // cases:
+      //   can_be_constant    = (oop not scavengable || ScavengeRootsInCode != 0)
+      //   should_be_constant = (oop not scavengable || ScavengeRootsInCode >= 2)
+      // An oop is not scavengable if it is in the perm gen.
+      ciObject* oop_constant = constant.as_object();
+      if (oop_constant->is_null_object()) {
+        return Type::get_zero_type(T_OBJECT);
+      } else if (require_constant || oop_constant->should_be_constant()) {
+        return TypeOopPtr::make_from_constant(oop_constant, require_constant, is_autobox_cache);
+      }
+    }
+  }
+  // Fall through to failure
+  return NULL;
+}
+
+
 //------------------------------make-------------------------------------------
 // Create a simple Type, with default empty symbol sets.  Then hashcons it
 // and look for an existing copy in the type dictionary.
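
make_from_constant gives C2 a single funnel from a ciConstant to a lattice type: primitive tags map one-to-one onto the Type*::make factories, while object and array constants may legitimately fail, signalled by the NULL fall-through. A sketch of that dispatch shape with mocked types (ciConstant and the factories are VM classes; the strings merely name the calls the real code makes):

    #include <cassert>
    #include <cstddef>

    // Illustrative stand-ins; ciConstant and the Type factories are VM types.
    enum BasicType { T_INT, T_LONG, T_OBJECT };

    struct MockConstant {
      BasicType tag;
      long      value;     // enough for the primitive arms of a sketch
    };

    // Shape of Type::make_from_constant: primitives map directly, object
    // constants can fail, and failure is reported as NULL.
    const char* lattice_type_for(const MockConstant& c, bool require_constant) {
      switch (c.tag) {
      case T_INT:    return "TypeInt::make(as_int())";
      case T_LONG:   return "TypeLong::make(as_long())";
      case T_OBJECT:
        if (require_constant) return "TypeOopPtr::make_from_constant(...)";
        break;             // oop may not be embeddable in code
      }
      return NULL;         // fall through to failure, as in the diff
    }

    int main() {
      assert(lattice_type_for(MockConstant{T_INT, 42}, false) != NULL);
      assert(lattice_type_for(MockConstant{T_OBJECT, 0}, false) == NULL);
      return 0;
    }
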
@@ -326,7 +358,7 @@
                                            false, 0, oopDesc::mark_offset_in_bytes());
   TypeInstPtr::KLASS   = TypeInstPtr::make(TypePtr::BotPTR,  current->env()->Object_klass(),
                                            false, 0, oopDesc::klass_offset_in_bytes());
-  TypeOopPtr::BOTTOM  = TypeOopPtr::make(TypePtr::BotPTR, OffsetBot, TypeOopPtr::InstanceBot);
+  TypeOopPtr::BOTTOM  = TypeOopPtr::make(TypePtr::BotPTR, OffsetBot, TypeOopPtr::InstanceBot, NULL);
 
   TypeMetadataPtr::BOTTOM = TypeMetadataPtr::make(TypePtr::BotPTR, NULL, OffsetBot);
 
@@ -398,6 +430,16 @@
   longpair[1] = TypeLong::LONG;
   TypeTuple::LONG_PAIR = TypeTuple::make(2, longpair);
 
+  const Type **intccpair = TypeTuple::fields(2);
+  intccpair[0] = TypeInt::INT;
+  intccpair[1] = TypeInt::CC;
+  TypeTuple::INT_CC_PAIR = TypeTuple::make(2, intccpair);
+
+  const Type **longccpair = TypeTuple::fields(2);
+  longccpair[0] = TypeLong::LONG;
+  longccpair[1] = TypeInt::CC;
+  TypeTuple::LONG_CC_PAIR = TypeTuple::make(2, longccpair);
+
   _const_basic_type[T_NARROWOOP]   = TypeNarrowOop::BOTTOM;
   _const_basic_type[T_NARROWKLASS] = Type::BOTTOM;
   _const_basic_type[T_BOOLEAN]     = TypeInt::BOOL;
@@ -540,7 +582,7 @@
 
 //----------------------interface_vs_oop---------------------------------------
 #ifdef ASSERT
-bool Type::interface_vs_oop(const Type *t) const {
+bool Type::interface_vs_oop_helper(const Type *t) const {
   bool result = false;
 
   const TypePtr* this_ptr = this->make_ptr(); // In case it is narrow_oop
@@ -558,6 +600,29 @@
 
   return result;
 }
+
+bool Type::interface_vs_oop(const Type *t) const {
+  if (interface_vs_oop_helper(t)) {
+    return true;
+  }
+  // Now check the speculative parts as well
+  const TypeOopPtr* this_spec = isa_oopptr() != NULL ? isa_oopptr()->speculative() : NULL;
+  const TypeOopPtr* t_spec = t->isa_oopptr() != NULL ? t->isa_oopptr()->speculative() : NULL;
+  if (this_spec != NULL && t_spec != NULL) {
+    if (this_spec->interface_vs_oop_helper(t_spec)) {
+      return true;
+    }
+    return false;
+  }
+  if (this_spec != NULL && this_spec->interface_vs_oop_helper(t)) {
+    return true;
+  }
+  if (t_spec != NULL && interface_vs_oop_helper(t_spec)) {
+    return true;
+  }
+  return false;
+}
+
 #endif
 
 //------------------------------meet-------------------------------------------
@@ -1614,6 +1679,8 @@
 const TypeTuple *TypeTuple::START_I2C;
 const TypeTuple *TypeTuple::INT_PAIR;
 const TypeTuple *TypeTuple::LONG_PAIR;
+const TypeTuple *TypeTuple::INT_CC_PAIR;
+const TypeTuple *TypeTuple::LONG_CC_PAIR;
 
 
 //------------------------------make-------------------------------------------
@@ -1824,12 +1891,12 @@
 }
 
 //------------------------------make-------------------------------------------
-const TypeAry *TypeAry::make( const Type *elem, const TypeInt *size) {
+const TypeAry* TypeAry::make(const Type* elem, const TypeInt* size, bool stable) {
   if (UseCompressedOops && elem->isa_oopptr()) {
     elem = elem->make_narrowoop();
   }
   size = normalize_array_size(size);
-  return (TypeAry*)(new TypeAry(elem,size))->hashcons();
+  return (TypeAry*)(new TypeAry(elem,size,stable))->hashcons();
 }
 
 //------------------------------meet-------------------------------------------
@@ -1850,7 +1917,8 @@
   case Array: {                 // Meeting 2 arrays?
     const TypeAry *a = t->is_ary();
     return TypeAry::make(_elem->meet(a->_elem),
-                         _size->xmeet(a->_size)->is_int());
+                         _size->xmeet(a->_size)->is_int(),
+                         _stable & a->_stable);
   }
   case Top:
     break;
@@ -1863,7 +1931,7 @@
 const Type *TypeAry::xdual() const {
   const TypeInt* size_dual = _size->dual()->is_int();
   size_dual = normalize_array_size(size_dual);
-  return new TypeAry( _elem->dual(), size_dual);
+  return new TypeAry(_elem->dual(), size_dual, !_stable);
 }
 
 //------------------------------eq---------------------------------------------
@@ -1871,13 +1939,14 @@
 bool TypeAry::eq( const Type *t ) const {
   const TypeAry *a = (const TypeAry*)t;
   return _elem == a->_elem &&
+    _stable == a->_stable &&
     _size == a->_size;
 }
 
 //------------------------------hash-------------------------------------------
 // Type-specific hashing function.
 int TypeAry::hash(void) const {
-  return (intptr_t)_elem + (intptr_t)_size;
+  return (intptr_t)_elem + (intptr_t)_size + (_stable ? 43 : 0);
 }
 
 //----------------------interface_vs_oop---------------------------------------
@@ -1894,6 +1963,7 @@
 //------------------------------dump2------------------------------------------
 #ifndef PRODUCT
 void TypeAry::dump2( Dict &d, uint depth, outputStream *st ) const {
+  if (_stable)  st->print("stable:");
   _elem->dump2(d, depth, st);
   st->print("[");
   _size->dump2(d, depth, st);
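
Threading _stable through TypeAry follows the usual lattice recipe: meet ANDs the flags (the result is stable only if both inputs are), xdual negates the flag so that dualing twice round-trips, eq compares it, and hash mixes it in (the salt 43 only separates hash buckets). A standalone sketch of that recipe on a stripped-down type (MockAry is illustrative, not TypeAry):

    #include <cassert>

    // Minimal stand-in showing how a boolean lattice flag is threaded
    // through meet/dual/eq/hash, as the diff does for TypeAry::_stable.
    struct MockAry {
      int  elem;      // stands in for the element type
      bool stable;

      MockAry meet(const MockAry& o) const {
        // Only stable if *both* sides are stable.
        return MockAry{ elem < o.elem ? elem : o.elem, stable && o.stable };
      }
      MockAry dual() const { return MockAry{ -elem, !stable }; }
      bool eq(const MockAry& o) const { return elem == o.elem && stable == o.stable; }
      int  hash() const { return elem + (stable ? 43 : 0); }  // same salt as the diff
    };

    int main() {
      MockAry a{1, true}, b{1, false};
      assert(!a.eq(b));                 // flag participates in equality
      assert(a.hash() != b.hash());     // ...and in hashing
      assert(!a.meet(b).stable);        // stability is lost on meet
      assert(a.dual().dual().eq(a));    // dual is an involution
      return 0;
    }
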
@@ -2366,14 +2436,15 @@
 const TypeOopPtr *TypeOopPtr::BOTTOM;
 
 //------------------------------TypeOopPtr-------------------------------------
-TypeOopPtr::TypeOopPtr( TYPES t, PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id )
+TypeOopPtr::TypeOopPtr(TYPES t, PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id, const TypeOopPtr* speculative)
   : TypePtr(t, ptr, offset),
     _const_oop(o), _klass(k),
     _klass_is_exact(xk),
     _is_ptr_to_narrowoop(false),
     _is_ptr_to_narrowklass(false),
     _is_ptr_to_boxed_value(false),
-    _instance_id(instance_id) {
+    _instance_id(instance_id),
+    _speculative(speculative) {
   if (Compile::current()->eliminate_boxing() && (t == InstPtr) &&
       (offset > 0) && xk && (k != 0) && k->is_instance_klass()) {
     _is_ptr_to_boxed_value = k->as_instance_klass()->is_boxed_value_offset(offset);
@@ -2381,7 +2452,7 @@
 #ifdef _LP64
   if (_offset != 0) {
     if (_offset == oopDesc::klass_offset_in_bytes()) {
-      _is_ptr_to_narrowklass = UseCompressedKlassPointers;
+      _is_ptr_to_narrowklass = UseCompressedClassPointers;
     } else if (klass() == NULL) {
       // Array with unknown body type
       assert(this->isa_aryptr(), "only arrays without klass");
@@ -2440,12 +2511,12 @@
 
 //------------------------------make-------------------------------------------
 const TypeOopPtr *TypeOopPtr::make(PTR ptr,
-                                   int offset, int instance_id) {
+                                   int offset, int instance_id, const TypeOopPtr* speculative) {
   assert(ptr != Constant, "no constant generic pointers");
   ciKlass*  k = Compile::current()->env()->Object_klass();
   bool      xk = false;
   ciObject* o = NULL;
-  return (TypeOopPtr*)(new TypeOopPtr(OopPtr, ptr, k, xk, o, offset, instance_id))->hashcons();
+  return (TypeOopPtr*)(new TypeOopPtr(OopPtr, ptr, k, xk, o, offset, instance_id, speculative))->hashcons();
 }
 
 
@@ -2453,7 +2524,7 @@
 const Type *TypeOopPtr::cast_to_ptr_type(PTR ptr) const {
   assert(_base == OopPtr, "subclass must override cast_to_ptr_type");
   if( ptr == _ptr ) return this;
-  return make(ptr, _offset, _instance_id);
+  return make(ptr, _offset, _instance_id, _speculative);
 }
 
 //-----------------------------cast_to_instance_id----------------------------
@@ -2483,10 +2554,31 @@
     return TypeKlassPtr::make(xk? Constant: NotNull, k, 0);
 }
 
+const Type *TypeOopPtr::xmeet(const Type *t) const {
+  const Type* res = xmeet_helper(t);
+  if (res->isa_oopptr() == NULL) {
+    return res;
+  }
+
+  if (res->isa_oopptr() != NULL) {
+    // type->speculative() == NULL means that speculation is no better
+    // than type, i.e. type->speculative() == type. So there are 2
+    // ways to represent the fact that we have no useful speculative
+    // data and we should use a single one to be able to test for
+    // equality between types. Check whether type->speculative() ==
+    // type and set speculative to NULL if it is the case.
+    const TypeOopPtr* res_oopptr = res->is_oopptr();
+    if (res_oopptr->remove_speculative() == res_oopptr->speculative()) {
+      return res_oopptr->remove_speculative();
+    }
+  }
+
+  return res;
+}
 
 //------------------------------meet-------------------------------------------
 // Compute the MEET of two types.  It returns a new Type object.
-const Type *TypeOopPtr::xmeet( const Type *t ) const {
+const Type *TypeOopPtr::xmeet_helper(const Type *t) const {
   // Perform a fast test for common case; meeting the same types together.
   if( this == t ) return this;  // Meeting same type-rep?
 
@@ -2528,7 +2620,8 @@
     case TopPTR:
     case AnyNull: {
       int instance_id = meet_instance_id(InstanceTop);
-      return make(ptr, offset, instance_id);
+      const TypeOopPtr* speculative = _speculative;
+      return make(ptr, offset, instance_id, speculative);
     }
     case BotPTR:
     case NotNull:
@@ -2540,7 +2633,8 @@
   case OopPtr: {                 // Meeting to other OopPtrs
     const TypeOopPtr *tp = t->is_oopptr();
     int instance_id = meet_instance_id(tp->instance_id());
-    return make( meet_ptr(tp->ptr()), meet_offset(tp->offset()), instance_id );
+    const TypeOopPtr* speculative = meet_speculative(tp);
+    return make(meet_ptr(tp->ptr()), meet_offset(tp->offset()), instance_id, speculative);
   }
 
   case InstPtr:                  // For these, flip the call around to cut down
@@ -2557,7 +2651,7 @@
 const Type *TypeOopPtr::xdual() const {
   assert(klass() == Compile::current()->env()->Object_klass(), "no klasses here");
   assert(const_oop() == NULL,             "no constants here");
-  return new TypeOopPtr(_base, dual_ptr(), klass(), klass_is_exact(), const_oop(), dual_offset(), dual_instance_id()  );
+  return new TypeOopPtr(_base, dual_ptr(), klass(), klass_is_exact(), const_oop(), dual_offset(), dual_instance_id(), dual_speculative());
 }
 
 //--------------------------make_from_klass_common-----------------------------
@@ -2648,7 +2742,7 @@
     } else if (!o->should_be_constant()) {
       return TypeAryPtr::make(TypePtr::NotNull, arr0, klass, true, 0);
     }
-    const TypeAryPtr* arr = TypeAryPtr::make(TypePtr::Constant, o, arr0, klass, true, 0, InstanceBot, is_autobox_cache);
+    const TypeAryPtr* arr = TypeAryPtr::make(TypePtr::Constant, o, arr0, klass, true, 0, InstanceBot, NULL, is_autobox_cache);
     return arr;
   } else if (klass->is_type_array_klass()) {
     // Element is an typeArray
@@ -2693,13 +2787,11 @@
 
 //-----------------------------filter------------------------------------------
 // Do not allow interface-vs.-noninterface joins to collapse to top.
-const Type *TypeOopPtr::filter( const Type *kills ) const {
+const Type *TypeOopPtr::filter(const Type *kills) const {
 
   const Type* ft = join(kills);
   const TypeInstPtr* ftip = ft->isa_instptr();
   const TypeInstPtr* ktip = kills->isa_instptr();
-  const TypeKlassPtr* ftkp = ft->isa_klassptr();
-  const TypeKlassPtr* ktkp = kills->isa_klassptr();
 
   if (ft->empty()) {
     // Check for evil case of 'this' being a class and 'kills' expecting an
@@ -2713,8 +2805,6 @@
     // uplift the type.
     if (!empty() && ktip != NULL && ktip->is_loaded() && ktip->klass()->is_interface())
       return kills;             // Uplift to interface
-    if (!empty() && ktkp != NULL && ktkp->klass()->is_loaded() && ktkp->klass()->is_interface())
-      return kills;             // Uplift to interface
 
     return Type::TOP;           // Canonical empty value
   }
@@ -2731,14 +2821,6 @@
     assert(!ftip->klass_is_exact(), "interface could not be exact");
     return ktip->cast_to_ptr_type(ftip->ptr());
   }
-  // Interface klass type could be exact in opposite to interface type,
-  // return it here instead of incorrect Constant ptr J/L/Object (6894807).
-  if (ftkp != NULL && ktkp != NULL &&
-      ftkp->is_loaded() &&  ftkp->klass()->is_interface() &&
-      !ftkp->klass_is_exact() && // Keep exact interface klass
-      ktkp->is_loaded() && !ktkp->klass()->is_interface()) {
-    return ktkp->cast_to_ptr_type(ftkp->ptr());
-  }
 
   return ft;
 }
@@ -2748,7 +2830,8 @@
 bool TypeOopPtr::eq( const Type *t ) const {
   const TypeOopPtr *a = (const TypeOopPtr*)t;
   if (_klass_is_exact != a->_klass_is_exact ||
-      _instance_id != a->_instance_id)  return false;
+      _instance_id != a->_instance_id ||
+      !eq_speculative(a))  return false;
   ciObject* one = const_oop();
   ciObject* two = a->const_oop();
   if (one == NULL || two == NULL) {
@@ -2765,6 +2848,7 @@
     (const_oop() ? const_oop()->hash() : 0) +
     _klass_is_exact +
     _instance_id +
+    hash_speculative() +
     TypePtr::hash();
 }
 
@@ -2784,6 +2868,19 @@
     st->print(",iid=top");
   else if (_instance_id != InstanceBot)
     st->print(",iid=%d",_instance_id);
+
+  dump_speculative(st);
+}
+
+/**
+ * Dump the speculative part of the type
+ */
+void TypeOopPtr::dump_speculative(outputStream *st) const {
+  if (_speculative != NULL) {
+    st->print(" (speculative=");
+    _speculative->dump_on(st);
+    st->print(")");
+  }
 }
 #endif
 
@@ -2797,8 +2894,15 @@
 }
 
 //------------------------------add_offset-------------------------------------
-const TypePtr *TypeOopPtr::add_offset( intptr_t offset ) const {
-  return make( _ptr, xadd_offset(offset), _instance_id);
+const TypePtr *TypeOopPtr::add_offset(intptr_t offset) const {
+  return make(_ptr, xadd_offset(offset), _instance_id, add_offset_speculative(offset));
+}
+
+/**
+ * Return same type without a speculative part
+ */
+const TypeOopPtr* TypeOopPtr::remove_speculative() const {
+  return make(_ptr, _offset, _instance_id, NULL);
 }
 
 //------------------------------meet_instance_id--------------------------------
@@ -2818,6 +2922,89 @@
   return _instance_id;              // Map everything else into self
 }
 
+/**
+ * meet of the speculative parts of 2 types
+ *
+ * @param other  type to meet with
+ */
+const TypeOopPtr* TypeOopPtr::meet_speculative(const TypeOopPtr* other) const {
+  bool this_has_spec = (_speculative != NULL);
+  bool other_has_spec = (other->speculative() != NULL);
+
+  if (!this_has_spec && !other_has_spec) {
+    return NULL;
+  }
+
+  // If we are at a point where control flow meets and one branch has
+  // a speculative type and the other does not, we meet the speculative
+  // type of one branch with the actual type of the other. If the
+  // actual type is exact and the speculative is as well, then the
+  // result is a speculative type which is exact and we can continue
+  // speculation further.
+  const TypeOopPtr* this_spec = _speculative;
+  const TypeOopPtr* other_spec = other->speculative();
+
+  if (!this_has_spec) {
+    this_spec = this;
+  }
+
+  if (!other_has_spec) {
+    other_spec = other;
+  }
+
+  return this_spec->meet(other_spec)->is_oopptr();
+}
+
+/**
+ * dual of the speculative part of the type
+ */
+const TypeOopPtr* TypeOopPtr::dual_speculative() const {
+  if (_speculative == NULL) {
+    return NULL;
+  }
+  return _speculative->dual()->is_oopptr();
+}
+
+/**
+ * add offset to the speculative part of the type
+ *
+ * @param offset  offset to add
+ */
+const TypeOopPtr* TypeOopPtr::add_offset_speculative(intptr_t offset) const {
+  if (_speculative == NULL) {
+    return NULL;
+  }
+  return _speculative->add_offset(offset)->is_oopptr();
+}
+
+/**
+ * Are the speculative parts of 2 types equal?
+ *
+ * @param other  type to compare this one to
+ */
+bool TypeOopPtr::eq_speculative(const TypeOopPtr* other) const {
+  if (_speculative == NULL || other->speculative() == NULL) {
+    return _speculative == other->speculative();
+  }
+
+  if (_speculative->base() != other->speculative()->base()) {
+    return false;
+  }
+
+  return _speculative->eq(other->speculative());
+}
+
+/**
+ * Hash of the speculative part of the type
+ */
+int TypeOopPtr::hash_speculative() const {
+  if (_speculative == NULL) {
+    return 0;
+  }
+
+  return _speculative->hash();
+}
+
 
 //=============================================================================
 // Convenience common pre-built types.
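
The xmeet wrapper and meet_speculative above enforce one canonical encoding of "no useful speculation": whenever the speculative part would equal the type itself it is stored as NULL, so structural equality and hashing keep working. A sketch of meet_speculative's operand selection under that encoding (plain structs stand in for interned TypeOopPtr values, and the placeholder meet is not a real lattice meet):

    #include <cstddef>

    // Plain structs stand in for interned TypeOopPtr values. spec == NULL
    // is the canonical encoding of "speculation no better than the type".
    struct MockType {
      const MockType* spec;
      // Placeholder lattice meet; the real one is Type::meet and returns
      // an interned type.
      const MockType* meet(const MockType* other) const {
        return (this == other) ? this : NULL;
      }
    };

    // Shape of TypeOopPtr::meet_speculative: when only one branch carries
    // a speculative type, meet it against the other branch's *actual* type;
    // when neither does, keep the canonical NULL.
    const MockType* meet_speculative(const MockType* self, const MockType* other) {
      bool self_has  = (self->spec  != NULL);
      bool other_has = (other->spec != NULL);
      if (!self_has && !other_has) {
        return NULL;
      }
      const MockType* self_spec  = self_has  ? self->spec  : self;
      const MockType* other_spec = other_has ? other->spec : other;
      return self_spec->meet(other_spec);
    }
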
@@ -2828,8 +3015,8 @@
 const TypeInstPtr *TypeInstPtr::KLASS;
 
 //------------------------------TypeInstPtr-------------------------------------
-TypeInstPtr::TypeInstPtr(PTR ptr, ciKlass* k, bool xk, ciObject* o, int off, int instance_id)
- : TypeOopPtr(InstPtr, ptr, k, xk, o, off, instance_id), _name(k->name()) {
+TypeInstPtr::TypeInstPtr(PTR ptr, ciKlass* k, bool xk, ciObject* o, int off, int instance_id, const TypeOopPtr* speculative)
+  : TypeOopPtr(InstPtr, ptr, k, xk, o, off, instance_id, speculative), _name(k->name()) {
    assert(k != NULL &&
           (k->is_loaded() || o == NULL),
           "cannot have constants with non-loaded klass");
@@ -2841,7 +3028,8 @@
                                      bool xk,
                                      ciObject* o,
                                      int offset,
-                                     int instance_id) {
+                                     int instance_id,
+                                     const TypeOopPtr* speculative) {
   assert( !k->is_loaded() || k->is_instance_klass(), "Must be for instance");
   // Either const_oop() is NULL or else ptr is Constant
   assert( (!o && ptr != Constant) || (o && ptr == Constant),
@@ -2862,7 +3050,7 @@
 
   // Now hash this baby
   TypeInstPtr *result =
-    (TypeInstPtr*)(new TypeInstPtr(ptr, k, xk, o ,offset, instance_id))->hashcons();
+    (TypeInstPtr*)(new TypeInstPtr(ptr, k, xk, o ,offset, instance_id, speculative))->hashcons();
 
   return result;
 }
@@ -2895,7 +3083,7 @@
   if( ptr == _ptr ) return this;
   // Reconstruct _sig info here since not a problem with later lazy
   // construction, _sig will show up on demand.
-  return make(ptr, klass(), klass_is_exact(), const_oop(), _offset, _instance_id);
+  return make(ptr, klass(), klass_is_exact(), const_oop(), _offset, _instance_id, _speculative);
 }
 
 
@@ -2907,13 +3095,13 @@
   ciInstanceKlass* ik = _klass->as_instance_klass();
   if( (ik->is_final() || _const_oop) )  return this;  // cannot clear xk
   if( ik->is_interface() )              return this;  // cannot set xk
-  return make(ptr(), klass(), klass_is_exact, const_oop(), _offset, _instance_id);
+  return make(ptr(), klass(), klass_is_exact, const_oop(), _offset, _instance_id, _speculative);
 }
 
 //-----------------------------cast_to_instance_id----------------------------
 const TypeOopPtr *TypeInstPtr::cast_to_instance_id(int instance_id) const {
   if( instance_id == _instance_id ) return this;
-  return make(_ptr, klass(), _klass_is_exact, const_oop(), _offset, instance_id);
+  return make(_ptr, klass(), _klass_is_exact, const_oop(), _offset, instance_id, _speculative);
 }
 
 //------------------------------xmeet_unloaded---------------------------------
@@ -2923,6 +3111,7 @@
     int off = meet_offset(tinst->offset());
     PTR ptr = meet_ptr(tinst->ptr());
     int instance_id = meet_instance_id(tinst->instance_id());
+    const TypeOopPtr* speculative = meet_speculative(tinst);
 
     const TypeInstPtr *loaded    = is_loaded() ? this  : tinst;
     const TypeInstPtr *unloaded  = is_loaded() ? tinst : this;
@@ -2943,7 +3132,7 @@
       assert(loaded->ptr() != TypePtr::Null, "insanity check");
       //
       if(      loaded->ptr() == TypePtr::TopPTR ) { return unloaded; }
-      else if (loaded->ptr() == TypePtr::AnyNull) { return TypeInstPtr::make( ptr, unloaded->klass(), false, NULL, off, instance_id ); }
+      else if (loaded->ptr() == TypePtr::AnyNull) { return TypeInstPtr::make(ptr, unloaded->klass(), false, NULL, off, instance_id, speculative); }
       else if (loaded->ptr() == TypePtr::BotPTR ) { return TypeInstPtr::BOTTOM; }
       else if (loaded->ptr() == TypePtr::Constant || loaded->ptr() == TypePtr::NotNull) {
         if (unloaded->ptr() == TypePtr::BotPTR  ) { return TypeInstPtr::BOTTOM;  }
@@ -2965,7 +3154,7 @@
 
 //------------------------------meet-------------------------------------------
 // Compute the MEET of two types.  It returns a new Type object.
-const Type *TypeInstPtr::xmeet( const Type *t ) const {
+const Type *TypeInstPtr::xmeet_helper(const Type *t) const {
   // Perform a fast test for common case; meeting the same types together.
   if( this == t ) return this;  // Meeting same type-rep?
 
@@ -2999,16 +3188,20 @@
     int offset = meet_offset(tp->offset());
     PTR ptr = meet_ptr(tp->ptr());
     int instance_id = meet_instance_id(tp->instance_id());
+    const TypeOopPtr* speculative = meet_speculative(tp);
     switch (ptr) {
     case TopPTR:
     case AnyNull:                // Fall 'down' to dual of object klass
-      if (klass()->equals(ciEnv::current()->Object_klass())) {
-        return TypeAryPtr::make(ptr, tp->ary(), tp->klass(), tp->klass_is_exact(), offset, instance_id);
+      // For instances when a subclass meets a superclass we fall
+      // below the centerline when the superclass is exact. We need to
+      // do the same here.
+      if (klass()->equals(ciEnv::current()->Object_klass()) && !klass_is_exact()) {
+        return TypeAryPtr::make(ptr, tp->ary(), tp->klass(), tp->klass_is_exact(), offset, instance_id, speculative);
       } else {
         // cannot subclass, so the meet has to fall badly below the centerline
         ptr = NotNull;
         instance_id = InstanceBot;
-        return TypeInstPtr::make( ptr, ciEnv::current()->Object_klass(), false, NULL, offset, instance_id);
+        return TypeInstPtr::make( ptr, ciEnv::current()->Object_klass(), false, NULL, offset, instance_id, speculative);
       }
     case Constant:
     case NotNull:
@@ -3017,10 +3210,13 @@
       if( above_centerline(_ptr) ) { // if( _ptr == TopPTR || _ptr == AnyNull )
         // If 'this' (InstPtr) is above the centerline and it is Object class
         // then we can subclass in the Java class hierarchy.
-        if (klass()->equals(ciEnv::current()->Object_klass())) {
+        // For instances when a subclass meets a superclass we fall
+        // below the centerline when the superclass is exact. We need
+        // to do the same here.
+        if (klass()->equals(ciEnv::current()->Object_klass()) && !klass_is_exact()) {
           // that is, tp's array type is a subtype of my klass
           return TypeAryPtr::make(ptr, (ptr == Constant ? tp->const_oop() : NULL),
-                                  tp->ary(), tp->klass(), tp->klass_is_exact(), offset, instance_id);
+                                  tp->ary(), tp->klass(), tp->klass_is_exact(), offset, instance_id, speculative);
         }
       }
       // The other case cannot happen, since I cannot be a subtype of an array.
@@ -3028,7 +3224,7 @@
       if( ptr == Constant )
          ptr = NotNull;
       instance_id = InstanceBot;
-      return make( ptr, ciEnv::current()->Object_klass(), false, NULL, offset, instance_id );
+      return make(ptr, ciEnv::current()->Object_klass(), false, NULL, offset, instance_id, speculative);
     default: typerr(t);
     }
   }
@@ -3042,13 +3238,15 @@
     case TopPTR:
     case AnyNull: {
       int instance_id = meet_instance_id(InstanceTop);
+      const TypeOopPtr* speculative = meet_speculative(tp);
       return make(ptr, klass(), klass_is_exact(),
-                  (ptr == Constant ? const_oop() : NULL), offset, instance_id);
+                  (ptr == Constant ? const_oop() : NULL), offset, instance_id, speculative);
     }
     case NotNull:
     case BotPTR: {
       int instance_id = meet_instance_id(tp->instance_id());
-      return TypeOopPtr::make(ptr, offset, instance_id);
+      const TypeOopPtr* speculative = meet_speculative(tp);
+      return TypeOopPtr::make(ptr, offset, instance_id, speculative);
     }
     default: typerr(t);
     }
@@ -3061,17 +3259,18 @@
     PTR ptr = meet_ptr(tp->ptr());
     switch (tp->ptr()) {
     case Null:
-      if( ptr == Null ) return TypePtr::make( AnyPtr, ptr, offset );
+      if( ptr == Null ) return TypePtr::make(AnyPtr, ptr, offset);
       // else fall through to AnyNull
     case TopPTR:
     case AnyNull: {
       int instance_id = meet_instance_id(InstanceTop);
-      return make( ptr, klass(), klass_is_exact(),
-                   (ptr == Constant ? const_oop() : NULL), offset, instance_id);
+      const TypeOopPtr* speculative = _speculative;
+      return make(ptr, klass(), klass_is_exact(),
+                  (ptr == Constant ? const_oop() : NULL), offset, instance_id, speculative);
     }
     case NotNull:
     case BotPTR:
-      return TypePtr::make( AnyPtr, ptr, offset );
+      return TypePtr::make(AnyPtr, ptr, offset);
     default: typerr(t);
     }
   }
@@ -3098,13 +3297,14 @@
     int off = meet_offset( tinst->offset() );
     PTR ptr = meet_ptr( tinst->ptr() );
     int instance_id = meet_instance_id(tinst->instance_id());
+    const TypeOopPtr* speculative = meet_speculative(tinst);
 
     // Check for easy case; klasses are equal (and perhaps not loaded!)
     // If we have constants, then we created oops so classes are loaded
     // and we can handle the constants further down.  This case handles
     // both-not-loaded or both-loaded classes
     if (ptr != Constant && klass()->equals(tinst->klass()) && klass_is_exact() == tinst->klass_is_exact()) {
-      return make( ptr, klass(), klass_is_exact(), NULL, off, instance_id );
+      return make(ptr, klass(), klass_is_exact(), NULL, off, instance_id, speculative);
     }
 
     // Classes require inspection in the Java klass hierarchy.  Must be loaded.
@@ -3126,7 +3326,8 @@
     }
 
     // Handle mixing oops and interfaces first.
-    if( this_klass->is_interface() && !tinst_klass->is_interface() ) {
+    if( this_klass->is_interface() && !(tinst_klass->is_interface() ||
+                                        tinst_klass == ciEnv::current()->Object_klass())) {
       ciKlass *tmp = tinst_klass; // Swap interface around
       tinst_klass = this_klass;
       this_klass = tmp;
@@ -3167,7 +3368,7 @@
         // Find out which constant.
         o = (this_klass == klass()) ? const_oop() : tinst->const_oop();
       }
-      return make( ptr, k, xk, o, off, instance_id );
+      return make(ptr, k, xk, o, off, instance_id, speculative);
     }
 
     // Either oop vs oop or interface vs interface or interface vs Object
@@ -3244,7 +3445,7 @@
         else
           ptr = NotNull;
       }
-      return make( ptr, this_klass, this_xk, o, off, instance_id );
+      return make(ptr, this_klass, this_xk, o, off, instance_id, speculative);
     } // Else classes are not equal
 
     // Since klasses are different, we require a LCA in the Java
@@ -3255,7 +3456,7 @@
 
     // Now we find the LCA of Java classes
     ciKlass* k = this_klass->least_common_ancestor(tinst_klass);
-    return make( ptr, k, false, NULL, off, instance_id );
+    return make(ptr, k, false, NULL, off, instance_id, speculative);
   } // End of case InstPtr
 
   } // End of switch
@@ -3279,7 +3480,7 @@
 // Dual: do NOT dual on klasses.  This means I do NOT understand the Java
 // inheritance mechanism.
 const Type *TypeInstPtr::xdual() const {
-  return new TypeInstPtr( dual_ptr(), klass(), klass_is_exact(), const_oop(), dual_offset(), dual_instance_id()  );
+  return new TypeInstPtr(dual_ptr(), klass(), klass_is_exact(), const_oop(), dual_offset(), dual_instance_id(), dual_speculative());
 }
 
 //------------------------------eq---------------------------------------------
@@ -3335,12 +3536,18 @@
     st->print(",iid=top");
   else if (_instance_id != InstanceBot)
     st->print(",iid=%d",_instance_id);
+
+  dump_speculative(st);
 }
 #endif
 
 //------------------------------add_offset-------------------------------------
-const TypePtr *TypeInstPtr::add_offset( intptr_t offset ) const {
-  return make( _ptr, klass(), klass_is_exact(), const_oop(), xadd_offset(offset), _instance_id );
+const TypePtr *TypeInstPtr::add_offset(intptr_t offset) const {
+  return make(_ptr, klass(), klass_is_exact(), const_oop(), xadd_offset(offset), _instance_id, add_offset_speculative(offset));
+}
+
+const TypeOopPtr *TypeInstPtr::remove_speculative() const {
+  return make(_ptr, klass(), klass_is_exact(), const_oop(), _offset, _instance_id, NULL);
 }
 
 //=============================================================================
@@ -3357,30 +3564,30 @@
 const TypeAryPtr *TypeAryPtr::DOUBLES;
 
 //------------------------------make-------------------------------------------
-const TypeAryPtr *TypeAryPtr::make( PTR ptr, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id ) {
+const TypeAryPtr *TypeAryPtr::make(PTR ptr, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id, const TypeOopPtr* speculative) {
   assert(!(k == NULL && ary->_elem->isa_int()),
          "integral arrays must be pre-equipped with a class");
   if (!xk)  xk = ary->ary_must_be_exact();
   assert(instance_id <= 0 || xk || !UseExactTypes, "instances are always exactly typed");
   if (!UseExactTypes)  xk = (ptr == Constant);
-  return (TypeAryPtr*)(new TypeAryPtr(ptr, NULL, ary, k, xk, offset, instance_id, false))->hashcons();
+  return (TypeAryPtr*)(new TypeAryPtr(ptr, NULL, ary, k, xk, offset, instance_id, false, speculative))->hashcons();
 }
 
 //------------------------------make-------------------------------------------
-const TypeAryPtr *TypeAryPtr::make( PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id, bool is_autobox_cache) {
+const TypeAryPtr *TypeAryPtr::make(PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id, const TypeOopPtr* speculative, bool is_autobox_cache) {
   assert(!(k == NULL && ary->_elem->isa_int()),
          "integral arrays must be pre-equipped with a class");
   assert( (ptr==Constant && o) || (ptr!=Constant && !o), "" );
   if (!xk)  xk = (o != NULL) || ary->ary_must_be_exact();
   assert(instance_id <= 0 || xk || !UseExactTypes, "instances are always exactly typed");
   if (!UseExactTypes)  xk = (ptr == Constant);
-  return (TypeAryPtr*)(new TypeAryPtr(ptr, o, ary, k, xk, offset, instance_id, is_autobox_cache))->hashcons();
+  return (TypeAryPtr*)(new TypeAryPtr(ptr, o, ary, k, xk, offset, instance_id, is_autobox_cache, speculative))->hashcons();
 }
 
 //------------------------------cast_to_ptr_type-------------------------------
 const Type *TypeAryPtr::cast_to_ptr_type(PTR ptr) const {
   if( ptr == _ptr ) return this;
-  return make(ptr, const_oop(), _ary, klass(), klass_is_exact(), _offset, _instance_id);
+  return make(ptr, const_oop(), _ary, klass(), klass_is_exact(), _offset, _instance_id, _speculative);
 }
 
 
@@ -3389,13 +3596,13 @@
   if( klass_is_exact == _klass_is_exact ) return this;
   if (!UseExactTypes)  return this;
   if (_ary->ary_must_be_exact())  return this;  // cannot clear xk
-  return make(ptr(), const_oop(), _ary, klass(), klass_is_exact, _offset, _instance_id);
+  return make(ptr(), const_oop(), _ary, klass(), klass_is_exact, _offset, _instance_id, _speculative);
 }
 
 //-----------------------------cast_to_instance_id----------------------------
 const TypeOopPtr *TypeAryPtr::cast_to_instance_id(int instance_id) const {
   if( instance_id == _instance_id ) return this;
-  return make(_ptr, const_oop(), _ary, klass(), _klass_is_exact, _offset, instance_id);
+  return make(_ptr, const_oop(), _ary, klass(), _klass_is_exact, _offset, instance_id, _speculative);
 }
 
 //-----------------------------narrow_size_type-------------------------------
@@ -3457,10 +3664,38 @@
   assert(new_size != NULL, "");
   new_size = narrow_size_type(new_size);
   if (new_size == size())  return this;
-  const TypeAry* new_ary = TypeAry::make(elem(), new_size);
+  const TypeAry* new_ary = TypeAry::make(elem(), new_size, is_stable());
+  return make(ptr(), const_oop(), new_ary, klass(), klass_is_exact(), _offset, _instance_id, _speculative);
+}
+
+
+//------------------------------cast_to_stable---------------------------------
+const TypeAryPtr* TypeAryPtr::cast_to_stable(bool stable, int stable_dimension) const {
+  if (stable_dimension <= 0 || (stable_dimension == 1 && stable == this->is_stable()))
+    return this;
+
+  const Type* elem = this->elem();
+  const TypePtr* elem_ptr = elem->make_ptr();
+
+  if (stable_dimension > 1 && elem_ptr != NULL && elem_ptr->isa_aryptr()) {
+    // If this is widened from a narrow oop, TypeAry::make will re-narrow it.
+    elem = elem_ptr = elem_ptr->is_aryptr()->cast_to_stable(stable, stable_dimension - 1);
+  }
+
+  const TypeAry* new_ary = TypeAry::make(elem, size(), stable);
+
   return make(ptr(), const_oop(), new_ary, klass(), klass_is_exact(), _offset, _instance_id);
 }
 
+//-----------------------------stable_dimension--------------------------------
+int TypeAryPtr::stable_dimension() const {
+  if (!is_stable())  return 0;
+  int dim = 1;
+  const TypePtr* elem_ptr = elem()->make_ptr();
+  if (elem_ptr != NULL && elem_ptr->isa_aryptr())
+    dim += elem_ptr->is_aryptr()->stable_dimension();
+  return dim;
+}
 
 //------------------------------eq---------------------------------------------
 // Structural equality check for Type representations
@@ -3479,7 +3714,7 @@
 
 //------------------------------meet-------------------------------------------
 // Compute the MEET of two types.  It returns a new Type object.
-const Type *TypeAryPtr::xmeet( const Type *t ) const {
+const Type *TypeAryPtr::xmeet_helper(const Type *t) const {
   // Perform a fast test for common case; meeting the same types together.
   if( this == t ) return this;  // Meeting same type-rep?
   // Current "this->_base" is Pointer
@@ -3513,13 +3748,15 @@
     case TopPTR:
     case AnyNull: {
       int instance_id = meet_instance_id(InstanceTop);
+      const TypeOopPtr* speculative = meet_speculative(tp);
       return make(ptr, (ptr == Constant ? const_oop() : NULL),
-                  _ary, _klass, _klass_is_exact, offset, instance_id);
+                  _ary, _klass, _klass_is_exact, offset, instance_id, speculative);
     }
     case BotPTR:
     case NotNull: {
       int instance_id = meet_instance_id(tp->instance_id());
-      return TypeOopPtr::make(ptr, offset, instance_id);
+      const TypeOopPtr* speculative = meet_speculative(tp);
+      return TypeOopPtr::make(ptr, offset, instance_id, speculative);
     }
     default: ShouldNotReachHere();
     }
@@ -3541,8 +3778,9 @@
       // else fall through to AnyNull
     case AnyNull: {
       int instance_id = meet_instance_id(InstanceTop);
-      return make( ptr, (ptr == Constant ? const_oop() : NULL),
-                  _ary, _klass, _klass_is_exact, offset, instance_id);
+      const TypeOopPtr* speculative = _speculative;
+      return make(ptr, (ptr == Constant ? const_oop() : NULL),
+                  _ary, _klass, _klass_is_exact, offset, instance_id, speculative);
     }
     default: ShouldNotReachHere();
     }
@@ -3558,6 +3796,7 @@
     const TypeAry *tary = _ary->meet(tap->_ary)->is_ary();
     PTR ptr = meet_ptr(tap->ptr());
     int instance_id = meet_instance_id(tap->instance_id());
+    const TypeOopPtr* speculative = meet_speculative(tap);
     ciKlass* lazy_klass = NULL;
     if (tary->_elem->isa_int()) {
       // Integral array element types have irrelevant lattice relations.
@@ -3570,7 +3809,7 @@
         // Something like byte[int+] meets char[int+].
         // This must fall to bottom, not (int[-128..65535])[int+].
         instance_id = InstanceBot;
-        tary = TypeAry::make(Type::BOTTOM, tary->_size);
+        tary = TypeAry::make(Type::BOTTOM, tary->_size, tary->_stable);
       }
     } else // Non integral arrays.
     // Must fall to bottom if exact klasses in upper lattice
@@ -3584,8 +3823,8 @@
          (tap ->_klass_is_exact && !tap->klass()->is_subtype_of(klass())) ||
          // 'this' is exact and super or unrelated:
          (this->_klass_is_exact && !klass()->is_subtype_of(tap->klass())))) {
-      tary = TypeAry::make(Type::BOTTOM, tary->_size);
-      return make( NotNull, NULL, tary, lazy_klass, false, off, InstanceBot );
+      tary = TypeAry::make(Type::BOTTOM, tary->_size, tary->_stable);
+      return make(NotNull, NULL, tary, lazy_klass, false, off, InstanceBot);
     }
 
     bool xk = false;
@@ -3593,8 +3832,12 @@
     case AnyNull:
     case TopPTR:
       // Compute new klass on demand, do not use tap->_klass
-      xk = (tap->_klass_is_exact | this->_klass_is_exact);
-      return make( ptr, const_oop(), tary, lazy_klass, xk, off, instance_id );
+      if (below_centerline(this->_ptr)) {
+        xk = this->_klass_is_exact;
+      } else {
+        xk = (tap->_klass_is_exact | this->_klass_is_exact);
+      }
+      return make(ptr, const_oop(), tary, lazy_klass, xk, off, instance_id, speculative);
     case Constant: {
       ciObject* o = const_oop();
       if( _ptr == Constant ) {
@@ -3606,25 +3849,23 @@
         } else {
           xk = true;
         }
-      } else if( above_centerline(_ptr) ) {
+      } else if (above_centerline(_ptr)) {
         o = tap->const_oop();
         xk = true;
       } else {
         // Only precise for identical arrays
         xk = this->_klass_is_exact && (klass() == tap->klass());
       }
-      return TypeAryPtr::make( ptr, o, tary, lazy_klass, xk, off, instance_id );
+      return TypeAryPtr::make(ptr, o, tary, lazy_klass, xk, off, instance_id, speculative);
     }
     case NotNull:
     case BotPTR:
       // Compute new klass on demand, do not use tap->_klass
       if (above_centerline(this->_ptr))
             xk = tap->_klass_is_exact;
-      else if (above_centerline(tap->_ptr))
-            xk = this->_klass_is_exact;
       else  xk = (tap->_klass_is_exact & this->_klass_is_exact) &&
               (klass() == tap->klass()); // Only precise for identical arrays
-      return TypeAryPtr::make( ptr, NULL, tary, lazy_klass, xk, off, instance_id );
+      return TypeAryPtr::make(ptr, NULL, tary, lazy_klass, xk, off, instance_id, speculative);
     default: ShouldNotReachHere();
     }
   }
@@ -3635,16 +3876,20 @@
     int offset = meet_offset(tp->offset());
     PTR ptr = meet_ptr(tp->ptr());
     int instance_id = meet_instance_id(tp->instance_id());
+    const TypeOopPtr* speculative = meet_speculative(tp);
     switch (ptr) {
     case TopPTR:
     case AnyNull:                // Fall 'down' to dual of object klass
-      if( tp->klass()->equals(ciEnv::current()->Object_klass()) ) {
-        return TypeAryPtr::make( ptr, _ary, _klass, _klass_is_exact, offset, instance_id );
+      // For instance types, when a subclass meets a superclass we fall
+      // below the centerline if the superclass is exact. We need to
+      // do the same here.
+      if (tp->klass()->equals(ciEnv::current()->Object_klass()) && !tp->klass_is_exact()) {
+        return TypeAryPtr::make(ptr, _ary, _klass, _klass_is_exact, offset, instance_id, speculative);
       } else {
         // cannot subclass, so the meet has to fall badly below the centerline
         ptr = NotNull;
         instance_id = InstanceBot;
-        return TypeInstPtr::make( ptr, ciEnv::current()->Object_klass(), false, NULL,offset, instance_id);
+        return TypeInstPtr::make(ptr, ciEnv::current()->Object_klass(), false, NULL, offset, instance_id, speculative);
       }
     case Constant:
     case NotNull:
@@ -3653,10 +3898,13 @@
       if (above_centerline(tp->ptr())) {
         // If 'tp'  is above the centerline and it is Object class
         // then we can subclass in the Java class hierarchy.
-        if( tp->klass()->equals(ciEnv::current()->Object_klass()) ) {
+        // For instance types, when a subclass meets a superclass we fall
+        // below the centerline if the superclass is exact. We need
+        // to do the same here.
+        if (tp->klass()->equals(ciEnv::current()->Object_klass()) && !tp->klass_is_exact()) {
           // that is, my array type is a subtype of 'tp' klass
-          return make( ptr, (ptr == Constant ? const_oop() : NULL),
-                       _ary, _klass, _klass_is_exact, offset, instance_id );
+          return make(ptr, (ptr == Constant ? const_oop() : NULL),
+                      _ary, _klass, _klass_is_exact, offset, instance_id, speculative);
         }
       }
       // The other case cannot happen, since t cannot be a subtype of an array.
@@ -3664,7 +3912,7 @@
       if( ptr == Constant )
          ptr = NotNull;
       instance_id = InstanceBot;
-      return TypeInstPtr::make( ptr, ciEnv::current()->Object_klass(), false, NULL,offset, instance_id);
+      return TypeInstPtr::make(ptr, ciEnv::current()->Object_klass(), false, NULL, offset, instance_id, speculative);
     default: typerr(t);
     }
   }
@@ -3675,7 +3923,7 @@
 //------------------------------xdual------------------------------------------
 // Dual: compute field-by-field dual
 const Type *TypeAryPtr::xdual() const {
-  return new TypeAryPtr( dual_ptr(), _const_oop, _ary->dual()->is_ary(),_klass, _klass_is_exact, dual_offset(), dual_instance_id(), is_autobox_cache() );
+  return new TypeAryPtr(dual_ptr(), _const_oop, _ary->dual()->is_ary(), _klass, _klass_is_exact, dual_offset(), dual_instance_id(), is_autobox_cache(), dual_speculative());
 }
 
 //----------------------interface_vs_oop---------------------------------------
@@ -3727,6 +3975,8 @@
     st->print(",iid=top");
   else if (_instance_id != InstanceBot)
     st->print(",iid=%d",_instance_id);
+
+  dump_speculative(st);
 }
 #endif
 
@@ -3736,10 +3986,13 @@
 }
 
 //------------------------------add_offset-------------------------------------
-const TypePtr *TypeAryPtr::add_offset( intptr_t offset ) const {
-  return make( _ptr, _const_oop, _ary, _klass, _klass_is_exact, xadd_offset(offset), _instance_id );
-}
-
+const TypePtr *TypeAryPtr::add_offset(intptr_t offset) const {
+  return make(_ptr, _const_oop, _ary, _klass, _klass_is_exact, xadd_offset(offset), _instance_id, add_offset_speculative(offset));
+}
+
+const TypeOopPtr *TypeAryPtr::remove_speculative() const {
+  return make(_ptr, _const_oop, _ary, _klass, _klass_is_exact, _offset, _instance_id, NULL);
+}
 
 //=============================================================================
 
@@ -4120,6 +4373,33 @@
   return (_offset == 0) && !below_centerline(_ptr);
 }
 
+// Do not allow interface-vs.-noninterface joins to collapse to top.
+const Type *TypeKlassPtr::filter(const Type *kills) const {
+  // logic here mirrors the one from TypeOopPtr::filter. See comments
+  // there.
+  const Type* ft = join(kills);
+  const TypeKlassPtr* ftkp = ft->isa_klassptr();
+  const TypeKlassPtr* ktkp = kills->isa_klassptr();
+
+  if (ft->empty()) {
+    if (!empty() && ktkp != NULL && ktkp->klass()->is_loaded() && ktkp->klass()->is_interface())
+      return kills;             // Uplift to interface
+
+    return Type::TOP;           // Canonical empty value
+  }
+
+  // An interface klass type can be exact, in contrast to an interface type;
+  // return it here instead of an incorrect Constant ptr J/L/Object (6894807).
+  if (ftkp != NULL && ktkp != NULL &&
+      ftkp->is_loaded() && ftkp->klass()->is_interface() &&
+      !ftkp->klass_is_exact() && // Keep exact interface klass
+      ktkp->is_loaded() && !ktkp->klass()->is_interface()) {
+    return ktkp->cast_to_ptr_type(ftkp->ptr());
+  }
+
+  return ft;
+}
+
 //----------------------compute_klass------------------------------------------
 // Compute the defining klass for this class
 ciKlass* TypeAryPtr::compute_klass(DEBUG_ONLY(bool verify)) const {
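
The type.cpp hunks above thread a profile-derived speculative component through TypeAryPtr's make/meet/dual operations. As a rough standalone sketch of the idea (a toy four-element lattice; none of these names are HotSpot's), the guess rides along as an optional extra slot, and at a control-flow merge a side with no guess contributes its base type instead; dual_speculative() in the real code takes the dual of the extra slot in the same field-by-field spirit.

#include <cstdio>

// Toy four-element lattice; TOP is the identity of meet, and unequal
// non-TOP elements fall to BOTTOM.
enum Toy { TOP, A, B, BOTTOM };

static Toy meet(Toy x, Toy y) {
  if (x == y)   return x;
  if (x == TOP) return y;
  if (y == TOP) return x;
  return BOTTOM;
}

struct SpecType {
  Toy        base;         // what we actually know
  const Toy* speculative;  // profile-derived guess; NULL means "none"
};

// Meet the speculative slots. When only one side carries a guess, it
// meets against the other side's base type: at a merge the guess must
// stay consistent with whatever the other path can deliver.
static const Toy* meet_speculative(const SpecType& a, const SpecType& b,
                                   Toy* out) {
  if (a.speculative == NULL && b.speculative == NULL) return NULL;
  Toy sa = a.speculative ? *a.speculative : a.base;
  Toy sb = b.speculative ? *b.speculative : b.base;
  *out = meet(sa, sb);
  return out;
}

int main() {
  Toy slot;
  Toy guess = A;
  SpecType x = { TOP, &guess };  // nothing known, but profiled as A
  SpecType y = { A,   NULL   };  // known to be A, no extra guess
  SpecType m = { meet(x.base, y.base), meet_speculative(x, y, &slot) };
  std::printf("base=%d speculative=%s\n", (int)m.base,
              m.speculative == NULL ? "none" : "kept");
  return 0;
}
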
--- a/src/share/vm/opto/type.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/opto/type.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -63,7 +63,7 @@
 class     TypeOopPtr;
 class       TypeInstPtr;
 class       TypeAryPtr;
-class       TypeKlassPtr;
+class     TypeKlassPtr;
 class     TypeMetadataPtr;
 
 //------------------------------Type-------------------------------------------
@@ -159,6 +159,11 @@
   // Table for efficient dualing of base types
   static const TYPES dual_type[lastype];
 
+#ifdef ASSERT
+  // One type is interface, the other is oop
+  virtual bool interface_vs_oop_helper(const Type *t) const;
+#endif
+
 protected:
   // Each class of type is also identified by its base.
   const TYPES _base;            // Enum of Types type
@@ -169,7 +174,7 @@
 
 public:
 
-  inline void* operator new( size_t x ) {
+  inline void* operator new( size_t x ) throw() {
     Compile* compile = Compile::current();
     compile->set_type_last_size(x);
     void *temp = compile->type_arena()->Amalloc_D(x);
@@ -372,6 +377,13 @@
   // Mapping from CI type system to compiler type:
   static const Type* get_typeflow_type(ciType* type);
 
+  static const Type* make_from_constant(ciConstant constant,
+                                        bool require_constant = false,
+                                        bool is_autobox_cache = false);
+
+  // Speculative type. See TypeInstPtr
+  virtual ciKlass* speculative_type() const { return NULL; }
+
 private:
   // support arrays
   static const BasicType _basic_type[];
@@ -580,6 +592,8 @@
   static const TypeTuple *START_I2C;
   static const TypeTuple *INT_PAIR;
   static const TypeTuple *LONG_PAIR;
+  static const TypeTuple *INT_CC_PAIR;
+  static const TypeTuple *LONG_CC_PAIR;
 #ifndef PRODUCT
   virtual void dump2( Dict &d, uint, outputStream *st  ) const; // Specialized per-Type dumping
 #endif
@@ -588,8 +602,8 @@
 //------------------------------TypeAry----------------------------------------
 // Class of Array Types
 class TypeAry : public Type {
-  TypeAry( const Type *elem, const TypeInt *size) : Type(Array),
-    _elem(elem), _size(size) {}
+  TypeAry(const Type* elem, const TypeInt* size, bool stable) : Type(Array),
+      _elem(elem), _size(size), _stable(stable) {}
 public:
   virtual bool eq( const Type *t ) const;
   virtual int  hash() const;             // Type specific hashing
@@ -599,10 +613,11 @@
 private:
   const Type *_elem;            // Element type of array
   const TypeInt *_size;         // Elements in array
+  const bool _stable;           // Are elements @Stable?
   friend class TypeAryPtr;
 
 public:
-  static const TypeAry *make(  const Type *elem, const TypeInt *size);
+  static const TypeAry* make(const Type* elem, const TypeInt* size, bool stable = false);
 
   virtual const Type *xmeet( const Type *t ) const;
   virtual const Type *xdual() const;    // Compute dual right now.
@@ -778,7 +793,7 @@
 // Some kind of oop (Java pointer), either klass or instance or array.
 class TypeOopPtr : public TypePtr {
 protected:
-  TypeOopPtr( TYPES t, PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id );
+  TypeOopPtr(TYPES t, PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id, const TypeOopPtr* speculative);
 public:
   virtual bool eq( const Type *t ) const;
   virtual int  hash() const;             // Type specific hashing
@@ -804,11 +819,27 @@
   // This is the node index of the allocation node creating this instance.
   int           _instance_id;
 
+  // Extra type information that profiling gave us. We propagate it the
+  // same way the rest of the type info is propagated. If we want to
+  // use it, then we have to emit a guard: this part of the type is
+  // not something we know for sure, only something we speculate about.
+  const TypeOopPtr*   _speculative;
+
   static const TypeOopPtr* make_from_klass_common(ciKlass* klass, bool klass_change, bool try_for_exact);
 
   int dual_instance_id() const;
   int meet_instance_id(int uid) const;
 
+  // utility methods to work on the speculative part of the type
+  const TypeOopPtr* dual_speculative() const;
+  const TypeOopPtr* meet_speculative(const TypeOopPtr* other) const;
+  bool eq_speculative(const TypeOopPtr* other) const;
+  int hash_speculative() const;
+  const TypeOopPtr* add_offset_speculative(intptr_t offset) const;
+#ifndef PRODUCT
+  void dump_speculative(outputStream *st) const;
+#endif
+
 public:
   // Creates a type given a klass. Correctly handles multi-dimensional arrays
   // Respects UseUniqueSubclasses.
@@ -835,7 +866,7 @@
                                               bool not_null_elements = false);
 
   // Make a generic (unclassed) pointer to an oop.
-  static const TypeOopPtr* make(PTR ptr, int offset, int instance_id);
+  static const TypeOopPtr* make(PTR ptr, int offset, int instance_id, const TypeOopPtr* speculative);
 
   ciObject* const_oop()    const { return _const_oop; }
   virtual ciKlass* klass() const { return _klass;     }
@@ -849,6 +880,7 @@
   bool is_known_instance()       const { return _instance_id > 0; }
   int  instance_id()             const { return _instance_id; }
   bool is_known_instance_field() const { return is_known_instance() && _offset >= 0; }
+  const TypeOopPtr* speculative() const { return _speculative; }
 
   virtual intptr_t get_con() const;
 
@@ -862,9 +894,13 @@
   const TypeKlassPtr* as_klass_type() const;
 
   virtual const TypePtr *add_offset( intptr_t offset ) const;
+  // Return same type without a speculative part
+  virtual const TypeOopPtr* remove_speculative() const;
 
-  virtual const Type *xmeet( const Type *t ) const;
+  virtual const Type *xmeet(const Type *t) const;
   virtual const Type *xdual() const;    // Compute dual right now.
+  // the core of the computation of the meet for TypeOopPtr and for its subclasses
+  virtual const Type *xmeet_helper(const Type *t) const;
 
   // Do not allow interface-vs.-noninterface joins to collapse to top.
   virtual const Type *filter( const Type *kills ) const;
@@ -874,13 +910,24 @@
 #ifndef PRODUCT
   virtual void dump2( Dict &d, uint depth, outputStream *st ) const;
 #endif
+
+  // Return the speculative type if any
+  ciKlass* speculative_type() const {
+    if (_speculative != NULL) {
+      const TypeOopPtr* speculative = _speculative->join(this)->is_oopptr();
+      if (speculative->klass_is_exact()) {
+        return speculative->klass();
+      }
+    }
+    return NULL;
+  }
 };
 
 //------------------------------TypeInstPtr------------------------------------
 // Class of Java object pointers, pointing either to non-array Java instances
 // or to a Klass* (including array klasses).
 class TypeInstPtr : public TypeOopPtr {
-  TypeInstPtr( PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id );
+  TypeInstPtr(PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id, const TypeOopPtr* speculative);
   virtual bool eq( const Type *t ) const;
   virtual int  hash() const;             // Type specific hashing
 
@@ -893,30 +940,30 @@
 
   // Make a pointer to a constant oop.
   static const TypeInstPtr *make(ciObject* o) {
-    return make(TypePtr::Constant, o->klass(), true, o, 0);
+    return make(TypePtr::Constant, o->klass(), true, o, 0, InstanceBot);
   }
   // Make a pointer to a constant oop with offset.
   static const TypeInstPtr *make(ciObject* o, int offset) {
-    return make(TypePtr::Constant, o->klass(), true, o, offset);
+    return make(TypePtr::Constant, o->klass(), true, o, offset, InstanceBot);
   }
 
   // Make a pointer to some value of type klass.
   static const TypeInstPtr *make(PTR ptr, ciKlass* klass) {
-    return make(ptr, klass, false, NULL, 0);
+    return make(ptr, klass, false, NULL, 0, InstanceBot);
   }
 
   // Make a pointer to some non-polymorphic value of exactly type klass.
   static const TypeInstPtr *make_exact(PTR ptr, ciKlass* klass) {
-    return make(ptr, klass, true, NULL, 0);
+    return make(ptr, klass, true, NULL, 0, InstanceBot);
   }
 
   // Make a pointer to some value of type klass with offset.
   static const TypeInstPtr *make(PTR ptr, ciKlass* klass, int offset) {
-    return make(ptr, klass, false, NULL, offset);
+    return make(ptr, klass, false, NULL, offset, InstanceBot);
   }
 
   // Make a pointer to an oop.
-  static const TypeInstPtr *make(PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id = InstanceBot );
+  static const TypeInstPtr *make(PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id = InstanceBot, const TypeOopPtr* speculative = NULL);
 
   /** Create constant type for a constant boxed value */
   const Type* get_const_boxed_value() const;
@@ -933,8 +980,11 @@
   virtual const TypeOopPtr *cast_to_instance_id(int instance_id) const;
 
   virtual const TypePtr *add_offset( intptr_t offset ) const;
+  // Return same type without a speculative part
+  virtual const TypeOopPtr* remove_speculative() const;
 
-  virtual const Type *xmeet( const Type *t ) const;
+  // the core of the computation of the meet of 2 types
+  virtual const Type *xmeet_helper(const Type *t) const;
   virtual const TypeInstPtr *xmeet_unloaded( const TypeInstPtr *t ) const;
   virtual const Type *xdual() const;    // Compute dual right now.
 
@@ -953,8 +1003,8 @@
 // Class of Java array pointers
 class TypeAryPtr : public TypeOopPtr {
   TypeAryPtr( PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk,
-              int offset, int instance_id, bool is_autobox_cache )
-  : TypeOopPtr(AryPtr,ptr,k,xk,o,offset, instance_id),
+              int offset, int instance_id, bool is_autobox_cache, const TypeOopPtr* speculative)
+    : TypeOopPtr(AryPtr, ptr, k, xk, o, offset, instance_id, speculative),
     _ary(ary),
     _is_autobox_cache(is_autobox_cache)
  {
@@ -988,12 +1038,13 @@
   const TypeAry* ary() const  { return _ary; }
   const Type*    elem() const { return _ary->_elem; }
   const TypeInt* size() const { return _ary->_size; }
+  bool      is_stable() const { return _ary->_stable; }
 
   bool is_autobox_cache() const { return _is_autobox_cache; }
 
-  static const TypeAryPtr *make( PTR ptr, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id = InstanceBot);
+  static const TypeAryPtr *make( PTR ptr, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id = InstanceBot, const TypeOopPtr* speculative = NULL);
   // Constant pointer to array
-  static const TypeAryPtr *make( PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id = InstanceBot, bool is_autobox_cache = false);
+  static const TypeAryPtr *make( PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id = InstanceBot, const TypeOopPtr* speculative = NULL, bool is_autobox_cache = false);
 
   // Return a 'ptr' version of this type
   virtual const Type *cast_to_ptr_type(PTR ptr) const;
@@ -1007,10 +1058,16 @@
 
   virtual bool empty(void) const;        // TRUE if type is vacuous
   virtual const TypePtr *add_offset( intptr_t offset ) const;
+  // Return same type without a speculative part
+  virtual const TypeOopPtr* remove_speculative() const;
 
-  virtual const Type *xmeet( const Type *t ) const;
+  // the core of the computation of the meet of 2 types
+  virtual const Type *xmeet_helper(const Type *t) const;
   virtual const Type *xdual() const;    // Compute dual right now.
 
+  const TypeAryPtr* cast_to_stable(bool stable, int stable_dimension = 1) const;
+  int stable_dimension() const;
+
   // Convenience common pre-built types.
   static const TypeAryPtr *RANGE;
   static const TypeAryPtr *OOPS;
@@ -1145,6 +1202,9 @@
 
   virtual intptr_t get_con() const;
 
+  // Do not allow interface-vs.-noninterface joins to collapse to top.
+  virtual const Type *filter( const Type *kills ) const;
+
   // Convenience common pre-built types.
   static const TypeKlassPtr* OBJECT; // Not-null object klass or below
   static const TypeKlassPtr* OBJECT_OR_NULL; // Maybe-null version of same
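
stable_dimension() above counts how many nested array levels carry the stable bit. A minimal self-contained sketch of that recursion (toy struct and hypothetical names, not the VM's type system):

#include <cstdio>

struct AryType {
  bool           stable;  // is this dimension @Stable?
  const AryType* elem;    // nested array element type, NULL for a scalar
};

// One dimension for this level, plus however many stable dimensions the
// element type contributes; a non-stable level stops the count.
static int stable_dimension(const AryType* t) {
  if (t == NULL || !t->stable) return 0;
  return 1 + stable_dimension(t->elem);
}

int main() {
  AryType inner = { true, NULL };    // say, a stable int[]
  AryType outer = { true, &inner };  // say, a stable int[][]
  std::printf("%d\n", stable_dimension(&outer));  // prints 2
  return 0;
}
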
--- a/src/share/vm/prims/jni.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/prims/jni.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1336,6 +1336,7 @@
       if (call_type == JNI_VIRTUAL) {
         // jni_GetMethodID makes sure class is linked and initialized
         // so m should have a valid vtable index.
+        assert(!m->has_itable_index(), "");
         int vtbl_index = m->vtable_index();
         if (vtbl_index != Method::nonvirtual_vtable_index) {
           Klass* k = h_recv->klass();
@@ -1355,12 +1356,7 @@
       // interface call
       KlassHandle h_holder(THREAD, holder);
 
-      int itbl_index = m->cached_itable_index();
-      if (itbl_index == -1) {
-        itbl_index = klassItable::compute_itable_index(m);
-        m->set_cached_itable_index(itbl_index);
-        // the above may have grabbed a lock, 'm' and anything non-handlized can't be used again
-      }
+      int itbl_index = m->itable_index();
       Klass* k = h_recv->klass();
       selected_method = InstanceKlass::cast(k)->method_at_itable(h_holder(), itbl_index, CHECK);
     }
@@ -1595,10 +1591,8 @@
     }
   } else {
     m = klass->lookup_method(name, signature);
-    // Look up interfaces
-    if (m == NULL && klass->oop_is_instance()) {
-      m = InstanceKlass::cast(klass())->lookup_method_in_all_interfaces(name,
-                                                                   signature);
+    if (m == NULL && klass->oop_is_instance()) {
+      m = InstanceKlass::cast(klass())->lookup_method_in_ordered_interfaces(name, signature);
     }
   }
   if (m == NULL || (m->is_static() != is_static)) {
@@ -3214,7 +3208,11 @@
   HOTSPOT_JNI_GETSTRINGLENGTH_ENTRY(
                                     env, string);
 #endif /* USDT2 */
-  jsize ret = java_lang_String::length(JNIHandles::resolve_non_null(string));
+  jsize ret = 0;
+  oop s = JNIHandles::resolve_non_null(string);
+  if (java_lang_String::value(s) != NULL) {
+    ret = java_lang_String::length(s);
+  }
 #ifndef USDT2
   DTRACE_PROBE1(hotspot_jni, GetStringLength__return, ret);
 #else /* USDT2 */
@@ -3234,20 +3232,23 @@
  HOTSPOT_JNI_GETSTRINGCHARS_ENTRY(
                                   env, string, (uintptr_t *) isCopy);
 #endif /* USDT2 */
+  jchar* buf = NULL;
   oop s = JNIHandles::resolve_non_null(string);
-  int s_len = java_lang_String::length(s);
   typeArrayOop s_value = java_lang_String::value(s);
-  int s_offset = java_lang_String::offset(s);
-  jchar* buf = NEW_C_HEAP_ARRAY_RETURN_NULL(jchar, s_len + 1, mtInternal);  // add one for zero termination
-  /* JNI Specification states return NULL on OOM */
-  if (buf != NULL) {
-    if (s_len > 0) {
-      memcpy(buf, s_value->char_at_addr(s_offset), sizeof(jchar)*s_len);
-    }
-    buf[s_len] = 0;
-    //%note jni_5
-    if (isCopy != NULL) {
-      *isCopy = JNI_TRUE;
+  if (s_value != NULL) {
+    int s_len = java_lang_String::length(s);
+    int s_offset = java_lang_String::offset(s);
+    buf = NEW_C_HEAP_ARRAY_RETURN_NULL(jchar, s_len + 1, mtInternal);  // add one for zero termination
+    /* JNI Specification states return NULL on OOM */
+    if (buf != NULL) {
+      if (s_len > 0) {
+        memcpy(buf, s_value->char_at_addr(s_offset), sizeof(jchar)*s_len);
+      }
+      buf[s_len] = 0;
+      //%note jni_5
+      if (isCopy != NULL) {
+        *isCopy = JNI_TRUE;
+      }
     }
   }
 #ifndef USDT2
@@ -3317,7 +3318,11 @@
  HOTSPOT_JNI_GETSTRINGUTFLENGTH_ENTRY(
                                       env, string);
 #endif /* USDT2 */
-  jsize ret = java_lang_String::utf8_length(JNIHandles::resolve_non_null(string));
+  jsize ret = 0;
+  oop java_string = JNIHandles::resolve_non_null(string);
+  if (java_lang_String::value(java_string) != NULL) {
+    ret = java_lang_String::utf8_length(java_string);
+  }
 #ifndef USDT2
   DTRACE_PROBE1(hotspot_jni, GetStringUTFLength__return, ret);
 #else /* USDT2 */
@@ -3336,14 +3341,17 @@
  HOTSPOT_JNI_GETSTRINGUTFCHARS_ENTRY(
                                      env, string, (uintptr_t *) isCopy);
 #endif /* USDT2 */
+  char* result = NULL;
   oop java_string = JNIHandles::resolve_non_null(string);
-  size_t length = java_lang_String::utf8_length(java_string);
-  /* JNI Specification states return NULL on OOM */
-  char* result = AllocateHeap(length + 1, mtInternal, 0, AllocFailStrategy::RETURN_NULL);
-  if (result != NULL) {
-    java_lang_String::as_utf8_string(java_string, result, (int) length + 1);
-    if (isCopy != NULL) {
-      *isCopy = JNI_TRUE;
+  if (java_lang_String::value(java_string) != NULL) {
+    size_t length = java_lang_String::utf8_length(java_string);
+    /* JNI Specification states return NULL on OOM */
+    result = AllocateHeap(length + 1, mtInternal, 0, AllocFailStrategy::RETURN_NULL);
+    if (result != NULL) {
+      java_lang_String::as_utf8_string(java_string, result, (int) length + 1);
+      if (isCopy != NULL) {
+        *isCopy = JNI_TRUE;
+      }
     }
   }
 #ifndef USDT2
@@ -5037,6 +5045,7 @@
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #endif
 #include "utilities/quickSort.hpp"
+#include "utilities/ostream.hpp"
 #if INCLUDE_VM_STRUCTS
 #include "runtime/vmStructs.hpp"
 #endif
@@ -5048,22 +5057,35 @@
 // Forward declaration
 void TestReservedSpace_test();
 void TestReserveMemorySpecial_test();
+void TestVirtualSpace_test();
+void TestMetaspaceAux_test();
+void TestMetachunk_test();
+void TestVirtualSpaceNode_test();
+#if INCLUDE_ALL_GCS
+void TestG1BiasedArray_test();
+#endif
 
 void execute_internal_vm_tests() {
   if (ExecuteInternalVMTests) {
     tty->print_cr("Running internal VM tests");
     run_unit_test(TestReservedSpace_test());
     run_unit_test(TestReserveMemorySpecial_test());
+    run_unit_test(TestVirtualSpace_test());
+    run_unit_test(TestMetaspaceAux_test());
+    run_unit_test(TestMetachunk_test());
+    run_unit_test(TestVirtualSpaceNode_test());
     run_unit_test(GlobalDefinitions::test_globals());
     run_unit_test(GCTimerAllTest::all());
     run_unit_test(arrayOopDesc::test_max_array_length());
     run_unit_test(CollectedHeap::test_is_in());
     run_unit_test(QuickSort::test_quick_sort());
     run_unit_test(AltHashing::test_alt_hash());
+    run_unit_test(test_loggc_filename());
 #if INCLUDE_VM_STRUCTS
     run_unit_test(VMStructs::test());
 #endif
 #if INCLUDE_ALL_GCS
+    run_unit_test(TestG1BiasedArray_test());
     run_unit_test(HeapRegionRemSet::test_prt());
 #endif
     tty->print_cr("All internal VM tests passed");
--- a/src/share/vm/prims/jniCheck.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/prims/jniCheck.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1324,18 +1324,19 @@
     IN_VM(
       checkString(thr, str);
     )
+    jchar* newResult = NULL;
     const jchar *result = UNCHECKED()->GetStringChars(env,str,isCopy);
     assert (isCopy == NULL || *isCopy == JNI_TRUE, "GetStringChars didn't return a copy as expected");
-
-    size_t len = UNCHECKED()->GetStringLength(env,str) + 1; // + 1 for NULL termination
-    jint* tagLocation = (jint*) AllocateHeap(len * sizeof(jchar) + sizeof(jint), mtInternal);
-    *tagLocation = STRING_TAG;
-    jchar* newResult = (jchar*) (tagLocation + 1);
-    memcpy(newResult, result, len * sizeof(jchar));
-    // Avoiding call to UNCHECKED()->ReleaseStringChars() since that will fire unexpected dtrace probes
-    // Note that the dtrace arguments for the allocated memory will not match up with this solution.
-    FreeHeap((char*)result);
-
+    if (result != NULL) {
+      size_t len = UNCHECKED()->GetStringLength(env,str) + 1; // + 1 for NULL termination
+      jint* tagLocation = (jint*) AllocateHeap(len * sizeof(jchar) + sizeof(jint), mtInternal);
+      *tagLocation = STRING_TAG;
+      newResult = (jchar*) (tagLocation + 1);
+      memcpy(newResult, result, len * sizeof(jchar));
+      // Avoiding call to UNCHECKED()->ReleaseStringChars() since that will fire unexpected dtrace probes
+      // Note that the dtrace arguments for the allocated memory will not match up with this solution.
+      FreeHeap((char*)result);
+    }
     functionExit(env);
     return newResult;
 JNI_END
@@ -1394,18 +1395,19 @@
     IN_VM(
       checkString(thr, str);
     )
+    char* newResult = NULL;
     const char *result = UNCHECKED()->GetStringUTFChars(env,str,isCopy);
     assert (isCopy == NULL || *isCopy == JNI_TRUE, "GetStringUTFChars didn't return a copy as expected");
-
-    size_t len = strlen(result) + 1; // + 1 for NULL termination
-    jint* tagLocation = (jint*) AllocateHeap(len + sizeof(jint), mtInternal);
-    *tagLocation = STRING_UTF_TAG;
-    char* newResult = (char*) (tagLocation + 1);
-    strcpy(newResult, result);
-    // Avoiding call to UNCHECKED()->ReleaseStringUTFChars() since that will fire unexpected dtrace probes
-    // Note that the dtrace arguments for the allocated memory will not match up with this solution.
-    FreeHeap((char*)result, mtInternal);
-
+    if (result != NULL) {
+      size_t len = strlen(result) + 1; // + 1 for NULL termination
+      jint* tagLocation = (jint*) AllocateHeap(len + sizeof(jint), mtInternal);
+      *tagLocation = STRING_UTF_TAG;
+      newResult = (char*) (tagLocation + 1);
+      strcpy(newResult, result);
+      // Avoiding call to UNCHECKED()->ReleaseStringUTFChars() since that will fire unexpected dtrace probes
+      // Note that the dtrace arguments for the allocated memory will not match up with this solution.
+      FreeHeap((char*)result, mtInternal);
+    }
     functionExit(env);
     return newResult;
 JNI_END
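
Both jniCheck.cpp hunks keep the pre-existing tag-word scheme while adding the NULL guard: the checked copy is allocated one word larger, the tag goes in front, and the caller only ever sees the payload pointer. A self-contained sketch of that trick (made-up tag value and helper names):

#include <cassert>
#include <cstdint>
#include <cstdlib>

static const uint32_t STRING_TAG = 0x47114711u;  // made-up tag value

// Allocate one tag word more than requested and hand back a pointer
// just past the tag.
static char* tagged_alloc(size_t payload_bytes) {
  uint32_t* tag = (uint32_t*)std::malloc(sizeof(uint32_t) + payload_bytes);
  if (tag == NULL) return NULL;
  *tag = STRING_TAG;
  return (char*)(tag + 1);
}

// On release, step back to the tag and verify provenance before freeing.
static void tagged_free(char* payload) {
  uint32_t* tag = ((uint32_t*)payload) - 1;
  assert(*tag == STRING_TAG && "pointer was not produced by tagged_alloc");
  std::free(tag);
}

int main() {
  char* p = tagged_alloc(16);
  if (p != NULL) tagged_free(p);  // asserts if p did not carry the tag
  return 0;
}
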
--- a/src/share/vm/prims/jvm.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/prims/jvm.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -671,13 +671,12 @@
 JVM_ENTRY(jclass, JVM_GetCallerClass(JNIEnv* env, int depth))
   JVMWrapper("JVM_GetCallerClass");
 
-  // Pre-JDK 8 and early builds of JDK 8 don't have a CallerSensitive annotation.
-  if (SystemDictionary::reflect_CallerSensitive_klass() == NULL) {
+  // Pre-JDK 8 and early builds of JDK 8 don't have a CallerSensitive annotation.
+  // In addition, sun.reflect.Reflection.getCallerClass with a depth parameter
+  // is provided temporarily for existing code to use until a replacement API
+  // is defined.
+  if (SystemDictionary::reflect_CallerSensitive_klass() == NULL || depth != JVM_CALLER_DEPTH) {
     Klass* k = thread->security_get_caller_class(depth);
     return (k == NULL) ? NULL : (jclass) JNIHandles::make_local(env, k->java_mirror());
-  } else {
-    // Basic handshaking with Java_sun_reflect_Reflection_getCallerClass
-    assert(depth == -1, "wrong handshake depth");
   }
 
   // Getting the class of the caller frame.
@@ -1827,7 +1826,7 @@
     }
 
     if (!publicOnly || fs.access_flags().is_public()) {
-      fd.initialize(k(), fs.index());
+      fd.reinitialize(k(), fs.index());
       oop field = Reflection::new_field(&fd, UseNewReflection, CHECK_NULL);
       result->obj_at_put(out_idx, field);
       ++out_idx;
@@ -1838,16 +1837,27 @@
 }
 JVM_END
 
-JVM_ENTRY(jobjectArray, JVM_GetClassDeclaredMethods(JNIEnv *env, jclass ofClass, jboolean publicOnly))
-{
-  JVMWrapper("JVM_GetClassDeclaredMethods");
+static bool select_method(methodHandle method, bool want_constructor) {
+  if (want_constructor) {
+    return (method->is_initializer() && !method->is_static());
+  } else {
+    return (!method->is_initializer() && !method->is_overpass());
+  }
+}
+
+static jobjectArray get_class_declared_methods_helper(
+                                  JNIEnv *env,
+                                  jclass ofClass, jboolean publicOnly,
+                                  bool want_constructor,
+                                  Klass* klass, TRAPS) {
+
   JvmtiVMObjectAllocEventCollector oam;
 
   // Exclude primitive types and array types
   if (java_lang_Class::is_primitive(JNIHandles::resolve_non_null(ofClass))
       || java_lang_Class::as_Klass(JNIHandles::resolve_non_null(ofClass))->oop_is_array()) {
     // Return empty array
-    oop res = oopFactory::new_objArray(SystemDictionary::reflect_Method_klass(), 0, CHECK_NULL);
+    oop res = oopFactory::new_objArray(klass, 0, CHECK_NULL);
     return (jobjectArray) JNIHandles::make_local(env, res);
   }
 
@@ -1858,87 +1868,67 @@
 
   Array<Method*>* methods = k->methods();
   int methods_length = methods->length();
+
+  // Save original method_idnum in case of redefinition, which can change
+  // the idnum of obsolete methods.  The new method will have the same idnum
+  // but if we refresh the methods array, the counts will be wrong.
+  ResourceMark rm(THREAD);
+  GrowableArray<int>* idnums = new GrowableArray<int>(methods_length);
   int num_methods = 0;
 
-  int i;
-  for (i = 0; i < methods_length; i++) {
+  for (int i = 0; i < methods_length; i++) {
     methodHandle method(THREAD, methods->at(i));
-    if (!method->is_initializer() && !method->is_overpass()) {
+    if (select_method(method, want_constructor)) {
       if (!publicOnly || method->is_public()) {
+        idnums->push(method->method_idnum());
         ++num_methods;
       }
     }
   }
 
   // Allocate result
-  objArrayOop r = oopFactory::new_objArray(SystemDictionary::reflect_Method_klass(), num_methods, CHECK_NULL);
+  objArrayOop r = oopFactory::new_objArray(klass, num_methods, CHECK_NULL);
   objArrayHandle result (THREAD, r);
 
-  int out_idx = 0;
-  for (i = 0; i < methods_length; i++) {
-    methodHandle method(THREAD, methods->at(i));
-    if (!method->is_initializer() && !method->is_overpass()) {
-      if (!publicOnly || method->is_public()) {
-        oop m = Reflection::new_method(method, UseNewReflection, false, CHECK_NULL);
-        result->obj_at_put(out_idx, m);
-        ++out_idx;
+  // Now just put the methods that we selected above, but go by their idnum
+  // in case of redefinition.  The methods can be redefined at any safepoint,
+  // both above when allocating the oop array and below when creating the
+  // reflect objects.
+  for (int i = 0; i < num_methods; i++) {
+    methodHandle method(THREAD, k->method_with_idnum(idnums->at(i)));
+    if (method.is_null()) {
+      // The method may have been deleted in the meantime, and it seems this
+      // API can handle null. Otherwise we should probably insert a method
+      // that throws NSME.
+      result->obj_at_put(i, NULL);
+    } else {
+      oop m;
+      if (want_constructor) {
+        m = Reflection::new_constructor(method, CHECK_NULL);
+      } else {
+        m = Reflection::new_method(method, UseNewReflection, false, CHECK_NULL);
       }
+      result->obj_at_put(i, m);
     }
   }
-  assert(out_idx == num_methods, "just checking");
+
   return (jobjectArray) JNIHandles::make_local(env, result());
 }
+
+JVM_ENTRY(jobjectArray, JVM_GetClassDeclaredMethods(JNIEnv *env, jclass ofClass, jboolean publicOnly))
+{
+  JVMWrapper("JVM_GetClassDeclaredMethods");
+  return get_class_declared_methods_helper(env, ofClass, publicOnly,
+                                           /*want_constructor*/ false,
+                                           SystemDictionary::reflect_Method_klass(), THREAD);
+}
 JVM_END
 
 JVM_ENTRY(jobjectArray, JVM_GetClassDeclaredConstructors(JNIEnv *env, jclass ofClass, jboolean publicOnly))
 {
   JVMWrapper("JVM_GetClassDeclaredConstructors");
-  JvmtiVMObjectAllocEventCollector oam;
-
-  // Exclude primitive types and array types
-  if (java_lang_Class::is_primitive(JNIHandles::resolve_non_null(ofClass))
-      || java_lang_Class::as_Klass(JNIHandles::resolve_non_null(ofClass))->oop_is_array()) {
-    // Return empty array
-    oop res = oopFactory::new_objArray(SystemDictionary::reflect_Constructor_klass(), 0 , CHECK_NULL);
-    return (jobjectArray) JNIHandles::make_local(env, res);
-  }
-
-  instanceKlassHandle k(THREAD, java_lang_Class::as_Klass(JNIHandles::resolve_non_null(ofClass)));
-
-  // Ensure class is linked
-  k->link_class(CHECK_NULL);
-
-  Array<Method*>* methods = k->methods();
-  int methods_length = methods->length();
-  int num_constructors = 0;
-
-  int i;
-  for (i = 0; i < methods_length; i++) {
-    methodHandle method(THREAD, methods->at(i));
-    if (method->is_initializer() && !method->is_static()) {
-      if (!publicOnly || method->is_public()) {
-        ++num_constructors;
-      }
-    }
-  }
-
-  // Allocate result
-  objArrayOop r = oopFactory::new_objArray(SystemDictionary::reflect_Constructor_klass(), num_constructors, CHECK_NULL);
-  objArrayHandle result(THREAD, r);
-
-  int out_idx = 0;
-  for (i = 0; i < methods_length; i++) {
-    methodHandle method(THREAD, methods->at(i));
-    if (method->is_initializer() && !method->is_static()) {
-      if (!publicOnly || method->is_public()) {
-        oop m = Reflection::new_constructor(method, CHECK_NULL);
-        result->obj_at_put(out_idx, m);
-        ++out_idx;
-      }
-    }
-  }
-  assert(out_idx == num_constructors, "just checking");
-  return (jobjectArray) JNIHandles::make_local(env, result());
+  return get_class_declared_methods_helper(env, ofClass, publicOnly,
+                                           /*want_constructor*/ true,
+                                           SystemDictionary::reflect_Constructor_klass(), THREAD);
 }
 JVM_END
 
@@ -3966,248 +3956,6 @@
 }
 
 
-// Serialization
-JVM_ENTRY(void, JVM_SetPrimitiveFieldValues(JNIEnv *env, jclass cb, jobject obj,
-                                            jlongArray fieldIDs, jcharArray typecodes, jbyteArray data))
-  assert(!JDK_Version::is_gte_jdk14x_version(), "should only be used in 1.3.1 and earlier");
-
-  typeArrayOop tcodes = typeArrayOop(JNIHandles::resolve(typecodes));
-  typeArrayOop dbuf   = typeArrayOop(JNIHandles::resolve(data));
-  typeArrayOop fids   = typeArrayOop(JNIHandles::resolve(fieldIDs));
-  oop          o      = JNIHandles::resolve(obj);
-
-  if (o == NULL || fids == NULL  || dbuf == NULL  || tcodes == NULL) {
-    THROW(vmSymbols::java_lang_NullPointerException());
-  }
-
-  jsize nfids = fids->length();
-  if (nfids == 0) return;
-
-  if (tcodes->length() < nfids) {
-    THROW(vmSymbols::java_lang_ArrayIndexOutOfBoundsException());
-  }
-
-  jsize off = 0;
-  /* loop through fields, setting values */
-  for (jsize i = 0; i < nfids; i++) {
-    jfieldID fid = (jfieldID)(intptr_t) fids->long_at(i);
-    int field_offset;
-    if (fid != NULL) {
-      // NULL is a legal value for fid, but retrieving the field offset
-      // trigger assertion in that case
-      field_offset = jfieldIDWorkaround::from_instance_jfieldID(o->klass(), fid);
-    }
-
-    switch (tcodes->char_at(i)) {
-      case 'Z':
-        if (fid != NULL) {
-          jboolean val = (dbuf->byte_at(off) != 0) ? JNI_TRUE : JNI_FALSE;
-          o->bool_field_put(field_offset, val);
-        }
-        off++;
-        break;
-
-      case 'B':
-        if (fid != NULL) {
-          o->byte_field_put(field_offset, dbuf->byte_at(off));
-        }
-        off++;
-        break;
-
-      case 'C':
-        if (fid != NULL) {
-          jchar val = ((dbuf->byte_at(off + 0) & 0xFF) << 8)
-                    + ((dbuf->byte_at(off + 1) & 0xFF) << 0);
-          o->char_field_put(field_offset, val);
-        }
-        off += 2;
-        break;
-
-      case 'S':
-        if (fid != NULL) {
-          jshort val = ((dbuf->byte_at(off + 0) & 0xFF) << 8)
-                     + ((dbuf->byte_at(off + 1) & 0xFF) << 0);
-          o->short_field_put(field_offset, val);
-        }
-        off += 2;
-        break;
-
-      case 'I':
-        if (fid != NULL) {
-          jint ival = ((dbuf->byte_at(off + 0) & 0xFF) << 24)
-                    + ((dbuf->byte_at(off + 1) & 0xFF) << 16)
-                    + ((dbuf->byte_at(off + 2) & 0xFF) << 8)
-                    + ((dbuf->byte_at(off + 3) & 0xFF) << 0);
-          o->int_field_put(field_offset, ival);
-        }
-        off += 4;
-        break;
-
-      case 'F':
-        if (fid != NULL) {
-          jint ival = ((dbuf->byte_at(off + 0) & 0xFF) << 24)
-                    + ((dbuf->byte_at(off + 1) & 0xFF) << 16)
-                    + ((dbuf->byte_at(off + 2) & 0xFF) << 8)
-                    + ((dbuf->byte_at(off + 3) & 0xFF) << 0);
-          jfloat fval = (*int_bits_to_float_fn)(env, NULL, ival);
-          o->float_field_put(field_offset, fval);
-        }
-        off += 4;
-        break;
-
-      case 'J':
-        if (fid != NULL) {
-          jlong lval = (((jlong) dbuf->byte_at(off + 0) & 0xFF) << 56)
-                     + (((jlong) dbuf->byte_at(off + 1) & 0xFF) << 48)
-                     + (((jlong) dbuf->byte_at(off + 2) & 0xFF) << 40)
-                     + (((jlong) dbuf->byte_at(off + 3) & 0xFF) << 32)
-                     + (((jlong) dbuf->byte_at(off + 4) & 0xFF) << 24)
-                     + (((jlong) dbuf->byte_at(off + 5) & 0xFF) << 16)
-                     + (((jlong) dbuf->byte_at(off + 6) & 0xFF) << 8)
-                     + (((jlong) dbuf->byte_at(off + 7) & 0xFF) << 0);
-          o->long_field_put(field_offset, lval);
-        }
-        off += 8;
-        break;
-
-      case 'D':
-        if (fid != NULL) {
-          jlong lval = (((jlong) dbuf->byte_at(off + 0) & 0xFF) << 56)
-                     + (((jlong) dbuf->byte_at(off + 1) & 0xFF) << 48)
-                     + (((jlong) dbuf->byte_at(off + 2) & 0xFF) << 40)
-                     + (((jlong) dbuf->byte_at(off + 3) & 0xFF) << 32)
-                     + (((jlong) dbuf->byte_at(off + 4) & 0xFF) << 24)
-                     + (((jlong) dbuf->byte_at(off + 5) & 0xFF) << 16)
-                     + (((jlong) dbuf->byte_at(off + 6) & 0xFF) << 8)
-                     + (((jlong) dbuf->byte_at(off + 7) & 0xFF) << 0);
-          jdouble dval = (*long_bits_to_double_fn)(env, NULL, lval);
-          o->double_field_put(field_offset, dval);
-        }
-        off += 8;
-        break;
-
-      default:
-        // Illegal typecode
-        THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "illegal typecode");
-    }
-  }
-JVM_END
-
-
-JVM_ENTRY(void, JVM_GetPrimitiveFieldValues(JNIEnv *env, jclass cb, jobject obj,
-                            jlongArray fieldIDs, jcharArray typecodes, jbyteArray data))
-  assert(!JDK_Version::is_gte_jdk14x_version(), "should only be used in 1.3.1 and earlier");
-
-  typeArrayOop tcodes = typeArrayOop(JNIHandles::resolve(typecodes));
-  typeArrayOop dbuf   = typeArrayOop(JNIHandles::resolve(data));
-  typeArrayOop fids   = typeArrayOop(JNIHandles::resolve(fieldIDs));
-  oop          o      = JNIHandles::resolve(obj);
-
-  if (o == NULL || fids == NULL  || dbuf == NULL  || tcodes == NULL) {
-    THROW(vmSymbols::java_lang_NullPointerException());
-  }
-
-  jsize nfids = fids->length();
-  if (nfids == 0) return;
-
-  if (tcodes->length() < nfids) {
-    THROW(vmSymbols::java_lang_ArrayIndexOutOfBoundsException());
-  }
-
-  /* loop through fields, fetching values */
-  jsize off = 0;
-  for (jsize i = 0; i < nfids; i++) {
-    jfieldID fid = (jfieldID)(intptr_t) fids->long_at(i);
-    if (fid == NULL) {
-      THROW(vmSymbols::java_lang_NullPointerException());
-    }
-    int field_offset = jfieldIDWorkaround::from_instance_jfieldID(o->klass(), fid);
-
-     switch (tcodes->char_at(i)) {
-       case 'Z':
-         {
-           jboolean val = o->bool_field(field_offset);
-           dbuf->byte_at_put(off++, (val != 0) ? 1 : 0);
-         }
-         break;
-
-       case 'B':
-         dbuf->byte_at_put(off++, o->byte_field(field_offset));
-         break;
-
-       case 'C':
-         {
-           jchar val = o->char_field(field_offset);
-           dbuf->byte_at_put(off++, (val >> 8) & 0xFF);
-           dbuf->byte_at_put(off++, (val >> 0) & 0xFF);
-         }
-         break;
-
-       case 'S':
-         {
-           jshort val = o->short_field(field_offset);
-           dbuf->byte_at_put(off++, (val >> 8) & 0xFF);
-           dbuf->byte_at_put(off++, (val >> 0) & 0xFF);
-         }
-         break;
-
-       case 'I':
-         {
-           jint val = o->int_field(field_offset);
-           dbuf->byte_at_put(off++, (val >> 24) & 0xFF);
-           dbuf->byte_at_put(off++, (val >> 16) & 0xFF);
-           dbuf->byte_at_put(off++, (val >> 8)  & 0xFF);
-           dbuf->byte_at_put(off++, (val >> 0)  & 0xFF);
-         }
-         break;
-
-       case 'F':
-         {
-           jfloat fval = o->float_field(field_offset);
-           jint ival = (*float_to_int_bits_fn)(env, NULL, fval);
-           dbuf->byte_at_put(off++, (ival >> 24) & 0xFF);
-           dbuf->byte_at_put(off++, (ival >> 16) & 0xFF);
-           dbuf->byte_at_put(off++, (ival >> 8)  & 0xFF);
-           dbuf->byte_at_put(off++, (ival >> 0)  & 0xFF);
-         }
-         break;
-
-       case 'J':
-         {
-           jlong val = o->long_field(field_offset);
-           dbuf->byte_at_put(off++, (val >> 56) & 0xFF);
-           dbuf->byte_at_put(off++, (val >> 48) & 0xFF);
-           dbuf->byte_at_put(off++, (val >> 40) & 0xFF);
-           dbuf->byte_at_put(off++, (val >> 32) & 0xFF);
-           dbuf->byte_at_put(off++, (val >> 24) & 0xFF);
-           dbuf->byte_at_put(off++, (val >> 16) & 0xFF);
-           dbuf->byte_at_put(off++, (val >> 8)  & 0xFF);
-           dbuf->byte_at_put(off++, (val >> 0)  & 0xFF);
-         }
-         break;
-
-       case 'D':
-         {
-           jdouble dval = o->double_field(field_offset);
-           jlong lval = (*double_to_long_bits_fn)(env, NULL, dval);
-           dbuf->byte_at_put(off++, (lval >> 56) & 0xFF);
-           dbuf->byte_at_put(off++, (lval >> 48) & 0xFF);
-           dbuf->byte_at_put(off++, (lval >> 40) & 0xFF);
-           dbuf->byte_at_put(off++, (lval >> 32) & 0xFF);
-           dbuf->byte_at_put(off++, (lval >> 24) & 0xFF);
-           dbuf->byte_at_put(off++, (lval >> 16) & 0xFF);
-           dbuf->byte_at_put(off++, (lval >> 8)  & 0xFF);
-           dbuf->byte_at_put(off++, (lval >> 0)  & 0xFF);
-         }
-         break;
-
-       default:
-         // Illegal typecode
-         THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "illegal typecode");
-     }
-  }
-JVM_END
-
 
 // Shared JNI/JVM entry points //////////////////////////////////////////////////////////////
 
@@ -4238,13 +3986,13 @@
 
 JVM_LEAF(jboolean, JVM_AccessVMBooleanFlag(const char* name, jboolean* value, jboolean is_get))
   JVMWrapper("JVM_AccessBoolVMFlag");
-  return is_get ? CommandLineFlags::boolAt((char*) name, (bool*) value) : CommandLineFlags::boolAtPut((char*) name, (bool*) value, INTERNAL);
+  return is_get ? CommandLineFlags::boolAt((char*) name, (bool*) value) : CommandLineFlags::boolAtPut((char*) name, (bool*) value, Flag::INTERNAL);
 JVM_END
 
 JVM_LEAF(jboolean, JVM_AccessVMIntFlag(const char* name, jint* value, jboolean is_get))
   JVMWrapper("JVM_AccessVMIntFlag");
   intx v;
-  jboolean result = is_get ? CommandLineFlags::intxAt((char*) name, &v) : CommandLineFlags::intxAtPut((char*) name, &v, INTERNAL);
+  jboolean result = is_get ? CommandLineFlags::intxAt((char*) name, &v) : CommandLineFlags::intxAtPut((char*) name, &v, Flag::INTERNAL);
   *value = (jint)v;
   return result;
 JVM_END
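
The declared-methods rewrite above selects methods in one pass, remembers only their idnums, and re-resolves each idnum when building the result, so a method redefined or deleted at an intervening safepoint yields null rather than a stale pointer. A toy version of that two-pass shape (hypothetical data model, not the VM's):

#include <cstdio>
#include <vector>

struct Method { int idnum; bool is_ctor; };

// Resolve an idnum against the current methods array; a deleted method
// simply fails to resolve.
static const Method* method_with_idnum(const std::vector<Method>& methods,
                                       int idnum) {
  for (size_t i = 0; i < methods.size(); i++)
    if (methods[i].idnum == idnum) return &methods[i];
  return NULL;
}

int main() {
  std::vector<Method> methods;
  Method m7 = { 7, false }; methods.push_back(m7);
  Method m9 = { 9, true  }; methods.push_back(m9);

  // Pass 1: remember only the stable ids of the methods we selected.
  std::vector<int> idnums;
  for (size_t i = 0; i < methods.size(); i++)
    if (!methods[i].is_ctor) idnums.push_back(methods[i].idnum);

  // ... a redefinition could reshuffle `methods` at a safepoint here ...

  // Pass 2: materialize results by id, tolerating disappearance.
  for (size_t i = 0; i < idnums.size(); i++) {
    const Method* m = method_with_idnum(methods, idnums[i]);
    std::printf("id %d -> %s\n", idnums[i], m ? "present" : "null");
  }
  return 0;
}
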
--- a/src/share/vm/prims/jvm.h	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/prims/jvm.h	Tue Nov 05 17:38:04 2013 -0800
@@ -377,6 +377,9 @@
 /*
  * java.lang.Class and java.lang.ClassLoader
  */
+
+#define JVM_CALLER_DEPTH -1
+
 /*
  * Returns the class in which the code invoking the native method
  * belongs.
--- a/src/share/vm/prims/jvm_misc.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/prims/jvm_misc.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -36,22 +36,6 @@
 void trace_class_resolution(Klass* to_class);
 
 /*
- * Support for Serialization and RMI. Currently used by HotSpot only.
- */
-
-extern "C" {
-
-void JNICALL
-JVM_SetPrimitiveFieldValues(JNIEnv *env, jclass cb, jobject obj,
-                            jlongArray fieldIDs, jcharArray typecodes, jbyteArray data);
-
-void JNICALL
-JVM_GetPrimitiveFieldValues(JNIEnv *env, jclass cb, jobject obj,
-                            jlongArray fieldIDs, jcharArray typecodes, jbyteArray data);
-
-}
-
-/*
  * Support for -Xcheck:jni
  */
 
--- a/src/share/vm/prims/jvmti.xml	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/prims/jvmti.xml	Tue Nov 05 17:38:04 2013 -0800
@@ -458,8 +458,10 @@
       the same name from being loaded dynamically.
 <p/>
       The VM will invoke the Agent_OnUnload_L function of the agent, if such
-      a function is exported, at the same point during startup as it would
-      have called the dynamic entry point Agent_OnUnLoad.
+      a function is exported, at the same point during VM execution as it would
+      have called the dynamic entry point Agent_OnUnLoad. A statically loaded
+      agent cannot be unloaded. The Agent_OnUnload_L function will still be
+      called to perform any other agent shutdown-related tasks.
       If a <i>statically linked</i> agent L exports a function called
       Agent_OnUnLoad_L and a function called Agent_OnUnLoad, the Agent_OnUnLoad
       function will be ignored.
--- a/src/share/vm/prims/jvmtiEnv.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/prims/jvmtiEnv.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -259,8 +259,7 @@
       // bytes to the InstanceKlass here because they have not been
       // validated and we're not at a safepoint.
       constantPoolHandle  constants(current_thread, ikh->constants());
-      oop cplock = constants->lock();
-      ObjectLocker ol(cplock, current_thread, cplock != NULL);    // lock constant pool while we query it
+      MonitorLockerEx ml(constants->lock());    // lock constant pool while we query it
 
       JvmtiClassFileReconstituter reconstituter(ikh);
       if (reconstituter.get_error() != JVMTI_ERROR_NONE) {
@@ -2418,8 +2417,7 @@
 
   instanceKlassHandle ikh(thread, k_oop);
   constantPoolHandle  constants(thread, ikh->constants());
-  oop cplock = constants->lock();
-  ObjectLocker ol(cplock, thread, cplock != NULL);    // lock constant pool while we query it
+  MonitorLockerEx ml(constants->lock());    // lock constant pool while we query it
 
   JvmtiConstantPoolReconstituter reconstituter(ikh);
   if (reconstituter.get_error() != JVMTI_ERROR_NONE) {
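
The two jvmtiEnv.cpp hunks swap an ObjectLocker on the constant-pool oop for a MonitorLockerEx on its native monitor; either way the locking is scoped RAII. A sketch of the same shape with std::mutex and std::lock_guard standing in for the VM monitor types (the real MonitorLockerEx is richer, with waiting and safepoint-check behavior this does not model):

#include <mutex>

struct ConstantPool {
  std::mutex lock;  // stand-in for constants->lock()
  int length;
};

static int query_length(ConstantPool& cp) {
  std::lock_guard<std::mutex> ml(cp.lock);  // lock constant pool while we query it
  return cp.length;                         // released automatically on return
}

int main() {
  ConstantPool cp;
  cp.length = 0;
  return query_length(cp);
}
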
--- a/src/share/vm/prims/jvmtiEnvBase.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/prims/jvmtiEnvBase.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -406,7 +406,11 @@
   VMOp_Type type() const { return VMOp_GetCurrentContendedMonitor; }
   jvmtiError result() { return _result; }
   void doit() {
-    _result = ((JvmtiEnvBase *)_env)->get_current_contended_monitor(_calling_thread,_java_thread,_owned_monitor_ptr);
+    _result = JVMTI_ERROR_THREAD_NOT_ALIVE;
+    if (Threads::includes(_java_thread) && !_java_thread->is_exiting() &&
+        _java_thread->threadObj() != NULL) {
+      _result = ((JvmtiEnvBase *)_env)->get_current_contended_monitor(_calling_thread,_java_thread,_owned_monitor_ptr);
+    }
   }
 };
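
The doit() change above defaults to JVMTI_ERROR_THREAD_NOT_ALIVE and only performs the query once the target thread is known to be registered, not exiting, and fully initialized. A standalone sketch of that guard (toy thread model, made-up names):

#include <cstdio>

struct ToyThread { bool registered; bool exiting; bool has_obj; };

enum Err { ERR_NONE, ERR_THREAD_NOT_ALIVE };

// Pessimistic default, then do the real work only for a live target.
static Err get_contended_monitor(const ToyThread& t, int* monitor_out) {
  Err result = ERR_THREAD_NOT_ALIVE;
  if (t.registered && !t.exiting && t.has_obj) {
    *monitor_out = 42;  // placeholder for the real query
    result = ERR_NONE;
  }
  return result;
}

int main() {
  ToyThread dying = { true, true, true };  // exiting: must not be touched
  int mon = 0;
  std::printf("%d\n", (int)get_contended_monitor(dying, &mon));  // prints 1
  return 0;
}
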
 
--- a/src/share/vm/prims/jvmtiGetLoadedClasses.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/prims/jvmtiGetLoadedClasses.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -29,8 +29,43 @@
 #include "runtime/thread.hpp"
 
 
+// The closure for GetLoadedClasses
+class LoadedClassesClosure : public KlassClosure {
+private:
+  Stack<jclass, mtInternal> _classStack;
+  JvmtiEnv* _env;
 
-// The closure for GetLoadedClasses and GetClassLoaderClasses
+public:
+  LoadedClassesClosure(JvmtiEnv* env) {
+    _env = env;
+  }
+
+  void do_klass(Klass* k) {
+    // Collect all jclasses
+    _classStack.push((jclass) _env->jni_reference(k->java_mirror()));
+  }
+
+  int extract(jclass* result_list) {
+    // The size of the Stack will be 0 after extract, so get it here
+    int count = (int)_classStack.size();
+    int i = count;
+
+    // Pop all jclasses, fill backwards
+    while (!_classStack.is_empty()) {
+      result_list[--i] = _classStack.pop();
+    }
+
+    // Return the number of elements written
+    return count;
+  }
+
+  // Return current size of the Stack
+  int get_count() {
+    return (int)_classStack.size();
+  }
+};
+
+// The closure for GetClassLoaderClasses
 class JvmtiGetLoadedClassesClosure : public StackObj {
   // Since the SystemDictionary::classes_do callback
   // doesn't pass a closureData pointer,
@@ -165,19 +200,6 @@
     }
   }
 
-  // Finally, the static methods that are the callbacks
-  static void increment(Klass* k) {
-    JvmtiGetLoadedClassesClosure* that = JvmtiGetLoadedClassesClosure::get_this();
-    if (that->get_initiatingLoader() == NULL) {
-      for (Klass* l = k; l != NULL; l = l->array_klass_or_null()) {
-        that->set_count(that->get_count() + 1);
-      }
-    } else if (k != NULL) {
-      // if initiating loader not null, just include the instance with 1 dimension
-      that->set_count(that->get_count() + 1);
-    }
-  }
-
   static void increment_with_loader(Klass* k, ClassLoaderData* loader_data) {
     JvmtiGetLoadedClassesClosure* that = JvmtiGetLoadedClassesClosure::get_this();
     oop class_loader = loader_data->class_loader();
@@ -196,24 +218,6 @@
     }
   }
 
-  static void add(Klass* k) {
-    JvmtiGetLoadedClassesClosure* that = JvmtiGetLoadedClassesClosure::get_this();
-    if (that->available()) {
-      if (that->get_initiatingLoader() == NULL) {
-        for (Klass* l = k; l != NULL; l = l->array_klass_or_null()) {
-          oop mirror = l->java_mirror();
-          that->set_element(that->get_index(), mirror);
-          that->set_index(that->get_index() + 1);
-        }
-      } else if (k != NULL) {
-        // if initiating loader not null, just include the instance with 1 dimension
-        oop mirror = k->java_mirror();
-        that->set_element(that->get_index(), mirror);
-        that->set_index(that->get_index() + 1);
-      }
-    }
-  }
-
   static void add_with_loader(Klass* k, ClassLoaderData* loader_data) {
     JvmtiGetLoadedClassesClosure* that = JvmtiGetLoadedClassesClosure::get_this();
     if (that->available()) {
@@ -255,39 +259,30 @@
 
 jvmtiError
 JvmtiGetLoadedClasses::getLoadedClasses(JvmtiEnv *env, jint* classCountPtr, jclass** classesPtr) {
-  // Since SystemDictionary::classes_do only takes a function pointer
-  // and doesn't call back with a closure data pointer,
-  // we can only pass static methods.
 
-  JvmtiGetLoadedClassesClosure closure;
+  LoadedClassesClosure closure(env);
   {
     // To get a consistent list of classes we need MultiArray_lock to ensure
-    // array classes aren't created, and SystemDictionary_lock to ensure that
-    // classes aren't added to the system dictionary,
+    // array classes aren't created.
     MutexLocker ma(MultiArray_lock);
-    MutexLocker sd(SystemDictionary_lock);
+
+    // Iterate through all classes in ClassLoaderDataGraph
+    // and collect them using the LoadedClassesClosure
+    ClassLoaderDataGraph::loaded_classes_do(&closure);
+  }
 
-    // First, count the classes
-    SystemDictionary::classes_do(&JvmtiGetLoadedClassesClosure::increment);
-    Universe::basic_type_classes_do(&JvmtiGetLoadedClassesClosure::increment);
-    // Next, fill in the classes
-    closure.allocate();
-    SystemDictionary::classes_do(&JvmtiGetLoadedClassesClosure::add);
-    Universe::basic_type_classes_do(&JvmtiGetLoadedClassesClosure::add);
-    // Drop the SystemDictionary_lock, so the results could be wrong from here,
-    // but we still have a snapshot.
+  // Return results by extracting the collected contents into a list
+  // allocated via JvmtiEnv
+  jclass* result_list;
+  jvmtiError error = env->Allocate(closure.get_count() * sizeof(jclass),
+                                   (unsigned char**)&result_list);
+
+  if (error == JVMTI_ERROR_NONE) {
+    int count = closure.extract(result_list);
+    *classCountPtr = count;
+    *classesPtr = result_list;
   }
-  // Post results
-  jclass* result_list;
-  jvmtiError err = env->Allocate(closure.get_count() * sizeof(jclass),
-                                 (unsigned char**)&result_list);
-  if (err != JVMTI_ERROR_NONE) {
-    return err;
-  }
-  closure.extract(env, result_list);
-  *classCountPtr = closure.get_count();
-  *classesPtr = result_list;
-  return JVMTI_ERROR_NONE;
+  return error;
 }
 
 jvmtiError
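
A minimal standalone sketch (not part of this changeset) of the collect-then-extract
pattern the new code uses: classes are pushed onto a LIFO stack while the lock is
held, and extract() later fills the result array back-to-front so it ends up in
push order. std::stack and int stand in for HotSpot's Stack<jclass, mtInternal>
and jclass.

    #include <cstdio>
    #include <stack>
    #include <vector>

    // Pop a LIFO stack into an array back-to-front, so the array ends up in
    // the original push order (what LoadedClassesClosure::extract does).
    static int extract(std::stack<int>& s, int* result_list) {
      int count = (int)s.size();   // the stack is empty afterwards, so read size first
      int i = count;
      while (!s.empty()) {
        result_list[--i] = s.top();
        s.pop();
      }
      return count;
    }

    int main() {
      std::stack<int> s;
      for (int v = 1; v <= 3; v++) s.push(v);   // collect phase
      std::vector<int> out(s.size());
      int n = extract(s, out.data());           // extract phase
      for (int i = 0; i < n; i++) printf("%d ", out[i]);  // prints: 1 2 3
      printf("\n");
      return 0;
    }
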
--- a/src/share/vm/prims/jvmtiImpl.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/prims/jvmtiImpl.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -225,18 +225,20 @@
   _method = NULL;
   _bci    = 0;
   _class_loader = NULL;
-#ifdef CHECK_UNHANDLED_OOPS
-  // This one is always allocated with new, but check it just in case.
-  Thread *thread = Thread::current();
-  if (thread->is_in_stack((address)&_method)) {
-    thread->allow_unhandled_oop((oop*)&_method);
-  }
-#endif // CHECK_UNHANDLED_OOPS
 }
 
 JvmtiBreakpoint::JvmtiBreakpoint(Method* m_method, jlocation location) {
   _method        = m_method;
   _class_loader  = _method->method_holder()->class_loader_data()->class_loader();
+#ifdef CHECK_UNHANDLED_OOPS
+  // _class_loader can't be wrapped in a Handle, because JvmtiBreakpoints are
+  // eventually allocated on the heap.
+  //
+  // The code handling JvmtiBreakpoints allocated on the stack can't be
+  // interrupted by a GC until _class_loader is reachable by the GC via the
+  // oops_do method.
+  Thread::current()->allow_unhandled_oop(&_class_loader);
+#endif // CHECK_UNHANDLED_OOPS
   assert(_method != NULL, "_method != NULL");
   _bci           = (int) location;
   assert(_bci >= 0, "_bci >= 0");
@@ -273,59 +275,49 @@
 
   // add/remove breakpoint to/from versions of the method that
   // are EMCP. Directly or transitively obsolete methods are
-  // not saved in the PreviousVersionInfo.
+  // not saved in the PreviousVersionNodes.
   Thread *thread = Thread::current();
   instanceKlassHandle ikh = instanceKlassHandle(thread, _method->method_holder());
   Symbol* m_name = _method->name();
   Symbol* m_signature = _method->signature();
 
-  {
-    ResourceMark rm(thread);
-    // PreviousVersionInfo objects returned via PreviousVersionWalker
-    // contain a GrowableArray of handles. We have to clean up the
-    // GrowableArray _after_ the PreviousVersionWalker destructor
-    // has destroyed the handles.
-    {
-      // search previous versions if they exist
-      PreviousVersionWalker pvw((InstanceKlass *)ikh());
-      for (PreviousVersionInfo * pv_info = pvw.next_previous_version();
-           pv_info != NULL; pv_info = pvw.next_previous_version()) {
-        GrowableArray<methodHandle>* methods =
-          pv_info->prev_EMCP_method_handles();
+  // search previous versions if they exist
+  PreviousVersionWalker pvw(thread, (InstanceKlass *)ikh());
+  for (PreviousVersionNode * pv_node = pvw.next_previous_version();
+       pv_node != NULL; pv_node = pvw.next_previous_version()) {
+    GrowableArray<Method*>* methods = pv_node->prev_EMCP_methods();
 
-        if (methods == NULL) {
-          // We have run into a PreviousVersion generation where
-          // all methods were made obsolete during that generation's
-          // RedefineClasses() operation. At the time of that
-          // operation, all EMCP methods were flushed so we don't
-          // have to go back any further.
-          //
-          // A NULL methods array is different than an empty methods
-          // array. We cannot infer any optimizations about older
-          // generations from an empty methods array for the current
-          // generation.
-          break;
-        }
+    if (methods == NULL) {
+      // We have run into a PreviousVersion generation where
+      // all methods were made obsolete during that generation's
+      // RedefineClasses() operation. At the time of that
+      // operation, all EMCP methods were flushed so we don't
+      // have to go back any further.
+      //
+      // A NULL methods array is different than an empty methods
+      // array. We cannot infer any optimizations about older
+      // generations from an empty methods array for the current
+      // generation.
+      break;
+    }
 
-        for (int i = methods->length() - 1; i >= 0; i--) {
-          methodHandle method = methods->at(i);
-          // obsolete methods that are running are not deleted from
-          // previous version array, but they are skipped here.
-          if (!method->is_obsolete() &&
-              method->name() == m_name &&
-              method->signature() == m_signature) {
-            RC_TRACE(0x00000800, ("%sing breakpoint in %s(%s)",
-              meth_act == &Method::set_breakpoint ? "sett" : "clear",
-              method->name()->as_C_string(),
-              method->signature()->as_C_string()));
+    for (int i = methods->length() - 1; i >= 0; i--) {
+      Method* method = methods->at(i);
+      // obsolete methods that are running are not deleted from
+      // the previous version array, but they are skipped here.
+      if (!method->is_obsolete() &&
+          method->name() == m_name &&
+          method->signature() == m_signature) {
+        RC_TRACE(0x00000800, ("%sing breakpoint in %s(%s)",
+          meth_act == &Method::set_breakpoint ? "sett" : "clear",
+          method->name()->as_C_string(),
+          method->signature()->as_C_string()));
 
-            ((Method*)method()->*meth_act)(_bci);
-            break;
-          }
-        }
+        (method->*meth_act)(_bci);
+        break;
       }
-    } // pvw is cleaned up
-  } // rm is cleaned up
+    }
+  }
 }
 
 void JvmtiBreakpoint::set() {
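
With the handle-based PreviousVersionInfo machinery gone, the breakpoint code is a
plain walk over previous class versions. A standalone sketch (toy types, not HotSpot
code) of that control flow, including the NULL-versus-empty distinction the comment
above spells out:

    #include <cstdio>
    #include <string>
    #include <vector>

    // Walk generations newest to oldest; a NULL method list means all EMCP
    // methods of that generation were flushed, so older generations need not
    // be visited. An empty list, by contrast, just has no matches.
    typedef std::vector<std::string> MethodList;

    static void walk(const std::vector<const MethodList*>& generations,
                     const std::string& target) {
      for (size_t g = 0; g < generations.size(); g++) {
        const MethodList* methods = generations[g];
        if (methods == NULL) break;            // flushed generation: stop here
        for (int i = (int)methods->size() - 1; i >= 0; i--) {
          if ((*methods)[i] == target) {
            printf("found %s in generation %zu\n", target.c_str(), g);
            break;
          }
        }
      }
    }

    int main() {
      MethodList g0;
      g0.push_back("foo");                     // newest generation
      std::vector<const MethodList*> gens;
      gens.push_back(&g0);
      gens.push_back(NULL);                    // flushed; walk stops here
      MethodList g2;
      gens.push_back(&g2);                     // never visited
      walk(gens, "foo");
      return 0;
    }
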
--- a/src/share/vm/prims/jvmtiRedefineClasses.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/prims/jvmtiRedefineClasses.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1072,8 +1072,17 @@
     }
 
     res = merge_cp_and_rewrite(the_class, scratch_class, THREAD);
-    if (res != JVMTI_ERROR_NONE) {
-      return res;
+    if (HAS_PENDING_EXCEPTION) {
+      Symbol* ex_name = PENDING_EXCEPTION->klass()->name();
+      // RC_TRACE_WITH_THREAD macro has an embedded ResourceMark
+      RC_TRACE_WITH_THREAD(0x00000002, THREAD,
+        ("merge_cp_and_rewrite exception: '%s'", ex_name->as_C_string()));
+      CLEAR_PENDING_EXCEPTION;
+      if (ex_name == vmSymbols::java_lang_OutOfMemoryError()) {
+        return JVMTI_ERROR_OUT_OF_MEMORY;
+      } else {
+        return JVMTI_ERROR_INTERNAL;
+      }
     }
 
     if (VerifyMergedCPBytecodes) {
@@ -1105,6 +1114,9 @@
     }
     if (HAS_PENDING_EXCEPTION) {
       Symbol* ex_name = PENDING_EXCEPTION->klass()->name();
+      // RC_TRACE_WITH_THREAD macro has an embedded ResourceMark
+      RC_TRACE_WITH_THREAD(0x00000002, THREAD,
+        ("Rewriter::rewrite or link_methods exception: '%s'", ex_name->as_C_string()));
       CLEAR_PENDING_EXCEPTION;
       if (ex_name == vmSymbols::java_lang_OutOfMemoryError()) {
         return JVMTI_ERROR_OUT_OF_MEMORY;
@@ -1395,8 +1407,8 @@
   ClassLoaderData* loader_data = the_class->class_loader_data();
   ConstantPool* merge_cp_oop =
     ConstantPool::allocate(loader_data,
-                                  merge_cp_length,
-                                  THREAD);
+                           merge_cp_length,
+                           CHECK_(JVMTI_ERROR_OUT_OF_MEMORY));
   MergeCPCleaner cp_cleaner(loader_data, merge_cp_oop);
 
   HandleMark hm(THREAD);  // make sure handles are cleared before
@@ -1472,7 +1484,8 @@
 
       // Replace the new constant pool with a shrunken copy of the
       // merged constant pool
-      set_new_constant_pool(loader_data, scratch_class, merge_cp, merge_cp_length, THREAD);
+      set_new_constant_pool(loader_data, scratch_class, merge_cp, merge_cp_length,
+                            CHECK_(JVMTI_ERROR_OUT_OF_MEMORY));
       // The new constant pool replaces scratch_cp so have cleaner clean it up.
       // It can't be cleaned up while there are handles to it.
       cp_cleaner.add_scratch_cp(scratch_cp());
@@ -1502,7 +1515,8 @@
     // merged constant pool so now the rewritten bytecodes have
     // valid references; the previous new constant pool will get
     // GCed.
-    set_new_constant_pool(loader_data, scratch_class, merge_cp, merge_cp_length, THREAD);
+    set_new_constant_pool(loader_data, scratch_class, merge_cp, merge_cp_length,
+                          CHECK_(JVMTI_ERROR_OUT_OF_MEMORY));
     // The new constant pool replaces scratch_cp so have cleaner clean it up.
     // It can't be cleaned up while there are handles to it.
     cp_cleaner.add_scratch_cp(scratch_cp());
@@ -1590,11 +1604,23 @@
   for (int i = methods->length() - 1; i >= 0; i--) {
     methodHandle method(THREAD, methods->at(i));
     methodHandle new_method;
-    rewrite_cp_refs_in_method(method, &new_method, CHECK_false);
+    rewrite_cp_refs_in_method(method, &new_method, THREAD);
     if (!new_method.is_null()) {
       // the method has been replaced so save the new method version
+      // even in the case of an exception. The original method is on the
+      // deallocation list.
       methods->at_put(i, new_method());
     }
+    if (HAS_PENDING_EXCEPTION) {
+      Symbol* ex_name = PENDING_EXCEPTION->klass()->name();
+      // RC_TRACE_WITH_THREAD macro has an embedded ResourceMark
+      RC_TRACE_WITH_THREAD(0x00000002, THREAD,
+        ("rewrite_cp_refs_in_method exception: '%s'", ex_name->as_C_string()));
+      // Need to clear the pending exception here because the caller above
+      // maps a false return value to JVMTI_ERROR_INTERNAL.
+      CLEAR_PENDING_EXCEPTION;
+      return false;
+    }
   }
 
   return true;
@@ -1674,10 +1700,7 @@
               Pause_No_Safepoint_Verifier pnsv(&nsv);
 
               // ldc is 2 bytes and ldc_w is 3 bytes
-              m = rc.insert_space_at(bci, 3, inst_buffer, THREAD);
-              if (m.is_null() || HAS_PENDING_EXCEPTION) {
-                guarantee(false, "insert_space_at() failed");
-              }
+              m = rc.insert_space_at(bci, 3, inst_buffer, CHECK);
             }
 
             // return the new method so that the caller can update
@@ -2487,8 +2510,8 @@
   // scratch_cp is a merged constant pool and has enough space for a
   // worst case merge situation. We want to associate the minimum
   // sized constant pool with the klass to save space.
-  constantPoolHandle smaller_cp(THREAD,
-          ConstantPool::allocate(loader_data, scratch_cp_length, THREAD));
+  ConstantPool* cp = ConstantPool::allocate(loader_data, scratch_cp_length, CHECK);
+  constantPoolHandle smaller_cp(THREAD, cp);
 
   // preserve version() value in the smaller copy
   int version = scratch_cp->version();
@@ -2500,6 +2523,11 @@
   smaller_cp->set_pool_holder(scratch_class());
 
   scratch_cp->copy_cp_to(1, scratch_cp_length - 1, smaller_cp, 1, THREAD);
+  if (HAS_PENDING_EXCEPTION) {
+    // Exception is handled in the caller
+    loader_data->add_to_deallocate_list(smaller_cp());
+    return;
+  }
   scratch_cp = smaller_cp;
 
   // attach new constant pool to klass
@@ -2727,13 +2755,26 @@
     // InstanceKlass around to hold obsolete methods so we don't have
     // any other InstanceKlass embedded vtables to update. The vtable
     // holds the Method*s for virtual (but not final) methods.
-    if (ik->vtable_length() > 0 && ik->is_subtype_of(_the_class_oop)) {
+    // Default methods, or concrete methods in interfaces, are stored
+    // in the vtable, so if an interface changes we need to run
+    // adjust_method_entries() for every InstanceKlass, which will also
+    // adjust the default method vtable indices.
+    // We also need to adjust any default method entries that are
+    // not yet in the vtable, because the vtable setup is in progress.
+    // This must be done after we adjust the default_methods and
+    // default_vtable_indices for methods already in the vtable.
+    if (ik->vtable_length() > 0 && (_the_class_oop->is_interface()
+        || ik->is_subtype_of(_the_class_oop))) {
       // ik->vtable() creates a wrapper object; rm cleans it up
       ResourceMark rm(_thread);
       ik->vtable()->adjust_method_entries(_matching_old_methods,
                                           _matching_new_methods,
                                           _matching_methods_length,
                                           &trace_name_printed);
+      ik->adjust_default_methods(_matching_old_methods,
+                                 _matching_new_methods,
+                                 _matching_methods_length,
+                                 &trace_name_printed);
     }
 
     // If the current class has an itable and we are either redefining an
@@ -2779,28 +2820,20 @@
                                         &trace_name_printed);
       }
     }
-    {
-      ResourceMark rm(_thread);
-      // PreviousVersionInfo objects returned via PreviousVersionWalker
-      // contain a GrowableArray of handles. We have to clean up the
-      // GrowableArray _after_ the PreviousVersionWalker destructor
-      // has destroyed the handles.
-      {
-        // the previous versions' constant pool caches may need adjustment
-        PreviousVersionWalker pvw(ik);
-        for (PreviousVersionInfo * pv_info = pvw.next_previous_version();
-             pv_info != NULL; pv_info = pvw.next_previous_version()) {
-          other_cp = pv_info->prev_constant_pool_handle();
-          cp_cache = other_cp->cache();
-          if (cp_cache != NULL) {
-            cp_cache->adjust_method_entries(_matching_old_methods,
-                                            _matching_new_methods,
-                                            _matching_methods_length,
-                                            &trace_name_printed);
-          }
-        }
-      } // pvw is cleaned up
-    } // rm is cleaned up
+
+    // the previous versions' constant pool caches may need adjustment
+    PreviousVersionWalker pvw(_thread, ik);
+    for (PreviousVersionNode * pv_node = pvw.next_previous_version();
+         pv_node != NULL; pv_node = pvw.next_previous_version()) {
+      other_cp = pv_node->prev_constant_pool();
+      cp_cache = other_cp->cache();
+      if (cp_cache != NULL) {
+        cp_cache->adjust_method_entries(_matching_old_methods,
+                                        _matching_new_methods,
+                                        _matching_methods_length,
+                                        &trace_name_printed);
+      }
+    }
   }
 }
 
@@ -2911,13 +2944,13 @@
       old_method->set_is_obsolete();
       obsolete_count++;
 
-      // obsolete methods need a unique idnum
+      // obsolete methods need a unique idnum so they become new entries in
+      // the jmethodID cache in InstanceKlass
       u2 num = InstanceKlass::cast(_the_class_oop)->next_method_idnum();
       if (num != ConstMethod::UNSET_IDNUM) {
-//      u2 old_num = old_method->method_idnum();
         old_method->set_method_idnum(num);
-// TO DO: attach obsolete annotations to obsolete method's new idnum
       }
+
       // With tracing we try not to "yack" too much. The position of
       // this trace assumes there are fewer obsolete methods than
       // EMCP methods.
@@ -2930,7 +2963,7 @@
   for (int i = 0; i < _deleted_methods_length; ++i) {
     Method* old_method = _deleted_methods[i];
 
-    assert(old_method->vtable_index() < 0,
+    assert(!old_method->has_vtable_index(),
            "cannot delete methods with vtable entries");;
 
     // Mark all deleted methods as old and obsolete
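
Several hunks above replace bare error returns with one pattern: trace the pending
exception's class name, clear it, and map java.lang.OutOfMemoryError to
JVMTI_ERROR_OUT_OF_MEMORY and anything else to JVMTI_ERROR_INTERNAL. A standalone
sketch of just that mapping; the enum and the string comparison stand in for the
real JVMTI error constants and Symbol* comparison.

    #include <cstring>

    enum JvmtiError { JVMTI_ERROR_NONE, JVMTI_ERROR_OUT_OF_MEMORY, JVMTI_ERROR_INTERNAL };

    // In the real code, CLEAR_PENDING_EXCEPTION runs before the mapping.
    static JvmtiError map_pending_exception(const char* pending_class_name) {
      if (pending_class_name == NULL) return JVMTI_ERROR_NONE;  // nothing pending
      if (strcmp(pending_class_name, "java/lang/OutOfMemoryError") == 0) {
        return JVMTI_ERROR_OUT_OF_MEMORY;
      }
      return JVMTI_ERROR_INTERNAL;
    }

    int main() {
      return map_pending_exception("java/lang/OutOfMemoryError")
                 == JVMTI_ERROR_OUT_OF_MEMORY ? 0 : 1;
    }
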
--- a/src/share/vm/prims/jvmtiTagMap.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/prims/jvmtiTagMap.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -165,7 +165,7 @@
   static unsigned int hash(oop key, int size) {
     // shift right to get better distribution (as these bits will be zero
     // with aligned addresses)
-    unsigned int addr = (unsigned int)((intptr_t)key);
+    unsigned int addr = (unsigned int)(cast_from_oop<intptr_t>(key));
 #ifdef _LP64
     return (addr >> 3) % size;
 #else
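
The cast_from_oop change keeps the hash function's behaviour: on LP64 the address
is shifted right by 3 before the modulus, because 8-byte alignment guarantees the
low bits are zero and would otherwise contribute nothing to the distribution. A
small standalone illustration (addresses and table size are made up):

    #include <cstdint>
    #include <cstdio>

    // LP64 case: drop the three always-zero low bits of an 8-byte-aligned
    // address before taking the modulus, so buckets are chosen by bits that
    // actually vary.
    static unsigned int hash64(uintptr_t addr, int size) {
      return (unsigned int)((addr >> 3) % size);
    }

    int main() {
      for (uintptr_t a = 0x1000; a < 0x1028; a += 8) {
        printf("addr=0x%llx bucket=%u\n", (unsigned long long)a, hash64(a, 7));
      }
      return 0;
    }
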
--- a/src/share/vm/prims/methodHandles.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/prims/methodHandles.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -127,25 +127,37 @@
 }
 
 oop MethodHandles::init_MemberName(Handle mname, Handle target) {
+  // This method is used from java.lang.invoke.MemberName constructors.
+  // It fills in the new MemberName from a java.lang.reflect.Member.
   Thread* thread = Thread::current();
   oop target_oop = target();
   Klass* target_klass = target_oop->klass();
   if (target_klass == SystemDictionary::reflect_Field_klass()) {
     oop clazz = java_lang_reflect_Field::clazz(target_oop); // fd.field_holder()
     int slot  = java_lang_reflect_Field::slot(target_oop);  // fd.index()
-    int mods  = java_lang_reflect_Field::modifiers(target_oop);
-    oop type  = java_lang_reflect_Field::type(target_oop);
-    oop name  = java_lang_reflect_Field::name(target_oop);
     KlassHandle k(thread, java_lang_Class::as_Klass(clazz));
-    intptr_t offset = InstanceKlass::cast(k())->field_offset(slot);
-    return init_field_MemberName(mname, k, accessFlags_from(mods), type, name, offset);
+    if (!k.is_null() && k->oop_is_instance()) {
+      fieldDescriptor fd(InstanceKlass::cast(k()), slot);
+      oop mname2 = init_field_MemberName(mname, fd);
+      if (mname2 != NULL) {
+        // Since we have the reified name and type handy, add them to the result.
+        if (java_lang_invoke_MemberName::name(mname2) == NULL)
+          java_lang_invoke_MemberName::set_name(mname2, java_lang_reflect_Field::name(target_oop));
+        if (java_lang_invoke_MemberName::type(mname2) == NULL)
+          java_lang_invoke_MemberName::set_type(mname2, java_lang_reflect_Field::type(target_oop));
+      }
+      return mname2;
+    }
   } else if (target_klass == SystemDictionary::reflect_Method_klass()) {
     oop clazz  = java_lang_reflect_Method::clazz(target_oop);
     int slot   = java_lang_reflect_Method::slot(target_oop);
     KlassHandle k(thread, java_lang_Class::as_Klass(clazz));
     if (!k.is_null() && k->oop_is_instance()) {
       Method* m = InstanceKlass::cast(k())->method_with_idnum(slot);
-      return init_method_MemberName(mname, m, true, k);
+      if (m == NULL || is_signature_polymorphic(m->intrinsic_id()))
+        return NULL;            // do not resolve unless there is a concrete signature
+      CallInfo info(m, k());
+      return init_method_MemberName(mname, info);
     }
   } else if (target_klass == SystemDictionary::reflect_Constructor_klass()) {
     oop clazz  = java_lang_reflect_Constructor::clazz(target_oop);
@@ -153,65 +165,72 @@
     KlassHandle k(thread, java_lang_Class::as_Klass(clazz));
     if (!k.is_null() && k->oop_is_instance()) {
       Method* m = InstanceKlass::cast(k())->method_with_idnum(slot);
-      return init_method_MemberName(mname, m, false, k);
-    }
-  } else if (target_klass == SystemDictionary::MemberName_klass()) {
-    // Note: This only works if the MemberName has already been resolved.
-    oop clazz        = java_lang_invoke_MemberName::clazz(target_oop);
-    int flags        = java_lang_invoke_MemberName::flags(target_oop);
-    Metadata* vmtarget=java_lang_invoke_MemberName::vmtarget(target_oop);
-    intptr_t vmindex = java_lang_invoke_MemberName::vmindex(target_oop);
-    KlassHandle k(thread, java_lang_Class::as_Klass(clazz));
-    int ref_kind     = (flags >> REFERENCE_KIND_SHIFT) & REFERENCE_KIND_MASK;
-    if (vmtarget == NULL)  return NULL;  // not resolved
-    if ((flags & IS_FIELD) != 0) {
-      assert(vmtarget->is_klass(), "field vmtarget is Klass*");
-      int basic_mods = (ref_kind_is_static(ref_kind) ? JVM_ACC_STATIC : 0);
-      // FIXME:  how does k (receiver_limit) contribute?
-      KlassHandle k_vmtarget(thread, (Klass*)vmtarget);
-      return init_field_MemberName(mname, k_vmtarget, accessFlags_from(basic_mods), NULL, NULL, vmindex);
-    } else if ((flags & (IS_METHOD | IS_CONSTRUCTOR)) != 0) {
-      assert(vmtarget->is_method(), "method or constructor vmtarget is Method*");
-      return init_method_MemberName(mname, (Method*)vmtarget, ref_kind_does_dispatch(ref_kind), k);
-    } else {
-      return NULL;
+      if (m == NULL)  return NULL;
+      CallInfo info(m, k());
+      return init_method_MemberName(mname, info);
     }
   }
   return NULL;
 }
 
-oop MethodHandles::init_method_MemberName(Handle mname, Method* m, bool do_dispatch,
-                                          KlassHandle receiver_limit_h) {
-  Klass* receiver_limit = receiver_limit_h();
-  AccessFlags mods = m->access_flags();
-  int flags = (jushort)( mods.as_short() & JVM_RECOGNIZED_METHOD_MODIFIERS );
-  int vmindex = Method::nonvirtual_vtable_index; // implies never any dispatch
-  Klass* mklass = m->method_holder();
-  if (receiver_limit == NULL)
-    receiver_limit = mklass;
-  if (m->is_initializer()) {
-    flags |= IS_CONSTRUCTOR | (JVM_REF_invokeSpecial << REFERENCE_KIND_SHIFT);
-  } else if (mods.is_static()) {
-    flags |= IS_METHOD | (JVM_REF_invokeStatic << REFERENCE_KIND_SHIFT);
-  } else if (receiver_limit != mklass &&
-             !receiver_limit->is_subtype_of(mklass)) {
-    return NULL;  // bad receiver limit
-  } else if (do_dispatch && receiver_limit->is_interface() &&
-             mklass->is_interface()) {
+oop MethodHandles::init_method_MemberName(Handle mname, CallInfo& info) {
+  assert(info.resolved_appendix().is_null(), "only normal methods here");
+  KlassHandle receiver_limit = info.resolved_klass();
+  methodHandle m = info.resolved_method();
+  int flags = (jushort)( m->access_flags().as_short() & JVM_RECOGNIZED_METHOD_MODIFIERS );
+  int vmindex = Method::invalid_vtable_index;
+
+  switch (info.call_kind()) {
+  case CallInfo::itable_call:
+    vmindex = info.itable_index();
+    // Note that the itable index only works with the method holder.
+    receiver_limit = m->method_holder();
+    assert(receiver_limit->verify_itable_index(vmindex), "");
     flags |= IS_METHOD | (JVM_REF_invokeInterface << REFERENCE_KIND_SHIFT);
-    receiver_limit = mklass;  // ignore passed-in limit; interfaces are interconvertible
-    vmindex = klassItable::compute_itable_index(m);
-  } else if (do_dispatch && mklass != receiver_limit && mklass->is_interface()) {
+    if (TraceInvokeDynamic) {
+      ResourceMark rm;
+      tty->print_cr("memberName: invokeinterface method_holder::method: %s, receiver: %s, itableindex: %d, access_flags:",
+            Method::name_and_sig_as_C_string(receiver_limit(), m->name(), m->signature()),
+            receiver_limit()->internal_name(), vmindex);
+      m->access_flags().print_on(tty);
+      if (!m->is_abstract()) {
+        tty->print("default");
+      }
+      tty->cr();
+    }
+    break;
+
+  case CallInfo::vtable_call:
+    vmindex = info.vtable_index();
     flags |= IS_METHOD | (JVM_REF_invokeVirtual << REFERENCE_KIND_SHIFT);
-    // it is a miranda method, so m->vtable_index is not what we want
-    ResourceMark rm;
-    klassVtable* vt = InstanceKlass::cast(receiver_limit)->vtable();
-    vmindex = vt->index_of_miranda(m->name(), m->signature());
-  } else if (!do_dispatch || m->can_be_statically_bound()) {
-    flags |= IS_METHOD | (JVM_REF_invokeSpecial << REFERENCE_KIND_SHIFT);
-  } else {
-    flags |= IS_METHOD | (JVM_REF_invokeVirtual << REFERENCE_KIND_SHIFT);
-    vmindex = m->vtable_index();
+    assert(receiver_limit->is_subtype_of(m->method_holder()), "virtual call must be type-safe");
+    if (TraceInvokeDynamic) {
+      ResourceMark rm;
+      tty->print_cr("memberName: invokevirtual method_holder::method: %s, receiver: %s, vtableindex: %d, access_flags:",
+            Method::name_and_sig_as_C_string(receiver_limit(), m->name(), m->signature()),
+            receiver_limit()->internal_name(), vmindex);
+      m->access_flags().print_on(tty);
+      if (m->is_default_method()) {
+        tty->print("default");
+      }
+      tty->cr();
+    }
+    break;
+
+  case CallInfo::direct_call:
+    vmindex = Method::nonvirtual_vtable_index;
+    if (m->is_static()) {
+      flags |= IS_METHOD      | (JVM_REF_invokeStatic  << REFERENCE_KIND_SHIFT);
+    } else if (m->is_initializer()) {
+      flags |= IS_CONSTRUCTOR | (JVM_REF_invokeSpecial << REFERENCE_KIND_SHIFT);
+      assert(receiver_limit == m->method_holder(), "constructor call must be exactly typed");
+    } else {
+      flags |= IS_METHOD      | (JVM_REF_invokeSpecial << REFERENCE_KIND_SHIFT);
+      assert(receiver_limit->is_subtype_of(m->method_holder()), "special call must be type-safe");
+    }
+    break;
+
+  default:  assert(false, "bad CallInfo");  return NULL;
   }
 
   // @CallerSensitive annotation detected
@@ -221,7 +240,7 @@
 
   oop mname_oop = mname();
   java_lang_invoke_MemberName::set_flags(   mname_oop, flags);
-  java_lang_invoke_MemberName::set_vmtarget(mname_oop, m);
+  java_lang_invoke_MemberName::set_vmtarget(mname_oop, m());
   java_lang_invoke_MemberName::set_vmindex( mname_oop, vmindex);   // vtable/itable index
   java_lang_invoke_MemberName::set_clazz(   mname_oop, receiver_limit->java_mirror());
   // Note:  name and type can be lazily computed by resolve_MemberName,
@@ -237,59 +256,19 @@
   return mname();
 }
 
-Handle MethodHandles::init_method_MemberName(Handle mname, CallInfo& info, TRAPS) {
-  Handle empty;
-  if (info.resolved_appendix().not_null()) {
-    // The resolved MemberName must not be accompanied by an appendix argument,
-    // since there is no way to bind this value into the MemberName.
-    // Caller is responsible to prevent this from happening.
-    THROW_MSG_(vmSymbols::java_lang_InternalError(), "appendix", empty);
-  }
-  methodHandle m = info.resolved_method();
-  KlassHandle defc = info.resolved_klass();
-  int vmindex = Method::invalid_vtable_index;
-  if (defc->is_interface() && m->method_holder()->is_interface()) {
-    // static interface methods do not reference vtable or itable
-    if (m->is_static()) {
-      vmindex = Method::nonvirtual_vtable_index;
-    }
-    // interface methods invoked via invokespecial also
-    // do not reference vtable or itable.
-    int ref_kind = ((java_lang_invoke_MemberName::flags(mname()) >>
-                     REFERENCE_KIND_SHIFT) & REFERENCE_KIND_MASK);
-    if (ref_kind == JVM_REF_invokeSpecial) {
-      vmindex = Method::nonvirtual_vtable_index;
-    }
-    // If neither m is static nor ref_kind is invokespecial,
-    // set it to itable index.
-    if (vmindex == Method::invalid_vtable_index) {
-      // LinkResolver does not report itable indexes!  (fix this?)
-      vmindex = klassItable::compute_itable_index(m());
-    }
-  } else if (m->can_be_statically_bound()) {
-    // LinkResolver reports vtable index even for final methods!
-    vmindex = Method::nonvirtual_vtable_index;
-  } else {
-    vmindex = info.vtable_index();
-  }
-  oop res = init_method_MemberName(mname, m(), (vmindex >= 0), defc());
-  assert(res == NULL || (java_lang_invoke_MemberName::vmindex(res) == vmindex), "");
-  return Handle(THREAD, res);
-}
-
-oop MethodHandles::init_field_MemberName(Handle mname, KlassHandle field_holder,
-                                         AccessFlags mods, oop type, oop name,
-                                         intptr_t offset, bool is_setter) {
-  int flags = (jushort)( mods.as_short() & JVM_RECOGNIZED_FIELD_MODIFIERS );
-  flags |= IS_FIELD | ((mods.is_static() ? JVM_REF_getStatic : JVM_REF_getField) << REFERENCE_KIND_SHIFT);
+oop MethodHandles::init_field_MemberName(Handle mname, fieldDescriptor& fd, bool is_setter) {
+  int flags = (jushort)( fd.access_flags().as_short() & JVM_RECOGNIZED_FIELD_MODIFIERS );
+  flags |= IS_FIELD | ((fd.is_static() ? JVM_REF_getStatic : JVM_REF_getField) << REFERENCE_KIND_SHIFT);
   if (is_setter)  flags += ((JVM_REF_putField - JVM_REF_getField) << REFERENCE_KIND_SHIFT);
-  Metadata* vmtarget = field_holder();
-  int vmindex  = offset;  // determines the field uniquely when combined with static bit
+  Metadata* vmtarget = fd.field_holder();
+  int vmindex        = fd.offset();  // determines the field uniquely when combined with static bit
   oop mname_oop = mname();
   java_lang_invoke_MemberName::set_flags(mname_oop,    flags);
   java_lang_invoke_MemberName::set_vmtarget(mname_oop, vmtarget);
   java_lang_invoke_MemberName::set_vmindex(mname_oop,  vmindex);
-  java_lang_invoke_MemberName::set_clazz(mname_oop,    field_holder->java_mirror());
+  java_lang_invoke_MemberName::set_clazz(mname_oop,    fd.field_holder()->java_mirror());
+  oop type = field_signature_type_or_null(fd.signature());
+  oop name = field_name_or_null(fd.name());
   if (name != NULL)
     java_lang_invoke_MemberName::set_name(mname_oop,   name);
   if (type != NULL)
@@ -305,19 +284,6 @@
   return mname();
 }
 
-Handle MethodHandles::init_field_MemberName(Handle mname, FieldAccessInfo& info, TRAPS) {
-  return Handle();
-#if 0 // FIXME
-  KlassHandle field_holder = info.klass();
-  intptr_t    field_offset = info.field_offset();
-  return init_field_MemberName(mname_oop, field_holder(),
-                               info.access_flags(),
-                               type, name,
-                               field_offset, false /*is_setter*/);
-#endif
-}
-
-
 // JVM 2.9 Special Methods:
 // A method is signature polymorphic if and only if all of the following conditions hold :
 // * It is declared in the java.lang.invoke.MethodHandle class.
@@ -573,12 +539,12 @@
   return SystemDictionary::Object_klass()->java_mirror();
 }
 
-static oop field_name_or_null(Symbol* s) {
+oop MethodHandles::field_name_or_null(Symbol* s) {
   if (s == NULL)  return NULL;
   return StringTable::lookup(s);
 }
 
-static oop field_signature_type_or_null(Symbol* s) {
+oop MethodHandles::field_signature_type_or_null(Symbol* s) {
   if (s == NULL)  return NULL;
   BasicType bt = FieldType::basic_type(s);
   if (is_java_primitive(bt)) {
@@ -701,7 +667,14 @@
           return empty;
         }
       }
-      return init_method_MemberName(mname, result, THREAD);
+      if (result.resolved_appendix().not_null()) {
+        // The resolved MemberName must not be accompanied by an appendix argument,
+        // since there is no way to bind this value into the MemberName.
+        // Caller is responsible to prevent this from happening.
+        THROW_MSG_(vmSymbols::java_lang_InternalError(), "appendix", empty);
+      }
+      oop mname2 = init_method_MemberName(mname, result);
+      return Handle(THREAD, mname2);
     }
   case IS_CONSTRUCTOR:
     {
@@ -719,22 +692,21 @@
         }
       }
       assert(result.is_statically_bound(), "");
-      return init_method_MemberName(mname, result, THREAD);
+      oop mname2 = init_method_MemberName(mname, result);
+      return Handle(THREAD, mname2);
     }
   case IS_FIELD:
     {
-      // This is taken from LinkResolver::resolve_field, sans access checks.
-      fieldDescriptor fd; // find_field initializes fd if found
-      KlassHandle sel_klass(THREAD, InstanceKlass::cast(defc())->find_field(name, type, &fd));
-      // check if field exists; i.e., if a klass containing the field def has been selected
-      if (sel_klass.is_null())  return empty;  // should not happen
-      oop type = field_signature_type_or_null(fd.signature());
-      oop name = field_name_or_null(fd.name());
-      bool is_setter = (ref_kind_is_valid(ref_kind) && ref_kind_is_setter(ref_kind));
-      mname = Handle(THREAD,
-                     init_field_MemberName(mname, sel_klass,
-                                           fd.access_flags(), type, name, fd.offset(), is_setter));
-      return mname;
+      fieldDescriptor result; // resolve_field initializes result if found
+      {
+        assert(!HAS_PENDING_EXCEPTION, "");
+        LinkResolver::resolve_field(result, defc, name, type, KlassHandle(), Bytecodes::_nop, false, false, THREAD);
+        if (HAS_PENDING_EXCEPTION) {
+          return empty;
+        }
+      }
+      oop mname2 = init_field_MemberName(mname, result, ref_kind_is_setter(ref_kind));
+      return Handle(THREAD, mname2);
     }
   default:
     THROW_MSG_(vmSymbols::java_lang_InternalError(), "unrecognized MemberName format", empty);
@@ -793,7 +765,6 @@
     }
   case IS_FIELD:
     {
-      // This is taken from LinkResolver::resolve_field, sans access checks.
       assert(vmtarget->is_klass(), "field vmtarget is Klass*");
       if (!((Klass*) vmtarget)->oop_is_instance())  break;
       instanceKlassHandle defc(THREAD, (Klass*) vmtarget);
@@ -872,11 +843,7 @@
         Handle result(thread, results->obj_at(rfill++));
         if (!java_lang_invoke_MemberName::is_instance(result()))
           return -99;  // caller bug!
-        oop type = field_signature_type_or_null(st.signature());
-        oop name = field_name_or_null(st.name());
-        oop saved = MethodHandles::init_field_MemberName(result, st.klass(),
-                                                         st.access_flags(), type, name,
-                                                         st.offset());
+        oop saved = MethodHandles::init_field_MemberName(result, st.field_descriptor());
         if (saved != result())
           results->obj_at_put(rfill-1, saved);  // show saved instance to user
       } else if (++overflow >= overflow_limit) {
@@ -926,7 +893,8 @@
         Handle result(thread, results->obj_at(rfill++));
         if (!java_lang_invoke_MemberName::is_instance(result()))
           return -99;  // caller bug!
-        oop saved = MethodHandles::init_method_MemberName(result, m, true, NULL);
+        CallInfo info(m);
+        oop saved = MethodHandles::init_method_MemberName(result, info);
         if (saved != result())
           results->obj_at_put(rfill-1, saved);  // show saved instance to user
       } else if (++overflow >= overflow_limit) {
@@ -1226,8 +1194,7 @@
   } else if (vmtarget->is_klass()) {
     x = ((Klass*) vmtarget)->java_mirror();
   } else if (vmtarget->is_method()) {
-    Handle mname2 = MethodHandles::new_MemberName(CHECK_NULL);
-    x = MethodHandles::init_method_MemberName(mname2, (Method*)vmtarget, false, NULL);
+    x = mname();
   }
   result->obj_at_put(1, x);
   return JNIHandles::make_local(env, result());
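
init_method_MemberName packs the invocation kind into the MemberName flags word via
REFERENCE_KIND_SHIFT, and other code recovers it with the matching mask. A standalone
sketch of the packing and unpacking; the constant values are copied from the JDK 8
sources but should be treated as illustrative here.

    #include <cstdio>

    static const int REFERENCE_KIND_SHIFT  = 24;
    static const int REFERENCE_KIND_MASK   = 0x0F;
    static const int IS_METHOD             = 0x00010000;
    static const int JVM_REF_invokeVirtual = 5;

    int main() {
      // Pack: what the vtable_call case above does.
      int flags = IS_METHOD | (JVM_REF_invokeVirtual << REFERENCE_KIND_SHIFT);
      // Unpack: how the ref_kind is read back out of the flags word.
      int ref_kind = (flags >> REFERENCE_KIND_SHIFT) & REFERENCE_KIND_MASK;
      printf("ref_kind=%d (expect %d)\n", ref_kind, JVM_REF_invokeVirtual);
      return ref_kind == JVM_REF_invokeVirtual ? 0 : 1;
    }
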
--- a/src/share/vm/prims/methodHandles.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/prims/methodHandles.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -49,19 +49,18 @@
   // Adapters.
   static MethodHandlesAdapterBlob* _adapter_code;
 
+  // utility functions for reifying names and types
+  static oop field_name_or_null(Symbol* s);
+  static oop field_signature_type_or_null(Symbol* s);
+
  public:
   // working with member names
   static Handle resolve_MemberName(Handle mname, TRAPS); // compute vmtarget/vmindex from name/type
   static void expand_MemberName(Handle mname, int suppress, TRAPS);  // expand defc/name/type if missing
   static Handle new_MemberName(TRAPS);  // must be followed by init_MemberName
   static oop init_MemberName(Handle mname_h, Handle target_h); // compute vmtarget/vmindex from target
-  static oop init_method_MemberName(Handle mname_h, Method* m, bool do_dispatch,
-                                    KlassHandle receiver_limit_h);
-  static oop init_field_MemberName(Handle mname_h, KlassHandle field_holder_h,
-                                   AccessFlags mods, oop type, oop name,
-                                   intptr_t offset, bool is_setter = false);
-  static Handle init_method_MemberName(Handle mname_h, CallInfo& info, TRAPS);
-  static Handle init_field_MemberName(Handle mname_h, FieldAccessInfo& info, TRAPS);
+  static oop init_field_MemberName(Handle mname_h, fieldDescriptor& fd, bool is_setter = false);
+  static oop init_method_MemberName(Handle mname_h, CallInfo& info);
   static int method_ref_kind(Method* m, bool do_dispatch_if_possible = true);
   static int find_MemberNames(KlassHandle k, Symbol* name, Symbol* sig,
                               int mflags, KlassHandle caller,
--- a/src/share/vm/prims/nativeLookup.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/prims/nativeLookup.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -132,10 +132,6 @@
 #define FN_PTR(f) CAST_FROM_FN_PTR(void*, &f)
 
 static JNINativeMethod lookup_special_native_methods[] = {
-  // Next two functions only exist for compatibility with 1.3.1 and earlier.
-  { CC"Java_java_io_ObjectOutputStream_getPrimitiveFieldValues",   NULL, FN_PTR(JVM_GetPrimitiveFieldValues)     },  // intercept ObjectOutputStream getPrimitiveFieldValues for faster serialization
-  { CC"Java_java_io_ObjectInputStream_setPrimitiveFieldValues",    NULL, FN_PTR(JVM_SetPrimitiveFieldValues)     },  // intercept ObjectInputStream setPrimitiveFieldValues for faster serialization
-
   { CC"Java_sun_misc_Unsafe_registerNatives",                      NULL, FN_PTR(JVM_RegisterUnsafeMethods)       },
   { CC"Java_java_lang_invoke_MethodHandleNatives_registerNatives", NULL, FN_PTR(JVM_RegisterMethodHandleMethods) },
   { CC"Java_sun_misc_Perf_registerNatives",                        NULL, FN_PTR(JVM_RegisterPerfMethods)         },
@@ -143,9 +139,8 @@
 };
 
 static address lookup_special_native(char* jni_name) {
-  int i = !JDK_Version::is_gte_jdk14x_version() ? 0 : 2;  // see comment in lookup_special_native_methods
   int count = sizeof(lookup_special_native_methods) / sizeof(JNINativeMethod);
-  for (; i < count; i++) {
+  for (int i = 0; i < count; i++) {
     // NB: strstr is used for matching so that the jni prefix and postfix are ignored.
     if (strstr(jni_name, lookup_special_native_methods[i].name) != NULL) {
       return CAST_FROM_FN_PTR(address, lookup_special_native_methods[i].fnPtr);
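
With the 1.3.1-era serialization entries removed, lookup_special_native scans the
whole table unconditionally. A standalone sketch of the scan; Entry is a simplified
stand-in for JNINativeMethod, and strstr is what makes the match tolerant of a JNI
prefix or postfix wrapped around the registered name.

    #include <cstdio>
    #include <cstring>

    struct Entry { const char* name; void* fn; };

    static void* lookup(const Entry* table, int count, const char* jni_name) {
      for (int i = 0; i < count; i++) {
        // Substring match, so decorated names still hit the right entry.
        if (strstr(jni_name, table[i].name) != NULL) return table[i].fn;
      }
      return NULL;
    }

    int main() {
      static int target;
      Entry table[] = { { "Java_sun_misc_Perf_registerNatives", &target } };
      void* fn = lookup(table, 1, "wrap_Java_sun_misc_Perf_registerNatives_tail");
      printf("%s\n", fn == &target ? "matched" : "missed");
      return 0;
    }
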
--- a/src/share/vm/prims/unsafe.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/prims/unsafe.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -292,9 +292,9 @@
   volatile oop v;
   if (UseCompressedOops) {
     volatile narrowOop n = *(volatile narrowOop*) addr;
-    v = oopDesc::decode_heap_oop(n);
+    (void)const_cast<oop&>(v = oopDesc::decode_heap_oop(n));
   } else {
-    v = *(volatile oop*) addr;
+    (void)const_cast<oop&>(v = *(volatile oop*) addr);
   }
   OrderAccess::acquire();
   return JNIHandles::make_local(env, v);
@@ -1222,9 +1222,9 @@
 #endif /* USDT2 */
   if (event.should_commit()) {
     oop obj = thread->current_park_blocker();
-    event.set_klass(obj ? obj->klass() : NULL);
+    event.set_klass((obj != NULL) ? obj->klass() : NULL);
     event.set_timeout(time);
-    event.set_address(obj ? (TYPE_ADDRESS) (uintptr_t) obj : 0);
+    event.set_address((obj != NULL) ? (TYPE_ADDRESS) cast_from_oop<uintptr_t>(obj) : 0);
     event.commit();
   }
 UNSAFE_END
--- a/src/share/vm/prims/wbtestmethods/parserTests.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/prims/wbtestmethods/parserTests.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -117,11 +117,12 @@
 
   const char* c_cmdline = java_lang_String::as_utf8_string(JNIHandles::resolve(j_cmdline));
   objArrayOop argumentArray = objArrayOop(JNIHandles::resolve_non_null(arguments));
+  objArrayHandle argumentArray_ah(THREAD, argumentArray);
 
-  int length = argumentArray->length();
+  int length = argumentArray_ah->length();
 
   for (int i = 0; i < length; i++) {
-    oop argument_oop = argumentArray->obj_at(i);
+    oop argument_oop = argumentArray_ah->obj_at(i);
     fill_in_parser(&parser, argument_oop);
   }
 
@@ -130,19 +131,20 @@
 
   Klass* k = SystemDictionary::Object_klass();
   objArrayOop returnvalue_array = oopFactory::new_objArray(k, parser.num_arguments() * 2, CHECK_NULL);
+  objArrayHandle returnvalue_array_ah(THREAD, returnvalue_array);
 
   GrowableArray<const char *>*parsedArgNames = parser.argument_name_array();
 
   for (int i = 0; i < parser.num_arguments(); i++) {
     oop parsedName = java_lang_String::create_oop_from_str(parsedArgNames->at(i), CHECK_NULL);
-    returnvalue_array->obj_at_put(i*2, parsedName);
+    returnvalue_array_ah->obj_at_put(i*2, parsedName);
     GenDCmdArgument* arg = parser.lookup_dcmd_option(parsedArgNames->at(i), strlen(parsedArgNames->at(i)));
     char buf[VALUE_MAXLEN];
     arg->value_as_str(buf, sizeof(buf));
     oop parsedValue = java_lang_String::create_oop_from_str(buf, CHECK_NULL);
-    returnvalue_array->obj_at_put(i*2+1, parsedValue);
+    returnvalue_array_ah->obj_at_put(i*2+1, parsedValue);
   }
 
-  return (jobjectArray) JNIHandles::make_local(returnvalue_array);
+  return (jobjectArray) JNIHandles::make_local(returnvalue_array_ah());
 
 WB_END
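
The handle wrapping above matters because create_oop_from_str can trigger a GC, and
a moving collector only fixes up roots it knows about; a raw oop held across such a
call may dangle. A toy model (deliberately not HotSpot code) of why the extra
indirection of a handle survives a moving collection:

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    struct Heap {
      char* base;
      char* handle_slot;          // the one root the "GC" knows about
      void relocate() {           // simulate a moving collection
        char* moved = (char*)malloc(16);
        memcpy(moved, base, 16);
        free(base);
        base = moved;
        handle_slot = base;       // the GC updates the handle, not raw copies
      }
    };

    int main() {
      Heap h;
      h.base = (char*)malloc(16);
      strcpy(h.base, "payload");
      h.handle_slot = h.base;

      char* raw = h.base;              // like holding a raw oop across a GC point
      char** handle = &h.handle_slot;  // like holding a Handle

      h.relocate();                    // an allocation triggers the moving "GC"

      printf("handle sees: %s\n", *handle);  // still valid
      (void)raw;                       // raw now dangles; must not be used
      free(h.base);
      return 0;
    }
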
--- a/src/share/vm/prims/whitebox.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/prims/whitebox.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -33,6 +33,7 @@
 #include "prims/whitebox.hpp"
 #include "prims/wbtestmethods/parserTests.hpp"
 
+#include "runtime/arguments.hpp"
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/os.hpp"
 #include "utilities/debug.hpp"
@@ -94,6 +95,11 @@
   return closure.found();
 WB_END
 
+WB_ENTRY(jlong, WB_GetCompressedOopsMaxHeapSize(JNIEnv* env, jobject o)) {
+  return (jlong)Arguments::max_heap_for_compressed_oops();
+}
+WB_END
+
 WB_ENTRY(void, WB_PrintHeapSizes(JNIEnv* env, jobject o)) {
   CollectorPolicy * p = Universe::heap()->collector_policy();
   gclog_or_tty->print_cr("Minimum heap "SIZE_FORMAT" Initial heap "
@@ -436,6 +442,8 @@
       CC"(Ljava/lang/String;[Lsun/hotspot/parser/DiagnosticCommand;)[Ljava/lang/Object;",
       (void*) &WB_ParseCommandLine
   },
+  {CC"getCompressedOopsMaxHeapSize", CC"()J",
+      (void*)&WB_GetCompressedOopsMaxHeapSize},
   {CC"printHeapSizes",     CC"()V",                   (void*)&WB_PrintHeapSizes    },
 #if INCLUDE_ALL_GCS
   {CC"g1InConcurrentMark", CC"()Z",                   (void*)&WB_G1InConcurrentMark},
--- a/src/share/vm/runtime/arguments.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/runtime/arguments.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -28,6 +28,7 @@
 #include "compiler/compilerOracle.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/cardTableRS.hpp"
+#include "memory/genCollectedHeap.hpp"
 #include "memory/referenceProcessor.hpp"
 #include "memory/universe.inline.hpp"
 #include "oops/oop.inline.hpp"
@@ -57,6 +58,8 @@
 #endif
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
 #endif // INCLUDE_ALL_GCS
 
 // Note: This is a special bug reporting site for the JVM
@@ -93,6 +96,7 @@
 SystemProperty* Arguments::_system_properties   = NULL;
 const char*  Arguments::_gc_log_filename        = NULL;
 bool   Arguments::_has_profile                  = false;
+size_t Arguments::_conservative_max_heap_alignment = 0;
 uintx  Arguments::_min_heap_size                = 0;
 Arguments::Mode Arguments::_mode                = _mixed;
 bool   Arguments::_java_compiler                = false;
@@ -624,11 +628,11 @@
   }
 }
 
-static bool set_bool_flag(char* name, bool value, FlagValueOrigin origin) {
+static bool set_bool_flag(char* name, bool value, Flag::Flags origin) {
   return CommandLineFlags::boolAtPut(name, &value, origin);
 }
 
-static bool set_fp_numeric_flag(char* name, char* value, FlagValueOrigin origin) {
+static bool set_fp_numeric_flag(char* name, char* value, Flag::Flags origin) {
   double v;
   if (sscanf(value, "%lf", &v) != 1) {
     return false;
@@ -640,7 +644,7 @@
   return false;
 }
 
-static bool set_numeric_flag(char* name, char* value, FlagValueOrigin origin) {
+static bool set_numeric_flag(char* name, char* value, Flag::Flags origin) {
   julong v;
   intx intx_v;
   bool is_neg = false;
@@ -673,14 +677,14 @@
   return false;
 }
 
-static bool set_string_flag(char* name, const char* value, FlagValueOrigin origin) {
+static bool set_string_flag(char* name, const char* value, Flag::Flags origin) {
   if (!CommandLineFlags::ccstrAtPut(name, &value, origin))  return false;
   // Contract:  CommandLineFlags always returns a pointer that needs freeing.
   FREE_C_HEAP_ARRAY(char, value, mtInternal);
   return true;
 }
 
-static bool append_to_string_flag(char* name, const char* new_value, FlagValueOrigin origin) {
+static bool append_to_string_flag(char* name, const char* new_value, Flag::Flags origin) {
   const char* old_value = "";
   if (!CommandLineFlags::ccstrAt(name, &old_value))  return false;
   size_t old_len = old_value != NULL ? strlen(old_value) : 0;
@@ -708,7 +712,7 @@
   return true;
 }
 
-bool Arguments::parse_argument(const char* arg, FlagValueOrigin origin) {
+bool Arguments::parse_argument(const char* arg, Flag::Flags origin) {
 
   // range of acceptable characters spelled out for portability reasons
 #define NAME_RANGE  "[abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_]"
@@ -849,7 +853,7 @@
 }
 
 bool Arguments::process_argument(const char* arg,
-    jboolean ignore_unrecognized, FlagValueOrigin origin) {
+    jboolean ignore_unrecognized, Flag::Flags origin) {
 
   JDK_Version since = JDK_Version();
 
@@ -903,7 +907,7 @@
       jio_fprintf(defaultStream::error_stream(),
                   "Did you mean '%s%s%s'?\n",
                   (fuzzy_matched->is_bool()) ? "(+/-)" : "",
-                  fuzzy_matched->name,
+                  fuzzy_matched->_name,
                   (fuzzy_matched->is_bool()) ? "" : "=<value>");
     }
   }
@@ -951,7 +955,7 @@
         // this allows a way to include spaces in string-valued options
         token[pos] = '\0';
         logOption(token);
-        result &= process_argument(token, ignore_unrecognized, CONFIG_FILE);
+        result &= process_argument(token, ignore_unrecognized, Flag::CONFIG_FILE);
         build_jvm_flags(token);
         pos = 0;
         in_white_space = true;
@@ -969,7 +973,7 @@
   }
   if (pos > 0) {
     token[pos] = '\0';
-    result &= process_argument(token, ignore_unrecognized, CONFIG_FILE);
+    result &= process_argument(token, ignore_unrecognized, Flag::CONFIG_FILE);
     build_jvm_flags(token);
   }
   fclose(stream);
@@ -1099,6 +1103,7 @@
   }
 }
 
+#if defined(COMPILER2) || defined(_LP64) || !INCLUDE_CDS
 // Conflict: required to use shared spaces (-Xshare:on), but
 // incompatible command line options were chosen.
 
@@ -1111,6 +1116,7 @@
     FLAG_SET_DEFAULT(UseSharedSpaces, false);
   }
 }
+#endif
 
 void Arguments::set_tiered_flags() {
   // With tiered, set default policy to AdvancedThresholdPolicy, which is 3.
@@ -1129,6 +1135,9 @@
     Tier3InvokeNotifyFreqLog = 0;
     Tier4InvocationThreshold = 0;
   }
+  if (FLAG_IS_DEFAULT(NmethodSweepFraction)) {
+    FLAG_SET_DEFAULT(NmethodSweepFraction, 1 + ReservedCodeCacheSize / (16 * M));
+  }
 }
 
 #if INCLUDE_ALL_GCS
@@ -1394,10 +1403,17 @@
   return true;
 }
 
-inline uintx max_heap_for_compressed_oops() {
+uintx Arguments::max_heap_for_compressed_oops() {
   // Avoid sign flip.
   assert(OopEncodingHeapMax > (uint64_t)os::vm_page_size(), "Unusual page size");
-  LP64_ONLY(return OopEncodingHeapMax - os::vm_page_size());
+  // We need to fit both the NULL page and the heap into the memory budget, while
+  // keeping alignment constraints of the heap. To guarantee the latter, as the
+  // NULL page is located before the heap, we pad the NULL page to the conservative
+  // maximum alignment that the GC may ever impose upon the heap.
+  size_t displacement_due_to_null_page = align_size_up_(os::vm_page_size(),
+    Arguments::conservative_max_heap_alignment());
+
+  LP64_ONLY(return OopEncodingHeapMax - displacement_due_to_null_page);
   NOT_LP64(ShouldNotReachHere(); return 0);
 }
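
A worked example of the new budget, with illustrative numbers: a 3-bit compressed-oop
shift gives a 32 GB encodable range; padding the NULL page (4 KB here) up to an
assumed 32 MB conservative heap alignment leaves 32 GB minus 32 MB for the heap.

    #include <cstdio>

    // Same rounding as align_size_up_ for power-of-two alignments.
    static unsigned long long align_up(unsigned long long size, unsigned long long alignment) {
      return (size + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      const unsigned long long M = 1024ULL * 1024;
      const unsigned long long G = 1024 * M;
      unsigned long long oop_encoding_heap_max = 32 * G;  // 4 GB range << 3-bit shift
      unsigned long long page_size = 4096;                // assumed vm page size
      unsigned long long max_alignment = 32 * M;          // assumed GC max alignment
      unsigned long long displacement = align_up(page_size, max_alignment);
      printf("max heap = %llu MB\n", (oop_encoding_heap_max - displacement) / M);  // 32736 MB
      return 0;
    }
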
 
@@ -1442,7 +1458,7 @@
     if (UseCompressedOops && !FLAG_IS_DEFAULT(UseCompressedOops)) {
       warning("Max heap size too large for Compressed Oops");
       FLAG_SET_DEFAULT(UseCompressedOops, false);
-      FLAG_SET_DEFAULT(UseCompressedKlassPointers, false);
+      FLAG_SET_DEFAULT(UseCompressedClassPointers, false);
     }
   }
 #endif // _LP64
@@ -1455,22 +1471,22 @@
 void Arguments::set_use_compressed_klass_ptrs() {
 #ifndef ZERO
 #ifdef _LP64
-  // UseCompressedOops must be on for UseCompressedKlassPointers to be on.
+  // UseCompressedOops must be on for UseCompressedClassPointers to be on.
   if (!UseCompressedOops) {
-    if (UseCompressedKlassPointers) {
-      warning("UseCompressedKlassPointers requires UseCompressedOops");
+    if (UseCompressedClassPointers) {
+      warning("UseCompressedClassPointers requires UseCompressedOops");
     }
-    FLAG_SET_DEFAULT(UseCompressedKlassPointers, false);
+    FLAG_SET_DEFAULT(UseCompressedClassPointers, false);
   } else {
-    // Turn on UseCompressedKlassPointers too
-    if (FLAG_IS_DEFAULT(UseCompressedKlassPointers)) {
-      FLAG_SET_ERGO(bool, UseCompressedKlassPointers, true);
+    // Turn on UseCompressedClassPointers too
+    if (FLAG_IS_DEFAULT(UseCompressedClassPointers)) {
+      FLAG_SET_ERGO(bool, UseCompressedClassPointers, true);
     }
-    // Check the ClassMetaspaceSize to make sure we use compressed klass ptrs.
-    if (UseCompressedKlassPointers) {
-      if (ClassMetaspaceSize > KlassEncodingMetaspaceMax) {
-        warning("Class metaspace size is too large for UseCompressedKlassPointers");
-        FLAG_SET_DEFAULT(UseCompressedKlassPointers, false);
+    // Check the CompressedClassSpaceSize to make sure we use compressed klass ptrs.
+    if (UseCompressedClassPointers) {
+      if (CompressedClassSpaceSize > KlassEncodingMetaspaceMax) {
+        warning("CompressedClassSpaceSize is too large for UseCompressedClassPointers");
+        FLAG_SET_DEFAULT(UseCompressedClassPointers, false);
       }
     }
   }
@@ -1478,6 +1494,23 @@
 #endif // !ZERO
 }
 
+void Arguments::set_conservative_max_heap_alignment() {
+  // The conservative maximum required alignment for the heap is the maximum of
+  // the alignments imposed by several sources: any requirements from the heap
+  // itself, the collector policy and the maximum page size we may run the VM
+  // with.
+  size_t heap_alignment = GenCollectedHeap::conservative_max_heap_alignment();
+#if INCLUDE_ALL_GCS
+  if (UseParallelGC) {
+    heap_alignment = ParallelScavengeHeap::conservative_max_heap_alignment();
+  } else if (UseG1GC) {
+    heap_alignment = G1CollectedHeap::conservative_max_heap_alignment();
+  }
+#endif // INCLUDE_ALL_GCS
+  _conservative_max_heap_alignment = MAX3(heap_alignment, os::max_page_size(),
+    CollectorPolicy::compute_max_alignment());
+}
+
 void Arguments::set_ergonomics_flags() {
 
   if (os::is_server_class_machine()) {
@@ -1495,16 +1528,20 @@
         FLAG_SET_ERGO(bool, UseParallelGC, true);
       }
     }
-    // Shared spaces work fine with other GCs but causes bytecode rewriting
-    // to be disabled, which hurts interpreter performance and decreases
-    // server performance.   On server class machines, keep the default
-    // off unless it is asked for.  Future work: either add bytecode rewriting
-    // at link time, or rewrite bytecodes in non-shared methods.
-    if (!DumpSharedSpaces && !RequireSharedSpaces &&
-        (FLAG_IS_DEFAULT(UseSharedSpaces) || !UseSharedSpaces)) {
-      no_shared_spaces();
-    }
   }
+#ifdef COMPILER2
+  // Shared spaces work fine with other GCs but cause bytecode rewriting
+  // to be disabled, which hurts interpreter performance and decreases
+  // server performance.  When -server is specified, keep the default off
+  // unless it is asked for.  Future work: either add bytecode rewriting
+  // at link time, or rewrite bytecodes in non-shared methods.
+  if (!DumpSharedSpaces && !RequireSharedSpaces &&
+      (FLAG_IS_DEFAULT(UseSharedSpaces) || !UseSharedSpaces)) {
+    no_shared_spaces();
+  }
+#endif
+
+  set_conservative_max_heap_alignment();
 
 #ifndef ZERO
 #ifdef _LP64
@@ -1608,17 +1645,6 @@
   return result;
 }
 
-void Arguments::set_heap_base_min_address() {
-  if (FLAG_IS_DEFAULT(HeapBaseMinAddress) && UseG1GC && HeapBaseMinAddress < 1*G) {
-    // By default HeapBaseMinAddress is 2G on all platforms except Solaris x86.
-    // G1 currently needs a lot of C-heap, so on Solaris we have to give G1
-    // some extra space for the C-heap compared to other collectors.
-    // Use FLAG_SET_DEFAULT here rather than FLAG_SET_ERGO to make sure that
-    // code that checks for default values work correctly.
-    FLAG_SET_DEFAULT(HeapBaseMinAddress, 1*G);
-  }
-}
-
 void Arguments::set_heap_size() {
   if (!FLAG_IS_DEFAULT(DefaultMaxRAMFraction)) {
     // Deprecated flag
@@ -1853,7 +1879,7 @@
         (NumberOfGCLogFiles == 0)  ||
         (GCLogFileSize == 0)) {
       jio_fprintf(defaultStream::output_stream(),
-                  "To enable GC log rotation, use -Xloggc:<filename> -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=<num_of_files> -XX:GCLogFileSize=<num_of_size>\n"
+                  "To enable GC log rotation, use -Xloggc:<filename> -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=<num_of_files> -XX:GCLogFileSize=<num_of_size>[k|K|m|M|g|G]\n"
                   "where num_of_file > 0 and num_of_size > 0\n"
                   "GC log rotation is turned off\n");
       UseGCLogFileRotation = false;
@@ -1867,6 +1893,51 @@
   }
 }
 
+// This function is called for -Xloggc:<filename>; it checks whether a given
+// file name (or string) conforms to the following specification:
+// a valid string contains only the characters "[A-Z][a-z][0-9].-_%[p|t]",
+// and %p and %t are each allowed at most once. Only the file name is
+// restricted, not the path.
+bool is_filename_valid(const char *file_name) {
+  const char* p = file_name;
+  char file_sep = os::file_separator()[0];
+  const char* cp;
+  // skip prefix path
+  for (cp = file_name; *cp != '\0'; cp++) {
+    if (*cp == '/' || *cp == file_sep) {
+      p = cp + 1;
+    }
+  }
+
+  int count_p = 0;
+  int count_t = 0;
+  while (*p != '\0') {
+    if ((*p >= '0' && *p <= '9') ||
+        (*p >= 'A' && *p <= 'Z') ||
+        (*p >= 'a' && *p <= 'z') ||
+         *p == '-'               ||
+         *p == '_'               ||
+         *p == '.') {
+       p++;
+       continue;
+    }
+    if (*p == '%') {
+      if (*(p + 1) == 'p') {
+        p += 2;
+        count_p++;
+        continue;
+      }
+      if (*(p + 1) == 't') {
+        p += 2;
+        count_t++;
+        continue;
+      }
+    }
+    return false;
+  }
+  return count_p < 2 && count_t < 2;
+}
+
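
A usage sketch for the validator above. is_filename_valid has external linkage in
arguments.cpp, so this driver links against it; the expected results follow from the
rules that the path prefix is skipped and %p/%t may each appear at most once.

    #include <cstdio>

    bool is_filename_valid(const char* file_name);

    int main() {
      printf("%d\n", is_filename_valid("gc.log"));             // 1
      printf("%d\n", is_filename_valid("/tmp/gc_%p_%t.log"));  // 1 (path skipped)
      printf("%d\n", is_filename_valid("gc_%p_%p.log"));       // 0 (%p twice)
      printf("%d\n", is_filename_valid("gc?.log"));            // 0 ('?' not allowed)
      return 0;
    }
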
 // Check consistency of GC selection
 bool Arguments::check_gc_consistency() {
   check_gclog_consistency();
@@ -1889,12 +1960,6 @@
                 "please refer to the release notes for the combinations "
                 "allowed\n");
     status = false;
-  } else if (ReservedCodeCacheSize > 2*G) {
-    // Code cache size larger than MAXINT is not supported.
-    jio_fprintf(defaultStream::error_stream(),
-                "Invalid ReservedCodeCacheSize=%dM. Must be at most %uM.\n", ReservedCodeCacheSize/M,
-                (2*G)/M);
-    status = false;
   }
   return status;
 }
@@ -1926,6 +1991,15 @@
     warning("DefaultMaxRAMFraction is deprecated and will likely be removed in a future release. "
         "Use MaxRAMFraction instead.");
   }
+  if (FLAG_IS_CMDLINE(UseCMSCompactAtFullCollection)) {
+    warning("UseCMSCompactAtFullCollection is deprecated and will likely be removed in a future release.");
+  }
+  if (FLAG_IS_CMDLINE(CMSFullGCsBeforeCompaction)) {
+    warning("CMSFullGCsBeforeCompaction is deprecated and will likely be removed in a future release.");
+  }
+  if (FLAG_IS_CMDLINE(UseCMSCollectionPassing)) {
+    warning("UseCMSCollectionPassing is deprecated and will likely be removed in a future release.");
+  }
 }
 
 // Check stack pages settings
@@ -1977,6 +2051,9 @@
   status = status && verify_interval(StringTableSize, minimumStringTableSize,
     (max_uintx / StringTable::bucket_size()), "StringTable size");
 
+  status = status && verify_interval(SymbolTableSize, minimumSymbolTableSize,
+    (max_uintx / SymbolTable::bucket_size()), "SymbolTable size");
+
   if (MinHeapFreeRatio > MaxHeapFreeRatio) {
     jio_fprintf(defaultStream::error_stream(),
                 "MinHeapFreeRatio (" UINTX_FORMAT ") must be less than or "
@@ -2162,8 +2239,8 @@
 
   status = status && verify_object_alignment();
 
-  status = status && verify_interval(ClassMetaspaceSize, 1*M, 3*G,
-                                      "ClassMetaspaceSize");
+  status = status && verify_interval(CompressedClassSpaceSize, 1*M, 3*G,
+                                      "CompressedClassSpaceSize");
 
   status = status && verify_interval(MarkStackSizeMax,
                                   1, (max_jint - 1), "MarkStackSizeMax");
@@ -2233,7 +2310,7 @@
   // among the distinct pages.
   if (ContendedPaddingWidth < 0 || ContendedPaddingWidth > 8192) {
     jio_fprintf(defaultStream::error_stream(),
-                "ContendedPaddingWidth=" INTX_FORMAT " must be the between %d and %d\n",
+                "ContendedPaddingWidth=" INTX_FORMAT " must be in between %d and %d\n",
                 ContendedPaddingWidth, 0, 8192);
     status = false;
   }
@@ -2242,7 +2319,7 @@
   // It is sufficient to check against the largest type size.
   if ((ContendedPaddingWidth % BytesPerLong) != 0) {
     jio_fprintf(defaultStream::error_stream(),
-                "ContendedPaddingWidth=" INTX_FORMAT " must be the multiple of %d\n",
+                "ContendedPaddingWidth=" INTX_FORMAT " must be a multiple of %d\n",
                 ContendedPaddingWidth, BytesPerLong);
     status = false;
   }
@@ -2272,6 +2349,10 @@
                 (2*G)/M);
     status = false;
   }
+
+  status &= verify_interval(NmethodSweepFraction, 1, ReservedCodeCacheSize/K, "NmethodSweepFraction");
+  status &= verify_interval(NmethodSweepActivity, 0, 2000, "NmethodSweepActivity");
+
   return status;
 }
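The two new checks reuse the verify_interval() helper, which fails with a uniform message when a flag falls outside [min, max]. A reduced sketch of that contract (the signature here is an illustrative stand-in for the one in arguments.cpp):

    #include <cstdio>

    // Illustrative stand-in for Arguments::verify_interval().
    static bool verify_interval(unsigned long value, unsigned long min,
                                unsigned long max, const char* name) {
      if (value < min || value > max) {
        fprintf(stderr, "%s (%lu) must be between %lu and %lu\n",
                name, value, min, max);
        return false;
      }
      return true;
    }

    int main() {
      bool status = true;
      // Mirrors the checks above: sweep fraction in [1, cache-size/K],
      // sweep activity in [0, 2000].
      status &= verify_interval(16,   1, 48 * 1024, "NmethodSweepFraction");
      status &= verify_interval(5000, 0, 2000,      "NmethodSweepActivity");
      printf("status = %d\n", status);   // 0: the second check failed
      return 0;
    }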
 
@@ -2373,26 +2454,11 @@
   }
 
   // Parse JavaVMInitArgs structure passed in
-  result = parse_each_vm_init_arg(args, &scp, &scp_assembly_required, COMMAND_LINE);
+  result = parse_each_vm_init_arg(args, &scp, &scp_assembly_required, Flag::COMMAND_LINE);
   if (result != JNI_OK) {
     return result;
   }
 
-  if (AggressiveOpts) {
-    // Insert alt-rt.jar between user-specified bootclasspath
-    // prefix and the default bootclasspath.  os::set_boot_path()
-    // uses meta_index_dir as the default bootclasspath directory.
-    const char* altclasses_jar = "alt-rt.jar";
-    size_t altclasses_path_len = strlen(get_meta_index_dir()) + 1 +
-                                 strlen(altclasses_jar);
-    char* altclasses_path = NEW_C_HEAP_ARRAY(char, altclasses_path_len, mtInternal);
-    strcpy(altclasses_path, get_meta_index_dir());
-    strcat(altclasses_path, altclasses_jar);
-    scp.add_suffix_to_prefix(altclasses_path);
-    scp_assembly_required = true;
-    FREE_C_HEAP_ARRAY(char, altclasses_path, mtInternal);
-  }
-
   // Parse _JAVA_OPTIONS environment variable (if present) (mimics classic VM)
   result = parse_java_options_environment_variable(&scp, &scp_assembly_required);
   if (result != JNI_OK) {
@@ -2460,7 +2526,7 @@
 jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
                                        SysClassPath* scp_p,
                                        bool* scp_assembly_required_p,
-                                       FlagValueOrigin origin) {
+                                       Flag::Flags origin) {
   // Remaining part of option string
   const char* tail;
 
@@ -2597,16 +2663,16 @@
       FLAG_SET_CMDLINE(bool, BackgroundCompilation, false);
     // -Xmn for compatibility with other JVM vendors
     } else if (match_option(option, "-Xmn", &tail)) {
-      julong long_initial_eden_size = 0;
-      ArgsRange errcode = parse_memory_size(tail, &long_initial_eden_size, 1);
+      julong long_initial_young_size = 0;
+      ArgsRange errcode = parse_memory_size(tail, &long_initial_young_size, 1);
       if (errcode != arg_in_range) {
         jio_fprintf(defaultStream::error_stream(),
-                    "Invalid initial eden size: %s\n", option->optionString);
+                    "Invalid initial young generation size: %s\n", option->optionString);
         describe_range_error(errcode);
         return JNI_EINVAL;
       }
-      FLAG_SET_CMDLINE(uintx, MaxNewSize, (uintx)long_initial_eden_size);
-      FLAG_SET_CMDLINE(uintx, NewSize, (uintx)long_initial_eden_size);
+      FLAG_SET_CMDLINE(uintx, MaxNewSize, (uintx)long_initial_young_size);
+      FLAG_SET_CMDLINE(uintx, NewSize, (uintx)long_initial_young_size);
     // -Xms
     } else if (match_option(option, "-Xms", &tail)) {
       julong long_initial_heap_size = 0;
@@ -2634,8 +2700,9 @@
       FLAG_SET_CMDLINE(uintx, MaxHeapSize, (uintx)long_max_heap_size);
     // Xmaxf
     } else if (match_option(option, "-Xmaxf", &tail)) {
-      int maxf = (int)(atof(tail) * 100);
-      if (maxf < 0 || maxf > 100) {
+      char* err;
+      int maxf = (int)(strtod(tail, &err) * 100);
+      if (*err != '\0' || maxf < 0 || maxf > 100) {
         jio_fprintf(defaultStream::error_stream(),
                     "Bad max heap free percentage size: %s\n",
                     option->optionString);
@@ -2645,8 +2712,9 @@
       }
     // Xminf
     } else if (match_option(option, "-Xminf", &tail)) {
-      int minf = (int)(atof(tail) * 100);
-      if (minf < 0 || minf > 100) {
+      char* err;
+      int minf = (int)(strtod(tail, &err) * 100);
+      if (*err != '\0' || minf < 0 || minf > 100) {
         jio_fprintf(defaultStream::error_stream(),
                     "Bad min heap free percentage size: %s\n",
                     option->optionString);
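Switching from atof() to strtod() is what makes the new *err != '\0' test possible: atof() cannot report trailing garbage, so an argument like -Xmaxf0.85abc used to be accepted as 0.85. A minimal illustration of the difference:

    #include <cstdio>
    #include <cstdlib>

    int main() {
      const char* tail = "0.85abc";

      // atof(): no error channel; trailing garbage is silently dropped.
      int via_atof = (int)(atof(tail) * 100);          // 85, accepted

      // strtod(): err is left pointing at the first unparsed character.
      char* err;
      int via_strtod = (int)(strtod(tail, &err) * 100);
      bool ok = (*err == '\0' && via_strtod >= 0 && via_strtod <= 100);

      printf("atof=%d strtod=%d ok=%d\n", via_atof, via_strtod, ok);  // ok=0
      return 0;
    }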
@@ -2820,6 +2888,13 @@
       // ostream_init_log(), when called will use this filename
       // to initialize a fileStream.
       _gc_log_filename = strdup(tail);
+      if (!is_filename_valid(_gc_log_filename)) {
+        jio_fprintf(defaultStream::output_stream(),
+                    "Invalid file name for use with -Xloggc: Filename can only contain the "
+                    "characters [A-Z][a-z][0-9]-_.%%[p|t] but it is %s\n"
+                    "Note %%p or %%t can only be used once\n", _gc_log_filename);
+        return JNI_EINVAL;
+      }
       FLAG_SET_CMDLINE(bool, PrintGC, true);
       FLAG_SET_CMDLINE(bool, PrintGCTimeStamps, true);
 
@@ -3276,7 +3351,7 @@
       }
     }
 
-    return(parse_each_vm_init_arg(&vm_args, scp_p, scp_assembly_required_p, ENVIRON_VAR));
+    return(parse_each_vm_init_arg(&vm_args, scp_p, scp_assembly_required_p, Flag::ENVIRON_VAR));
   }
   return JNI_OK;
 }
@@ -3288,13 +3363,13 @@
     }
     UseSharedSpaces = false;
 #ifdef _LP64
-    if (!UseCompressedOops || !UseCompressedKlassPointers) {
+    if (!UseCompressedOops || !UseCompressedClassPointers) {
       vm_exit_during_initialization(
-        "Cannot dump shared archive when UseCompressedOops or UseCompressedKlassPointers is off.", NULL);
+        "Cannot dump shared archive when UseCompressedOops or UseCompressedClassPointers is off.", NULL);
     }
   } else {
-    // UseCompressedOops and UseCompressedKlassPointers must be on for UseSharedSpaces.
-    if (!UseCompressedOops || !UseCompressedKlassPointers) {
+    // UseCompressedOops and UseCompressedClassPointers must be on for UseSharedSpaces.
+    if (!UseCompressedOops || !UseCompressedClassPointers) {
       no_shared_spaces();
     }
 #endif
@@ -3340,6 +3415,33 @@
   return shared_archive_path;
 }
 
+#ifndef PRODUCT
+// Determine whether LogVMOutput should be implicitly turned on.
+static bool use_vm_log() {
+  if (LogCompilation || !FLAG_IS_DEFAULT(LogFile) ||
+      PrintCompilation || PrintInlining || PrintDependencies || PrintNativeNMethods ||
+      PrintDebugInfo || PrintRelocations || PrintNMethods || PrintExceptionHandlers ||
+      PrintAssembly || TraceDeoptimization || TraceDependencies ||
+      (VerifyDependencies && FLAG_IS_CMDLINE(VerifyDependencies))) {
+    return true;
+  }
+
+#ifdef COMPILER1
+  if (PrintC1Statistics) {
+    return true;
+  }
+#endif // COMPILER1
+
+#ifdef COMPILER2
+  if (PrintOptoAssembly || PrintOptoStatistics) {
+    return true;
+  }
+#endif // COMPILER2
+
+  return false;
+}
+#endif // PRODUCT
+
 // Parse entry point called from JNI_CreateJavaVM
 
 jint Arguments::parse(const JavaVMInitArgs* args) {
@@ -3520,6 +3622,11 @@
   no_shared_spaces();
 #endif // INCLUDE_CDS
 
+  return JNI_OK;
+}
+
+jint Arguments::apply_ergo() {
+
   // Set flags based on ergonomics.
   set_ergonomics_flags();
 
@@ -3540,8 +3647,6 @@
     }
   }
 
-  set_heap_base_min_address();
-
   // Set heap size based on available physical memory
   set_heap_size();
 
@@ -3569,6 +3674,9 @@
   assert(verify_serial_gc_flags(), "SerialGC unset");
 #endif // INCLUDE_ALL_GCS
 
+  // Initialize Metaspace flags and alignments.
+  Metaspace::ergo_initialize();
+
   // Set bytecode rewriting flags
   set_bytecode_flags();
 
@@ -3597,7 +3705,7 @@
   FLAG_SET_DEFAULT(ProfileInterpreter, false);
   FLAG_SET_DEFAULT(UseBiasedLocking, false);
   LP64_ONLY(FLAG_SET_DEFAULT(UseCompressedOops, false));
-  LP64_ONLY(FLAG_SET_DEFAULT(UseCompressedKlassPointers, false));
+  LP64_ONLY(FLAG_SET_DEFAULT(UseCompressedClassPointers, false));
 #endif // CC_INTERP
 
 #ifdef COMPILER2
@@ -3619,6 +3727,18 @@
     // incremental inlining: bump MaxNodeLimit
     FLAG_SET_DEFAULT(MaxNodeLimit, (intx)75000);
   }
+  if (!UseTypeSpeculation && FLAG_IS_DEFAULT(TypeProfileLevel)) {
+    // nothing uses the profiling, so turn it off
+    FLAG_SET_DEFAULT(TypeProfileLevel, 0);
+  }
+  if (UseTypeSpeculation && FLAG_IS_DEFAULT(ReplaceInParentMaps)) {
+    // Doing the replace in parent maps helps speculation
+    FLAG_SET_DEFAULT(ReplaceInParentMaps, true);
+  }
+#ifndef X86
+  // Only on x86 for now
+  FLAG_SET_DEFAULT(TypeProfileLevel, 0);
+#endif
 #endif
 
   if (PrintAssembly && FLAG_IS_DEFAULT(DebugNonSafepoints)) {
@@ -3626,6 +3746,10 @@
     DebugNonSafepoints = true;
   }
 
+  if (FLAG_IS_CMDLINE(CompressedClassSpaceSize) && !UseCompressedClassPointers) {
+    warning("Setting CompressedClassSpaceSize has no effect when compressed class pointers are not used");
+  }
+
 #ifndef PRODUCT
   if (CompileTheWorld) {
     // Force NmethodSweeper to sweep whole CodeCache each time.
@@ -3633,7 +3757,13 @@
       NmethodSweepFraction = 1;
     }
   }
-#endif
+
+  if (!LogVMOutput && FLAG_IS_DEFAULT(LogVMOutput)) {
+    if (use_vm_log()) {
+      LogVMOutput = true;
+    }
+  }
+#endif // PRODUCT
 
   if (PrintCommandLineFlags) {
     CommandLineFlags::printSetFlags(tty);
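With the hunks above, parse() now returns JNI_OK once the flags are read, and the ergonomics that used to run at its tail live in the new apply_ergo(). A sketch of the resulting two-phase startup under that assumption (the caller and stand-in types here are illustrative; in HotSpot the calls come from the VM creation path):

    // Minimal stand-ins so the sketch is self-contained; the real
    // declarations are in jni.h and arguments.hpp.
    typedef int jint;
    static const jint JNI_OK = 0;
    struct JavaVMInitArgs;
    struct Arguments {
      static jint parse(const JavaVMInitArgs*) { return JNI_OK; }
      static jint apply_ergo()                 { return JNI_OK; }
    };

    // Phase split: platform-dependent setup can now run between reading
    // the flags and acting on them ergonomically.
    jint create_vm(const JavaVMInitArgs* args) {
      jint result = Arguments::parse(args);      // phase 1: read the flags
      if (result != JNI_OK) return result;
      // ... OS and CPU dependent initialization would run here ...
      return Arguments::apply_ergo();            // phase 2: apply ergonomics
    }

    int main() { return create_vm(0) == JNI_OK ? 0 : 1; }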
--- a/src/share/vm/runtime/arguments.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/runtime/arguments.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -144,7 +144,7 @@
   void set_os_lib(void* os_lib)             { _os_lib = os_lib; }
   AgentLibrary* next() const                { return _next; }
   bool is_static_lib() const                { return _is_static_lib; }
-  void set_static_lib(bool static_lib)      { _is_static_lib = static_lib; }
+  void set_static_lib(bool is_static_lib)   { _is_static_lib = is_static_lib; }
   bool valid()                              { return (_state == agent_valid); }
   void set_valid()                          { _state = agent_valid; }
   void set_invalid()                        { _state = agent_invalid; }
@@ -280,6 +280,9 @@
   // Option flags
   static bool   _has_profile;
   static const char*  _gc_log_filename;
+  // Conservative maximum heap alignment needed; see set_conservative_max_heap_alignment().
+  static size_t  _conservative_max_heap_alignment;
+
   static uintx  _min_heap_size;
 
   // -Xrun arguments
@@ -327,6 +330,7 @@
   // Garbage-First (UseG1GC)
   static void set_g1_gc_flags();
   // GC ergonomics
+  static void set_conservative_max_heap_alignment();
   static void set_use_compressed_oops();
   static void set_use_compressed_klass_ptrs();
   static void set_ergonomics_flags();
@@ -334,8 +338,6 @@
   // limits the given memory size by the maximum amount of memory this process is
   // currently allowed to allocate or reserve.
   static julong limit_by_allocatable_memory(julong size);
-  // Setup HeapBaseMinAddress
-  static void set_heap_base_min_address();
   // Setup heap size
   static void set_heap_size();
   // Based on automatic selection criteria, should the
@@ -358,15 +360,15 @@
 
   // Argument parsing
   static void do_pd_flag_adjustments();
-  static bool parse_argument(const char* arg, FlagValueOrigin origin);
-  static bool process_argument(const char* arg, jboolean ignore_unrecognized, FlagValueOrigin origin);
+  static bool parse_argument(const char* arg, Flag::Flags origin);
+  static bool process_argument(const char* arg, jboolean ignore_unrecognized, Flag::Flags origin);
   static void process_java_launcher_argument(const char*, void*);
   static void process_java_compiler_argument(char* arg);
   static jint parse_options_environment_variable(const char* name, SysClassPath* scp_p, bool* scp_assembly_required_p);
   static jint parse_java_tool_options_environment_variable(SysClassPath* scp_p, bool* scp_assembly_required_p);
   static jint parse_java_options_environment_variable(SysClassPath* scp_p, bool* scp_assembly_required_p);
   static jint parse_vm_init_args(const JavaVMInitArgs* args);
-  static jint parse_each_vm_init_arg(const JavaVMInitArgs* args, SysClassPath* scp_p, bool* scp_assembly_required_p, FlagValueOrigin origin);
+  static jint parse_each_vm_init_arg(const JavaVMInitArgs* args, SysClassPath* scp_p, bool* scp_assembly_required_p, Flag::Flags origin);
   static jint finalize_vm_init_args(SysClassPath* scp_p, bool scp_assembly_required);
   static bool is_bad_option(const JavaVMOption* option, jboolean ignore,
     const char* option_type);
@@ -432,8 +434,10 @@
   static char*  SharedArchivePath;
 
  public:
-  // Parses the arguments
+  // Parses the arguments, first phase
   static jint parse(const JavaVMInitArgs* args);
+  // Apply ergonomics
+  static jint apply_ergo();
   // Adjusts the arguments after the OS have adjusted the arguments
   static jint adjust_after_os();
   // Check for consistency in the selection of the garbage collector.
@@ -447,6 +451,10 @@
   // Used by os_solaris
   static bool process_settings_file(const char* file_name, bool should_exist, jboolean ignore_unrecognized);
 
+  static size_t conservative_max_heap_alignment() { return _conservative_max_heap_alignment; }
+  // Return the maximum size a heap with compressed oops can take
+  static size_t max_heap_for_compressed_oops();
+
   // return a char* array containing all options
   static char** jvm_flags_array()          { return _jvm_flags_array; }
   static char** jvm_args_array()           { return _jvm_args_array; }
--- a/src/share/vm/runtime/biasedLocking.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/runtime/biasedLocking.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -161,7 +161,7 @@
   if (TraceBiasedLocking && (Verbose || !is_bulk)) {
     ResourceMark rm;
     tty->print_cr("Revoking bias of object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s , prototype header " INTPTR_FORMAT " , allow rebias %d , requesting thread " INTPTR_FORMAT,
-                  (intptr_t) obj, (intptr_t) mark, obj->klass()->external_name(), (intptr_t) obj->klass()->prototype_header(), (allow_rebias ? 1 : 0), (intptr_t) requesting_thread);
+                  (void *)obj, (intptr_t) mark, obj->klass()->external_name(), (intptr_t) obj->klass()->prototype_header(), (allow_rebias ? 1 : 0), (intptr_t) requesting_thread);
   }
 
   JavaThread* biased_thread = mark->biased_locker();
@@ -214,8 +214,8 @@
     if (mon_info->owner() == obj) {
       if (TraceBiasedLocking && Verbose) {
         tty->print_cr("   mon_info->owner (" PTR_FORMAT ") == obj (" PTR_FORMAT ")",
-                      (intptr_t) mon_info->owner(),
-                      (intptr_t) obj);
+                      (void *) mon_info->owner(),
+                      (void *) obj);
       }
       // Assume recursive case and fix up highest lock later
       markOop mark = markOopDesc::encode((BasicLock*) NULL);
@@ -224,8 +224,8 @@
     } else {
       if (TraceBiasedLocking && Verbose) {
         tty->print_cr("   mon_info->owner (" PTR_FORMAT ") != obj (" PTR_FORMAT ")",
-                      (intptr_t) mon_info->owner(),
-                      (intptr_t) obj);
+                      (void *) mon_info->owner(),
+                      (void *) obj);
       }
     }
   }
@@ -326,7 +326,7 @@
     tty->print_cr("* Beginning bulk revocation (kind == %s) because of object "
                   INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                   (bulk_rebias ? "rebias" : "revoke"),
-                  (intptr_t) o, (intptr_t) o->mark(), o->klass()->external_name());
+                  (void *) o, (intptr_t) o->mark(), o->klass()->external_name());
   }
 
   jlong cur_time = os::javaTimeMillis();
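The (void *) casts above exist because, in builds where oop is a real C++ class (CHECK_UNHANDLED_OOPS), converting it to intptr_t is ambiguous, while conversion to void* is unambiguous and matches the pointer format. The same idiom with plain C++ types (INTPTR_FORMAT is a HotSpot macro; %p stands in for it here):

    #include <cstdio>

    struct Object { int x; };

    int main() {
      Object obj = { 42 };
      Object* p = &obj;

      // Print a pointer portably: cast to void* and use the pointer
      // conversion. An integer cast would instead force a width choice
      // (int vs. long), which is exactly the LP64 ambiguity the diff avoids.
      printf("object at %p\n", (void*)p);
      return 0;
    }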
--- a/src/share/vm/runtime/deoptimization.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/runtime/deoptimization.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -237,7 +237,7 @@
         assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
         if (TraceDeoptimization) {
           ttyLocker ttyl;
-          tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, result, thread);
+          tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, (void *)result, thread);
         }
       }
       bool reallocated = false;
@@ -281,7 +281,7 @@
                   first = false;
                   tty->print_cr("RELOCK OBJECTS in thread " INTPTR_FORMAT, thread);
                 }
-                tty->print_cr("     object <" INTPTR_FORMAT "> locked", mi->owner());
+                tty->print_cr("     object <" INTPTR_FORMAT "> locked", (void *)mi->owner());
               }
             }
           }
@@ -980,7 +980,7 @@
     KlassHandle k(java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()()));
     Handle obj = sv->value();
 
-    tty->print("     object <" INTPTR_FORMAT "> of type ", sv->value()());
+    tty->print("     object <" INTPTR_FORMAT "> of type ", (void *)sv->value()());
     k->print_value();
     tty->print(" allocated (%d bytes)", obj->size() * HeapWordSize);
     tty->cr();
@@ -1754,7 +1754,7 @@
   else    return trap_state & ~DS_RECOMPILE_BIT;
 }
 //---------------------------format_trap_state---------------------------------
-// This is used for debugging and diagnostics, including hotspot.log output.
+// This is used for debugging and diagnostics, including LogFile output.
 const char* Deoptimization::format_trap_state(char* buf, size_t buflen,
                                               int trap_state) {
   DeoptReason reason      = trap_state_reason(trap_state);
@@ -1831,7 +1831,7 @@
   return buf;
 }
 
-// This is used for debugging and diagnostics, including hotspot.log output.
+// This is used for debugging and diagnostics, including LogFile output.
 const char* Deoptimization::format_trap_request(char* buf, size_t buflen,
                                                 int trap_request) {
   jint unloaded_class_index = trap_request_index(trap_request);
--- a/src/share/vm/runtime/fieldDescriptor.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/runtime/fieldDescriptor.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -97,18 +97,32 @@
   return constants()->uncached_string_at(initial_value_index(), CHECK_0);
 }
 
-void fieldDescriptor::initialize(InstanceKlass* ik, int index) {
-  _cp = ik->constants();
+void fieldDescriptor::reinitialize(InstanceKlass* ik, int index) {
+  if (_cp.is_null() || field_holder() != ik) {
+    _cp = constantPoolHandle(Thread::current(), ik->constants());
+    // _cp should now reference ik's constant pool; i.e., ik is now field_holder.
+    assert(field_holder() == ik, "must be already initialized to this class");
+  }
   FieldInfo* f = ik->field(index);
   assert(!f->is_internal(), "regular Java fields only");
 
   _access_flags = accessFlags_from(f->access_flags());
   guarantee(f->name_index() != 0 && f->signature_index() != 0, "bad constant pool index for fieldDescriptor");
   _index = index;
+  verify();
 }
 
 #ifndef PRODUCT
 
+void fieldDescriptor::verify() const {
+  if (_cp.is_null()) {
+    assert(_index == badInt, "constructor must be called");  // see constructor
+  } else {
+    assert(_index >= 0, "good index");
+    assert(_index < field_holder()->java_fields_count(), "oob");
+  }
+}
+
 void fieldDescriptor::print_on(outputStream* st) const {
   access_flags().print_on(st);
   name()->print_value_on(st);
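reinitialize() refreshes the constant-pool handle only when the holder actually changes, so a single descriptor can be re-pointed at every field of a class without re-acquiring the handle each time. A sketch of that reuse pattern with illustrative stand-in types:

    #include <cstdio>
    #include <cstddef>

    // Illustrative stand-ins for InstanceKlass and fieldDescriptor.
    struct Klass { const char* name; int field_count; };

    struct Descriptor {
      const Klass* holder;
      int index;
      Descriptor() : holder(NULL), index(-1) {}
      void reinitialize(const Klass* ik, int i) {
        if (holder != ik) {
          holder = ik;   // refresh the (stand-in) handle only on a new holder
        }
        index = i;
      }
    };

    int main() {
      Klass k = { "Foo", 3 };
      Descriptor fd;                          // one descriptor, reused per field
      for (int i = 0; i < k.field_count; i++) {
        fd.reinitialize(&k, i);
        printf("%s field %d\n", fd.holder->name, fd.index);
      }
      return 0;
    }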
--- a/src/share/vm/runtime/fieldDescriptor.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/runtime/fieldDescriptor.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -53,6 +53,13 @@
   }
 
  public:
+  fieldDescriptor() {
+    DEBUG_ONLY(_index = badInt);
+  }
+  fieldDescriptor(InstanceKlass* ik, int index) {
+    DEBUG_ONLY(_index = badInt);
+    reinitialize(ik, index);
+  }
   Symbol* name() const {
     return field()->name(_cp);
   }
@@ -112,12 +119,13 @@
   }
 
   // Initialization
-  void initialize(InstanceKlass* ik, int index);
+  void reinitialize(InstanceKlass* ik, int index);
 
   // Print
   void print() { print_on(tty); }
   void print_on(outputStream* st) const         PRODUCT_RETURN;
   void print_on_for(outputStream* st, oop obj)  PRODUCT_RETURN;
+  void verify() const                           PRODUCT_RETURN;
 };
 
 #endif // SHARE_VM_RUNTIME_FIELDDESCRIPTOR_HPP
--- a/src/share/vm/runtime/fprofiler.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/runtime/fprofiler.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -264,7 +264,7 @@
 
  public:
 
-  void* operator new(size_t size, ThreadProfiler* tp);
+  void* operator new(size_t size, ThreadProfiler* tp) throw();
   void  operator delete(void* p);
 
   ProfilerNode() {
@@ -373,7 +373,7 @@
   }
 };
 
-void* ProfilerNode::operator new(size_t size, ThreadProfiler* tp){
+void* ProfilerNode::operator new(size_t size, ThreadProfiler* tp) throw() {
   void* result = (void*) tp->area_top;
   tp->area_top += size;
 
@@ -925,6 +925,8 @@
       FlatProfiler::interval_print();
       FlatProfiler::interval_reset();
     }
+
+    FREE_C_HEAP_ARRAY(JavaThread *, threadsList, mtInternal);
   } else {
     // Couldn't get the threads lock, just record that rather than blocking
     FlatProfiler::threads_lock_ticks += 1;
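Adding throw() to these operator new overloads matters because an allocation function that is not declared non-throwing is assumed by the compiler never to return NULL, so callers' null checks can be elided. A reduced sketch of an arena-style overload with the corrected signature (C++98 style, to match the code; the arena is illustrative):

    #include <cstdio>
    #include <cstddef>

    struct Arena {
      char  buf[1024];
      char* top;
      Arena() : top(buf) {}
    };

    struct Node {
      int value;
      // throw() tells the compiler this overload may return NULL, so the
      // null check after the new-expression below is preserved.
      void* operator new(size_t size, Arena* a) throw() {
        if (a->top + size > a->buf + sizeof(a->buf)) return NULL;  // exhausted
        void* p = a->top;
        a->top += size;                     // bump-pointer allocation
        return p;
      }
      void operator delete(void*) {}        // arena memory is freed in bulk
    };

    int main() {
      Arena arena;
      Node* n = new (&arena) Node;          // uses the overload above
      if (n != NULL) {                      // not elided, thanks to throw()
        n->value = 7;
        printf("%d\n", n->value);
      }
      return 0;
    }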
--- a/src/share/vm/runtime/frame.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/runtime/frame.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -652,7 +652,7 @@
 // Return whether the frame is in the VM or os indicating a Hotspot problem.
 // Otherwise, it's likely a bug in the native library that the Java code calls,
 // hopefully indicating where to submit bugs.
-static void print_C_frame(outputStream* st, char* buf, int buflen, address pc) {
+void frame::print_C_frame(outputStream* st, char* buf, int buflen, address pc) {
   // C/C++ frame
   bool in_vm = os::address_is_in_vm(pc);
   st->print(in_vm ? "V" : "C");
@@ -1097,7 +1097,7 @@
     return NULL;
   }
   oop r = *oop_adr;
-  assert(Universe::heap()->is_in_or_null(r), err_msg("bad receiver: " INTPTR_FORMAT " (" INTX_FORMAT ")", (intptr_t) r, (intptr_t) r));
+  assert(Universe::heap()->is_in_or_null(r), err_msg("bad receiver: " INTPTR_FORMAT " (" INTX_FORMAT ")", (void *) r, (void *) r));
   return r;
 }
 
@@ -1228,9 +1228,7 @@
 
 void frame::ZapDeadClosure::do_oop(oop* p) {
   if (TraceZapDeadLocals) tty->print_cr("zapping @ " INTPTR_FORMAT " containing " INTPTR_FORMAT, p, (address)*p);
-  // Need cast because on _LP64 the conversion to oop is ambiguous.  Constant
-  // can be either long or int.
-  *p = (oop)(int)0xbabebabe;
+  *p = cast_to_oop<intptr_t>(0xbabebabe);
 }
 frame::ZapDeadClosure frame::_zap_dead;
 
--- a/src/share/vm/runtime/frame.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/runtime/frame.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -409,6 +409,7 @@
   void print_on(outputStream* st) const;
   void interpreter_frame_print_on(outputStream* st) const;
   void print_on_error(outputStream* st, char* buf, int buflen, bool verbose = false) const;
+  static void print_C_frame(outputStream* st, char* buf, int buflen, address pc);
 
   // Add annotated descriptions of memory locations belonging to this frame to values
   void describe(FrameValues& values, int frame_no);
--- a/src/share/vm/runtime/globals.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/runtime/globals.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -62,26 +62,174 @@
 MATERIALIZE_FLAGS_EXT
 
 
+void Flag::check_writable() {
+  if (is_constant_in_binary()) {
+    fatal(err_msg("flag is constant: %s", _name));
+  }
+}
+
+bool Flag::is_bool() const {
+  return strcmp(_type, "bool") == 0;
+}
+
+bool Flag::get_bool() const {
+  return *((bool*) _addr);
+}
+
+void Flag::set_bool(bool value) {
+  check_writable();
+  *((bool*) _addr) = value;
+}
+
+bool Flag::is_intx() const {
+  return strcmp(_type, "intx")  == 0;
+}
+
+intx Flag::get_intx() const {
+  return *((intx*) _addr);
+}
+
+void Flag::set_intx(intx value) {
+  check_writable();
+  *((intx*) _addr) = value;
+}
+
+bool Flag::is_uintx() const {
+  return strcmp(_type, "uintx") == 0;
+}
+
+uintx Flag::get_uintx() const {
+  return *((uintx*) _addr);
+}
+
+void Flag::set_uintx(uintx value) {
+  check_writable();
+  *((uintx*) _addr) = value;
+}
+
+bool Flag::is_uint64_t() const {
+  return strcmp(_type, "uint64_t") == 0;
+}
+
+uint64_t Flag::get_uint64_t() const {
+  return *((uint64_t*) _addr);
+}
+
+void Flag::set_uint64_t(uint64_t value) {
+  check_writable();
+  *((uint64_t*) _addr) = value;
+}
+
+bool Flag::is_double() const {
+  return strcmp(_type, "double") == 0;
+}
+
+double Flag::get_double() const {
+  return *((double*) _addr);
+}
+
+void Flag::set_double(double value) {
+  check_writable();
+  *((double*) _addr) = value;
+}
+
+bool Flag::is_ccstr() const {
+  return strcmp(_type, "ccstr") == 0 || strcmp(_type, "ccstrlist") == 0;
+}
+
+bool Flag::ccstr_accumulates() const {
+  return strcmp(_type, "ccstrlist") == 0;
+}
+
+ccstr Flag::get_ccstr() const {
+  return *((ccstr*) _addr);
+}
+
+void Flag::set_ccstr(ccstr value) {
+  check_writable();
+  *((ccstr*) _addr) = value;
+}
+
+
+Flag::Flags Flag::get_origin() {
+  return Flags(_flags & VALUE_ORIGIN_MASK);
+}
+
+void Flag::set_origin(Flags origin) {
+  assert((origin & VALUE_ORIGIN_MASK) == origin, "sanity");
+  _flags = Flags((_flags & ~VALUE_ORIGIN_MASK) | origin);
+}
+
+bool Flag::is_default() {
+  return (get_origin() == DEFAULT);
+}
+
+bool Flag::is_ergonomic() {
+  return (get_origin() == ERGONOMIC);
+}
+
+bool Flag::is_command_line() {
+  return (get_origin() == COMMAND_LINE);
+}
+
+bool Flag::is_product() const {
+  return (_flags & KIND_PRODUCT) != 0;
+}
+
+bool Flag::is_manageable() const {
+  return (_flags & KIND_MANAGEABLE) != 0;
+}
+
+bool Flag::is_diagnostic() const {
+  return (_flags & KIND_DIAGNOSTIC) != 0;
+}
+
+bool Flag::is_experimental() const {
+  return (_flags & KIND_EXPERIMENTAL) != 0;
+}
+
+bool Flag::is_notproduct() const {
+  return (_flags & KIND_NOT_PRODUCT) != 0;
+}
+
+bool Flag::is_develop() const {
+  return (_flags & KIND_DEVELOP) != 0;
+}
+
+bool Flag::is_read_write() const {
+  return (_flags & KIND_READ_WRITE) != 0;
+}
+
+bool Flag::is_commercial() const {
+  return (_flags & KIND_COMMERCIAL) != 0;
+}
+
+/**
+ * Returns whether this flag is a constant in the binary.  Right now this is
+ * true for notproduct and develop flags in product builds.
+ */
+bool Flag::is_constant_in_binary() const {
+#ifdef PRODUCT
+  return is_notproduct() || is_develop();
+#else
+  return false;
+#endif
+}
+
 bool Flag::is_unlocker() const {
-  return strcmp(name, "UnlockDiagnosticVMOptions") == 0     ||
-         strcmp(name, "UnlockExperimentalVMOptions") == 0   ||
+  return strcmp(_name, "UnlockDiagnosticVMOptions") == 0     ||
+         strcmp(_name, "UnlockExperimentalVMOptions") == 0   ||
          is_unlocker_ext();
 }
 
 bool Flag::is_unlocked() const {
-  if (strcmp(kind, "{diagnostic}") == 0 ||
-      strcmp(kind, "{C2 diagnostic}") == 0 ||
-      strcmp(kind, "{ARCH diagnostic}") == 0 ||
-      strcmp(kind, "{Shark diagnostic}") == 0) {
+  if (is_diagnostic()) {
     return UnlockDiagnosticVMOptions;
-  } else if (strcmp(kind, "{experimental}") == 0 ||
-             strcmp(kind, "{C2 experimental}") == 0 ||
-             strcmp(kind, "{ARCH experimental}") == 0 ||
-             strcmp(kind, "{Shark experimental}") == 0) {
+  }
+  if (is_experimental()) {
     return UnlockExperimentalVMOptions;
-  } else {
-    return is_unlocked_ext();
   }
+  return is_unlocked_ext();
 }
 
 // Get custom message for this locked flag, or return NULL if
@@ -91,16 +239,14 @@
 }
 
 bool Flag::is_writeable() const {
-  return strcmp(kind, "{manageable}") == 0 ||
-         strcmp(kind, "{product rw}") == 0 ||
-         is_writeable_ext();
+  return is_manageable() || (is_product() && is_read_write()) || is_writeable_ext();
 }
 
 // All flags except "manageable" are assumed to be internal flags.
 // Long term, we need to define a mechanism to specify which flags
 // are external/stable and change this function accordingly.
 bool Flag::is_external() const {
-  return strcmp(kind, "{manageable}") == 0 || is_external_ext();
+  return is_manageable() || is_external_ext();
 }
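The thrust of this hunk is that a flag's value origin and its kind are now packed into one _flags word: the origin lives in the low bits and each kind is a single bit above them, so set_origin() can replace the origin without disturbing the kind. A minimal sketch of that encoding with illustrative mask values (the real constants are declared in globals.hpp):

    #include <cstdio>

    // Illustrative layout: origin in the low 4 bits, kinds as bits above.
    enum {
      DEFAULT           = 0,
      COMMAND_LINE      = 2,
      VALUE_ORIGIN_MASK = 0xF,
      KIND_PRODUCT      = 1 << 4,
      KIND_DIAGNOSTIC   = 1 << 5
    };

    struct Flag {
      int _flags;
      int  get_origin() const     { return _flags & VALUE_ORIGIN_MASK; }
      void set_origin(int origin) { _flags = (_flags & ~VALUE_ORIGIN_MASK) | origin; }
      bool is_diagnostic() const  { return (_flags & KIND_DIAGNOSTIC) != 0; }
      bool is_default() const     { return get_origin() == DEFAULT; }
    };

    int main() {
      Flag f = { KIND_DIAGNOSTIC | DEFAULT };
      f.set_origin(COMMAND_LINE);            // origin changes, kind bits survive
      printf("diagnostic=%d default=%d origin=%d\n",
             f.is_diagnostic(), f.is_default(), f.get_origin());  // 1 0 2
      return 0;
    }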
 
 
@@ -108,53 +254,113 @@
 #define FORMAT_BUFFER_LEN 16
 
 void Flag::print_on(outputStream* st, bool withComments) {
-  st->print("%9s %-40s %c= ", type, name, (origin != DEFAULT ? ':' : ' '));
-  if (is_bool())     st->print("%-16s", get_bool() ? "true" : "false");
-  if (is_intx())     st->print("%-16ld", get_intx());
-  if (is_uintx())    st->print("%-16lu", get_uintx());
-  if (is_uint64_t()) st->print("%-16lu", get_uint64_t());
-  if (is_double())   st->print("%-16f", get_double());
+  // Don't print notproduct and develop flags in a product build.
+  if (is_constant_in_binary()) {
+    return;
+  }
+
+  st->print("%9s %-40s %c= ", _type, _name, (!is_default() ? ':' : ' '));
 
+  if (is_bool()) {
+    st->print("%-16s", get_bool() ? "true" : "false");
+  }
+  if (is_intx()) {
+    st->print("%-16ld", get_intx());
+  }
+  if (is_uintx()) {
+    st->print("%-16lu", get_uintx());
+  }
+  if (is_uint64_t()) {
+    st->print("%-16lu", get_uint64_t());
+  }
+  if (is_double()) {
+    st->print("%-16f", get_double());
+  }
   if (is_ccstr()) {
-     const char* cp = get_ccstr();
-     if (cp != NULL) {
-       const char* eol;
-       while ((eol = strchr(cp, '\n')) != NULL) {
-         char format_buffer[FORMAT_BUFFER_LEN];
-         size_t llen = pointer_delta(eol, cp, sizeof(char));
-         jio_snprintf(format_buffer, FORMAT_BUFFER_LEN,
-                     "%%." SIZE_FORMAT "s", llen);
-         st->print(format_buffer, cp);
-         st->cr();
-         cp = eol+1;
-         st->print("%5s %-35s += ", "", name);
-       }
-       st->print("%-16s", cp);
-     }
-     else st->print("%-16s", "");
+    const char* cp = get_ccstr();
+    if (cp != NULL) {
+      const char* eol;
+      while ((eol = strchr(cp, '\n')) != NULL) {
+        char format_buffer[FORMAT_BUFFER_LEN];
+        size_t llen = pointer_delta(eol, cp, sizeof(char));
+        jio_snprintf(format_buffer, FORMAT_BUFFER_LEN,
+            "%%." SIZE_FORMAT "s", llen);
+        st->print(format_buffer, cp);
+        st->cr();
+        cp = eol+1;
+        st->print("%5s %-35s += ", "", _name);
+      }
+      st->print("%-16s", cp);
+    }
+    else st->print("%-16s", "");
   }
-  st->print("%-20s", kind);
+
+  st->print("%-20");
+  print_kind(st);
+
   if (withComments) {
 #ifndef PRODUCT
-    st->print("%s", doc );
+    st->print("%s", _doc);
 #endif
   }
   st->cr();
 }
 
+void Flag::print_kind(outputStream* st) {
+  struct Data {
+    int flag;
+    const char* name;
+  };
+
+  Data data[] = {
+      { KIND_C1, "C1" },
+      { KIND_C2, "C2" },
+      { KIND_ARCH, "ARCH" },
+      { KIND_SHARK, "SHARK" },
+      { KIND_PLATFORM_DEPENDENT, "pd" },
+      { KIND_PRODUCT, "product" },
+      { KIND_MANAGEABLE, "manageable" },
+      { KIND_DIAGNOSTIC, "diagnostic" },
+      { KIND_NOT_PRODUCT, "notproduct" },
+      { KIND_DEVELOP, "develop" },
+      { KIND_LP64_PRODUCT, "lp64_product" },
+      { KIND_READ_WRITE, "rw" },
+      { -1, "" }
+  };
+
+  if ((_flags & KIND_MASK) != 0) {
+    st->print("{");
+    bool is_first = true;
+
+    for (int i = 0; data[i].flag != -1; i++) {
+      Data d = data[i];
+      if ((_flags & d.flag) != 0) {
+        if (is_first) {
+          is_first = false;
+        } else {
+          st->print(" ");
+        }
+        st->print("%s", d.name);
+      }
+    }
+
+    st->print("}");
+  }
+}
+
 void Flag::print_as_flag(outputStream* st) {
   if (is_bool()) {
-    st->print("-XX:%s%s", get_bool() ? "+" : "-", name);
+    st->print("-XX:%s%s", get_bool() ? "+" : "-", _name);
   } else if (is_intx()) {
-    st->print("-XX:%s=" INTX_FORMAT, name, get_intx());
+    st->print("-XX:%s=" INTX_FORMAT, _name, get_intx());
   } else if (is_uintx()) {
-    st->print("-XX:%s=" UINTX_FORMAT, name, get_uintx());
+    st->print("-XX:%s=" UINTX_FORMAT, _name, get_uintx());
   } else if (is_uint64_t()) {
-    st->print("-XX:%s=" UINT64_FORMAT, name, get_uint64_t());
+    st->print("-XX:%s=" UINT64_FORMAT, _name, get_uint64_t());
   } else if (is_double()) {
-    st->print("-XX:%s=%f", name, get_double());
+    st->print("-XX:%s=%f", _name, get_double());
   } else if (is_ccstr()) {
-    st->print("-XX:%s=", name);
+    st->print("-XX:%s=", _name);
     const char* cp = get_ccstr();
     if (cp != NULL) {
       // Need to turn embedded '\n's back into separate arguments
@@ -167,7 +373,7 @@
             st->print("%c", *cp);
             break;
           case '\n':
-            st->print(" -XX:%s=", name);
+            st->print(" -XX:%s=", _name);
             break;
         }
       }
@@ -180,78 +386,51 @@
 // 4991491 do not "optimize out" the was_set false values: omitting them
 // tickles a Microsoft compiler bug causing flagTable to be malformed
 
-#define RUNTIME_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{product}", DEFAULT },
-#define RUNTIME_PD_PRODUCT_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{pd product}", DEFAULT },
-#define RUNTIME_DIAGNOSTIC_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{diagnostic}", DEFAULT },
-#define RUNTIME_EXPERIMENTAL_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{experimental}", DEFAULT },
-#define RUNTIME_MANAGEABLE_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{manageable}", DEFAULT },
-#define RUNTIME_PRODUCT_RW_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{product rw}", DEFAULT },
+#define NAME(name) NOT_PRODUCT(&name) PRODUCT_ONLY(&CONST_##name)
 
-#ifdef PRODUCT
-  #define RUNTIME_DEVELOP_FLAG_STRUCT(type, name, value, doc) /* flag is constant */
-  #define RUNTIME_PD_DEVELOP_FLAG_STRUCT(type, name, doc)     /* flag is constant */
-  #define RUNTIME_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc)
-#else
-  #define RUNTIME_DEVELOP_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, doc, "", DEFAULT },
-  #define RUNTIME_PD_DEVELOP_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, doc, "{pd}", DEFAULT },
-  #define RUNTIME_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, doc, "{notproduct}", DEFAULT },
-#endif
+#define RUNTIME_PRODUCT_FLAG_STRUCT(     type, name, value, doc) { #type, XSTR(name), &name,      NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_PRODUCT) },
+#define RUNTIME_PD_PRODUCT_FLAG_STRUCT(  type, name,        doc) { #type, XSTR(name), &name,      NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_PRODUCT | Flag::KIND_PLATFORM_DEPENDENT) },
+#define RUNTIME_DIAGNOSTIC_FLAG_STRUCT(  type, name, value, doc) { #type, XSTR(name), &name,      NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_DIAGNOSTIC) },
+#define RUNTIME_EXPERIMENTAL_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name,      NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_EXPERIMENTAL) },
+#define RUNTIME_MANAGEABLE_FLAG_STRUCT(  type, name, value, doc) { #type, XSTR(name), &name,      NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_MANAGEABLE) },
+#define RUNTIME_PRODUCT_RW_FLAG_STRUCT(  type, name, value, doc) { #type, XSTR(name), &name,      NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_PRODUCT | Flag::KIND_READ_WRITE) },
+#define RUNTIME_DEVELOP_FLAG_STRUCT(     type, name, value, doc) { #type, XSTR(name), NAME(name), NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_DEVELOP) },
+#define RUNTIME_PD_DEVELOP_FLAG_STRUCT(  type, name,        doc) { #type, XSTR(name), NAME(name), NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_DEVELOP | Flag::KIND_PLATFORM_DEPENDENT) },
+#define RUNTIME_NOTPRODUCT_FLAG_STRUCT(  type, name, value, doc) { #type, XSTR(name), NAME(name), NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_NOT_PRODUCT) },
 
 #ifdef _LP64
-  #define RUNTIME_LP64_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{lp64_product}", DEFAULT },
+#define RUNTIME_LP64_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name,      NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_LP64_PRODUCT) },
 #else
-  #define RUNTIME_LP64_PRODUCT_FLAG_STRUCT(type, name, value, doc) /* flag is constant */
+#define RUNTIME_LP64_PRODUCT_FLAG_STRUCT(type, name, value, doc) /* flag is constant */
 #endif // _LP64
 
-#define C1_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C1 product}", DEFAULT },
-#define C1_PD_PRODUCT_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C1 pd product}", DEFAULT },
-#ifdef PRODUCT
-  #define C1_DEVELOP_FLAG_STRUCT(type, name, value, doc) /* flag is constant */
-  #define C1_PD_DEVELOP_FLAG_STRUCT(type, name, doc)     /* flag is constant */
-  #define C1_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc)
-#else
-  #define C1_DEVELOP_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, doc, "{C1}", DEFAULT },
-  #define C1_PD_DEVELOP_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, doc, "{C1 pd}", DEFAULT },
-  #define C1_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, doc, "{C1 notproduct}", DEFAULT },
-#endif
+#define C1_PRODUCT_FLAG_STRUCT(          type, name, value, doc) { #type, XSTR(name), &name,      NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C1 | Flag::KIND_PRODUCT) },
+#define C1_PD_PRODUCT_FLAG_STRUCT(       type, name,        doc) { #type, XSTR(name), &name,      NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C1 | Flag::KIND_PRODUCT | Flag::KIND_PLATFORM_DEPENDENT) },
+#define C1_DIAGNOSTIC_FLAG_STRUCT(       type, name, value, doc) { #type, XSTR(name), &name,      NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C1 | Flag::KIND_DIAGNOSTIC) },
+#define C1_DEVELOP_FLAG_STRUCT(          type, name, value, doc) { #type, XSTR(name), NAME(name), NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C1 | Flag::KIND_DEVELOP) },
+#define C1_PD_DEVELOP_FLAG_STRUCT(       type, name,        doc) { #type, XSTR(name), NAME(name), NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C1 | Flag::KIND_DEVELOP | Flag::KIND_PLATFORM_DEPENDENT) },
+#define C1_NOTPRODUCT_FLAG_STRUCT(       type, name, value, doc) { #type, XSTR(name), NAME(name), NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C1 | Flag::KIND_NOT_PRODUCT) },
 
-#define C2_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C2 product}", DEFAULT },
-#define C2_PD_PRODUCT_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C2 pd product}", DEFAULT },
-#define C2_DIAGNOSTIC_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C2 diagnostic}", DEFAULT },
-#define C2_EXPERIMENTAL_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C2 experimental}", DEFAULT },
-#ifdef PRODUCT
-  #define C2_DEVELOP_FLAG_STRUCT(type, name, value, doc) /* flag is constant */
-  #define C2_PD_DEVELOP_FLAG_STRUCT(type, name, doc)     /* flag is constant */
-  #define C2_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc)
-#else
-  #define C2_DEVELOP_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, doc, "{C2}", DEFAULT },
-  #define C2_PD_DEVELOP_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, doc, "{C2 pd}", DEFAULT },
-  #define C2_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, doc, "{C2 notproduct}", DEFAULT },
-#endif
+#define C2_PRODUCT_FLAG_STRUCT(          type, name, value, doc) { #type, XSTR(name), &name,      NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C2 | Flag::KIND_PRODUCT) },
+#define C2_PD_PRODUCT_FLAG_STRUCT(       type, name,        doc) { #type, XSTR(name), &name,      NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C2 | Flag::KIND_PRODUCT | Flag::KIND_PLATFORM_DEPENDENT) },
+#define C2_DIAGNOSTIC_FLAG_STRUCT(       type, name, value, doc) { #type, XSTR(name), &name,      NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C2 | Flag::KIND_DIAGNOSTIC) },
+#define C2_EXPERIMENTAL_FLAG_STRUCT(     type, name, value, doc) { #type, XSTR(name), &name,      NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C2 | Flag::KIND_EXPERIMENTAL) },
+#define C2_DEVELOP_FLAG_STRUCT(          type, name, value, doc) { #type, XSTR(name), NAME(name), NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C2 | Flag::KIND_DEVELOP) },
+#define C2_PD_DEVELOP_FLAG_STRUCT(       type, name,        doc) { #type, XSTR(name), NAME(name), NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C2 | Flag::KIND_DEVELOP | Flag::KIND_PLATFORM_DEPENDENT) },
+#define C2_NOTPRODUCT_FLAG_STRUCT(       type, name, value, doc) { #type, XSTR(name), NAME(name), NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C2 | Flag::KIND_NOT_PRODUCT) },
 
-#define ARCH_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{ARCH product}", DEFAULT },
-#define ARCH_DIAGNOSTIC_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{ARCH diagnostic}", DEFAULT },
-#define ARCH_EXPERIMENTAL_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{ARCH experimental}", DEFAULT },
-#ifdef PRODUCT
-  #define ARCH_DEVELOP_FLAG_STRUCT(type, name, value, doc) /* flag is constant */
-  #define ARCH_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc)
-#else
-  #define ARCH_DEVELOP_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, doc, "{ARCH}", DEFAULT },
-  #define ARCH_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, doc, "{ARCH notproduct}", DEFAULT },
-#endif
+#define ARCH_PRODUCT_FLAG_STRUCT(        type, name, value, doc) { #type, XSTR(name), &name,      NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_ARCH | Flag::KIND_PRODUCT) },
+#define ARCH_DIAGNOSTIC_FLAG_STRUCT(     type, name, value, doc) { #type, XSTR(name), &name,      NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_ARCH | Flag::KIND_DIAGNOSTIC) },
+#define ARCH_EXPERIMENTAL_FLAG_STRUCT(   type, name, value, doc) { #type, XSTR(name), &name,      NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_ARCH | Flag::KIND_EXPERIMENTAL) },
+#define ARCH_DEVELOP_FLAG_STRUCT(        type, name, value, doc) { #type, XSTR(name), NAME(name), NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_ARCH | Flag::KIND_DEVELOP) },
+#define ARCH_NOTPRODUCT_FLAG_STRUCT(     type, name, value, doc) { #type, XSTR(name), NAME(name), NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_ARCH | Flag::KIND_NOT_PRODUCT) },
 
-#define SHARK_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{Shark product}", DEFAULT },
-#define SHARK_PD_PRODUCT_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{Shark pd product}", DEFAULT },
-#define SHARK_DIAGNOSTIC_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{Shark diagnostic}", DEFAULT },
-#ifdef PRODUCT
-  #define SHARK_DEVELOP_FLAG_STRUCT(type, name, value, doc) /* flag is constant */
-  #define SHARK_PD_DEVELOP_FLAG_STRUCT(type, name, doc)     /* flag is constant */
-  #define SHARK_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc)
-#else
-  #define SHARK_DEVELOP_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, doc, "{Shark}", DEFAULT },
-  #define SHARK_PD_DEVELOP_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, doc, "{Shark pd}", DEFAULT },
-  #define SHARK_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, doc, "{Shark notproduct}", DEFAULT },
-#endif
+#define SHARK_PRODUCT_FLAG_STRUCT(       type, name, value, doc) { #type, XSTR(name), &name,      NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_SHARK | Flag::KIND_PRODUCT) },
+#define SHARK_PD_PRODUCT_FLAG_STRUCT(    type, name,        doc) { #type, XSTR(name), &name,      NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_SHARK | Flag::KIND_PRODUCT | Flag::KIND_PLATFORM_DEPENDENT) },
+#define SHARK_DIAGNOSTIC_FLAG_STRUCT(    type, name, value, doc) { #type, XSTR(name), &name,      NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_SHARK | Flag::KIND_DIAGNOSTIC) },
+#define SHARK_DEVELOP_FLAG_STRUCT(       type, name, value, doc) { #type, XSTR(name), NAME(name), NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_SHARK | Flag::KIND_DEVELOP) },
+#define SHARK_PD_DEVELOP_FLAG_STRUCT(    type, name,        doc) { #type, XSTR(name), NAME(name), NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_SHARK | Flag::KIND_DEVELOP | Flag::KIND_PLATFORM_DEPENDENT) },
+#define SHARK_NOTPRODUCT_FLAG_STRUCT(    type, name, value, doc) { #type, XSTR(name), NAME(name), NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_SHARK | Flag::KIND_NOT_PRODUCT) },
 
 static Flag flagTable[] = {
  RUNTIME_FLAGS(RUNTIME_DEVELOP_FLAG_STRUCT, RUNTIME_PD_DEVELOP_FLAG_STRUCT, RUNTIME_PRODUCT_FLAG_STRUCT, RUNTIME_PD_PRODUCT_FLAG_STRUCT, RUNTIME_DIAGNOSTIC_FLAG_STRUCT, RUNTIME_EXPERIMENTAL_FLAG_STRUCT, RUNTIME_NOTPRODUCT_FLAG_STRUCT, RUNTIME_MANAGEABLE_FLAG_STRUCT, RUNTIME_PRODUCT_RW_FLAG_STRUCT, RUNTIME_LP64_PRODUCT_FLAG_STRUCT)
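Each *_FLAG_STRUCT macro above expands to one row of flagTable: #type stringizes the C type name, XSTR(name) stringizes the flag name after macro expansion, and NAME(name) selects the variable itself or, in product builds, its CONST_ twin. The stringize-and-table pattern in miniature (flags and row layout here are illustrative):

    #include <cstdio>

    #define STR(s)  #s
    #define XSTR(s) STR(s)        // expand first, then stringize

    struct Row { const char* type; const char* name; void* addr; };

    bool UseFoo  = true;
    int  BarSize = 42;

    // One macro per flag category, each emitting a table row -- a miniature
    // of the RUNTIME_*_FLAG_STRUCT expansion above.
    #define PRODUCT_FLAG_ROW(type, name) { #type, XSTR(name), &name },

    static Row table[] = {
      PRODUCT_FLAG_ROW(bool, UseFoo)
      PRODUCT_FLAG_ROW(int,  BarSize)
      { 0, 0, 0 }                 // null entry terminates the table
    };

    int main() {
      for (Row* r = table; r->name != 0; r++) {
        printf("%5s %s\n", r->type, r->name);
      }
      return 0;
    }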
@@ -260,7 +439,7 @@
  G1_FLAGS(RUNTIME_DEVELOP_FLAG_STRUCT, RUNTIME_PD_DEVELOP_FLAG_STRUCT, RUNTIME_PRODUCT_FLAG_STRUCT, RUNTIME_PD_PRODUCT_FLAG_STRUCT, RUNTIME_DIAGNOSTIC_FLAG_STRUCT, RUNTIME_EXPERIMENTAL_FLAG_STRUCT, RUNTIME_NOTPRODUCT_FLAG_STRUCT, RUNTIME_MANAGEABLE_FLAG_STRUCT, RUNTIME_PRODUCT_RW_FLAG_STRUCT)
 #endif // INCLUDE_ALL_GCS
 #ifdef COMPILER1
- C1_FLAGS(C1_DEVELOP_FLAG_STRUCT, C1_PD_DEVELOP_FLAG_STRUCT, C1_PRODUCT_FLAG_STRUCT, C1_PD_PRODUCT_FLAG_STRUCT, C1_NOTPRODUCT_FLAG_STRUCT)
+ C1_FLAGS(C1_DEVELOP_FLAG_STRUCT, C1_PD_DEVELOP_FLAG_STRUCT, C1_PRODUCT_FLAG_STRUCT, C1_PD_PRODUCT_FLAG_STRUCT, C1_DIAGNOSTIC_FLAG_STRUCT, C1_NOTPRODUCT_FLAG_STRUCT)
 #endif
 #ifdef COMPILER2
  C2_FLAGS(C2_DEVELOP_FLAG_STRUCT, C2_PD_DEVELOP_FLAG_STRUCT, C2_PRODUCT_FLAG_STRUCT, C2_PD_PRODUCT_FLAG_STRUCT, C2_DIAGNOSTIC_FLAG_STRUCT, C2_EXPERIMENTAL_FLAG_STRUCT, C2_NOTPRODUCT_FLAG_STRUCT)
@@ -284,9 +463,14 @@
 
 // Search the flag table for a named flag
 Flag* Flag::find_flag(const char* name, size_t length, bool allow_locked) {
-  for (Flag* current = &flagTable[0]; current->name != NULL; current++) {
-    if (str_equal(current->name, name, length)) {
-      // Found a matching entry.  Report locked flags only if allowed.
+  for (Flag* current = &flagTable[0]; current->_name != NULL; current++) {
+    if (str_equal(current->_name, name, length)) {
+      // Found a matching entry.
+      // Don't report notproduct and develop flags in product builds.
+      if (current->is_constant_in_binary()) {
+        return NULL;
+      }
+      // Report locked flags only if allowed.
       if (!(current->is_unlocked() || current->is_unlocker())) {
         if (!allow_locked) {
           // disable use of locked flags, e.g. diagnostic, experimental,
@@ -326,8 +510,8 @@
   float score;
   float max_score = -1;
 
-  for (Flag* current = &flagTable[0]; current->name != NULL; current++) {
-    score = str_similar(current->name, name, length);
+  for (Flag* current = &flagTable[0]; current->_name != NULL; current++) {
+    score = str_similar(current->_name, name, length);
     if (score > max_score) {
       max_score = score;
       match = current;
@@ -356,25 +540,25 @@
 bool CommandLineFlagsEx::is_default(CommandLineFlag flag) {
   assert((size_t)flag < Flag::numFlags, "bad command line flag index");
   Flag* f = &Flag::flags[flag];
-  return (f->origin == DEFAULT);
+  return f->is_default();
 }
 
 bool CommandLineFlagsEx::is_ergo(CommandLineFlag flag) {
   assert((size_t)flag < Flag::numFlags, "bad command line flag index");
   Flag* f = &Flag::flags[flag];
-  return (f->origin == ERGONOMIC);
+  return f->is_ergonomic();
 }
 
 bool CommandLineFlagsEx::is_cmdline(CommandLineFlag flag) {
   assert((size_t)flag < Flag::numFlags, "bad command line flag index");
   Flag* f = &Flag::flags[flag];
-  return (f->origin == COMMAND_LINE);
+  return f->is_command_line();
 }
 
 bool CommandLineFlags::wasSetOnCmdline(const char* name, bool* value) {
   Flag* result = Flag::find_flag((char*)name, strlen(name));
   if (result == NULL) return false;
-  *value = (result->origin == COMMAND_LINE);
+  *value = result->is_command_line();
   return true;
 }
 
@@ -386,22 +570,22 @@
   return true;
 }
 
-bool CommandLineFlags::boolAtPut(char* name, size_t len, bool* value, FlagValueOrigin origin) {
+bool CommandLineFlags::boolAtPut(char* name, size_t len, bool* value, Flag::Flags origin) {
   Flag* result = Flag::find_flag(name, len);
   if (result == NULL) return false;
   if (!result->is_bool()) return false;
   bool old_value = result->get_bool();
   result->set_bool(*value);
   *value = old_value;
-  result->origin = origin;
+  result->set_origin(origin);
   return true;
 }
 
-void CommandLineFlagsEx::boolAtPut(CommandLineFlagWithType flag, bool value, FlagValueOrigin origin) {
+void CommandLineFlagsEx::boolAtPut(CommandLineFlagWithType flag, bool value, Flag::Flags origin) {
   Flag* faddr = address_of_flag(flag);
   guarantee(faddr != NULL && faddr->is_bool(), "wrong flag type");
   faddr->set_bool(value);
-  faddr->origin = origin;
+  faddr->set_origin(origin);
 }
 
 bool CommandLineFlags::intxAt(char* name, size_t len, intx* value) {
@@ -412,22 +596,22 @@
   return true;
 }
 
-bool CommandLineFlags::intxAtPut(char* name, size_t len, intx* value, FlagValueOrigin origin) {
+bool CommandLineFlags::intxAtPut(char* name, size_t len, intx* value, Flag::Flags origin) {
   Flag* result = Flag::find_flag(name, len);
   if (result == NULL) return false;
   if (!result->is_intx()) return false;
   intx old_value = result->get_intx();
   result->set_intx(*value);
   *value = old_value;
-  result->origin = origin;
+  result->set_origin(origin);
   return true;
 }
 
-void CommandLineFlagsEx::intxAtPut(CommandLineFlagWithType flag, intx value, FlagValueOrigin origin) {
+void CommandLineFlagsEx::intxAtPut(CommandLineFlagWithType flag, intx value, Flag::Flags origin) {
   Flag* faddr = address_of_flag(flag);
   guarantee(faddr != NULL && faddr->is_intx(), "wrong flag type");
   faddr->set_intx(value);
-  faddr->origin = origin;
+  faddr->set_origin(origin);
 }
 
 bool CommandLineFlags::uintxAt(char* name, size_t len, uintx* value) {
@@ -438,22 +622,22 @@
   return true;
 }
 
-bool CommandLineFlags::uintxAtPut(char* name, size_t len, uintx* value, FlagValueOrigin origin) {
+bool CommandLineFlags::uintxAtPut(char* name, size_t len, uintx* value, Flag::Flags origin) {
   Flag* result = Flag::find_flag(name, len);
   if (result == NULL) return false;
   if (!result->is_uintx()) return false;
   uintx old_value = result->get_uintx();
   result->set_uintx(*value);
   *value = old_value;
-  result->origin = origin;
+  result->set_origin(origin);
   return true;
 }
 
-void CommandLineFlagsEx::uintxAtPut(CommandLineFlagWithType flag, uintx value, FlagValueOrigin origin) {
+void CommandLineFlagsEx::uintxAtPut(CommandLineFlagWithType flag, uintx value, Flag::Flags origin) {
   Flag* faddr = address_of_flag(flag);
   guarantee(faddr != NULL && faddr->is_uintx(), "wrong flag type");
   faddr->set_uintx(value);
-  faddr->origin = origin;
+  faddr->set_origin(origin);
 }
 
 bool CommandLineFlags::uint64_tAt(char* name, size_t len, uint64_t* value) {
@@ -464,22 +648,22 @@
   return true;
 }
 
-bool CommandLineFlags::uint64_tAtPut(char* name, size_t len, uint64_t* value, FlagValueOrigin origin) {
+bool CommandLineFlags::uint64_tAtPut(char* name, size_t len, uint64_t* value, Flag::Flags origin) {
   Flag* result = Flag::find_flag(name, len);
   if (result == NULL) return false;
   if (!result->is_uint64_t()) return false;
   uint64_t old_value = result->get_uint64_t();
   result->set_uint64_t(*value);
   *value = old_value;
-  result->origin = origin;
+  result->set_origin(origin);
   return true;
 }
 
-void CommandLineFlagsEx::uint64_tAtPut(CommandLineFlagWithType flag, uint64_t value, FlagValueOrigin origin) {
+void CommandLineFlagsEx::uint64_tAtPut(CommandLineFlagWithType flag, uint64_t value, Flag::Flags origin) {
   Flag* faddr = address_of_flag(flag);
   guarantee(faddr != NULL && faddr->is_uint64_t(), "wrong flag type");
   faddr->set_uint64_t(value);
-  faddr->origin = origin;
+  faddr->set_origin(origin);
 }
 
 bool CommandLineFlags::doubleAt(char* name, size_t len, double* value) {
@@ -490,22 +674,22 @@
   return true;
 }
 
-bool CommandLineFlags::doubleAtPut(char* name, size_t len, double* value, FlagValueOrigin origin) {
+bool CommandLineFlags::doubleAtPut(char* name, size_t len, double* value, Flag::Flags origin) {
   Flag* result = Flag::find_flag(name, len);
   if (result == NULL) return false;
   if (!result->is_double()) return false;
   double old_value = result->get_double();
   result->set_double(*value);
   *value = old_value;
-  result->origin = origin;
+  result->set_origin(origin);
   return true;
 }
 
-void CommandLineFlagsEx::doubleAtPut(CommandLineFlagWithType flag, double value, FlagValueOrigin origin) {
+void CommandLineFlagsEx::doubleAtPut(CommandLineFlagWithType flag, double value, Flag::Flags origin) {
   Flag* faddr = address_of_flag(flag);
   guarantee(faddr != NULL && faddr->is_double(), "wrong flag type");
   faddr->set_double(value);
-  faddr->origin = origin;
+  faddr->set_origin(origin);
 }
 
 bool CommandLineFlags::ccstrAt(char* name, size_t len, ccstr* value) {
@@ -518,7 +702,7 @@
 
 // Contract:  Flag will make private copy of the incoming value.
 // Outgoing value is always malloc-ed, and caller MUST call free.
-bool CommandLineFlags::ccstrAtPut(char* name, size_t len, ccstr* value, FlagValueOrigin origin) {
+bool CommandLineFlags::ccstrAtPut(char* name, size_t len, ccstr* value, Flag::Flags origin) {
   Flag* result = Flag::find_flag(name, len);
   if (result == NULL) return false;
   if (!result->is_ccstr()) return false;
@@ -529,35 +713,35 @@
     strcpy(new_value, *value);
   }
   result->set_ccstr(new_value);
-  if (result->origin == DEFAULT && old_value != NULL) {
+  if (result->is_default() && old_value != NULL) {
     // Prior value is NOT heap allocated, but was a literal constant.
     char* old_value_to_free = NEW_C_HEAP_ARRAY(char, strlen(old_value)+1, mtInternal);
     strcpy(old_value_to_free, old_value);
     old_value = old_value_to_free;
   }
   *value = old_value;
-  result->origin = origin;
+  result->set_origin(origin);
   return true;
 }
 
 // Contract:  Flag will make private copy of the incoming value.
-void CommandLineFlagsEx::ccstrAtPut(CommandLineFlagWithType flag, ccstr value, FlagValueOrigin origin) {
+void CommandLineFlagsEx::ccstrAtPut(CommandLineFlagWithType flag, ccstr value, Flag::Flags origin) {
   Flag* faddr = address_of_flag(flag);
   guarantee(faddr != NULL && faddr->is_ccstr(), "wrong flag type");
   ccstr old_value = faddr->get_ccstr();
   char* new_value = NEW_C_HEAP_ARRAY(char, strlen(value)+1, mtInternal);
   strcpy(new_value, value);
   faddr->set_ccstr(new_value);
-  if (faddr->origin != DEFAULT && old_value != NULL) {
+  if (!faddr->is_default() && old_value != NULL) {
     // Prior value is heap allocated so free it.
     FREE_C_HEAP_ARRAY(char, old_value, mtInternal);
   }
-  faddr->origin = origin;
+  faddr->set_origin(origin);
 }
 
 extern "C" {
   static int compare_flags(const void* void_a, const void* void_b) {
-    return strcmp((*((Flag**) void_a))->name, (*((Flag**) void_b))->name);
+    return strcmp((*((Flag**) void_a))->_name, (*((Flag**) void_b))->_name);
   }
 }
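
The ccstr variants above carry an ownership contract the scalar variants do
not: the flag takes a private C-heap copy of the incoming string, and the
outgoing string is always C-heap allocated (the is_default() branch copies a
literal default first), so the caller can free unconditionally. A hedged
sketch, assuming the surrounding HotSpot headers; "SomeCcstrFlag" is a
placeholder name:

    ccstr s = "new value";                        // flag takes a private copy
    if (CommandLineFlags::ccstrAtPut((char*) "SomeCcstrFlag", &s, Flag::MANAGEMENT)) {
      // s now points at the previous value, always malloc-ed per the contract:
      FREE_C_HEAP_ARRAY(char, s, mtInternal);     // caller MUST free
    }
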
 
@@ -566,20 +750,19 @@
   // note: this method is called before the thread structure is in place
   //       which means resource allocation cannot be used.
 
-  // Compute size
-  int length= 0;
-  while (flagTable[length].name != NULL) length++;
+  // The last entry is the null entry.
+  const size_t length = Flag::numFlags - 1;
 
   // Sort
   Flag** array = NEW_C_HEAP_ARRAY(Flag*, length, mtInternal);
-  for (int index = 0; index < length; index++) {
-    array[index] = &flagTable[index];
+  for (size_t i = 0; i < length; i++) {
+    array[i] = &flagTable[i];
   }
   qsort(array, length, sizeof(Flag*), compare_flags);
 
   // Print
-  for (int i = 0; i < length; i++) {
-    if (array[i]->origin /* naked field! */) {
+  for (size_t i = 0; i < length; i++) {
+    if (array[i]->get_origin() /* non-default only */) {
       array[i]->print_as_flag(out);
       out->print(" ");
     }
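
Two details of the sort above are easy to miss: the printable length is
Flag::numFlags - 1 because the table's last entry is a null sentinel, and
compare_flags sits inside extern "C" because qsort() expects a C-linkage
function pointer. A self-contained miniature of the same pattern (Entry,
table, and cmp_entries are invented for this sketch):

    #include <stdlib.h>
    #include <string.h>

    struct Entry { const char* _name; };
    static Entry table[] = { {"b"}, {"a"}, {NULL} };   // NULL name = sentinel

    extern "C" int cmp_entries(const void* x, const void* y) {
      return strcmp(((const Entry*) x)->_name, ((const Entry*) y)->_name);
    }

    int main() {
      size_t length = 0;
      while (table[length]._name != NULL) length++;    // count up to the sentinel
      qsort(table, length, sizeof(Entry), cmp_entries);
      return 0;                                        // table is now {"a"},{"b"},{NULL}
    }
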
@@ -602,20 +785,19 @@
   // note: this method is called before the thread structure is in place
   //       which means resource allocation cannot be used.
 
-  // Compute size
-  int length= 0;
-  while (flagTable[length].name != NULL) length++;
+  // The last entry is the null entry.
+  const size_t length = Flag::numFlags - 1;
 
   // Sort
   Flag** array = NEW_C_HEAP_ARRAY(Flag*, length, mtInternal);
-  for (int index = 0; index < length; index++) {
-    array[index] = &flagTable[index];
+  for (size_t i = 0; i < length; i++) {
+    array[i] = &flagTable[i];
   }
   qsort(array, length, sizeof(Flag*), compare_flags);
 
   // Print
   out->print_cr("[Global flags]");
-  for (int i = 0; i < length; i++) {
+  for (size_t i = 0; i < length; i++) {
     if (array[i]->is_unlocked()) {
       array[i]->print_on(out, withComments);
     }
--- a/src/share/vm/runtime/globals.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/runtime/globals.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -208,29 +208,49 @@
 typedef const char* ccstr;
 typedef const char* ccstrlist;   // represents string arguments which accumulate
 
-enum FlagValueOrigin {
-  DEFAULT          = 0,
-  COMMAND_LINE     = 1,
-  ENVIRON_VAR      = 2,
-  CONFIG_FILE      = 3,
-  MANAGEMENT       = 4,
-  ERGONOMIC        = 5,
-  ATTACH_ON_DEMAND = 6,
-  INTERNAL         = 99
-};
+struct Flag {
+  enum Flags {
+    // value origin
+    DEFAULT          = 0,
+    COMMAND_LINE     = 1,
+    ENVIRON_VAR      = 2,
+    CONFIG_FILE      = 3,
+    MANAGEMENT       = 4,
+    ERGONOMIC        = 5,
+    ATTACH_ON_DEMAND = 6,
+    INTERNAL         = 7,
+
+    LAST_VALUE_ORIGIN = INTERNAL,
+    VALUE_ORIGIN_BITS = 4,
+    VALUE_ORIGIN_MASK = right_n_bits(VALUE_ORIGIN_BITS),
 
-struct Flag {
-  const char *type;
-  const char *name;
-  void*       addr;
+    // flag kind
+    KIND_PRODUCT            = 1 << 4,
+    KIND_MANAGEABLE         = 1 << 5,
+    KIND_DIAGNOSTIC         = 1 << 6,
+    KIND_EXPERIMENTAL       = 1 << 7,
+    KIND_NOT_PRODUCT        = 1 << 8,
+    KIND_DEVELOP            = 1 << 9,
+    KIND_PLATFORM_DEPENDENT = 1 << 10,
+    KIND_READ_WRITE         = 1 << 11,
+    KIND_C1                 = 1 << 12,
+    KIND_C2                 = 1 << 13,
+    KIND_ARCH               = 1 << 14,
+    KIND_SHARK              = 1 << 15,
+    KIND_LP64_PRODUCT       = 1 << 16,
+    KIND_COMMERCIAL         = 1 << 17,
 
-  NOT_PRODUCT(const char *doc;)
+    KIND_MASK = ~VALUE_ORIGIN_MASK
+  };
 
-  const char *kind;
-  FlagValueOrigin origin;
+  const char* _type;
+  const char* _name;
+  void* _addr;
+  NOT_PRODUCT(const char* _doc;)
+  Flags _flags;
 
   // points to all Flags static array
-  static Flag *flags;
+  static Flag* flags;
 
   // number of flags
   static size_t numFlags;
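
The single _flags field replaces the old "const char* kind" and
"FlagValueOrigin origin" fields: the value origin occupies the low
VALUE_ORIGIN_BITS (four) bits, and each KIND_* constant is an independent bit
from bit 4 upward, so one integer answers both "who set this flag" and "what
kind of flag is it". A hedged sketch of how the masked accessors presumably
look (their bodies live in globals.cpp, outside this hunk):

    // Sketch only; assumes the accessors are thin masked reads/writes on _flags.
    Flag::Flags Flag::get_origin() {
      return Flags(_flags & VALUE_ORIGIN_MASK);          // low 4 bits
    }

    void Flag::set_origin(Flags origin) {
      assert((origin & KIND_MASK) == 0, "origin bits only");
      _flags = Flags((_flags & KIND_MASK) | origin);     // preserve the kind bits
    }

    bool Flag::is_default()       { return get_origin() == Flag::DEFAULT; }
    bool Flag::is_product() const { return (_flags & KIND_PRODUCT) != 0; }
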
@@ -238,30 +258,50 @@
   static Flag* find_flag(const char* name, size_t length, bool allow_locked = false);
   static Flag* fuzzy_match(const char* name, size_t length, bool allow_locked = false);
 
-  bool is_bool() const        { return strcmp(type, "bool") == 0; }
-  bool get_bool() const       { return *((bool*) addr); }
-  void set_bool(bool value)   { *((bool*) addr) = value; }
+  void check_writable();
+
+  bool is_bool() const;
+  bool get_bool() const;
+  void set_bool(bool value);
+
+  bool is_intx() const;
+  intx get_intx() const;
+  void set_intx(intx value);
 
-  bool is_intx()  const       { return strcmp(type, "intx")  == 0; }
-  intx get_intx() const       { return *((intx*) addr); }
-  void set_intx(intx value)   { *((intx*) addr) = value; }
+  bool is_uintx() const;
+  uintx get_uintx() const;
+  void set_uintx(uintx value);
 
-  bool is_uintx() const       { return strcmp(type, "uintx") == 0; }
-  uintx get_uintx() const     { return *((uintx*) addr); }
-  void set_uintx(uintx value) { *((uintx*) addr) = value; }
+  bool is_uint64_t() const;
+  uint64_t get_uint64_t() const;
+  void set_uint64_t(uint64_t value);
+
+  bool is_double() const;
+  double get_double() const;
+  void set_double(double value);
 
-  bool is_uint64_t() const          { return strcmp(type, "uint64_t") == 0; }
-  uint64_t get_uint64_t() const     { return *((uint64_t*) addr); }
-  void set_uint64_t(uint64_t value) { *((uint64_t*) addr) = value; }
+  bool is_ccstr() const;
+  bool ccstr_accumulates() const;
+  ccstr get_ccstr() const;
+  void set_ccstr(ccstr value);
+
+  Flags get_origin();
+  void set_origin(Flags origin);
 
-  bool is_double() const        { return strcmp(type, "double") == 0; }
-  double get_double() const     { return *((double*) addr); }
-  void set_double(double value) { *((double*) addr) = value; }
+  bool is_default();
+  bool is_ergonomic();
+  bool is_command_line();
 
-  bool is_ccstr() const          { return strcmp(type, "ccstr") == 0 || strcmp(type, "ccstrlist") == 0; }
-  bool ccstr_accumulates() const { return strcmp(type, "ccstrlist") == 0; }
-  ccstr get_ccstr() const     { return *((ccstr*) addr); }
-  void set_ccstr(ccstr value) { *((ccstr*) addr) = value; }
+  bool is_product() const;
+  bool is_manageable() const;
+  bool is_diagnostic() const;
+  bool is_experimental() const;
+  bool is_notproduct() const;
+  bool is_develop() const;
+  bool is_read_write() const;
+  bool is_commercial() const;
+
+  bool is_constant_in_binary() const;
 
   bool is_unlocker() const;
   bool is_unlocked() const;
@@ -277,6 +317,7 @@
   void get_locked_message_ext(char*, int) const;
 
   void print_on(outputStream* st, bool withComments = false );
+  void print_kind(outputStream* st);
   void print_as_flag(outputStream* st);
 };
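
The accessors declared above were previously inline in this header (their
deleted bodies are visible earlier in the hunk); moving them out of line keeps
the struct declaration readable and gives the setters a place to hook the new
check_writable(). A hedged sketch of one likely triple in globals.cpp,
mirroring the removed inline bodies:

    bool Flag::is_intx() const {
      return strcmp(_type, "intx") == 0;    // type check is still a string compare
    }

    intx Flag::get_intx() const {
      return *((intx*) _addr);              // value still lives behind untyped _addr
    }

    void Flag::set_intx(intx value) {
      check_writable();                     // presumably rejects constant flags
      *((intx*) _addr) = value;
    }
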
 
@@ -324,33 +365,33 @@
  public:
   static bool boolAt(char* name, size_t len, bool* value);
   static bool boolAt(char* name, bool* value)      { return boolAt(name, strlen(name), value); }
-  static bool boolAtPut(char* name, size_t len, bool* value, FlagValueOrigin origin);
-  static bool boolAtPut(char* name, bool* value, FlagValueOrigin origin)   { return boolAtPut(name, strlen(name), value, origin); }
+  static bool boolAtPut(char* name, size_t len, bool* value, Flag::Flags origin);
+  static bool boolAtPut(char* name, bool* value, Flag::Flags origin)   { return boolAtPut(name, strlen(name), value, origin); }
 
   static bool intxAt(char* name, size_t len, intx* value);
   static bool intxAt(char* name, intx* value)      { return intxAt(name, strlen(name), value); }
-  static bool intxAtPut(char* name, size_t len, intx* value, FlagValueOrigin origin);
-  static bool intxAtPut(char* name, intx* value, FlagValueOrigin origin)   { return intxAtPut(name, strlen(name), value, origin); }
+  static bool intxAtPut(char* name, size_t len, intx* value, Flag::Flags origin);
+  static bool intxAtPut(char* name, intx* value, Flag::Flags origin)   { return intxAtPut(name, strlen(name), value, origin); }
 
   static bool uintxAt(char* name, size_t len, uintx* value);
   static bool uintxAt(char* name, uintx* value)    { return uintxAt(name, strlen(name), value); }
-  static bool uintxAtPut(char* name, size_t len, uintx* value, FlagValueOrigin origin);
-  static bool uintxAtPut(char* name, uintx* value, FlagValueOrigin origin) { return uintxAtPut(name, strlen(name), value, origin); }
+  static bool uintxAtPut(char* name, size_t len, uintx* value, Flag::Flags origin);
+  static bool uintxAtPut(char* name, uintx* value, Flag::Flags origin) { return uintxAtPut(name, strlen(name), value, origin); }
 
   static bool uint64_tAt(char* name, size_t len, uint64_t* value);
   static bool uint64_tAt(char* name, uint64_t* value) { return uint64_tAt(name, strlen(name), value); }
-  static bool uint64_tAtPut(char* name, size_t len, uint64_t* value, FlagValueOrigin origin);
-  static bool uint64_tAtPut(char* name, uint64_t* value, FlagValueOrigin origin) { return uint64_tAtPut(name, strlen(name), value, origin); }
+  static bool uint64_tAtPut(char* name, size_t len, uint64_t* value, Flag::Flags origin);
+  static bool uint64_tAtPut(char* name, uint64_t* value, Flag::Flags origin) { return uint64_tAtPut(name, strlen(name), value, origin); }
 
   static bool doubleAt(char* name, size_t len, double* value);
   static bool doubleAt(char* name, double* value)    { return doubleAt(name, strlen(name), value); }
-  static bool doubleAtPut(char* name, size_t len, double* value, FlagValueOrigin origin);
-  static bool doubleAtPut(char* name, double* value, FlagValueOrigin origin) { return doubleAtPut(name, strlen(name), value, origin); }
+  static bool doubleAtPut(char* name, size_t len, double* value, Flag::Flags origin);
+  static bool doubleAtPut(char* name, double* value, Flag::Flags origin) { return doubleAtPut(name, strlen(name), value, origin); }
 
   static bool ccstrAt(char* name, size_t len, ccstr* value);
   static bool ccstrAt(char* name, ccstr* value)    { return ccstrAt(name, strlen(name), value); }
-  static bool ccstrAtPut(char* name, size_t len, ccstr* value, FlagValueOrigin origin);
-  static bool ccstrAtPut(char* name, ccstr* value, FlagValueOrigin origin) { return ccstrAtPut(name, strlen(name), value, origin); }
+  static bool ccstrAtPut(char* name, size_t len, ccstr* value, Flag::Flags origin);
+  static bool ccstrAtPut(char* name, ccstr* value, Flag::Flags origin) { return ccstrAtPut(name, strlen(name), value, origin); }
 
   // Returns false if name is not a command line flag.
   static bool wasSetOnCmdline(const char* name, bool* value);
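
Every ...At/...AtPut pair above keeps a (name, len) primary form plus a
convenience overload that derives the length with strlen, so call sites mostly
just trade FlagValueOrigin for Flag::Flags. A short hedged usage sketch
("SomeBoolFlag" is a placeholder name):

    bool b = true;
    // Write through the convenience overload; b receives the previous value.
    if (CommandLineFlags::boolAtPut((char*) "SomeBoolFlag", &b, Flag::COMMAND_LINE)) {
      // the flag exists, is a bool, and is now true; b holds the old value
    }

    bool current;
    // Read-only query through the matching boolAt overload.
    if (CommandLineFlags::boolAt((char*) "SomeBoolFlag", &current)) {
      // current holds the flag's present value
    }
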
@@ -454,21 +495,21 @@
 #define RUNTIME_FLAGS(develop, develop_pd, product, product_pd, diagnostic, experimental, notproduct, manageable, product_rw, lp64_product) \
                                                                             \
   lp64_product(bool, UseCompressedOops, false,                              \
-            "Use 32-bit object references in 64-bit VM  "                   \
-            "lp64_product means flag is always constant in 32 bit VM")      \
-                                                                            \
-  lp64_product(bool, UseCompressedKlassPointers, false,                     \
-            "Use 32-bit klass pointers in 64-bit VM  "                      \
-            "lp64_product means flag is always constant in 32 bit VM")      \
+          "Use 32-bit object references in 64-bit VM. "                     \
+          "lp64_product means flag is always constant in 32 bit VM")        \
+                                                                            \
+  lp64_product(bool, UseCompressedClassPointers, false,                     \
+          "Use 32-bit class pointers in 64-bit VM. "                        \
+          "lp64_product means flag is always constant in 32 bit VM")        \
                                                                             \
   notproduct(bool, CheckCompressedOops, true,                               \
-            "generate checks in encoding/decoding code in debug VM")        \
+          "Generate checks in encoding/decoding code in debug VM")          \
                                                                             \
   product_pd(uintx, HeapBaseMinAddress,                                     \
-            "OS specific low limit for heap base address")                  \
+          "OS specific low limit for heap base address")                    \
                                                                             \
   diagnostic(bool, PrintCompressedOopsMode, false,                          \
-            "Print compressed oops base address and encoding mode")         \
+          "Print compressed oops base address and encoding mode")           \
                                                                             \
   lp64_product(intx, ObjectAlignmentInBytes, 8,                             \
           "Default object alignment in bytes, 8 is minimum")                \
@@ -490,7 +531,7 @@
           "Use lwsync instruction if true, else use slower sync")           \
                                                                             \
   develop(bool, CleanChunkPoolAsync, falseInEmbedded,                       \
-          "Whether to clean the chunk pool asynchronously")                 \
+          "Clean the chunk pool asynchronously")                            \
                                                                             \
   /* Temporary: See 6948537 */                                              \
   experimental(bool, UseMemSetInBOT, true,                                  \
@@ -500,10 +541,12 @@
           "Enable normal processing of flags relating to field diagnostics")\
                                                                             \
   experimental(bool, UnlockExperimentalVMOptions, false,                    \
-          "Enable normal processing of flags relating to experimental features")\
+          "Enable normal processing of flags relating to experimental "     \
+          "features")                                                       \
                                                                             \
   product(bool, JavaMonitorsInStackTrace, true,                             \
-          "Print info. about Java monitor locks when the stacks are dumped")\
+          "Print information about Java monitor locks when the stacks are"  \
+          "dumped")                                                         \
                                                                             \
   product_pd(bool, UseLargePages,                                           \
           "Use large page memory")                                          \
@@ -514,8 +557,12 @@
   develop(bool, LargePagesIndividualAllocationInjectError, false,           \
           "Fail large pages individual allocation")                         \
                                                                             \
+  product(bool, UseLargePagesInMetaspace, false,                            \
+          "Use large page memory in metaspace. "                            \
+          "Only used if UseLargePages is enabled.")                         \
+                                                                            \
   develop(bool, TracePageSizes, false,                                      \
-          "Trace page size selection and usage.")                           \
+          "Trace page size selection and usage")                            \
                                                                             \
   product(bool, UseNUMA, false,                                             \
           "Use NUMA if available")                                          \
@@ -530,12 +577,12 @@
           "Force NUMA optimizations on single-node/UMA systems")            \
                                                                             \
   product(uintx, NUMAChunkResizeWeight, 20,                                 \
-          "Percentage (0-100) used to weigh the current sample when "      \
+          "Percentage (0-100) used to weigh the current sample when "       \
           "computing exponentially decaying average for "                   \
           "AdaptiveNUMAChunkSizing")                                        \
                                                                             \
   product(uintx, NUMASpaceResizeRate, 1*G,                                  \
-          "Do not reallocate more that this amount per collection")         \
+          "Do not reallocate more than this amount per collection")         \
                                                                             \
   product(bool, UseAdaptiveNUMAChunkSizing, true,                           \
           "Enable adaptive chunk sizing for NUMA")                          \
@@ -552,17 +599,17 @@
   product(intx, UseSSE, 99,                                                 \
           "Highest supported SSE instructions set on x86/x64")              \
                                                                             \
-  product(bool, UseAES, false,                                               \
+  product(bool, UseAES, false,                                              \
           "Control whether AES instructions can be used on x86/x64")        \
                                                                             \
   product(uintx, LargePageSizeInBytes, 0,                                   \
-          "Large page size (0 to let VM choose the page size")              \
+          "Large page size (0 to let VM choose the page size)")             \
                                                                             \
   product(uintx, LargePageHeapSizeThreshold, 128*M,                         \
-          "Use large pages if max heap is at least this big")               \
+          "Use large pages if maximum heap is at least this big")           \
                                                                             \
   product(bool, ForceTimeHighResolution, false,                             \
-          "Using high time resolution(For Win32 only)")                     \
+          "Using high time resolution (for Win32 only)")                    \
                                                                             \
   develop(bool, TraceItables, false,                                        \
           "Trace initialization and use of itables")                        \
@@ -578,10 +625,10 @@
                                                                             \
   develop(bool, TraceLongCompiles, false,                                   \
           "Print out every time compilation is longer than "                \
-          "a given threashold")                                             \
+          "a given threshold")                                              \
                                                                             \
   develop(bool, SafepointALot, false,                                       \
-          "Generates a lot of safepoints. Works with "                      \
+          "Generate a lot of safepoints. This works with "                  \
           "GuaranteedSafepointInterval")                                    \
                                                                             \
   product_pd(bool, BackgroundCompilation,                                   \
@@ -589,13 +636,13 @@
           "compilation")                                                    \
                                                                             \
   product(bool, PrintVMQWaitTime, false,                                    \
-          "Prints out the waiting time in VM operation queue")              \
+          "Print out the waiting time in VM operation queue")               \
                                                                             \
   develop(bool, NoYieldsInMicrolock, false,                                 \
           "Disable yields in microlock")                                    \
                                                                             \
   develop(bool, TraceOopMapGeneration, false,                               \
-          "Shows oopmap generation")                                        \
+          "Show OopMapGeneration")                                          \
                                                                             \
   product(bool, MethodFlushing, true,                                       \
           "Reclamation of zombie and not-entrant methods")                  \
@@ -604,10 +651,11 @@
           "Verify stack of each thread when it is entering a runtime call") \
                                                                             \
   diagnostic(bool, ForceUnreachable, false,                                 \
-          "Make all non code cache addresses to be unreachable with forcing use of 64bit literal fixups") \
+          "Make all non code cache addresses to be unreachable by "         \
+          "forcing use of 64bit literal fixups")                            \
                                                                             \
   notproduct(bool, StressDerivedPointers, false,                            \
-          "Force scavenge when a derived pointers is detected on stack "    \
+          "Force scavenge when a derived pointer is detected on stack "     \
           "after rtm call")                                                 \
                                                                             \
   develop(bool, TraceDerivedPointers, false,                                \
@@ -626,86 +674,86 @@
           "Use Inline Caches for virtual calls ")                           \
                                                                             \
   develop(bool, InlineArrayCopy, true,                                      \
-          "inline arraycopy native that is known to be part of "            \
+          "Inline arraycopy native that is known to be part of "            \
           "base library DLL")                                               \
                                                                             \
   develop(bool, InlineObjectHash, true,                                     \
-          "inline Object::hashCode() native that is known to be part "      \
+          "Inline Object::hashCode() native that is known to be part "      \
           "of base library DLL")                                            \
                                                                             \
   develop(bool, InlineNatives, true,                                        \
-          "inline natives that are known to be part of base library DLL")   \
+          "Inline natives that are known to be part of base library DLL")   \
                                                                             \
   develop(bool, InlineMathNatives, true,                                    \
-          "inline SinD, CosD, etc.")                                        \
+          "Inline SinD, CosD, etc.")                                        \
                                                                             \
   develop(bool, InlineClassNatives, true,                                   \
-          "inline Class.isInstance, etc")                                   \
+          "Inline Class.isInstance, etc")                                   \
                                                                             \
   develop(bool, InlineThreadNatives, true,                                  \
-          "inline Thread.currentThread, etc")                               \
+          "Inline Thread.currentThread, etc")                               \
                                                                             \
   develop(bool, InlineUnsafeOps, true,                                      \
-          "inline memory ops (native methods) from sun.misc.Unsafe")        \
+          "Inline memory ops (native methods) from sun.misc.Unsafe")        \
                                                                             \
   product(bool, CriticalJNINatives, true,                                   \
-          "check for critical JNI entry points")                            \
+          "Check for critical JNI entry points")                            \
                                                                             \
   notproduct(bool, StressCriticalJNINatives, false,                         \
-            "Exercise register saving code in critical natives")            \
+          "Exercise register saving code in critical natives")              \
                                                                             \
   product(bool, UseSSE42Intrinsics, false,                                  \
           "SSE4.2 versions of intrinsics")                                  \
                                                                             \
   product(bool, UseAESIntrinsics, false,                                    \
-          "use intrinsics for AES versions of crypto")                      \
+          "Use intrinsics for AES versions of crypto")                      \
                                                                             \
   product(bool, UseCRC32Intrinsics, false,                                  \
           "use intrinsics for java.util.zip.CRC32")                         \
                                                                             \
   develop(bool, TraceCallFixup, false,                                      \
-          "traces all call fixups")                                         \
+          "Trace all call fixups")                                          \
                                                                             \
   develop(bool, DeoptimizeALot, false,                                      \
-          "deoptimize at every exit from the runtime system")               \
+          "Deoptimize at every exit from the runtime system")               \
                                                                             \
   notproduct(ccstrlist, DeoptimizeOnlyAt, "",                               \
-          "a comma separated list of bcis to deoptimize at")                \
+          "A comma separated list of bcis to deoptimize at")                \
                                                                             \
   product(bool, DeoptimizeRandom, false,                                    \
-          "deoptimize random frames on random exit from the runtime system")\
+          "Deoptimize random frames on random exit from the runtime system")\
                                                                             \
   notproduct(bool, ZombieALot, false,                                       \
-          "creates zombies (non-entrant) at exit from the runt. system")    \
+          "Create zombies (non-entrant) at exit from the runtime system")   \
                                                                             \
   product(bool, UnlinkSymbolsALot, false,                                   \
-          "unlink unreferenced symbols from the symbol table at safepoints")\
+          "Unlink unreferenced symbols from the symbol table at safepoints")\
                                                                             \
   notproduct(bool, WalkStackALot, false,                                    \
-          "trace stack (no print) at every exit from the runtime system")   \
+          "Trace stack (no print) at every exit from the runtime system")   \
                                                                             \
   product(bool, Debugging, false,                                           \
-          "set when executing debug methods in debug.ccp "                  \
+          "Set when executing debug methods in debug.cpp "                  \
           "(to prevent triggering assertions)")                             \
                                                                             \
   notproduct(bool, StrictSafepointChecks, trueInDebug,                      \
           "Enable strict checks that safepoints cannot happen for threads " \
-          "that used No_Safepoint_Verifier")                                \
+          "that use No_Safepoint_Verifier")                                 \
                                                                             \
   notproduct(bool, VerifyLastFrame, false,                                  \
           "Verify oops on last frame on entry to VM")                       \
                                                                             \
   develop(bool, TraceHandleAllocation, false,                               \
-          "Prints out warnings when suspicious many handles are allocated") \
+          "Print out warnings when suspiciously many handles are allocated")\
                                                                             \
   product(bool, UseCompilerSafepoints, true,                                \
           "Stop at safepoints in compiled code")                            \
                                                                             \
   product(bool, FailOverToOldVerifier, true,                                \
-          "fail over to old verifier when split verifier fails")            \
+          "Fail over to old verifier when split verifier fails")            \
                                                                             \
   develop(bool, ShowSafepointMsgs, false,                                   \
-          "Show msg. about safepoint synch.")                               \
+          "Show message about safepoint synchronization")                   \
                                                                             \
   product(bool, SafepointTimeout, false,                                    \
           "Time out and warn or fail after SafepointTimeoutDelay "          \
@@ -729,19 +777,19 @@
           "Trace external suspend wait failures")                           \
                                                                             \
   product(bool, MaxFDLimit, true,                                           \
-          "Bump the number of file descriptors to max in solaris.")         \
+          "Bump the number of file descriptors to maximum in Solaris")      \
                                                                             \
   diagnostic(bool, LogEvents, true,                                         \
-             "Enable the various ring buffer event logs")                   \
+          "Enable the various ring buffer event logs")                      \
                                                                             \
   diagnostic(uintx, LogEventsBufferEntries, 10,                             \
-             "Enable the various ring buffer event logs")                   \
+          "Number of ring buffer event logs")                               \
                                                                             \
   product(bool, BytecodeVerificationRemote, true,                           \
-          "Enables the Java bytecode verifier for remote classes")          \
+          "Enable the Java bytecode verifier for remote classes")           \
                                                                             \
   product(bool, BytecodeVerificationLocal, false,                           \
-          "Enables the Java bytecode verifier for local classes")           \
+          "Enable the Java bytecode verifier for local classes")            \
                                                                             \
   develop(bool, ForceFloatExceptions, trueInDebug,                          \
           "Force exceptions on FP stack under/overflow")                    \
@@ -753,7 +801,7 @@
           "Trace java language assertions")                                 \
                                                                             \
   notproduct(bool, CheckAssertionStatusDirectives, false,                   \
-          "temporary - see javaClasses.cpp")                                \
+          "Temporary - see javaClasses.cpp")                                \
                                                                             \
   notproduct(bool, PrintMallocFree, false,                                  \
           "Trace calls to C heap malloc/free allocation")                   \
@@ -772,16 +820,16 @@
           "entering the VM")                                                \
                                                                             \
   notproduct(bool, CheckOopishValues, false,                                \
-          "Warn if value contains oop ( requires ZapDeadLocals)")           \
+          "Warn if value contains oop (requires ZapDeadLocals)")            \
                                                                             \
   develop(bool, UseMallocOnly, false,                                       \
-          "use only malloc/free for allocation (no resource area/arena)")   \
+          "Use only malloc/free for allocation (no resource area/arena)")   \
                                                                             \
   develop(bool, PrintMalloc, false,                                         \
-          "print all malloc/free calls")                                    \
+          "Print all malloc/free calls")                                    \
                                                                             \
   develop(bool, PrintMallocStatistics, false,                               \
-          "print malloc/free statistics")                                   \
+          "Print malloc/free statistics")                                   \
                                                                             \
   develop(bool, ZapResourceArea, trueInDebug,                               \
           "Zap freed resource/arena space with 0xABABABAB")                 \
@@ -793,7 +841,7 @@
           "Zap freed JNI handle space with 0xFEFEFEFE")                     \
                                                                             \
   notproduct(bool, ZapStackSegments, trueInDebug,                           \
-             "Zap allocated/freed Stack segments with 0xFADFADED")          \
+          "Zap allocated/freed stack segments with 0xFADFADED")             \
                                                                             \
   develop(bool, ZapUnusedHeapArea, trueInDebug,                             \
           "Zap unused heap space with 0xBAADBABE")                          \
@@ -808,7 +856,7 @@
           "Zap filler objects with 0xDEAFBABE")                             \
                                                                             \
   develop(bool, PrintVMMessages, true,                                      \
-          "Print vm messages on console")                                   \
+          "Print VM messages on console")                                   \
                                                                             \
   product(bool, PrintGCApplicationConcurrentTime, false,                    \
           "Print the time the application has been running")                \
@@ -817,21 +865,21 @@
           "Print the time the application has been stopped")                \
                                                                             \
   diagnostic(bool, VerboseVerification, false,                              \
-             "Display detailed verification details")                       \
+          "Display detailed verification details")                          \
                                                                             \
   notproduct(uintx, ErrorHandlerTest, 0,                                    \
-          "If > 0, provokes an error after VM initialization; the value"    \
-          "determines which error to provoke.  See test_error_handler()"    \
+          "If > 0, provokes an error after VM initialization; the value "   \
+          "determines which error to provoke. See test_error_handler() "    \
           "in debug.cpp.")                                                  \
                                                                             \
   develop(bool, Verbose, false,                                             \
-          "Prints additional debugging information from other modes")       \
+          "Print additional debugging information from other modes")        \
                                                                             \
   develop(bool, PrintMiscellaneous, false,                                  \
-          "Prints uncategorized debugging information (requires +Verbose)") \
+          "Print uncategorized debugging information (requires +Verbose)")  \
                                                                             \
   develop(bool, WizardMode, false,                                          \
-          "Prints much more debugging information")                         \
+          "Print much more debugging information")                          \
                                                                             \
   product(bool, ShowMessageBoxOnError, false,                               \
           "Keep process alive on VM fatal error")                           \
@@ -843,7 +891,7 @@
           "Let VM fatal error propagate to the OS (ie. WER on Windows)")    \
                                                                             \
   product(bool, SuppressFatalErrorMessage, false,                           \
-          "Do NO Fatal Error report [Avoid deadlock]")                      \
+          "Report NO fatal error message (avoid deadlock)")                 \
                                                                             \
   product(ccstrlist, OnError, "",                                           \
           "Run user-defined commands on fatal error; see VMError.cpp "      \
@@ -853,17 +901,17 @@
           "Run user-defined commands on first java.lang.OutOfMemoryError")  \
                                                                             \
   manageable(bool, HeapDumpBeforeFullGC, false,                             \
-          "Dump heap to file before any major stop-world GC")               \
+          "Dump heap to file before any major stop-the-world GC")           \
                                                                             \
   manageable(bool, HeapDumpAfterFullGC, false,                              \
-          "Dump heap to file after any major stop-world GC")                \
+          "Dump heap to file after any major stop-the-world GC")            \
                                                                             \
   manageable(bool, HeapDumpOnOutOfMemoryError, false,                       \
           "Dump heap to file when java.lang.OutOfMemoryError is thrown")    \
                                                                             \
   manageable(ccstr, HeapDumpPath, NULL,                                     \
-          "When HeapDumpOnOutOfMemoryError is on, the path (filename or"    \
-          "directory) of the dump file (defaults to java_pid<pid>.hprof"    \
+          "When HeapDumpOnOutOfMemoryError is on, the path (filename or "   \
+          "directory) of the dump file (defaults to java_pid<pid>.hprof "   \
           "in the working directory)")                                      \
                                                                             \
   develop(uintx, SegmentedHeapDumpThreshold, 2*G,                           \
@@ -877,10 +925,10 @@
           "Execute breakpoint upon encountering VM warning")                \
                                                                             \
   develop(bool, TraceVMOperation, false,                                    \
-          "Trace vm operations")                                            \
+          "Trace VM operations")                                            \
                                                                             \
   develop(bool, UseFakeTimers, false,                                       \
-          "Tells whether the VM should use system time or a fake timer")    \
+          "Tell whether the VM should use system time or a fake timer")     \
                                                                             \
   product(ccstr, NativeMemoryTracking, "off",                               \
           "Native memory tracking options")                                 \
@@ -890,22 +938,22 @@
                                                                             \
   diagnostic(bool, AutoShutdownNMT, true,                                   \
           "Automatically shutdown native memory tracking under stress "     \
-          "situation. When set to false, native memory tracking tries to "  \
+          "situations. When set to false, native memory tracking tries to " \
           "stay alive at the expense of JVM performance")                   \
                                                                             \
   diagnostic(bool, LogCompilation, false,                                   \
-          "Log compilation activity in detail to hotspot.log or LogFile")   \
+          "Log compilation activity in detail to LogFile")                  \
                                                                             \
   product(bool, PrintCompilation, false,                                    \
           "Print compilations")                                             \
                                                                             \
   diagnostic(bool, TraceNMethodInstalls, false,                             \
-             "Trace nmethod intallation")                                   \
+          "Trace nmethod installation")                                     \
                                                                             \
   diagnostic(intx, ScavengeRootsInCode, 2,                                  \
-             "0: do not allow scavengable oops in the code cache; "         \
-             "1: allow scavenging from the code cache; "                    \
-             "2: emit as many constants as the compiler can see")           \
+          "0: do not allow scavengable oops in the code cache; "            \
+          "1: allow scavenging from the code cache; "                       \
+          "2: emit as many constants as the compiler can see")              \
                                                                             \
   product(bool, AlwaysRestoreFPU, false,                                    \
           "Restore the FPU control word after every JNI call (expensive)")  \
@@ -926,7 +974,7 @@
           "Print assembly code (using external disassembler.so)")           \
                                                                             \
   diagnostic(ccstr, PrintAssemblyOptions, NULL,                             \
-          "Options string passed to disassembler.so")                       \
+          "Print options string passed to disassembler.so")                 \
                                                                             \
   diagnostic(bool, PrintNMethods, false,                                    \
           "Print assembly code for nmethods when generated")                \
@@ -947,20 +995,21 @@
           "Print exception handler tables for all nmethods when generated") \
                                                                             \
   develop(bool, StressCompiledExceptionHandlers, false,                     \
-         "Exercise compiled exception handlers")                            \
+          "Exercise compiled exception handlers")                           \
                                                                             \
   develop(bool, InterceptOSException, false,                                \
-          "Starts debugger when an implicit OS (e.g., NULL) "               \
+          "Start debugger when an implicit OS (e.g. NULL) "                 \
           "exception happens")                                              \
                                                                             \
   product(bool, PrintCodeCache, false,                                      \
           "Print the code cache memory usage when exiting")                 \
                                                                             \
   develop(bool, PrintCodeCache2, false,                                     \
-          "Print detailed usage info on the code cache when exiting")       \
+          "Print detailed usage information on the code cache when exiting")\
                                                                             \
   product(bool, PrintCodeCacheOnCompilation, false,                         \
-          "Print the code cache memory usage each time a method is compiled") \
+          "Print the code cache memory usage each time a method is "        \
+          "compiled")                                                       \
                                                                             \
   diagnostic(bool, PrintStubCode, false,                                    \
           "Print generated stub code")                                      \
@@ -972,40 +1021,40 @@
           "Omit backtraces for some 'hot' exceptions in optimized code")    \
                                                                             \
   product(bool, ProfilerPrintByteCodeStatistics, false,                     \
-          "Prints byte code statictics when dumping profiler output")       \
+          "Print bytecode statistics when dumping profiler output")         \
                                                                             \
   product(bool, ProfilerRecordPC, false,                                    \
-          "Collects tick for each 16 byte interval of compiled code")       \
+          "Collect ticks for each 16 byte interval of compiled code")       \
                                                                             \
   product(bool, ProfileVM, false,                                           \
-          "Profiles ticks that fall within VM (either in the VM Thread "    \
+          "Profile ticks that fall within VM (either in the VM Thread "     \
           "or VM code called through stubs)")                               \
                                                                             \
   product(bool, ProfileIntervals, false,                                    \
-          "Prints profiles for each interval (see ProfileIntervalsTicks)")  \
+          "Print profiles for each interval (see ProfileIntervalsTicks)")   \
                                                                             \
   notproduct(bool, ProfilerCheckIntervals, false,                           \
-          "Collect and print info on spacing of profiler ticks")            \
+          "Collect and print information on spacing of profiler ticks")     \
                                                                             \
   develop(bool, PrintJVMWarnings, false,                                    \
-          "Prints warnings for unimplemented JVM functions")                \
+          "Print warnings for unimplemented JVM functions")                 \
                                                                             \
   product(bool, PrintWarnings, true,                                        \
-          "Prints JVM warnings to output stream")                           \
+          "Print JVM warnings to output stream")                            \
                                                                             \
   notproduct(uintx, WarnOnStalledSpinLock, 0,                               \
-          "Prints warnings for stalled SpinLocks")                          \
+          "Print warnings for stalled SpinLocks")                           \
                                                                             \
   product(bool, RegisterFinalizersAtInit, true,                             \
           "Register finalizable objects at end of Object.<init> or "        \
           "after allocation")                                               \
                                                                             \
   develop(bool, RegisterReferences, true,                                   \
-          "Tells whether the VM should register soft/weak/final/phantom "   \
+          "Tell whether the VM should register soft/weak/final/phantom "    \
           "references")                                                     \
                                                                             \
   develop(bool, IgnoreRewrites, false,                                      \
-          "Supress rewrites of bytecodes in the oopmap generator. "         \
+          "Suppress rewrites of bytecodes in the oopmap generator. "        \
           "This is unsafe!")                                                \
                                                                             \
   develop(bool, PrintCodeCacheExtension, false,                             \
@@ -1015,8 +1064,7 @@
           "Enable the security JVM functions")                              \
                                                                             \
   develop(bool, ProtectionDomainVerification, true,                         \
-          "Verifies protection domain before resolution in system "         \
-          "dictionary")                                                     \
+          "Verify protection domain before resolution in system dictionary")\
                                                                             \
   product(bool, ClassUnloading, true,                                       \
           "Do unloading of classes")                                        \
@@ -1029,14 +1077,14 @@
           "Write memory usage profiling to log file")                       \
                                                                             \
   notproduct(bool, PrintSystemDictionaryAtExit, false,                      \
-          "Prints the system dictionary at exit")                           \
+          "Print the system dictionary at exit")                            \
                                                                             \
   experimental(intx, PredictedLoadedClassCount, 0,                          \
-          "Experimental: Tune loaded class cache starting size.")           \
+          "Experimental: Tune loaded class cache starting size")            \
                                                                             \
   diagnostic(bool, UnsyncloadClass, false,                                  \
           "Unstable: VM calls loadClass unsynchronized. Custom "            \
-          "class loader  must call VM synchronized for findClass "          \
+          "class loader must call VM synchronized for findClass "           \
           "and defineClass.")                                               \
                                                                             \
   product(bool, AlwaysLockClassLoader, false,                               \
@@ -1052,22 +1100,22 @@
           "Call loadClassInternal() rather than loadClass()")               \
                                                                             \
   product_pd(bool, DontYieldALot,                                           \
-          "Throw away obvious excess yield calls (for SOLARIS only)")       \
+          "Throw away obvious excess yield calls (for Solaris only)")       \
                                                                             \
   product_pd(bool, ConvertSleepToYield,                                     \
-          "Converts sleep(0) to thread yield "                              \
-          "(may be off for SOLARIS to improve GUI)")                        \
+          "Convert sleep(0) to thread yield "                               \
+          "(may be off for Solaris to improve GUI)")                        \
                                                                             \
   product(bool, ConvertYieldToSleep, false,                                 \
-          "Converts yield to a sleep of MinSleepInterval to simulate Win32 "\
-          "behavior (SOLARIS only)")                                        \
+          "Convert yield to a sleep of MinSleepInterval to simulate Win32 " \
+          "behavior (Solaris only)")                                        \
                                                                             \
   product(bool, UseBoundThreads, true,                                      \
-          "Bind user level threads to kernel threads (for SOLARIS only)")   \
+          "Bind user level threads to kernel threads (for Solaris only)")   \
                                                                             \
   develop(bool, UseDetachedThreads, true,                                   \
           "Use detached threads that are recycled upon termination "        \
-          "(for SOLARIS only)")                                             \
+          "(for Solaris only)")                                             \
                                                                             \
   product(bool, UseLWPSynchronization, true,                                \
           "Use LWP-based instead of libthread-based synchronization "       \
@@ -1077,41 +1125,43 @@
           "(Unstable) Various monitor synchronization tunables")            \
                                                                             \
   product(intx, EmitSync, 0,                                                \
-          "(Unsafe,Unstable) "                                              \
-          " Controls emission of inline sync fast-path code")               \
+          "(Unsafe, Unstable) "                                             \
+          "Control emission of inline sync fast-path code")                 \
                                                                             \
   product(intx, MonitorBound, 0, "Bound Monitor population")                \
                                                                             \
   product(bool, MonitorInUseLists, false, "Track Monitors for Deflation")   \
                                                                             \
-  product(intx, SyncFlags, 0, "(Unsafe,Unstable) Experimental Sync flags" ) \
-                                                                            \
-  product(intx, SyncVerbose, 0, "(Unstable)" )                              \
-                                                                            \
-  product(intx, ClearFPUAtPark, 0, "(Unsafe,Unstable)" )                    \
+  product(intx, SyncFlags, 0, "(Unsafe, Unstable) Experimental Sync flags") \
+                                                                            \
+  product(intx, SyncVerbose, 0, "(Unstable)")                               \
+                                                                            \
+  product(intx, ClearFPUAtPark, 0, "(Unsafe, Unstable)")                    \
                                                                             \
   product(intx, hashCode, 5,                                                \
-         "(Unstable) select hashCode generation algorithm" )                \
+          "(Unstable) Select hashCode generation algorithm")                \
                                                                             \
   product(intx, WorkAroundNPTLTimedWaitHang, 1,                             \
-         "(Unstable, Linux-specific)"                                       \
-         " avoid NPTL-FUTEX hang pthread_cond_timedwait" )                  \
+          "(Unstable, Linux-specific) "                                     \
+          "Avoid NPTL-FUTEX hang in pthread_cond_timedwait")                \
                                                                             \
   product(bool, FilterSpuriousWakeups, true,                                \
           "Prevent spurious or premature wakeups from object.wait "         \
           "(Solaris only)")                                                 \
                                                                             \
-  product(intx, NativeMonitorTimeout, -1, "(Unstable)" )                    \
-  product(intx, NativeMonitorFlags, 0, "(Unstable)" )                       \
-  product(intx, NativeMonitorSpinLimit, 20, "(Unstable)" )                  \
+  product(intx, NativeMonitorTimeout, -1, "(Unstable)")                     \
+                                                                            \
+  product(intx, NativeMonitorFlags, 0, "(Unstable)")                        \
+                                                                            \
+  product(intx, NativeMonitorSpinLimit, 20, "(Unstable)")                   \
                                                                             \
   develop(bool, UsePthreads, false,                                         \
           "Use pthread-based instead of libthread-based synchronization "   \
           "(SPARC only)")                                                   \
                                                                             \
   product(bool, AdjustConcurrency, false,                                   \
-          "call thr_setconcurrency at thread create time to avoid "         \
-          "LWP starvation on MP systems (For Solaris Only)")                \
+          "Call thr_setconcurrency at thread creation time to avoid "       \
+          "LWP starvation on MP systems (for Solaris only)")                \
                                                                             \
   product(bool, ReduceSignalUsage, false,                                   \
           "Reduce the use of OS signals in Java and/or the VM")             \
@@ -1120,13 +1170,14 @@
           "Share vtable stubs (smaller code but worse branch prediction")   \
                                                                             \
   develop(bool, LoadLineNumberTables, true,                                 \
-          "Tells whether the class file parser loads line number tables")   \
+          "Tell whether the class file parser loads line number tables")    \
                                                                             \
   develop(bool, LoadLocalVariableTables, true,                              \
-          "Tells whether the class file parser loads local variable tables")\
+          "Tell whether the class file parser loads local variable tables") \
                                                                             \
   develop(bool, LoadLocalVariableTypeTables, true,                          \
-          "Tells whether the class file parser loads local variable type tables")\
+          "Tell whether the class file parser loads local variable type "   \
+          "tables")                                                         \
                                                                             \
   product(bool, AllowUserSignalHandlers, false,                             \
           "Do not complain if the application installs signal handlers "    \
@@ -1157,10 +1208,12 @@
                                                                             \
   product(bool, EagerXrunInit, false,                                       \
           "Eagerly initialize -Xrun libraries; allows startup profiling, "  \
-          " but not all -Xrun libraries may support the state of the VM at this time") \
+          "but not all -Xrun libraries may support the state of the VM "    \
+          "at this time")                                                   \
                                                                             \
   product(bool, PreserveAllAnnotations, false,                              \
-          "Preserve RuntimeInvisibleAnnotations as well as RuntimeVisibleAnnotations") \
+          "Preserve RuntimeInvisibleAnnotations as well "                   \
+          "as RuntimeVisibleAnnotations")                                   \
                                                                             \
   develop(uintx, PreallocatedOutOfMemoryErrorCount, 4,                      \
           "Number of OutOfMemoryErrors preallocated with backtrace")        \
@@ -1235,7 +1288,7 @@
           "Trace level for JVMTI RedefineClasses")                          \
                                                                             \
   develop(bool, StressMethodComparator, false,                              \
-          "run the MethodComparator on all loaded methods")                 \
+          "Run the MethodComparator on all loaded methods")                 \
                                                                             \
   /* change to false by default sometime after Mustang */                   \
   product(bool, VerifyMergedCPBytecodes, true,                              \
@@ -1269,7 +1322,7 @@
           "Trace dependencies")                                             \
                                                                             \
   develop(bool, VerifyDependencies, trueInDebug,                            \
-         "Exercise and verify the compilation dependency mechanism")        \
+          "Exercise and verify the compilation dependency mechanism")       \
                                                                             \
   develop(bool, TraceNewOopMapGeneration, false,                            \
           "Trace OopMapGeneration")                                         \
@@ -1287,7 +1340,7 @@
           "Trace monitor matching failures during OopMapGeneration")        \
                                                                             \
   develop(bool, TraceOopMapRewrites, false,                                 \
-          "Trace rewritting of method oops during oop map generation")      \
+          "Trace rewriting of method oops during oop map generation")       \
                                                                             \
   develop(bool, TraceSafepoint, false,                                      \
           "Trace safepoint operations")                                     \
@@ -1305,10 +1358,10 @@
           "Trace setup time")                                               \
                                                                             \
   develop(bool, TraceProtectionDomainVerification, false,                   \
-          "Trace protection domain verifcation")                            \
+          "Trace protection domain verification")                           \
                                                                             \
   develop(bool, TraceClearedExceptions, false,                              \
-          "Prints when an exception is forcibly cleared")                   \
+          "Print when an exception is forcibly cleared")                    \
                                                                             \
   product(bool, TraceClassResolution, false,                                \
           "Trace all constant pool resolutions (for debugging)")            \
@@ -1322,7 +1375,7 @@
   /* gc */                                                                  \
                                                                             \
   product(bool, UseSerialGC, false,                                         \
-          "Use the serial garbage collector")                               \
+          "Use the Serial garbage collector")                               \
                                                                             \
   product(bool, UseG1GC, false,                                             \
           "Use the Garbage-First garbage collector")                        \
@@ -1341,16 +1394,16 @@
           "The collection count for the first maximum compaction")          \
                                                                             \
   product(bool, UseMaximumCompactionOnSystemGC, true,                       \
-          "In the Parallel Old garbage collector maximum compaction for "   \
-          "a system GC")                                                    \
+          "Use maximum compaction in the Parallel Old garbage collector "   \
+          "for a system GC")                                                \
                                                                             \
   product(uintx, ParallelOldDeadWoodLimiterMean, 50,                        \
-          "The mean used by the par compact dead wood"                      \
-          "limiter (a number between 0-100).")                              \
+          "The mean used by the parallel compact dead wood "                \
+          "limiter (a number between 0 and 100)")                           \
                                                                             \
   product(uintx, ParallelOldDeadWoodLimiterStdDev, 80,                      \
-          "The standard deviation used by the par compact dead wood"        \
-          "limiter (a number between 0-100).")                              \
+          "The standard deviation used by the parallel compact dead wood "  \
+          "limiter (a number between 0 and 100)")                           \
                                                                             \
   product(uintx, ParallelGCThreads, 0,                                      \
           "Number of parallel threads parallel gc will use")                \
@@ -1360,7 +1413,7 @@
           "parallel gc will use")                                           \
                                                                             \
   diagnostic(bool, ForceDynamicNumberOfGCThreads, false,                    \
-          "Force dynamic selection of the number of"                        \
+          "Force dynamic selection of the number of "                       \
           "parallel threads parallel gc will use to aid debugging")         \
                                                                             \
   product(uintx, HeapSizePerGCThread, ScaleForWordSize(64*M),               \
@@ -1371,7 +1424,7 @@
           "Trace the dynamic GC thread usage")                              \
                                                                             \
   develop(bool, ParallelOldGCSplitALot, false,                              \
-          "Provoke splitting (copying data from a young gen space to"       \
+          "Provoke splitting (copying data from a young gen space to "      \
           "multiple destination spaces)")                                   \
                                                                             \
   develop(uintx, ParallelOldGCSplitInterval, 3,                             \
@@ -1381,19 +1434,19 @@
           "Number of threads concurrent gc will use")                       \
                                                                             \
   product(uintx, YoungPLABSize, 4096,                                       \
-          "Size of young gen promotion labs (in HeapWords)")                \
+          "Size of young gen promotion LABs (in HeapWords)")                \
                                                                             \
   product(uintx, OldPLABSize, 1024,                                         \
-          "Size of old gen promotion labs (in HeapWords)")                  \
+          "Size of old gen promotion LABs (in HeapWords)")                  \
                                                                             \
   product(uintx, GCTaskTimeStampEntries, 200,                               \
           "Number of time stamp entries per gc worker thread")              \
                                                                             \
   product(bool, AlwaysTenure, false,                                        \
-          "Always tenure objects in eden. (ParallelGC only)")               \
+          "Always tenure objects in eden (ParallelGC only)")                \
                                                                             \
   product(bool, NeverTenure, false,                                         \
-          "Never tenure objects in eden, May tenure on overflow "           \
+          "Never tenure objects in eden, may tenure on overflow "           \
           "(ParallelGC only)")                                              \
                                                                             \
   product(bool, ScavengeBeforeFullGC, true,                                 \
@@ -1401,14 +1454,14 @@
           "used with UseParallelGC")                                        \
                                                                             \
   develop(bool, ScavengeWithObjectsInToSpace, false,                        \
-          "Allow scavenges to occur when to_space contains objects.")       \
+          "Allow scavenges to occur when to-space contains objects")        \
                                                                             \
   product(bool, UseConcMarkSweepGC, false,                                  \
           "Use Concurrent Mark-Sweep GC in the old generation")             \
                                                                             \
   product(bool, ExplicitGCInvokesConcurrent, false,                         \
-          "A System.gc() request invokes a concurrent collection;"          \
-          " (effective only when UseConcMarkSweepGC)")                      \
+          "A System.gc() request invokes a concurrent collection "          \
+          "(effective only when UseConcMarkSweepGC)")                       \
                                                                             \
   product(bool, ExplicitGCInvokesConcurrentAndUnloadsClasses, false,        \
           "A System.gc() request invokes a concurrent collection and "      \
@@ -1416,19 +1469,19 @@
           "(effective only when UseConcMarkSweepGC)")                       \
                                                                             \
   product(bool, GCLockerInvokesConcurrent, false,                           \
-          "The exit of a JNI CS necessitating a scavenge also"              \
-          " kicks off a bkgrd concurrent collection")                       \
+          "The exit of a JNI critical section necessitating a scavenge "    \
+          "also kicks off a background concurrent collection")              \
                                                                             \
   product(uintx, GCLockerEdenExpansionPercent, 5,                           \
-          "How much the GC can expand the eden by while the GC locker  "    \
+          "How much the GC can expand the eden by while the GC locker "     \
           "is active (as a percentage)")                                    \
                                                                             \
   diagnostic(intx, GCLockerRetryAllocationCount, 2,                         \
-          "Number of times to retry allocations when"                       \
-          " blocked by the GC locker")                                      \
+          "Number of times to retry allocations when "                      \
+          "blocked by the GC locker")                                       \
                                                                             \
   develop(bool, UseCMSAdaptiveFreeLists, true,                              \
-          "Use Adaptive Free Lists in the CMS generation")                  \
+          "Use adaptive free lists in the CMS generation")                  \
                                                                             \
   develop(bool, UseAsyncConcMarkSweepGC, true,                              \
           "Use Asynchronous Concurrent Mark-Sweep GC in the old generation")\
@@ -1443,44 +1496,46 @@
           "Use passing of collection from background to foreground")        \
                                                                             \
   product(bool, UseParNewGC, false,                                         \
-          "Use parallel threads in the new generation.")                    \
+          "Use parallel threads in the new generation")                     \
                                                                             \
   product(bool, ParallelGCVerbose, false,                                   \
-          "Verbose output for parallel GC.")                                \
+          "Verbose output for parallel gc")                                 \
                                                                             \
   product(uintx, ParallelGCBufferWastePct, 10,                              \
-          "Wasted fraction of parallel allocation buffer.")                 \
+          "Wasted fraction of parallel allocation buffer")                  \
                                                                             \
   diagnostic(bool, ParallelGCRetainPLAB, false,                             \
-             "Retain parallel allocation buffers across scavenges; "        \
-             " -- disabled because this currently conflicts with "          \
-             " parallel card scanning under certain conditions ")           \
+          "Retain parallel allocation buffers across scavenges; "           \
+          "it is disabled because this currently conflicts with "           \
+          "parallel card scanning under certain conditions")                \
                                                                             \
   product(uintx, TargetPLABWastePct, 10,                                    \
           "Target wasted space in last buffer as percent of overall "       \
           "allocation")                                                     \
                                                                             \
   product(uintx, PLABWeight, 75,                                            \
-          "Percentage (0-100) used to weight the current sample when"       \
-          "computing exponentially decaying average for ResizePLAB.")       \
+          "Percentage (0-100) used to weigh the current sample when "       \
+          "computing exponentially decaying average for ResizePLAB")        \
                                                                             \
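
PLABWeight, like CMSExpAvgFactor and CMS_FLSWeight further down, is the percentage weight given to the newest sample in an exponentially decaying average. A small sketch of the update rule this wording implies, assuming the weight applies to the current sample:

    // Exponentially decaying average as described by PLABWeight (default 75):
    //   avg' = (w/100) * sample + (1 - w/100) * avg
    #include <cstdio>

    static double decaying_avg(double avg, double sample, unsigned weight) {
      const double w = weight / 100.0;
      return w * sample + (1.0 - w) * avg;
    }

    int main() {
      double avg = 4096.0;                        // e.g., a PLAB size estimate
      const double samples[] = {5000.0, 3000.0, 4500.0};
      for (double s : samples) {
        avg = decaying_avg(avg, s, 75);           // weight = PLABWeight
        std::printf("avg = %.1f\n", avg);
      }
      return 0;
    }
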
   product(bool, ResizePLAB, true,                                           \
-          "Dynamically resize (survivor space) promotion labs")             \
+          "Dynamically resize (survivor space) promotion LABs")             \
                                                                             \
   product(bool, PrintPLAB, false,                                           \
-          "Print (survivor space) promotion labs sizing decisions")         \
+          "Print (survivor space) promotion LAB sizing decisions")          \
                                                                             \
   product(intx, ParGCArrayScanChunk, 50,                                    \
-          "Scan a subset and push remainder, if array is bigger than this") \
+          "Scan a subset of an object array and push the remainder "        \
+          "if the array is bigger than this")                               \
                                                                             \
   product(bool, ParGCUseLocalOverflow, false,                               \
           "Instead of a global overflow list, use local overflow stacks")   \
                                                                             \
   product(bool, ParGCTrimOverflow, true,                                    \
-          "Eagerly trim the local overflow lists (when ParGCUseLocalOverflow") \
+          "Eagerly trim the local overflow lists "                          \
+          "(when ParGCUseLocalOverflow)")                                   \
                                                                             \
   notproduct(bool, ParGCWorkQueueOverflowALot, false,                       \
-          "Whether we should simulate work queue overflow in ParNew")       \
+          "Simulate work queue overflow in ParNew")                         \
                                                                             \
   notproduct(uintx, ParGCWorkQueueOverflowInterval, 1000,                   \
           "An `interval' counter that determines how frequently "           \
@@ -1498,43 +1553,46 @@
           "during card table scanning")                                     \
                                                                             \
   product(uintx, CMSParPromoteBlocksToClaim, 16,                            \
-          "Number of blocks to attempt to claim when refilling CMS LAB for "\
-          "parallel GC.")                                                   \
+          "Number of blocks to attempt to claim when refilling CMS LABs "   \
+          "for parallel GC")                                                \
                                                                             \
   product(uintx, OldPLABWeight, 50,                                         \
-          "Percentage (0-100) used to weight the current sample when"       \
-          "computing exponentially decaying average for resizing CMSParPromoteBlocksToClaim.") \
+          "Percentage (0-100) used to weigh the current sample when "       \
+          "computing exponentially decaying average for resizing "          \
+          "CMSParPromoteBlocksToClaim")                                     \
                                                                             \
   product(bool, ResizeOldPLAB, true,                                        \
-          "Dynamically resize (old gen) promotion labs")                    \
+          "Dynamically resize (old gen) promotion LABs")                    \
                                                                             \
   product(bool, PrintOldPLAB, false,                                        \
-          "Print (old gen) promotion labs sizing decisions")                \
+          "Print (old gen) promotion LAB sizing decisions")                 \
                                                                             \
   product(uintx, CMSOldPLABMin, 16,                                         \
-          "Min size of CMS gen promotion lab caches per worker per blksize")\
+          "Minimum size of CMS gen promotion LAB caches per worker "        \
+          "per block size")                                                 \
                                                                             \
   product(uintx, CMSOldPLABMax, 1024,                                       \
-          "Max size of CMS gen promotion lab caches per worker per blksize")\
+          "Maximum size of CMS gen promotion LAB caches per worker "        \
+          "per block size")                                                 \
                                                                             \
   product(uintx, CMSOldPLABNumRefills, 4,                                   \
-          "Nominal number of refills of CMS gen promotion lab cache"        \
-          " per worker per block size")                                     \
+          "Nominal number of refills of CMS gen promotion LAB cache "       \
+          "per worker per block size")                                      \
                                                                             \
   product(bool, CMSOldPLABResizeQuicker, false,                             \
-          "Whether to react on-the-fly during a scavenge to a sudden"       \
-          " change in block demand rate")                                   \
+          "React on-the-fly during a scavenge to a sudden "                 \
+          "change in block demand rate")                                    \
                                                                             \
   product(uintx, CMSOldPLABToleranceFactor, 4,                              \
-          "The tolerance of the phase-change detector for on-the-fly"       \
-          " PLAB resizing during a scavenge")                               \
+          "The tolerance of the phase-change detector for on-the-fly "      \
+          "PLAB resizing during a scavenge")                                \
                                                                             \
   product(uintx, CMSOldPLABReactivityFactor, 2,                             \
-          "The gain in the feedback loop for on-the-fly PLAB resizing"      \
-          " during a scavenge")                                             \
+          "The gain in the feedback loop for on-the-fly PLAB resizing "     \
+          "during a scavenge")                                              \
                                                                             \
   product(bool, AlwaysPreTouch, false,                                      \
-          "It forces all freshly committed pages to be pre-touched.")       \
+          "Force all freshly committed pages to be pre-touched")            \
                                                                             \
   product_pd(uintx, CMSYoungGenPerWorker,                                   \
           "The maximum size of young gen chosen by default per GC worker "  \
@@ -1544,64 +1602,67 @@
           "Whether CMS GC should operate in \"incremental\" mode")          \
                                                                             \
   product(uintx, CMSIncrementalDutyCycle, 10,                               \
-          "CMS incremental mode duty cycle (a percentage, 0-100).  If"      \
-          "CMSIncrementalPacing is enabled, then this is just the initial"  \
-          "value")                                                          \
+          "Percentage (0-100) of CMS incremental mode duty cycle. If "      \
+          "CMSIncrementalPacing is enabled, then this is just the initial " \
+          "value")                                                          \
                                                                             \
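
In incremental mode the duty cycle is the share of the time between young collections during which CMS may run, and CMSIncrementalOffset below shifts that window within the period. A sketch of the arithmetic, with the young-GC period assumed purely for illustration:

    // Sketch: a duty cycle of d percent means CMS may run for d% of the
    // interval between young GCs; CMSIncrementalOffset shifts that window.
    #include <cstdio>

    int main() {
      const unsigned duty_cycle = 10;    // CMSIncrementalDutyCycle default
      const unsigned offset     = 0;     // CMSIncrementalOffset default
      const double   period_ms  = 1000;  // assumed time between young GCs

      double start = period_ms * offset / 100.0;
      double width = period_ms * duty_cycle / 100.0;
      std::printf("CMS active from %.0f ms to %.0f ms of each %.0f ms period\n",
                  start, start + width, period_ms);
      return 0;
    }
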
   product(bool, CMSIncrementalPacing, true,                                 \
           "Whether the CMS incremental mode duty cycle should be "          \
           "automatically adjusted")                                         \
                                                                             \
   product(uintx, CMSIncrementalDutyCycleMin, 0,                             \
-          "Lower bound on the duty cycle when CMSIncrementalPacing is "     \
-          "enabled (a percentage, 0-100)")                                  \
+          "Minimum percentage (0-100) of the CMS incremental duty cycle "   \
+          "used when CMSIncrementalPacing is enabled")                      \
                                                                             \
   product(uintx, CMSIncrementalSafetyFactor, 10,                            \
           "Percentage (0-100) used to add conservatism when computing the " \
           "duty cycle")                                                     \
                                                                             \
   product(uintx, CMSIncrementalOffset, 0,                                   \
-          "Percentage (0-100) by which the CMS incremental mode duty cycle" \
-          " is shifted to the right within the period between young GCs")   \
+          "Percentage (0-100) by which the CMS incremental mode duty cycle "\
+          "is shifted to the right within the period between young GCs")    \
                                                                             \
   product(uintx, CMSExpAvgFactor, 50,                                       \
-          "Percentage (0-100) used to weight the current sample when"       \
-          "computing exponential averages for CMS statistics.")             \
+          "Percentage (0-100) used to weigh the current sample when "       \
+          "computing exponential averages for CMS statistics")              \
                                                                             \
   product(uintx, CMS_FLSWeight, 75,                                         \
-          "Percentage (0-100) used to weight the current sample when"       \
-          "computing exponentially decating averages for CMS FLS statistics.") \
+          "Percentage (0-100) used to weigh the current sample when "       \
+          "computing exponentially decaying averages for CMS FLS "          \
+          "statistics")                                                     \
                                                                             \
   product(uintx, CMS_FLSPadding, 1,                                         \
-          "The multiple of deviation from mean to use for buffering"        \
-          "against volatility in free list demand.")                        \
+          "The multiple of deviation from mean to use for buffering "       \
+          "against volatility in free list demand")                         \
                                                                             \
   product(uintx, FLSCoalescePolicy, 2,                                      \
-          "CMS: Aggression level for coalescing, increasing from 0 to 4")   \
+          "CMS: aggressiveness level for coalescing, increasing "           \
+          "from 0 to 4")                                                    \
                                                                             \
   product(bool, FLSAlwaysCoalesceLarge, false,                              \
-          "CMS: Larger free blocks are always available for coalescing")    \
+          "CMS: larger free blocks are always available for coalescing")    \
                                                                             \
   product(double, FLSLargestBlockCoalesceProximity, 0.99,                   \
-          "CMS: the smaller the percentage the greater the coalition force")\
+          "CMS: the smaller the percentage, the greater the coalescing "    \
+          "force")                                                          \
                                                                             \
   product(double, CMSSmallCoalSurplusPercent, 1.05,                         \
-          "CMS: the factor by which to inflate estimated demand of small"   \
-          " block sizes to prevent coalescing with an adjoining block")     \
+          "CMS: the factor by which to inflate estimated demand of small "  \
+          "block sizes to prevent coalescing with an adjoining block")      \
                                                                             \
   product(double, CMSLargeCoalSurplusPercent, 0.95,                         \
-          "CMS: the factor by which to inflate estimated demand of large"   \
-          " block sizes to prevent coalescing with an adjoining block")     \
+          "CMS: the factor by which to inflate estimated demand of large "  \
+          "block sizes to prevent coalescing with an adjoining block")      \
                                                                             \
   product(double, CMSSmallSplitSurplusPercent, 1.10,                        \
-          "CMS: the factor by which to inflate estimated demand of small"   \
-          " block sizes to prevent splitting to supply demand for smaller"  \
-          " blocks")                                                        \
+          "CMS: the factor by which to inflate estimated demand of small "  \
+          "block sizes to prevent splitting to supply demand for smaller "  \
+          "blocks")                                                         \
                                                                             \
   product(double, CMSLargeSplitSurplusPercent, 1.00,                        \
-          "CMS: the factor by which to inflate estimated demand of large"   \
-          " block sizes to prevent splitting to supply demand for smaller"  \
-          " blocks")                                                        \
+          "CMS: the factor by which to inflate estimated demand of large "  \
+          "block sizes to prevent splitting to supply demand for smaller "  \
+          "blocks")                                                         \
                                                                             \
   product(bool, CMSExtrapolateSweep, false,                                 \
           "CMS: cushion for block demand during sweep")                     \
@@ -1613,11 +1674,11 @@
                                                                             \
   product(uintx, CMS_SweepPadding, 1,                                       \
           "The multiple of deviation from mean to use for buffering "       \
-          "against volatility in inter-sweep duration.")                    \
+          "against volatility in inter-sweep duration")                     \
                                                                             \
   product(uintx, CMS_SweepTimerThresholdMillis, 10,                         \
           "Skip block flux-rate sampling for an epoch unless inter-sweep "  \
-          "duration exceeds this threhold in milliseconds")                 \
+          "duration exceeds this threshold in milliseconds")                \
                                                                             \
   develop(bool, CMSTraceIncrementalMode, false,                             \
           "Trace CMS incremental mode")                                     \
@@ -1632,14 +1693,15 @@
           "Whether class unloading enabled when using CMS GC")              \
                                                                             \
   product(uintx, CMSClassUnloadingMaxInterval, 0,                           \
-          "When CMS class unloading is enabled, the maximum CMS cycle count"\
-          " for which classes may not be unloaded")                         \
+          "When CMS class unloading is enabled, the maximum CMS cycle "     \
+          "count for which classes may not be unloaded")                    \
                                                                             \
   product(bool, CMSCompactWhenClearAllSoftRefs, true,                       \
-          "Compact when asked to collect CMS gen with clear_all_soft_refs") \
+          "Compact when asked to collect CMS gen with "                     \
+          "clear_all_soft_refs()")                                          \
                                                                             \
   product(bool, UseCMSCompactAtFullCollection, true,                        \
-          "Use mark sweep compact at full collections")                     \
+          "Use Mark-Sweep-Compact algorithm at full collections")           \
                                                                             \
   product(uintx, CMSFullGCsBeforeCompaction, 0,                             \
           "Number of CMS full collection done before compaction if > 0")    \
@@ -1661,38 +1723,37 @@
           "Warn in case of excessive CMS looping")                          \
                                                                             \
   develop(bool, CMSOverflowEarlyRestoration, false,                         \
-          "Whether preserved marks should be restored early")               \
+          "Restore preserved marks early")                                  \
                                                                             \
   product(uintx, MarkStackSize, NOT_LP64(32*K) LP64_ONLY(4*M),              \
           "Size of marking stack")                                          \
                                                                             \
   product(uintx, MarkStackSizeMax, NOT_LP64(4*M) LP64_ONLY(512*M),          \
-          "Max size of marking stack")                                      \
+          "Maximum size of marking stack")                                  \
                                                                             \
   notproduct(bool, CMSMarkStackOverflowALot, false,                         \
-          "Whether we should simulate frequent marking stack / work queue"  \
-          " overflow")                                                      \
+          "Simulate frequent marking stack / work queue overflow")          \
                                                                             \
   notproduct(uintx, CMSMarkStackOverflowInterval, 1000,                     \
-          "An `interval' counter that determines how frequently"            \
-          " we simulate overflow; a smaller number increases frequency")    \
+          "An \"interval\" counter that determines how frequently "         \
+          "to simulate overflow; a smaller number increases frequency")     \
                                                                             \
   product(uintx, CMSMaxAbortablePrecleanLoops, 0,                           \
-          "(Temporary, subject to experimentation)"                         \
+          "(Temporary, subject to experimentation) "                        \
           "Maximum number of abortable preclean iterations, if > 0")        \
                                                                             \
   product(intx, CMSMaxAbortablePrecleanTime, 5000,                          \
-          "(Temporary, subject to experimentation)"                         \
-          "Maximum time in abortable preclean in ms")                       \
+          "(Temporary, subject to experimentation) "                        \
+          "Maximum time in abortable preclean (in milliseconds)")           \
                                                                             \
   product(uintx, CMSAbortablePrecleanMinWorkPerIteration, 100,              \
-          "(Temporary, subject to experimentation)"                         \
+          "(Temporary, subject to experimentation) "                        \
           "Nominal minimum work per abortable preclean iteration")          \
                                                                             \
   manageable(intx, CMSAbortablePrecleanWaitMillis, 100,                     \
-          "(Temporary, subject to experimentation)"                         \
-          " Time that we sleep between iterations when not given"           \
-          " enough work per iteration")                                     \
+          "(Temporary, subject to experimentation) "                        \
+          "Time that we sleep between iterations when not given "           \
+          "enough work per iteration")                                      \
                                                                             \
   product(uintx, CMSRescanMultiple, 32,                                     \
           "Size (in cards) of CMS parallel rescan task")                    \
@@ -1710,23 +1771,24 @@
           "Whether parallel remark enabled (only if ParNewGC)")             \
                                                                             \
   product(bool, CMSParallelSurvivorRemarkEnabled, true,                     \
-          "Whether parallel remark of survivor space"                       \
-          " enabled (effective only if CMSParallelRemarkEnabled)")          \
+          "Whether parallel remark of survivor space is "                   \
+          "enabled (effective only if CMSParallelRemarkEnabled)")           \
                                                                             \
   product(bool, CMSPLABRecordAlways, true,                                  \
-          "Whether to always record survivor space PLAB bdries"             \
-          " (effective only if CMSParallelSurvivorRemarkEnabled)")          \
+          "Always record survivor space PLAB boundaries (effective only "   \
+          "if CMSParallelSurvivorRemarkEnabled)")                           \
                                                                             \
   product(bool, CMSEdenChunksRecordAlways, true,                            \
-          "Whether to always record eden chunks used for "                  \
-          "the parallel initial mark or remark of eden" )                   \
+          "Always record eden chunks used for the parallel initial mark "   \
+          "or remark of eden")                                              \
                                                                             \
   product(bool, CMSPrintEdenSurvivorChunks, false,                          \
           "Print the eden and the survivor chunks used for the parallel "   \
           "initial mark or remark of the eden/survivor spaces")             \
                                                                             \
   product(bool, CMSConcurrentMTEnabled, true,                               \
-          "Whether multi-threaded concurrent work enabled (if ParNewGC)")   \
+          "Whether multi-threaded concurrent work is enabled "              \
+          "(effective only if ParNewGC)")                                   \
                                                                             \
   product(bool, CMSPrecleaningEnabled, true,                                \
           "Whether concurrent precleaning enabled")                         \
@@ -1735,12 +1797,12 @@
           "Maximum number of precleaning iteration passes")                 \
                                                                             \
   product(uintx, CMSPrecleanNumerator, 2,                                   \
-          "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence"  \
-          " ratio")                                                         \
+          "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence " \
+          "ratio")                                                          \
                                                                             \
   product(uintx, CMSPrecleanDenominator, 3,                                 \
-          "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence"  \
-          " ratio")                                                         \
+          "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence " \
+          "ratio")                                                          \
                                                                             \
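
The two flags above form the fraction CMSPrecleanNumerator/CMSPrecleanDenominator (2/3 by default), used as a convergence test for repeated precleaning passes: iterating is worthwhile only while each pass still shrinks the remaining work by at least that factor. A hedged sketch of such a loop; only the ratio itself comes from the flags, the loop structure is an assumption:

    #include <cstdio>

    int main() {
      const double ratio = 2.0 / 3.0;  // CMSPrecleanNumerator : Denominator
      const int max_passes = 3;        // CMSPrecleanIter default
      double work = 10000.0;           // e.g., dirty cards left to preclean
      for (int pass = 1; pass <= max_passes; pass++) {
        double before = work;
        work *= 0.5;                   // pretend each pass halves the work
        std::printf("pass %d: %.0f -> %.0f\n", pass, before, work);
        if (work > ratio * before) break;  // shrinking too slowly; stop
      }
      return 0;
    }
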
   product(bool, CMSPrecleanRefLists1, true,                                 \
           "Preclean ref lists during (initial) preclean phase")             \
@@ -1755,7 +1817,7 @@
           "Preclean survivors during abortable preclean phase")             \
                                                                             \
   product(uintx, CMSPrecleanThreshold, 1000,                                \
-          "Don't re-iterate if #dirty cards less than this")                \
+          "Do not iterate again if number of dirty cards is less than this")\
                                                                             \
   product(bool, CMSCleanOnEnter, true,                                      \
           "Clean-on-enter optimization for reducing number of dirty cards") \
@@ -1764,14 +1826,16 @@
           "Choose variant (1,2) of verification following remark")          \
                                                                             \
   product(uintx, CMSScheduleRemarkEdenSizeThreshold, 2*M,                   \
-          "If Eden used is below this value, don't try to schedule remark") \
+          "If Eden size is below this, do not try to schedule remark")      \
                                                                             \
   product(uintx, CMSScheduleRemarkEdenPenetration, 50,                      \
-          "The Eden occupancy % at which to try and schedule remark pause") \
+          "The Eden occupancy percentage (0-100) at which "                 \
+          "to try and schedule remark pause")                               \
                                                                             \
   product(uintx, CMSScheduleRemarkSamplingRatio, 5,                         \
-          "Start sampling Eden top at least before yg occupancy reaches"    \
-          " 1/<ratio> of the size at which we plan to schedule remark")     \
+          "Start sampling eden top at least before young gen "              \
+          "occupancy reaches 1/<ratio> of the size at which "               \
+          "we plan to schedule remark")                                     \
                                                                             \
   product(uintx, CMSSamplingGrain, 16*K,                                    \
           "The minimum distance between eden samples for CMS (see above)")  \
@@ -1793,27 +1857,27 @@
           "should start a collection cycle")                                \
                                                                             \
   product(bool, CMSYield, true,                                             \
-          "Yield between steps of concurrent mark & sweep")                 \
+          "Yield between steps of CMS")                                     \
                                                                             \
   product(uintx, CMSBitMapYieldQuantum, 10*M,                               \
-          "Bitmap operations should process at most this many bits"         \
+          "Bitmap operations should process at most this many bits "        \
           "between yields")                                                 \
                                                                             \
   product(bool, CMSDumpAtPromotionFailure, false,                           \
           "Dump useful information about the state of the CMS old "         \
-          " generation upon a promotion failure.")                          \
+          "generation upon a promotion failure")                            \
                                                                             \
   product(bool, CMSPrintChunksInDump, false,                                \
           "In a dump enabled by CMSDumpAtPromotionFailure, include "        \
-          " more detailed information about the free chunks.")              \
+          "more detailed information about the free chunks")                \
                                                                             \
   product(bool, CMSPrintObjectsInDump, false,                               \
           "In a dump enabled by CMSDumpAtPromotionFailure, include "        \
-          " more detailed information about the allocated objects.")        \
+          "more detailed information about the allocated objects")          \
                                                                             \
   diagnostic(bool, FLSVerifyAllHeapReferences, false,                       \
-          "Verify that all refs across the FLS boundary "                   \
-          " are to valid objects")                                          \
+          "Verify that all references across the FLS boundary "             \
+          "are to valid objects")                                           \
                                                                             \
   diagnostic(bool, FLSVerifyLists, false,                                   \
           "Do lots of (expensive) FreeListSpace verification")              \
@@ -1825,17 +1889,18 @@
           "Do lots of (expensive) FLS dictionary verification")             \
                                                                             \
   develop(bool, VerifyBlockOffsetArray, false,                              \
-          "Do (expensive!) block offset array verification")                \
+          "Do (expensive) block offset array verification")                 \
                                                                             \
   diagnostic(bool, BlockOffsetArrayUseUnallocatedBlock, false,              \
-          "Maintain _unallocated_block in BlockOffsetArray"                 \
-          " (currently applicable only to CMS collector)")                  \
+          "Maintain _unallocated_block in BlockOffsetArray "                \
+          "(currently applicable only to CMS collector)")                   \
                                                                             \
   develop(bool, TraceCMSState, false,                                       \
           "Trace the state of the CMS collection")                          \
                                                                             \
   product(intx, RefDiscoveryPolicy, 0,                                      \
-          "Whether reference-based(0) or referent-based(1)")                \
+          "Select type of reference discovery policy: "                     \
+          "reference-based(0) or referent-based(1)")                        \
                                                                             \
   product(bool, ParallelRefProcEnabled, false,                              \
           "Enable parallel reference processing whenever possible")         \
@@ -1863,7 +1928,7 @@
           "denotes 'do constant GC cycles'.")                               \
                                                                             \
   product(bool, UseCMSInitiatingOccupancyOnly, false,                       \
-          "Only use occupancy as a crierion for starting a CMS collection") \
+          "Only use occupancy as a criterion for starting a CMS collection")\
                                                                             \
   product(uintx, CMSIsTooFullPercentage, 98,                                \
           "An absolute ceiling above which CMS will always consider the "   \
@@ -1875,7 +1940,7 @@
                                                                             \
   notproduct(bool, CMSVerifyReturnedBytes, false,                           \
           "Check that all the garbage collected was returned to the "       \
-          "free lists.")                                                    \
+          "free lists")                                                     \
                                                                             \
   notproduct(bool, ScavengeALot, false,                                     \
           "Force scavenge at every Nth exit from the runtime system "       \
@@ -1890,16 +1955,16 @@
                                                                             \
   product(bool, PrintPromotionFailure, false,                               \
           "Print additional diagnostic information following "              \
-          " promotion failure")                                             \
+          "promotion failure")                                              \
                                                                             \
   notproduct(bool, PromotionFailureALot, false,                             \
           "Use promotion failure handling on every youngest generation "    \
           "collection")                                                     \
                                                                             \
   develop(uintx, PromotionFailureALotCount, 1000,                           \
-          "Number of promotion failures occurring at ParGCAllocBuffer"      \
+          "Number of promotion failures occurring at ParGCAllocBuffer "     \
           "refill attempts (ParNew) or promotion attempts "                 \
-          "(other young collectors) ")                                      \
+          "(other young collectors)")                                       \
                                                                             \
   develop(uintx, PromotionFailureALotInterval, 5,                           \
           "Total collections between promotion failures alot")              \
@@ -1918,7 +1983,7 @@
           "Ratio of hard spins to calls to yield")                          \
                                                                             \
   develop(uintx, ObjArrayMarkingStride, 512,                                \
-          "Number of ObjArray elements to push onto the marking stack"      \
+          "Number of object array elements to push onto the marking stack " \
           "before pushing a continuation entry")                            \
                                                                             \
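
ObjArrayMarkingStride bounds chunked scanning of large object arrays: a GC worker marks one stride's worth of elements, then pushes a continuation entry so the remainder can be resumed or stolen later. A simplified sketch of the pattern, with illustrative names:

    // Simplified strided marking of a large object array: mark one stride,
    // then push a continuation entry for the rest (names are illustrative).
    #include <cstdio>

    static void mark_array_chunk(const int* arr, int from, int len, int stride) {
      int to = (from + stride < len) ? from + stride : len;
      for (int i = from; i < to; i++) {
        (void)arr[i];                        // marking of arr[i] would happen here
      }
      if (to < len) {
        std::printf("push continuation [%d, %d)\n", to, len);
      }
    }

    int main() {
      static int arr[2000];
      mark_array_chunk(arr, 0, 2000, 512);   // 512 = ObjArrayMarkingStride
      return 0;
    }
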
   develop(bool, MetadataAllocationFailALot, false,                          \
@@ -1926,14 +1991,7 @@
           "MetadataAllocationFailALotInterval")                             \
                                                                             \
   develop(uintx, MetadataAllocationFailALotInterval, 1000,                  \
-          "metadata allocation failure alot interval")                      \
-                                                                            \
-  develop(bool, MetaDataDeallocateALot, false,                              \
-          "Deallocation bunches of metadata at intervals controlled by "    \
-          "MetaDataAllocateALotInterval")                                   \
-                                                                            \
-  develop(uintx, MetaDataDeallocateALotInterval, 100,                       \
-          "Metadata deallocation alot interval")                            \
+          "Metadata allocation failure a lot interval")                     \
                                                                             \
   develop(bool, TraceMetadataChunkAllocation, false,                        \
           "Trace chunk metadata allocations")                               \
@@ -1945,7 +2003,7 @@
           "Trace virtual space metadata allocations")                       \
                                                                             \
   notproduct(bool, ExecuteInternalVMTests, false,                           \
-          "Enable execution of internal VM tests.")                         \
+          "Enable execution of internal VM tests")                          \
                                                                             \
   notproduct(bool, VerboseInternalVMTests, false,                           \
           "Turn on logging for internal VM tests.")                         \
@@ -1953,7 +2011,7 @@
   product_pd(bool, UseTLAB, "Use thread-local object allocation")           \
                                                                             \
   product_pd(bool, ResizeTLAB,                                              \
-          "Dynamically resize tlab size for threads")                       \
+          "Dynamically resize TLAB size for threads")                       \
                                                                             \
   product(bool, ZeroTLAB, false,                                            \
           "Zero out the newly created TLAB")                                \
@@ -1965,7 +2023,8 @@
           "Print various TLAB related information")                         \
                                                                             \
   product(bool, TLABStats, true,                                            \
-          "Print various TLAB related information")                         \
+          "Provide more detailed and expensive TLAB statistics "            \
+          "(with PrintTLAB)")                                               \
                                                                             \
   EMBEDDED_ONLY(product(bool, LowMemoryProtection, true,                    \
           "Enable LowMemoryProtection"))                                    \
@@ -1999,14 +2058,14 @@
           "Fraction (1/n) of real memory used for initial heap size")       \
                                                                             \
   develop(uintx, MaxVirtMemFraction, 2,                                     \
-          "Maximum fraction (1/n) of virtual memory used for ergonomically" \
+          "Maximum fraction (1/n) of virtual memory used for ergonomically "\
           "determining maximum heap size")                                  \
                                                                             \
   product(bool, UseAutoGCSelectPolicy, false,                               \
           "Use automatic collection selection policy")                      \
                                                                             \
   product(uintx, AutoGCSelectPauseMillis, 5000,                             \
-          "Automatic GC selection pause threshhold in ms")                  \
+          "Automatic GC selection pause threshold in milliseconds")         \
                                                                             \
   product(bool, UseAdaptiveSizePolicy, true,                                \
           "Use adaptive generation sizing policies")                        \
@@ -2021,7 +2080,7 @@
           "Use adaptive young-old sizing policies at major collections")    \
                                                                             \
   product(bool, UseAdaptiveSizePolicyWithSystemGC, false,                   \
-          "Use statistics from System.GC for adaptive size policy")         \
+          "Include statistics from System.gc() for adaptive size policy")   \
                                                                             \
   product(bool, UseAdaptiveGCBoundary, false,                               \
           "Allow young-old boundary to move")                               \
@@ -2033,16 +2092,16 @@
           "Resize the virtual spaces of the young or old generations")      \
                                                                             \
   product(uintx, AdaptiveSizeThroughPutPolicy, 0,                           \
-          "Policy for changeing generation size for throughput goals")      \
+          "Policy for changing generation size for throughput goals")       \
                                                                             \
   product(uintx, AdaptiveSizePausePolicy, 0,                                \
           "Policy for changing generation size for pause goals")            \
                                                                             \
   develop(bool, PSAdjustTenuredGenForMinorPause, false,                     \
-          "Adjust tenured generation to achive a minor pause goal")         \
+          "Adjust tenured generation to achieve a minor pause goal")        \
                                                                             \
   develop(bool, PSAdjustYoungGenForMajorPause, false,                       \
-          "Adjust young generation to achive a major pause goal")           \
+          "Adjust young generation to achieve a major pause goal")          \
                                                                             \
   product(uintx, AdaptiveSizePolicyInitializingSteps, 20,                   \
           "Number of steps where heuristics is used before data is used")   \
@@ -2097,14 +2156,15 @@
           "Decay factor to TenuredGenerationSizeIncrement")                 \
                                                                             \
   product(uintx, MaxGCPauseMillis, max_uintx,                               \
-          "Adaptive size policy maximum GC pause time goal in msec, "       \
-          "or (G1 Only) the max. GC time per MMU time slice")               \
+          "Adaptive size policy maximum GC pause time goal in millisecond, "\
+          "or (G1 Only) the maximum GC time per MMU time slice")            \
                                                                             \
   product(uintx, GCPauseIntervalMillis, 0,                                  \
           "Time slice for MMU specification")                               \
                                                                             \
   product(uintx, MaxGCMinorPauseMillis, max_uintx,                          \
-          "Adaptive size policy maximum GC minor pause time goal in msec")  \
+          "Adaptive size policy maximum GC minor pause time goal "          \
+          "in millisecond")                                                 \
                                                                             \
   product(uintx, GCTimeRatio, 99,                                           \
           "Adaptive size policy application time to GC time ratio")         \
@@ -2122,7 +2182,7 @@
           "Minimum ratio of young generation/survivor space size")          \
                                                                             \
   product(uintx, InitialSurvivorRatio, 8,                                   \
-          "Initial ratio of eden/survivor space size")                      \
+          "Initial ratio of young generation/survivor space size")          \
                                                                             \
   product(uintx, BaseFootPrintEstimate, 256*M,                              \
           "Estimate of footprint other than Java Heap")                     \
@@ -2132,8 +2192,8 @@
           "before an OutOfMemory error is thrown")                          \
                                                                             \
   product(uintx, GCTimeLimit, 98,                                           \
-          "Limit of proportion of time spent in GC before an OutOfMemory"   \
-          "error is thrown (used with GCHeapFreeLimit)")                    \
+          "Limit of the proportion of time spent in GC before "             \
+          "an OutOfMemoryError is thrown (used with GCHeapFreeLimit)")      \
                                                                             \
   product(uintx, GCHeapFreeLimit, 2,                                        \
           "Minimum percentage of free space after a full GC before an "     \
@@ -2155,7 +2215,7 @@
           "How many fields ahead to prefetch in oop scan (<= 0 means off)") \
                                                                             \
   diagnostic(bool, VerifySilently, false,                                   \
-          "Don't print print the verification progress")                    \
+          "Do not print the verification progress")                         \
                                                                             \
   diagnostic(bool, VerifyDuringStartup, false,                              \
           "Verify memory system before executing any Java code "            \
@@ -2178,7 +2238,7 @@
                                                                             \
   diagnostic(bool, DeferInitialCardMark, false,                             \
           "When +ReduceInitialCardMarks, explicitly defer any that "        \
-           "may arise from new_pre_store_barrier")                          \
+          "may arise from new_pre_store_barrier")                           \
                                                                             \
   diagnostic(bool, VerifyRememberedSets, false,                             \
           "Verify GC remembered sets")                                      \
@@ -2187,10 +2247,10 @@
           "Verify GC object start array if verify before/after")            \
                                                                             \
   product(bool, DisableExplicitGC, false,                                   \
-          "Tells whether calling System.gc() does a full GC")               \
+          "Ignore calls to System.gc()")                                    \
                                                                             \
   notproduct(bool, CheckMemoryInitialization, false,                        \
-          "Checks memory initialization")                                   \
+          "Check memory initialization")                                    \
                                                                             \
   product(bool, CollectGen0First, false,                                    \
           "Collect youngest generation before each full GC")                \
@@ -2211,44 +2271,45 @@
           "Stride through processors when distributing processes")          \
                                                                             \
   product(uintx, CMSCoordinatorYieldSleepCount, 10,                         \
-          "number of times the coordinator GC thread will sleep while "     \
+          "Number of times the coordinator GC thread will sleep while "     \
           "yielding before giving up and resuming GC")                      \
                                                                             \
   product(uintx, CMSYieldSleepCount, 0,                                     \
-          "number of times a GC thread (minus the coordinator) "            \
+          "Number of times a GC thread (minus the coordinator) "            \
           "will sleep while yielding before giving up and resuming GC")     \
                                                                             \
   /* gc tracing */                                                          \
   manageable(bool, PrintGC, false,                                          \
-          "Print message at garbage collect")                               \
+          "Print message at garbage collection")                            \
                                                                             \
   manageable(bool, PrintGCDetails, false,                                   \
-          "Print more details at garbage collect")                          \
+          "Print more details at garbage collection")                       \
                                                                             \
   manageable(bool, PrintGCDateStamps, false,                                \
-          "Print date stamps at garbage collect")                           \
+          "Print date stamps at garbage collection")                        \
                                                                             \
   manageable(bool, PrintGCTimeStamps, false,                                \
-          "Print timestamps at garbage collect")                            \
+          "Print timestamps at garbage collection")                         \
                                                                             \
   product(bool, PrintGCTaskTimeStamps, false,                               \
           "Print timestamps for individual gc worker thread tasks")         \
                                                                             \
   develop(intx, ConcGCYieldTimeout, 0,                                      \
-          "If non-zero, assert that GC threads yield within this # of ms.") \
+          "If non-zero, assert that GC threads yield within this "          \
+          "number of milliseconds")                                         \
                                                                             \
   notproduct(bool, TraceMarkSweep, false,                                   \
           "Trace mark sweep")                                               \
                                                                             \
   product(bool, PrintReferenceGC, false,                                    \
           "Print times spent handling reference objects during GC "         \
-          " (enabled only when PrintGCDetails)")                            \
+          "(enabled only when PrintGCDetails)")                             \
                                                                             \
   develop(bool, TraceReferenceGC, false,                                    \
           "Trace handling of soft/weak/final/phantom references")           \
                                                                             \
   develop(bool, TraceFinalizerRegistration, false,                          \
-         "Trace registration of final references")                          \
+          "Trace registration of final references")                         \
                                                                             \
   notproduct(bool, TraceScavenge, false,                                    \
           "Trace scavenge")                                                 \
@@ -2285,7 +2346,7 @@
           "Print heap layout before and after each GC")                     \
                                                                             \
   product_rw(bool, PrintHeapAtGCExtended, false,                            \
-          "Prints extended information about the layout of the heap "       \
+          "Print extended information about the layout of the heap "        \
           "when -XX:+PrintHeapAtGC is set")                                 \
                                                                             \
   product(bool, PrintHeapAtSIGBREAK, true,                                  \
@@ -2322,45 +2383,45 @@
           "Trace actions of the GC task threads")                           \
                                                                             \
   product(bool, PrintParallelOldGCPhaseTimes, false,                        \
-          "Print the time taken by each parallel old gc phase."             \
-          "PrintGCDetails must also be enabled.")                           \
+          "Print the time taken by each phase in ParallelOldGC "            \
+          "(PrintGCDetails must also be enabled)")                          \
                                                                             \
   develop(bool, TraceParallelOldGCMarkingPhase, false,                      \
-          "Trace parallel old gc marking phase")                            \
+          "Trace marking phase in ParallelOldGC")                           \
                                                                             \
   develop(bool, TraceParallelOldGCSummaryPhase, false,                      \
-          "Trace parallel old gc summary phase")                            \
+          "Trace summary phase in ParallelOldGC")                           \
                                                                             \
   develop(bool, TraceParallelOldGCCompactionPhase, false,                   \
-          "Trace parallel old gc compaction phase")                         \
+          "Trace compaction phase in ParallelOldGC")                        \
                                                                             \
   develop(bool, TraceParallelOldGCDensePrefix, false,                       \
-          "Trace parallel old gc dense prefix computation")                 \
+          "Trace dense prefix computation for ParallelOldGC")               \
                                                                             \
   develop(bool, IgnoreLibthreadGPFault, false,                              \
           "Suppress workaround for libthread GP fault")                     \
                                                                             \
   product(bool, PrintJNIGCStalls, false,                                    \
-          "Print diagnostic message when GC is stalled"                     \
+          "Print diagnostic message when GC is stalled "                    \
           "by JNI critical section")                                        \
                                                                             \
   experimental(double, ObjectCountCutOffPercent, 0.5,                       \
           "The percentage of the used heap that the instances of a class "  \
-          "must occupy for the class to generate a trace event.")           \
+          "must occupy for the class to generate a trace event")            \
                                                                             \
   /* GC log rotation setting */                                             \
                                                                             \
   product(bool, UseGCLogFileRotation, false,                                \
-          "Prevent large gclog file for long running app. "                 \
-          "Requires -Xloggc:<filename>")                                    \
+          "Rotate gclog files (for long running applications). It requires "\
+          "-Xloggc:<filename>")                                             \
                                                                             \
   product(uintx, NumberOfGCLogFiles, 0,                                     \
-          "Number of gclog files in rotation, "                             \
-          "Default: 0, no rotation")                                        \
+          "Number of gclog files in rotation "                              \
+          "(default: 0, no rotation)")                                      \
                                                                             \
   product(uintx, GCLogFileSize, 0,                                          \
-          "GC log file size, Default: 0 bytes, no rotation "                \
-          "Only valid with UseGCLogFileRotation")                           \
+          "GC log file size (default: 0 bytes, no rotation). "              \
+          "It requires UseGCLogFileRotation")                               \
                                                                             \
   /* JVMTI heap profiling */                                                \
                                                                             \
@@ -2437,40 +2498,40 @@
           "Generate range checks for array accesses")                       \
                                                                             \
   develop_pd(bool, ImplicitNullChecks,                                      \
-          "generate code for implicit null checks")                         \
+          "Generate code for implicit null checks")                         \
                                                                             \
   product(bool, PrintSafepointStatistics, false,                            \
-          "print statistics about safepoint synchronization")               \
+          "Print statistics about safepoint synchronization")               \
                                                                             \
   product(intx, PrintSafepointStatisticsCount, 300,                         \
-          "total number of safepoint statistics collected "                 \
+          "Total number of safepoint statistics collected "                 \
           "before printing them out")                                       \
                                                                             \
   product(intx, PrintSafepointStatisticsTimeout,  -1,                       \
-          "print safepoint statistics only when safepoint takes"            \
-          " more than PrintSafepointSatisticsTimeout in millis")            \
+          "Print safepoint statistics only when safepoint takes "           \
+          "more than PrintSafepointSatisticsTimeout in millis")             \
                                                                             \
   product(bool, TraceSafepointCleanupTime, false,                           \
-          "print the break down of clean up tasks performed during"         \
-          " safepoint")                                                     \
+          "Print the break down of clean up tasks performed during "        \
+          "safepoint")                                                      \
                                                                             \
   product(bool, Inline, true,                                               \
-          "enable inlining")                                                \
+          "Enable inlining")                                                \
                                                                             \
   product(bool, ClipInlining, true,                                         \
-          "clip inlining if aggregate method exceeds DesiredMethodLimit")   \
+          "Clip inlining if aggregate method exceeds DesiredMethodLimit")   \
                                                                             \
   develop(bool, UseCHA, true,                                               \
-          "enable CHA")                                                     \
+          "Enable CHA")                                                     \
                                                                             \
   product(bool, UseTypeProfile, true,                                       \
           "Check interpreter profile for historically monomorphic calls")   \
                                                                             \
   notproduct(bool, TimeCompiler, false,                                     \
-          "time the compiler")                                              \
+          "Time the compiler")                                              \
                                                                             \
   diagnostic(bool, PrintInlining, false,                                    \
-          "prints inlining optimizations")                                  \
+          "Print inlining optimizations")                                   \
                                                                             \
   product(bool, UsePopCountInstruction, false,                              \
           "Use population count instruction")                               \
@@ -2482,56 +2543,59 @@
           "Print when methods are replaced do to recompilation")            \
                                                                             \
   develop(bool, PrintMethodFlushing, false,                                 \
-          "print the nmethods being flushed")                               \
+          "Print the nmethods being flushed")                               \
                                                                             \
   develop(bool, UseRelocIndex, false,                                       \
-         "use an index to speed random access to relocations")              \
+          "Use an index to speed random access to relocations")             \
                                                                             \
   develop(bool, StressCodeBuffers, false,                                   \
-         "Exercise code buffer expansion and other rare state changes")     \
+          "Exercise code buffer expansion and other rare state changes")    \
                                                                             \
   diagnostic(bool, DebugNonSafepoints, trueInDebug,                         \
-         "Generate extra debugging info for non-safepoints in nmethods")    \
+          "Generate extra debugging information for non-safepoints in "     \
+          "nmethods")                                                       \
                                                                             \
   product(bool, PrintVMOptions, false,                                      \
-         "Print flags that appeared on the command line")                   \
+          "Print flags that appeared on the command line")                  \
                                                                             \
   product(bool, IgnoreUnrecognizedVMOptions, false,                         \
-         "Ignore unrecognized VM options")                                  \
+          "Ignore unrecognized VM options")                                 \
                                                                             \
   product(bool, PrintCommandLineFlags, false,                               \
-         "Print flags specified on command line or set by ergonomics")      \
+          "Print flags specified on command line or set by ergonomics")     \
                                                                             \
   product(bool, PrintFlagsInitial, false,                                   \
-         "Print all VM flags before argument processing and exit VM")       \
+          "Print all VM flags before argument processing and exit VM")      \
                                                                             \
   product(bool, PrintFlagsFinal, false,                                     \
-         "Print all VM flags after argument and ergonomic processing")      \
+          "Print all VM flags after argument and ergonomic processing")     \
                                                                             \
   notproduct(bool, PrintFlagsWithComments, false,                           \
-         "Print all VM flags with default values and descriptions and exit")\
+          "Print all VM flags with default values and descriptions and "    \
+          "exit")                                                           \
                                                                             \
   diagnostic(bool, SerializeVMOutput, true,                                 \
-         "Use a mutex to serialize output to tty and hotspot.log")          \
+          "Use a mutex to serialize output to tty and LogFile")             \
                                                                             \
   diagnostic(bool, DisplayVMOutput, true,                                   \
-         "Display all VM output on the tty, independently of LogVMOutput")  \
-                                                                            \
-  diagnostic(bool, LogVMOutput, trueInDebug,                                \
-         "Save VM output to hotspot.log, or to LogFile")                    \
+          "Display all VM output on the tty, independently of LogVMOutput") \
+                                                                            \
+  diagnostic(bool, LogVMOutput, false,                                      \
+          "Save VM output to LogFile")                                      \
                                                                             \
   diagnostic(ccstr, LogFile, NULL,                                          \
-         "If LogVMOutput is on, save VM output to this file [hotspot.log]") \
+          "If LogVMOutput or LogCompilation is on, save VM output to "      \
+          "this file [default: ./hotspot_pid%p.log] (%p replaced with pid)")\
                                                                             \
   product(ccstr, ErrorFile, NULL,                                           \
-         "If an error occurs, save the error data to this file "            \
-         "[default: ./hs_err_pid%p.log] (%p replaced with pid)")            \
+          "If an error occurs, save the error data to this file "           \
+          "[default: ./hs_err_pid%p.log] (%p replaced with pid)")           \
                                                                             \
   product(bool, DisplayVMOutputToStderr, false,                             \
-         "If DisplayVMOutput is true, display all VM output to stderr")     \
+          "If DisplayVMOutput is true, display all VM output to stderr")    \
                                                                             \
   product(bool, DisplayVMOutputToStdout, false,                             \
-         "If DisplayVMOutput is true, display all VM output to stdout")     \
+          "If DisplayVMOutput is true, display all VM output to stdout")    \
                                                                             \
   product(bool, UseHeavyMonitors, false,                                    \
           "use heavyweight instead of lightweight Java monitors")           \
@@ -2539,6 +2603,9 @@
   product(bool, PrintStringTableStatistics, false,                          \
           "print statistics about the StringTable and SymbolTable")         \
                                                                             \
+  diagnostic(bool, VerifyStringTableAtExit, false,                          \
+          "verify StringTable contents at exit")                            \
+                                                                            \
   notproduct(bool, PrintSymbolTableSizeHistogram, false,                    \
           "print histogram of the symbol table")                            \
                                                                             \
@@ -2552,7 +2619,7 @@
                                                                             \
   notproduct(ccstr, AbortVMOnExceptionMessage, NULL,                        \
           "Call fatal if the exception pointed by AbortVMOnException "      \
-          "has this message.")                                              \
+          "has this message")                                               \
                                                                             \
   develop(bool, DebugVtables, false,                                        \
           "add debugging code to vtable dispatch")                          \
@@ -2617,31 +2684,44 @@
   product(bool, AggressiveOpts, false,                                      \
           "Enable aggressive optimizations - see arguments.cpp")            \
                                                                             \
+  product_pd(uintx, TypeProfileLevel,                                       \
+          "=XYZ, with Z: Type profiling of arguments at call; "             \
+                     "Y: Type profiling of return value at call; "          \
+                     "X: Type profiling of parameters to methods; "         \
+          "X, Y and Z in 0=off ; 1=jsr292 only; 2=all methods")             \
+                                                                            \
+  product(intx, TypeProfileArgsLimit,     2,                                \
+          "max number of call arguments to consider for type profiling")    \
+                                                                            \
+  product(intx, TypeProfileParmsLimit,    2,                                \
+          "max number of incoming parameters to consider for type profiling"\
+          ", -1 for all")                                                   \
+                                                                            \
   /* statistics */                                                          \
   develop(bool, CountCompiledCalls, false,                                  \
-          "counts method invocations")                                      \
+          "Count method invocations")                                       \
                                                                             \
   notproduct(bool, CountRuntimeCalls, false,                                \
-          "counts VM runtime calls")                                        \
+          "Count VM runtime calls")                                         \
                                                                             \
   develop(bool, CountJNICalls, false,                                       \
-          "counts jni method invocations")                                  \
+          "Count jni method invocations")                                   \
                                                                             \
   notproduct(bool, CountJVMCalls, false,                                    \
-          "counts jvm method invocations")                                  \
+          "Count jvm method invocations")                                   \
                                                                             \
   notproduct(bool, CountRemovableExceptions, false,                         \
-          "count exceptions that could be replaced by branches due to "     \
+          "Count exceptions that could be replaced by branches due to "     \
           "inlining")                                                       \
                                                                             \
   notproduct(bool, ICMissHistogram, false,                                  \
-          "produce histogram of IC misses")                                 \
+          "Produce histogram of IC misses")                                 \
                                                                             \
   notproduct(bool, PrintClassStatistics, false,                             \
-          "prints class statistics at end of run")                          \
+          "Print class statistics at end of run")                           \
                                                                             \
   notproduct(bool, PrintMethodStatistics, false,                            \
-          "prints method statistics at end of run")                         \
+          "Print method statistics at end of run")                          \
                                                                             \
   /* interpreter */                                                         \
   develop(bool, ClearInterpreterLocals, false,                              \
@@ -2655,7 +2735,7 @@
           "Rewrite frequently used bytecode pairs into a single bytecode")  \
                                                                             \
   diagnostic(bool, PrintInterpreter, false,                                 \
-          "Prints the generated interpreter code")                          \
+          "Print the generated interpreter code")                           \
                                                                             \
   product(bool, UseInterpreter, true,                                       \
           "Use interpreter for non-compiled methods")                       \
@@ -2673,8 +2753,8 @@
           "Use fast method entry code for accessor methods")                \
                                                                             \
   product_pd(bool, UseOnStackReplacement,                                   \
-           "Use on stack replacement, calls runtime if invoc. counter "     \
-           "overflows in loop")                                             \
+          "Use on stack replacement, calls runtime if invoc. counter "      \
+          "overflows in loop")                                              \
                                                                             \
   notproduct(bool, TraceOnStackReplacement, false,                          \
           "Trace on stack replacement")                                     \
@@ -2722,10 +2802,10 @@
           "Trace frequency based inlining")                                 \
                                                                             \
   develop_pd(bool, InlineIntrinsics,                                        \
-           "Inline intrinsics that can be statically resolved")             \
+          "Inline intrinsics that can be statically resolved")              \
                                                                             \
   product_pd(bool, ProfileInterpreter,                                      \
-           "Profile at the bytecode level during interpretation")           \
+          "Profile at the bytecode level during interpretation")            \
                                                                             \
   develop(bool, TraceProfileInterpreter, false,                             \
           "Trace profiling at the bytecode level during interpretation. "   \
@@ -2740,7 +2820,7 @@
           "CompileThreshold) before using the method's profile")            \
                                                                             \
   develop(bool, PrintMethodData, false,                                     \
-           "Print the results of +ProfileInterpreter at end of run")        \
+          "Print the results of +ProfileInterpreter at end of run")         \
                                                                             \
   develop(bool, VerifyDataPointer, trueInDebug,                             \
           "Verify the method data pointer during interpreter profiling")    \
@@ -2755,7 +2835,7 @@
                                                                             \
   /* compilation */                                                         \
   product(bool, UseCompiler, true,                                          \
-          "use compilation")                                                \
+          "Use Just-In-Time compilation")                                   \
                                                                             \
   develop(bool, TraceCompilationPolicy, false,                              \
           "Trace compilation policy")                                       \
@@ -2764,20 +2844,21 @@
           "Time the compilation policy")                                    \
                                                                             \
   product(bool, UseCounterDecay, true,                                      \
-           "adjust recompilation counters")                                 \
+          "Adjust recompilation counters")                                  \
                                                                             \
   develop(intx, CounterHalfLifeTime,    30,                                 \
-          "half-life time of invocation counters (in secs)")                \
+          "Half-life time of invocation counters (in seconds)")             \
                                                                             \
   develop(intx, CounterDecayMinIntervalLength,   500,                       \
-          "Min. ms. between invocation of CounterDecay")                    \
+          "The minimum interval (in milliseconds) between invocation of "   \
+          "CounterDecay")                                                   \
                                                                             \
   product(bool, AlwaysCompileLoopMethods, false,                            \
-          "when using recompilation, never interpret methods "              \
+          "When using recompilation, never interpret methods "              \
           "containing loops")                                               \
                                                                             \
   product(bool, DontCompileHugeMethods, true,                               \
-          "don't compile methods > HugeMethodLimit")                        \
+          "Do not compile methods > HugeMethodLimit")                       \
                                                                             \
   /* Bytecode escape analysis estimation. */                                \
   product(bool, EstimateArgEscape, true,                                    \
@@ -2787,10 +2868,10 @@
           "How much tracing to do of bytecode escape analysis estimates")   \
                                                                             \
   product(intx, MaxBCEAEstimateLevel, 5,                                    \
-          "Maximum number of nested calls that are analyzed by BC EA.")     \
+          "Maximum number of nested calls that are analyzed by BC EA")      \
                                                                             \
   product(intx, MaxBCEAEstimateSize, 150,                                   \
-          "Maximum bytecode size of a method to be analyzed by BC EA.")     \
+          "Maximum bytecode size of a method to be analyzed by BC EA")      \
                                                                             \
   product(intx,  AllocatePrefetchStyle, 1,                                  \
           "0 = no prefetch, "                                               \
@@ -2805,7 +2886,8 @@
           "Number of lines to prefetch ahead of array allocation pointer")  \
                                                                             \
   product(intx,  AllocateInstancePrefetchLines, 1,                          \
-          "Number of lines to prefetch ahead of instance allocation pointer") \
+          "Number of lines to prefetch ahead of instance allocation "       \
+          "pointer")                                                        \
                                                                             \
   product(intx,  AllocatePrefetchStepSize, 16,                              \
           "Step size in bytes of sequential prefetch instructions")         \
@@ -2825,8 +2907,8 @@
           "(0 means off)")                                                  \
                                                                             \
   product(intx, MaxJavaStackTraceDepth, 1024,                               \
-          "Max. no. of lines in the stack trace for Java exceptions "       \
-          "(0 means all)")                                                  \
+          "The maximum number of lines in the stack trace for Java "        \
+          "exceptions (0 means all)")                                       \
                                                                             \
   NOT_EMBEDDED(diagnostic(intx, GuaranteedSafepointInterval, 1000,          \
           "Guarantee a safepoint (at least) every so many milliseconds "    \
@@ -2845,11 +2927,15 @@
   product(intx, NmethodSweepCheckInterval, 5,                               \
           "Compilers wake up every n seconds to possibly sweep nmethods")   \
                                                                             \
+  product(intx, NmethodSweepActivity, 10,                                   \
+          "Removes cold nmethods from code cache if > 0. Higher values "    \
+          "result in more aggressive sweeping")                             \
+                                                                            \
   notproduct(bool, LogSweeper, false,                                       \
-            "Keep a ring buffer of sweeper activity")                       \
+          "Keep a ring buffer of sweeper activity")                         \
                                                                             \
   notproduct(intx, SweeperLogEntries, 1024,                                 \
-            "Number of records in the ring buffer of sweeper activity")     \
+          "Number of records in the ring buffer of sweeper activity")       \
                                                                             \
   notproduct(intx, MemProfilingInterval, 500,                               \
           "Time between each invocation of the MemProfiler")                \
@@ -2892,34 +2978,35 @@
           "less than this")                                                 \
                                                                             \
   product(intx, MaxInlineSize, 35,                                          \
-          "maximum bytecode size of a method to be inlined")                \
+          "The maximum bytecode size of a method to be inlined")            \
                                                                             \
   product_pd(intx, FreqInlineSize,                                          \
-          "maximum bytecode size of a frequent method to be inlined")       \
+          "The maximum bytecode size of a frequent method to be inlined")   \
                                                                             \
   product(intx, MaxTrivialSize, 6,                                          \
-          "maximum bytecode size of a trivial method to be inlined")        \
+          "The maximum bytecode size of a trivial method to be inlined")    \
                                                                             \
   product(intx, MinInliningThreshold, 250,                                  \
-          "min. invocation count a method needs to have to be inlined")     \
+          "The minimum invocation count a method needs to have to be "      \
+          "inlined")                                                        \
                                                                             \
   develop(intx, MethodHistogramCutoff, 100,                                 \
-          "cutoff value for method invoc. histogram (+CountCalls)")         \
+          "The cutoff value for method invocation histogram (+CountCalls)") \
                                                                             \
   develop(intx, ProfilerNumberOfInterpretedMethods, 25,                     \
-          "# of interpreted methods to show in profile")                    \
+          "Number of interpreted methods to show in profile")               \
                                                                             \
   develop(intx, ProfilerNumberOfCompiledMethods, 25,                        \
-          "# of compiled methods to show in profile")                       \
+          "Number of compiled methods to show in profile")                  \
                                                                             \
   develop(intx, ProfilerNumberOfStubMethods, 25,                            \
-          "# of stub methods to show in profile")                           \
+          "Number of stub methods to show in profile")                      \
                                                                             \
   develop(intx, ProfilerNumberOfRuntimeStubNodes, 25,                       \
-          "# of runtime stub nodes to show in profile")                     \
+          "Number of runtime stub nodes to show in profile")                \
                                                                             \
   product(intx, ProfileIntervalsTicks, 100,                                 \
-          "# of ticks between printing of interval profile "                \
+          "Number of ticks between printing of interval profile "           \
           "(+ProfileIntervals)")                                            \
                                                                             \
   notproduct(intx, ScavengeALotInterval,     1,                             \
@@ -2940,7 +3027,7 @@
                                                                             \
   develop(intx, MinSleepInterval,     1,                                    \
           "Minimum sleep() interval (milliseconds) when "                   \
-          "ConvertSleepToYield is off (used for SOLARIS)")                  \
+          "ConvertSleepToYield is off (used for Solaris)")                  \
                                                                             \
   develop(intx, ProfilerPCTickThreshold,    15,                             \
           "Number of ticks in a PC buckets to be a hotspot")                \
@@ -2955,22 +3042,22 @@
           "Mark nmethods non-entrant at registration")                      \
                                                                             \
   diagnostic(intx, MallocVerifyInterval,     0,                             \
-          "if non-zero, verify C heap after every N calls to "              \
+          "If non-zero, verify C heap after every N calls to "              \
           "malloc/realloc/free")                                            \
                                                                             \
   diagnostic(intx, MallocVerifyStart,     0,                                \
-          "if non-zero, start verifying C heap after Nth call to "          \
+          "If non-zero, start verifying C heap after Nth call to "          \
           "malloc/realloc/free")                                            \
                                                                             \
   diagnostic(uintx, MallocMaxTestWords,     0,                              \
-          "if non-zero, max # of Words that malloc/realloc can allocate "   \
-          "(for testing only)")                                             \
+          "If non-zero, maximum number of words that malloc/realloc can "   \
+          "allocate (for testing only)")                                    \
                                                                             \
   product(intx, TypeProfileWidth,     2,                                    \
-          "number of receiver types to record in call/cast profile")        \
+          "Number of receiver types to record in call/cast profile")        \
                                                                             \
   develop(intx, BciProfileWidth,      2,                                    \
-          "number of return bci's to record in ret profile")                \
+          "Number of return bci's to record in ret profile")                \
                                                                             \
   product(intx, PerMethodRecompilationCutoff, 400,                          \
           "After recompiling N times, stay in the interpreter (-1=>'Inf')") \
@@ -3037,7 +3124,7 @@
           "Percentage of Eden that can be wasted")                          \
                                                                             \
   product(uintx, TLABRefillWasteFraction,    64,                            \
-          "Max TLAB waste at a refill (internal fragmentation)")            \
+          "Maximum TLAB waste at a refill (internal fragmentation)")        \
                                                                             \
   product(uintx, TLABWasteIncrement,    4,                                  \
           "Increment allowed waste at slow allocation")                     \
@@ -3046,7 +3133,7 @@
           "Ratio of eden/survivor space size")                              \
                                                                             \
   product(uintx, NewRatio, 2,                                               \
-          "Ratio of new/old generation sizes")                              \
+          "Ratio of old/new generation sizes")                              \
                                                                             \
   product_pd(uintx, NewSizeThreadIncrease,                                  \
           "Additional size added to desired new generation size per "       \
@@ -3058,33 +3145,39 @@
   product(uintx, MaxMetaspaceSize, max_uintx,                               \
           "Maximum size of Metaspaces (in bytes)")                          \
                                                                             \
-  product(uintx, ClassMetaspaceSize, 1*G,                                   \
-          "Maximum size of InstanceKlass area in Metaspace used for "       \
-          "UseCompressedKlassPointers")                                     \
+  product(uintx, CompressedClassSpaceSize, 1*G,                             \
+          "Maximum size of class area in Metaspace when compressed "        \
+          "class pointers are used")                                        \
                                                                             \
   product(uintx, MinHeapFreeRatio,    40,                                   \
-          "Min percentage of heap free after GC to avoid expansion")        \
+          "The minimum percentage of heap free after GC to avoid expansion."\
+          " For most GCs this applies to the old generation. In G1 it"      \
+          " applies to the whole heap. Not supported by ParallelGC.")       \
                                                                             \
   product(uintx, MaxHeapFreeRatio,    70,                                   \
-          "Max percentage of heap free after GC to avoid shrinking")        \
+          "The maximum percentage of heap free after GC to avoid shrinking."\
+          " For most GCs this applies to the old generation. In G1 it"      \
+          " applies to the whole heap. Not supported by ParallelGC.")       \
                                                                             \
   product(intx, SoftRefLRUPolicyMSPerMB, 1000,                              \
           "Number of milliseconds per MB of free space in the heap")        \
                                                                             \
   product(uintx, MinHeapDeltaBytes, ScaleForWordSize(128*K),                \
-          "Min change in heap space due to GC (in bytes)")                  \
+          "The minimum change in heap space due to GC (in bytes)")          \
                                                                             \
   product(uintx, MinMetaspaceExpansion, ScaleForWordSize(256*K),            \
-          "Min expansion of Metaspace (in bytes)")                          \
+          "The minimum expansion of Metaspace (in bytes)")                  \
                                                                             \
   product(uintx, MinMetaspaceFreeRatio,    40,                              \
-          "Min percentage of Metaspace free after GC to avoid expansion")   \
+          "The minimum percentage of Metaspace free after GC to avoid "     \
+          "expansion")                                                      \
                                                                             \
   product(uintx, MaxMetaspaceFreeRatio,    70,                              \
-          "Max percentage of Metaspace free after GC to avoid shrinking")   \
+          "The maximum percentage of Metaspace free after GC to avoid "     \
+          "shrinking")                                                      \
                                                                             \
   product(uintx, MaxMetaspaceExpansion, ScaleForWordSize(4*M),              \
-          "Max expansion of Metaspace without full GC (in bytes)")          \
+          "The maximum expansion of Metaspace without full GC (in bytes)")  \
                                                                             \
   product(uintx, QueuedAllocationWarningCount, 0,                           \
           "Number of times an allocation that queues behind a GC "          \
@@ -3106,13 +3199,14 @@
           "Desired percentage of survivor space used after scavenge")       \
                                                                             \
   product(uintx, MarkSweepDeadRatio,     5,                                 \
-          "Percentage (0-100) of the old gen allowed as dead wood."         \
-          "Serial mark sweep treats this as both the min and max value."    \
-          "CMS uses this value only if it falls back to mark sweep."        \
-          "Par compact uses a variable scale based on the density of the"   \
-          "generation and treats this as the max value when the heap is"    \
-          "either completely full or completely empty.  Par compact also"   \
-          "has a smaller default value; see arguments.cpp.")                \
+          "Percentage (0-100) of the old gen allowed as dead wood. "        \
+          "Serial mark sweep treats this as both the minimum and maximum "  \
+          "value. "                                                         \
+          "CMS uses this value only if it falls back to mark sweep. "       \
+          "Par compact uses a variable scale based on the density of the "  \
+          "generation and treats this as the maximum value when the heap "  \
+          "is either completely full or completely empty.  Par compact "    \
+          "also has a smaller default value; see arguments.cpp.")           \
                                                                             \
   product(uintx, MarkSweepAlwaysCompactCount,     4,                        \
           "How often should we fully compact the heap (ignoring the dead "  \
@@ -3131,27 +3225,27 @@
           "Census for CMS' FreeListSpace")                                  \
                                                                             \
   develop(uintx, GCExpandToAllocateDelayMillis, 0,                          \
-          "Delay in ms between expansion and allocation")                   \
+          "Delay between expansion and allocation (in milliseconds)")       \
                                                                             \
   develop(uintx, GCWorkerDelayMillis, 0,                                    \
-          "Delay in ms in scheduling GC workers")                           \
+          "Delay in scheduling GC workers (in milliseconds)")               \
                                                                             \
   product(intx, DeferThrSuspendLoopCount,     4000,                         \
           "(Unstable) Number of times to iterate in safepoint loop "        \
-          " before blocking VM threads ")                                   \
+          "before blocking VM threads ")                                    \
                                                                             \
   product(intx, DeferPollingPageLoopCount,     -1,                          \
           "(Unsafe,Unstable) Number of iterations in safepoint loop "       \
           "before changing safepoint polling page to RO ")                  \
                                                                             \
-  product(intx, SafepointSpinBeforeYield, 2000,  "(Unstable)")              \
+  product(intx, SafepointSpinBeforeYield, 2000, "(Unstable)")               \
                                                                             \
   product(bool, PSChunkLargeArrays, true,                                   \
-          "true: process large arrays in chunks")                           \
+          "Process large arrays in chunks")                                 \
                                                                             \
   product(uintx, GCDrainStackTargetSize, 64,                                \
-          "how many entries we'll try to leave on the stack during "        \
-          "parallel GC")                                                    \
+          "Number of entries we will try to leave on the stack "            \
+          "during parallel gc")                                             \
                                                                             \
   /* stack parameters */                                                    \
   product_pd(intx, StackYellowPages,                                        \
@@ -3161,8 +3255,8 @@
           "Number of red zone (unrecoverable overflows) pages")             \
                                                                             \
   product_pd(intx, StackShadowPages,                                        \
-          "Number of shadow zone (for overflow checking) pages"             \
-          " this should exceed the depth of the VM and native call stack")  \
+          "Number of shadow zone (for overflow checking) pages "            \
+          "this should exceed the depth of the VM and native call stack")   \
                                                                             \
   product_pd(intx, ThreadStackSize,                                         \
           "Thread Stack Size (in Kbytes)")                                  \
@@ -3203,60 +3297,51 @@
           "Reserved code cache size (in bytes) - maximum code cache size")  \
                                                                             \
   product(uintx, CodeCacheMinimumFreeSpace, 500*K,                          \
-          "When less than X space left, we stop compiling.")                \
+          "When less than X space left, we stop compiling")                 \
                                                                             \
   product_pd(uintx, CodeCacheExpansionSize,                                 \
           "Code cache expansion size (in bytes)")                           \
                                                                             \
   develop_pd(uintx, CodeCacheMinBlockLength,                                \
-          "Minimum number of segments in a code cache block.")              \
+          "Minimum number of segments in a code cache block")               \
                                                                             \
   notproduct(bool, ExitOnFullCodeCache, false,                              \
-          "Exit the VM if we fill the code cache.")                         \
+          "Exit the VM if we fill the code cache")                          \
                                                                             \
   product(bool, UseCodeCacheFlushing, true,                                 \
           "Attempt to clean the code cache before shutting off compiler")   \
                                                                             \
-  product(intx,  MinCodeCacheFlushingInterval, 30,                          \
-          "Min number of seconds between code cache cleaning sessions")     \
-                                                                            \
-  product(uintx,  CodeCacheFlushingMinimumFreeSpace, 1500*K,                \
-          "When less than X space left, start code cache cleaning")         \
-                                                                            \
-  product(uintx, CodeCacheFlushingFraction, 2,                              \
-          "Fraction of the code cache that is flushed when full")           \
-                                                                            \
   /* interpreter debugging */                                               \
   develop(intx, BinarySwitchThreshold, 5,                                   \
           "Minimal number of lookupswitch entries for rewriting to binary " \
           "switch")                                                         \
                                                                             \
   develop(intx, StopInterpreterAt, 0,                                       \
-          "Stops interpreter execution at specified bytecode number")       \
+          "Stop interpreter execution at specified bytecode number")        \
                                                                             \
   develop(intx, TraceBytecodesAt, 0,                                        \
-          "Traces bytecodes starting with specified bytecode number")       \
+          "Trace bytecodes starting with specified bytecode number")        \
                                                                             \
   /* compiler interface */                                                  \
   develop(intx, CIStart, 0,                                                 \
-          "the id of the first compilation to permit")                      \
+          "The id of the first compilation to permit")                      \
                                                                             \
   develop(intx, CIStop,    -1,                                              \
-          "the id of the last compilation to permit")                       \
+          "The id of the last compilation to permit")                       \
                                                                             \
   develop(intx, CIStartOSR,     0,                                          \
-          "the id of the first osr compilation to permit "                  \
+          "The id of the first osr compilation to permit "                  \
           "(CICountOSR must be on)")                                        \
                                                                             \
   develop(intx, CIStopOSR,    -1,                                           \
-          "the id of the last osr compilation to permit "                   \
+          "The id of the last osr compilation to permit "                   \
           "(CICountOSR must be on)")                                        \
                                                                             \
   develop(intx, CIBreakAtOSR,    -1,                                        \
-          "id of osr compilation to break at")                              \
+          "The id of osr compilation to break at")                          \
                                                                             \
   develop(intx, CIBreakAt,    -1,                                           \
-          "id of compilation to break at")                                  \
+          "The id of compilation to break at")                              \
                                                                             \
   product(ccstrlist, CompileOnly, "",                                       \
           "List of methods (pkg/class.name) to restrict compilation to")    \
@@ -3275,11 +3360,11 @@
           "[default: ./replay_pid%p.log] (%p replaced with pid)")           \
                                                                             \
   develop(intx, ReplaySuppressInitializers, 2,                              \
-          "Controls handling of class initialization during replay"         \
-          "0 - don't do anything special"                                   \
-          "1 - treat all class initializers as empty"                       \
-          "2 - treat class initializers for application classes as empty"   \
-          "3 - allow all class initializers to run during bootstrap but"    \
+          "Control handling of class initialization during replay: "        \
+          "0 - don't do anything special; "                                 \
+          "1 - treat all class initializers as empty; "                     \
+          "2 - treat class initializers for application classes as empty; " \
+          "3 - allow all class initializers to run during bootstrap but "   \
           "    pretend they are empty after starting replay")               \
                                                                             \
   develop(bool, ReplayIgnoreInitErrors, false,                              \
@@ -3308,14 +3393,15 @@
           "0 : Normal.                                                     "\
           "    VM chooses priorities that are appropriate for normal       "\
           "    applications. On Solaris NORM_PRIORITY and above are mapped "\
-          "    to normal native priority. Java priorities below NORM_PRIORITY"\
-          "    map to lower native priority values. On Windows applications"\
-          "    are allowed to use higher native priorities. However, with  "\
-          "    ThreadPriorityPolicy=0, VM will not use the highest possible"\
-          "    native priority, THREAD_PRIORITY_TIME_CRITICAL, as it may   "\
-          "    interfere with system threads. On Linux thread priorities   "\
-          "    are ignored because the OS does not support static priority "\
-          "    in SCHED_OTHER scheduling class which is the only choice for"\
+          "    to normal native priority. Java priorities below "           \
+          "    NORM_PRIORITY map to lower native priority values. On       "\
+          "    Windows applications are allowed to use higher native       "\
+          "    priorities. However, with ThreadPriorityPolicy=0, VM will   "\
+          "    not use the highest possible native priority,               "\
+          "    THREAD_PRIORITY_TIME_CRITICAL, as it may interfere with     "\
+          "    system threads. On Linux thread priorities are ignored      "\
+          "    because the OS does not support static priority in          "\
+          "    SCHED_OTHER scheduling class which is the only choice for   "\
           "    non-root, non-realtime applications.                        "\
           "1 : Aggressive.                                                 "\
           "    Java thread priorities map over to the entire range of      "\
@@ -3346,16 +3432,35 @@
   product(bool, VMThreadHintNoPreempt, false,                               \
           "(Solaris only) Give VM thread an extra quanta")                  \
                                                                             \
-  product(intx, JavaPriority1_To_OSPriority, -1, "Map Java priorities to OS priorities") \
-  product(intx, JavaPriority2_To_OSPriority, -1, "Map Java priorities to OS priorities") \
-  product(intx, JavaPriority3_To_OSPriority, -1, "Map Java priorities to OS priorities") \
-  product(intx, JavaPriority4_To_OSPriority, -1, "Map Java priorities to OS priorities") \
-  product(intx, JavaPriority5_To_OSPriority, -1, "Map Java priorities to OS priorities") \
-  product(intx, JavaPriority6_To_OSPriority, -1, "Map Java priorities to OS priorities") \
-  product(intx, JavaPriority7_To_OSPriority, -1, "Map Java priorities to OS priorities") \
-  product(intx, JavaPriority8_To_OSPriority, -1, "Map Java priorities to OS priorities") \
-  product(intx, JavaPriority9_To_OSPriority, -1, "Map Java priorities to OS priorities") \
-  product(intx, JavaPriority10_To_OSPriority,-1, "Map Java priorities to OS priorities") \
+  product(intx, JavaPriority1_To_OSPriority, -1,                            \
+          "Map Java priorities to OS priorities")                           \
+                                                                            \
+  product(intx, JavaPriority2_To_OSPriority, -1,                            \
+          "Map Java priorities to OS priorities")                           \
+                                                                            \
+  product(intx, JavaPriority3_To_OSPriority, -1,                            \
+          "Map Java priorities to OS priorities")                           \
+                                                                            \
+  product(intx, JavaPriority4_To_OSPriority, -1,                            \
+          "Map Java priorities to OS priorities")                           \
+                                                                            \
+  product(intx, JavaPriority5_To_OSPriority, -1,                            \
+          "Map Java priorities to OS priorities")                           \
+                                                                            \
+  product(intx, JavaPriority6_To_OSPriority, -1,                            \
+          "Map Java priorities to OS priorities")                           \
+                                                                            \
+  product(intx, JavaPriority7_To_OSPriority, -1,                            \
+          "Map Java priorities to OS priorities")                           \
+                                                                            \
+  product(intx, JavaPriority8_To_OSPriority, -1,                            \
+          "Map Java priorities to OS priorities")                           \
+                                                                            \
+  product(intx, JavaPriority9_To_OSPriority, -1,                            \
+          "Map Java priorities to OS priorities")                           \
+                                                                            \
+  product(intx, JavaPriority10_To_OSPriority,-1,                            \
+          "Map Java priorities to OS priorities")                           \
                                                                             \
   experimental(bool, UseCriticalJavaThreadPriority, false,                  \
           "Java thread priority 10 maps to critical scheduling priority")   \
@@ -3386,37 +3491,38 @@
           "Used with +TraceLongCompiles")                                   \
                                                                             \
   product(intx, StarvationMonitorInterval,    200,                          \
-          "Pause between each check in ms")                                 \
+          "Pause between each check (in milliseconds)")                     \
                                                                             \
   /* recompilation */                                                       \
   product_pd(intx, CompileThreshold,                                        \
           "number of interpreted method invocations before (re-)compiling") \
                                                                             \
   product_pd(intx, BackEdgeThreshold,                                       \
-          "Interpreter Back edge threshold at which an OSR compilation is invoked")\
+          "Interpreter Back edge threshold at which an OSR compilation is " \
+          "invoked")                                                        \
                                                                             \
   product(intx, Tier0InvokeNotifyFreqLog, 7,                                \
-          "Interpreter (tier 0) invocation notification frequency.")        \
+          "Interpreter (tier 0) invocation notification frequency")         \
                                                                             \
   product(intx, Tier2InvokeNotifyFreqLog, 11,                               \
-          "C1 without MDO (tier 2) invocation notification frequency.")     \
+          "C1 without MDO (tier 2) invocation notification frequency")      \
                                                                             \
   product(intx, Tier3InvokeNotifyFreqLog, 10,                               \
           "C1 with MDO profiling (tier 3) invocation notification "         \
-          "frequency.")                                                     \
+          "frequency")                                                      \
                                                                             \
   product(intx, Tier23InlineeNotifyFreqLog, 20,                             \
           "Inlinee invocation (tiers 2 and 3) notification frequency")      \
                                                                             \
   product(intx, Tier0BackedgeNotifyFreqLog, 10,                             \
-          "Interpreter (tier 0) invocation notification frequency.")        \
+          "Interpreter (tier 0) invocation notification frequency")         \
                                                                             \
   product(intx, Tier2BackedgeNotifyFreqLog, 14,                             \
-          "C1 without MDO (tier 2) invocation notification frequency.")     \
+          "C1 without MDO (tier 2) invocation notification frequency")      \
                                                                             \
   product(intx, Tier3BackedgeNotifyFreqLog, 13,                             \
           "C1 with MDO profiling (tier 3) invocation notification "         \
-          "frequency.")                                                     \
+          "frequency")                                                      \
                                                                             \
   product(intx, Tier2CompileThreshold, 0,                                   \
           "threshold at which tier 2 compilation is invoked")               \
@@ -3433,7 +3539,7 @@
                                                                             \
   product(intx, Tier3CompileThreshold, 2000,                                \
           "Threshold at which tier 3 compilation is invoked (invocation "   \
-          "minimum must be satisfied.")                                     \
+          "minimum must be satisfied")                                      \
                                                                             \
   product(intx, Tier3BackEdgeThreshold,  60000,                             \
           "Back edge threshold at which tier 3 OSR compilation is invoked") \
@@ -3447,7 +3553,7 @@
                                                                             \
   product(intx, Tier4CompileThreshold, 15000,                               \
           "Threshold at which tier 4 compilation is invoked (invocation "   \
-          "minimum must be satisfied.")                                     \
+          "minimum must be satisfied")                                      \
                                                                             \
   product(intx, Tier4BackEdgeThreshold, 40000,                              \
           "Back edge threshold at which tier 4 OSR compilation is invoked") \
@@ -3476,12 +3582,12 @@
           "Stop at given compilation level")                                \
                                                                             \
   product(intx, Tier0ProfilingStartPercentage, 200,                         \
-          "Start profiling in interpreter if the counters exceed tier 3"    \
+          "Start profiling in interpreter if the counters exceed tier 3 "   \
           "thresholds by the specified percentage")                         \
                                                                             \
   product(uintx, IncreaseFirstTierCompileThresholdAt, 50,                   \
-          "Increase the compile threshold for C1 compilation if the code"   \
-          "cache is filled by the specified percentage.")                   \
+          "Increase the compile threshold for C1 compilation if the code "  \
+          "cache is filled by the specified percentage")                    \
                                                                             \
   product(intx, TieredRateUpdateMinTime, 1,                                 \
           "Minimum rate sampling interval (in milliseconds)")               \
@@ -3496,24 +3602,26 @@
           "Print tiered events notifications")                              \
                                                                             \
   product_pd(intx, OnStackReplacePercentage,                                \
-          "NON_TIERED number of method invocations/branches (expressed as %"\
-          "of CompileThreshold) before (re-)compiling OSR code")            \
+          "NON_TIERED number of method invocations/branches (expressed as " \
+          "% of CompileThreshold) before (re-)compiling OSR code")          \
                                                                             \
   product(intx, InterpreterProfilePercentage, 33,                           \
-          "NON_TIERED number of method invocations/branches (expressed as %"\
-          "of CompileThreshold) before profiling in the interpreter")       \
+          "NON_TIERED number of method invocations/branches (expressed as " \
+          "% of CompileThreshold) before profiling in the interpreter")     \
                                                                             \
   develop(intx, MaxRecompilationSearchLength,    10,                        \
-          "max. # frames to inspect searching for recompilee")              \
+          "The maximum number of frames to inspect when searching for "     \
+          "recompilee")                                                     \
                                                                             \
   develop(intx, MaxInterpretedSearchLength,     3,                          \
-          "max. # interp. frames to skip when searching for recompilee")    \
+          "The maximum number of interpreted frames to skip when searching "\
+          "for recompilee")                                                 \
                                                                             \
   develop(intx, DesiredMethodLimit,  8000,                                  \
-          "desired max. method size (in bytecodes) after inlining")         \
+          "The desired maximum method size (in bytecodes) after inlining")  \
                                                                             \
   develop(intx, HugeMethodLimit,  8000,                                     \
-          "don't compile methods larger than this if "                      \
+          "Don't compile methods larger than this if "                      \
           "+DontCompileHugeMethods")                                        \
                                                                             \
   /* New JDK 1.4 reflection implementation */                               \
@@ -3534,6 +3642,8 @@
           "Temporary flag for transition to AbstractMethodError wrapped "   \
           "in InvocationTargetException. See 6531596")                      \
                                                                             \
+  develop(bool, VerifyLambdaBytecodes, false,                               \
+          "Force verification of jdk 8 lambda metafactory bytecodes")       \
                                                                             \
   develop(intx, FastSuperclassLimit, 8,                                     \
           "Depth of hardwired instanceof accelerator array")                \
@@ -3557,18 +3667,19 @@
   /* flags for performance data collection */                               \
                                                                             \
   product(bool, UsePerfData, falseInEmbedded,                               \
-          "Flag to disable jvmstat instrumentation for performance testing" \
-          "and problem isolation purposes.")                                \
+          "Flag to disable jvmstat instrumentation for performance testing "\
+          "and problem isolation purposes")                                 \
                                                                             \
   product(bool, PerfDataSaveToFile, false,                                  \
           "Save PerfData memory to hsperfdata_<pid> file on exit")          \
                                                                             \
   product(ccstr, PerfDataSaveFile, NULL,                                    \
-          "Save PerfData memory to the specified absolute pathname,"        \
-           "%p in the file name if present will be replaced by pid")        \
-                                                                            \
-  product(intx, PerfDataSamplingInterval, 50 /*ms*/,                        \
-          "Data sampling interval in milliseconds")                         \
+          "Save PerfData memory to the specified absolute pathname. "       \
+          "The string %p in the file name (if present) "                    \
+          "will be replaced by pid")                                        \
+                                                                            \
+  product(intx, PerfDataSamplingInterval, 50,                               \
+          "Data sampling interval (in milliseconds)")                       \
                                                                             \
   develop(bool, PerfTraceDataCreation, false,                               \
           "Trace creation of Performance Data Entries")                     \
@@ -3593,7 +3704,7 @@
           "Bypass Win32 file system criteria checks (Windows Only)")        \
                                                                             \
   product(intx, UnguardOnExecutionViolation, 0,                             \
-          "Unguard page and retry on no-execute fault (Win32 only)"         \
+          "Unguard page and retry on no-execute fault (Win32 only) "        \
           "0=off, 1=conservative, 2=aggressive")                            \
                                                                             \
   /* Serviceability Support */                                              \
@@ -3602,7 +3713,7 @@
           "Create JMX Management Server")                                   \
                                                                             \
   product(bool, DisableAttachMechanism, false,                              \
-         "Disable mechanism that allows tools to attach to this VM")        \
+          "Disable mechanism that allows tools to attach to this VM")       \
                                                                             \
   product(bool, StartAttachListener, false,                                 \
           "Always start Attach Listener at VM startup")                     \
@@ -3625,9 +3736,9 @@
           "Require shared spaces for metadata")                             \
                                                                             \
   product(bool, DumpSharedSpaces, false,                                    \
-           "Special mode: JVM reads a class list, loads classes, builds "   \
-            "shared spaces, and dumps the shared spaces to a file to be "   \
-            "used in future JVM runs.")                                     \
+          "Special mode: JVM reads a class list, loads classes, builds "    \
+          "shared spaces, and dumps the shared spaces to a file to be "     \
+          "used in future JVM runs")                                        \
                                                                             \
   product(bool, PrintSharedSpaces, false,                                   \
           "Print usage of shared spaces")                                   \
@@ -3667,6 +3778,9 @@
   experimental(bool, TrustFinalNonStaticFields, false,                      \
           "trust final non-static declarations for constant folding")       \
                                                                             \
+  experimental(bool, FoldStableValues, false,                               \
+          "Private flag to control optimizations for stable variables")     \
+                                                                            \
   develop(bool, TraceInvokeDynamic, false,                                  \
           "trace internal invoke dynamic operations")                       \
                                                                             \
@@ -3697,27 +3811,24 @@
           "Relax the access control checks in the verifier")                \
                                                                             \
   diagnostic(bool, PrintDTraceDOF, false,                                   \
-             "Print the DTrace DOF passed to the system for JSDT probes")   \
+          "Print the DTrace DOF passed to the system for JSDT probes")      \
                                                                             \
   product(uintx, StringTableSize, defaultStringTableSize,                   \
           "Number of buckets in the interned String table")                 \
                                                                             \
+  experimental(uintx, SymbolTableSize, defaultSymbolTableSize,              \
+          "Number of buckets in the JVM internal Symbol table")             \
+                                                                            \
   develop(bool, TraceDefaultMethods, false,                                 \
           "Trace the default method processing steps")                      \
                                                                             \
-  develop(bool, ParseAllGenericSignatures, false,                           \
-          "Parse all generic signatures while classloading")                \
-                                                                            \
   develop(bool, VerifyGenericSignatures, false,                             \
           "Abort VM on erroneous or inconsistent generic signatures")       \
                                                                             \
-  product(bool, ParseGenericDefaults, false,                                \
-          "Parse generic signatures for default method handling")           \
-                                                                            \
   product(bool, UseVMInterruptibleIO, false,                                \
           "(Unstable, Solaris-specific) Thread interrupt before or with "   \
-          "EINTR for I/O operations results in OS_INTRPT. The default value"\
-          " of this flag is true for JDK 6 and earlier")                    \
+          "EINTR for I/O operations results in OS_INTRPT. The default "     \
+          "value of this flag is true for JDK 6 and earlier")               \
                                                                             \
   diagnostic(bool, WhiteBoxAPI, false,                                      \
           "Enable internal testing APIs")                                   \
@@ -3738,29 +3849,29 @@
                                                                             \
   product(bool, EnableTracing, false,                                       \
           "Enable event-based tracing")                                     \
+                                                                            \
   product(bool, UseLockedTracing, false,                                    \
           "Use locked-tracing when doing event-based tracing")
 
-
 /*
  *  Macros for factoring of globals
  */
 
 // Interface macros
-#define DECLARE_PRODUCT_FLAG(type, name, value, doc)    extern "C" type name;
-#define DECLARE_PD_PRODUCT_FLAG(type, name, doc)        extern "C" type name;
-#define DECLARE_DIAGNOSTIC_FLAG(type, name, value, doc) extern "C" type name;
+#define DECLARE_PRODUCT_FLAG(type, name, value, doc)      extern "C" type name;
+#define DECLARE_PD_PRODUCT_FLAG(type, name, doc)          extern "C" type name;
+#define DECLARE_DIAGNOSTIC_FLAG(type, name, value, doc)   extern "C" type name;
 #define DECLARE_EXPERIMENTAL_FLAG(type, name, value, doc) extern "C" type name;
-#define DECLARE_MANAGEABLE_FLAG(type, name, value, doc) extern "C" type name;
-#define DECLARE_PRODUCT_RW_FLAG(type, name, value, doc) extern "C" type name;
+#define DECLARE_MANAGEABLE_FLAG(type, name, value, doc)   extern "C" type name;
+#define DECLARE_PRODUCT_RW_FLAG(type, name, value, doc)   extern "C" type name;
 #ifdef PRODUCT
-#define DECLARE_DEVELOPER_FLAG(type, name, value, doc)  const type name = value;
-#define DECLARE_PD_DEVELOPER_FLAG(type, name, doc)      const type name = pd_##name;
-#define DECLARE_NOTPRODUCT_FLAG(type, name, value, doc)
+#define DECLARE_DEVELOPER_FLAG(type, name, value, doc)    extern "C" type CONST_##name; const type name = value;
+#define DECLARE_PD_DEVELOPER_FLAG(type, name, doc)        extern "C" type CONST_##name; const type name = pd_##name;
+#define DECLARE_NOTPRODUCT_FLAG(type, name, value, doc)   extern "C" type CONST_##name;
 #else
-#define DECLARE_DEVELOPER_FLAG(type, name, value, doc)  extern "C" type name;
-#define DECLARE_PD_DEVELOPER_FLAG(type, name, doc)      extern "C" type name;
-#define DECLARE_NOTPRODUCT_FLAG(type, name, value, doc)  extern "C" type name;
+#define DECLARE_DEVELOPER_FLAG(type, name, value, doc)    extern "C" type name;
+#define DECLARE_PD_DEVELOPER_FLAG(type, name, doc)        extern "C" type name;
+#define DECLARE_NOTPRODUCT_FLAG(type, name, value, doc)   extern "C" type name;
 #endif
 // Special LP64 flags, product only needed for now.
 #ifdef _LP64
@@ -3770,23 +3881,23 @@
 #endif // _LP64
 
 // Implementation macros
-#define MATERIALIZE_PRODUCT_FLAG(type, name, value, doc)   type name = value;
-#define MATERIALIZE_PD_PRODUCT_FLAG(type, name, doc)       type name = pd_##name;
-#define MATERIALIZE_DIAGNOSTIC_FLAG(type, name, value, doc) type name = value;
+#define MATERIALIZE_PRODUCT_FLAG(type, name, value, doc)      type name = value;
+#define MATERIALIZE_PD_PRODUCT_FLAG(type, name, doc)          type name = pd_##name;
+#define MATERIALIZE_DIAGNOSTIC_FLAG(type, name, value, doc)   type name = value;
 #define MATERIALIZE_EXPERIMENTAL_FLAG(type, name, value, doc) type name = value;
-#define MATERIALIZE_MANAGEABLE_FLAG(type, name, value, doc) type name = value;
-#define MATERIALIZE_PRODUCT_RW_FLAG(type, name, value, doc) type name = value;
+#define MATERIALIZE_MANAGEABLE_FLAG(type, name, value, doc)   type name = value;
+#define MATERIALIZE_PRODUCT_RW_FLAG(type, name, value, doc)   type name = value;
 #ifdef PRODUCT
-#define MATERIALIZE_DEVELOPER_FLAG(type, name, value, doc) /* flag name is constant */
-#define MATERIALIZE_PD_DEVELOPER_FLAG(type, name, doc)     /* flag name is constant */
-#define MATERIALIZE_NOTPRODUCT_FLAG(type, name, value, doc)
+#define MATERIALIZE_DEVELOPER_FLAG(type, name, value, doc)    type CONST_##name = value;
+#define MATERIALIZE_PD_DEVELOPER_FLAG(type, name, doc)        type CONST_##name = pd_##name;
+#define MATERIALIZE_NOTPRODUCT_FLAG(type, name, value, doc)   type CONST_##name = value;
 #else
-#define MATERIALIZE_DEVELOPER_FLAG(type, name, value, doc) type name = value;
-#define MATERIALIZE_PD_DEVELOPER_FLAG(type, name, doc)     type name = pd_##name;
-#define MATERIALIZE_NOTPRODUCT_FLAG(type, name, value, doc) type name = value;
+#define MATERIALIZE_DEVELOPER_FLAG(type, name, value, doc)    type name = value;
+#define MATERIALIZE_PD_DEVELOPER_FLAG(type, name, doc)        type name = pd_##name;
+#define MATERIALIZE_NOTPRODUCT_FLAG(type, name, value, doc)   type name = value;
 #endif
 #ifdef _LP64
-#define MATERIALIZE_LP64_PRODUCT_FLAG(type, name, value, doc)   type name = value;
+#define MATERIALIZE_LP64_PRODUCT_FLAG(type, name, value, doc) type name = value;
 #else
 #define MATERIALIZE_LP64_PRODUCT_FLAG(type, name, value, doc) /* flag is constant */
 #endif // _LP64
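
To see what these interface/implementation macros produce, it helps to
hand-expand one product flag and one develop flag. A simplified sketch
(the real expansion is driven through RUNTIME_FLAGS, and intx/bool come
from the VM's own typedefs):

    // product(bool, UseCodeCacheFlushing, true, "...") expands to roughly:
    extern "C" bool UseCodeCacheFlushing;      // DECLARE_PRODUCT_FLAG (header)
    bool UseCodeCacheFlushing = true;          // MATERIALIZE_PRODUCT_FLAG (one .cpp)

    // In a PRODUCT build, develop(intx, StopInterpreterAt, 0, "...") keeps
    // the flag a compile-time constant but now also exports a CONST_-prefixed
    // symbol so the flag table can still see a value:
    extern "C" intx CONST_StopInterpreterAt;   // DECLARE_DEVELOPER_FLAG
    const intx StopInterpreterAt = 0;
    intx CONST_StopInterpreterAt = 0;          // MATERIALIZE_DEVELOPER_FLAG
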
--- a/src/share/vm/runtime/globals_extension.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/runtime/globals_extension.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -34,63 +34,42 @@
 // Parens left off in the following for the enum decl below.
 #define FLAG_MEMBER(flag) Flag_##flag
 
-#define RUNTIME_PRODUCT_FLAG_MEMBER(type, name, value, doc)    FLAG_MEMBER(name),
-#define RUNTIME_PD_PRODUCT_FLAG_MEMBER(type, name, doc)        FLAG_MEMBER(name),
-#define RUNTIME_DIAGNOSTIC_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
+#define RUNTIME_PRODUCT_FLAG_MEMBER(type, name, value, doc)      FLAG_MEMBER(name),
+#define RUNTIME_PD_PRODUCT_FLAG_MEMBER(type, name, doc)          FLAG_MEMBER(name),
+#define RUNTIME_DIAGNOSTIC_FLAG_MEMBER(type, name, value, doc)   FLAG_MEMBER(name),
 #define RUNTIME_EXPERIMENTAL_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
-#define RUNTIME_MANAGEABLE_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
-#define RUNTIME_PRODUCT_RW_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
-#ifdef PRODUCT
-  #define RUNTIME_DEVELOP_FLAG_MEMBER(type, name, value, doc)  /* flag is constant */
-  #define RUNTIME_PD_DEVELOP_FLAG_MEMBER(type, name, doc)      /* flag is constant */
-  #define RUNTIME_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc)
+#define RUNTIME_MANAGEABLE_FLAG_MEMBER(type, name, value, doc)   FLAG_MEMBER(name),
+#define RUNTIME_PRODUCT_RW_FLAG_MEMBER(type, name, value, doc)   FLAG_MEMBER(name),
+#define RUNTIME_DEVELOP_FLAG_MEMBER(type, name, value, doc)      FLAG_MEMBER(name),
+#define RUNTIME_PD_DEVELOP_FLAG_MEMBER(type, name, doc)          FLAG_MEMBER(name),
+#define RUNTIME_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc)   FLAG_MEMBER(name),
+
+#ifdef _LP64
+#define RUNTIME_LP64_PRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
 #else
-  #define RUNTIME_DEVELOP_FLAG_MEMBER(type, name, value, doc)  FLAG_MEMBER(name),
-  #define RUNTIME_PD_DEVELOP_FLAG_MEMBER(type, name, doc)      FLAG_MEMBER(name),
-  #define RUNTIME_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
-#endif
-#ifdef _LP64
-#define RUNTIME_LP64_PRODUCT_FLAG_MEMBER(type, name, value, doc)    FLAG_MEMBER(name),
-#else
-#define RUNTIME_LP64_PRODUCT_FLAG_MEMBER(type, name, value, doc)    /* flag is constant */
+#define RUNTIME_LP64_PRODUCT_FLAG_MEMBER(type, name, value, doc) /* flag is constant */
 #endif // _LP64
 
-#define C1_PRODUCT_FLAG_MEMBER(type, name, value, doc)         FLAG_MEMBER(name),
-#define C1_PD_PRODUCT_FLAG_MEMBER(type, name, doc)             FLAG_MEMBER(name),
-#ifdef PRODUCT
-  #define C1_DEVELOP_FLAG_MEMBER(type, name, value, doc)       /* flag is constant */
-  #define C1_PD_DEVELOP_FLAG_MEMBER(type, name, doc)           /* flag is constant */
-  #define C1_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc)
-#else
-  #define C1_DEVELOP_FLAG_MEMBER(type, name, value, doc)       FLAG_MEMBER(name),
-  #define C1_PD_DEVELOP_FLAG_MEMBER(type, name, doc)           FLAG_MEMBER(name),
-  #define C1_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc)    FLAG_MEMBER(name),
-#endif
+#define C1_PRODUCT_FLAG_MEMBER(type, name, value, doc)           FLAG_MEMBER(name),
+#define C1_PD_PRODUCT_FLAG_MEMBER(type, name, doc)               FLAG_MEMBER(name),
+#define C1_DIAGNOSTIC_FLAG_MEMBER(type, name, value, doc)        FLAG_MEMBER(name),
+#define C1_DEVELOP_FLAG_MEMBER(type, name, value, doc)           FLAG_MEMBER(name),
+#define C1_PD_DEVELOP_FLAG_MEMBER(type, name, doc)               FLAG_MEMBER(name),
+#define C1_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc)        FLAG_MEMBER(name),
 
-#define C2_PRODUCT_FLAG_MEMBER(type, name, value, doc)         FLAG_MEMBER(name),
-#define C2_PD_PRODUCT_FLAG_MEMBER(type, name, doc)             FLAG_MEMBER(name),
-#define C2_DIAGNOSTIC_FLAG_MEMBER(type, name, value, doc)      FLAG_MEMBER(name),
-#define C2_EXPERIMENTAL_FLAG_MEMBER(type, name, value, doc)    FLAG_MEMBER(name),
-#ifdef PRODUCT
-  #define C2_DEVELOP_FLAG_MEMBER(type, name, value, doc)       /* flag is constant */
-  #define C2_PD_DEVELOP_FLAG_MEMBER(type, name, doc)           /* flag is constant */
-  #define C2_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc)
-#else
-  #define C2_DEVELOP_FLAG_MEMBER(type, name, value, doc)       FLAG_MEMBER(name),
-  #define C2_PD_DEVELOP_FLAG_MEMBER(type, name, doc)           FLAG_MEMBER(name),
-  #define C2_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc)    FLAG_MEMBER(name),
-#endif
+#define C2_PRODUCT_FLAG_MEMBER(type, name, value, doc)           FLAG_MEMBER(name),
+#define C2_PD_PRODUCT_FLAG_MEMBER(type, name, doc)               FLAG_MEMBER(name),
+#define C2_DIAGNOSTIC_FLAG_MEMBER(type, name, value, doc)        FLAG_MEMBER(name),
+#define C2_EXPERIMENTAL_FLAG_MEMBER(type, name, value, doc)      FLAG_MEMBER(name),
+#define C2_DEVELOP_FLAG_MEMBER(type, name, value, doc)           FLAG_MEMBER(name),
+#define C2_PD_DEVELOP_FLAG_MEMBER(type, name, doc)               FLAG_MEMBER(name),
+#define C2_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc)        FLAG_MEMBER(name),
 
 #define ARCH_PRODUCT_FLAG_MEMBER(type, name, value, doc)         FLAG_MEMBER(name),
 #define ARCH_DIAGNOSTIC_FLAG_MEMBER(type, name, value, doc)      FLAG_MEMBER(name),
 #define ARCH_EXPERIMENTAL_FLAG_MEMBER(type, name, value, doc)    FLAG_MEMBER(name),
-#ifdef PRODUCT
-  #define ARCH_DEVELOP_FLAG_MEMBER(type, name, value, doc)       /* flag is constant */
-  #define ARCH_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc)
-#else
-  #define ARCH_DEVELOP_FLAG_MEMBER(type, name, value, doc)       FLAG_MEMBER(name),
-  #define ARCH_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc)    FLAG_MEMBER(name),
-#endif
+#define ARCH_DEVELOP_FLAG_MEMBER(type, name, value, doc)         FLAG_MEMBER(name),
+#define ARCH_NOTPRODUCT_FLAG_MEMBER(type, name, value, doc)      FLAG_MEMBER(name),
 
 typedef enum {
  RUNTIME_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER, RUNTIME_PD_DEVELOP_FLAG_MEMBER, RUNTIME_PRODUCT_FLAG_MEMBER, RUNTIME_PD_PRODUCT_FLAG_MEMBER, RUNTIME_DIAGNOSTIC_FLAG_MEMBER, RUNTIME_EXPERIMENTAL_FLAG_MEMBER, RUNTIME_NOTPRODUCT_FLAG_MEMBER, RUNTIME_MANAGEABLE_FLAG_MEMBER, RUNTIME_PRODUCT_RW_FLAG_MEMBER, RUNTIME_LP64_PRODUCT_FLAG_MEMBER)
@@ -99,7 +78,7 @@
  G1_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER, RUNTIME_PD_DEVELOP_FLAG_MEMBER, RUNTIME_PRODUCT_FLAG_MEMBER, RUNTIME_PD_PRODUCT_FLAG_MEMBER, RUNTIME_DIAGNOSTIC_FLAG_MEMBER, RUNTIME_EXPERIMENTAL_FLAG_MEMBER, RUNTIME_NOTPRODUCT_FLAG_MEMBER, RUNTIME_MANAGEABLE_FLAG_MEMBER, RUNTIME_PRODUCT_RW_FLAG_MEMBER)
 #endif // INCLUDE_ALL_GCS
 #ifdef COMPILER1
- C1_FLAGS(C1_DEVELOP_FLAG_MEMBER, C1_PD_DEVELOP_FLAG_MEMBER, C1_PRODUCT_FLAG_MEMBER, C1_PD_PRODUCT_FLAG_MEMBER, C1_NOTPRODUCT_FLAG_MEMBER)
+ C1_FLAGS(C1_DEVELOP_FLAG_MEMBER, C1_PD_DEVELOP_FLAG_MEMBER, C1_PRODUCT_FLAG_MEMBER, C1_PD_PRODUCT_FLAG_MEMBER, C1_DIAGNOSTIC_FLAG_MEMBER, C1_NOTPRODUCT_FLAG_MEMBER)
 #endif
 #ifdef COMPILER2
  C2_FLAGS(C2_DEVELOP_FLAG_MEMBER, C2_PD_DEVELOP_FLAG_MEMBER, C2_PRODUCT_FLAG_MEMBER, C2_PD_PRODUCT_FLAG_MEMBER, C2_DIAGNOSTIC_FLAG_MEMBER, C2_EXPERIMENTAL_FLAG_MEMBER, C2_NOTPRODUCT_FLAG_MEMBER)
@@ -113,63 +92,42 @@
 
 #define FLAG_MEMBER_WITH_TYPE(flag,type) Flag_##flag##_##type
 
-#define RUNTIME_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)    FLAG_MEMBER_WITH_TYPE(name,type),
-#define RUNTIME_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, doc)        FLAG_MEMBER_WITH_TYPE(name,type),
-#define RUNTIME_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
+#define RUNTIME_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)      FLAG_MEMBER_WITH_TYPE(name,type),
+#define RUNTIME_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, doc)          FLAG_MEMBER_WITH_TYPE(name,type),
+#define RUNTIME_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)   FLAG_MEMBER_WITH_TYPE(name,type),
 #define RUNTIME_EXPERIMENTAL_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
-#define RUNTIME_MANAGEABLE_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
-#define RUNTIME_PRODUCT_RW_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
-#ifdef PRODUCT
-  #define RUNTIME_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)     /* flag is constant */
-  #define RUNTIME_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc)         /* flag is constant */
-  #define RUNTIME_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)
-#else
-  #define RUNTIME_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)     FLAG_MEMBER_WITH_TYPE(name,type),
-  #define RUNTIME_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc)         FLAG_MEMBER_WITH_TYPE(name,type),
-  #define RUNTIME_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)  FLAG_MEMBER_WITH_TYPE(name,type),
-#endif
+#define RUNTIME_MANAGEABLE_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)   FLAG_MEMBER_WITH_TYPE(name,type),
+#define RUNTIME_PRODUCT_RW_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)   FLAG_MEMBER_WITH_TYPE(name,type),
+#define RUNTIME_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)      FLAG_MEMBER_WITH_TYPE(name,type),
+#define RUNTIME_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc)          FLAG_MEMBER_WITH_TYPE(name,type),
+#define RUNTIME_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)   FLAG_MEMBER_WITH_TYPE(name,type),
 
-#define C1_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)         FLAG_MEMBER_WITH_TYPE(name,type),
-#define C1_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, doc)             FLAG_MEMBER_WITH_TYPE(name,type),
-#ifdef PRODUCT
-  #define C1_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)       /* flag is constant */
-  #define C1_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc)           /* flag is constant */
-  #define C1_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)
+#define C1_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)           FLAG_MEMBER_WITH_TYPE(name,type),
+#define C1_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, doc)               FLAG_MEMBER_WITH_TYPE(name,type),
+#define C1_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)        FLAG_MEMBER_WITH_TYPE(name,type),
+#define C1_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)           FLAG_MEMBER_WITH_TYPE(name,type),
+#define C1_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc)               FLAG_MEMBER_WITH_TYPE(name,type),
+#define C1_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)        FLAG_MEMBER_WITH_TYPE(name,type),
+
+#ifdef _LP64
+#define RUNTIME_LP64_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
 #else
-  #define C1_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)       FLAG_MEMBER_WITH_TYPE(name,type),
-  #define C1_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc)           FLAG_MEMBER_WITH_TYPE(name,type),
-  #define C1_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)    FLAG_MEMBER_WITH_TYPE(name,type),
-#endif
-#ifdef _LP64
-#define RUNTIME_LP64_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)    FLAG_MEMBER_WITH_TYPE(name,type),
-#else
-#define RUNTIME_LP64_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)    /* flag is constant */
+#define RUNTIME_LP64_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) /* flag is constant */
 #endif // _LP64
 
-#define C2_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)         FLAG_MEMBER_WITH_TYPE(name,type),
-#define C2_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, doc)             FLAG_MEMBER_WITH_TYPE(name,type),
-#define C2_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)      FLAG_MEMBER_WITH_TYPE(name,type),
+#define C2_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)           FLAG_MEMBER_WITH_TYPE(name,type),
+#define C2_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, doc)               FLAG_MEMBER_WITH_TYPE(name,type),
+#define C2_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)        FLAG_MEMBER_WITH_TYPE(name,type),
 #define C2_EXPERIMENTAL_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)      FLAG_MEMBER_WITH_TYPE(name,type),
-#ifdef PRODUCT
-  #define C2_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)       /* flag is constant */
-  #define C2_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc)           /* flag is constant */
-  #define C2_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)
-#else
-  #define C2_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)       FLAG_MEMBER_WITH_TYPE(name,type),
-  #define C2_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc)           FLAG_MEMBER_WITH_TYPE(name,type),
-  #define C2_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)    FLAG_MEMBER_WITH_TYPE(name,type),
-#endif
+#define C2_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)           FLAG_MEMBER_WITH_TYPE(name,type),
+#define C2_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc)               FLAG_MEMBER_WITH_TYPE(name,type),
+#define C2_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)        FLAG_MEMBER_WITH_TYPE(name,type),
 
 #define ARCH_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)         FLAG_MEMBER_WITH_TYPE(name,type),
 #define ARCH_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)      FLAG_MEMBER_WITH_TYPE(name,type),
-#define ARCH_EXPERIMENTAL_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)      FLAG_MEMBER_WITH_TYPE(name,type),
-#ifdef PRODUCT
-  #define ARCH_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)       /* flag is constant */
-  #define ARCH_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)
-#else
-  #define ARCH_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)       FLAG_MEMBER_WITH_TYPE(name,type),
-  #define ARCH_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)    FLAG_MEMBER_WITH_TYPE(name,type),
-#endif
+#define ARCH_EXPERIMENTAL_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)    FLAG_MEMBER_WITH_TYPE(name,type),
+#define ARCH_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)         FLAG_MEMBER_WITH_TYPE(name,type),
+#define ARCH_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)      FLAG_MEMBER_WITH_TYPE(name,type),
 
 typedef enum {
  RUNTIME_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER_WITH_TYPE,
@@ -204,6 +162,7 @@
           C1_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE,
           C1_PRODUCT_FLAG_MEMBER_WITH_TYPE,
           C1_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE,
+          C1_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE,
           C1_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE)
 #endif
 #ifdef COMPILER2
@@ -230,19 +189,19 @@
 
 #define FLAG_SET_DEFAULT(name, value) ((name) = (value))
 
-#define FLAG_SET_CMDLINE(type, name, value) (CommandLineFlagsEx::type##AtPut(FLAG_MEMBER_WITH_TYPE(name,type), (type)(value), COMMAND_LINE))
-#define FLAG_SET_ERGO(type, name, value)    (CommandLineFlagsEx::type##AtPut(FLAG_MEMBER_WITH_TYPE(name,type), (type)(value), ERGONOMIC))
+#define FLAG_SET_CMDLINE(type, name, value) (CommandLineFlagsEx::type##AtPut(FLAG_MEMBER_WITH_TYPE(name,type), (type)(value), Flag::COMMAND_LINE))
+#define FLAG_SET_ERGO(type, name, value)    (CommandLineFlagsEx::type##AtPut(FLAG_MEMBER_WITH_TYPE(name,type), (type)(value), Flag::ERGONOMIC))
 
 // Can't put the following in CommandLineFlags because
 // of a circular dependency on the enum definition.
 class CommandLineFlagsEx : CommandLineFlags {
  public:
-  static void boolAtPut(CommandLineFlagWithType flag, bool value, FlagValueOrigin origin);
-  static void intxAtPut(CommandLineFlagWithType flag, intx value, FlagValueOrigin origin);
-  static void uintxAtPut(CommandLineFlagWithType flag, uintx value, FlagValueOrigin origin);
-  static void uint64_tAtPut(CommandLineFlagWithType flag, uint64_t value, FlagValueOrigin origin);
-  static void doubleAtPut(CommandLineFlagWithType flag, double value, FlagValueOrigin origin);
-  static void ccstrAtPut(CommandLineFlagWithType flag, ccstr value, FlagValueOrigin origin);
+  static void boolAtPut(CommandLineFlagWithType flag, bool value, Flag::Flags origin);
+  static void intxAtPut(CommandLineFlagWithType flag, intx value, Flag::Flags origin);
+  static void uintxAtPut(CommandLineFlagWithType flag, uintx value, Flag::Flags origin);
+  static void uint64_tAtPut(CommandLineFlagWithType flag, uint64_t value, Flag::Flags origin);
+  static void doubleAtPut(CommandLineFlagWithType flag, double value, Flag::Flags origin);
+  static void ccstrAtPut(CommandLineFlagWithType flag, ccstr value, Flag::Flags origin);
 
   static bool is_default(CommandLineFlag flag);
   static bool is_ergo(CommandLineFlag flag);
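A minimal usage sketch of the reworked macros, with illustrative flag names (any flag declared through the flag machinery above behaves the same); the origin argument now expands to the nested Flag::Flags enumerator rather than the retired stand-alone FlagValueOrigin:

  // FLAG_SET_CMDLINE(bool, UseLargePages, false) expands to:
  //   CommandLineFlagsEx::boolAtPut(FLAG_MEMBER_WITH_TYPE(UseLargePages, bool),
  //                                 (bool)(false), Flag::COMMAND_LINE)
  FLAG_SET_CMDLINE(bool, UseLargePages, false);
  // FLAG_SET_ERGO records Flag::ERGONOMIC as the value's origin instead:
  FLAG_SET_ERGO(uintx, MaxHeapSize, (uintx)(96 * M));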
--- a/src/share/vm/runtime/handles.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/runtime/handles.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -45,7 +45,7 @@
 oop* HandleArea::allocate_handle(oop obj) {
   assert(_handle_mark_nesting > 1, "memory leak: allocating handle outside HandleMark");
   assert(_no_handle_mark_nesting == 0, "allocating handle inside NoHandleMark");
-  assert(obj->is_oop(), "sanity check");
+  assert(obj->is_oop(), err_msg("not an oop: " INTPTR_FORMAT, (intptr_t*) obj));
   return real_allocate_handle(obj);
 }
 
@@ -179,11 +179,11 @@
   _thread->set_last_handle_mark(previous_handle_mark());
 }
 
-void* HandleMark::operator new(size_t size) {
+void* HandleMark::operator new(size_t size) throw() {
   return AllocateHeap(size, mtThread);
 }
 
-void* HandleMark::operator new [] (size_t size) {
+void* HandleMark::operator new [] (size_t size) throw() {
   return AllocateHeap(size, mtThread);
 }
 
--- a/src/share/vm/runtime/handles.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/runtime/handles.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -136,7 +136,7 @@
 // Specific Handles for different oop types
 #define DEF_METADATA_HANDLE(name, type)          \
   class name##Handle;                            \
-  class name##Handle {                           \
+  class name##Handle : public StackObj {         \
     type*     _value;                            \
     Thread*   _thread;                           \
    protected:                                    \
@@ -175,7 +175,7 @@
 // Writing this class explicitly, since DEF_METADATA_HANDLE(klass) doesn't
 // provide the necessary Klass* <-> Klass* conversions. This Klass
 // could be removed when we don't have the Klass* typedef anymore.
-class KlassHandle {
+class KlassHandle : public StackObj {
   Klass* _value;
  protected:
    Klass* obj() const          { return _value; }
@@ -309,8 +309,8 @@
   // called in the destructor of HandleMarkCleaner
   void pop_and_restore();
   // overloaded operators
-  void* operator new(size_t size);
-  void* operator new [](size_t size);
+  void* operator new(size_t size) throw();
+  void* operator new [](size_t size) throw();
   void operator delete(void* p);
   void operator delete[](void* p);
 };
--- a/src/share/vm/runtime/handles.inline.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/runtime/handles.inline.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -79,6 +79,7 @@
     } else {                                                           \
       _thread = Thread::current();                                     \
     }                                                                  \
+    assert (_thread->is_in_stack((address)this), "not on stack?");     \
     _thread->metadata_handles()->push((Metadata*)_value);              \
   } else {                                                             \
     _thread = NULL;                                                    \
@@ -95,6 +96,7 @@
     } else {                                                           \
       _thread = Thread::current();                                     \
     }                                                                  \
+    assert (_thread->is_in_stack((address)this), "not on stack?");     \
     _thread->metadata_handles()->push((Metadata*)_value);              \
   } else {                                                             \
     _thread = NULL;                                                    \
--- a/src/share/vm/runtime/interfaceSupport.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/runtime/interfaceSupport.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -56,7 +56,7 @@
   }
 
  private:
-  inline void* operator new(size_t size, void* ptr) {
+  inline void* operator new(size_t size, void* ptr) throw() {
     return ptr;
   }
 };
@@ -474,16 +474,6 @@
     VM_ENTRY_BASE(result_type, header, thread)                       \
     debug_only(VMEntryWrapper __vew;)
 
-// Another special case for nmethod_entry_point so the nmethod that the
-// interpreter is about to branch to doesn't get flushed before as we
-// branch to it's interpreter_entry_point.  Skip stress testing here too.
-// Also we don't allow async exceptions because it is just too painful.
-#define IRT_ENTRY_FOR_NMETHOD(result_type, header)                   \
-  result_type header {                                               \
-    nmethodLocker _nmlock(nm);                                       \
-    ThreadInVMfromJavaNoAsyncException __tiv(thread);                                \
-    VM_ENTRY_BASE(result_type, header, thread)
-
 #define IRT_END }
 
 
--- a/src/share/vm/runtime/java.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/runtime/java.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -183,6 +183,7 @@
   collected_profiled_methods->sort(&compare_methods);
 
   int count = collected_profiled_methods->length();
+  int total_size = 0;
   if (count > 0) {
     for (int index = 0; index < count; index++) {
       Method* m = collected_profiled_methods->at(index);
@@ -190,10 +191,18 @@
       tty->print_cr("------------------------------------------------------------------------");
       //m->print_name(tty);
       m->print_invocation_count();
+      tty->print_cr("  mdo size: %d bytes", m->method_data()->size_in_bytes());
       tty->cr();
+      // Dump data on parameters if any
+      if (m->method_data() != NULL && m->method_data()->parameters_type_data() != NULL) {
+        tty->fill_to(2);
+        m->method_data()->parameters_type_data()->print_data_on(tty);
+      }
       m->print_codes();
+      total_size += m->method_data()->size_in_bytes();
     }
     tty->print_cr("------------------------------------------------------------------------");
+    tty->print_cr("Total MDO size: %d bytes", total_size);
   }
 }
 
@@ -544,6 +553,19 @@
   // it will run into trouble when system destroys static variables.
   MemTracker::shutdown(MemTracker::NMT_normal);
 
+  if (VerifyStringTableAtExit) {
+    int fail_cnt = 0;
+    {
+      MutexLocker ml(StringTable_lock);
+      fail_cnt = StringTable::verify_and_compare_entries();
+    }
+
+    if (fail_cnt != 0) {
+      tty->print_cr("ERROR: fail_cnt=%d", fail_cnt);
+      guarantee(fail_cnt == 0, "unexpected StringTable verification failures");
+    }
+  }
+
   #undef BEFORE_EXIT_NOT_RUN
   #undef BEFORE_EXIT_RUNNING
   #undef BEFORE_EXIT_DONE
--- a/src/share/vm/runtime/javaCalls.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/runtime/javaCalls.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -430,7 +430,7 @@
   for(int i = 0; i < _size; i++) {
     if (_is_oop[i]) {
       // Handle conversion
-      _value[i] = (intptr_t)Handle::raw_resolve((oop *)_value[i]);
+      _value[i] = cast_from_oop<intptr_t>(Handle::raw_resolve((oop *)_value[i]));
     }
   }
   // Return argument vector
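cast_from_oop<intptr_t>() replaces the bare C-style cast here and elsewhere in this changeset. A rough sketch of what such helpers look like, assuming the plain (non-checked) typedef of oop; the real templates live in oopsHierarchy.hpp and also cope with the CHECK_UNHANDLED_OOPS build, where oop is a wrapper class rather than a raw pointer:

  typedef class oopDesc* oop;   // non-checked definition of oop

  // Funnel every oop <-> integral conversion through one audited place
  // instead of scattering C-style casts around the runtime.
  template <class T> inline T cast_from_oop(oop o) {
    return (T)((void*)o);
  }
  template <class T> inline oop cast_to_oop(T value) {
    return (oop)((void*)value);
  }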
--- a/src/share/vm/runtime/mutexLocker.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/runtime/mutexLocker.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -45,7 +45,6 @@
 Mutex*   VMStatistic_lock             = NULL;
 Mutex*   JNIGlobalHandle_lock         = NULL;
 Mutex*   JNIHandleBlockFreeList_lock  = NULL;
-Mutex*   JNICachedItableIndex_lock    = NULL;
 Mutex*   MemberNameTable_lock         = NULL;
 Mutex*   JmethodIdCreation_lock       = NULL;
 Mutex*   JfieldIdCreation_lock        = NULL;
@@ -253,7 +252,6 @@
   }
   def(Heap_lock                    , Monitor, nonleaf+1,   false);
   def(JfieldIdCreation_lock        , Mutex  , nonleaf+1,   true ); // jfieldID, Used in VM_Operation
-  def(JNICachedItableIndex_lock    , Mutex  , nonleaf+1,   false); // Used to cache an itable index during JNI invoke
   def(MemberNameTable_lock         , Mutex  , nonleaf+1,   false); // Used to protect MemberNameTable
 
   def(CompiledIC_lock              , Mutex  , nonleaf+2,   false); // locks VtableStubs_lock, InlineCacheBuffer_lock
--- a/src/share/vm/runtime/mutexLocker.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/runtime/mutexLocker.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -53,7 +53,6 @@
 extern Mutex*   VMStatistic_lock;                // a lock used to guard statistics count increment
 extern Mutex*   JNIGlobalHandle_lock;            // a lock on creating JNI global handles
 extern Mutex*   JNIHandleBlockFreeList_lock;     // a lock on the JNI handle block free list
-extern Mutex*   JNICachedItableIndex_lock;       // a lock on caching an itable index during JNI invoke
 extern Mutex*   MemberNameTable_lock;            // a lock on the MemberNameTable updates
 extern Mutex*   JmethodIdCreation_lock;          // a lock on creating JNI method identifiers
 extern Mutex*   JfieldIdCreation_lock;           // a lock on creating JNI static field identifiers
--- a/src/share/vm/runtime/objectMonitor.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/runtime/objectMonitor.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -312,10 +312,10 @@
  public:
   static int Knob_Verbose;
   static int Knob_SpinLimit;
-  void* operator new (size_t size) {
+  void* operator new (size_t size) throw() {
     return AllocateHeap(size, mtInternal);
   }
-  void* operator new[] (size_t size) {
+  void* operator new[] (size_t size) throw() {
     return operator new (size);
   }
   void operator delete(void* p) {
--- a/src/share/vm/runtime/os.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/runtime/os.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -314,6 +314,11 @@
   }
 }
 
+void os::init_before_ergo() {
+  // We need to initialize large page support here because ergonomics makes
+  // decisions based on large page support and the calculated large page size.
+  large_page_init();
+}
 
 void os::signal_init() {
   if (!ReduceSignalUsage) {
@@ -454,6 +459,7 @@
  */
 void* os::find_agent_function(AgentLibrary *agent_lib, bool check_lib,
                               const char *syms[], size_t syms_len) {
+  assert(agent_lib != NULL, "sanity check");
   const char *lib_name;
   void *handle = agent_lib->os_lib();
   void *entryName = NULL;
@@ -484,6 +490,7 @@
   void *proc_handle;
   void *save_handle;
 
+  assert(agent_lib != NULL, "sanity check");
   if (agent_lib->name() == NULL) {
     return false;
   }
@@ -493,14 +500,13 @@
   // We want to look in this process' symbol table.
   agent_lib->set_os_lib(proc_handle);
   ret = find_agent_function(agent_lib, true, syms, syms_len);
-  agent_lib->set_os_lib(save_handle);
   if (ret != NULL) {
     // Found an entry point like Agent_OnLoad_lib_name so we have a static agent
-    agent_lib->set_os_lib(proc_handle);
     agent_lib->set_valid();
     agent_lib->set_static_lib(true);
     return true;
   }
+  agent_lib->set_os_lib(save_handle);
   return false;
 }
 
@@ -1485,44 +1491,6 @@
   return result;
 }
 
-// Read file line by line, if line is longer than bsize,
-// skip rest of line.
-int os::get_line_chars(int fd, char* buf, const size_t bsize){
-  size_t sz, i = 0;
-
-  // read until EOF, EOL or buf is full
-  while ((sz = (int) read(fd, &buf[i], 1)) == 1 && i < (bsize-2) && buf[i] != '\n') {
-     ++i;
-  }
-
-  if (buf[i] == '\n') {
-    // EOL reached so ignore EOL character and return
-
-    buf[i] = 0;
-    return (int) i;
-  }
-
-  buf[i+1] = 0;
-
-  if (sz != 1) {
-    // EOF reached. if we read chars before EOF return them and
-    // return EOF on next call otherwise return EOF
-
-    return (i == 0) ? -1 : (int) i;
-  }
-
-  // line is longer than size of buf, skip to EOL
-  char ch;
-  while (read(fd, &ch, 1) == 1 && ch != '\n') {
-    // Do nothing
-  }
-
-  // return initial part of line that fits in buf.
-  // If we reached EOF, it will be returned on next call.
-
-  return (int) i;
-}
-
 void os::SuspendedThreadTask::run() {
   assert(Threads_lock->owned_by_self() || (_thread == VMThread::vm_thread()), "must have threads lock to call this");
   internal_do_task();
--- a/src/share/vm/runtime/os.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/runtime/os.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -95,6 +95,8 @@
 typedef void (*java_call_t)(JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread);
 
 class os: AllStatic {
+  friend class VMStructs;
+
  public:
   enum { page_sizes_max = 9 }; // Size of _page_sizes array (8 plus a sentinel)
 
@@ -143,7 +145,10 @@
 
  public:
   static void init(void);                      // Called before command line parsing
+  static void init_before_ergo(void);          // Called after command line parsing
+                                               // before VM ergonomics processing.
   static jint init_2(void);                    // Called after command line parsing
+                                               // and VM ergonomics processing
   static void init_globals(void) {             // Called from init_globals() in init.cpp
     init_globals_ext();
   }
@@ -258,6 +263,11 @@
   static size_t page_size_for_region(size_t region_min_size,
                                      size_t region_max_size,
                                      uint min_pages);
+  // Return the largest page size that can be used
+  static size_t max_page_size() {
+    // The _page_sizes array is sorted in descending order.
+    return _page_sizes[0];
+  }
 
   // Methods for tracing page sizes returned by the above method; enabled by
   // TracePageSizes.  The region_{min,max}_size parameters should be the values
@@ -742,10 +752,6 @@
   // Hook for os specific jvm options that we don't want to abort on seeing
   static bool obsolete_option(const JavaVMOption *option);
 
-  // Read file line by line. If line is longer than bsize,
-  // rest of line is skipped. Returns number of bytes read or -1 on EOF
-  static int get_line_chars(int fd, char *buf, const size_t bsize);
-
   // Extensions
 #include "runtime/os_ext.hpp"
 
@@ -810,6 +816,14 @@
 #endif
 
  public:
+#ifndef PLATFORM_PRINT_NATIVE_STACK
+  // No platform-specific code for printing the native stack.
+  static bool platform_print_native_stack(outputStream* st, void* context,
+                                          char *buf, int buf_size) {
+    return false;
+  }
+#endif
+
   // debugging support (mostly used by debug.cpp but also fatal error handler)
   static bool find(address pc, outputStream* st = tty); // OS specific function to make sense out of an address
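Taken together, the comments on init(), init_before_ergo() and init_2() imply a three-stage startup; a simplified sketch of the intended call order (the actual sequence is driven from VM initialization code such as Threads::create_vm(), an assumption here since it is not shown in this diff):

  os::init();               // before command line parsing
  // ... parse command line arguments ...
  os::init_before_ergo();   // large page support must be known before ergonomics
  // ... VM ergonomics: derive heap sizes, compiler settings, etc. ...
  os::init_2();             // after command line parsing and ergonomics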
 
--- a/src/share/vm/runtime/park.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/runtime/park.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -140,7 +140,7 @@
 // well as bank access imbalance on Niagara-like platforms,
 // although Niagara's hash function should help.
 
-void * ParkEvent::operator new (size_t sz) {
+void * ParkEvent::operator new (size_t sz) throw() {
   return (void *) ((intptr_t (AllocateHeap(sz + 256, mtInternal, CALLER_PC)) + 256) & -256) ;
 }
 
--- a/src/share/vm/runtime/park.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/runtime/park.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -166,7 +166,7 @@
     // aligned on 256-byte address boundaries.  This ensures that the least
     // significant byte of a ParkEvent address is always 0.
 
-    void * operator new (size_t sz) ;
+    void * operator new (size_t sz) throw();
     void operator delete (void * a) ;
 
   public:
--- a/src/share/vm/runtime/reflection.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/runtime/reflection.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -952,7 +952,8 @@
         }
       }  else {
         // if the method can be overridden, we resolve using the vtable index.
-        int index  = reflected_method->vtable_index();
+        assert(!reflected_method->has_itable_index(), "");
+        int index = reflected_method->vtable_index();
         method = reflected_method;
         if (index != Method::nonvirtual_vtable_index) {
           // target_klass might be an arrayKlassOop but all vtables start at
--- a/src/share/vm/runtime/reflectionUtils.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/runtime/reflectionUtils.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,8 +27,11 @@
 #include "memory/universe.inline.hpp"
 #include "runtime/reflectionUtils.hpp"
 
-KlassStream::KlassStream(instanceKlassHandle klass, bool local_only, bool classes_only) {
-  _klass = klass;
+KlassStream::KlassStream(instanceKlassHandle klass, bool local_only,
+                         bool classes_only, bool walk_defaults) {
+  _klass = _base_klass = klass;
+  _base_class_search_defaults = false;
+  _defaults_checked = false;
   if (classes_only) {
     _interfaces = Universe::the_empty_klass_array();
   } else {
@@ -37,6 +40,7 @@
   _interface_index = _interfaces->length();
   _local_only = local_only;
   _classes_only = classes_only;
+  _walk_defaults = walk_defaults;
 }
 
 bool KlassStream::eos() {
@@ -45,7 +49,13 @@
   if (!_klass->is_interface() && _klass->super() != NULL) {
     // go up superclass chain (not for interfaces)
     _klass = _klass->super();
+  // Next, for method walks, process the default methods
+  } else if (_walk_defaults && (_defaults_checked == false) && (_base_klass->default_methods() != NULL)) {
+    _base_class_search_defaults = true;
+    _klass = _base_klass;
+    _defaults_checked = true;
   } else {
+    // Next walk transitive interfaces
     if (_interface_index > 0) {
       _klass = _interfaces->at(--_interface_index);
     } else {
--- a/src/share/vm/runtime/reflectionUtils.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/runtime/reflectionUtils.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -38,7 +38,7 @@
 // and (super)interfaces. Streaming is done in reverse order (subclasses first,
 // interfaces last).
 //
-//    for (KlassStream st(k, false, false); !st.eos(); st.next()) {
+//    for (KlassStream st(k, false, false, false); !st.eos(); st.next()) {
 //      Klass* k = st.klass();
 //      ...
 //    }
@@ -46,17 +46,21 @@
 class KlassStream VALUE_OBJ_CLASS_SPEC {
  protected:
   instanceKlassHandle _klass;           // current klass/interface iterated over
-  Array<Klass*>*    _interfaces;      // transitive interfaces for initial class
+  instanceKlassHandle _base_klass;      // initial klass/interface to iterate over
+  Array<Klass*>*      _interfaces;      // transitive interfaces for initial class
   int                 _interface_index; // current interface being processed
   bool                _local_only;      // process initial class/interface only
   bool                _classes_only;    // process classes only (no interfaces)
+  bool                _walk_defaults;   // process default methods
+  bool                _base_class_search_defaults; // time to process default methods
+  bool                _defaults_checked; // already checked for default methods
   int                 _index;
 
-  virtual int length() const = 0;
+  virtual int length() = 0;
 
  public:
   // constructor
-  KlassStream(instanceKlassHandle klass, bool local_only, bool classes_only);
+  KlassStream(instanceKlassHandle klass, bool local_only, bool classes_only, bool walk_defaults);
 
   // testing
   bool eos();
@@ -67,6 +71,8 @@
   // accessors
   instanceKlassHandle klass() const { return _klass; }
   int index() const                 { return _index; }
+  bool base_class_search_defaults() const { return _base_class_search_defaults; }
+  void base_class_search_defaults(bool b) { _base_class_search_defaults = b; }
 };
 
 
@@ -81,17 +87,24 @@
 
 class MethodStream : public KlassStream {
  private:
-  int length() const          { return methods()->length(); }
-  Array<Method*>* methods() const { return _klass->methods(); }
+  int length()                    { return methods()->length(); }
+  Array<Method*>* methods() {
+    if (base_class_search_defaults()) {
+      base_class_search_defaults(false);
+      return _klass->default_methods();
+    } else {
+      return _klass->methods();
+    }
+  }
  public:
   MethodStream(instanceKlassHandle klass, bool local_only, bool classes_only)
-    : KlassStream(klass, local_only, classes_only) {
+    : KlassStream(klass, local_only, classes_only, true) {
     _index = length();
     next();
   }
 
   void next() { _index--; }
-  Method* method() const { return methods()->at(index()); }
+  Method* method() { return methods()->at(index()); }
 };
 
 
@@ -107,11 +120,13 @@
 
 class FieldStream : public KlassStream {
  private:
-  int length() const                { return _klass->java_fields_count(); }
+  int length() { return _klass->java_fields_count(); }
+
+  fieldDescriptor _fd_buf;
 
  public:
   FieldStream(instanceKlassHandle klass, bool local_only, bool classes_only)
-    : KlassStream(klass, local_only, classes_only) {
+    : KlassStream(klass, local_only, classes_only, false) {
     _index = length();
     next();
   }
@@ -134,6 +149,12 @@
   int offset() const {
     return _klass->field_offset( index() );
   }
+  // bridge to a heavier API:
+  fieldDescriptor& field_descriptor() const {
+    fieldDescriptor& field = const_cast<fieldDescriptor&>(_fd_buf);
+    field.reinitialize(_klass(), _index);
+    return field;
+  }
 };
 
 class FilteredField : public CHeapObj<mtInternal>  {
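Because MethodStream now hard-wires walk_defaults to true, a plain iteration also visits the default methods the class inherits from its interfaces. An illustrative loop, where klass is any instanceKlassHandle:

  for (MethodStream st(klass, false /*local_only*/, false /*classes_only*/);
       !st.eos(); st.next()) {
    Method* m = st.method();
    // ... m may now come from klass->default_methods() as well ...
  }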
--- a/src/share/vm/runtime/safepoint.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/runtime/safepoint.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -519,8 +519,8 @@
   }
 
   {
-    TraceTime t4("sweeping nmethods", TraceSafepointCleanupTime);
-    NMethodSweeper::scan_stacks();
+    TraceTime t4("mark nmethods", TraceSafepointCleanupTime);
+    NMethodSweeper::mark_active_nmethods();
   }
 
   if (SymbolTable::needs_rehashing()) {
@@ -745,14 +745,14 @@
 #endif
 
 static void print_ptrs(intptr_t oldptr, intptr_t newptr, bool wasoop) {
-  bool is_oop = newptr ? ((oop)newptr)->is_oop() : false;
+  bool is_oop = newptr ? (cast_to_oop(newptr))->is_oop() : false;
   tty->print_cr(PTR_FORMAT PTR_PAD " %s %c " PTR_FORMAT PTR_PAD " %s %s",
                 oldptr, wasoop?"oop":"   ", oldptr == newptr ? ' ' : '!',
                 newptr, is_oop?"oop":"   ", (wasoop && !is_oop) ? "STALE" : ((wasoop==false&&is_oop==false&&oldptr !=newptr)?"STOMP":"     "));
 }
 
 static void print_longs(jlong oldptr, jlong newptr, bool wasoop) {
-  bool is_oop = newptr ? ((oop)(intptr_t)newptr)->is_oop() : false;
+  bool is_oop = newptr ? (cast_to_oop(newptr))->is_oop() : false;
   tty->print_cr(PTR64_FORMAT " %s %c " PTR64_FORMAT " %s %s",
                 oldptr, wasoop?"oop":"   ", oldptr == newptr ? ' ' : '!',
                 newptr, is_oop?"oop":"   ", (wasoop && !is_oop) ? "STALE" : ((wasoop==false&&is_oop==false&&oldptr !=newptr)?"STOMP":"     "));
--- a/src/share/vm/runtime/sharedRuntime.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/runtime/sharedRuntime.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -577,7 +577,7 @@
   assert(caller.is_interpreted_frame(), "");
   int args_size = ArgumentSizeComputer(sig).size() + 1;
   assert(args_size <= caller.interpreter_frame_expression_stack_size(), "receiver must be on interpreter stack");
-  oop result = (oop) *caller.interpreter_frame_tos_at(args_size - 1);
+  oop result = cast_to_oop(*caller.interpreter_frame_tos_at(args_size - 1));
   assert(Universe::heap()->is_in(result) && result->is_oop(), "receiver must be an oop");
   return result;
 }
@@ -1051,7 +1051,8 @@
 
   // Find receiver for non-static call
   if (bc != Bytecodes::_invokestatic &&
-      bc != Bytecodes::_invokedynamic) {
+      bc != Bytecodes::_invokedynamic &&
+      bc != Bytecodes::_invokehandle) {
     // This register map must be update since we need to find the receiver for
     // compiled frames. The receiver might be in a register.
     RegisterMap reg_map2(thread);
@@ -1078,7 +1079,7 @@
 
 #ifdef ASSERT
   // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
-  if (bc != Bytecodes::_invokestatic && bc != Bytecodes::_invokedynamic) {
+  if (bc != Bytecodes::_invokestatic && bc != Bytecodes::_invokedynamic && bc != Bytecodes::_invokehandle) {
     assert(receiver.not_null(), "should have thrown exception");
     KlassHandle receiver_klass(THREAD, receiver->klass());
     Klass* rk = constants->klass_ref_at(bytecode_index, CHECK_(nullHandle));
@@ -1240,9 +1241,9 @@
 #endif
 
   if (is_virtual) {
-    assert(receiver.not_null(), "sanity check");
+    assert(receiver.not_null() || invoke_code == Bytecodes::_invokehandle, "sanity check");
     bool static_bound = call_info.resolved_method()->can_be_statically_bound();
-    KlassHandle h_klass(THREAD, receiver->klass());
+    KlassHandle h_klass(THREAD, invoke_code == Bytecodes::_invokehandle ? NULL : receiver->klass());
     CompiledIC::compute_monomorphic_entry(callee_method, h_klass,
                      is_optimized, static_bound, virtual_call_info,
                      CHECK_(methodHandle()));
@@ -1505,8 +1506,11 @@
                                                 info, CHECK_(methodHandle()));
         inline_cache->set_to_monomorphic(info);
       } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
-        // Change to megamorphic
-        inline_cache->set_to_megamorphic(&call_info, bc, CHECK_(methodHandle()));
+        // Potential change to megamorphic
+        bool successful = inline_cache->set_to_megamorphic(&call_info, bc, CHECK_(methodHandle()));
+        if (!successful) {
+          inline_cache->set_to_clean();
+        }
       } else {
         // Either clean or megamorphic
       }
@@ -2936,7 +2940,7 @@
         ObjectSynchronizer::inflate_helper(kptr2->obj());
       // Now the displaced header is free to move
       buf[i++] = (intptr_t)lock->displaced_header();
-      buf[i++] = (intptr_t)kptr2->obj();
+      buf[i++] = cast_from_oop<intptr_t>(kptr2->obj());
     }
   }
   assert( i - max_locals == active_monitor_count*2, "found the expected number of monitors" );
--- a/src/share/vm/runtime/signature.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/runtime/signature.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -378,6 +378,16 @@
   return result;
 }
 
+int SignatureStream::reference_parameter_count() {
+  int args_count = 0;
+  for ( ; !at_return_type(); next()) {
+    if (is_object()) {
+      args_count++;
+    }
+  }
+  return args_count;
+}
+
 bool SignatureVerifier::is_valid_signature(Symbol* sig) {
   const char* signature = (const char*)sig->bytes();
   ssize_t len = sig->utf8_length();
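An illustrative use of the reference_parameter_count() helper added above; the signature string and 'method' are hypothetical. is_object() holds for both T_OBJECT and T_ARRAY, so for "(I[JLjava/lang/String;)V" the long[] and the String are counted but the int is not. Note that the call consumes the stream while advancing to the return type:

  SignatureStream ss(method->signature());       // 'method' is a hypothetical Method*
  int ref_args = ss.reference_parameter_count(); // 2 for "(I[JLjava/lang/String;)V"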
--- a/src/share/vm/runtime/signature.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/runtime/signature.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -401,6 +401,9 @@
 
   // return same as_symbol except allocation of new symbols is avoided.
   Symbol* as_symbol_or_null();
+
+  // count the number of references in the signature
+  int reference_parameter_count();
 };
 
 class SignatureVerifier : public StackObj {
--- a/src/share/vm/runtime/sweeper.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/runtime/sweeper.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -127,64 +127,79 @@
 #define SWEEP(nm)
 #endif
 
+nmethod*  NMethodSweeper::_current         = NULL; // Current nmethod
+long      NMethodSweeper::_traversals      = 0;    // Nof. stack traversals performed
+int       NMethodSweeper::_seen            = 0;    // Nof. nmethods we have processed so far in the current pass of CodeCache
+int       NMethodSweeper::_flushed_count   = 0;    // Nof. nmethods flushed in current sweep
+int       NMethodSweeper::_zombified_count = 0;    // Nof. nmethods made zombie in current sweep
+int       NMethodSweeper::_marked_count    = 0;    // Nof. nmethods marked for reclaim in current sweep
 
-long      NMethodSweeper::_traversals = 0;   // No. of stack traversals performed
-nmethod*  NMethodSweeper::_current = NULL;   // Current nmethod
-int       NMethodSweeper::_seen = 0 ;        // No. of nmethods we have currently processed in current pass of CodeCache
-int       NMethodSweeper::_flushed_count = 0;   // Nof. nmethods flushed in current sweep
-int       NMethodSweeper::_zombified_count = 0; // Nof. nmethods made zombie in current sweep
-int       NMethodSweeper::_marked_count = 0;    // Nof. nmethods marked for reclaim in current sweep
-
-volatile int NMethodSweeper::_invocations = 0;   // No. of invocations left until we are completed with this pass
+volatile int NMethodSweeper::_invocations   = 0; // Nof. invocations left until this pass is completed
 volatile int NMethodSweeper::_sweep_started = 0; // Whether a sweep is in progress.
 
-jint      NMethodSweeper::_locked_seen = 0;
+jint      NMethodSweeper::_locked_seen               = 0;
 jint      NMethodSweeper::_not_entrant_seen_on_stack = 0;
-bool      NMethodSweeper::_resweep = false;
-jint      NMethodSweeper::_flush_token = 0;
-jlong     NMethodSweeper::_last_full_flush_time = 0;
-int       NMethodSweeper::_highest_marked = 0;
-int       NMethodSweeper::_dead_compile_ids = 0;
-long      NMethodSweeper::_last_flush_traversal_id = 0;
+bool      NMethodSweeper::_request_mark_phase        = false;
 
-int       NMethodSweeper::_number_of_flushes = 0; // Total of full traversals caused by full cache
 int       NMethodSweeper::_total_nof_methods_reclaimed = 0;
-jlong     NMethodSweeper::_total_time_sweeping = 0;
-jlong     NMethodSweeper::_total_time_this_sweep = 0;
-jlong     NMethodSweeper::_peak_sweep_time = 0;
-jlong     NMethodSweeper::_peak_sweep_fraction_time = 0;
-jlong     NMethodSweeper::_total_disconnect_time = 0;
-jlong     NMethodSweeper::_peak_disconnect_time = 0;
+jlong     NMethodSweeper::_total_time_sweeping         = 0;
+jlong     NMethodSweeper::_total_time_this_sweep       = 0;
+jlong     NMethodSweeper::_peak_sweep_time             = 0;
+jlong     NMethodSweeper::_peak_sweep_fraction_time    = 0;
+int       NMethodSweeper::_hotness_counter_reset_val   = 0;
+
 
 class MarkActivationClosure: public CodeBlobClosure {
 public:
   virtual void do_code_blob(CodeBlob* cb) {
-    // If we see an activation belonging to a non_entrant nmethod, we mark it.
-    if (cb->is_nmethod() && ((nmethod*)cb)->is_not_entrant()) {
-      ((nmethod*)cb)->mark_as_seen_on_stack();
+    if (cb->is_nmethod()) {
+      nmethod* nm = (nmethod*)cb;
+      nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
+      // If we see an activation belonging to a non_entrant nmethod, we mark it.
+      if (nm->is_not_entrant()) {
+        nm->mark_as_seen_on_stack();
+      }
     }
   }
 };
 static MarkActivationClosure mark_activation_closure;
 
+class SetHotnessClosure: public CodeBlobClosure {
+public:
+  virtual void do_code_blob(CodeBlob* cb) {
+    if (cb->is_nmethod()) {
+      nmethod* nm = (nmethod*)cb;
+      nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
+    }
+  }
+};
+static SetHotnessClosure set_hotness_closure;
+
+
+int NMethodSweeper::hotness_counter_reset_val() {
+  if (_hotness_counter_reset_val == 0) {
+    _hotness_counter_reset_val = (ReservedCodeCacheSize < M) ? 1 : (ReservedCodeCacheSize / M) * 2;
+  }
+  return _hotness_counter_reset_val;
+}
 bool NMethodSweeper::sweep_in_progress() {
   return (_current != NULL);
 }
 
-void NMethodSweeper::scan_stacks() {
+// Scans the stacks of all Java threads and marks activations of not-entrant methods.
+// No need to synchronize access, since 'mark_active_nmethods' is always executed at a
+// safepoint.
+void NMethodSweeper::mark_active_nmethods() {
   assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
-  if (!MethodFlushing) return;
-
-  // No need to synchronize access, since this is always executed at a
-  // safepoint.
-
-  // Make sure CompiledIC_lock in unlocked, since we might update some
-  // inline caches. If it is, we just bail-out and try later.
-  if (CompiledIC_lock->is_locked() || Patching_lock->is_locked()) return;
+  // If we do not want to reclaim not-entrant or zombie methods, there is no
+  // need to scan stacks.
+  if (!MethodFlushing) {
+    return;
+  }
 
   // Check for restart
   assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
-  if (!sweep_in_progress() && _resweep) {
+  if (!sweep_in_progress() && need_marking_phase()) {
     _seen        = 0;
     _invocations = NmethodSweepFraction;
     _current     = CodeCache::first_nmethod();
@@ -197,30 +212,22 @@
     Threads::nmethods_do(&mark_activation_closure);
 
     // reset the flags since we started a scan from the beginning.
-    _resweep = false;
+    reset_nmethod_marking();
     _locked_seen = 0;
     _not_entrant_seen_on_stack = 0;
+  } else {
+    // Only set hotness counter
+    Threads::nmethods_do(&set_hotness_closure);
   }
 
-  if (UseCodeCacheFlushing) {
-    // only allow new flushes after the interval is complete.
-    jlong now           = os::javaTimeMillis();
-    jlong max_interval  = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
-    jlong curr_interval = now - _last_full_flush_time;
-    if (curr_interval > max_interval) {
-      _flush_token = 0;
-    }
-
-    if (!CodeCache::needs_flushing() && !CompileBroker::should_compile_new_jobs()) {
-      CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
-      log_sweep("restart_compiler");
-    }
-  }
+  OrderAccess::storestore();
 }
 
 void NMethodSweeper::possibly_sweep() {
   assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
-  if (!MethodFlushing || !sweep_in_progress()) return;
+  if (!MethodFlushing || !sweep_in_progress()) {
+    return;
+  }
 
   if (_invocations > 0) {
     // Only one thread at a time will sweep
@@ -258,8 +265,7 @@
   if (!CompileBroker::should_compile_new_jobs()) {
     // If we have turned off compilations we might as well do full sweeps
     // in order to reach the clean state faster. Otherwise the sleeping compiler
-    // threads will slow down sweeping. After a few iterations the cache
-    // will be clean and sweeping stops (_resweep will not be set)
+    // threads will slow down sweeping.
     _invocations = 1;
   }
 
@@ -269,15 +275,19 @@
   // the number of nmethods changes during the sweep so the final
   // stage must iterate until it there are no more nmethods.
   int todo = (CodeCache::nof_nmethods() - _seen) / _invocations;
+  int swept_count = 0;
+
 
   assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
   assert(!CodeCache_lock->owned_by_self(), "just checking");
 
+  int freed_memory = 0;
   {
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 
     // The last invocation iterates until there are no more nmethods
     for (int i = 0; (i < todo || _invocations == 1) && _current != NULL; i++) {
+      swept_count++;
       if (SafepointSynchronize::is_synchronizing()) { // Safepoint request
         if (PrintMethodFlushing && Verbose) {
           tty->print_cr("### Sweep at %d out of %d, invocation: %d, yielding to safepoint", _seen, CodeCache::nof_nmethods(), _invocations);
@@ -297,7 +307,7 @@
       // Now ready to process nmethod and give up CodeCache_lock
       {
         MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-        process_nmethod(_current);
+        freed_memory += process_nmethod(_current);
       }
       _seen++;
       _current = next;
@@ -306,11 +316,11 @@
 
   assert(_invocations > 1 || _current == NULL, "must have scanned the whole cache");
 
-  if (!sweep_in_progress() && !_resweep && (_locked_seen || _not_entrant_seen_on_stack)) {
+  if (!sweep_in_progress() && !need_marking_phase() && (_locked_seen || _not_entrant_seen_on_stack)) {
     // we've completed a scan without making progress but there were
     // nmethods we were unable to process either because they were
-    // locked or were still on stack.  We don't have to aggresively
-    // clean them up so just stop scanning.  We could scan once more
+    // locked or were still on stack. We don't have to aggressively
+    // clean them up so just stop scanning. We could scan once more
     // but that complicates the control logic and it's unlikely to
     // matter much.
     if (PrintMethodFlushing) {
@@ -331,7 +341,7 @@
     event.set_endtime(sweep_end_counter);
     event.set_sweepIndex(_traversals);
     event.set_sweepFractionIndex(NmethodSweepFraction - _invocations + 1);
-    event.set_sweptCount(todo);
+    event.set_sweptCount(swept_count);
     event.set_flushedCount(_flushed_count);
     event.set_markedCount(_marked_count);
     event.set_zombifiedCount(_zombified_count);
@@ -349,9 +359,16 @@
     log_sweep("finished");
   }
 
-  // Sweeper is the only case where memory is released,
-  // check here if it is time to restart the compiler.
-  if (UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs() && !CodeCache::needs_flushing()) {
+  // The sweeper is the only place where memory is released; check here whether
+  // it is time to restart the compiler. Checking only for a certain amount of
+  // free memory in the code cache might lead to re-enabling compilation even
+  // though no memory has been released. For example, compilation has been
+  // disabled even though 4MB (or more) of the code cache was free; the reason
+  // is code cache fragmentation. Therefore, it only makes sense to re-enable
+  // compilation if we have actually freed memory. Note that sweeping 16MB of
+  // the code cache typically releases only several kB of memory, so requiring
+  // 'freed_memory' > 0 before restarting the compiler is reasonable.
+  if (UseCodeCacheFlushing && (!CompileBroker::should_compile_new_jobs() && (freed_memory > 0))) {
     CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
     log_sweep("restart_compiler");
   }
@@ -365,8 +382,8 @@
     _thread = CompilerThread::current();
     if (!nm->is_zombie() && !nm->is_unloaded()) {
       // Only expose live nmethods for scanning
-    _thread->set_scanned_nmethod(nm);
-  }
+      _thread->set_scanned_nmethod(nm);
+    }
   }
   ~NMethodMarker() {
     _thread->set_scanned_nmethod(NULL);
@@ -390,20 +407,20 @@
   nm->flush();
 }
 
-void NMethodSweeper::process_nmethod(nmethod *nm) {
+int NMethodSweeper::process_nmethod(nmethod *nm) {
   assert(!CodeCache_lock->owned_by_self(), "just checking");
 
+  int freed_memory = 0;
   // Make sure this nmethod doesn't get unloaded during the scan,
-  // since the locks acquired below might safepoint.
+  // since safepoints may happen while the locks below are being acquired.
   NMethodMarker nmm(nm);
-
   SWEEP(nm);
 
   // Skip methods that are currently referenced by the VM
   if (nm->is_locked_by_vm()) {
     // But still remember to clean-up inline caches for alive nmethods
     if (nm->is_alive()) {
-      // Clean-up all inline caches that points to zombie/non-reentrant methods
+      // Clean inline caches that point to zombie/non-entrant methods
       MutexLocker cl(CompiledIC_lock);
       nm->cleanup_inline_caches();
       SWEEP(nm);
@@ -411,18 +428,19 @@
       _locked_seen++;
       SWEEP(nm);
     }
-    return;
+    return freed_memory;
   }
 
   if (nm->is_zombie()) {
-    // If it is first time, we see nmethod then we mark it. Otherwise,
-    // we reclame it. When we have seen a zombie method twice, we know that
+    // If it is the first time we see this nmethod, we mark it. Otherwise,
+    // we reclaim it. When we have seen a zombie method twice, we know that
     // there are no inline caches that refer to it.
     if (nm->is_marked_for_reclamation()) {
       assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
       if (PrintMethodFlushing && Verbose) {
         tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
       }
+      freed_memory = nm->total_size();
       release_nmethod(nm);
       _flushed_count++;
     } else {
@@ -430,19 +448,19 @@
         tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
       }
       nm->mark_for_reclamation();
-      _resweep = true;
+      request_nmethod_marking();
       _marked_count++;
       SWEEP(nm);
     }
   } else if (nm->is_not_entrant()) {
-    // If there is no current activations of this method on the
+    // If there are no current activations of this method on the
     // stack we can safely convert it to a zombie method
     if (nm->can_not_entrant_be_converted()) {
       if (PrintMethodFlushing && Verbose) {
         tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
       }
       nm->make_zombie();
-      _resweep = true;
+      request_nmethod_marking();
       _zombified_count++;
       SWEEP(nm);
     } else {
@@ -457,159 +475,57 @@
     }
   } else if (nm->is_unloaded()) {
     // Unloaded code, just make it a zombie
-    if (PrintMethodFlushing && Verbose)
+    if (PrintMethodFlushing && Verbose) {
       tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);
-
+    }
     if (nm->is_osr_method()) {
       SWEEP(nm);
       // No inline caches will ever point to osr methods, so we can just remove it
+      freed_memory = nm->total_size();
       release_nmethod(nm);
       _flushed_count++;
     } else {
       nm->make_zombie();
-      _resweep = true;
+      request_nmethod_marking();
       _zombified_count++;
       SWEEP(nm);
     }
   } else {
-    assert(nm->is_alive(), "should be alive");
-
     if (UseCodeCacheFlushing) {
-      if (nm->is_speculatively_disconnected() && !nm->is_locked_by_vm() && !nm->is_osr_method() &&
-          (_traversals > _last_flush_traversal_id + 2) && (nm->compile_id() < _highest_marked)) {
-        // This method has not been called since the forced cleanup happened
-        nm->make_not_entrant();
+      if (!nm->is_locked_by_vm() && !nm->is_osr_method() && !nm->is_native_method()) {
+        // Do not make native methods and OSR-methods not-entrant
+        nm->dec_hotness_counter();
+        // Get the initial value of the hotness counter. This value depends on the
+        // ReservedCodeCacheSize
+        int reset_val = hotness_counter_reset_val();
+        int time_since_reset = reset_val - nm->hotness_counter();
+        double threshold = -reset_val + (CodeCache::reverse_free_ratio() * NmethodSweepActivity);
+        // The less free space the code cache has, the bigger reverse_free_ratio() is.
+        // I.e., 'threshold' increases with lower available space in the code cache and a higher
+        // NmethodSweepActivity. If the current hotness counter (which decreases from its initial
+        // value until it is reset by stack walking) is smaller than the computed threshold, the
+        // corresponding nmethod is considered for removal.
+        if ((NmethodSweepActivity > 0) && (nm->hotness_counter() < threshold) && (time_since_reset > 10)) {
+          // A method is marked as not-entrant if the method is
+          // 1) 'old enough': nm->hotness_counter() < threshold
+          // 2) The method was in_use for a minimum amount of time: (time_since_reset > 10)
+          //    The second condition is necessary if we are dealing with very small code cache
+          //    sizes (e.g., <10m) and the code cache size is too small to hold all hot methods.
+          //    The second condition ensures that methods are not immediately made not-entrant
+          //    after compilation.
+          nm->make_not_entrant();
+          request_nmethod_marking();
+        }
       }
     }
-
-    // Clean-up all inline caches that points to zombie/non-reentrant methods
+    // Clean up all inline caches that point to zombie/not-entrant methods
     MutexLocker cl(CompiledIC_lock);
     nm->cleanup_inline_caches();
     SWEEP(nm);
   }
-}
-
-// Code cache unloading: when compilers notice the code cache is getting full,
-// they will call a vm op that comes here. This code attempts to speculatively
-// unload the oldest half of the nmethods (based on the compile job id) by
-// saving the old code in a list in the CodeCache. Then
-// execution resumes. If a method so marked is not called by the second sweeper
-// stack traversal after the current one, the nmethod will be marked non-entrant and
-// got rid of by normal sweeping. If the method is called, the Method*'s
-// _code field is restored and the Method*/nmethod
-// go back to their normal state.
-void NMethodSweeper::handle_full_code_cache(bool is_full) {
-
-  if (is_full) {
-    // Since code cache is full, immediately stop new compiles
-    if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) {
-      log_sweep("disable_compiler");
-    }
-  }
-
-  // Make sure only one thread can flush
-  // The token is reset after CodeCacheMinimumFlushInterval in scan stacks,
-  // no need to check the timeout here.
-  jint old = Atomic::cmpxchg( 1, &_flush_token, 0 );
-  if (old != 0) {
-    return;
-  }
-
-  VM_HandleFullCodeCache op(is_full);
-  VMThread::execute(&op);
-
-  // resweep again as soon as possible
-  _resweep = true;
+  return freed_memory;
 }
 
-void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
-  // If there was a race in detecting full code cache, only run
-  // one vm op for it or keep the compiler shut off
-
-  jlong disconnect_start_counter = os::elapsed_counter();
-
-  // Traverse the code cache trying to dump the oldest nmethods
-  int curr_max_comp_id = CompileBroker::get_compilation_id();
-  int flush_target = ((curr_max_comp_id - _dead_compile_ids) / CodeCacheFlushingFraction) + _dead_compile_ids;
-
-  log_sweep("start_cleaning");
-
-  nmethod* nm = CodeCache::alive_nmethod(CodeCache::first());
-  jint disconnected = 0;
-  jint made_not_entrant  = 0;
-  jint nmethod_count = 0;
-
-  while ((nm != NULL)){
-    int curr_comp_id = nm->compile_id();
-
-    // OSR methods cannot be flushed like this. Also, don't flush native methods
-    // since they are part of the JDK in most cases
-    if (!nm->is_osr_method() && !nm->is_locked_by_vm() && !nm->is_native_method()) {
-
-      // only count methods that can be speculatively disconnected
-      nmethod_count++;
-
-      if (nm->is_in_use() && (curr_comp_id < flush_target)) {
-        if ((nm->method()->code() == nm)) {
-          // This method has not been previously considered for
-          // unloading or it was restored already
-          CodeCache::speculatively_disconnect(nm);
-          disconnected++;
-        } else if (nm->is_speculatively_disconnected()) {
-          // This method was previously considered for preemptive unloading and was not called since then
-          CompilationPolicy::policy()->delay_compilation(nm->method());
-          nm->make_not_entrant();
-          made_not_entrant++;
-        }
-
-        if (curr_comp_id > _highest_marked) {
-          _highest_marked = curr_comp_id;
-        }
-      }
-    }
-    nm = CodeCache::alive_nmethod(CodeCache::next(nm));
-  }
-
-  // remember how many compile_ids wheren't seen last flush.
-  _dead_compile_ids = curr_max_comp_id - nmethod_count;
-
-  log_sweep("stop_cleaning",
-                       "disconnected='" UINT32_FORMAT "' made_not_entrant='" UINT32_FORMAT "'",
-                       disconnected, made_not_entrant);
-
-  // Shut off compiler. Sweeper will start over with a new stack scan and
-  // traversal cycle and turn it back on if it clears enough space.
-  if (is_full) {
-    _last_full_flush_time = os::javaTimeMillis();
-  }
-
-  jlong disconnect_end_counter = os::elapsed_counter();
-  jlong disconnect_time = disconnect_end_counter - disconnect_start_counter;
-  _total_disconnect_time += disconnect_time;
-  _peak_disconnect_time = MAX2(disconnect_time, _peak_disconnect_time);
-
-  EventCleanCodeCache event(UNTIMED);
-  if (event.should_commit()) {
-    event.set_starttime(disconnect_start_counter);
-    event.set_endtime(disconnect_end_counter);
-    event.set_disconnectedCount(disconnected);
-    event.set_madeNonEntrantCount(made_not_entrant);
-    event.commit();
-  }
-  _number_of_flushes++;
-
-  // After two more traversals the sweeper will get rid of unrestored nmethods
-  _last_flush_traversal_id = _traversals;
-  _resweep = true;
-#ifdef ASSERT
-
-  if(PrintMethodFlushing && Verbose) {
-    tty->print_cr("### sweeper: unload time: " INT64_FORMAT, (jlong)disconnect_time);
-  }
-#endif
-}
-
-
 // Print out some state information about the current sweep and the
 // state of the code cache if it's requested.
 void NMethodSweeper::log_sweep(const char* msg, const char* format, ...) {
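To make the threshold logic in process_nmethod() above concrete, a worked example with purely illustrative numbers (the free ratio and flag values are assumptions, not normative):

  // ReservedCodeCacheSize = 48M  =>  reset_val = (48M / M) * 2 = 96
  // half-full code cache         =>  reverse_free_ratio() ~= 2.0 (assumed)
  // NmethodSweepActivity = 10    =>  threshold = -96 + 2.0 * 10 = -76
  int    reset_val = 96;
  double threshold = -reset_val + 2.0 * 10;   // -76
  // An nmethod that no stack scan resets has its counter decremented on each
  // sweep visit, so it falls from 96 below -76 after roughly 172 visits; as
  // the cache fills up, reverse_free_ratio() grows, the threshold rises, and
  // eviction becomes progressively more aggressive.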
--- a/src/share/vm/runtime/sweeper.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/runtime/sweeper.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -27,8 +27,30 @@
 
 // An NmethodSweeper is an incremental cleaner for:
 //    - cleanup inline caches
-//    - reclamation of unreferences zombie nmethods
-//
+//    - reclamation of nmethods
+// Removing nmethods from the code cache involves two operations:
+//  1) mark active nmethods
+//     This is done in 'mark_active_nmethods()'. The function is called at a
+//     safepoint and marks all nmethods that are active on a thread's stack.
+//  2) sweep nmethods
+//     This is done in sweep_code_cache(). The function is the only place in the
+//     sweeper where memory is reclaimed. Note that sweep_code_cache() is not
+//     called at a safepoint. However, sweep_code_cache() stops executing if
+//     another thread requests a safepoint. Consequently, 'mark_active_nmethods()'
+//     and sweep_code_cache() cannot execute at the same time.
+//     To reclaim memory, nmethods are first marked as 'not-entrant'. Methods can
+//     be made not-entrant by (i) the sweeper, (ii) deoptimization, (iii) dependency
+//     invalidation, and (iv) being replaced by a different method version (tiered
+//     compilation). Not-entrant nmethods cannot be called by Java threads, but they
+//     can still be active on the stack. To ensure that active nmethods are not
+//     reclaimed, we have to wait until the next marking phase has completed. If a
+//     not-entrant nmethod was NOT marked as active, it can be converted to 'zombie'
+//     state. To safely remove the nmethod, all inline caches (ICs) that point to
+//     the nmethod must be cleared. After that, the nmethod can be evicted from the
+//     code cache. Each state change happens during a separate sweep, so it may take
+//     at least 3 sweeps before an nmethod's space is freed. Sweeping is currently
+//     done by compiler threads between compilations, or at least every 5 seconds
+//     (NmethodSweepCheckInterval) when the code cache is full.
 
 class NMethodSweeper : public AllStatic {
   static long      _traversals;      // Stack scan count, also sweep ID.
@@ -41,46 +63,38 @@
   static volatile int  _invocations;   // No. of invocations left until we are completed with this pass
   static volatile int  _sweep_started; // Flag to control conc sweeper
 
-  //The following are reset in scan_stacks and synchronized by the safepoint
-  static bool      _resweep;           // Indicates that a change has happend and we want another sweep,
-                                       // always checked and reset at a safepoint so memory will be in sync.
-  static int       _locked_seen;       // Number of locked nmethods encountered during the scan
+  // The following are reset in mark_active_nmethods and synchronized by the safepoint
+  static bool      _request_mark_phase;        // Indicates that a change has happened and we need another mark phase,
+                                               // always checked and reset at a safepoint so memory will be in sync.
+  static int       _locked_seen;               // Number of locked nmethods encountered during the scan
   static int       _not_entrant_seen_on_stack; // Number of not entrant nmethod were are still on stack
-  static jint      _flush_token;       // token that guards method flushing, making sure it is executed only once.
-
-  // These are set during a flush, a VM-operation
-  static long      _last_flush_traversal_id; // trav number at last flush unloading
-  static jlong     _last_full_flush_time;    // timestamp of last emergency unloading
-
-  // These are synchronized by the _sweep_started token
-  static int       _highest_marked;   // highest compile id dumped at last emergency unloading
-  static int       _dead_compile_ids; // number of compile ids that where not in the cache last flush
 
   // Stat counters
-  static int       _number_of_flushes;            // Total of full traversals caused by full cache
   static int       _total_nof_methods_reclaimed;  // Accumulated nof methods flushed
   static jlong     _total_time_sweeping;          // Accumulated time sweeping
   static jlong     _total_time_this_sweep;        // Total time this sweep
   static jlong     _peak_sweep_time;              // Peak time for a full sweep
   static jlong     _peak_sweep_fraction_time;     // Peak time sweeping one fraction
-  static jlong     _total_disconnect_time;        // Total time cleaning code mem
-  static jlong     _peak_disconnect_time;         // Peak time cleaning code mem
 
-  static void process_nmethod(nmethod *nm);
+  static int  process_nmethod(nmethod *nm);
   static void release_nmethod(nmethod* nm);
 
-  static void log_sweep(const char* msg, const char* format = NULL, ...);
   static bool sweep_in_progress();
+  static void sweep_code_cache();
+  static void request_nmethod_marking() { _request_mark_phase = true; }
+  static void reset_nmethod_marking()   { _request_mark_phase = false; }
+  static bool need_marking_phase()      { return _request_mark_phase; }
+
+  static int _hotness_counter_reset_val;
 
  public:
   static long traversal_count()              { return _traversals; }
-  static int  number_of_flushes()            { return _number_of_flushes; }
   static int  total_nof_methods_reclaimed()  { return _total_nof_methods_reclaimed; }
   static jlong total_time_sweeping()         { return _total_time_sweeping; }
   static jlong peak_sweep_time()             { return _peak_sweep_time; }
   static jlong peak_sweep_fraction_time()    { return _peak_sweep_fraction_time; }
-  static jlong total_disconnect_time()       { return _total_disconnect_time; }
-  static jlong peak_disconnect_time()        { return _peak_disconnect_time; }
+  static void log_sweep(const char* msg, const char* format = NULL, ...);
+
 
 #ifdef ASSERT
   static bool is_sweeping(nmethod* which) { return _current == which; }
@@ -90,19 +104,18 @@
   static void report_events();
 #endif
 
-  static void scan_stacks();      // Invoked at the end of each safepoint
-  static void sweep_code_cache(); // Concurrent part of sweep job
-  static void possibly_sweep();   // Compiler threads call this to sweep
+  static void mark_active_nmethods();      // Invoked at the end of each safepoint
+  static void possibly_sweep();            // Compiler threads call this to sweep
 
-  static void notify(nmethod* nm) {
+  static int sort_nmethods_by_hotness(nmethod** nm1, nmethod** nm2);
+  static int hotness_counter_reset_val();
+
+  static void notify() {
     // Request a new sweep of the code cache from the beginning. No
     // need to synchronize the setting of this flag since it only
     // changes to false at safepoint so we can never overwrite it with false.
-     _resweep = true;
+     request_nmethod_marking();
   }
-
-  static void handle_full_code_cache(bool is_full); // Called by compilers who fail to allocate
-  static void speculative_disconnect_nmethods(bool was_full);   // Called by vm op to deal with alloc failure
 };
 
 #endif // SHARE_VM_RUNTIME_SWEEPER_HPP
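
The sweeper comment above describes a state machine that advances each nmethod by at most one state per sweep: a not-entrant method must survive a completed marking phase before becoming a zombie, and a zombie must have its inline caches cleared before its space is freed. A minimal sketch of that progression, with hypothetical names standing in for the real nmethod interface:

    // Hedged sketch of the multi-sweep progression described in the
    // sweeper.hpp comment above. State names mirror the comment; the
    // function and its parameters are illustrative, not HotSpot's API.
    enum SweeperState { alive, not_entrant, zombie, freed };

    SweeperState advance(SweeperState s,
                         bool marked_active_last_phase,  // seen on a stack?
                         bool inline_caches_cleared) {
      switch (s) {
        case not_entrant:
          // Wait for a completed marking phase proving inactivity.
          return marked_active_last_phase ? not_entrant : zombie;
        case zombie:
          // All ICs pointing at the nmethod must be cleared first.
          return inline_caches_cleared ? freed : zombie;
        default:
          return s;  // 'alive' transitions are driven elsewhere
      }
    }

Each call models one sweeper visit, which is why reclaiming an nmethod's space takes at least three sweeps end to end.
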
--- a/src/share/vm/runtime/synchronizer.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/runtime/synchronizer.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -154,7 +154,7 @@
 static volatile intptr_t ListLock = 0 ;      // protects global monitor free-list cache
 static volatile int MonitorFreeCount  = 0 ;      // # on gFreeList
 static volatile int MonitorPopulation = 0 ;      // # Extant -- in circulation
-#define CHAINMARKER ((oop)-1)
+#define CHAINMARKER (cast_to_oop<intptr_t>(-1))
 
 // -----------------------------------------------------------------------------
 //  Fast Monitor Enter/Exit
@@ -510,7 +510,7 @@
          // then for each thread on the list, set the flag and unpark() the thread.
          // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
          // wakes at most one thread whereas we need to wake the entire list.
-         int ix = (intptr_t(obj) >> 5) & (NINFLATIONLOCKS-1) ;
+         int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1) ;
          int YieldThenBlock = 0 ;
          assert (ix >= 0 && ix < NINFLATIONLOCKS, "invariant") ;
          assert ((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant") ;
@@ -565,7 +565,7 @@
      // This variation has the property of being stable (idempotent)
      // between STW operations.  This can be useful in some of the 1-0
      // synchronization schemes.
-     intptr_t addrBits = intptr_t(obj) >> 3 ;
+     intptr_t addrBits = cast_from_oop<intptr_t>(obj) >> 3 ;
      value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom ;
   } else
   if (hashCode == 2) {
@@ -575,7 +575,7 @@
      value = ++GVars.hcSequence ;
   } else
   if (hashCode == 4) {
-     value = intptr_t(obj) ;
+     value = cast_from_oop<intptr_t>(obj) ;
   } else {
      // Marsaglia's xor-shift scheme with thread-specific state
      // This is probably the best overall implementation -- we'll
@@ -1321,7 +1321,7 @@
             if (object->is_instance()) {
               ResourceMark rm;
               tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
-                (intptr_t) object, (intptr_t) object->mark(),
+                (void *) object, (intptr_t) object->mark(),
                 object->klass()->external_name());
             }
           }
@@ -1371,7 +1371,7 @@
         if (object->is_instance()) {
           ResourceMark rm;
           tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
-            (intptr_t) object, (intptr_t) object->mark(),
+            (void *) object, (intptr_t) object->mark(),
             object->klass()->external_name());
         }
       }
@@ -1439,7 +1439,7 @@
        if (obj->is_instance()) {
          ResourceMark rm;
            tty->print_cr("Deflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
-                (intptr_t) obj, (intptr_t) obj->mark(), obj->klass()->external_name());
+                (void *) obj, (intptr_t) obj->mark(), obj->klass()->external_name());
        }
      }
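
The hashCode branches patched above only change how the object address is extracted; the default branch the diff leaves alone is Marsaglia's xor-shift generator with per-thread state. A self-contained sketch of that generator (the state layout and seeding are illustrative, not HotSpot's actual Thread fields):

    #include <stdint.h>

    // Hedged sketch of xorshift128 (Marsaglia) with caller-owned state,
    // the "thread-specific state" scheme the code above refers to.
    struct HashState { uint32_t x, y, z, w; };  // seed with nonzero values

    uint32_t next_hash(HashState* s) {
      uint32_t t = s->x;
      t ^= t << 11;
      t ^= t >> 8;
      s->x = s->y; s->y = s->z; s->z = s->w;   // shift the state window
      s->w ^= (s->w >> 19) ^ t;                // fold in the new word
      return s->w;
    }

Keeping the state per thread means hash generation needs no synchronization, which is the property the surrounding comments are after.
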
 
--- a/src/share/vm/runtime/thread.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/runtime/thread.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -336,6 +336,8 @@
   // Reclaim the objectmonitors from the omFreeList of the moribund thread.
   ObjectSynchronizer::omFlush (this) ;
 
+  EVENT_THREAD_DESTRUCT(this);
+
   // stack_base can be NULL if the thread is never started or exited before
   // record_stack_base_and_size called. Although, we would like to ensure
   // that all started threads do call record_stack_base_and_size(), there is
@@ -1098,7 +1100,7 @@
 // General purpose hook into Java code, run once when the VM is initialized.
 // The Java library method itself may be changed independently from the VM.
 static void call_postVMInitHook(TRAPS) {
-  Klass* k = SystemDictionary::PostVMInitHook_klass();
+  Klass* k = SystemDictionary::resolve_or_null(vmSymbols::sun_misc_PostVMInitHook(), THREAD);
   instanceKlassHandle klass (THREAD, k);
   if (klass.not_null()) {
     JavaValue result(T_VOID);
@@ -1445,7 +1447,7 @@
   _in_deopt_handler = 0;
   _doing_unsafe_access = false;
   _stack_guard_state = stack_guard_unused;
-  _exception_oop = NULL;
+  (void)const_cast<oop&>(_exception_oop = NULL);
   _exception_pc  = 0;
   _exception_handler_pc = 0;
   _is_method_handle_return = 0;
@@ -1455,7 +1457,6 @@
   _interp_only_mode    = 0;
   _special_runtime_exit_condition = _no_async_condition;
   _pending_async_exception = NULL;
-  _is_compiling = false;
   _thread_stat = NULL;
   _thread_stat = new ThreadStatistics();
   _blocked_on_compilation = false;
@@ -1816,7 +1817,8 @@
     // Call Thread.exit(). We try 3 times in case we got another Thread.stop during
     // the execution of the method. If that is not enough, then we don't really care. Thread.stop
     // is deprecated anyhow.
-    { int count = 3;
+    if (!is_Compiler_thread()) {
+      int count = 3;
       while (java_lang_Thread::threadGroup(threadObj()) != NULL && (count-- > 0)) {
         EXCEPTION_MARK;
         JavaValue result(T_VOID);
@@ -1829,7 +1831,6 @@
         CLEAR_PENDING_EXCEPTION;
       }
     }
-
     // notify JVMTI
     if (JvmtiExport::should_post_thread_life()) {
       JvmtiExport::post_thread_end(this);
@@ -3240,6 +3241,7 @@
   _counters = counters;
   _buffer_blob = NULL;
   _scanned_nmethod = NULL;
+  _compiler = NULL;
 
 #ifndef PRODUCT
   _ideal_graph_printer = NULL;
@@ -3256,6 +3258,7 @@
   }
 }
 
+
 // ======= Threads ========
 
 // The Threads class links together all active threads, and provides
@@ -3276,8 +3279,6 @@
 // All JavaThreads
 #define ALL_JAVA_THREADS(X) for (JavaThread* X = _thread_list; X; X = X->next())
 
-void os_stream();
-
 // All JavaThreads + all non-JavaThreads (i.e., every thread in the system)
 void Threads::threads_do(ThreadClosure* tc) {
   assert_locked_or_safepoint(Threads_lock);
@@ -3332,6 +3333,11 @@
   jint parse_result = Arguments::parse(args);
   if (parse_result != JNI_OK) return parse_result;
 
+  os::init_before_ergo();
+
+  jint ergo_result = Arguments::apply_ergo();
+  if (ergo_result != JNI_OK) return ergo_result;
+
   if (PauseAtStartup) {
     os::pause();
   }
@@ -3639,6 +3645,16 @@
   CompileBroker::compilation_init();
 #endif
 
+  if (EnableInvokeDynamic) {
+    // Pre-initialize some JSR292 core classes to avoid deadlock during class loading.
+    // It is done after compilers are initialized, because otherwise compilations of
+    // signature polymorphic MH intrinsics can be missed
+    // (see SystemDictionary::find_method_handle_intrinsic).
+    initialize_class(vmSymbols::java_lang_invoke_MethodHandle(), CHECK_0);
+    initialize_class(vmSymbols::java_lang_invoke_MemberName(), CHECK_0);
+    initialize_class(vmSymbols::java_lang_invoke_MethodHandleNatives(), CHECK_0);
+  }
+
 #if INCLUDE_MANAGEMENT
   Management::initialize(THREAD);
 #endif // INCLUDE_MANAGEMENT
@@ -3707,7 +3723,7 @@
     const char *name = agent->name();
     const char *msg = "Could not find agent library ";
 
-    // First check to see if agent is statcally linked into executable
+    // First check to see if agent is statically linked into executable
     if (os::find_builtin_agent(agent, on_load_symbols, num_symbol_entries)) {
       library = agent->os_lib();
     } else if (agent->is_absolute_path()) {
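
One detail worth noting in the thread.cpp hunks: call_postVMInitHook now resolves the hook class on demand via SystemDictionary::resolve_or_null instead of going through a preloaded well-known class, and the existing klass.not_null() check makes the class optional. A hedged sketch of that resolve-then-check pattern, with stubbed stand-ins for the real SystemDictionary/JavaCalls machinery so it compiles on its own:

    #include <cstddef>

    // Hypothetical stand-ins for the real resolution and invocation APIs.
    struct Klass {};
    static Klass* resolve_or_null(const char* /*class_name*/) {
      return NULL;  // stub: behave as if the class is absent
    }
    static void invoke_static_void(Klass* /*k*/, const char* /*method*/) {}

    void call_optional_hook() {
      Klass* k = resolve_or_null("sun/misc/PostVMInitHook");
      if (k != NULL) {                 // the class may legitimately be missing
        invoke_static_void(k, "run");  // invoke the hook only if it resolved
      }
      // otherwise skip silently -- the hook is optional by design
    }
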
--- a/src/share/vm/runtime/thread.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/runtime/thread.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -113,8 +113,9 @@
   // Support for forcing alignment of thread objects for biased locking
   void*       _real_malloc_address;
  public:
-  void* operator new(size_t size) { return allocate(size, true); }
-  void* operator new(size_t size, const std::nothrow_t& nothrow_constant) { return allocate(size, false); }
+  void* operator new(size_t size) throw() { return allocate(size, true); }
+  void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
+    return allocate(size, false); }
   void  operator delete(void* p);
 
  protected:
@@ -926,9 +927,6 @@
   volatile address _exception_handler_pc;        // PC for handler of exception
   volatile int     _is_method_handle_return;     // true (== 1) if the current exception PC is a MethodHandle call site.
 
-  // support for compilation
-  bool    _is_compiling;                         // is true if a compilation is active inthis thread (one compilation per thread possible)
-
   // support for JNI critical regions
   jint    _jni_active_critical;                  // count of entries into JNI critical region
 
@@ -1008,10 +1006,6 @@
   // Testers
   virtual bool is_Java_thread() const            { return true;  }
 
-  // compilation
-  void set_is_compiling(bool f)                  { _is_compiling = f; }
-  bool is_compiling() const                      { return _is_compiling; }
-
   // Thread chain operations
   JavaThread* next() const                       { return _next; }
   void set_next(JavaThread* p)                   { _next = p; }
@@ -1281,11 +1275,16 @@
   address  exception_handler_pc() const          { return _exception_handler_pc; }
   bool     is_method_handle_return() const       { return _is_method_handle_return == 1; }
 
-  void set_exception_oop(oop o)                  { _exception_oop = o; }
+  void set_exception_oop(oop o)                  { (void)const_cast<oop&>(_exception_oop = o); }
   void set_exception_pc(address a)               { _exception_pc = a; }
   void set_exception_handler_pc(address a)       { _exception_handler_pc = a; }
   void set_is_method_handle_return(bool value)   { _is_method_handle_return = value ? 1 : 0; }
 
+  void clear_exception_oop_and_pc() {
+    set_exception_oop(NULL);
+    set_exception_pc(NULL);
+  }
+
   // Stack overflow support
   inline size_t stack_available(address cur_sp);
   address stack_yellow_zone_base()
@@ -1826,13 +1825,14 @@
  private:
   CompilerCounters* _counters;
 
-  ciEnv*        _env;
-  CompileLog*   _log;
-  CompileTask*  _task;
-  CompileQueue* _queue;
-  BufferBlob*   _buffer_blob;
+  ciEnv*            _env;
+  CompileLog*       _log;
+  CompileTask*      _task;
+  CompileQueue*     _queue;
+  BufferBlob*       _buffer_blob;
 
-  nmethod*      _scanned_nmethod;  // nmethod being scanned by the sweeper
+  nmethod*          _scanned_nmethod;  // nmethod being scanned by the sweeper
+  AbstractCompiler* _compiler;
 
  public:
 
@@ -1844,14 +1844,17 @@
   // Hide this compiler thread from external view.
   bool is_hidden_from_external_view() const      { return true; }
 
-  CompileQueue* queue()                          { return _queue; }
-  CompilerCounters* counters()                   { return _counters; }
+  void set_compiler(AbstractCompiler* c)         { _compiler = c; }
+  AbstractCompiler* compiler() const             { return _compiler; }
+
+  CompileQueue* queue()        const             { return _queue; }
+  CompilerCounters* counters() const             { return _counters; }
 
   // Get/set the thread's compilation environment.
   ciEnv*        env()                            { return _env; }
   void          set_env(ciEnv* env)              { _env = env; }
 
-  BufferBlob*   get_buffer_blob()                { return _buffer_blob; }
+  BufferBlob*   get_buffer_blob() const          { return _buffer_blob; }
   void          set_buffer_blob(BufferBlob* b)   { _buffer_blob = b; };
 
   // Get/set the thread's logging information
--- a/src/share/vm/runtime/vframeArray.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/runtime/vframeArray.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -111,7 +111,7 @@
       case T_OBJECT:
         assert(!value->obj_is_scalar_replaced(), "object should be reallocated already");
         // preserve object type
-        _locals->add( new StackValue((intptr_t) (value->get_obj()()), T_OBJECT ));
+        _locals->add( new StackValue(cast_from_oop<intptr_t>((value->get_obj()())), T_OBJECT ));
         break;
       case T_CONFLICT:
         // A dead local.  Will be initialized to null/zero.
@@ -136,7 +136,7 @@
       case T_OBJECT:
         assert(!value->obj_is_scalar_replaced(), "object should be reallocated already");
         // preserve object type
-        _expressions->add( new StackValue((intptr_t) (value->get_obj()()), T_OBJECT ));
+        _expressions->add( new StackValue(cast_from_oop<intptr_t>((value->get_obj()())), T_OBJECT ));
         break;
       case T_CONFLICT:
         // A dead stack element.  Will be initialized to null/zero.
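
The casts rewritten in synchronizer.cpp and vframeArray.cpp above all funnel oop-to-integer conversions through cast_from_oop<T>() (and CHAINMARKER through cast_to_oop). The point of the indirection is that the same code compiles whether oop is a plain pointer or, in CHECK_UNHANDLED_OOPS builds, a wrapper class with no implicit integer conversion. A hedged sketch of what such helpers can look like, simplified to the plain-pointer configuration and not HotSpot's exact definitions:

    #include <stdint.h>

    class oopDesc;
    typedef oopDesc* oop;  // plain-pointer configuration of 'oop'

    // Simplified stand-ins for HotSpot's cast helpers: one choke point
    // for every oop <-> integral conversion.
    template <class T>
    inline T cast_from_oop(oop o) { return (T)(uintptr_t)o; }

    template <class T>
    inline oop cast_to_oop(T value) { return (oop)(uintptr_t)value; }

    // Usage mirroring the hunks above:
    //   intptr_t addrBits = cast_from_oop<intptr_t>(obj) >> 3;
    //   #define CHAINMARKER (cast_to_oop<intptr_t>(-1))
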
--- a/src/share/vm/runtime/virtualspace.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/runtime/virtualspace.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -371,8 +371,15 @@
 
 
 bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
+  const size_t max_commit_granularity = os::page_size_for_region(rs.size(), rs.size(), 1);
+  return initialize_with_granularity(rs, committed_size, max_commit_granularity);
+}
+
+bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
   if(!rs.is_reserved()) return false;  // allocation failed.
   assert(_low_boundary == NULL, "VirtualSpace already initialized");
+  assert(max_commit_granularity > 0, "Granularity must be non-zero.");
+
   _low_boundary  = rs.base();
   _high_boundary = low_boundary() + rs.size();
 
@@ -393,7 +400,7 @@
   // No attempt is made to force large page alignment at the very top and
   // bottom of the space if they are not aligned so already.
   _lower_alignment  = os::vm_page_size();
-  _middle_alignment = os::page_size_for_region(rs.size(), rs.size(), 1);
+  _middle_alignment = max_commit_granularity;
   _upper_alignment  = os::vm_page_size();
 
   // End of each region
@@ -456,6 +463,42 @@
   return reserved_size() - committed_size();
 }
 
+size_t VirtualSpace::actual_committed_size() const {
+  // Special VirtualSpaces commit all reserved space up front.
+  if (special()) {
+    return reserved_size();
+  }
+
+  size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
+  size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
+  size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));
+
+#ifdef ASSERT
+  size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
+  size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
+  size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));
+
+  if (committed_high > 0) {
+    assert(committed_low == lower, "Must be");
+    assert(committed_middle == middle, "Must be");
+  }
+
+  if (committed_middle > 0) {
+    assert(committed_low == lower, "Must be");
+  }
+  if (committed_middle < middle) {
+    assert(committed_high == 0, "Must be");
+  }
+
+  if (committed_low < lower) {
+    assert(committed_high == 0, "Must be");
+    assert(committed_middle == 0, "Must be");
+  }
+#endif
+
+  return committed_low + committed_middle + committed_high;
+}
+
 
 bool VirtualSpace::contains(const void* p) const {
   return low() <= (const char*) p && (const char*) p < high();
@@ -721,16 +764,19 @@
   assert(high() <= upper_high(), "upper high");
 }
 
-void VirtualSpace::print() {
-  tty->print   ("Virtual space:");
-  if (special()) tty->print(" (pinned in memory)");
-  tty->cr();
-  tty->print_cr(" - committed: " SIZE_FORMAT, committed_size());
-  tty->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
-  tty->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low(), high());
-  tty->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low_boundary(), high_boundary());
+void VirtualSpace::print_on(outputStream* out) {
+  out->print   ("Virtual space:");
+  if (special()) out->print(" (pinned in memory)");
+  out->cr();
+  out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
+  out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
+  out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low(), high());
+  out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low_boundary(), high_boundary());
 }
 
+void VirtualSpace::print() {
+  print_on(tty);
+}
 
 /////////////// Unit tests ///////////////
 
@@ -913,6 +959,178 @@
   TestReservedSpace::test_reserved_space();
 }
 
+#define assert_equals(actual, expected)     \
+  assert(actual == expected,                \
+    err_msg("Got " SIZE_FORMAT " expected " \
+      SIZE_FORMAT, actual, expected));
+
+#define assert_ge(value1, value2)                  \
+  assert(value1 >= value2,                         \
+    err_msg("'" #value1 "': " SIZE_FORMAT " '"     \
+      #value2 "': " SIZE_FORMAT, value1, value2));
+
+#define assert_lt(value1, value2)                  \
+  assert(value1 < value2,                          \
+    err_msg("'" #value1 "': " SIZE_FORMAT " '"     \
+      #value2 "': " SIZE_FORMAT, value1, value2));
+
+
+class TestVirtualSpace : AllStatic {
+  enum TestLargePages {
+    Default,
+    Disable,
+    Reserve,
+    Commit
+  };
+
+  static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
+    switch(mode) {
+    default:
+    case Default:
+    case Reserve:
+      return ReservedSpace(reserve_size_aligned);
+    case Disable:
+    case Commit:
+      return ReservedSpace(reserve_size_aligned,
+                           os::vm_allocation_granularity(),
+                           /* large */ false, /* exec */ false);
+    }
+  }
+
+  static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
+    switch(mode) {
+    default:
+    case Default:
+    case Reserve:
+      return vs.initialize(rs, 0);
+    case Disable:
+      return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
+    case Commit:
+      return vs.initialize_with_granularity(rs, 0, os::page_size_for_region(rs.size(), rs.size(), 1));
+    }
+  }
+
+ public:
+  static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
+                                                        TestLargePages mode = Default) {
+    size_t granularity = os::vm_allocation_granularity();
+    size_t reserve_size_aligned = align_size_up(reserve_size, granularity);
+
+    ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);
+
+    assert(reserved.is_reserved(), "Must be");
+
+    VirtualSpace vs;
+    bool initialized = initialize_virtual_space(vs, reserved, mode);
+    assert(initialized, "Failed to initialize VirtualSpace");
+
+    vs.expand_by(commit_size, false);
+
+    if (vs.special()) {
+      assert_equals(vs.actual_committed_size(), reserve_size_aligned);
+    } else {
+      assert_ge(vs.actual_committed_size(), commit_size);
+      // Approximate the commit granularity.
+      // Make sure that we don't commit using large pages
+      // if large pages has been disabled for this VirtualSpace.
+      size_t commit_granularity = (mode == Disable || !UseLargePages) ?
+                                   os::vm_page_size() : os::large_page_size();
+      assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
+    }
+
+    reserved.release();
+  }
+
+  static void test_virtual_space_actual_committed_space_one_large_page() {
+    if (!UseLargePages) {
+      return;
+    }
+
+    size_t large_page_size = os::large_page_size();
+
+    ReservedSpace reserved(large_page_size, large_page_size, true, false);
+
+    assert(reserved.is_reserved(), "Must be");
+
+    VirtualSpace vs;
+    bool initialized = vs.initialize(reserved, 0);
+    assert(initialized, "Failed to initialize VirtualSpace");
+
+    vs.expand_by(large_page_size, false);
+
+    assert_equals(vs.actual_committed_size(), large_page_size);
+
+    reserved.release();
+  }
+
+  static void test_virtual_space_actual_committed_space() {
+    test_virtual_space_actual_committed_space(4 * K, 0);
+    test_virtual_space_actual_committed_space(4 * K, 4 * K);
+    test_virtual_space_actual_committed_space(8 * K, 0);
+    test_virtual_space_actual_committed_space(8 * K, 4 * K);
+    test_virtual_space_actual_committed_space(8 * K, 8 * K);
+    test_virtual_space_actual_committed_space(12 * K, 0);
+    test_virtual_space_actual_committed_space(12 * K, 4 * K);
+    test_virtual_space_actual_committed_space(12 * K, 8 * K);
+    test_virtual_space_actual_committed_space(12 * K, 12 * K);
+    test_virtual_space_actual_committed_space(64 * K, 0);
+    test_virtual_space_actual_committed_space(64 * K, 32 * K);
+    test_virtual_space_actual_committed_space(64 * K, 64 * K);
+    test_virtual_space_actual_committed_space(2 * M, 0);
+    test_virtual_space_actual_committed_space(2 * M, 4 * K);
+    test_virtual_space_actual_committed_space(2 * M, 64 * K);
+    test_virtual_space_actual_committed_space(2 * M, 1 * M);
+    test_virtual_space_actual_committed_space(2 * M, 2 * M);
+    test_virtual_space_actual_committed_space(10 * M, 0);
+    test_virtual_space_actual_committed_space(10 * M, 4 * K);
+    test_virtual_space_actual_committed_space(10 * M, 8 * K);
+    test_virtual_space_actual_committed_space(10 * M, 1 * M);
+    test_virtual_space_actual_committed_space(10 * M, 2 * M);
+    test_virtual_space_actual_committed_space(10 * M, 5 * M);
+    test_virtual_space_actual_committed_space(10 * M, 10 * M);
+  }
+
+  static void test_virtual_space_disable_large_pages() {
+    if (!UseLargePages) {
+      return;
+    }
+    // These test cases verify the behavior when we force VirtualSpace to disable large pages.
+    test_virtual_space_actual_committed_space(10 * M, 0, Disable);
+    test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
+    test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
+    test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
+    test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
+    test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
+    test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);
+
+    test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
+    test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
+    test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
+    test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
+    test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
+    test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
+    test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);
+
+    test_virtual_space_actual_committed_space(10 * M, 0, Commit);
+    test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
+    test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
+    test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
+    test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
+    test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
+    test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
+  }
+
+  static void test_virtual_space() {
+    test_virtual_space_actual_committed_space();
+    test_virtual_space_actual_committed_space_one_large_page();
+    test_virtual_space_disable_large_pages();
+  }
+};
+
+void TestVirtualSpace_test() {
+  TestVirtualSpace::test_virtual_space();
+}
+
 #endif // PRODUCT
 
 #endif
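
The ASSERT block in actual_committed_size() above encodes the invariant that commits fill the lower region before the middle and the middle before the upper, so the committed total is simply the sum of three pointer deltas. A small numeric illustration of that accounting (all sizes invented for the example):

    #include <assert.h>
    #include <stddef.h>

    int main() {
      // Hypothetical region sizes: small pages below/above, large pages in the middle.
      size_t lower  = 4 * 1024;
      size_t middle = 2 * 1024 * 1024;
      // Say the lower region is full and half of the middle is committed;
      // the invariant then forces the upper region to be empty.
      size_t committed_low    = lower;
      size_t committed_middle = middle / 2;
      size_t committed_high   = 0;
      assert(!(committed_middle < middle) || committed_high == 0);
      size_t actual = committed_low + committed_middle + committed_high;
      assert(actual == lower + middle / 2);
      return 0;
    }
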
--- a/src/share/vm/runtime/virtualspace.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/runtime/virtualspace.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -178,16 +178,22 @@
  public:
   // Initialization
   VirtualSpace();
+  bool initialize_with_granularity(ReservedSpace rs, size_t committed_byte_size, size_t max_commit_granularity);
   bool initialize(ReservedSpace rs, size_t committed_byte_size);
 
   // Destruction
   ~VirtualSpace();
 
-  // Testers (all sizes are byte sizes)
-  size_t committed_size()   const;
-  size_t reserved_size()    const;
+  // Reserved memory
+  size_t reserved_size() const;
+  // Actually committed OS memory
+  size_t actual_committed_size() const;
+  // Memory used/expanded in this virtual space
+  size_t committed_size() const;
+  // Memory left to use/expand in this virtual space
   size_t uncommitted_size() const;
-  bool   contains(const void* p)  const;
+
+  bool   contains(const void* p) const;
 
   // Operations
   // returns true on success, false otherwise
@@ -198,7 +204,8 @@
   void check_for_contiguity() PRODUCT_RETURN;
 
   // Debugging
-  void print() PRODUCT_RETURN;
+  void print_on(outputStream* out) PRODUCT_RETURN;
+  void print();
 };
 
 #endif // SHARE_VM_RUNTIME_VIRTUALSPACE_HPP
--- a/src/share/vm/runtime/vmStructs.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/runtime/vmStructs.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -27,7 +27,6 @@
 #include "classfile/javaClasses.hpp"
 #include "classfile/loaderConstraints.hpp"
 #include "classfile/placeholders.hpp"
-#include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "ci/ciField.hpp"
 #include "ci/ciInstance.hpp"
@@ -59,7 +58,7 @@
 #include "memory/generation.hpp"
 #include "memory/generationSpec.hpp"
 #include "memory/heap.hpp"
-#include "memory/metablock.hpp"
+#include "memory/metachunk.hpp"
 #include "memory/referenceType.hpp"
 #include "memory/space.hpp"
 #include "memory/tenuredGeneration.hpp"
@@ -179,6 +178,7 @@
 #include "opto/loopnode.hpp"
 #include "opto/machnode.hpp"
 #include "opto/matcher.hpp"
+#include "opto/mathexactnode.hpp"
 #include "opto/mulnode.hpp"
 #include "opto/phaseX.hpp"
 #include "opto/parse.hpp"
@@ -294,6 +294,7 @@
   nonstatic_field(ConstantPoolCache,    _constant_pool,                                ConstantPool*)                         \
   nonstatic_field(InstanceKlass,               _array_klasses,                                Klass*)                                \
   nonstatic_field(InstanceKlass,               _methods,                                      Array<Method*>*)                       \
+  nonstatic_field(InstanceKlass,               _default_methods,                              Array<Method*>*)                       \
   nonstatic_field(InstanceKlass,               _local_interfaces,                             Array<Klass*>*)                        \
   nonstatic_field(InstanceKlass,               _transitive_interfaces,                        Array<Klass*>*)                        \
   nonstatic_field(InstanceKlass,               _fields,                                       Array<u2>*)                            \
@@ -321,7 +322,6 @@
   nonstatic_field(InstanceKlass,               _breakpoints,                                  BreakpointInfo*)                       \
   nonstatic_field(InstanceKlass,               _generic_signature_index,                           u2)                               \
   nonstatic_field(InstanceKlass,               _methods_jmethod_ids,                          jmethodID*)                            \
-  nonstatic_field(InstanceKlass,               _methods_cached_itable_indices,                int*)                                  \
   volatile_nonstatic_field(InstanceKlass,      _idnum_allocated_count,                        u2)                                    \
   nonstatic_field(InstanceKlass,               _annotations,                                  Annotations*)                          \
   nonstatic_field(InstanceKlass,               _dependencies,                                 nmethodBucket*)                        \
@@ -329,6 +329,7 @@
   nonstatic_field(nmethodBucket,               _count,                                        int)                                   \
   nonstatic_field(nmethodBucket,               _next,                                         nmethodBucket*)                        \
   nonstatic_field(InstanceKlass,               _method_ordering,                              Array<int>*)                           \
+  nonstatic_field(InstanceKlass,               _default_vtable_indices,                       Array<int>*)                           \
   nonstatic_field(Klass,                       _super_check_offset,                           juint)                                 \
   nonstatic_field(Klass,                       _secondary_super_cache,                        Klass*)                                \
   nonstatic_field(Klass,                       _secondary_supers,                             Array<Klass*>*)                        \
@@ -336,11 +337,13 @@
   nonstatic_field(Klass,                       _java_mirror,                                  oop)                                   \
   nonstatic_field(Klass,                       _modifier_flags,                               jint)                                  \
   nonstatic_field(Klass,                       _super,                                        Klass*)                                \
+  nonstatic_field(Klass,                       _subklass,                                     Klass*)                                \
   nonstatic_field(Klass,                       _layout_helper,                                jint)                                  \
   nonstatic_field(Klass,                       _name,                                         Symbol*)                               \
   nonstatic_field(Klass,                       _access_flags,                                 AccessFlags)                           \
-  nonstatic_field(Klass,                       _subklass,                                     Klass*)                                \
+  nonstatic_field(Klass,                       _prototype_header,                             markOop)                               \
   nonstatic_field(Klass,                       _next_sibling,                                 Klass*)                                \
+  nonstatic_field(vtableEntry,                 _method,                                       Method*)                               \
   nonstatic_field(MethodData,           _size,                                         int)                                   \
   nonstatic_field(MethodData,           _method,                                       Method*)                               \
   nonstatic_field(MethodData,           _data_size,                                    int)                                   \
@@ -348,10 +351,15 @@
   nonstatic_field(MethodData,           _nof_decompiles,                               uint)                                  \
   nonstatic_field(MethodData,           _nof_overflow_recompiles,                      uint)                                  \
   nonstatic_field(MethodData,           _nof_overflow_traps,                           uint)                                  \
+  nonstatic_field(MethodData,           _trap_hist._array[0],                          u1)                                    \
   nonstatic_field(MethodData,           _eflags,                                       intx)                                  \
   nonstatic_field(MethodData,           _arg_local,                                    intx)                                  \
   nonstatic_field(MethodData,           _arg_stack,                                    intx)                                  \
   nonstatic_field(MethodData,           _arg_returned,                                 intx)                                  \
+  nonstatic_field(DataLayout,           _header._struct._tag,                          u1)                                    \
+  nonstatic_field(DataLayout,           _header._struct._flags,                        u1)                                    \
+  nonstatic_field(DataLayout,           _header._struct._bci,                          u2)                                    \
+  nonstatic_field(DataLayout,           _cells[0],                                     intptr_t)                              \
   nonstatic_field(MethodCounters,       _interpreter_invocation_count,                 int)                                   \
   nonstatic_field(MethodCounters,       _interpreter_throwout_count,                   u2)                                    \
   nonstatic_field(MethodCounters,       _number_of_breakpoints,                        u2)                                    \
@@ -363,6 +371,7 @@
   nonstatic_field(Method,               _access_flags,                                 AccessFlags)                           \
   nonstatic_field(Method,               _vtable_index,                                 int)                                   \
   nonstatic_field(Method,               _method_size,                                  u2)                                    \
+  nonstatic_field(Method,               _intrinsic_id,                                 u1)                                    \
   nonproduct_nonstatic_field(Method,    _compiled_invocation_count,                    int)                                   \
   volatile_nonstatic_field(Method,      _code,                                         nmethod*)                              \
   nonstatic_field(Method,               _i2i_entry,                                    address)                               \
@@ -449,12 +458,19 @@
      static_field(Universe,                    _bootstrapping,                                bool)                                  \
      static_field(Universe,                    _fully_initialized,                            bool)                                  \
      static_field(Universe,                    _verify_count,                                 int)                                   \
+     static_field(Universe,                    _non_oop_bits,                                 intptr_t)                              \
      static_field(Universe,                    _narrow_oop._base,                             address)                               \
      static_field(Universe,                    _narrow_oop._shift,                            int)                                   \
      static_field(Universe,                    _narrow_oop._use_implicit_null_checks,         bool)                                  \
      static_field(Universe,                    _narrow_klass._base,                           address)                               \
      static_field(Universe,                    _narrow_klass._shift,                          int)                                   \
                                                                                                                                      \
+  /******/                                                                                                                           \
+  /* os */                                                                                                                           \
+  /******/                                                                                                                           \
+                                                                                                                                     \
+     static_field(os,                          _polling_page,                                 address)                               \
+                                                                                                                                     \
   /**********************************************************************************/                                               \
   /* Generation and Space hierarchies                                               */                                               \
   /**********************************************************************************/                                               \
@@ -462,6 +478,7 @@
   unchecked_nonstatic_field(ageTable,          sizes,                                         sizeof(ageTable::sizes))               \
                                                                                                                                      \
   nonstatic_field(BarrierSet,                  _max_covered_regions,                          int)                                   \
+  nonstatic_field(BarrierSet,                  _kind,                                         BarrierSet::Name)                      \
   nonstatic_field(BlockOffsetTable,            _bottom,                                       HeapWord*)                             \
   nonstatic_field(BlockOffsetTable,            _end,                                          HeapWord*)                             \
                                                                                                                                      \
@@ -501,6 +518,7 @@
   nonstatic_field(CollectedHeap,               _barrier_set,                                  BarrierSet*)                           \
   nonstatic_field(CollectedHeap,               _defer_initial_card_mark,                      bool)                                  \
   nonstatic_field(CollectedHeap,               _is_gc_active,                                 bool)                                  \
+  nonstatic_field(CollectedHeap,               _total_collections,                            unsigned int)                          \
   nonstatic_field(CompactibleSpace,            _compaction_top,                               HeapWord*)                             \
   nonstatic_field(CompactibleSpace,            _first_dead,                                   HeapWord*)                             \
   nonstatic_field(CompactibleSpace,            _end_of_live,                                  HeapWord*)                             \
@@ -511,7 +529,7 @@
   nonstatic_field(ContiguousSpace,             _saved_mark_word,                              HeapWord*)                             \
                                                                                                                                      \
   nonstatic_field(DefNewGeneration,            _next_gen,                                     Generation*)                           \
-  nonstatic_field(DefNewGeneration,            _tenuring_threshold,                           uint)                                   \
+  nonstatic_field(DefNewGeneration,            _tenuring_threshold,                           uint)                                  \
   nonstatic_field(DefNewGeneration,            _age_table,                                    ageTable)                              \
   nonstatic_field(DefNewGeneration,            _eden_space,                                   EdenSpace*)                            \
   nonstatic_field(DefNewGeneration,            _from_space,                                   ContiguousSpace*)                      \
@@ -558,6 +576,11 @@
   nonstatic_field(ThreadLocalAllocBuffer,      _desired_size,                                 size_t)                                \
   nonstatic_field(ThreadLocalAllocBuffer,      _refill_waste_limit,                           size_t)                                \
      static_field(ThreadLocalAllocBuffer,      _target_refills,                               unsigned)                              \
+  nonstatic_field(ThreadLocalAllocBuffer,      _number_of_refills,                            unsigned)                              \
+  nonstatic_field(ThreadLocalAllocBuffer,      _fast_refill_waste,                            unsigned)                              \
+  nonstatic_field(ThreadLocalAllocBuffer,      _slow_refill_waste,                            unsigned)                              \
+  nonstatic_field(ThreadLocalAllocBuffer,      _gc_waste,                                     unsigned)                              \
+  nonstatic_field(ThreadLocalAllocBuffer,      _slow_allocations,                             unsigned)                              \
   nonstatic_field(VirtualSpace,                _low_boundary,                                 char*)                                 \
   nonstatic_field(VirtualSpace,                _high_boundary,                                char*)                                 \
   nonstatic_field(VirtualSpace,                _low,                                          char*)                                 \
@@ -699,11 +722,17 @@
   nonstatic_field(PlaceholderEntry,            _loader_data,                                  ClassLoaderData*)                      \
                                                                                                                                      \
   /**************************/                                                                                                       \
-  /* ProctectionDomainEntry */                                                                                                       \
+  /* ProtectionDomainEntry  */                                                                                                       \
   /**************************/                                                                                                       \
                                                                                                                                      \
   nonstatic_field(ProtectionDomainEntry,       _next,                                         ProtectionDomainEntry*)                \
-  nonstatic_field(ProtectionDomainEntry,       _protection_domain,                            oop)                                   \
+  nonstatic_field(ProtectionDomainEntry,       _pd_cache,                                     ProtectionDomainCacheEntry*)           \
+                                                                                                                                     \
+  /*******************************/                                                                                                  \
+  /* ProtectionDomainCacheEntry  */                                                                                                  \
+  /*******************************/                                                                                                  \
+                                                                                                                                     \
+  nonstatic_field(ProtectionDomainCacheEntry,  _literal,                                      oop)                                   \
                                                                                                                                      \
   /*************************/                                                                                                        \
   /* LoaderConstraintEntry */                                                                                                        \
@@ -719,6 +748,13 @@
                                                                                                                                      \
   static_field(ClassLoaderDataGraph,           _head,                                         ClassLoaderData*)                      \
                                                                                                                                      \
+  /**********/                                                                                                                       \
+  /* Arrays */                                                                                                                       \
+  /**********/                                                                                                                       \
+                                                                                                                                     \
+  nonstatic_field(Array<Klass*>,               _length,                                       int)                                   \
+  nonstatic_field(Array<Klass*>,               _data[0],                                      Klass*)                                \
+                                                                                                                                     \
   /*******************/                                                                                                              \
   /* GrowableArrays  */                                                                                                              \
   /*******************/                                                                                                              \
@@ -726,7 +762,7 @@
   nonstatic_field(GenericGrowableArray,        _len,                                          int)                                   \
   nonstatic_field(GenericGrowableArray,        _max,                                          int)                                   \
   nonstatic_field(GenericGrowableArray,        _arena,                                        Arena*)                                \
-  nonstatic_field(GrowableArray<int>,               _data,                                         int*) \
+  nonstatic_field(GrowableArray<int>,          _data,                                         int*)                                  \
                                                                                                                                      \
   /********************************/                                                                                                 \
   /* CodeCache (NOTE: incomplete) */                                                                                                 \
@@ -769,7 +805,20 @@
   /* StubRoutines (NOTE: incomplete) */                                                                                              \
   /***********************************/                                                                                              \
                                                                                                                                      \
+     static_field(StubRoutines,                _verify_oop_count,                             jint)                                  \
      static_field(StubRoutines,                _call_stub_return_address,                     address)                               \
+     static_field(StubRoutines,                _aescrypt_encryptBlock,                        address)                               \
+     static_field(StubRoutines,                _aescrypt_decryptBlock,                        address)                               \
+     static_field(StubRoutines,                _cipherBlockChaining_encryptAESCrypt,          address)                               \
+     static_field(StubRoutines,                _cipherBlockChaining_decryptAESCrypt,          address)                               \
+     static_field(StubRoutines,                _updateBytesCRC32,                             address)                               \
+     static_field(StubRoutines,                _crc_table_adr,                                address)                               \
+                                                                                                                                     \
+  /*****************/                                                                                                                \
+  /* SharedRuntime */                                                                                                                \
+  /*****************/                                                                                                                \
+                                                                                                                                     \
+     static_field(SharedRuntime,               _ic_miss_blob,                                 RuntimeStub*)                          \
                                                                                                                                      \
   /***************************************/                                                                                          \
   /* PcDesc and other compiled code info */                                                                                          \
@@ -806,7 +855,7 @@
   nonstatic_field(nmethod,             _osr_link,                                     nmethod*)                              \
   nonstatic_field(nmethod,             _scavenge_root_link,                           nmethod*)                              \
   nonstatic_field(nmethod,             _scavenge_root_state,                          jbyte)                                 \
-  nonstatic_field(nmethod,             _state,                                        unsigned char)                         \
+  nonstatic_field(nmethod,             _state,                                        volatile unsigned char)                \
   nonstatic_field(nmethod,             _exception_offset,                             int)                                   \
   nonstatic_field(nmethod,             _deoptimize_offset,                            int)                                   \
   nonstatic_field(nmethod,             _deoptimize_mh_offset,                         int)                                   \
@@ -859,6 +908,7 @@
    volatile_nonstatic_field(Thread,            _suspend_flags,                                uint32_t)                              \
   nonstatic_field(Thread,                      _active_handles,                               JNIHandleBlock*)                       \
   nonstatic_field(Thread,                      _tlab,                                         ThreadLocalAllocBuffer)                \
+  nonstatic_field(Thread,                      _allocated_bytes,                              jlong)                                 \
   nonstatic_field(Thread,                      _current_pending_monitor,                      ObjectMonitor*)                        \
   nonstatic_field(Thread,                      _current_pending_monitor_is_from_java,         bool)                                  \
   nonstatic_field(Thread,                      _current_waiting_monitor,                      ObjectMonitor*)                        \
@@ -872,7 +922,7 @@
   nonstatic_field(JavaThread,                  _pending_async_exception,                      oop)                                   \
   volatile_nonstatic_field(JavaThread,         _exception_oop,                                oop)                                   \
   volatile_nonstatic_field(JavaThread,         _exception_pc,                                 address)                               \
-  nonstatic_field(JavaThread,                  _is_compiling,                                 bool)                                  \
+  volatile_nonstatic_field(JavaThread,         _is_method_handle_return,                      int)                                   \
   nonstatic_field(JavaThread,                  _special_runtime_exit_condition,               JavaThread::AsyncRequests)             \
   nonstatic_field(JavaThread,                  _saved_exception_pc,                           address)                               \
    volatile_nonstatic_field(JavaThread,        _thread_state,                                 JavaThreadState)                       \
@@ -881,6 +931,8 @@
   nonstatic_field(JavaThread,                  _stack_size,                                   size_t)                                \
   nonstatic_field(JavaThread,                  _vframe_array_head,                            vframeArray*)                          \
   nonstatic_field(JavaThread,                  _vframe_array_last,                            vframeArray*)                          \
+  nonstatic_field(JavaThread,                  _satb_mark_queue,                              ObjPtrQueue)                           \
+  nonstatic_field(JavaThread,                  _dirty_card_queue,                             DirtyCardQueue)                        \
   nonstatic_field(Thread,                      _resource_area,                                ResourceArea*)                         \
   nonstatic_field(CompilerThread,              _env,                                          ciEnv*)                                \
                                                                                                                                      \
@@ -1146,11 +1198,10 @@
   /* -XX flags         */                                                                                                            \
   /*********************/                                                                                                            \
                                                                                                                                      \
-  nonstatic_field(Flag,                        type,                                          const char*)                           \
-  nonstatic_field(Flag,                        name,                                          const char*)                           \
-  unchecked_nonstatic_field(Flag,              addr,                                          sizeof(void*)) /* NOTE: no type */     \
-  nonstatic_field(Flag,                        kind,                                          const char*)                           \
-  nonstatic_field(Flag,                        origin,                                        FlagValueOrigin)                       \
+  nonstatic_field(Flag,                        _type,                                         const char*)                           \
+  nonstatic_field(Flag,                        _name,                                         const char*)                           \
+  unchecked_nonstatic_field(Flag,              _addr,                                         sizeof(void*)) /* NOTE: no type */     \
+  nonstatic_field(Flag,                        _flags,                                        Flag::Flags)                           \
   static_field(Flag,                           flags,                                         Flag*)                                 \
   static_field(Flag,                           numFlags,                                      size_t)                                \
                                                                                                                                      \
@@ -1193,7 +1244,7 @@
   unchecked_nonstatic_field(Array<int>,            _data,                                     sizeof(int))                           \
   unchecked_nonstatic_field(Array<u1>,             _data,                                     sizeof(u1))                            \
   unchecked_nonstatic_field(Array<u2>,             _data,                                     sizeof(u2))                            \
-  unchecked_nonstatic_field(Array<Method*>, _data,                                     sizeof(Method*))                \
+  unchecked_nonstatic_field(Array<Method*>,        _data,                                     sizeof(Method*))                       \
   unchecked_nonstatic_field(Array<Klass*>,         _data,                                     sizeof(Klass*))                        \
                                                                                                                                      \
   /*********************************/                                                                                                \
@@ -1209,7 +1260,7 @@
   /* Miscellaneous fields */                                                                                                         \
   /************************/                                                                                                         \
                                                                                                                                      \
-  nonstatic_field(CompileTask,                 _method,                                      Method*)                         \
+  nonstatic_field(CompileTask,                 _method,                                      Method*)                                \
   nonstatic_field(CompileTask,                 _osr_bci,                                     int)                                    \
   nonstatic_field(CompileTask,                 _comp_level,                                  int)                                    \
   nonstatic_field(CompileTask,                 _compile_id,                                  uint)                                   \
@@ -1223,7 +1274,11 @@
                                                                                                                                      \
   nonstatic_field(vframeArrayElement,          _frame,                                       frame)                                  \
   nonstatic_field(vframeArrayElement,          _bci,                                         int)                                    \
-  nonstatic_field(vframeArrayElement,          _method,                                      Method*)                         \
+  nonstatic_field(vframeArrayElement,          _method,                                      Method*)                                \
+                                                                                                                                     \
+  nonstatic_field(PtrQueue,                    _active,                                      bool)                                   \
+  nonstatic_field(PtrQueue,                    _buf,                                         void**)                                 \
+  nonstatic_field(PtrQueue,                    _index,                                       size_t)                                 \
                                                                                                                                      \
   nonstatic_field(AccessFlags,                 _flags,                                       jint)                                   \
   nonstatic_field(elapsedTimer,                _counter,                                     jlong)                                  \
@@ -1317,6 +1372,7 @@
   declare_integer_type(long)                                              \
   declare_integer_type(char)                                              \
   declare_unsigned_integer_type(unsigned char)                            \
+  declare_unsigned_integer_type(volatile unsigned char)                   \
   declare_unsigned_integer_type(u_char)                                   \
   declare_unsigned_integer_type(unsigned int)                             \
   declare_unsigned_integer_type(uint)                                     \
@@ -1339,6 +1395,7 @@
   declare_toplevel_type(char**)                                           \
   declare_toplevel_type(u_char*)                                          \
   declare_toplevel_type(unsigned char*)                                   \
+  declare_toplevel_type(volatile unsigned char*)                          \
                                                                           \
   /*******************************************************************/   \
   /* Types which it will be handy to have available over in the SA   */   \
@@ -1369,7 +1426,7 @@
   /* MetadataOopDesc hierarchy (NOTE: some missing) */                    \
   /**************************************************/                    \
                                                                           \
-  declare_toplevel_type(CompiledICHolder)                          \
+  declare_toplevel_type(CompiledICHolder)                                 \
   declare_toplevel_type(MetaspaceObj)                                     \
     declare_type(Metadata, MetaspaceObj)                                  \
     declare_type(Klass, Metadata)                                         \
@@ -1380,17 +1437,20 @@
         declare_type(InstanceClassLoaderKlass, InstanceKlass)             \
         declare_type(InstanceMirrorKlass, InstanceKlass)                  \
         declare_type(InstanceRefKlass, InstanceKlass)                     \
-    declare_type(ConstantPool, Metadata)                           \
-    declare_type(ConstantPoolCache, MetaspaceObj)                  \
-    declare_type(MethodData, Metadata)                             \
-    declare_type(Method, Metadata)                                 \
-    declare_type(MethodCounters, MetaspaceObj)                     \
-    declare_type(ConstMethod, MetaspaceObj)                        \
+    declare_type(ConstantPool, Metadata)                                  \
+    declare_type(ConstantPoolCache, MetaspaceObj)                         \
+    declare_type(MethodData, Metadata)                                    \
+    declare_type(Method, Metadata)                                        \
+    declare_type(MethodCounters, MetaspaceObj)                            \
+    declare_type(ConstMethod, MetaspaceObj)                               \
+                                                                          \
+  declare_toplevel_type(vtableEntry)                                      \
                                                                           \
            declare_toplevel_type(Symbol)                                  \
            declare_toplevel_type(Symbol*)                                 \
   declare_toplevel_type(volatile Metadata*)                               \
                                                                           \
+  declare_toplevel_type(DataLayout)                                       \
   declare_toplevel_type(nmethodBucket)                                    \
                                                                           \
   /********/                                                              \
@@ -1410,6 +1470,7 @@
   declare_toplevel_type(CheckedExceptionElement)                          \
   declare_toplevel_type(LocalVariableTableElement)                        \
   declare_toplevel_type(ExceptionTableElement)                            \
+  declare_toplevel_type(MethodParametersElement)                          \
                                                                           \
   declare_toplevel_type(ClassLoaderData)                                  \
   declare_toplevel_type(ClassLoaderDataGraph)                             \
@@ -1438,6 +1499,7 @@
            declare_type(ModRefBarrierSet,             BarrierSet)         \
            declare_type(CardTableModRefBS,            ModRefBarrierSet)   \
            declare_type(CardTableModRefBSForCTRS,     CardTableModRefBS)  \
+  declare_toplevel_type(BarrierSet::Name)                                 \
   declare_toplevel_type(GenRemSet)                                        \
            declare_type(CardTableRS,                  GenRemSet)          \
   declare_toplevel_type(BlockOffsetSharedArray)                           \
@@ -1456,6 +1518,8 @@
   declare_toplevel_type(ThreadLocalAllocBuffer)                           \
   declare_toplevel_type(VirtualSpace)                                     \
   declare_toplevel_type(WaterMark)                                        \
+  declare_toplevel_type(ObjPtrQueue)                                      \
+  declare_toplevel_type(DirtyCardQueue)                                   \
                                                                           \
   /* Pointers to Garbage Collection types */                              \
                                                                           \
@@ -1511,6 +1575,7 @@
   declare_toplevel_type(SystemDictionary)                                 \
   declare_toplevel_type(vmSymbols)                                        \
   declare_toplevel_type(ProtectionDomainEntry)                            \
+  declare_toplevel_type(ProtectionDomainCacheEntry)                       \
                                                                           \
   declare_toplevel_type(GenericGrowableArray)                             \
   declare_toplevel_type(GrowableArray<int>)                               \
@@ -1879,6 +1944,15 @@
   declare_c2_type(CmpF3Node, CmpFNode)                                    \
   declare_c2_type(CmpDNode, CmpNode)                                      \
   declare_c2_type(CmpD3Node, CmpDNode)                                    \
+  declare_c2_type(MathExactNode, MultiNode)                               \
+  declare_c2_type(MathExactINode, MathExactNode)                          \
+  declare_c2_type(AddExactINode, MathExactINode)                          \
+  declare_c2_type(AddExactLNode, MathExactLNode)                          \
+  declare_c2_type(SubExactINode, MathExactINode)                          \
+  declare_c2_type(SubExactLNode, MathExactLNode)                          \
+  declare_c2_type(NegExactINode, MathExactINode)                          \
+  declare_c2_type(MulExactINode, MathExactINode)                          \
+  declare_c2_type(FlagsProjNode, ProjNode)                                \
   declare_c2_type(BoolNode, Node)                                         \
   declare_c2_type(AbsNode, Node)                                          \
   declare_c2_type(AbsINode, AbsNode)                                      \
@@ -2025,7 +2099,7 @@
    declare_integer_type(JavaThreadState)                                  \
    declare_integer_type(Location::Type)                                   \
    declare_integer_type(Location::Where)                                  \
-   declare_integer_type(FlagValueOrigin)                                  \
+   declare_integer_type(Flag::Flags)                                      \
    COMPILER2_PRESENT(declare_integer_type(OptoReg::Name))                 \
                                                                           \
    declare_toplevel_type(CHeapObj<mtInternal>)                            \
@@ -2033,7 +2107,7 @@
             declare_type(Array<u1>, MetaspaceObj)                         \
             declare_type(Array<u2>, MetaspaceObj)                         \
             declare_type(Array<Klass*>, MetaspaceObj)                     \
-            declare_type(Array<Method*>, MetaspaceObj)             \
+            declare_type(Array<Method*>, MetaspaceObj)                    \
                                                                           \
    declare_integer_type(AccessFlags)  /* FIXME: wrong type (not integer) */\
   declare_toplevel_type(address)      /* FIXME: should this be an integer type? */\
@@ -2074,6 +2148,7 @@
   declare_toplevel_type(StubQueue*)                                       \
   declare_toplevel_type(Thread*)                                          \
   declare_toplevel_type(Universe)                                         \
+  declare_toplevel_type(os)                                               \
   declare_toplevel_type(vframeArray)                                      \
   declare_toplevel_type(vframeArrayElement)                               \
   declare_toplevel_type(Annotations*)                                     \
@@ -2082,6 +2157,8 @@
   /* Miscellaneous types */                                               \
   /***************/                                                       \
                                                                           \
+  declare_toplevel_type(PtrQueue)                                         \
+                                                                          \
   /* freelist */                                                          \
   declare_toplevel_type(FreeChunk*)                                       \
   declare_toplevel_type(Metablock*)                                       \
@@ -2112,6 +2189,7 @@
   /* Useful globals */                                                    \
   /******************/                                                    \
                                                                           \
+  declare_preprocessor_constant("ASSERT", DEBUG_ONLY(1) NOT_DEBUG(0))     \
                                                                           \
   /**************/                                                        \
   /* Stack bias */                                                        \
@@ -2128,6 +2206,8 @@
   declare_constant(BytesPerWord)                                          \
   declare_constant(BytesPerLong)                                          \
                                                                           \
+  declare_constant(LogKlassAlignmentInBytes)                              \
+                                                                          \
   /********************************************/                          \
   /* Generation and Space Hierarchy Constants */                          \
   /********************************************/                          \
@@ -2136,6 +2216,9 @@
                                                                           \
   declare_constant(BarrierSet::ModRef)                                    \
   declare_constant(BarrierSet::CardTableModRef)                           \
+  declare_constant(BarrierSet::CardTableExtension)                        \
+  declare_constant(BarrierSet::G1SATBCT)                                  \
+  declare_constant(BarrierSet::G1SATBCTLogging)                           \
   declare_constant(BarrierSet::Other)                                     \
                                                                           \
   declare_constant(BlockOffsetSharedArray::LogN)                          \
@@ -2184,12 +2267,6 @@
   declare_preprocessor_constant("PERFDATA_BIG_ENDIAN", PERFDATA_BIG_ENDIAN)       \
   declare_preprocessor_constant("PERFDATA_LITTLE_ENDIAN", PERFDATA_LITTLE_ENDIAN) \
                                                                           \
-  /***************/                                                       \
-  /* SymbolTable */                                                       \
-  /***************/                                                       \
-                                                                          \
-  declare_constant(SymbolTable::symbol_table_size)                        \
-                                                                          \
   /***********************************/                                   \
   /* LoaderConstraintTable constants */                                   \
   /***********************************/                                   \
@@ -2254,8 +2331,11 @@
   declare_constant(Klass::_primary_super_limit)                           \
   declare_constant(Klass::_lh_instance_slow_path_bit)                     \
   declare_constant(Klass::_lh_log2_element_size_shift)                    \
+  declare_constant(Klass::_lh_log2_element_size_mask)                     \
   declare_constant(Klass::_lh_element_type_shift)                         \
+  declare_constant(Klass::_lh_element_type_mask)                          \
   declare_constant(Klass::_lh_header_size_shift)                          \
+  declare_constant(Klass::_lh_header_size_mask)                           \
   declare_constant(Klass::_lh_array_tag_shift)                            \
   declare_constant(Klass::_lh_array_tag_type_value)                       \
   declare_constant(Klass::_lh_array_tag_obj_value)                        \
@@ -2269,11 +2349,18 @@
   declare_constant(ConstMethod::_has_localvariable_table)                 \
   declare_constant(ConstMethod::_has_exception_table)                     \
   declare_constant(ConstMethod::_has_generic_signature)                   \
+  declare_constant(ConstMethod::_has_method_parameters)                   \
   declare_constant(ConstMethod::_has_method_annotations)                  \
   declare_constant(ConstMethod::_has_parameter_annotations)               \
   declare_constant(ConstMethod::_has_default_annotations)                 \
   declare_constant(ConstMethod::_has_type_annotations)                    \
                                                                           \
+  /**************/                                                        \
+  /* DataLayout */                                                        \
+  /**************/                                                        \
+                                                                          \
+  declare_constant(DataLayout::cell_size)                                 \
+                                                                          \
   /*************************************/                                 \
   /* InstanceKlass enum                */                                 \
   /*************************************/                                 \
@@ -2408,6 +2495,13 @@
   declare_constant(Deoptimization::Reason_LIMIT)                          \
   declare_constant(Deoptimization::Reason_RECORDED_LIMIT)                 \
                                                                           \
+  declare_constant(Deoptimization::Action_none)                           \
+  declare_constant(Deoptimization::Action_maybe_recompile)                \
+  declare_constant(Deoptimization::Action_reinterpret)                    \
+  declare_constant(Deoptimization::Action_make_not_entrant)               \
+  declare_constant(Deoptimization::Action_make_not_compilable)            \
+  declare_constant(Deoptimization::Action_LIMIT)                          \
+                                                                          \
   /*********************/                                                 \
   /* Matcher (C2 only) */                                                 \
   /*********************/                                                 \
@@ -2474,6 +2568,16 @@
   declare_constant(vmSymbols::FIRST_SID)                                  \
   declare_constant(vmSymbols::SID_LIMIT)                                  \
                                                                           \
+  /****************/                                                      \
+  /* vmIntrinsics */                                                      \
+  /****************/                                                      \
+                                                                          \
+  declare_constant(vmIntrinsics::_invokeBasic)                            \
+  declare_constant(vmIntrinsics::_linkToVirtual)                          \
+  declare_constant(vmIntrinsics::_linkToStatic)                           \
+  declare_constant(vmIntrinsics::_linkToSpecial)                          \
+  declare_constant(vmIntrinsics::_linkToInterface)                        \
+                                                                          \
   /********************************/                                      \
   /* Calling convention constants */                                      \
   /********************************/                                      \
@@ -2521,6 +2625,8 @@
   declare_constant(markOopDesc::biased_lock_bit_in_place)                 \
   declare_constant(markOopDesc::age_mask)                                 \
   declare_constant(markOopDesc::age_mask_in_place)                        \
+  declare_constant(markOopDesc::epoch_mask)                               \
+  declare_constant(markOopDesc::epoch_mask_in_place)                      \
   declare_constant(markOopDesc::hash_mask)                                \
   declare_constant(markOopDesc::hash_mask_in_place)                       \
   declare_constant(markOopDesc::biased_lock_alignment)                    \
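
The vmStructs block above is the Serviceability Agent's view of VM internals: each nonstatic_field entry records a class, a field name, and a type so an out-of-process tool can locate the field by offset. The hunks track this changeset's renames (Flag's type/name/addr/origin become _type/_name/_addr/_flags), drop removed fields (_is_compiling, SymbolTable::symbol_table_size), and register the new G1 queue fields and C2 math-exact node types. A minimal stand-alone sketch of the offset-table idea, with hypothetical names rather than the real macros:

    #include <cstddef>
    #include <cstdio>

    // Miniature of the vmStructs idea: record (class, field, type, offset)
    // tuples that an external inspector can consume.
    struct VMStructEntry {
      const char* klass;
      const char* field;
      const char* type;
      size_t      offset;
    };

    struct Flag {                 // stand-in for the real Flag class
      const char* _type;
      const char* _name;
      void*       _addr;
      int         _flags;
    };

    #define NONSTATIC_FIELD(k, f, t) { #k, #f, #t, offsetof(k, f) }

    static VMStructEntry entries[] = {
      NONSTATIC_FIELD(Flag, _type,  const char*),
      NONSTATIC_FIELD(Flag, _name,  const char*),
      NONSTATIC_FIELD(Flag, _flags, int),
    };

    int main() {
      for (const VMStructEntry& e : entries)
        std::printf("%s::%s (%s) at offset %zu\n", e.klass, e.field, e.type, e.offset);
      return 0;
    }
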
--- a/src/share/vm/runtime/vm_operations.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/runtime/vm_operations.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -173,10 +173,6 @@
   SymbolTable::unlink();
 }
 
-void VM_HandleFullCodeCache::doit() {
-  NMethodSweeper::speculative_disconnect_nmethods(_is_full);
-}
-
 void VM_Verify::doit() {
   Universe::heap()->prepare_for_verify();
   Universe::verify(_silent);
--- a/src/share/vm/runtime/vm_operations.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/runtime/vm_operations.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -51,7 +51,6 @@
   template(DeoptimizeAll)                         \
   template(ZombieAll)                             \
   template(UnlinkSymbols)                         \
-  template(HandleFullCodeCache)                   \
   template(Verify)                                \
   template(PrintJNI)                              \
   template(HeapDumper)                            \
@@ -261,16 +260,6 @@
   bool allow_nested_vm_operations() const        { return true;  }
 };
 
-class VM_HandleFullCodeCache: public VM_Operation {
- private:
-  bool  _is_full;
- public:
-  VM_HandleFullCodeCache(bool is_full)           { _is_full = is_full; }
-  VMOp_Type type() const                         { return VMOp_HandleFullCodeCache; }
-  void doit();
-  bool allow_nested_vm_operations() const        { return true; }
-};
-
 #ifndef PRODUCT
 class VM_DeoptimizeAll: public VM_Operation {
  private:
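
The two hunks above delete VM_HandleFullCodeCache along with its doit() body, so a full code cache no longer triggers a dedicated safepoint operation; presumably the reworked NMethodSweeper in this merge handles code-cache pressure directly (the matching CleanCodeCache trace event is removed later in the patch). For reference, a VM operation follows a small fixed shape: a type tag, a doit() that runs at a safepoint, and a nesting policy. A stand-alone sketch with stand-in names, not the real HotSpot base class:

    #include <cstdio>

    class VM_Operation {                       // stand-in base class
     public:
      virtual ~VM_Operation() {}
      virtual const char* name() const = 0;
      virtual void doit() = 0;                 // executed at a safepoint
      virtual bool allow_nested_vm_operations() const { return false; }
    };

    class VM_ExampleOp : public VM_Operation {
      bool _is_full;
     public:
      explicit VM_ExampleOp(bool is_full) : _is_full(is_full) {}
      const char* name() const { return "ExampleOp"; }
      void doit() { std::printf("at safepoint, is_full=%d\n", (int)_is_full); }
      bool allow_nested_vm_operations() const { return true; }
    };

    int main() { VM_ExampleOp(true).doit(); return 0; }
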
--- a/src/share/vm/runtime/vm_version.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/runtime/vm_version.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -78,7 +78,13 @@
   static const char* jre_release_version();
 
   // does HW support an 8-byte compare-exchange operation?
-  static bool supports_cx8()  {return _supports_cx8;}
+  static bool supports_cx8()  {
+#ifdef SUPPORTS_NATIVE_CX8
+    return true;
+#else
+    return _supports_cx8;
+#endif
+  }
   // does HW support atomic get-and-set or atomic get-and-add?  Used
   // to guide intrinsification decisions for Unsafe atomic ops
   static bool supports_atomic_getset4()  {return _supports_atomic_getset4;}
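
The supports_cx8() change lets platforms that define SUPPORTS_NATIVE_CX8 answer at compile time, so the compiler can fold away the 8-byte compare-exchange fallback paths entirely; everyone else still consults the runtime-probed _supports_cx8 flag. The same pattern in isolation:

    #include <cstdio>

    static bool _supports_cx8 = false;   // normally filled in by CPU feature probing

    static bool supports_cx8() {
    #ifdef SUPPORTS_NATIVE_CX8
      return true;                       // constant: fallback code becomes dead
    #else
      return _supports_cx8;              // runtime-dependent
    #endif
    }

    int main() { std::printf("cx8: %d\n", (int)supports_cx8()); return 0; }
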
--- a/src/share/vm/services/attachListener.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/services/attachListener.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -245,7 +245,7 @@
     }
     value = (tmp != 0);
   }
-  bool res = CommandLineFlags::boolAtPut((char*)name, &value, ATTACH_ON_DEMAND);
+  bool res = CommandLineFlags::boolAtPut((char*)name, &value, Flag::ATTACH_ON_DEMAND);
   if (! res) {
     out->print_cr("setting flag %s failed", name);
   }
@@ -263,7 +263,7 @@
       return JNI_ERR;
     }
   }
-  bool res = CommandLineFlags::intxAtPut((char*)name, &value, ATTACH_ON_DEMAND);
+  bool res = CommandLineFlags::intxAtPut((char*)name, &value, Flag::ATTACH_ON_DEMAND);
   if (! res) {
     out->print_cr("setting flag %s failed", name);
   }
@@ -282,7 +282,7 @@
       return JNI_ERR;
     }
   }
-  bool res = CommandLineFlags::uintxAtPut((char*)name, &value, ATTACH_ON_DEMAND);
+  bool res = CommandLineFlags::uintxAtPut((char*)name, &value, Flag::ATTACH_ON_DEMAND);
   if (! res) {
     out->print_cr("setting flag %s failed", name);
   }
@@ -301,7 +301,7 @@
       return JNI_ERR;
     }
   }
-  bool res = CommandLineFlags::uint64_tAtPut((char*)name, &value, ATTACH_ON_DEMAND);
+  bool res = CommandLineFlags::uint64_tAtPut((char*)name, &value, Flag::ATTACH_ON_DEMAND);
   if (! res) {
     out->print_cr("setting flag %s failed", name);
   }
@@ -316,7 +316,7 @@
     out->print_cr("flag value must be a string");
     return JNI_ERR;
   }
-  bool res = CommandLineFlags::ccstrAtPut((char*)name, &value, ATTACH_ON_DEMAND);
+  bool res = CommandLineFlags::ccstrAtPut((char*)name, &value, Flag::ATTACH_ON_DEMAND);
   if (res) {
     FREE_C_HEAP_ARRAY(char, value, mtInternal);
   } else {
@@ -470,7 +470,17 @@
                        vmSymbols::threadgroup_string_void_signature(),
                        thread_group,
                        string,
-                       CHECK);
+                       THREAD);
+
+  if (HAS_PENDING_EXCEPTION) {
+    tty->print_cr("Exception in VM (AttachListener::init) : ");
+    java_lang_Throwable::print(PENDING_EXCEPTION, tty);
+    tty->cr();
+
+    CLEAR_PENDING_EXCEPTION;
+
+    return;
+  }
 
   KlassHandle group(THREAD, SystemDictionary::ThreadGroup_klass());
   JavaCalls::call_special(&result,
@@ -479,7 +489,17 @@
                         vmSymbols::add_method_name(),
                         vmSymbols::thread_void_signature(),
                         thread_oop,             // ARG 1
-                        CHECK);
+                        THREAD);
+
+  if (HAS_PENDING_EXCEPTION) {
+    tty->print_cr("Exception in VM (AttachListener::init) : ");
+    java_lang_Throwable::print(PENDING_EXCEPTION, tty);
+    tty->cr();
+
+    CLEAR_PENDING_EXCEPTION;
+
+    return;
+  }
 
   { MutexLocker mu(Threads_lock);
     JavaThread* listener_thread = new JavaThread(&attach_listener_thread_entry);
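
The AttachListener::init hunks swap CHECK for THREAD at two JavaCalls and handle the pending exception by printing and clearing it: CHECK returns to the caller as soon as the callee leaves an exception pending, while passing plain THREAD leaves handling to the caller, which here wants to log and fail soft instead of propagating. (The gcNotifier.cpp hunk further down moves in the opposite direction, from THREAD to CHECK, so a failure in createGcInfo now propagates rather than being ignored.) A stand-alone sketch of the two styles, using stand-in macros rather than HotSpot's real TRAPS machinery:

    #include <cstdio>

    struct Thread { const char* pending = nullptr; };
    #define HAS_PENDING_EXC(t)   ((t)->pending != nullptr)
    #define CLEAR_PENDING_EXC(t) ((t)->pending = nullptr)

    static void callee(Thread* THREAD) { THREAD->pending = "boom"; }

    // CHECK-like: bail out immediately, exception stays pending for our caller.
    static void propagating(Thread* THREAD) {
      callee(THREAD);
      if (HAS_PENDING_EXC(THREAD)) return;      // roughly what CHECK expands to
      std::puts("not reached on failure");
    }

    // THREAD + explicit handling: log, clear, and recover locally.
    static void handling(Thread* THREAD) {
      callee(THREAD);
      if (HAS_PENDING_EXC(THREAD)) {
        std::printf("Exception: %s\n", THREAD->pending);
        CLEAR_PENDING_EXC(THREAD);
        return;
      }
    }

    int main() {
      Thread t;
      propagating(&t);
      CLEAR_PENDING_EXC(&t);
      handling(&t);
      return 0;
    }
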
--- a/src/share/vm/services/classLoadingService.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/services/classLoadingService.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -52,7 +52,7 @@
       len = name->utf8_length();                    \
     }                                               \
     HS_DTRACE_PROBE4(hotspot, class__##type,        \
-      data, len, (clss)->class_loader(), (shared)); \
+      data, len, SOLARIS_ONLY((void *))(clss)->class_loader(), (shared)); \
   }
 
 #else /* USDT2 */
@@ -202,7 +202,7 @@
   MutexLocker m(Management_lock);
 
   // verbose will be set to the previous value
-  bool succeed = CommandLineFlags::boolAtPut((char*)"TraceClassLoading", &verbose, MANAGEMENT);
+  bool succeed = CommandLineFlags::boolAtPut((char*)"TraceClassLoading", &verbose, Flag::MANAGEMENT);
   assert(succeed, "Setting TraceClassLoading flag fails");
   reset_trace_class_unloading();
 
@@ -213,7 +213,7 @@
 void ClassLoadingService::reset_trace_class_unloading() {
   assert(Management_lock->owned_by_self(), "Must own the Management_lock");
   bool value = MemoryService::get_verbose() || ClassLoadingService::get_verbose();
-  bool succeed = CommandLineFlags::boolAtPut((char*)"TraceClassUnloading", &value, MANAGEMENT);
+  bool succeed = CommandLineFlags::boolAtPut((char*)"TraceClassUnloading", &value, Flag::MANAGEMENT);
   assert(succeed, "Setting TraceClassUnLoading flag fails");
 }
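
Every boolAtPut/intxAtPut/... call site in this patch gains a Flag:: qualifier because the free-standing FlagValueOrigin enum has been folded into the Flag class (the vmStructs hunk earlier replaced declare_integer_type(FlagValueOrigin) with Flag::Flags accordingly). A sketch of the scoping change; the value set shown is illustrative, and the real Flag::Flags also packs flag-kind bits:

    class Flag {
     public:
      enum Flags {                 // was: enum FlagValueOrigin at namespace scope
        DEFAULT,
        COMMAND_LINE,
        ENVIRON_VAR,
        CONFIG_FILE,
        MANAGEMENT,
        ERGONOMIC,
        ATTACH_ON_DEMAND
      };
    };

    int main() {
      Flag::Flags origin = Flag::MANAGEMENT;   // call sites now spell it this way
      return (int)origin;
    }
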
 
--- a/src/share/vm/services/diagnosticArgument.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/services/diagnosticArgument.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -61,7 +61,7 @@
 }
 
 void GenDCmdArgument::to_string(char* c, char* buf, size_t len) {
-  jio_snprintf(buf, len, "%s", c);
+  jio_snprintf(buf, len, "%s", (c != NULL) ? c : "");
 }
 
 void GenDCmdArgument::to_string(StringArrayArgument* f, char* buf, size_t len) {
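
The jio_snprintf guard above matters because passing a null pointer for a %s argument is undefined behavior in the printf family; glibc happens to print "(null)", other C libraries crash. Substituting an empty string for an unset argument is the portable fix:

    #include <cstdio>
    #include <cstddef>

    // Guarding %s against NULL: UB per the C standard, a crash on some libcs.
    static void to_string(const char* c, char* buf, std::size_t len) {
      std::snprintf(buf, len, "%s", (c != nullptr) ? c : "");
    }

    int main() {
      char buf[16];
      to_string(nullptr, buf, sizeof buf);
      std::printf("[%s]\n", buf);              // prints []
      return 0;
    }
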
--- a/src/share/vm/services/diagnosticCommand.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/services/diagnosticCommand.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -48,7 +48,7 @@
   DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<SystemGCDCmd>(full_export, true, false));
   DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<RunFinalizationDCmd>(full_export, true, false));
 #if INCLUDE_SERVICES // Heap dumping/inspection supported
-  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<HeapDumpDCmd>(full_export, true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<HeapDumpDCmd>(DCmd_Source_Internal | DCmd_Source_AttachAPI, true, false));
   DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<ClassHistogramDCmd>(full_export, true, false));
   DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<ClassStatsDCmd>(full_export, true, false));
 #endif // INCLUDE_SERVICES
@@ -505,7 +505,11 @@
 
    _jdp_pause
   ("jdp.pause",
-   "set com.sun.management.jdp.pause", "INT", false)
+   "set com.sun.management.jdp.pause", "INT", false),
+
+   _jdp_name
+  ("jdp.name",
+   "set com.sun.management.jdp.name", "STRING", false)
 
   {
     _dcmdparser.add_dcmd_option(&_config_file);
@@ -527,6 +531,7 @@
     _dcmdparser.add_dcmd_option(&_jdp_source_addr);
     _dcmdparser.add_dcmd_option(&_jdp_ttl);
     _dcmdparser.add_dcmd_option(&_jdp_pause);
+    _dcmdparser.add_dcmd_option(&_jdp_name);
 }
 
 
@@ -596,6 +601,7 @@
     PUT_OPTION(_jdp_source_addr);
     PUT_OPTION(_jdp_ttl);
     PUT_OPTION(_jdp_pause);
+    PUT_OPTION(_jdp_name);
 
 #undef PUT_OPTION
 
--- a/src/share/vm/services/diagnosticCommand.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/services/diagnosticCommand.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -302,6 +302,7 @@
   DCmdArgument<char *> _jdp_source_addr;
   DCmdArgument<jlong>  _jdp_ttl;
   DCmdArgument<jlong>  _jdp_pause;
+  DCmdArgument<char *> _jdp_name;
 
 public:
   JMXStartRemoteDCmd(outputStream *output, bool heap_allocated);
--- a/src/share/vm/services/dtraceAttacher.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/services/dtraceAttacher.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -51,7 +51,7 @@
 
 static void set_bool_flag(const char* flag, bool value) {
   CommandLineFlags::boolAtPut((char*)flag, strlen(flag), &value,
-                              ATTACH_ON_DEMAND);
+                              Flag::ATTACH_ON_DEMAND);
 }
 
 // Enable only the "fine grained" flags. Do *not* touch
--- a/src/share/vm/services/gcNotifier.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/services/gcNotifier.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -209,7 +209,7 @@
   GCNotificationRequest *request = getRequest();
   if (request != NULL) {
     NotificationMark nm(request);
-    Handle objGcInfo = createGcInfo(request->gcManager, request->gcStatInfo, THREAD);
+    Handle objGcInfo = createGcInfo(request->gcManager, request->gcStatInfo, CHECK);
 
     Handle objName = java_lang_String::create_from_str(request->gcManager->name(), CHECK);
     Handle objAction = java_lang_String::create_from_str(request->gcAction, CHECK);
--- a/src/share/vm/services/heapDumper.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/services/heapDumper.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -563,7 +563,7 @@
 }
 
 void DumpWriter::write_objectID(oop o) {
-  address a = (address)((uintptr_t)o);
+  address a = (address)o;
 #ifdef _LP64
   write_u8((u8)a);
 #else
@@ -1545,7 +1545,9 @@
 
 // writes a HPROF_GC_CLASS_DUMP record for the given class
 void VM_HeapDumper::do_class_dump(Klass* k) {
-  DumperSupport::dump_class_and_array_classes(writer(), k);
+  if (k->oop_is_instance()) {
+    DumperSupport::dump_class_and_array_classes(writer(), k);
+  }
 }
 
 // writes a HPROF_GC_CLASS_DUMP records for a given basic type
@@ -1722,7 +1724,7 @@
   SymbolTable::symbols_do(&sym_dumper);
 
   // write HPROF_LOAD_CLASS records
-  SystemDictionary::classes_do(&do_load_class);
+  ClassLoaderDataGraph::classes_do(&do_load_class);
   Universe::basic_type_classes_do(&do_load_class);
 
   // write HPROF_FRAME and HPROF_TRACE records
@@ -1733,7 +1735,7 @@
   write_dump_header();
 
   // Writes HPROF_GC_CLASS_DUMP records
-  SystemDictionary::classes_do(&do_class_dump);
+  ClassLoaderDataGraph::classes_do(&do_class_dump);
   Universe::basic_type_classes_do(&do_basic_type_array_class_dump);
   check_segment_length();
 
--- a/src/share/vm/services/jmm.h	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/services/jmm.h	Tue Nov 05 17:38:04 2013 -0800
@@ -78,6 +78,7 @@
   JMM_COMPILE_TOTAL_TIME_MS          = 8,    /* Total accumulated time spent in compilation */
   JMM_GC_TIME_MS                     = 9,    /* Total accumulated time spent in collection */
   JMM_GC_COUNT                       = 10,   /* Total number of collections */
+  JMM_JVM_UPTIME_MS                  = 11,   /* The JVM uptime in milliseconds */
 
   JMM_INTERNAL_ATTRIBUTE_INDEX       = 100,
   JMM_CLASS_LOADED_BYTES             = 101,  /* Number of bytes loaded instance classes */
--- a/src/share/vm/services/management.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/services/management.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1032,6 +1032,9 @@
   case JMM_JVM_INIT_DONE_TIME_MS:
     return Management::vm_init_done_time();
 
+  case JMM_JVM_UPTIME_MS:
+    return Management::ticks_to_ms(os::elapsed_counter());
+
   case JMM_COMPILE_TOTAL_TIME_MS:
     return Management::ticks_to_ms(CompileBroker::total_compilation_ticks());
 
@@ -1643,9 +1646,13 @@
   int num_entries = 0;
   for (int i = 0; i < nFlags; i++) {
     Flag* flag = &Flag::flags[i];
+    // Exclude notproduct and develop flags in product builds.
+    if (flag->is_constant_in_binary()) {
+      continue;
+    }
     // Exclude the locked (experimental, diagnostic) flags
     if (flag->is_unlocked() || flag->is_unlocker()) {
-      Handle s = java_lang_String::create_from_str(flag->name, CHECK_0);
+      Handle s = java_lang_String::create_from_str(flag->_name, CHECK_0);
       flags_ah->obj_at_put(num_entries, s());
       num_entries++;
     }
@@ -1669,7 +1676,7 @@
 bool add_global_entry(JNIEnv* env, Handle name, jmmVMGlobal *global, Flag *flag, TRAPS) {
   Handle flag_name;
   if (name() == NULL) {
-    flag_name = java_lang_String::create_from_str(flag->name, CHECK_false);
+    flag_name = java_lang_String::create_from_str(flag->_name, CHECK_false);
   } else {
     flag_name = name;
   }
@@ -1698,23 +1705,23 @@
 
   global->writeable = flag->is_writeable();
   global->external = flag->is_external();
-  switch (flag->origin) {
-    case DEFAULT:
+  switch (flag->get_origin()) {
+    case Flag::DEFAULT:
       global->origin = JMM_VMGLOBAL_ORIGIN_DEFAULT;
       break;
-    case COMMAND_LINE:
+    case Flag::COMMAND_LINE:
       global->origin = JMM_VMGLOBAL_ORIGIN_COMMAND_LINE;
       break;
-    case ENVIRON_VAR:
+    case Flag::ENVIRON_VAR:
       global->origin = JMM_VMGLOBAL_ORIGIN_ENVIRON_VAR;
       break;
-    case CONFIG_FILE:
+    case Flag::CONFIG_FILE:
       global->origin = JMM_VMGLOBAL_ORIGIN_CONFIG_FILE;
       break;
-    case MANAGEMENT:
+    case Flag::MANAGEMENT:
       global->origin = JMM_VMGLOBAL_ORIGIN_MANAGEMENT;
       break;
-    case ERGONOMIC:
+    case Flag::ERGONOMIC:
       global->origin = JMM_VMGLOBAL_ORIGIN_ERGONOMIC;
       break;
     default:
@@ -1781,6 +1788,10 @@
     int num_entries = 0;
     for (int i = 0; i < nFlags && num_entries < count;  i++) {
       Flag* flag = &Flag::flags[i];
+      // Exclude notproduct and develop flags in product builds.
+      if (flag->is_constant_in_binary()) {
+        continue;
+      }
       // Exclude the locked (diagnostic, experimental) flags
       if ((flag->is_unlocked() || flag->is_unlocker()) &&
           add_global_entry(env, null_h, &globals[num_entries], flag, THREAD)) {
@@ -1813,23 +1824,23 @@
   bool succeed;
   if (flag->is_bool()) {
     bool bvalue = (new_value.z == JNI_TRUE ? true : false);
-    succeed = CommandLineFlags::boolAtPut(name, &bvalue, MANAGEMENT);
+    succeed = CommandLineFlags::boolAtPut(name, &bvalue, Flag::MANAGEMENT);
   } else if (flag->is_intx()) {
     intx ivalue = (intx)new_value.j;
-    succeed = CommandLineFlags::intxAtPut(name, &ivalue, MANAGEMENT);
+    succeed = CommandLineFlags::intxAtPut(name, &ivalue, Flag::MANAGEMENT);
   } else if (flag->is_uintx()) {
     uintx uvalue = (uintx)new_value.j;
-    succeed = CommandLineFlags::uintxAtPut(name, &uvalue, MANAGEMENT);
+    succeed = CommandLineFlags::uintxAtPut(name, &uvalue, Flag::MANAGEMENT);
   } else if (flag->is_uint64_t()) {
     uint64_t uvalue = (uint64_t)new_value.j;
-    succeed = CommandLineFlags::uint64_tAtPut(name, &uvalue, MANAGEMENT);
+    succeed = CommandLineFlags::uint64_tAtPut(name, &uvalue, Flag::MANAGEMENT);
   } else if (flag->is_ccstr()) {
     oop str = JNIHandles::resolve_external_guard(new_value.l);
     if (str == NULL) {
       THROW(vmSymbols::java_lang_NullPointerException());
     }
     ccstr svalue = java_lang_String::as_utf8_string(str);
-    succeed = CommandLineFlags::ccstrAtPut(name, &svalue, MANAGEMENT);
+    succeed = CommandLineFlags::ccstrAtPut(name, &svalue, Flag::MANAGEMENT);
   }
   assert(succeed, "Setting flag should succeed");
 JVM_END
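
Besides adapting to the Flag::Flags refactor, management.cpp gains two behaviors: flags compiled to constants in product builds (develop/notproduct) are skipped when enumerating VM globals, and the new JMM_JVM_UPTIME_MS attribute reports uptime by converting elapsed performance-counter ticks to milliseconds. The conversion is presumably the usual ticks-times-1000-over-frequency; a sketch with an assumed frequency parameter:

    #include <cstdint>
    #include <cstdio>

    // ticks_to_ms, roughly: scale counter ticks by the counter frequency.
    static int64_t ticks_to_ms(int64_t ticks, int64_t ticks_per_second) {
      return ticks * 1000 / ticks_per_second;  // can overflow for very large ticks
    }

    int main() {
      std::printf("%lld ms\n", (long long)ticks_to_ms(3000000, 1000000));  // 3000 ms
      return 0;
    }
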
--- a/src/share/vm/services/memPtr.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/services/memPtr.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,9 +34,9 @@
   jint seq = Atomic::add(1, &_seq_number);
   if (seq < 0) {
     MemTracker::shutdown(MemTracker::NMT_sequence_overflow);
+  } else {
+    NOT_PRODUCT(_max_seq_number = (seq > _max_seq_number) ? seq : _max_seq_number;)
   }
-  assert(seq > 0, "counter overflow");
-  NOT_PRODUCT(_max_seq_number = (seq > _max_seq_number) ? seq : _max_seq_number;)
   return seq;
 }
 
--- a/src/share/vm/services/memRecorder.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/services/memRecorder.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -53,13 +53,13 @@
     }
   }
 
-  void* operator new(size_t size, const std::nothrow_t& nothrow_constant) {
+  void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
     // the instance is part of memRecorder, needs to be tagged with 'otNMTRecorder'
     // to avoid recursion
     return os::malloc(size, (mtNMT | otNMTRecorder));
   }
 
-  void* operator new(size_t size) {
+  void* operator new(size_t size) throw() {
     assert(false, "use nothrow version");
     return NULL;
   }
--- a/src/share/vm/services/memTrackWorker.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/services/memTrackWorker.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -63,12 +63,12 @@
   }
 }
 
-void* MemTrackWorker::operator new(size_t size) {
+void* MemTrackWorker::operator new(size_t size) throw() {
   assert(false, "use nothrow version");
   return NULL;
 }
 
-void* MemTrackWorker::operator new(size_t size, const std::nothrow_t& nothrow_constant) {
+void* MemTrackWorker::operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
   return allocate(size, false, mtNMT);
 }
 
--- a/src/share/vm/services/memTrackWorker.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/services/memTrackWorker.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -90,8 +90,8 @@
  public:
   MemTrackWorker(MemSnapshot* snapshot);
   ~MemTrackWorker();
-  _NOINLINE_ void* operator new(size_t size);
-  _NOINLINE_ void* operator new(size_t size, const std::nothrow_t& nothrow_constant);
+  _NOINLINE_ void* operator new(size_t size) throw();
+  _NOINLINE_ void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw();
 
   void start();
   void run();
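
The throw() additions here (and in memRecorder.hpp, array.hpp, and elsewhere in this patch) are not cosmetic: these allocators return NULL on failure, and C++ only permits that for allocation functions declared non-throwing. Without throw(), callers may assume new never yields NULL, and an optimizing compiler is entitled to delete the very null checks the code depends on. A minimal sketch of a class-specific nothrow allocator declared correctly:

    #include <new>
    #include <cstdlib>
    #include <cstdio>

    struct Tracked {
      // May return NULL, therefore must be declared non-throwing.
      void* operator new(std::size_t size, const std::nothrow_t&) throw() {
        return std::malloc(size);
      }
      void operator delete(void* p, const std::nothrow_t&) throw() { std::free(p); }
      void operator delete(void* p) { std::free(p); }
    };

    int main() {
      Tracked* t = new (std::nothrow) Tracked;
      if (t == nullptr) return 1;      // only meaningful because of throw()
      std::puts("allocated");
      delete t;
      return 0;
    }
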
--- a/src/share/vm/services/memoryManager.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/services/memoryManager.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -45,7 +45,7 @@
 
 MemoryManager::MemoryManager() {
   _num_pools = 0;
-  _memory_mgr_obj = NULL;
+  (void)const_cast<instanceOop&>(_memory_mgr_obj = NULL);
 }
 
 void MemoryManager::add_pool(MemoryPool* pool) {
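
The odd-looking (void)const_cast<instanceOop&>(_memory_mgr_obj = NULL) above (repeated for _memory_pool_obj in memoryPool.cpp below) stores through a volatile field but discards the assignment result via a non-volatile reference; presumably this silences a compiler diagnostic about using the value of a volatile assignment, which would otherwise imply a re-read of the field. The store itself is unchanged. The same shape in miniature:

    int main() {
      volatile int v;
      (void)const_cast<int&>(v = 0);   // store, then discard the result through a
                                       // non-volatile reference (no volatile re-read)
      return v;
    }
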
--- a/src/share/vm/services/memoryPool.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/services/memoryPool.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -45,7 +45,7 @@
   _name = name;
   _initial_size = init_size;
   _max_size = max_size;
-  _memory_pool_obj = NULL;
+  (void)const_cast<instanceOop&>(_memory_pool_obj = NULL);
   _available_for_allocation = true;
   _num_managers = 0;
   _type = type;
@@ -260,10 +260,10 @@
 }
 
 MetaspacePool::MetaspacePool() :
-  MemoryPool("Metaspace", NonHeap, capacity_in_bytes(), calculate_max_size(), true, false) { }
+  MemoryPool("Metaspace", NonHeap, 0, calculate_max_size(), true, false) { }
 
 MemoryUsage MetaspacePool::get_memory_usage() {
-  size_t committed = align_size_down_(capacity_in_bytes(), os::vm_page_size());
+  size_t committed = MetaspaceAux::committed_bytes();
   return MemoryUsage(initial_size(), used_in_bytes(), committed, max_size());
 }
 
@@ -271,26 +271,19 @@
   return MetaspaceAux::allocated_used_bytes();
 }
 
-size_t MetaspacePool::capacity_in_bytes() const {
-  return MetaspaceAux::allocated_capacity_bytes();
-}
-
 size_t MetaspacePool::calculate_max_size() const {
-  return FLAG_IS_CMDLINE(MaxMetaspaceSize) ? MaxMetaspaceSize : max_uintx;
+  return FLAG_IS_CMDLINE(MaxMetaspaceSize) ? MaxMetaspaceSize :
+                                             MemoryUsage::undefined_size();
 }
 
 CompressedKlassSpacePool::CompressedKlassSpacePool() :
-  MemoryPool("Compressed Class Space", NonHeap, capacity_in_bytes(), ClassMetaspaceSize, true, false) { }
+  MemoryPool("Compressed Class Space", NonHeap, 0, CompressedClassSpaceSize, true, false) { }
 
 size_t CompressedKlassSpacePool::used_in_bytes() {
   return MetaspaceAux::allocated_used_bytes(Metaspace::ClassType);
 }
 
-size_t CompressedKlassSpacePool::capacity_in_bytes() const {
-  return MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType);
-}
-
 MemoryUsage CompressedKlassSpacePool::get_memory_usage() {
-  size_t committed = align_size_down_(capacity_in_bytes(), os::vm_page_size());
+  size_t committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
   return MemoryUsage(initial_size(), used_in_bytes(), committed, max_size());
 }
--- a/src/share/vm/services/memoryPool.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/services/memoryPool.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -224,7 +224,6 @@
 
 class MetaspacePool : public MemoryPool {
   size_t calculate_max_size() const;
-  size_t capacity_in_bytes() const;
  public:
   MetaspacePool();
   MemoryUsage get_memory_usage();
@@ -232,7 +231,6 @@
 };
 
 class CompressedKlassSpacePool : public MemoryPool {
-  size_t capacity_in_bytes() const;
  public:
   CompressedKlassSpacePool();
   MemoryUsage get_memory_usage();
--- a/src/share/vm/services/memoryService.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/services/memoryService.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -409,7 +409,7 @@
   mgr->add_pool(_metaspace_pool);
   _pools_list->append(_metaspace_pool);
 
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     _compressed_class_pool = new CompressedKlassSpacePool();
     mgr->add_pool(_compressed_class_pool);
     _pools_list->append(_compressed_class_pool);
@@ -515,7 +515,7 @@
 bool MemoryService::set_verbose(bool verbose) {
   MutexLocker m(Management_lock);
   // verbose will be set to the previous value
-  bool succeed = CommandLineFlags::boolAtPut((char*)"PrintGC", &verbose, MANAGEMENT);
+  bool succeed = CommandLineFlags::boolAtPut((char*)"PrintGC", &verbose, Flag::MANAGEMENT);
   assert(succeed, "Setting PrintGC flag fails");
   ClassLoadingService::reset_trace_class_unloading();
 
@@ -618,4 +618,3 @@
   MemoryService::gc_end(_fullGC, _recordPostGCUsage, _recordAccumulatedGCTime,
                         _recordGCEndTime, _countCollection, _cause);
 }
-
--- a/src/share/vm/services/memoryService.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/services/memoryService.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -148,6 +148,12 @@
   static void track_code_cache_memory_usage() {
     track_memory_pool_usage(_code_heap_pool);
   }
+  static void track_metaspace_memory_usage() {
+    track_memory_pool_usage(_metaspace_pool);
+  }
+  static void track_compressed_class_memory_usage() {
+    track_memory_pool_usage(_compressed_class_pool);
+  }
   static void track_memory_pool_usage(MemoryPool* pool);
 
   static void gc_begin(bool fullGC, bool recordGCBeginTime,
--- a/src/share/vm/services/memoryUsage.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/services/memoryUsage.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -63,10 +63,12 @@
   size_t committed() const { return _committed; }
   size_t max_size()  const { return _maxSize; }
 
+  static size_t undefined_size() { return (size_t) -1; }
+
   inline static jlong convert_to_jlong(size_t val) {
     // In the 64-bit vm, a size_t can overflow a jlong (which is signed).
     jlong ret;
-    if (val == (size_t)-1) {
+    if (val == undefined_size()) {
       ret = -1L;
     } else {
       NOT_LP64(ret = val;)
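
Naming the (size_t)-1 sentinel undefined_size() ties the MemoryPool changes together: MetaspacePool's max is now "undefined" unless MaxMetaspaceSize was set on the command line, and convert_to_jlong maps that sentinel to the -1 that java.lang.management.MemoryUsage documents for undefined values. A sketch of the round trip (ignoring the LP64 clamping the real code does):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    typedef int64_t jlong;

    static std::size_t undefined_size() { return (std::size_t)-1; }

    static jlong convert_to_jlong(std::size_t val) {
      if (val == undefined_size()) return -1;  // Java-side spelling of "undefined"
      return (jlong)val;
    }

    int main() {
      std::printf("%lld\n", (long long)convert_to_jlong(undefined_size()));  // -1
      return 0;
    }
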
--- a/src/share/vm/services/runtimeService.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/services/runtimeService.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -119,7 +119,7 @@
 #endif /* USDT2 */
 
   // Print the time interval in which the app was executing
-  if (PrintGCApplicationConcurrentTime) {
+  if (PrintGCApplicationConcurrentTime && _app_timer.is_updated()) {
     gclog_or_tty->date_stamp(PrintGCDateStamps);
     gclog_or_tty->stamp(PrintGCTimeStamps);
     gclog_or_tty->print_cr("Application time: %3.7f seconds",
--- a/src/share/vm/shark/sharkCompiler.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/shark/sharkCompiler.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -133,11 +133,10 @@
     exit(1);
   }
 
-  execution_engine()->addModule(
-    _native_context->module());
+  execution_engine()->addModule(_native_context->module());
 
   // All done
-  mark_initialized();
+  set_state(initialized);
 }
 
 void SharkCompiler::initialize() {
--- a/src/share/vm/shark/sharkCompiler.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/shark/sharkCompiler.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -50,10 +50,6 @@
     return ! (method->is_method_handle_intrinsic() || method->is_compiled_lambda_form());
   }
 
-  // Customization
-  bool needs_adapters()  { return false; }
-  bool needs_stubs()     { return false; }
-
   // Initialization
   void initialize();
 
--- a/src/share/vm/trace/trace.xml	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/trace/trace.xml	Tue Nov 05 17:38:04 2013 -0800
@@ -313,13 +313,6 @@
       <value type="UINT" field="zombifiedCount" label="Methods Zombified"/>
     </event>
 
-    <event id="CleanCodeCache" path="vm/code_sweeper/clean" label="Clean Code Cache"
-             description="Clean code cache from oldest methods"
-             has_thread="true" is_requestable="false" is_constant="false">
-      <value type="UINT" field="disconnectedCount" label="Methods Disconnected"/>
-      <value type="UINT" field="madeNonEntrantCount" label="Methods Made Non-Entrant"/>
-    </event>
-
     <!-- Code cache events -->
 
     <event id="CodeCacheFull" path="vm/code_cache/full" label="Code Cache Full"
--- a/src/share/vm/trace/traceEventClasses.xsl	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/trace/traceEventClasses.xsl	Tue Nov 05 17:38:04 2013 -0800
@@ -23,8 +23,8 @@
 -->
 
 <xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+<xsl:import href="xsl_util.xsl"/>
 <xsl:output method="text" indent="no" omit-xml-declaration="yes"/>
-<xsl:import href="xsl_util.xsl"/>
 
 <xsl:template match="/">
   <xsl:call-template name="file-header"/>
--- a/src/share/vm/trace/traceEventIds.xsl	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/trace/traceEventIds.xsl	Tue Nov 05 17:38:04 2013 -0800
@@ -23,8 +23,8 @@
 -->
 
 <xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+<xsl:import href="xsl_util.xsl"/>
 <xsl:output method="text" indent="no" omit-xml-declaration="yes"/>
-<xsl:import href="xsl_util.xsl"/>
 
 <xsl:template match="/">
   <xsl:call-template name="file-header"/>
--- a/src/share/vm/trace/traceMacros.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/trace/traceMacros.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -26,6 +26,7 @@
 #define SHARE_VM_TRACE_TRACE_MACRO_HPP
 
 #define EVENT_THREAD_EXIT(thread)
+#define EVENT_THREAD_DESTRUCT(thread)
 
 #define TRACE_INIT_ID(k)
 #define TRACE_DATA TraceThreadData
--- a/src/share/vm/trace/traceTypes.xsl	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/trace/traceTypes.xsl	Tue Nov 05 17:38:04 2013 -0800
@@ -23,8 +23,8 @@
 -->
 
 <xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+<xsl:import href="xsl_util.xsl"/>
 <xsl:output method="text" indent="no" omit-xml-declaration="yes"/>
-<xsl:import href="xsl_util.xsl"/>
 
 <xsl:template match="/">
   <xsl:call-template name="file-header"/>
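
The three stylesheet hunks above (traceEventClasses.xsl, traceEventIds.xsl, traceTypes.xsl) are the same one-line fix: XSLT requires xsl:import to precede every other top-level element of xsl:stylesheet, so placing it after xsl:output was invalid and stricter processors reject it.
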
--- a/src/share/vm/utilities/accessFlags.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/utilities/accessFlags.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -78,11 +78,13 @@
   JVM_ACC_FIELD_ACCESS_WATCHED       = 0x00002000,  // field access is watched by JVMTI
   JVM_ACC_FIELD_MODIFICATION_WATCHED = 0x00008000,  // field modification is watched by JVMTI
   JVM_ACC_FIELD_INTERNAL             = 0x00000400,  // internal field, same as JVM_ACC_ABSTRACT
+  JVM_ACC_FIELD_STABLE               = 0x00000020,  // @Stable field, same as JVM_ACC_SYNCHRONIZED
   JVM_ACC_FIELD_HAS_GENERIC_SIGNATURE = 0x00000800, // field has generic signature
 
   JVM_ACC_FIELD_INTERNAL_FLAGS       = JVM_ACC_FIELD_ACCESS_WATCHED |
                                        JVM_ACC_FIELD_MODIFICATION_WATCHED |
                                        JVM_ACC_FIELD_INTERNAL |
+                                       JVM_ACC_FIELD_STABLE |
                                        JVM_ACC_FIELD_HAS_GENERIC_SIGNATURE,
 
                                                     // flags accepted by set_field_flags()
@@ -148,6 +150,7 @@
                                         { return (_flags & JVM_ACC_FIELD_MODIFICATION_WATCHED) != 0; }
   bool on_stack() const                 { return (_flags & JVM_ACC_ON_STACK) != 0; }
   bool is_internal() const              { return (_flags & JVM_ACC_FIELD_INTERNAL) != 0; }
+  bool is_stable() const                { return (_flags & JVM_ACC_FIELD_STABLE) != 0; }
   bool field_has_generic_signature() const
                                         { return (_flags & JVM_ACC_FIELD_HAS_GENERIC_SIGNATURE) != 0; }
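
JVM_ACC_FIELD_STABLE above backs the @Stable annotation and reuses bit 0x0020, which the class-file spec assigns to JVM_ACC_SYNCHRONIZED; the aliasing is safe because SYNCHRONIZED applies only to methods while this internal bit is set only on fields, the same trick JVM_ACC_FIELD_INTERNAL already plays with the ABSTRACT bit. In miniature:

    #include <cstdio>

    enum {
      JVM_ACC_SYNCHRONIZED = 0x0020,  // method-only in the class-file format
      JVM_ACC_FIELD_STABLE = 0x0020   // safe alias: only ever set on fields
    };

    static bool is_stable(int flags) { return (flags & JVM_ACC_FIELD_STABLE) != 0; }

    int main() { std::printf("%d\n", (int)is_stable(JVM_ACC_FIELD_STABLE)); return 0; }
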
 
--- a/src/share/vm/utilities/array.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/utilities/array.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -317,7 +317,7 @@
   Array(const Array<T>&);
   void operator=(const Array<T>&);
 
-  void* operator new(size_t size, ClassLoaderData* loader_data, int length, bool read_only, TRAPS) {
+  void* operator new(size_t size, ClassLoaderData* loader_data, int length, bool read_only, TRAPS) throw() {
     size_t word_size = Array::size(length);
     return (void*) Metaspace::allocate(loader_data, word_size, read_only,
                                        MetaspaceObj::array_type(sizeof(T)), CHECK_NULL);
@@ -353,9 +353,9 @@
   // sort the array.
   bool contains(const T& x) const      { return index_of(x) >= 0; }
 
-  T    at(int i) const                 { assert(i >= 0 && i< _length, err_msg_res("oob: 0 <= %d < %d", i, _length)); return _data[i]; }
-  void at_put(const int i, const T& x) { assert(i >= 0 && i< _length, err_msg_res("oob: 0 <= %d < %d", i, _length)); _data[i] = x; }
-  T*   adr_at(const int i)             { assert(i >= 0 && i< _length, err_msg_res("oob: 0 <= %d < %d", i, _length)); return &_data[i]; }
+  T    at(int i) const                 { assert(i >= 0 && i< _length, err_msg("oob: 0 <= %d < %d", i, _length)); return _data[i]; }
+  void at_put(const int i, const T& x) { assert(i >= 0 && i< _length, err_msg("oob: 0 <= %d < %d", i, _length)); _data[i] = x; }
+  T*   adr_at(const int i)             { assert(i >= 0 && i< _length, err_msg("oob: 0 <= %d < %d", i, _length)); return &_data[i]; }
   int  find(const T& x)                { return index_of(x); }
 
   T at_acquire(const int which)              { return OrderAccess::load_acquire(adr_at(which)); }
--- a/src/share/vm/utilities/bitMap.inline.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/utilities/bitMap.inline.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -52,16 +52,16 @@
 
 inline bool BitMap::par_set_bit(idx_t bit) {
   verify_index(bit);
-  volatile idx_t* const addr = word_addr(bit);
-  const idx_t mask = bit_mask(bit);
-  idx_t old_val = *addr;
+  volatile bm_word_t* const addr = word_addr(bit);
+  const bm_word_t mask = bit_mask(bit);
+  bm_word_t old_val = *addr;
 
   do {
-    const idx_t new_val = old_val | mask;
+    const bm_word_t new_val = old_val | mask;
     if (new_val == old_val) {
       return false;     // Someone else beat us to it.
     }
-    const idx_t cur_val = (idx_t) Atomic::cmpxchg_ptr((void*) new_val,
+    const bm_word_t cur_val = (bm_word_t) Atomic::cmpxchg_ptr((void*) new_val,
                                                       (volatile void*) addr,
                                                       (void*) old_val);
     if (cur_val == old_val) {
@@ -73,16 +73,16 @@
 
 inline bool BitMap::par_clear_bit(idx_t bit) {
   verify_index(bit);
-  volatile idx_t* const addr = word_addr(bit);
-  const idx_t mask = ~bit_mask(bit);
-  idx_t old_val = *addr;
+  volatile bm_word_t* const addr = word_addr(bit);
+  const bm_word_t mask = ~bit_mask(bit);
+  bm_word_t old_val = *addr;
 
   do {
-    const idx_t new_val = old_val & mask;
+    const bm_word_t new_val = old_val & mask;
     if (new_val == old_val) {
       return false;     // Someone else beat us to it.
     }
-    const idx_t cur_val = (idx_t) Atomic::cmpxchg_ptr((void*) new_val,
+    const bm_word_t cur_val = (bm_word_t) Atomic::cmpxchg_ptr((void*) new_val,
                                                       (volatile void*) addr,
                                                       (void*) old_val);
     if (cur_val == old_val) {
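
The substance of the two hunks above is a type fix: the compare-and-swap has to operate on the bitmap's word type (bm_word_t), not on the index type (idx_t), since the two need not have the same width. For illustration, here is the same lock-free retry pattern written against C++11 std::atomic instead of HotSpot's Atomic::cmpxchg_ptr; identifiers are illustrative, not HotSpot code.

#include <atomic>
#include <cstdint>

typedef uintptr_t bm_word_t;  // stand-in for BitMap's word type

inline bool par_set_bit(std::atomic<bm_word_t>* addr, unsigned bit_in_word) {
  const bm_word_t mask = (bm_word_t)1 << bit_in_word;
  bm_word_t old_val = addr->load();
  for (;;) {
    const bm_word_t new_val = old_val | mask;
    if (new_val == old_val) {
      return false;  // someone else beat us to it
    }
    // On failure, compare_exchange_weak reloads old_val with the current value.
    if (addr->compare_exchange_weak(old_val, new_val)) {
      return true;   // we published the bit
    }
  }
}
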
--- a/src/share/vm/utilities/constantTag.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/utilities/constantTag.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -51,7 +51,9 @@
     case JVM_CONSTANT_ClassIndex :
     case JVM_CONSTANT_StringIndex :
     case JVM_CONSTANT_MethodHandle :
+    case JVM_CONSTANT_MethodHandleInError :
     case JVM_CONSTANT_MethodType :
+    case JVM_CONSTANT_MethodTypeInError :
       return T_OBJECT;
     default:
       ShouldNotReachHere();
@@ -60,6 +62,19 @@
 }
 
 
+jbyte constantTag::non_error_value() const {
+  switch (_tag) {
+  case JVM_CONSTANT_UnresolvedClassInError:
+    return JVM_CONSTANT_UnresolvedClass;
+  case JVM_CONSTANT_MethodHandleInError:
+    return JVM_CONSTANT_MethodHandle;
+  case JVM_CONSTANT_MethodTypeInError:
+    return JVM_CONSTANT_MethodType;
+  default:
+    return _tag;
+  }
+}
+
 
 const char* constantTag::internal_name() const {
   switch (_tag) {
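
The new non_error_value() folds the *InError tags back to their base tags, so callers can compare tags without enumerating the error variants. A hedged usage sketch (the helper below is hypothetical, not part of this changeset):

// Two tags match "up to error state" when their non-error values agree,
// e.g. JVM_CONSTANT_MethodHandleInError matches JVM_CONSTANT_MethodHandle.
static bool same_base_tag(constantTag a, constantTag b) {
  return a.non_error_value() == b.non_error_value();
}
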
--- a/src/share/vm/utilities/constantTag.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/utilities/constantTag.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -108,7 +108,8 @@
     _tag = tag;
   }
 
-  jbyte value()                      { return _tag; }
+  jbyte value() const                { return _tag; }
+  jbyte non_error_value() const;
 
   BasicType basic_type() const;        // if used with ldc, what kind of value gets pushed?
 
--- a/src/share/vm/utilities/decoder.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/utilities/decoder.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -24,7 +24,6 @@
 
 #include "precompiled.hpp"
 #include "prims/jvm.h"
-#include "runtime/mutexLocker.hpp"
 #include "runtime/os.hpp"
 #include "utilities/decoder.hpp"
 #include "utilities/vmError.hpp"
@@ -84,6 +83,23 @@
   return decoder;
 }
 
+inline bool DecoderLocker::is_first_error_thread() {
+  return (os::current_thread_id() == VMError::get_first_error_tid());
+}
+
+DecoderLocker::DecoderLocker() :
+  MutexLockerEx(DecoderLocker::is_first_error_thread() ?
+                NULL : Decoder::shared_decoder_lock(), true) {
+  _decoder = is_first_error_thread() ?
+    Decoder::get_error_handler_instance() : Decoder::get_shared_instance();
+  assert(_decoder != NULL, "null decoder");
+}
+
+Mutex* Decoder::shared_decoder_lock() {
+  assert(_shared_decoder_lock != NULL, "Just check");
+  return _shared_decoder_lock;
+}
+
 bool Decoder::decode(address addr, char* buf, int buflen, int* offset, const char* modulepath) {
   assert(_shared_decoder_lock != NULL, "Just check");
   bool error_handling_thread = os::current_thread_id() == VMError::first_error_tid;
--- a/src/share/vm/utilities/decoder.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/utilities/decoder.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -28,6 +28,7 @@
 
 #include "memory/allocation.hpp"
 #include "runtime/mutex.hpp"
+#include "runtime/mutexLocker.hpp"
 
 class AbstractDecoder : public CHeapObj<mtInternal> {
 public:
@@ -124,6 +125,19 @@
 
 protected:
   static Mutex*               _shared_decoder_lock;
+  static Mutex* shared_decoder_lock();
+
+  friend class DecoderLocker;
+};
+
+class DecoderLocker : public MutexLockerEx {
+  AbstractDecoder* _decoder;
+  inline bool is_first_error_thread();
+public:
+  DecoderLocker();
+  AbstractDecoder* decoder() {
+    return _decoder;
+  }
 };
 
 #endif // SHARE_VM_UTILITIES_DECODER_HPP
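
DecoderLocker is an RAII helper: the constructor selects the error-handler decoder (without locking) on the thread reporting a fatal error and the shared decoder (under _shared_decoder_lock) otherwise, and the MutexLockerEx base releases the lock on scope exit. A hedged usage sketch, assuming AbstractDecoder::decode mirrors the Decoder::decode signature above; the surrounding function and variables are hypothetical:

// Hypothetical caller: decode one PC while holding the appropriate lock.
void print_one_frame(address pc, char* buf, int buflen) {
  int offset;
  DecoderLocker locker;                         // locks (if needed) in ctor
  AbstractDecoder* decoder = locker.decoder();
  decoder->decode(pc, buf, buflen, &offset, NULL /* modulepath */);
}                                               // lock released here
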
--- a/src/share/vm/utilities/globalDefinitions.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/utilities/globalDefinitions.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -329,12 +329,18 @@
 
 const int max_method_code_size = 64*K - 1;  // JVM spec, 2nd ed. section 4.8.1 (p.134)
 
+// Default ProtectionDomainCacheSize values
+
+const int defaultProtectionDomainCacheSize = NOT_LP64(137) LP64_ONLY(2017);
 
 //----------------------------------------------------------------------------------------------------
 // Default and minimum StringTableSize values
 
 const int defaultStringTableSize = NOT_LP64(1009) LP64_ONLY(60013);
-const int minimumStringTableSize=1009;
+const int minimumStringTableSize = 1009;
+
+const int defaultSymbolTableSize = 20011;
+const int minimumSymbolTableSize = 1009;
 
 
 //----------------------------------------------------------------------------------------------------
@@ -365,8 +371,6 @@
 // Klass encoding metaspace max size
 const uint64_t KlassEncodingMetaspaceMax = (uint64_t(max_juint) + 1) << LogKlassAlignmentInBytes;
 
-const jlong CompressedKlassPointersBase = NOT_LP64(0) LP64_ONLY(CONST64(0x800000000));  // 32*G
-
 // Machine dependent stuff
 
 #ifdef TARGET_ARCH_x86
@@ -970,9 +974,9 @@
 // (These must be implemented as #defines because C++ compilers are
 // not obligated to inline non-integral constants!)
 #define       badAddress        ((address)::badAddressVal)
-#define       badOop            ((oop)::badOopVal)
+#define       badOop            (cast_to_oop(::badOopVal))
 #define       badHeapWord       (::badHeapWordVal)
-#define       badJNIHandle      ((oop)::badJNIHandleVal)
+#define       badJNIHandle      (cast_to_oop(::badJNIHandleVal))
 
 // Default TaskQueue size is 16K (32-bit) or 128K (64-bit)
 #define TASKQUEUE_SIZE (NOT_LP64(1<<14) LP64_ONLY(1<<17))
--- a/src/share/vm/utilities/globalDefinitions_visCPP.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/utilities/globalDefinitions_visCPP.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -189,6 +189,10 @@
 #pragma warning( disable : 4201 ) // nonstandard extension used : nameless struct/union (needed in windows.h)
 #pragma warning( disable : 4511 ) // copy constructor could not be generated
 #pragma warning( disable : 4291 ) // no matching operator delete found; memory will not be freed if initialization throws an exception
+#ifdef CHECK_UNHANDLED_OOPS
+#pragma warning( disable : 4521 ) // class has multiple copy ctors of a single type
+#pragma warning( disable : 4522 ) // class has multiple assignment operators of a single type
+#endif // CHECK_UNHANDLED_OOPS
 #if _MSC_VER >= 1400
 #pragma warning( disable : 4996 ) // unsafe string functions. Same as define _CRT_SECURE_NO_WARNINGS/_CRT_SECURE_NO_DEPRECATE
 #endif
--- a/src/share/vm/utilities/hashtable.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/utilities/hashtable.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -356,9 +356,9 @@
 template class Hashtable<Symbol*, mtSymbol>;
 template class Hashtable<Klass*, mtClass>;
 template class Hashtable<oop, mtClass>;
-#ifdef SOLARIS
+#if defined(SOLARIS) || defined(CHECK_UNHANDLED_OOPS)
 template class Hashtable<oop, mtSymbol>;
-#endif
+#endif // SOLARIS || CHECK_UNHANDLED_OOPS
 template class Hashtable<oopDesc*, mtSymbol>;
 template class Hashtable<Symbol*, mtClass>;
 template class HashtableEntry<Symbol*, mtSymbol>;
--- a/src/share/vm/utilities/ostream.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/utilities/ostream.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -345,7 +345,7 @@
 }
 
 char* stringStream::as_string() {
-  char* copy = NEW_RESOURCE_ARRAY(char, buffer_pos+1);
+  char* copy = NEW_RESOURCE_ARRAY(char, buffer_pos + 1);
   strncpy(copy, buffer, buffer_pos);
   copy[buffer_pos] = 0;  // terminating null
   return copy;
@@ -358,14 +358,190 @@
 outputStream* gclog_or_tty;
 extern Mutex* tty_lock;
 
+#define EXTRACHARLEN   32
+#define CURRENTAPPX    ".current"
+#define FILENAMEBUFLEN  1024
+// convert YYYY-MM-DD HH:MM:SS to YYYY-MM-DD_HH-MM-SS
+char* get_datetime_string(char *buf, size_t len) {
+  os::local_time_string(buf, len);
+  int i = (int)strlen(buf);
+  while (i-- > 0) {  // '>' rather than '>=' so buf[-1] is never touched
+    if (buf[i] == ' ') buf[i] = '_';
+    else if (buf[i] == ':') buf[i] = '-';
+  }
+  return buf;
+}
+
+static const char* make_log_name_internal(const char* log_name, const char* force_directory,
+                                                int pid, const char* tms) {
+  const char* basename = log_name;
+  char file_sep = os::file_separator()[0];
+  const char* cp;
+  char  pid_text[32];
+
+  for (cp = log_name; *cp != '\0'; cp++) {
+    if (*cp == '/' || *cp == file_sep) {
+      basename = cp + 1;
+    }
+  }
+  const char* nametail = log_name;
+  // Compute buffer length
+  size_t buffer_length;
+  if (force_directory != NULL) {
+    buffer_length = strlen(force_directory) + strlen(os::file_separator()) +
+                    strlen(basename) + 1;
+  } else {
+    buffer_length = strlen(log_name) + 1;
+  }
+
+  // const char* star = strchr(basename, '*');
+  const char* pts = strstr(basename, "%p");
+  int pid_pos = (pts == NULL) ? -1 : (pts - nametail);
+
+  if (pid_pos >= 0) {
+    jio_snprintf(pid_text, sizeof(pid_text), "pid%u", pid);
+    buffer_length += strlen(pid_text);
+  }
+
+  pts = strstr(basename, "%t");
+  int tms_pos = (pts == NULL) ? -1 : (pts - nametail);
+  if (tms_pos >= 0) {
+    buffer_length += strlen(tms);
+  }
+
+  // Create big enough buffer.
+  char *buf = NEW_C_HEAP_ARRAY(char, buffer_length, mtInternal);
+
+  strcpy(buf, "");
+  if (force_directory != NULL) {
+    strcat(buf, force_directory);
+    strcat(buf, os::file_separator());
+    nametail = basename;       // completely skip directory prefix
+  }
+
+  // who is first, %p or %t?
+  int first = -1, second = -1;
+  const char *p1st = NULL;
+  const char *p2nd = NULL;
+
+  if (pid_pos >= 0 && tms_pos >= 0) {
+    // contains both %p and %t
+    if (pid_pos < tms_pos) {
+      // case foo%pbar%tmonkey.log
+      first  = pid_pos;
+      p1st   = pid_text;
+      second = tms_pos;
+      p2nd   = tms;
+    } else {
+      // case foo%tbar%pmonkey.log
+      first  = tms_pos;
+      p1st   = tms;
+      second = pid_pos;
+      p2nd   = pid_text;
+    }
+  } else if (pid_pos >= 0) {
+    // contains %p only
+    first  = pid_pos;
+    p1st   = pid_text;
+  } else if (tms_pos >= 0) {
+    // contains %t only
+    first  = tms_pos;
+    p1st   = tms;
+  }
+
+  int buf_pos = (int)strlen(buf);
+  const char* tail = nametail;
+
+  if (first >= 0) {
+    tail = nametail + first + 2;
+    strncpy(&buf[buf_pos], nametail, first);
+    strcpy(&buf[buf_pos + first], p1st);
+    buf_pos = (int)strlen(buf);
+    if (second >= 0) {
+      strncpy(&buf[buf_pos], tail, second - first - 2);
+      strcpy(&buf[buf_pos + second - first - 2], p2nd);
+      tail = nametail + second + 2;
+    }
+  }
+  strcat(buf, tail);      // append rest of name, or all of name
+  return buf;
+}
+
+// log_name comes from -XX:LogFile=log_name or -Xloggc:log_name
+// in log_name, %p => pid1234 and
+//              %t => YYYY-MM-DD_HH-MM-SS
+static const char* make_log_name(const char* log_name, const char* force_directory) {
+  char timestr[32];
+  get_datetime_string(timestr, sizeof(timestr));
+  return make_log_name_internal(log_name, force_directory, os::current_process_id(),
+                                timestr);
+}
+
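
For concreteness, the substitutions above expand as follows for a hypothetical pid of 1234 at 2013-11-05 17:38:04:

//   -Xloggc:gc.log        =>  gc.log
//   -Xloggc:gc_%p.log     =>  gc_pid1234.log
//   -Xloggc:gc_%t.log     =>  gc_2013-11-05_17-38-04.log
//   -Xloggc:gc_%p_%t.log  =>  gc_pid1234_2013-11-05_17-38-04.log
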
+#ifndef PRODUCT
+void test_loggc_filename() {
+  int pid;
+  char  tms[32];
+  char  i_result[FILENAMEBUFLEN];
+  const char* o_result;
+  get_datetime_string(tms, sizeof(tms));
+  pid = os::current_process_id();
+
+  // test.log
+  jio_snprintf(i_result, sizeof(char)*FILENAMEBUFLEN, "test.log", tms);
+  o_result = make_log_name_internal("test.log", NULL, pid, tms);
+  assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"test.log\", NULL)");
+  FREE_C_HEAP_ARRAY(char, o_result, mtInternal);
+
+  // test-%t-%p.log
+  jio_snprintf(i_result, sizeof(char)*FILENAMEBUFLEN, "test-%s-pid%u.log", tms, pid);
+  o_result = make_log_name_internal("test-%t-%p.log", NULL, pid, tms);
+  assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"test-%%t-%%p.log\", NULL)");
+  FREE_C_HEAP_ARRAY(char, o_result, mtInternal);
+
+  // test-%t%p.log
+  jio_snprintf(i_result, sizeof(char)*FILENAMEBUFLEN, "test-%spid%u.log", tms, pid);
+  o_result = make_log_name_internal("test-%t%p.log", NULL, pid, tms);
+  assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"test-%%t%%p.log\", NULL)");
+  FREE_C_HEAP_ARRAY(char, o_result, mtInternal);
+
+  // %p%t.log
+  jio_snprintf(i_result, sizeof(char)*FILENAMEBUFLEN, "pid%u%s.log", pid, tms);
+  o_result = make_log_name_internal("%p%t.log", NULL, pid, tms);
+  assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"%%p%%t.log\", NULL)");
+  FREE_C_HEAP_ARRAY(char, o_result, mtInternal);
+
+  // %p-test.log
+  jio_snprintf(i_result, sizeof(char)*FILENAMEBUFLEN, "pid%u-test.log", pid);
+  o_result = make_log_name_internal("%p-test.log", NULL, pid, tms);
+  assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"%%p-test.log\", NULL)");
+  FREE_C_HEAP_ARRAY(char, o_result, mtInternal);
+
+  // %t.log
+  jio_snprintf(i_result, sizeof(char)*FILENAMEBUFLEN, "%s.log", tms);
+  o_result = make_log_name_internal("%t.log", NULL, pid, tms);
+  assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"%%t.log\", NULL)");
+  FREE_C_HEAP_ARRAY(char, o_result, mtInternal);
+}
+#endif // PRODUCT
+
 fileStream::fileStream(const char* file_name) {
   _file = fopen(file_name, "w");
-  _need_close = true;
+  if (_file != NULL) {
+    _need_close = true;
+  } else {
+    warning("Cannot open file %s due to %s\n", file_name, strerror(errno));
+    _need_close = false;
+  }
 }
 
 fileStream::fileStream(const char* file_name, const char* opentype) {
   _file = fopen(file_name, opentype);
-  _need_close = true;
+  if (_file != NULL) {
+    _need_close = true;
+  } else {
+    warning("Cannot open file %s due to %s\n", file_name, strerror(errno));
+    _need_close = false;
+  }
 }
 
 void fileStream::write(const char* s, size_t len) {
@@ -426,34 +602,51 @@
   update_position(s, len);
 }
 
-rotatingFileStream::~rotatingFileStream() {
+// dump vm version, os version, platform info, build id,
+// memory usage and command line flags into header
+void gcLogFileStream::dump_loggc_header() {
+  if (is_open()) {
+    print_cr(Abstract_VM_Version::internal_vm_info_string());
+    os::print_memory_info(this);
+    print("CommandLine flags: ");
+    CommandLineFlags::printSetFlags(this);
+  }
+}
+
+gcLogFileStream::~gcLogFileStream() {
   if (_file != NULL) {
     if (_need_close) fclose(_file);
-    _file      = NULL;
+    _file = NULL;
+  }
+  if (_file_name != NULL) {
     FREE_C_HEAP_ARRAY(char, _file_name, mtInternal);
     _file_name = NULL;
   }
 }
 
-rotatingFileStream::rotatingFileStream(const char* file_name) {
+gcLogFileStream::gcLogFileStream(const char* file_name) {
   _cur_file_num = 0;
   _bytes_written = 0L;
-  _file_name = NEW_C_HEAP_ARRAY(char, strlen(file_name)+10, mtInternal);
-  jio_snprintf(_file_name, strlen(file_name)+10, "%s.%d", file_name, _cur_file_num);
-  _file = fopen(_file_name, "w");
-  _need_close = true;
+  _file_name = make_log_name(file_name, NULL);
+
+  // gc log file rotation
+  if (UseGCLogFileRotation && NumberOfGCLogFiles > 1) {
+    char tempbuf[FILENAMEBUFLEN];
+    jio_snprintf(tempbuf, sizeof(tempbuf), "%s.%d" CURRENTAPPX, _file_name, _cur_file_num);
+    _file = fopen(tempbuf, "w");
+  } else {
+    _file = fopen(_file_name, "w");
+  }
+  if (_file != NULL) {
+    _need_close = true;
+    dump_loggc_header();
+  } else {
+    warning("Cannot open file %s due to %s\n", _file_name, strerror(errno));
+    _need_close = false;
+  }
 }
 
-rotatingFileStream::rotatingFileStream(const char* file_name, const char* opentype) {
-  _cur_file_num = 0;
-  _bytes_written = 0L;
-  _file_name = NEW_C_HEAP_ARRAY(char, strlen(file_name)+10, mtInternal);
-  jio_snprintf(_file_name, strlen(file_name)+10, "%s.%d", file_name, _cur_file_num);
-  _file = fopen(_file_name, opentype);
-  _need_close = true;
-}
-
-void rotatingFileStream::write(const char* s, size_t len) {
+void gcLogFileStream::write(const char* s, size_t len) {
   if (_file != NULL) {
     size_t count = fwrite(s, 1, len, _file);
     _bytes_written += count;
@@ -469,7 +662,12 @@
 // write to gc log file at safepoint. If, in the future, mutator threads or concurrent
 // GC threads are changed to run in parallel with the VMThread at safepoints, write and
 // rotate_log must be synchronized.
-void rotatingFileStream::rotate_log() {
+void gcLogFileStream::rotate_log() {
+  char time_msg[FILENAMEBUFLEN];
+  char time_str[EXTRACHARLEN];
+  char current_file_name[FILENAMEBUFLEN];
+  char renamed_file_name[FILENAMEBUFLEN];
+
   if (_bytes_written < (jlong)GCLogFileSize) {
     return;
   }
@@ -484,27 +682,89 @@
     // rotate in same file
     rewind();
     _bytes_written = 0L;
+    jio_snprintf(time_msg, sizeof(time_msg), "File  %s rotated at %s\n",
+                 _file_name, os::local_time_string((char *)time_str, sizeof(time_str)));
+    write(time_msg, strlen(time_msg));
+    dump_loggc_header();
     return;
   }
 
-  // rotate file in names file.0, file.1, file.2, ..., file.<MaxGCLogFileNumbers-1>
-  // close current file, rotate to next file
+#if defined(_WINDOWS)
+#ifndef F_OK
+#define F_OK 0
+#endif
+#endif // _WINDOWS
+
+  // Rotate among files named extended_filename.0, extended_filename.1, ...,
+  // extended_filename.<NumberOfGCLogFiles - 1>. The file currently being written
+  // has the form extended_filename.<i>.current, where i is the current rotation
+  // file number. Once it reaches the maximum size, the file is saved and renamed
+  // by dropping the .current suffix.
+  size_t filename_len = strlen(_file_name);
   if (_file != NULL) {
-    _cur_file_num ++;
-    if (_cur_file_num >= NumberOfGCLogFiles) _cur_file_num = 0;
-    jio_snprintf(_file_name, strlen(Arguments::gc_log_filename()) + 10, "%s.%d",
-             Arguments::gc_log_filename(), _cur_file_num);
+    jio_snprintf(renamed_file_name, filename_len + EXTRACHARLEN, "%s.%d",
+                 _file_name, _cur_file_num);
+    jio_snprintf(current_file_name, filename_len + EXTRACHARLEN, "%s.%d" CURRENTAPPX,
+                 _file_name, _cur_file_num);
+    jio_snprintf(time_msg, sizeof(time_msg), "%s GC log file has reached the"
+                           " maximum size. Saved as %s\n",
+                           os::local_time_string((char *)time_str, sizeof(time_str)),
+                           renamed_file_name);
+    write(time_msg, strlen(time_msg));
+
     fclose(_file);
     _file = NULL;
+
+    bool can_rename = true;
+    if (access(current_file_name, F_OK) != 0) {
+      // current file does not exist?
+      warning("No source file exists, cannot rename\n");
+      can_rename = false;
+    }
+    if (can_rename) {
+      if (access(renamed_file_name, F_OK) == 0) {
+        if (remove(renamed_file_name) != 0) {
+          warning("Could not delete existing file %s\n", renamed_file_name);
+          can_rename = false;
+        }
+      } else {
+        // file does not exist, ok to rename
+      }
+    }
+    if (can_rename && rename(current_file_name, renamed_file_name) != 0) {
+      warning("Could not rename %s to %s\n", _file_name, renamed_file_name);
+    }
   }
-  _file = fopen(_file_name, "w");
+
+  _cur_file_num++;
+  if (_cur_file_num > NumberOfGCLogFiles - 1) _cur_file_num = 0;
+  jio_snprintf(current_file_name,  filename_len + EXTRACHARLEN, "%s.%d" CURRENTAPPX,
+               _file_name, _cur_file_num);
+  _file = fopen(current_file_name, "w");
+
   if (_file != NULL) {
     _bytes_written = 0L;
     _need_close = true;
+    // reuse the current_file_name buffer to build the saved-file name for time_msg
+    jio_snprintf(current_file_name, filename_len + EXTRACHARLEN,
+                 "%s.%d", _file_name, _cur_file_num);
+    jio_snprintf(time_msg, sizeof(time_msg), "%s GC log file created %s\n",
+                           os::local_time_string((char *)time_str, sizeof(time_str)),
+                           current_file_name);
+    write(time_msg, strlen(time_msg));
+    dump_loggc_header();
+    // remove any stale rotated file left over from a previous wrap-around
+    if (access(current_file_name, F_OK) == 0) {
+      if (remove(current_file_name) != 0) {
+        warning("Could not delete existing file %s\n", current_file_name);
+      }
+    }
   } else {
-    tty->print_cr("failed to open rotation log file %s due to %s\n",
+    warning("failed to open rotation log file %s due to %s\n"
+            "Turned off GC log file rotation\n",
                   _file_name, strerror(errno));
     _need_close = false;
+    FLAG_SET_DEFAULT(UseGCLogFileRotation, false);
   }
 }
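
Putting rotate_log together: the live log always carries the .current suffix, and each rotation renames it and opens the next slot, wrapping after NumberOfGCLogFiles. An illustrative sequence for a hypothetical -Xloggc:gc.log with NumberOfGCLogFiles=3:

//   open  gc.log.0.current  --size limit-->  rename to gc.log.0, open gc.log.1.current
//   write gc.log.1.current  --size limit-->  rename to gc.log.1, open gc.log.2.current
//   write gc.log.2.current  --size limit-->  rename to gc.log.2, wrap to gc.log.0.current
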
 
@@ -533,69 +793,9 @@
   return _log_file != NULL;
 }
 
-static const char* make_log_name(const char* log_name, const char* force_directory) {
-  const char* basename = log_name;
-  char file_sep = os::file_separator()[0];
-  const char* cp;
-  for (cp = log_name; *cp != '\0'; cp++) {
-    if (*cp == '/' || *cp == file_sep) {
-      basename = cp+1;
-    }
-  }
-  const char* nametail = log_name;
-
-  // Compute buffer length
-  size_t buffer_length;
-  if (force_directory != NULL) {
-    buffer_length = strlen(force_directory) + strlen(os::file_separator()) +
-                    strlen(basename) + 1;
-  } else {
-    buffer_length = strlen(log_name) + 1;
-  }
-
-  const char* star = strchr(basename, '*');
-  int star_pos = (star == NULL) ? -1 : (star - nametail);
-  int skip = 1;
-  if (star == NULL) {
-    // Try %p
-    star = strstr(basename, "%p");
-    if (star != NULL) {
-      skip = 2;
-    }
-  }
-  star_pos = (star == NULL) ? -1 : (star - nametail);
-
-  char pid[32];
-  if (star_pos >= 0) {
-    jio_snprintf(pid, sizeof(pid), "%u", os::current_process_id());
-    buffer_length += strlen(pid);
-  }
-
-  // Create big enough buffer.
-  char *buf = NEW_C_HEAP_ARRAY(char, buffer_length, mtInternal);
-
-  strcpy(buf, "");
-  if (force_directory != NULL) {
-    strcat(buf, force_directory);
-    strcat(buf, os::file_separator());
-    nametail = basename;       // completely skip directory prefix
-  }
-
-  if (star_pos >= 0) {
-    // convert foo*bar.log or foo%pbar.log to foo123bar.log
-    int buf_pos = (int) strlen(buf);
-    strncpy(&buf[buf_pos], nametail, star_pos);
-    strcpy(&buf[buf_pos + star_pos], pid);
-    nametail += star_pos + skip;  // skip prefix and pid format
-  }
-
-  strcat(buf, nametail);      // append rest of name, or all of name
-  return buf;
-}
-
 void defaultStream::init_log() {
   // %%% Need a MutexLocker?
-  const char* log_name = LogFile != NULL ? LogFile : "hotspot.log";
+  const char* log_name = LogFile != NULL ? LogFile : "hotspot_%p.log";
   const char* try_name = make_log_name(log_name, NULL);
   fileStream* file = new(ResourceObj::C_HEAP, mtInternal) fileStream(try_name);
   if (!file->is_open()) {
@@ -606,14 +806,15 @@
     // Note:  This feature is for maintainer use only.  No need for L10N.
     jio_print(warnbuf);
     FREE_C_HEAP_ARRAY(char, try_name, mtInternal);
-    try_name = make_log_name("hs_pid%p.log", os::get_temp_directory());
+    try_name = make_log_name(log_name, os::get_temp_directory());
     jio_snprintf(warnbuf, sizeof(warnbuf),
                  "Warning:  Forcing option -XX:LogFile=%s\n", try_name);
     jio_print(warnbuf);
     delete file;
     file = new(ResourceObj::C_HEAP, mtInternal) fileStream(try_name);
-    FREE_C_HEAP_ARRAY(char, try_name, mtInternal);
   }
+  FREE_C_HEAP_ARRAY(char, try_name, mtInternal);
+
   if (file->is_open()) {
     _log_file = file;
     xmlStream* xs = new(ResourceObj::C_HEAP, mtInternal) xmlStream(file);
@@ -880,11 +1081,8 @@
 
   gclog_or_tty = tty; // default to tty
   if (Arguments::gc_log_filename() != NULL) {
-    fileStream * gclog  = UseGCLogFileRotation ?
-                          new(ResourceObj::C_HEAP, mtInternal)
-                             rotatingFileStream(Arguments::gc_log_filename()) :
-                          new(ResourceObj::C_HEAP, mtInternal)
-                             fileStream(Arguments::gc_log_filename());
+    fileStream * gclog  = new(ResourceObj::C_HEAP, mtInternal)
+                             gcLogFileStream(Arguments::gc_log_filename());
     if (gclog->is_open()) {
       // now we update the time stamp of the GC log to be synced up
       // with tty.
--- a/src/share/vm/utilities/ostream.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/utilities/ostream.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -231,20 +231,24 @@
   void flush() {};
 };
 
-class rotatingFileStream : public fileStream {
+class gcLogFileStream : public fileStream {
  protected:
-  char*  _file_name;
+  const char*  _file_name;
   jlong  _bytes_written;
-  uintx  _cur_file_num;             // current logfile rotation number, from 0 to MaxGCLogFileNumbers-1
+  uintx  _cur_file_num;             // current logfile rotation number, from 0 to NumberOfGCLogFiles-1
  public:
-  rotatingFileStream(const char* file_name);
-  rotatingFileStream(const char* file_name, const char* opentype);
-  rotatingFileStream(FILE* file) : fileStream(file) {}
-  ~rotatingFileStream();
+  gcLogFileStream(const char* file_name);
+  ~gcLogFileStream();
   virtual void write(const char* c, size_t len);
   virtual void rotate_log();
+  void dump_loggc_header();
 };
 
+#ifndef PRODUCT
+// unit test for checking -Xloggc:<filename> parsing result
+void test_loggc_filename();
+#endif
+
 void ostream_init();
 void ostream_init_log();
 void ostream_exit();
--- a/src/share/vm/utilities/taskqueue.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/utilities/taskqueue.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -325,11 +325,11 @@
   // Attempts to claim a task from the "local" end of the queue (the most
   // recently pushed).  If successful, returns true and sets t to the task;
   // otherwise, returns false (the queue is empty).
-  inline bool pop_local(E& t);
+  inline bool pop_local(volatile E& t);
 
   // Like pop_local(), but uses the "global" end of the queue (the least
   // recently pushed).
-  bool pop_global(E& t);
+  bool pop_global(volatile E& t);
 
   // Delete any resource associated with the queue.
   ~GenericTaskQueue();
@@ -427,7 +427,7 @@
 }
 
 template<class E, MEMFLAGS F, unsigned int N>
-bool GenericTaskQueue<E, F, N>::pop_global(E& t) {
+bool GenericTaskQueue<E, F, N>::pop_global(volatile E& t) {
   Age oldAge = _age.get();
   // Architectures with weak memory model require a barrier here
   // to guarantee that bottom is not older than age,
@@ -704,7 +704,7 @@
 }
 
 template<class E, MEMFLAGS F, unsigned int N> inline bool
-GenericTaskQueue<E, F, N>::pop_local(E& t) {
+GenericTaskQueue<E, F, N>::pop_local(volatile E& t) {
   uint localBot = _bottom;
   // This value cannot be N-1.  That can only occur as a result of
   // the assignment to bottom in this method.  If it does, this method
@@ -802,7 +802,7 @@
   }
   volatile ObjArrayTask&
   operator =(const volatile ObjArrayTask& t) volatile {
-    _obj = t._obj;
+    (void)const_cast<oop&>(_obj = t._obj);
     _index = t._index;
     return *this;
   }
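
The cast in the hunk above exists because, under CHECK_UNHANDLED_OOPS, oop is a class type and the assignment inside this volatile member function yields a volatile-qualified lvalue; discarding that result directly can imply a volatile re-read of the just-written value (and a compiler warning). A minimal sketch of the same pattern outside HotSpot, with illustrative types:

// Cast away volatile before discarding the assignment result, so no
// volatile load of the freshly written value is implied.
struct Slot {
  int _v;
  volatile Slot& operator=(const volatile Slot& s) volatile {
    (void)const_cast<int&>(_v = s._v);
    return *this;
  }
};
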
--- a/src/share/vm/utilities/vmError.cpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/utilities/vmError.cpp	Tue Nov 05 17:38:04 2013 -0800
@@ -574,6 +574,10 @@
   STEP(120, "(printing native stack)" )
 
      if (_verbose) {
+     if (os::platform_print_native_stack(st, _context, buf, sizeof(buf))) {
+       // The native stack has been printed by platform-specific code
+       // (Windows/x64 needs special handling).
+     } else {
        frame fr = _context ? os::fetch_frame_from_context(_context)
                            : os::current_frame();
 
@@ -604,6 +608,7 @@
           st->cr();
        }
      }
+   }
 
   STEP(130, "(printing Java stack)" )
 
@@ -1045,7 +1050,7 @@
         FILE* replay_data_file = os::open(fd, "w");
         if (replay_data_file != NULL) {
           fileStream replay_data_stream(replay_data_file, /*need_close=*/true);
-          env->dump_replay_data(&replay_data_stream);
+          env->dump_replay_data_unsafe(&replay_data_stream);
           out.print_raw("#\n# Compiler replay data is saved as:\n# ");
           out.print_raw_cr(buffer);
         } else {
--- a/src/share/vm/utilities/vmError.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/utilities/vmError.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -136,6 +136,10 @@
 
   // check to see if fatal error reporting is in progress
   static bool fatal_error_in_progress() { return first_error != NULL; }
+
+  static jlong get_first_error_tid() {
+    return first_error_tid;
+  }
 };
 
 #endif // SHARE_VM_UTILITIES_VMERROR_HPP
--- a/src/share/vm/utilities/yieldingWorkgroup.hpp	Wed Oct 16 10:52:41 2013 +0200
+++ b/src/share/vm/utilities/yieldingWorkgroup.hpp	Tue Nov 05 17:38:04 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,10 +26,7 @@
 #define SHARE_VM_UTILITIES_YIELDINGWORKGROUP_HPP
 
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
 #include "utilities/workgroup.hpp"
-#endif // INCLUDE_ALL_GCS
-
 
 // Forward declarations
 class YieldingFlexibleWorkGang;
--- a/test/TEST.groups	Wed Oct 16 10:52:41 2013 +0200
+++ b/test/TEST.groups	Tue Nov 05 17:38:04 2013 -0800
@@ -27,7 +27,7 @@
 # - compact1, compact2, compact3, full JRE, JDK
 #
 # In addition they support testing of the minimal VM on compact1 and compact2.
-# Essentially this defines groups based around the specified API's and VM 
+# Essentially this defines groups based around the specified API's and VM
 # services available in the runtime.
 #
 # The groups are defined hierarchically in two forms:
@@ -44,9 +44,9 @@
 # by listing the top-level test directories.
 #
 # To use a group simply list it on the jtreg command line eg:
-#   jtreg :jdk    
+#   jtreg :jdk
 # runs all tests. While
-#   jtreg :compact2  
+#   jtreg :compact2
 # runs those tests that only require compact1 and compact2 API's.
 #
 
@@ -62,14 +62,15 @@
 #
 needs_jdk = \
   gc/TestG1ZeroPGCTJcmdThreadPrint.java \
-  gc/metaspace/ClassMetaspaceSizeInJmapHeap.java \
+  gc/metaspace/CompressedClassSpaceSizeInJmapHeap.java \
   gc/metaspace/TestMetaspacePerfCounters.java \
+  gc/metaspace/TestPerfCountersAndMemoryPools.java \
   runtime/6819213/TestBootNativeLibraryPath.java \
-  runtime/6878713/Test6878713.sh \
   runtime/6925573/SortMethodsTest.java \
   runtime/7107135/Test7107135.sh \
   runtime/7158988/FieldMonitor.java \
   runtime/7194254/Test7194254.java \
+  runtime/8026365/InvokeSpecialAnonTest.java \
   runtime/jsig/Test8017498.sh \
   runtime/Metaspace/FragmentMetaspace.java \
   runtime/NMT/BaselineWithParameter.java \
@@ -84,7 +85,10 @@
   runtime/NMT/ThreadedVirtualAllocTestType.java \
   runtime/NMT/VirtualAllocTestType.java \
   runtime/RedefineObject/TestRedefineObject.java \
-  serviceability/attach/AttachWithStalePidFile.java
+  runtime/XCheckJniJsig/XCheckJSig.java \
+  serviceability/attach/AttachWithStalePidFile.java \
+  serviceability/sa/jmap-hprof/JMapHProfLargeHeapTest.java
+
 
 # JRE adds further tests to compact3
 #
@@ -98,7 +102,9 @@
 needs_jre = \
   compiler/6852078/Test6852078.java \
   compiler/7047069/Test7047069.java \
-  runtime/6294277/SourceDebugExtension.java
+  runtime/6294277/SourceDebugExtension.java \
+  runtime/ClassFile/JsrRewriting.java \
+  runtime/ClassFile/OomWhileParsingRepeatedJsr.java
 
 # Compact 3 adds further tests to compact2
 #
@@ -122,7 +128,7 @@
   compiler/whitebox/IsMethodCompilableTest.java \
   gc/6581734/Test6581734.java \
   gc/7072527/TestFullGCCount.java \
-  gc/7168848/HumongousAlloc.java \
+  gc/g1/TestHumongousAllocInitialMark.java \
   gc/arguments/TestG1HeapRegionSize.java \
   gc/metaspace/TestMetaspaceMemoryPool.java \
   runtime/InternalApi/ThreadCpuTimesDeadlock.java \
@@ -138,7 +144,7 @@
  -:needs_jdk
 
 # Tests that require compact2 API's and a full VM
-#  
+#
 needs_full_vm_compact2 =
 
 # Compact 1 adds full VM tests
@@ -159,7 +165,18 @@
   gc/g1/TestRegionAlignment.java \
   gc/g1/TestShrinkToOneRegion.java \
   gc/metaspace/G1AddMetaspaceDependency.java \
-  runtime/6929067/Test6929067.sh
+  gc/startup_warnings/TestCMS.java \
+  gc/startup_warnings/TestCMSIncrementalMode.java \
+  gc/startup_warnings/TestCMSNoIncrementalMode.java \
+  gc/startup_warnings/TestDefaultMaxRAMFraction.java \
+  gc/startup_warnings/TestDefNewCMS.java \
+  gc/startup_warnings/TestIncGC.java \
+  gc/startup_warnings/TestParallelGC.java \
+  gc/startup_warnings/TestParallelScavengeSerialOld.java \
+  gc/startup_warnings/TestParNewCMS.java \
+  gc/startup_warnings/TestParNewSerialOld.java \
+  runtime/6929067/Test6929067.sh \
+  runtime/SharedArchiveFile/SharedArchiveFile.java
 
 # Minimal VM on Compact 2 adds in some compact2 tests
 #
@@ -181,6 +198,7 @@
   serviceability/ \
   compiler/ \
   testlibrary/ \
+  testlibrary_tests/ \
   sanity/ \
   runtime/ \
   gc/ \
--- a/test/compiler/8013496/Test8013496.sh	Wed Oct 16 10:52:41 2013 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,55 +0,0 @@
-#!/bin/sh
-# 
-# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-# 
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.
-# 
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-# 
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-# 
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-# 
-#
-# @test
-# @bug 8013496
-# @summary Test checks that the order in which ReversedCodeCacheSize and 
-#          InitialCodeCacheSize are passed to the VM is irrelevant.  
-# @run shell Test8013496.sh
-#
-#
-## some tests require path to find test source dir
-if [ "${TESTSRC}" = "" ]
-then
-  TESTSRC=${PWD}
-  echo "TESTSRC not set.  Using "${TESTSRC}" as default"
-fi
-echo "TESTSRC=${TESTSRC}"
-## Adding common setup Variables for running shell tests.
-. ${TESTSRC}/../../test_env.sh
-set -x
-
-${TESTJAVA}/bin/java ${TESTVMOPTS} -XX:ReservedCodeCacheSize=2m -XX:InitialCodeCacheSize=500K -version > 1.out 2>&1
-${TESTJAVA}/bin/java ${TESTVMOPTS} -XX:InitialCodeCacheSize=500K -XX:ReservedCodeCacheSize=2m -version > 2.out 2>&1
-
-diff 1.out 2.out
-
-result=$?
-if [ $result -eq 0 ] ; then  
-  echo "Test Passed"
-  exit 0
-else
-  echo "Test Failed"
-  exit 1
-fi
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/codecache/CheckReservedInitialCodeCacheSizeArgOrder.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8013496
+ * @summary Test checks that the order in which ReservedCodeCacheSize and
+ *          InitialCodeCacheSize are passed to the VM is irrelevant.
+ * @library /testlibrary
+ *
+ */
+import com.oracle.java.testlibrary.*;
+
+public class CheckReservedInitialCodeCacheSizeArgOrder {
+  public static void main(String[] args) throws Exception {
+    ProcessBuilder pb1,  pb2;
+    OutputAnalyzer out1, out2;
+
+    pb1 = ProcessTools.createJavaProcessBuilder("-XX:InitialCodeCacheSize=4m", "-XX:ReservedCodeCacheSize=8m", "-version");
+    pb2 = ProcessTools.createJavaProcessBuilder("-XX:ReservedCodeCacheSize=8m", "-XX:InitialCodeCacheSize=4m", "-version");
+
+    out1 = new OutputAnalyzer(pb1.start());
+    out2 = new OutputAnalyzer(pb2.start());
+
+    // Check that the outputs are equal
+    if (out1.getStdout().compareTo(out2.getStdout()) != 0) {
+      throw new RuntimeException("Test failed");
+    }
+
+    out1.shouldHaveExitValue(0);
+    out2.shouldHaveExitValue(0);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/gcbarriers/G1CrashTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 8023472
+ * @summary C2 optimization breaks with G1
+ *
+ * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:-TieredCompilation -Dcount=100000 G1CrashTest
+ *
+ * @author pbiswal@palantir.com
+ */
+
+public class G1CrashTest {
+    static Object[] set = new Object[11];
+
+    public static void main(String[] args) throws InterruptedException {
+        for (int j = 0; j < Integer.getInteger("count"); j++) {
+            Object key = new Object();
+            insertKey(key);
+            if (j > set.length / 2) {
+                Object[] oldKeys = set;
+                set = new Object[2 * set.length - 1];
+                for (Object o : oldKeys) {
+                    if (o != null)
+                        insertKey(o);
+                }
+            }
+        }
+    }
+
+    static void insertKey(Object key) {
+        int hash = key.hashCode() & 0x7fffffff;
+        int index = hash % set.length;
+        Object cur = set[index];
+        if (cur == null)
+            set[index] = key;
+        else
+            insertKeyRehash(key, index, hash, cur);
+    }
+
+    static void insertKeyRehash(Object key, int index, int hash, Object cur) {
+        int loopIndex = index;
+        int firstRemoved = -1;
+        do {
+            if (cur == "dead")
+                firstRemoved = 1;
+            index--;
+            if (index < 0)
+                index += set.length;
+            cur = set[index];
+            if (cur == null) {
+                if (firstRemoved != -1)
+                    set[firstRemoved] = "dead";
+                else
+                    set[index] = key;
+                return;
+            }
+        } while (index != loopIndex);
+        if (firstRemoved != -1)
+            set[firstRemoved] = null;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/inlining/InlineDefaultMethod.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8026735
+ * @summary CHA in C1 should make correct decisions about default methods
+ * @run main/othervm -Xcomp -XX:CompileOnly=InlineDefaultMethod::test -XX:TieredStopAtLevel=1 InlineDefaultMethod
+ */
+
+
+interface InterfaceWithDefaultMethod0 {
+    default public int defaultMethod() {
+        return 1;
+    }
+}
+
+interface InterfaceWithDefaultMethod1 extends InterfaceWithDefaultMethod0 { }
+
+abstract class Subtype implements InterfaceWithDefaultMethod1 { }
+
+class Decoy extends Subtype {
+    public int defaultMethod() {
+        return 2;
+    }
+}
+
+class Instance extends Subtype { }
+
+public class InlineDefaultMethod {
+    public static int test(InterfaceWithDefaultMethod1 x) {
+        return x.defaultMethod();
+    }
+    public static void main(String[] args) {
+        InterfaceWithDefaultMethod1 a = new Decoy();
+        InterfaceWithDefaultMethod1 b = new Instance();
+        if (test(a) != 2 ||
+            test(b) != 1) {
+          System.err.println("FAILED");
+          System.exit(97);
+        }
+        System.err.println("PASSED");
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/AddExactICondTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8024924
+ * @summary Test non constant addExact
+ * @compile AddExactICondTest.java
+ * @run main AddExactICondTest
+ *
+ */
+
+public class AddExactICondTest {
+  public static int result = 0;
+
+  public static void main(String[] args) {
+    for (int i = 0; i < 50000; ++i) {
+      runTest();
+    }
+  }
+
+  public static void runTest() {
+    int i = 7;
+    while (java.lang.Math.addExact(i, result) < 89361) {
+        if ((java.lang.Math.addExact(i, i) & 1) == 1) {
+            i += 3;
+        } else if ((i & 5) == 4) {
+            i += 7;
+        } else if ((i & 0xf) == 6) {
+            i += 2;
+        } else {
+            i += 1;
+        }
+        result += 2;
+    }
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/AddExactIConstantTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8024924
+ * @summary Test constant addExact
+ * @compile AddExactIConstantTest.java Verify.java
+ * @run main AddExactIConstantTest
+ *
+ */
+
+public class AddExactIConstantTest {
+  public static void main(String[] args) {
+      Verify.ConstantTest.verify(new Verify.AddExactI());
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/AddExactILoadTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8024924
+ * @summary Test non constant addExact
+ * @compile AddExactILoadTest.java Verify.java
+ * @run main AddExactILoadTest
+ *
+ */
+
+public class AddExactILoadTest {
+  public static void main(String[] args) {
+      Verify.LoadTest.init();
+      Verify.LoadTest.verify(new Verify.AddExactI());
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/AddExactILoopDependentTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8024924
+ * @summary Test non constant addExact
+ * @compile AddExactILoopDependentTest.java Verify.java
+ * @run main AddExactILoopDependentTest
+ *
+ */
+
+public class AddExactILoopDependentTest {
+  public static void main(String[] args) {
+    Verify.LoopDependentTest.verify(new Verify.AddExactI());
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/AddExactINonConstantTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8024924
+ * @summary Test non constant addExact
+ * @compile AddExactINonConstantTest.java Verify.java
+ * @run main AddExactINonConstantTest
+ *
+ */
+
+public class AddExactINonConstantTest {
+  public static void main(String[] args) {
+      Verify.NonConstantTest.verify(new Verify.AddExactI());
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/AddExactIRepeatTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8025657
+ * @summary Test repeating addExact
+ * @compile AddExactIRepeatTest.java Verify.java
+ * @run main AddExactIRepeatTest
+ *
+ */
+
+public class AddExactIRepeatTest {
+    public static void main(String[] args) {
+        runTest(new Verify.AddExactI());
+    }
+
+    public static int nonExact(int x, int y, Verify.BinaryMethod method) {
+        int result = method.unchecked(x, y);
+        result += method.unchecked(x, y);
+        result += method.unchecked(x, y);
+        result += method.unchecked(x, y);
+        return result;
+    }
+
+    public static void runTest(Verify.BinaryMethod method) {
+        java.util.Random rnd = new java.util.Random();
+        for (int i = 0; i < 50000; ++i) {
+            int x = Integer.MAX_VALUE - 10;
+            int y = Integer.MAX_VALUE - 10 + rnd.nextInt(5);
+
+            int c = rnd.nextInt() / 2;
+            int d = rnd.nextInt() / 2;
+
+            int a = catchingExact(x, y, method);
+
+            if (a != 36) {
+                throw new RuntimeException("a != 36 : " + a);
+            }
+
+            int b = nonExact(c, d, method);
+            int n = exact(c, d, method);
+
+
+            if (n != b) {
+                throw new RuntimeException("n != b : " + n + " != " + b);
+            }
+        }
+    }
+
+    public static int exact(int x, int y, Verify.BinaryMethod method) {
+        int result = 0;
+        result += method.checkMethod(x, y);
+        result += method.checkMethod(x, y);
+        result += method.checkMethod(x, y);
+        result += method.checkMethod(x, y);
+        return result;
+    }
+
+    public static int catchingExact(int x, int y, Verify.BinaryMethod method) {
+        int result = 0;
+        try {
+            result += 5;
+            result = method.checkMethod(x, y);
+        } catch (ArithmeticException e) {
+            result += 1;
+        }
+        try {
+            result += 6;
+
+            result += method.checkMethod(x, y);
+        } catch (ArithmeticException e) {
+            result += 2;
+        }
+        try {
+            result += 7;
+            result += method.checkMethod(x, y);
+        } catch (ArithmeticException e) {
+            result += 3;
+        }
+        try {
+            result += 8;
+            result += method.checkMethod(x, y);
+        } catch (ArithmeticException e) {
+            result += 4;
+        }
+        return result;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/AddExactLConstantTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8026844
+ * @summary Test constant addExact
+ * @compile AddExactLConstantTest.java Verify.java
+ * @run main AddExactLConstantTest
+ *
+ */
+
+public class AddExactLConstantTest {
+    public static void main(String[] args) {
+        Verify.ConstantLongTest.verify(new Verify.AddExactL());
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/AddExactLNonConstantTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8026844
+ * @summary Test non constant addExact
+ * @compile AddExactLNonConstantTest.java Verify.java
+ * @run main AddExactLNonConstantTest
+ *
+ */
+
+public class AddExactLNonConstantTest {
+    public static void main(String[] args) {
+        Verify.NonConstantLongTest.verify(new Verify.AddExactL());
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/DecExactITest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8026844
+ * @summary Test decrementExact
+ * @compile DecExactITest.java Verify.java
+ * @run main DecExactITest
+ *
+ */
+
+public class DecExactITest {
+    public static int[] values = {1, 1, 1, 1};
+    public static int[] minvalues = {Integer.MIN_VALUE, Integer.MIN_VALUE};
+
+    public static void main(String[] args) {
+        runTest(new Verify.DecExactI());
+    }
+
+    public static void runTest(Verify.UnaryMethod method) {
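+        // The array loads keep the operands from being constant-folded, and
+        // Integer.MIN_VALUE - values[2] deliberately wraps around to
+        // Integer.MAX_VALUE with plain int arithmetic before the check runs.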
+        for (int i = 0; i < 20000; ++i) {
+            Verify.verifyUnary(Integer.MIN_VALUE, method);
+            Verify.verifyUnary(minvalues[0], method);
+            Verify.verifyUnary(Integer.MIN_VALUE - values[2], method);
+            Verify.verifyUnary(0, method);
+            Verify.verifyUnary(values[2], method);
+            Verify.verifyUnary(Integer.MAX_VALUE, method);
+            Verify.verifyUnary(Integer.MIN_VALUE - values[0] + values[3], method);
+            Verify.verifyUnary(Integer.MIN_VALUE + 1 - values[0], method);
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/DecExactLTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8026844
+ * @summary Test decrementExact
+ * @compile DecExactLTest.java Verify.java
+ * @run main DecExactLTest
+ *
+ */
+
+public class DecExactLTest {
+    public static long[] values = {1, 1, 1, 1};
+    public static long[] minvalues = {Long.MIN_VALUE, Long.MIN_VALUE};
+
+    public static void main(String[] args) {
+        runTest(new Verify.DecExactL());
+    }
+
+    public static void runTest(Verify.UnaryLongMethod method) {
+        for (int i = 0; i < 20000; ++i) {
+            Verify.verifyUnary(Long.MIN_VALUE, method);
+            Verify.verifyUnary(minvalues[0], method);
+            Verify.verifyUnary(Long.MIN_VALUE - values[2], method);
+            Verify.verifyUnary(0, method);
+            Verify.verifyUnary(values[2], method);
+            Verify.verifyUnary(Long.MAX_VALUE, method);
+            Verify.verifyUnary(Long.MIN_VALUE - values[0] + values[3], method);
+            Verify.verifyUnary(Long.MIN_VALUE + 1 - values[0], method);
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/IncExactITest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8026844
+ * @summary Test incrementExact
+ * @compile IncExactITest.java Verify.java
+ * @run main IncExactITest
+ *
+ */
+
+
+public class IncExactITest {
+    public static int[] values = {1, 1, 1, 1};
+    public static void main(String[] args) {
+        runTest(new Verify.IncExactI());
+    }
+
+    public static void runTest(Verify.UnaryMethod method) {
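+        // Counterpart of DecExactITest at the opposite boundary: the array
+        // loads defeat constant folding, and several expressions evaluate to
+        // Integer.MAX_VALUE, the input on which incrementExact must throw.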
+        for (int i = 0; i < 20000; ++i) {
+            Verify.verifyUnary(Integer.MIN_VALUE, method);
+            Verify.verifyUnary(Integer.MAX_VALUE - 1, method);
+            Verify.verifyUnary(0, method);
+            Verify.verifyUnary(values[1], method);
+            Verify.verifyUnary(Integer.MAX_VALUE, method);
+            Verify.verifyUnary(Integer.MAX_VALUE - values[0] + values[3], method);
+            Verify.verifyUnary(Integer.MAX_VALUE - 1 + values[0], method);
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/IncExactLTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8026844
+ * @summary Test incrementExact
+ * @compile IncExactLTest.java Verify.java
+ * @run main IncExactLTest
+ *
+ */
+
+public class IncExactLTest {
+    public static long[] values = {1, 1, 1, 1};
+    public static void main(String[] args) {
+        runTest(new Verify.IncExactL());
+    }
+
+    public static void runTest(Verify.UnaryLongMethod method) {
+        for (int i = 0; i < 20000; ++i) {
+            Verify.verifyUnary(Long.MIN_VALUE, method);
+            Verify.verifyUnary(Long.MAX_VALUE - 1, method);
+            Verify.verifyUnary(0, method);
+            Verify.verifyUnary(values[1], method);
+            Verify.verifyUnary(Long.MAX_VALUE, method);
+            Verify.verifyUnary(Long.MAX_VALUE - values[0] + values[3], method);
+            Verify.verifyUnary(Long.MAX_VALUE - 1 + values[0], method);
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/MulExactICondTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8026844
+ * @summary Test multiplyExact as condition
+ * @compile MulExactICondTest.java
+ * @run main MulExactICondTest
+ *
+ */
+
+public class MulExactICondTest {
+    public static int result = 0;
+
+    public static void main(String[] args) {
+        for (int i = 0; i < 50000; ++i) {
+            runTest();
+        }
+    }
+
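+    // Exercises the intrinsic when its result feeds loop and branch
+    // conditions rather than being stored; the operands stay small enough
+    // that multiplyExact never actually throws here.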
+    public static void runTest() {
+        int i = 7;
+        while (java.lang.Math.multiplyExact(i, result) < 89361) {
+            if ((java.lang.Math.multiplyExact(i, i) & 1) == 1) {
+                i += 3;
+            } else if ((i & 5) == 4) {
+                i += 7;
+            } else if ((i & 0xf) == 6) {
+                i += 2;
+            } else {
+                i += 1;
+            }
+            result += 2;
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/MulExactIConstantTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8026844
+ * @summary Test constant multiplyExact
+ * @compile MulExactIConstantTest.java Verify.java
+ * @run main MulExactIConstantTest
+ *
+ */
+
+public class MulExactIConstantTest {
+    public static void main(String[] args) {
+        Verify.ConstantTest.verify(new Verify.MulExactI());
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/MulExactILoadTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8026844
+ * @summary Test multiplyExact
+ * @compile MulExactILoadTest.java Verify.java
+ * @run main MulExactILoadTest
+ *
+ */
+
+public class MulExactILoadTest {
+    public static void main(String[] args) {
+        Verify.LoadTest.init();
+        Verify.LoadTest.verify(new Verify.MulExactI());
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/MulExactILoopDependentTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8026844
+ * @summary Test loop dependent multiplyExact
+ * @compile MulExactILoopDependentTest.java Verify.java
+ * @run main MulExactILoopDependentTest
+ *
+ */
+public class MulExactILoopDependentTest {
+    public static void main(String[] args) {
+        Verify.LoopDependentTest.verify(new Verify.MulExactI());
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/MulExactINonConstantTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8026844
+ * @summary Test non constant multiplyExact
+ * @compile MulExactINonConstantTest.java Verify.java
+ * @run main MulExactINonConstantTest
+ *
+ */
+
+public class MulExactINonConstantTest {
+    public static void main(String[] args) {
+        Verify.NonConstantTest.verify(new Verify.MulExactI());
+        Verify.LoadTest.verify(new Verify.MulExactI());
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/MulExactIRepeatTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8026844
+ * @summary Test repeating multiplyExact
+ * @compile MulExactIRepeatTest.java Verify.java
+ * @run main MulExactIRepeatTest
+ *
+ */
+
+public class MulExactIRepeatTest {
+    public static void main(String[] args) {
+        runTest(new Verify.MulExactI());
+    }
+
+    public static int nonExact(int x, int y, Verify.BinaryMethod method) {
+        int result = method.unchecked(x, y);
+        result += method.unchecked(x, y);
+        result += method.unchecked(x, y);
+        result += method.unchecked(x, y);
+        return result;
+    }
+
+    public static void runTest(Verify.BinaryMethod method) {
+        java.util.Random rnd = new java.util.Random();
+        for (int i = 0; i < 50000; ++i) {
+            int x = Integer.MAX_VALUE - 10;
+            int y = Integer.MAX_VALUE - 10 + rnd.nextInt(5);
+
+            int c = rnd.nextInt() / 10;
+            int d = rnd.nextInt(9);
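+            // |c| <= Integer.MAX_VALUE / 10 and d < 9, so c * d cannot
+            // overflow and the checked and unchecked paths below must agree.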
+
+            int a = catchingExact(x, y, method);
+
+            if (a != 36) {
+                throw new RuntimeException("a != 36 : " + a);
+            }
+
+            int b = nonExact(c, d, method);
+            int n = exact(c, d, method);
+
+
+            if (n != b) {
+                throw new RuntimeException("n != b : " + n + " != " + b);
+            }
+        }
+    }
+
+    public static int exact(int x, int y, Verify.BinaryMethod method) {
+        int result = 0;
+        result += method.checkMethod(x, y);
+        result += method.checkMethod(x, y);
+        result += method.checkMethod(x, y);
+        result += method.checkMethod(x, y);
+        return result;
+    }
+
+    public static int catchingExact(int x, int y, Verify.BinaryMethod method) {
+        int result = 0;
+        try {
+            result += 5;
+            result = method.checkMethod(x, y);
+        } catch (ArithmeticException e) {
+            result += 1;
+        }
+        try {
+            result += 6;
+
+            result += method.checkMethod(x, y);
+        } catch (ArithmeticException e) {
+            result += 2;
+        }
+        try {
+            result += 7;
+            result += method.checkMethod(x, y);
+        } catch (ArithmeticException e) {
+            result += 3;
+        }
+        try {
+            result += 8;
+            result += method.checkMethod(x, y);
+        } catch (ArithmeticException e) {
+            result += 4;
+        }
+        return result;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/MulExactLConstantTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8026844
+ * @summary Test constant multiplyExact
+ * @compile MulExactLConstantTest.java Verify.java
+ * @run main MulExactLConstantTest
+ *
+ */
+
+public class MulExactLConstantTest {
+    public static void main(String[] args) {
+        Verify.ConstantLongTest.verify(new Verify.MulExactL());
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/MulExactLNonConstantTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8026844
+ * @summary Test non constant multiplyExact
+ * @compile MulExactLNonConstantTest.java Verify.java
+ * @run main MulExactLNonConstantTest
+ *
+ */
+
+public class MulExactLNonConstantTest {
+    public static void main(String[] args) {
+        Verify.NonConstantLongTest.verify(new Verify.MulExactL());
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/NegExactIConstantTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8026844
+ * @summary Test constant negateExact
+ * @compile NegExactIConstantTest.java Verify.java
+ * @run main NegExactIConstantTest
+ *
+ */
+
+public class NegExactIConstantTest {
+    public static void main(String[] args) {
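+        // UnaryToBinary (defined in Verify) wraps the unary negateExact
+        // operation so the binary-method harness can drive it unchanged.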
+        Verify.ConstantTest.verify(new Verify.UnaryToBinary(new Verify.NegExactI()));
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/NegExactILoadTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8026844
+ * @summary Test negateExact
+ * @compile NegExactILoadTest.java Verify.java
+ * @run main NegExactILoadTest
+ *
+ */
+
+public class NegExactILoadTest {
+    public static void main(String[] args) {
+        Verify.LoadTest.init();
+        Verify.LoadTest.verify(new Verify.UnaryToBinary(new Verify.NegExactI()));
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/NegExactILoopDependentTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8026844
+ * @summary Test loop dependent negateExact
+ * @compile NegExactILoopDependentTest.java Verify.java
+ * @run main NegExactILoopDependentTest
+ *
+ */
+public class NegExactILoopDependentTest {
+    public static void main(String[] args) {
+        Verify.LoopDependentTest.verify(new Verify.UnaryToBinary(new Verify.NegExactI()));
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/NegExactINonConstantTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8026844
+ * @summary Test non constant negateExact
+ * @compile NegExactINonConstantTest.java Verify.java
+ * @run main NegExactINonConstantTest
+ *
+ */
+
+public class NegExactINonConstantTest {
+    public static void main(String[] args) {
+        Verify.NonConstantTest.verify(new Verify.UnaryToBinary(new Verify.NegExactI()));
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/NegExactLConstantTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8026844
+ * @summary Test constant negateExact
+ * @compile NegExactLConstantTest.java Verify.java
+ * @run main NegExactLConstantTest
+ *
+ */
+
+public class NegExactLConstantTest {
+    public static void main(String[] args) {
+        Verify.ConstantLongTest.verify(new Verify.UnaryToBinaryLong(new Verify.NegExactL()));
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/NegExactLNonConstantTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8026844
+ * @summary Test non constant negateExact
+ * @compile NegExactLNonConstantTest.java Verify.java
+ * @run main NegExactLNonConstantTest
+ *
+ */
+
+public class NegExactLNonConstantTest {
+    public static void main(String[] args) {
+        Verify.NonConstantLongTest.verify(new Verify.UnaryToBinaryLong(new Verify.NegExactL()));
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/NestedMathExactTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8027444
+ * @summary Test nested loops
+ * @compile NestedMathExactTest.java
+ * @run main NestedMathExactTest
+ *
+ */
+
+public class NestedMathExactTest {
+    public static final int LIMIT = 100;
+    public static int[] result = new int[LIMIT];
+    public static int value = 17;
+
+    public static void main(String[] args) {
+        for (int i = 0; i < LIMIT; ++i) {
+            result[i] = runTest();
+        }
+    }
+
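+    // The inner loop recomputes sum = 17^5 = 1419857 on each outer iteration;
+    // the Math.addExact(j, 1) in the outer loop update is what places the
+    // intrinsic inside the nested-loop shape under test.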
+    public static int runTest() {
+        int sum = 0;
+        for (int j = 0; j < 100000; j = Math.addExact(j, 1)) {
+            sum = 1;
+            for (int i = 0; i < 5; ++i) {
+                sum *= value;
+            }
+        }
+        return sum;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/SubExactICondTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8026844
+ * @summary Test subtractExact as condition
+ * @compile SubExactICondTest.java Verify.java
+ * @run main SubExactICondTest
+ *
+ */
+
+public class SubExactICondTest {
+    public static int result = 0;
+
+    public static void main(String[] args) {
+        for (int i = 0; i < 50000; ++i) {
+            runTest();
+        }
+    }
+
+    public static void runTest() {
+        int i = 7;
+        while (java.lang.Math.subtractExact(i, result) > -31361) {
+            if ((java.lang.Math.subtractExact(i, i) & 1) == 1) {
+                i -= 3;
+            } else if ((i & 5) == 4) {
+                i -= 7;
+            } else if ((i & 0xf) == 6) {
+                i -= 2;
+            } else {
+                i -= 1;
+            }
+            result += 2;
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/SubExactIConstantTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8026844
+ * @summary Test constant subtractExact
+ * @compile SubExactIConstantTest.java Verify.java
+ * @run main SubExactIConstantTest
+ *
+ */
+
+public class SubExactIConstantTest {
+    public static void main(String[] args) {
+        Verify.ConstantTest.verify(new Verify.SubExactI());
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/SubExactILoadTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8026844
+ * @summary Test subtractExact
+ * @compile SubExactILoadTest.java Verify.java
+ * @run main SubExactILoadTest
+ *
+ */
+
+public class SubExactILoadTest {
+    public static void main(String[] args) {
+        Verify.LoadTest.init();
+        Verify.LoadTest.verify(new Verify.SubExactI());
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/SubExactILoopDependentTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8026844
+ * @summary Test loop dependent subtractExact
+ * @compile SubExactILoopDependentTest.java Verify.java
+ * @run main SubExactILoopDependentTest
+ *
+ */
+
+public class SubExactILoopDependentTest {
+    public static void main(String[] args) {
+        Verify.LoopDependentTest.verify(new Verify.SubExactI());
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/SubExactINonConstantTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8026844
+ * @summary Test non constant subtractExact
+ * @compile SubExactINonConstantTest.java Verify.java
+ * @run main SubExactINonConstantTest
+ *
+ */
+
+public class SubExactINonConstantTest {
+    public static void main(String[] args) {
+        Verify.NonConstantTest.verify(new Verify.SubExactI());
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/SubExactIRepeatTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8026844
+ * @summary Test repeating subtractExact
+ * @compile SubExactIRepeatTest.java Verify.java
+ * @run main SubExactIRepeatTest
+ *
+ */
+
+public class SubExactIRepeatTest {
+    public static void main(String[] args) {
+        runTest(new Verify.SubExactI());
+    }
+
+    public static int nonExact(int x, int y, Verify.BinaryMethod method) {
+        int result = method.unchecked(x, y);
+        result += method.unchecked(x, y);
+        result += method.unchecked(x, y);
+        result += method.unchecked(x, y);
+        return result;
+    }
+
+    public static void runTest(Verify.BinaryMethod method) {
+        java.util.Random rnd = new java.util.Random();
+        for (int i = 0; i < 50000; ++i) {
+            int x = Integer.MIN_VALUE + 10;
+            int y = Integer.MAX_VALUE - 10 + rnd.nextInt(5);
+
+            int c = rnd.nextInt() / 2;
+            int d = rnd.nextInt() / 2;
+
+            int a = catchingExact(x, y, method);
+
+            if (a != 36) {
+                throw new RuntimeException("a != 36 : " + a);
+            }
+
+            int b = nonExact(c, d, method);
+            int n = exact(c, d, method);
+
+
+            if (n != b) {
+                throw new RuntimeException("n != b : " + n + " != " + b);
+            }
+        }
+    }
+
+    public static int exact(int x, int y, Verify.BinaryMethod method) {
+        int result = 0;
+        result += method.checkMethod(x, y);
+        result += method.checkMethod(x, y);
+        result += method.checkMethod(x, y);
+        result += method.checkMethod(x, y);
+        return result;
+    }
+
+    public static int catchingExact(int x, int y, Verify.BinaryMethod method) {
+        int result = 0;
+        try {
+            result += 5;
+            result = method.checkMethod(x, y);
+        } catch (ArithmeticException e) {
+            result += 1;
+        }
+        try {
+            result += 6;
+
+            result += method.checkMethod(x, y);
+        } catch (ArithmeticException e) {
+            result += 2;
+        }
+        try {
+            result += 7;
+            result += method.checkMethod(x, y);
+        } catch (ArithmeticException e) {
+            result += 3;
+        }
+        try {
+            result += 8;
+            result += method.checkMethod(x, y);
+        } catch (ArithmeticException e) {
+            result += 4;
+        }
+        return result;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/SubExactLConstantTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8026844 8027353
+ * @summary Test constant subtractExact
+ * @compile SubExactLConstantTest.java Verify.java
+ * @run main SubExactLConstantTest
+ *
+ */
+
+public class SubExactLConstantTest {
+    public static void main(String[] args) {
+        Verify.ConstantLongTest.verify(new Verify.SubExactL());
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/SubExactLNonConstantTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8026844 8027353
+ * @summary Test non constant subtractExact
+ * @compile SubExactLNonConstantTest.java Verify.java
+ * @run main SubExactLNonConstantTest
+ *
+ */
+
+public class SubExactLNonConstantTest {
+    public static void main(String[] args) {
+        Verify.NonConstantLongTest.verify(new Verify.SubExactL());
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/Verify.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,671 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
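+/**
+ * Shared harness for the Math.*Exact intrinsic tests: each verify method runs
+ * the potentially intrinsified checkMethod and a non-intrinsic reference
+ * implementation (safeMethod) on the same inputs, and requires both the
+ * results and the throw/no-throw behaviour to match.
+ */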
+public class Verify {
+    public static String throwWord(boolean threw) {
+        return (threw ? "threw" : "didn't throw");
+    }
+
+    public static void verifyResult(UnaryMethod method, int result1, int result2, boolean exception1, boolean exception2, int value) {
+        if (exception1 != exception2) {
+            throw new RuntimeException("Intrinsic version [" + method.name() + "]" + throwWord(exception1) + " exception, NonIntrinsic version" + throwWord(exception2) + " for: " + value);
+        }
+        if (result1 != result2) {
+            throw new RuntimeException("Intrinsic version [" + method.name() + "] returned: " + result1 + " while NonIntrinsic version returned: " + result2);
+        }
+    }
+
+    public static void verifyResult(UnaryLongMethod method, long result1, long result2, boolean exception1, boolean exception2, long value) {
+        if (exception1 != exception2) {
+            throw new RuntimeException("Intrinsic version [" + method.name() + "]" + throwWord(exception1) + " exception, NonIntrinsic version" + throwWord(exception2) + " for: " + value);
+        }
+        if (result1 != result2) {
+            throw new RuntimeException("Intrinsic version [" + method.name() + "] returned: " + result1 + " while NonIntrinsic version returned: " + result2);
+        }
+    }
+
+    private static void verifyResult(BinaryMethod method, int result1, int result2, boolean exception1, boolean exception2, int a, int b) {
+        if (exception1 != exception2) {
+            throw new RuntimeException("Intrinsic version [" + method.name() + "]" + throwWord(exception1) + " exception, NonIntrinsic version " + throwWord(exception2) + " for: " + a + " + " + b);
+        }
+        if (result1 != result2) {
+            throw new RuntimeException("Intrinsic version [" + method.name() + "] returned: " + result1 + " while NonIntrinsic version returned: " + result2);
+        }
+    }
+
+    private static void verifyResult(BinaryLongMethod method, long result1, long result2, boolean exception1, boolean exception2, long a, long b) {
+        if (exception1 != exception2) {
+            throw new RuntimeException("Intrinsic version [" + method.name() + "]" + throwWord(exception1) + " exception, NonIntrinsic version " + throwWord(exception2) + " for: " + a + " + " + b);
+        }
+        if (result1 != result2) {
+            throw new RuntimeException("Intrinsic version [" + method.name() + "] returned: " + result1 + " while NonIntrinsic version returned: " + result2);
+        }
+    }
+
+
+    public static void verifyUnary(int a, UnaryMethod method) {
+        boolean exception1 = false, exception2 = false;
+        int result1 = 0, result2 = 0;
+        try {
+            result1 = method.checkMethod(a);
+        } catch (ArithmeticException e) {
+            exception1 = true;
+        }
+        try {
+            result2 = method.safeMethod(a);
+        } catch (ArithmeticException e) {
+            exception2 = true;
+        }
+
+        verifyResult(method, result1, result2, exception1, exception2, a);
+    }
+
+    public static void verifyUnary(long a, UnaryLongMethod method) {
+        boolean exception1 = false, exception2 = false;
+        long result1 = 0, result2 = 0;
+        try {
+            result1 = method.checkMethod(a);
+        } catch (ArithmeticException e) {
+            exception1 = true;
+        }
+        try {
+            result2 = method.safeMethod(a);
+        } catch (ArithmeticException e) {
+            exception2 = true;
+        }
+
+        verifyResult(method, result1, result2, exception1, exception2, a);
+    }
+
+
+    public static void verifyBinary(int a, int b, BinaryMethod method) {
+        boolean exception1 = false, exception2 = false;
+        int result1 = 0, result2 = 0;
+        try {
+            result1 = method.checkMethod(a, b);
+        } catch (ArithmeticException e) {
+            exception1 = true;
+        }
+        try {
+            result2 = method.safeMethod(a, b);
+        } catch (ArithmeticException e) {
+            exception2 = true;
+        }
+
+        verifyResult(method, result1, result2, exception1, exception2, a, b);
+    }
+
+    public static void verifyBinary(long a, long b, BinaryLongMethod method) {
+        boolean exception1 = false, exception2 = false;
+        long result1 = 0, result2 = 0;
+        try {
+            result1 = method.checkMethod(a, b);
+        } catch (ArithmeticException e) {
+            exception1 = true;
+        }
+        try {
+            result2 = method.safeMethod(a, b);
+        } catch (ArithmeticException e) {
+            exception2 = true;
+        }
+
+        verifyResult(method, result1, result2, exception1, exception2, a, b);
+    }
+
+
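+    // Illustrative pairing of the harness with the operation wrappers defined
+    // below (hypothetical driver code, not part of this changeset):
+    //     Verify.LoadTest.init();
+    //     Verify.LoadTest.verify(new Verify.AddExactI());
+    //     Verify.NonConstantLongTest.verify(new Verify.AddExactL());
+    //     Verify.verifyUnary(42, new Verify.NegExactI());
+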
+    public static class LoadTest {
+        public static java.util.Random rnd = new java.util.Random();
+        public static int[] values = new int[256];
+
+        public static void init() {
+            for (int i = 0; i < values.length; ++i) {
+                values[i] = rnd.nextInt();
+            }
+        }
+
+        public static void verify(BinaryMethod method) {
+            for (int i = 0; i < 50000; ++i) {
+                Verify.verifyBinary(values[i & 255], values[i & 255] - i, method);
+                Verify.verifyBinary(values[i & 255] + i, values[i & 255] - i, method);
+                Verify.verifyBinary(values[i & 255], values[i & 255], method);
+                if ((i & 1) == 1 && i > 5) {
+                    Verify.verifyBinary(values[i & 255] + i, values[i & 255] - i, method);
+                } else {
+                    Verify.verifyBinary(values[i & 255] - i, values[i & 255] + i, method);
+                }
+                Verify.verifyBinary(values[i & 255], values[(i + 1) & 255], method);
+            }
+        }
+    }
+
+    public static class NonConstantTest {
+        public static java.util.Random rnd = new java.util.Random();
+
+        public static void verify(BinaryMethod method) {
+            for (int i = 0; i < 50000; ++i) {
+                int rnd1 = rnd.nextInt(), rnd2 = rnd.nextInt();
+                Verify.verifyBinary(rnd1, rnd2, method);
+                Verify.verifyBinary(rnd1, rnd2 + 1, method);
+                Verify.verifyBinary(rnd1 + 1, rnd2, method);
+                Verify.verifyBinary(rnd1 - 1, rnd2, method);
+                Verify.verifyBinary(rnd1, rnd2 - 1, method);
+            }
+        }
+    }
+
+    public static class NonConstantLongTest {
+        public static long[] values = { Long.MIN_VALUE, Long.MAX_VALUE, 0, Long.MAX_VALUE - 1831 };
+        public static java.util.Random rnd = new java.util.Random();
+
+        public static void verify(BinaryLongMethod method) {
+            for (int i = 0; i < 50000; ++i) {
+                long rnd1 = rnd.nextLong(), rnd2 = rnd.nextLong();
+                Verify.verifyBinary(rnd1, rnd2, method);
+                Verify.verifyBinary(rnd1, rnd2 + 1, method);
+                Verify.verifyBinary(rnd1 + 1, rnd2, method);
+                Verify.verifyBinary(rnd1 - 1, rnd2, method);
+                Verify.verifyBinary(rnd1, rnd2 - 1, method);
+                Verify.verifyBinary(rnd1 + Long.MAX_VALUE - rnd2, rnd2 + 1, method);
+                Verify.verifyBinary(values[0], values[2], method);
+                Verify.verifyBinary(values[1], values[2], method);
+                Verify.verifyBinary(values[3], 74L, method);
+            }
+        }
+    }
+
+    public static class LoopDependentTest {
+        public static java.util.Random rnd = new java.util.Random();
+
+        public static void verify(BinaryMethod method) {
+            int rnd1 = rnd.nextInt(), rnd2 = rnd.nextInt();
+            runTest(rnd1, rnd2, method);
+        }
+
+        private static void runTest(int rnd1, int rnd2, BinaryMethod method) {
+            for (int i = 0; i < 50000; ++i) {
+                Verify.verifyBinary(rnd1 + i, rnd2 + i, method);
+                Verify.verifyBinary(rnd1 + i, rnd2 + (i & 0xff), method);
+                Verify.verifyBinary(rnd1 - i, rnd2 - (i & 0xff), method);
+                Verify.verifyBinary(rnd1 + i + 1, rnd2 + i + 2, method);
+                Verify.verifyBinary(rnd1 + i * 2, rnd2 + i, method);
+            }
+        }
+    }
+
+    public static class ConstantTest {
+        public static void verify(BinaryMethod method) {
+            for (int i = 0; i < 50000; ++i) {
+                Verify.verifyBinary(5, 7, method);
+                Verify.verifyBinary(Integer.MAX_VALUE, 1, method);
+                Verify.verifyBinary(Integer.MIN_VALUE, -1, method);
+                Verify.verifyBinary(Integer.MAX_VALUE, -1, method);
+                Verify.verifyBinary(Integer.MIN_VALUE, 1, method);
+                Verify.verifyBinary(Integer.MAX_VALUE / 2, Integer.MAX_VALUE / 2, method);
+                Verify.verifyBinary(Integer.MAX_VALUE / 2, (Integer.MAX_VALUE / 2) + 3, method);
+                Verify.verifyBinary(Integer.MAX_VALUE, Integer.MIN_VALUE, method);
+            }
+        }
+    }
+
+    public static class ConstantLongTest {
+        public static void verify(BinaryLongMethod method) {
+            for (int i = 0; i < 50000; ++i) {
+                Verify.verifyBinary(5, 7, method);
+                Verify.verifyBinary(Long.MAX_VALUE, 1, method);
+                Verify.verifyBinary(Long.MIN_VALUE, -1, method);
+                Verify.verifyBinary(Long.MAX_VALUE, -1, method);
+                Verify.verifyBinary(Long.MIN_VALUE, 1, method);
+                Verify.verifyBinary(Long.MAX_VALUE / 2, Long.MAX_VALUE / 2, method);
+                Verify.verifyBinary(Long.MAX_VALUE / 2, (Long.MAX_VALUE / 2) + 3, method);
+                Verify.verifyBinary(Long.MAX_VALUE, Long.MIN_VALUE, method);
+            }
+        }
+    }
+
+    public static interface BinaryMethod {
+        int safeMethod(int a, int b);
+        int checkMethod(int a, int b);
+        int unchecked(int a, int b);
+        String name();
+    }
+
+    public static interface UnaryMethod {
+        int safeMethod(int value);
+        int checkMethod(int value);
+        int unchecked(int value);
+        String name();
+    }
+
+    public static interface BinaryLongMethod {
+        long safeMethod(long a, long b);
+        long checkMethod(long a, long b);
+        long unchecked(long a, long b);
+        String name();
+    }
+
+    public static interface UnaryLongMethod {
+        long safeMethod(long value);
+        long checkMethod(long value);
+        long unchecked(long value);
+        String name();
+    }
+
+    public static class UnaryToBinary implements BinaryMethod {
+        private final UnaryMethod method;
+        public UnaryToBinary(UnaryMethod method) {
+            this.method = method;
+        }
+
+        @Override
+        public int safeMethod(int a, int b) {
+            return method.safeMethod(a);
+        }
+
+        @Override
+        public int checkMethod(int a, int b) {
+            return method.checkMethod(a);
+        }
+
+        @Override
+        public int unchecked(int a, int b) {
+            return method.unchecked(a);
+        }
+
+        @Override
+        public String name() {
+            return method.name();
+        }
+    }
+
+    public static class UnaryToBinaryLong implements BinaryLongMethod {
+        private final UnaryLongMethod method;
+        public UnaryToBinaryLong(UnaryLongMethod method) {
+            this.method = method;
+        }
+
+        @Override
+        public long safeMethod(long a, long b) {
+            return method.safeMethod(a);
+        }
+
+        @Override
+        public long checkMethod(long a, long b) {
+            return method.checkMethod(a);
+        }
+
+        @Override
+        public long unchecked(long a, long b) {
+            return method.unchecked(a);
+        }
+
+        @Override
+        public String name() {
+            return method.name();
+        }
+    }
+
+
+    public static class AddExactI implements BinaryMethod {
+        @Override
+        public int safeMethod(int x, int y) {
+            int r = x + y;
+            // HD 2-12 Overflow iff both arguments have the opposite sign of the result
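+            // Worked example: x = Integer.MAX_VALUE, y = 1 wraps r around to
+            // Integer.MIN_VALUE; both (x ^ r) and (y ^ r) then have the sign
+            // bit set, so their AND is negative and the check fires.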
+            if (((x ^ r) & (y ^ r)) < 0) {
+                throw new ArithmeticException("integer overflow");
+            }
+            return r;
+        }
+
+        @Override
+        public int checkMethod(int a, int b) {
+            return Math.addExact(a, b);
+        }
+
+        @Override
+        public String name() {
+            return "addExact";
+        }
+
+        @Override
+        public int unchecked(int a, int b) {
+            return a + b;
+        }
+    }
+
+    public static class AddExactL implements BinaryLongMethod {
+        @Override
+        public long safeMethod(long x, long y) {
+            long r = x + y;
+            // HD 2-12 Overflow iff both arguments have the opposite sign of the result
+            if (((x ^ r) & (y ^ r)) < 0) {
+                throw new ArithmeticException("integer overflow");
+            }
+            return r;
+        }
+
+        @Override
+        public long checkMethod(long a, long b) {
+            return Math.addExact(a, b);
+        }
+
+        @Override
+        public String name() {
+            return "addExactLong";
+        }
+
+        @Override
+        public long unchecked(long a, long b) {
+            return a + b;
+        }
+    }
+
+    public static class MulExactI implements BinaryMethod {
+        @Override
+        public int safeMethod(int x, int y) {
+            long r = (long)x * (long)y;
+            if ((int)r != r) {
+                throw new ArithmeticException("integer overflow");
+            }
+            return (int)r;
+        }
+
+        @Override
+        public int checkMethod(int a, int b) {
+            return Math.multiplyExact(a, b);
+        }
+
+        @Override
+        public int unchecked(int a, int b) {
+            return a * b;
+        }
+
+        @Override
+        public String name() {
+            return "multiplyExact";
+        }
+    }
+
+    public static class MulExactL implements BinaryLongMethod {
+        @Override
+        public long safeMethod(long x, long y) {
+            long r = x * y;
+            long ax = Math.abs(x);
+            long ay = Math.abs(y);
+            if (((ax | ay) >>> 31 != 0)) {
+                // Some bits greater than 2^31 that might cause overflow
+                // Check the result using the divide operator
+                // and check for the special case of Long.MIN_VALUE * -1
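+                // (Long.MIN_VALUE * -1 wraps to Long.MIN_VALUE, and so does
+                // Long.MIN_VALUE / -1, so r / y == x there and the division
+                // check alone would miss that overflow.)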
+                if (((y != 0) && (r / y != x)) ||
+                        (x == Long.MIN_VALUE && y == -1)) {
+                    throw new ArithmeticException("long overflow");
+                }
+            }
+            return r;
+        }
+
+        @Override
+        public long checkMethod(long a, long b) {
+            return Math.multiplyExact(a, b);
+        }
+
+        @Override
+        public long unchecked(long a, long b) {
+            return a * b;
+        }
+
+        @Override
+        public String name() {
+            return "multiplyExact";
+        }
+    }
+
+    public static class NegExactL implements UnaryLongMethod {
+        @Override
+        public long safeMethod(long a) {
+            if (a == Long.MIN_VALUE) {
+                throw new ArithmeticException("long overflow");
+            }
+
+            return -a;
+        }
+
+        @Override
+        public long checkMethod(long value) {
+            return Math.negateExact(value);
+        }
+
+        @Override
+        public long unchecked(long value) {
+            return -value;
+        }
+
+        @Override
+        public String name() {
+            return "negateExactLong";
+        }
+    }
+
+    public static class NegExactI implements UnaryMethod {
+        @Override
+        public int safeMethod(int a) {
+            if (a == Integer.MIN_VALUE) {
+                throw new ArithmeticException("integer overflow");
+            }
+
+            return -a;
+        }
+
+        @Override
+        public int checkMethod(int value) {
+            return Math.negateExact(value);
+        }
+
+        @Override
+        public int unchecked(int value) {
+            return -value;
+        }
+
+        @Override
+        public String name() {
+            return "negateExact";
+        }
+    }
+
+    public static class SubExactI implements BinaryMethod {
+        @Override
+        public int safeMethod(int x, int y) {
+            int r = x - y;
+            // HD 2-12 Overflow iff the arguments have different signs and
+            // the sign of the result is different than the sign of x
+            if (((x ^ y) & (x ^ r)) < 0) {
+                throw new ArithmeticException("integer overflow");
+            }
+            return r;
+        }
+
+        @Override
+        public int checkMethod(int a, int b) {
+            return Math.subtractExact(a, b);
+        }
+
+        @Override
+        public int unchecked(int a, int b) {
+            return a - b;
+        }
+
+        @Override
+        public String name() {
+            return "subtractExact";
+        }
+    }
+
+    public static class SubExactL implements BinaryLongMethod {
+        @Override
+        public long safeMethod(long x, long y) {
+            long r = x - y;
+            // HD 2-12 Overflow iff the arguments have different signs and
+            // the sign of the result is different than the sign of x
+            if (((x ^ y) & (x ^ r)) < 0) {
+                throw new ArithmeticException("integer overflow");
+            }
+            return r;
+        }
+
+        @Override
+        public long checkMethod(long a, long b) {
+            return Math.subtractExact(a, b);
+        }
+
+        @Override
+        public long unchecked(long a, long b) {
+            return a - b;
+        }
+
+        @Override
+        public String name() {
+            return "subtractExactLong";
+        }
+    }
+
+    static class IncExactL implements UnaryLongMethod {
+        @Override
+        public long safeMethod(long a) {
+            if (a == Long.MAX_VALUE) {
+                throw new ArithmeticException("long overflow");
+            }
+
+            return a + 1L;
+        }
+
+        @Override
+        public long checkMethod(long value) {
+            return Math.incrementExact(value);
+        }
+
+        @Override
+        public long unchecked(long value) {
+            return value + 1;
+        }
+
+        @Override
+        public String name() {
+            return "incrementExactLong";
+        }
+    }
+
+    static class IncExactI implements UnaryMethod {
+        @Override
+        public int safeMethod(int a) {
+            if (a == Integer.MAX_VALUE) {
+                throw new ArithmeticException("integer overflow");
+            }
+
+            return a + 1;
+        }
+
+        @Override
+        public int checkMethod(int value) {
+            return Math.incrementExact(value);
+        }
+
+        @Override
+        public int unchecked(int value) {
+            return value + 1;
+        }
+
+        @Override
+        public String name() {
+            return "incrementExact";
+        }
+    }
+
+    static class DecExactL implements UnaryLongMethod {
+        @Override
+        public long safeMethod(long a) {
+            if (a == Long.MIN_VALUE) {
+                throw new ArithmeticException("long overflow");
+            }
+
+            return a - 1L;
+        }
+
+        @Override
+        public long checkMethod(long value) {
+            return Math.decrementExact(value);
+        }
+
+        @Override
+        public long unchecked(long value) {
+            return value - 1;
+        }
+
+        @Override
+        public String name() {
+            return "decExactLong";
+        }
+    }
+
+    static class DecExactI implements UnaryMethod {
+        @Override
+        public int safeMethod(int a) {
+            if (a == Integer.MIN_VALUE) {
+                throw new ArithmeticException("integer overflow");
+            }
+
+            return a - 1;
+        }
+
+        @Override
+        public int checkMethod(int value) {
+            return Math.decrementExact(value);
+        }
+
+        @Override
+        public int unchecked(int value) {
+            return value - 1;
+        }
+
+        @Override
+        public String name() {
+            return "decrementExact";
+        }
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/jsr292/ConcurrentClassLoadingTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8022595
+ * @summary JSR292: deadlock during class loading of MethodHandles, MethodHandleImpl & MethodHandleNatives
+ *
+ * @run main/othervm ConcurrentClassLoadingTest
+ */
+import java.util.*;
+import java.util.concurrent.BrokenBarrierException;
+import java.util.concurrent.CyclicBarrier;
+
+public class ConcurrentClassLoadingTest {
+    int numThreads = 0;
+    long seed = 0;
+    CyclicBarrier l;
+    Random rand;
+
+    public static void main(String[] args) throws Throwable {
+        ConcurrentClassLoadingTest test = new ConcurrentClassLoadingTest();
+        test.parseArgs(args);
+        test.run();
+    }
+
+    void parseArgs(String[] args) {
+        int i = 0;
+        while (i < args.length) {
+            String flag = args[i];
+            switch(flag) {
+                case "-seed":
+                    seed = Long.parseLong(args[++i]);
+                    break;
+                case "-numThreads":
+                    numThreads = Integer.parseInt(args[++i]);
+                    break;
+                default:
+                    throw new Error("Unknown flag: " + flag);
+            }
+            ++i;
+        }
+    }
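+
+    // Illustrative invocation (both flags are optional; init() below derives
+    // defaults from the processor count and a random seed):
+    //     java ConcurrentClassLoadingTest -numThreads 4 -seed 42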
+
+    void init() {
+        if (numThreads == 0) {
+            numThreads = Runtime.getRuntime().availableProcessors();
+        }
+
+        if (seed == 0) {
+            seed = (new Random()).nextLong();
+        }
+        rand = new Random(seed);
+
+        l = new CyclicBarrier(numThreads + 1);
+
+        System.out.printf("Threads: %d\n", numThreads);
+        System.out.printf("Seed: %d\n", seed);
+    }
+
+    final List<Loader> loaders = new ArrayList<>();
+
+    void prepare() {
+        List<String> c = new ArrayList<>(Arrays.asList(classNames));
+
+        // Split classes between loading threads
+        int count = (classNames.length / numThreads) + 1;
+        for (int t = 0; t < numThreads; t++) {
+            List<String> sel = new ArrayList<>();
+
+            System.out.printf("Thread #%d:\n", t);
+            for (int i = 0; i < count; i++) {
+                if (c.size() == 0) break;
+
+                int k = rand.nextInt(c.size());
+                String elem = c.remove(k);
+                sel.add(elem);
+                System.out.printf("\t%s\n", elem);
+            }
+            loaders.add(new Loader(sel));
+        }
+
+        // Print diagnostic info when the test hangs
+        Runtime.getRuntime().addShutdownHook(new Thread() {
+            public void run() {
+                boolean alive = false;
+                for (Loader l : loaders) {
+                    if (!l.isAlive())  continue;
+
+                    if (!alive) {
+                        System.out.println("Some threads are still alive:");
+                        alive = true;
+                    }
+
+                    System.out.println(l.getName());
+                    for (StackTraceElement elem : l.getStackTrace()) {
+                        System.out.println("\t"+elem.toString());
+                    }
+                }
+            }
+        });
+    }
+
+    public void run() throws Throwable {
+        init();
+        prepare();
+
+        for (Loader loader : loaders) {
+            loader.start();
+        }
+
+        l.await();
+
+        for (Loader loader : loaders) {
+            loader.join();
+        }
+    }
+
+    class Loader extends Thread {
+        List<String> classes;
+
+        public Loader(List<String> classes) {
+            this.classes = classes;
+            setDaemon(true);
+        }
+
+        @Override
+        public void run() {
+            try {
+                l.await();
+
+                for (String name : classes) {
+                    Class.forName(name).getName();
+                }
+            } catch (ClassNotFoundException | BrokenBarrierException | InterruptedException e) {
+                throw new Error(e);
+            }
+        }
+    }
+
+    final static String[] classNames = {
+            "java.lang.invoke.AbstractValidatingLambdaMetafactory",
+            "java.lang.invoke.BoundMethodHandle",
+            "java.lang.invoke.CallSite",
+            "java.lang.invoke.ConstantCallSite",
+            "java.lang.invoke.DirectMethodHandle",
+            "java.lang.invoke.InnerClassLambdaMetafactory",
+            "java.lang.invoke.InvokeDynamic",
+            "java.lang.invoke.InvokeGeneric",
+            "java.lang.invoke.InvokerBytecodeGenerator",
+            "java.lang.invoke.Invokers",
+            "java.lang.invoke.LambdaConversionException",
+            "java.lang.invoke.LambdaForm",
+            "java.lang.invoke.LambdaMetafactory",
+            "java.lang.invoke.MagicLambdaImpl",
+            "java.lang.invoke.MemberName",
+            "java.lang.invoke.MethodHandle",
+            "java.lang.invoke.MethodHandleImpl",
+            "java.lang.invoke.MethodHandleInfo",
+            "java.lang.invoke.MethodHandleNatives",
+            "java.lang.invoke.MethodHandleProxies",
+            "java.lang.invoke.MethodHandles",
+            "java.lang.invoke.MethodHandleStatics",
+            "java.lang.invoke.MethodType",
+            "java.lang.invoke.MethodTypeForm",
+            "java.lang.invoke.MutableCallSite",
+            "java.lang.invoke.SerializedLambda",
+            "java.lang.invoke.SimpleMethodHandle",
+            "java.lang.invoke.SwitchPoint",
+            "java.lang.invoke.TypeConvertingMethodAdapter",
+            "java.lang.invoke.VolatileCallSite",
+            "java.lang.invoke.WrongMethodTypeException"
+    };
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/jsr292/CreatesInterfaceDotEqualsCallInfo.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 8026124
+ * @summary Javascript file provoked assertion failure in linkResolver.cpp
+ *
+ * @run main/othervm CreatesInterfaceDotEqualsCallInfo
+ */
+
+public class CreatesInterfaceDotEqualsCallInfo {
+  public static void main(String[] args) throws java.io.IOException {
+    String[] jsargs = { System.getProperty("test.src", ".") +
+                        "/createsInterfaceDotEqualsCallInfo.js" };
+    jdk.nashorn.tools.Shell.main(System.in, System.out, System.err, jsargs);
+    System.out.println("PASS, did not crash running Javascript");
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/jsr292/createsInterfaceDotEqualsCallInfo.js	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+var path = new java.io.File("/Users/someone").toPath();
+path.toString();
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/jsr292/methodHandleExceptions/ByteClassLoader.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * A minimal classloader for loading bytecodes that could not result from
+ * properly compiled Java.
+ *
+ * @author dr2chase
+ */
+public class ByteClassLoader extends ClassLoader {
+    /**
+     * Loads and resolves the class named {@code name}, using
+     * {@code classData} for the definition.
+     *
+     * @param name binary name of the class to define
+     * @param classData class file bytes supplying the definition
+     * @return the defined, resolved class
+     */
+    public Class<?> loadBytes(String name, byte[] classData) {
+        Class<?> clazz = defineClass(name, classData, 0, classData.length);
+        resolveClass(clazz);
+        return clazz;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/jsr292/methodHandleExceptions/C.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * Test class -- implements I, which provides a default for m, but this class
+ * declares m abstract, which (should) hide the interface default and cause an
+ * AbstractMethodError to be thrown if it is called (calling it requires
+ * bytecode hacking or inconsistent compilation).
+ */
+public abstract class C implements I {
+    public abstract int m();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/jsr292/methodHandleExceptions/I.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+public interface I {
+    default public int m() { return 1; }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/jsr292/methodHandleExceptions/TestAMEnotNPE.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+import java.lang.reflect.InvocationTargetException;
+import jdk.internal.org.objectweb.asm.ClassWriter;
+import jdk.internal.org.objectweb.asm.Handle;
+import jdk.internal.org.objectweb.asm.MethodVisitor;
+import jdk.internal.org.objectweb.asm.Opcodes;
+
+/**
+ * @test
+ * @bug 8025260
+ * @summary Ensure that AbstractMethodError is thrown, not NullPointerException, through MethodHandles::jump_from_method_handle code path
+ *
+ * @compile -XDignore.symbol.file ByteClassLoader.java I.java C.java TestAMEnotNPE.java
+ * @run main/othervm TestAMEnotNPE
+ */
+
+public class TestAMEnotNPE implements Opcodes {
+
+    /**
+     * The bytes for D, a NOT abstract class extending abstract class C
+     * without supplying an implementation for abstract method m.
+     * There is a default method in the interface I, but it should lose to
+     * the abstract class.
+
+     class D extends C {
+        D() { super(); }
+        // does not define m
+     }
+
+     * @return class file bytes for D
+     * @throws Exception if bytecode generation fails
+     */
+    public static byte[] bytesForD() throws Exception {
+
+        ClassWriter cw = new ClassWriter(ClassWriter.COMPUTE_FRAMES|ClassWriter.COMPUTE_MAXS);
+        MethodVisitor mv;
+
+        cw.visit(V1_8, ACC_PUBLIC + ACC_SUPER, "D", null, "C", null);
+
+        {
+            mv = cw.visitMethod(ACC_PUBLIC, "<init>", "()V", null, null);
+            mv.visitCode();
+            mv.visitVarInsn(ALOAD, 0);
+            mv.visitMethodInsn(INVOKESPECIAL, "C", "<init>", "()V");
+            mv.visitInsn(RETURN);
+            mv.visitMaxs(0, 0);
+            mv.visitEnd();
+        }
+        cw.visitEnd();
+
+        return cw.toByteArray();
+    }
+
+
+    /**
+     * The bytecodes for an invokeExact of a particular methodHandle, I.m, invoked on a D
+
+        class T {
+           T() { super(); } // boring constructor
+           int test() {
+              MethodHandle mh = `I.m():int`;
+              D d = new D();
+              return mh.invokeExact(d); // Should explode here, AbstractMethodError
+           }
+        }
+
+     * @return class file bytes for T
+     * @throws Exception if bytecode generation fails
+     */
+    public static byte[] bytesForT() throws Exception {
+
+        ClassWriter cw = new ClassWriter(ClassWriter.COMPUTE_FRAMES|ClassWriter.COMPUTE_MAXS);
+        MethodVisitor mv;
+
+        cw.visit(V1_8, ACC_PUBLIC + ACC_SUPER, "T", null, "java/lang/Object", null);
+        {
+            mv = cw.visitMethod(ACC_PUBLIC, "<init>", "()V", null, null);
+            mv.visitCode();
+            mv.visitVarInsn(ALOAD, 0);
+            mv.visitMethodInsn(INVOKESPECIAL, "java/lang/Object", "<init>", "()V");
+            mv.visitInsn(RETURN);
+            mv.visitMaxs(0,0);
+            mv.visitEnd();
+        }
+        {
+            mv = cw.visitMethod(ACC_PUBLIC + ACC_STATIC, "test", "()I", null, null);
+            mv.visitCode();
+            mv.visitLdcInsn(new Handle(Opcodes.H_INVOKEINTERFACE, "I", "m", "()I"));
+            mv.visitTypeInsn(NEW, "D");
+            mv.visitInsn(DUP);
+            mv.visitMethodInsn(INVOKESPECIAL, "D", "<init>", "()V");
+            mv.visitMethodInsn(INVOKEVIRTUAL, "java/lang/invoke/MethodHandle", "invokeExact", "(LI;)I");
+            mv.visitInsn(IRETURN);
+            mv.visitMaxs(0,0);
+            mv.visitEnd();
+        }
+        cw.visitEnd();
+        return cw.toByteArray();
+    }
+
+    public static void main(String[] args) throws Throwable {
+        ByteClassLoader bcl = new ByteClassLoader();
+        Class<?> d = bcl.loadBytes("D", bytesForD());
+        Class<?> t = bcl.loadBytes("T", bytesForT());
+        try {
+            t.getMethod("test").invoke(null);
+            System.out.println("Expected AbstractMethodError wrapped in InvocationTargetException, saw no exception");
+            throw new Error("Missing expected exception");
+        } catch (InvocationTargetException e) {
+            Throwable th = e.getCause();
+            if (th instanceof AbstractMethodError) {
+                th.printStackTrace(System.out);
+                System.out.println("PASS, saw expected exception (AbstractMethodError, wrapped in InvocationTargetException).");
+            } else {
+                System.out.println("Expected AbstractMethodError wrapped in InvocationTargetException, saw " + th);
+                throw th;
+            }
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/print/PrintInlining.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8022585
+ * @summary VM crashes when ran with -XX:+PrintInlining
+ * @run main/othervm -Xcomp -XX:+UnlockDiagnosticVMOptions -XX:+PrintInlining PrintInlining
+ *
+ */
+
+public class PrintInlining {
+  public static void main(String[] args) {
+    System.out.println("Passed");
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/startup/SmallCodeCacheStartup.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8023014
+ * @summary Test ensures that there is no crash when compiler initialization fails
+ * @library /testlibrary
+ *
+ */
+import com.oracle.java.testlibrary.*;
+
+public class SmallCodeCacheStartup {
+  public static void main(String[] args) throws Exception {
+    ProcessBuilder pb;
+    OutputAnalyzer out;
+
+    pb = ProcessTools.createJavaProcessBuilder("-XX:ReservedCodeCacheSize=3m", "-XX:CICompilerCount=64", "-version");
+    out = new OutputAnalyzer(pb.start());
+    out.shouldContain("no space to run compiler");
+    out.shouldHaveExitValue(0);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/startup/StartupOutput.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8026949
+ * @summary Test ensures correct VM output during startup
+ * @library ../../testlibrary
+ *
+ */
+import com.oracle.java.testlibrary.*;
+
+public class StartupOutput {
+  public static void main(String[] args) throws Exception {
+    ProcessBuilder pb;
+    OutputAnalyzer out;
+
+    pb = ProcessTools.createJavaProcessBuilder("-Xint", "-XX:+DisplayVMOutputToStdout", "-version");
+    out = new OutputAnalyzer(pb.start());
+    out.shouldNotContain("no space to run compilers");
+
+    out.shouldHaveExitValue(0);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/tiered/CompLevelsTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * Abstract class for testing correctness of the compilation levels used.
+ *
+ * @author igor.ignatyev@oracle.com
+ */
+public abstract class CompLevelsTest extends CompilerWhiteBoxTest {
+    protected CompLevelsTest(TestCase testCase) {
+        super(testCase);
+        // to prevent inlining of #method
+        WHITE_BOX.testSetDontInlineMethod(method, true);
+    }
+
+    /**
+     * Checks that level is available.
+     * @param compLevel level to check
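+     * @param bci bytecode index to compile at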
+     */
+    protected void testAvailableLevel(int compLevel, int bci) {
+        if (IS_VERBOSE) {
+            System.out.printf("testAvailableLevel(level = %d, bci = %d)%n",
+                    compLevel, bci);
+        }
+        WHITE_BOX.enqueueMethodForCompilation(method, compLevel, bci);
+        checkCompiled();
+        checkLevel(compLevel, getCompLevel());
+        deoptimize();
+    }
+
+    /**
+     * Checks that level is unavailable.
+     * @param compLevel level to check
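+     * @param bci bytecode index to compile at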
+     */
+    protected void testUnavailableLevel(int compLevel, int bci) {
+        if (IS_VERBOSE) {
+            System.out.printf("testUnavailableLevel(level = %d, bci = %d)%n",
+                    compLevel, bci);
+        }
+        WHITE_BOX.enqueueMethodForCompilation(method, compLevel, bci);
+        checkNotCompiled();
+    }
+
+    /**
+     * Checks validity of compilation level.
+     * @param expected expected level
+     * @param actual actual level
+     */
+    protected void checkLevel(int expected, int actual) {
+        if (expected != actual) {
+            throw new RuntimeException("expected[" + expected + "] != actual["
+                    + actual + "]");
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/tiered/NonTieredLevelsTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.util.function.IntPredicate;
+
+/**
+ * @test NonTieredLevelsTest
+ * @library /testlibrary /testlibrary/whitebox /compiler/whitebox
+ * @build NonTieredLevelsTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:-TieredCompilation
+ *                   -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
+ *                   -XX:CompileCommand=compileonly,TestCase$Helper::*
+ *                   NonTieredLevelsTest
+ * @summary Verify that only one level can be used
+ * @author igor.ignatyev@oracle.com
+ */
+public class NonTieredLevelsTest extends CompLevelsTest {
+    private static final int AVAILABLE_COMP_LEVEL;
+    private static final IntPredicate IS_AVAILABLE_COMPLEVEL;
+    static {
+        String vmName = System.getProperty("java.vm.name");
+        if (vmName.endsWith(" Server VM")) {
+            AVAILABLE_COMP_LEVEL = COMP_LEVEL_FULL_OPTIMIZATION;
+            IS_AVAILABLE_COMPLEVEL = x -> x == COMP_LEVEL_FULL_OPTIMIZATION;
+        } else if (vmName.endsWith(" Client VM")
+                || vmName.endsWith(" Minimal VM")) {
+            AVAILABLE_COMP_LEVEL = COMP_LEVEL_SIMPLE;
+            IS_AVAILABLE_COMPLEVEL = x -> x >= COMP_LEVEL_SIMPLE
+                    && x <= COMP_LEVEL_FULL_PROFILE;
+        } else {
+            throw new RuntimeException("Unknown VM: " + vmName);
+        }
+    }
+
+    public static void main(String[] args) throws Exception {
+        if (TIERED_COMPILATION) {
+            System.err.println("Test isn't applicable w/ enabled "
+                    + "TieredCompilation. Skip test.");
+            return;
+        }
+        for (TestCase test : TestCase.values()) {
+            new NonTieredLevelsTest(test).runTest();
+        }
+    }
+
+    private NonTieredLevelsTest(TestCase testCase) {
+        super(testCase);
+        // to prevent inlining of #method
+        WHITE_BOX.testSetDontInlineMethod(method, true);
+    }
+
+    @Override
+    protected void test() throws Exception {
+        checkNotCompiled();
+        compile();
+        checkCompiled();
+
+        int compLevel = getCompLevel();
+        checkLevel(AVAILABLE_COMP_LEVEL, compLevel);
+        int bci = WHITE_BOX.getMethodEntryBci(method);
+        deoptimize();
+        if (!testCase.isOsr) {
+            for (int level = 1; level <= COMP_LEVEL_MAX; ++level) {
+                if (IS_AVAILABLE_COMPLEVEL.test(level)) {
+                    testAvailableLevel(level, bci);
+                } else {
+                    testUnavailableLevel(level, bci);
+                }
+            }
+        } else {
+            System.out.println("skip other levels testing in OSR");
+            testAvailableLevel(AVAILABLE_COMP_LEVEL, bci);
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/tiered/TieredLevelsTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test TieredLevelsTest
+ * @library /testlibrary /testlibrary/whitebox /compiler/whitebox
+ * @build TieredLevelsTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+TieredCompilation
+ *                   -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
+ *                   -XX:CompileCommand=compileonly,TestCase$Helper::*
+ *                   TieredLevelsTest
+ * @summary Verify that all levels &lt; 'TieredStopAtLevel' can be used
+ * @author igor.ignatyev@oracle.com
+ */
+public class TieredLevelsTest extends CompLevelsTest {
+    public static void main(String[] args) throws Exception {
+        if (!TIERED_COMPILATION) {
+            System.err.println("Test isn't applicable w/ disabled "
+                    + "TieredCompilation. Skip test.");
+            return;
+        }
+        for (TestCase test : TestCase.values()) {
+            new TieredLevelsTest(test).runTest();
+        }
+    }
+
+    private TieredLevelsTest(TestCase testCase) {
+        super(testCase);
+        // to prevent inlining of #method
+        WHITE_BOX.testSetDontInlineMethod(method, true);
+    }
+
+    @Override
+    protected void test() throws Exception {
+        checkNotCompiled();
+        compile();
+        checkCompiled();
+
+        int compLevel = getCompLevel();
+        if (compLevel > TIERED_STOP_AT_LEVEL) {
+            throw new RuntimeException("method.compLevel[" + compLevel
+                    + "] > TieredStopAtLevel [" + TIERED_STOP_AT_LEVEL + "]");
+        }
+        int bci = WHITE_BOX.getMethodEntryBci(method);
+        deoptimize();
+
+        for (int testedTier = 1; testedTier <= TIERED_STOP_AT_LEVEL;
+                ++testedTier) {
+            testAvailableLevel(testedTier, bci);
+        }
+        for (int testedTier = TIERED_STOP_AT_LEVEL + 1;
+                testedTier <= COMP_LEVEL_MAX; ++testedTier) {
+            testUnavailableLevel(testedTier, bci);
+        }
+    }
+
+
+    @Override
+    protected void checkLevel(int expected, int actual) {
+        if (expected == COMP_LEVEL_FULL_PROFILE
+                && actual == COMP_LEVEL_LIMITED_PROFILE) {
+            // for a simple method, full_profile may be replaced by limited_profile
+            return;
+        }
+        super.checkLevel(expected, actual);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/types/TypeSpeculation.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,428 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8024070
+ * @summary Test that type speculation doesn't cause incorrect execution
+ * @run main/othervm -XX:-UseOnStackReplacement -XX:-BackgroundCompilation -XX:TypeProfileLevel=222 TypeSpeculation
+ *
+ */
+
+public class TypeSpeculation {
+
+    interface I {
+    }
+
+    static class A {
+        int m() {
+            return 1;
+        }
+    }
+
+    static class B extends A implements I {
+        int m() {
+            return 2;
+        }
+    }
+
+    static class C extends B {
+        int m() {
+            return 3;
+        }
+    }
+
+    static int test1_invokevirtual(A a) {
+        return a.m();
+    }
+
+    static int test1_1(A a) {
+        return test1_invokevirtual(a);
+    }
+
+    static boolean test1() {
+        A a = new A();
+        B b = new B();
+        C c = new C();
+
+        // pollute profile at test1_invokevirtual to make sure the
+        // compiler cannot rely on it
+        for (int i = 0; i < 5000; i++) {
+            test1_invokevirtual(a);
+            test1_invokevirtual(b);
+            test1_invokevirtual(c);
+        }
+
+        // profiling + speculation should make test1_invokevirtual
+        // inline A.m() with a guard
+        for (int i = 0; i < 20000; i++) {
+            int res = test1_1(b);
+            if (res != b.m()) {
+                System.out.println("test1 failed with class B");
+                return false;
+            }
+        }
+        // check that the guard works as expected by passing a
+        // different type
+        int res = test1_1(a);
+        if (res != a.m()) {
+            System.out.println("test1 failed with class A");
+            return false;
+        }
+        return true;
+    }
+
+    static int test2_invokevirtual(A a) {
+        return a.m();
+    }
+
+    static int test2_1(A a, boolean t) {
+        A aa;
+        if (t) {
+            aa = (B)a;
+        } else {
+            aa = a;
+        }
+        // if a of type B is passed to test2_1, the static type of aa
+        // here is no better than A but the profiled type is B so this
+        // should inline
+        return test2_invokevirtual(aa);
+    }
+
+    static boolean test2() {
+        A a = new A();
+        B b = new B();
+        C c = new C();
+
+        // pollute profile at test2_invokevirtual to make sure the
+        // compiler cannot rely on it
+        for (int i = 0; i < 5000; i++) {
+            test2_invokevirtual(a);
+            test2_invokevirtual(b);
+            test2_invokevirtual(c);
+        }
+
+        // profiling + speculation should make test2_invokevirtual
+        // inline A.m() with a guard
+        for (int i = 0; i < 20000; i++) {
+            int res = test2_1(b, (i % 2) == 0);
+            if (res != b.m()) {
+                System.out.println("test2 failed with class B");
+                return false;
+            }
+        }
+        // check that the guard works as expected by passing a
+        // different type
+        int res = test2_1(a, false);
+        if (res != a.m()) {
+            System.out.println("test2 failed with class A");
+            return false;
+        }
+        return true;
+    }
+
+    static int test3_invokevirtual(A a) {
+        return a.m();
+    }
+
+    static void test3_2(A a) {
+    }
+
+    static int test3_1(A a, int i) {
+        if (i == 0) {
+            return 0;
+        }
+        // If we get here and a is of type B but the parameter profile
+        // of test3_1 is polluted, both branches of the if below should
+        // still have argument profiling that tells us a's type, so the
+        // virtual call should be inlined
+        if (i == 1) {
+            test3_2(a);
+        } else {
+            test3_2(a);
+        }
+        return test3_invokevirtual(a);
+    }
+
+    static boolean test3() {
+        A a = new A();
+        B b = new B();
+        C c = new C();
+
+        // pollute profile at test3_invokevirtual and test3_1 to make
+        // sure the compiler cannot rely on it
+        for (int i = 0; i < 3000; i++) {
+            test3_invokevirtual(a);
+            test3_invokevirtual(b);
+            test3_invokevirtual(c);
+            test3_1(a, 0);
+            test3_1(b, 0);
+        }
+
+        // profiling + speculation should make test3_invokevirtual
+        // inline A.m() with a guard
+        for (int i = 0; i < 20000; i++) {
+            int res = test3_1(b, (i % 2) + 1);
+            if (res != b.m()) {
+                System.out.println("test3 failed with class B");
+                return false;
+            }
+        }
+        // check that the guard works as expected by passing a
+        // different type
+        int res = test3_1(a, 1);
+        if (res != a.m()) {
+            System.out.println("test3 failed with class A");
+            return false;
+        }
+        return true;
+    }
+
+    // Mix 2 incompatible profiled types
+    static int test4_invokevirtual(A a) {
+        return a.m();
+    }
+
+    static void test4_2(A a) {
+    }
+
+    static int test4_1(A a, boolean b) {
+        if (b) {
+            test4_2(a);
+        } else {
+            test4_2(a);
+        }
+        // shouldn't inline
+        return test4_invokevirtual(a);
+    }
+
+    static boolean test4() {
+        A a = new A();
+        B b = new B();
+        C c = new C();
+
+        // pollute profile at test4_invokevirtual to make sure the
+        // compiler cannot rely on it
+        for (int i = 0; i < 3000; i++) {
+            test4_invokevirtual(a);
+            test4_invokevirtual(b);
+            test4_invokevirtual(c);
+        }
+
+        for (int i = 0; i < 20000; i++) {
+            if ((i % 2) == 0) {
+                int res = test4_1(a, true);
+                if (res != a.m()) {
+                    System.out.println("test4 failed with class A");
+                    return false;
+                }
+            } else {
+                int res = test4_1(b, false);
+                if (res != b.m()) {
+                    System.out.println("test4 failed with class B");
+                    return false;
+                }
+            }
+        }
+        return true;
+    }
+
+    // Mix one profiled type with an incompatible type
+    static int test5_invokevirtual(A a) {
+        return a.m();
+    }
+
+    static void test5_2(A a) {
+    }
+
+    static int test5_1(A a, boolean b) {
+        if (b) {
+            test5_2(a);
+        } else {
+            A aa = (B)a;
+        }
+        // shouldn't inline
+        return test5_invokevirtual(a);
+    }
+
+    static boolean test5() {
+        A a = new A();
+        B b = new B();
+        C c = new C();
+
+        // pollute profile at test5_invokevirtual to make sure the
+        // compiler cannot rely on it
+        for (int i = 0; i < 3000; i++) {
+            test5_invokevirtual(a);
+            test5_invokevirtual(b);
+            test5_invokevirtual(c);
+        }
+
+        for (int i = 0; i < 20000; i++) {
+            if ((i % 2) == 0) {
+                int res = test5_1(a, true);
+                if (res != a.m()) {
+                    System.out.println("test5 failed with class A");
+                    return false;
+                }
+            } else {
+                int res = test5_1(b, false);
+                if (res != b.m()) {
+                    System.out.println("test5 failed with class B");
+                    return false;
+                }
+            }
+        }
+        return true;
+    }
+
+    // Mix incompatible profiled types
+    static void test6_2(Object o) {
+    }
+
+    static Object test6_1(Object o, boolean b) {
+        if (b) {
+            test6_2(o);
+        } else {
+            test6_2(o);
+        }
+        return o;
+    }
+
+    static boolean test6() {
+        A a = new A();
+        A[] aa = new A[10];
+
+        for (int i = 0; i < 20000; i++) {
+            if ((i % 2) == 0) {
+                test6_1(a, true);
+            } else {
+                test6_1(aa, false);
+            }
+        }
+        return true;
+    }
+
+    // Mix a profiled type with an incompatible type
+    static void test7_2(Object o) {
+    }
+
+    static Object test7_1(Object o, boolean b) {
+        if (b) {
+            test7_2(o);
+        } else {
+            Object oo = (A[])o;
+        }
+        return o;
+    }
+
+    static boolean test7() {
+        A a = new A();
+        A[] aa = new A[10];
+
+        for (int i = 0; i < 20000; i++) {
+            if ((i % 2) == 0) {
+                test7_1(a, true);
+            } else {
+                test7_1(aa, false);
+            }
+        }
+        return true;
+    }
+
+    // Mix a profiled type with an interface
+    static void test8_2(Object o) {
+    }
+
+    static I test8_1(Object o) {
+        test8_2(o);
+        return (I)o;
+    }
+
+    static boolean test8() {
+        A a = new A();
+        B b = new B();
+        C c = new C();
+
+        for (int i = 0; i < 20000; i++) {
+            test8_1(b);
+        }
+        return true;
+    }
+
+    // Mix a profiled type with a constant
+    static void test9_2(Object o) {
+    }
+
+    static Object test9_1(Object o, boolean b) {
+        Object oo;
+        if (b) {
+            test9_2(o);
+            oo = o;
+        } else {
+            oo = "some string";
+        }
+        return oo;
+    }
+
+    static boolean test9() {
+        A a = new A();
+
+        for (int i = 0; i < 20000; i++) {
+            if ((i % 2) == 0) {
+                test9_1(a, true);
+            } else {
+                test9_1(a, false);
+            }
+        }
+        return true;
+    }
+
+    public static void main(String[] args) {
+        boolean success = true;
+
+        success = test1() && success;
+
+        success = test2() && success;
+
+        success = test3() && success;
+
+        success = test4() && success;
+
+        success = test5() && success;
+
+        success = test6() && success;
+
+        success = test7() && success;
+
+        success = test8() && success;
+
+        success = test9() && success;
+
+        if (success) {
+            System.out.println("TEST PASSED");
+        } else {
+            throw new RuntimeException("TEST FAILED: erroneous bound check elimination");
+        }
+    }
+}
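
The "inline with a guard" behavior the tests above exercise can be pictured
as the compiler replacing the virtual dispatch with a speculative type check
plus a direct, inlined call. A rough Java-level sketch of the transformation
for test1_invokevirtual when the argument is speculated to be exactly B (the
method name and the fallback path are illustrative, not HotSpot code):

    // Conceptual sketch only: what the compiled code amounts to after
    // speculating that the incoming argument is exactly of class B.
    static int test1_invokevirtual_speculated(A a) {
        if (a.getClass() == B.class) {   // speculative type guard
            return 2;                    // body of B.m(), inlined
        } else {
            // Guard failed: the real VM deoptimizes and re-enters the
            // interpreter here; this sketch just falls back to the
            // ordinary virtual dispatch.
            return a.m();
        }
    }

Passing an A, as test1_1(a) does after the hot loop, is exactly what trips
the guard and verifies that execution remains correct.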
--- a/test/compiler/whitebox/CompilerWhiteBoxTest.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/test/compiler/whitebox/CompilerWhiteBoxTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -74,11 +74,13 @@
     protected static final int THRESHOLD;
     /** count of invocations to trigger OSR compilation */
     protected static final long BACKEDGE_THRESHOLD;
+    /** Value of {@code java.vm.info} (interpreted|mixed|comp mode) */
+    protected static final String MODE
+            = System.getProperty("java.vm.info");
 
     static {
         if (TIERED_COMPILATION) {
-            THRESHOLD = 150000;
-            BACKEDGE_THRESHOLD = 0xFFFFFFFFL;
+            BACKEDGE_THRESHOLD = THRESHOLD = 150000;
         } else {
             THRESHOLD = COMPILE_THRESHOLD;
             BACKEDGE_THRESHOLD = COMPILE_THRESHOLD * Long.parseLong(getVMOption(
@@ -202,7 +204,7 @@
         if (WHITE_BOX.getMethodCompilationLevel(method, true) != 0) {
             throw new RuntimeException(method + " osr_comp_level must be == 0");
         }
-    }
+   }
 
     /**
      * Checks, that {@linkplain #method} is compiled.
@@ -361,7 +363,7 @@
     /** OSR constructor test case */
     OSR_CONSTRUCTOR_TEST(Helper.OSR_CONSTRUCTOR,
             Helper.OSR_CONSTRUCTOR_CALLABLE, true),
-     /** OSR method test case */
+    /** OSR method test case */
     OSR_METOD_TEST(Helper.OSR_METHOD, Helper.OSR_METHOD_CALLABLE, true),
     /** OSR static method test case */
     OSR_STATIC_TEST(Helper.OSR_STATIC, Helper.OSR_STATIC_CALLABLE, true);
@@ -370,7 +372,7 @@
     final Executable executable;
     /** object to invoke {@linkplain #executable} */
     final Callable<Integer> callable;
-   /** flag for OSR test case */
+    /** flag for OSR test case */
     final boolean isOsr;
 
     private TestCase(Executable executable, Callable<Integer> callable,
--- a/test/compiler/whitebox/DeoptimizeAllTest.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/test/compiler/whitebox/DeoptimizeAllTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -53,6 +53,12 @@
      */
     @Override
     protected void test() throws Exception {
+        if (testCase.isOsr && CompilerWhiteBoxTest.MODE.startsWith(
+                "compiled ")) {
+          System.err.printf("Warning: %s is not applicable in %s%n",
+                testCase.name(), CompilerWhiteBoxTest.MODE);
+          return;
+        }
         compile();
         checkCompiled();
         WHITE_BOX.deoptimizeAll();
--- a/test/compiler/whitebox/DeoptimizeMethodTest.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/test/compiler/whitebox/DeoptimizeMethodTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -53,6 +53,12 @@
      */
     @Override
     protected void test() throws Exception {
+        if (testCase.isOsr && CompilerWhiteBoxTest.MODE.startsWith(
+                "compiled ")) {
+          System.err.printf("Warning: %s is not applicable in %s%n",
+                testCase.name(), CompilerWhiteBoxTest.MODE);
+          return;
+        }
         compile();
         checkCompiled();
         deoptimize();
--- a/test/compiler/whitebox/EnqueueMethodForCompilationTest.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/test/compiler/whitebox/EnqueueMethodForCompilationTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -70,12 +70,10 @@
 
         int compLevel = getCompLevel();
         int bci = WHITE_BOX.getMethodEntryBci(method);
-        System.out.println("bci = " + bci);
-        printInfo();
         deoptimize();
-        printInfo();
         checkNotCompiled();
-        printInfo();
+        WHITE_BOX.clearMethodState(method);
+
         WHITE_BOX.enqueueMethodForCompilation(method, compLevel, bci);
         checkCompiled();
         deoptimize();
--- a/test/compiler/whitebox/IsMethodCompilableTest.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/test/compiler/whitebox/IsMethodCompilableTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -68,6 +68,12 @@
      */
     @Override
     protected void test() throws Exception {
+        if (testCase.isOsr && CompilerWhiteBoxTest.MODE.startsWith(
+                "compiled ")) {
+          System.err.printf("Warning: %s is not applicable in %s%n",
+                testCase.name(), CompilerWhiteBoxTest.MODE);
+          return;
+        }
         if (!isCompilable()) {
             throw new RuntimeException(method + " must be compilable");
         }
--- a/test/compiler/whitebox/MakeMethodNotCompilableTest.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/test/compiler/whitebox/MakeMethodNotCompilableTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -62,6 +62,12 @@
      */
     @Override
     protected void test() throws Exception {
+        if (testCase.isOsr && CompilerWhiteBoxTest.MODE.startsWith(
+                "compiled ")) {
+          System.err.printf("Warning: %s is not applicable in %s%n",
+                testCase.name(), CompilerWhiteBoxTest.MODE);
+          return;
+        }
         checkNotCompiled();
         if (!isCompilable()) {
             throw new RuntimeException(method + " must be compilable");
--- a/test/gc/7168848/HumongousAlloc.java	Wed Oct 16 10:52:41 2013 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,74 +0,0 @@
-/*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/*
- * @test Humongous.java
- * @bug 7168848
- * @summary G1: humongous object allocations should initiate marking cycles when necessary
- * @run main/othervm -Xms100m -Xmx100m -XX:+PrintGC -XX:G1HeapRegionSize=1m -XX:+UseG1GC  HumongousAlloc
- *
- */
-import java.lang.management.GarbageCollectorMXBean;
-import java.lang.management.ManagementFactory;
-import java.util.List;
-
-public class HumongousAlloc {
-
-    public static byte[] dummy;
-    private static int sleepFreq = 40;
-    private static int sleepTime = 1000;
-    private static double size = 0.75;
-    private static int iterations = 50;
-    private static int MB = 1024 * 1024;
-
-    public static void allocate(int size, int sleepTime, int sleepFreq) throws InterruptedException {
-        System.out.println("Will allocate objects of size: " + size
-                + " bytes and sleep for " + sleepTime
-                + " ms after every " + sleepFreq + "th allocation.");
-        int count = 0;
-        while (count < iterations) {
-            for (int i = 0; i < sleepFreq; i++) {
-                dummy = new byte[size - 16];
-            }
-            Thread.sleep(sleepTime);
-            count++;
-        }
-    }
-
-    public static void main(String[] args) throws InterruptedException {
-        allocate((int) (size * MB), sleepTime, sleepFreq);
-        List<GarbageCollectorMXBean> collectors = ManagementFactory.getGarbageCollectorMXBeans();
-        for (GarbageCollectorMXBean collector : collectors) {
-            if (collector.getName().contains("G1 Old")) {
-               long count = collector.getCollectionCount();
-                if (count > 0) {
-                    throw new RuntimeException("Failed: FullGCs should not have happened. The number of FullGC run is " + count);
-                }
-                else {
-                    System.out.println("Passed.");
-                }
-            }
-        }
-    }
-}
-
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/TestObjectAlignment.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test TestObjectAlignment
+ * @key gc
+ * @bug 8021823
+ * @summary G1: Concurrent marking crashes with -XX:ObjectAlignmentInBytes>=32 in 64bit VMs
+ * @library /testlibrary
+ * @run main/othervm TestObjectAlignment -Xmx20M -XX:+ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=8
+ * @run main/othervm TestObjectAlignment -Xmx20M -XX:+ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=16
+ * @run main/othervm TestObjectAlignment -Xmx20M -XX:+ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=32
+ * @run main/othervm TestObjectAlignment -Xmx20M -XX:+ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=64
+ * @run main/othervm TestObjectAlignment -Xmx20M -XX:+ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=128
+ * @run main/othervm TestObjectAlignment -Xmx20M -XX:+ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=256
+ * @run main/othervm TestObjectAlignment -Xmx20M -XX:-ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=8
+ * @run main/othervm TestObjectAlignment -Xmx20M -XX:-ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=16
+ * @run main/othervm TestObjectAlignment -Xmx20M -XX:-ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=32
+ * @run main/othervm TestObjectAlignment -Xmx20M -XX:-ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=64
+ * @run main/othervm TestObjectAlignment -Xmx20M -XX:-ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=128
+ * @run main/othervm TestObjectAlignment -Xmx20M -XX:-ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=256
+ */
+
+import com.oracle.java.testlibrary.ProcessTools;
+import com.oracle.java.testlibrary.OutputAnalyzer;
+
+public class TestObjectAlignment {
+
+  public static byte[] garbage;
+
+  private static boolean runsOn32bit() {
+    return System.getProperty("sun.arch.data.model").equals("32");
+  }
+
+  public static void main(String[] args) throws Exception {
+    if (runsOn32bit()) {
+      // 32-bit VMs do not allow setting ObjectAlignmentInBytes, so there is nothing to test; the test still runs, so simply return.
+      return;
+    }
+    for (int i = 0; i < 10; i++) {
+      garbage = new byte[1000];
+      System.gc();
+    }
+  }
+}
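
To observe which alignment value actually took effect on a 64-bit VM, the
final flag value can be read back at runtime. A minimal sketch using the same
sun.management/com.sun.management APIs that TestUseCompressedOopsErgoTools
relies on further below (the class name is illustrative):

    import sun.management.ManagementFactoryHelper;
    import com.sun.management.HotSpotDiagnosticMXBean;
    import com.sun.management.VMOption;

    public class PrintObjectAlignment {
        public static void main(String[] args) {
            // Query the effective value of ObjectAlignmentInBytes.
            HotSpotDiagnosticMXBean diagnostic =
                ManagementFactoryHelper.getDiagnosticMXBean();
            VMOption option = diagnostic.getVMOption("ObjectAlignmentInBytes");
            System.out.println("ObjectAlignmentInBytes = " + option.getValue());
        }
    }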
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/TestSystemGC.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,46 @@
+/*
+* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+*
+* This code is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License version 2 only, as
+* published by the Free Software Foundation.
+*
+* This code is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+* version 2 for more details (a copy is included in the LICENSE file that
+* accompanied this code).
+*
+* You should have received a copy of the GNU General Public License version
+* 2 along with this work; if not, write to the Free Software Foundation,
+* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+*
+* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+* or visit www.oracle.com if you need additional information or have any
+* questions.
+*/
+
+/*
+ * @test TestSystemGC
+ * @key gc
+ * @summary Runs System.gc() with different flags.
+ * @run main/othervm TestSystemGC
+ * @run main/othervm -XX:+UseSerialGC TestSystemGC
+ * @run main/othervm -XX:+UseParNewGC TestSystemGC
+ * @run main/othervm -XX:+UseParallelGC TestSystemGC
+ * @run main/othervm -XX:+UseParallelGC -XX:-UseParallelOldGC TestSystemGC
+ * @run main/othervm -XX:+UseConcMarkSweepGC TestSystemGC
+ * @run main/othervm -XX:+UseConcMarkSweepGC -XX:+ExplicitGCInvokesConcurrent TestSystemGC
+ * @run main/othervm -XX:+UseConcMarkSweepGC -XX:+ExplicitGCInvokesConcurrent -XX:-UseParNewGC TestSystemGC
+ * @run main/othervm -XX:+UseG1GC TestSystemGC
+ * @run main/othervm -XX:+UseG1GC -XX:+ExplicitGCInvokesConcurrent TestSystemGC
+ * @run main/othervm -XX:+UseLargePages TestSystemGC
+ * @run main/othervm -XX:+UseLargePages -XX:+UseLargePagesInMetaspace TestSystemGC
+ */
+
+public class TestSystemGC {
+  public static void main(String args[]) throws Exception {
+    System.gc();
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/arguments/TestAlignmentToUseLargePages.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test TestAlignmentToUseLargePages
+ * @summary All parallel GC variants may use large pages without requiring that the
+ * heap alignment be large-page aligned. Other collectors also need to start up with odd-sized heaps.
+ * @bug 8024396
+ * @key gc
+ * @key regression
+ * @run main/othervm -Xms7M -Xmx9M -XX:+UseParallelGC -XX:-UseParallelOldGC -XX:+UseLargePages TestAlignmentToUseLargePages
+ * @run main/othervm -Xms7M -Xmx9M -XX:+UseParallelGC -XX:-UseParallelOldGC -XX:-UseLargePages TestAlignmentToUseLargePages
+ * @run main/othervm -Xms7M -Xmx9M -XX:+UseParallelGC -XX:+UseParallelOldGC -XX:+UseLargePages TestAlignmentToUseLargePages
+ * @run main/othervm -Xms7M -Xmx9M -XX:+UseParallelGC -XX:+UseParallelOldGC -XX:-UseLargePages TestAlignmentToUseLargePages
+ * @run main/othervm -Xms7M -Xmx9M -XX:+UseSerialGC -XX:+UseLargePages TestAlignmentToUseLargePages
+ * @run main/othervm -Xms7M -Xmx9M -XX:+UseSerialGC -XX:-UseLargePages TestAlignmentToUseLargePages
+ * @run main/othervm -Xms7M -Xmx9M -XX:+UseConcMarkSweepGC -XX:+UseLargePages TestAlignmentToUseLargePages
+ * @run main/othervm -Xms7M -Xmx9M -XX:+UseConcMarkSweepGC -XX:-UseLargePages TestAlignmentToUseLargePages
+ * @run main/othervm -Xms7M -Xmx9M -XX:+UseG1GC -XX:+UseLargePages TestAlignmentToUseLargePages
+ * @run main/othervm -Xms7M -Xmx9M -XX:+UseG1GC -XX:-UseLargePages TestAlignmentToUseLargePages
+ */
+
+public class TestAlignmentToUseLargePages {
+  public static void main(String args[]) throws Exception {
+    // nothing to do
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/arguments/TestCompressedClassFlags.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import com.oracle.java.testlibrary.*;
+
+/*
+ * @test
+ * @bug 8015107
+ * @summary Tests that VM prints a warning when -XX:CompressedClassSpaceSize
+ *          is used together with -XX:-UseCompressedClassPointers
+ * @library /testlibrary
+ */
+public class TestCompressedClassFlags {
+    public static void main(String[] args) throws Exception {
+        if (Platform.is64bit()) {
+            OutputAnalyzer output = runJava("-XX:CompressedClassSpaceSize=1g",
+                                            "-XX:-UseCompressedClassPointers",
+                                            "-version");
+            output.shouldContain("warning");
+            output.shouldNotContain("error");
+            output.shouldHaveExitValue(0);
+        }
+    }
+
+    private static OutputAnalyzer runJava(String ... args) throws Exception {
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(args);
+        return new OutputAnalyzer(pb.start());
+    }
+}
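
For reference, the scenario this test drives amounts to running, on a 64-bit
VM:

    java -XX:CompressedClassSpaceSize=1g -XX:-UseCompressedClassPointers -version

and asserting that the output contains "warning", contains no "error", and
that the VM still exits with status 0. The exact wording of the warning is
deliberately not checked.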
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/arguments/TestHeapFreeRatio.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestHeapFreeRatio
+ * @key gc
+ * @bug 8025661
+ * @summary Test parsing of -Xminf and -Xmaxf
+ * @library /testlibrary
+ * @run main/othervm TestHeapFreeRatio
+ */
+
+import com.oracle.java.testlibrary.*;
+
+public class TestHeapFreeRatio {
+
+  enum Validation {
+    VALID,
+    MIN_INVALID,
+    MAX_INVALID,
+    COMBINATION_INVALID
+  }
+
+  private static void testMinMaxFreeRatio(String min, String max, Validation type) throws Exception {
+    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+        "-Xminf" + min,
+        "-Xmaxf" + max,
+        "-version");
+    OutputAnalyzer output = new OutputAnalyzer(pb.start());
+
+    switch (type) {
+    case VALID:
+      output.shouldNotContain("Error");
+      output.shouldHaveExitValue(0);
+      break;
+    case MIN_INVALID:
+      output.shouldContain("Bad min heap free percentage size: -Xminf" + min);
+      output.shouldContain("Error");
+      output.shouldHaveExitValue(1);
+      break;
+    case MAX_INVALID:
+      output.shouldContain("Bad max heap free percentage size: -Xmaxf" + max);
+      output.shouldContain("Error");
+      output.shouldHaveExitValue(1);
+      break;
+    case COMBINATION_INVALID:
+      output.shouldContain("must be less than or equal to MaxHeapFreeRatio");
+      output.shouldContain("Error");
+      output.shouldHaveExitValue(1);
+      break;
+    default:
+      throw new IllegalStateException("Must specify expected validation type");
+    }
+
+    System.out.println(output.getOutput());
+  }
+
+  public static void main(String args[]) throws Exception {
+    testMinMaxFreeRatio( "0.1", "0.5", Validation.VALID);
+    testMinMaxFreeRatio(  ".1",  ".5", Validation.VALID);
+    testMinMaxFreeRatio( "0.5", "0.5", Validation.VALID);
+
+    testMinMaxFreeRatio("-0.1", "0.5", Validation.MIN_INVALID);
+    testMinMaxFreeRatio( "1.1", "0.5", Validation.MIN_INVALID);
+    testMinMaxFreeRatio("=0.1", "0.5", Validation.MIN_INVALID);
+    testMinMaxFreeRatio("0.1f", "0.5", Validation.MIN_INVALID);
+    testMinMaxFreeRatio(
+                     "INVALID", "0.5", Validation.MIN_INVALID);
+    testMinMaxFreeRatio(
+                  "2147483647", "0.5", Validation.MIN_INVALID);
+
+    testMinMaxFreeRatio( "0.1", "-0.5", Validation.MAX_INVALID);
+    testMinMaxFreeRatio( "0.1",  "1.5", Validation.MAX_INVALID);
+    testMinMaxFreeRatio( "0.1", "0.5f", Validation.MAX_INVALID);
+    testMinMaxFreeRatio( "0.1", "=0.5", Validation.MAX_INVALID);
+    testMinMaxFreeRatio(
+                     "0.1",  "INVALID", Validation.MAX_INVALID);
+    testMinMaxFreeRatio(
+                   "0.1", "2147483647", Validation.MAX_INVALID);
+
+    testMinMaxFreeRatio( "0.5",  "0.1", Validation.COMBINATION_INVALID);
+    testMinMaxFreeRatio(  ".5",  ".10", Validation.COMBINATION_INVALID);
+    testMinMaxFreeRatio("0.12","0.100", Validation.COMBINATION_INVALID);
+  }
+}
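
A sketch of the parsing the test expects the VM to perform for -Xminf/-Xmaxf
(illustrative only; the real logic lives in the VM's argument processing, and
the exact accepted grammar is an assumption inferred from the cases above):

    static int parseFreeRatio(String arg) {
        // Stricter than Double.parseDouble: the test expects "0.5f",
        // "=0.5", "-0.5" and "2147483647" to all be rejected.
        if (!arg.matches("\\d*\\.\\d+")) {
            throw new IllegalArgumentException("Bad heap free percentage size: " + arg);
        }
        double v = Double.parseDouble(arg);
        if (v < 0.0 || v > 1.0) {
            throw new IllegalArgumentException("Bad heap free percentage size: " + arg);
        }
        return (int) (v * 100);   // e.g. -Xminf0.1 -> MinHeapFreeRatio = 10
    }

Note the last COMBINATION_INVALID case: "0.12" and "0.100" parse to 12 and 10
respectively, so the minimum exceeds the maximum even though the raw strings
might suggest otherwise.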
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/arguments/TestUseCompressedOopsErgo.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,50 @@
+/*
+* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+*
+* This code is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License version 2 only, as
+* published by the Free Software Foundation.
+*
+* This code is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+* version 2 for more details (a copy is included in the LICENSE file that
+* accompanied this code).
+*
+* You should have received a copy of the GNU General Public License version
+* 2 along with this work; if not, write to the Free Software Foundation,
+* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+*
+* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+* or visit www.oracle.com if you need additional information or have any
+* questions.
+*/
+
+/*
+ * @test TestUseCompressedOopsErgo
+ * @key gc
+ * @bug 8010722
+ * @summary Tests ergonomics for UseCompressedOops.
+ * @library /testlibrary /testlibrary/whitebox
+ * @build TestUseCompressedOopsErgo TestUseCompressedOopsErgoTools
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm TestUseCompressedOopsErgo -XX:+UseG1GC
+ * @run main/othervm TestUseCompressedOopsErgo -XX:+UseParallelGC
+ * @run main/othervm TestUseCompressedOopsErgo -XX:+UseParallelGC -XX:-UseParallelOldGC
+ * @run main/othervm TestUseCompressedOopsErgo -XX:+UseConcMarkSweepGC
+ * @run main/othervm TestUseCompressedOopsErgo -XX:+UseSerialGC
+ */
+
+public class TestUseCompressedOopsErgo {
+
+  public static void main(String args[]) throws Exception {
+    if (!TestUseCompressedOopsErgoTools.is64bitVM()) {
+      // this test is relevant for 64 bit VMs only
+      return;
+    }
+    final String[] gcFlags = args;
+    TestUseCompressedOopsErgoTools.checkCompressedOopsErgo(gcFlags);
+  }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/arguments/TestUseCompressedOopsErgoTools.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,177 @@
+/*
+* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+*
+* This code is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License version 2 only, as
+* published by the Free Software Foundation.
+*
+* This code is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+* version 2 for more details (a copy is included in the LICENSE file that
+* accompanied this code).
+*
+* You should have received a copy of the GNU General Public License version
+* 2 along with this work; if not, write to the Free Software Foundation,
+* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+*
+* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+* or visit www.oracle.com if you need additional information or have any
+* questions.
+*/
+
+import sun.management.ManagementFactoryHelper;
+import com.sun.management.HotSpotDiagnosticMXBean;
+import com.sun.management.VMOption;
+
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.ArrayList;
+import java.util.Arrays;
+
+import com.oracle.java.testlibrary.*;
+import sun.hotspot.WhiteBox;
+
+class DetermineMaxHeapForCompressedOops {
+  public static void main(String[] args) throws Exception {
+    WhiteBox wb = WhiteBox.getWhiteBox();
+    System.out.print(wb.getCompressedOopsMaxHeapSize());
+  }
+}
+
+class TestUseCompressedOopsErgoTools {
+
+  private static long getCompressedClassSpaceSize() {
+    HotSpotDiagnosticMXBean diagnostic = ManagementFactoryHelper.getDiagnosticMXBean();
+
+    VMOption option = diagnostic.getVMOption("CompressedClassSpaceSize");
+    return Long.parseLong(option.getValue());
+  }
+
+
+  public static long getMaxHeapForCompressedOops(String[] vmargs) throws Exception {
+    OutputAnalyzer output = runWhiteBoxTest(vmargs, DetermineMaxHeapForCompressedOops.class.getName(), new String[] {}, false);
+    return Long.parseLong(output.getStdout());
+  }
+
+  public static boolean is64bitVM() {
+    String val = System.getProperty("sun.arch.data.model");
+    if (val == null) {
+      throw new RuntimeException("Could not read sun.arch.data.model");
+    }
+    if (val.equals("64")) {
+      return true;
+    } else if (val.equals("32")) {
+      return false;
+    }
+    throw new RuntimeException("Unexpected value " + val + " of sun.arch.data.model");
+  }
+
+  /**
+   * Executes a new VM process with the given class and parameters.
+   * @param vmargs Arguments to the VM to run
+   * @param classname Name of the class to run
+   * @param arguments Arguments to the class
+   * @param useTestDotJavaDotOpts Use test.java.opts as part of the VM argument string
+   * @return The OutputAnalyzer with the results for the invocation.
+   */
+  public static OutputAnalyzer runWhiteBoxTest(String[] vmargs, String classname, String[] arguments, boolean useTestDotJavaDotOpts) throws Exception {
+    ArrayList<String> finalargs = new ArrayList<String>();
+
+    String[] whiteboxOpts = new String[] {
+      "-Xbootclasspath/a:.",
+      "-XX:+UnlockDiagnosticVMOptions", "-XX:+WhiteBoxAPI",
+      "-cp", System.getProperty("java.class.path"),
+    };
+
+    if (useTestDotJavaDotOpts) {
+      // System.getProperty("test.java.opts") is '' if no options is set,
+      // we need to skip such a result
+      String[] externalVMOpts = new String[0];
+      if (System.getProperty("test.java.opts") != null && System.getProperty("test.java.opts").length() != 0) {
+        externalVMOpts = System.getProperty("test.java.opts").split(" ");
+      }
+      finalargs.addAll(Arrays.asList(externalVMOpts));
+    }
+
+    finalargs.addAll(Arrays.asList(vmargs));
+    finalargs.addAll(Arrays.asList(whiteboxOpts));
+    finalargs.add(classname);
+    finalargs.addAll(Arrays.asList(arguments));
+
+    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(finalargs.toArray(new String[0]));
+    OutputAnalyzer output = new OutputAnalyzer(pb.start());
+    output.shouldHaveExitValue(0);
+    return output;
+  }
+
+  private static String[] join(String[] part1, String part2) {
+    ArrayList<String> result = new ArrayList<String>();
+    result.addAll(Arrays.asList(part1));
+    result.add(part2);
+    return result.toArray(new String[0]);
+  }
+
+  public static void checkCompressedOopsErgo(String[] gcflags) throws Exception {
+    long maxHeapForCompressedOops = getMaxHeapForCompressedOops(gcflags);
+
+    checkUseCompressedOops(gcflags, maxHeapForCompressedOops, true);
+    checkUseCompressedOops(gcflags, maxHeapForCompressedOops - 1, true);
+    checkUseCompressedOops(gcflags, maxHeapForCompressedOops + 1, false);
+
+    // the use of HeapBaseMinAddress should not change the outcome
+    checkUseCompressedOops(join(gcflags, "-XX:HeapBaseMinAddress=32G"), maxHeapForCompressedOops, true);
+    checkUseCompressedOops(join(gcflags, "-XX:HeapBaseMinAddress=32G"), maxHeapForCompressedOops - 1, true);
+    checkUseCompressedOops(join(gcflags, "-XX:HeapBaseMinAddress=32G"), maxHeapForCompressedOops + 1, false);
+
+    // use a different object alignment
+    maxHeapForCompressedOops = getMaxHeapForCompressedOops(join(gcflags, "-XX:ObjectAlignmentInBytes=16"));
+
+    checkUseCompressedOops(join(gcflags, "-XX:ObjectAlignmentInBytes=16"), maxHeapForCompressedOops, true);
+    checkUseCompressedOops(join(gcflags, "-XX:ObjectAlignmentInBytes=16"), maxHeapForCompressedOops - 1, true);
+    checkUseCompressedOops(join(gcflags, "-XX:ObjectAlignmentInBytes=16"), maxHeapForCompressedOops + 1, false);
+
+    // use a different CompressedClassSpaceSize
+    String compressedClassSpaceSizeArg = "-XX:CompressedClassSpaceSize=" + 2 * getCompressedClassSpaceSize();
+    maxHeapForCompressedOops = getMaxHeapForCompressedOops(join(gcflags, compressedClassSpaceSizeArg));
+
+    checkUseCompressedOops(join(gcflags, compressedClassSpaceSizeArg), maxHeapForCompressedOops, true);
+    checkUseCompressedOops(join(gcflags, compressedClassSpaceSizeArg), maxHeapForCompressedOops - 1, true);
+    checkUseCompressedOops(join(gcflags, compressedClassSpaceSizeArg), maxHeapForCompressedOops + 1, false);
+  }
+
+  private static void checkUseCompressedOops(String[] args, long heapsize, boolean expectUseCompressedOops) throws Exception {
+     ArrayList<String> finalargs = new ArrayList<String>();
+     finalargs.addAll(Arrays.asList(args));
+     finalargs.add("-Xmx" + heapsize);
+     finalargs.add("-XX:+PrintFlagsFinal");
+     finalargs.add("-version");
+
+     String output = expectValid(finalargs.toArray(new String[0]));
+
+     boolean actualUseCompressedOops = getFlagBoolValue(" UseCompressedOops", output);
+
+     Asserts.assertEQ(expectUseCompressedOops, actualUseCompressedOops);
+  }
+
+  private static boolean getFlagBoolValue(String flag, String where) {
+    Matcher m = Pattern.compile(flag + "\\s+:?= (true|false)").matcher(where);
+    if (!m.find()) {
+      throw new RuntimeException("Could not find value for flag " + flag + " in output string");
+    }
+    return m.group(1).equals("true");
+  }
+
+  private static String expect(String[] flags, boolean hasWarning, boolean hasError, int errorcode) throws Exception {
+    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(flags);
+    OutputAnalyzer output = new OutputAnalyzer(pb.start());
+    output.shouldHaveExitValue(errorcode);
+    return output.getStdout();
+  }
+
+  private static String expectValid(String[] flags) throws Exception {
+    return expect(flags, false, false, 0);
+  }
+}
+
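
getFlagBoolValue above matches lines from -XX:+PrintFlagsFinal output. For
illustration, the kind of line the pattern is built for looks roughly like
(column widths vary between builds):

    bool UseCompressedOops := true

where ":=" marks a value changed from its default and "=" a default value,
which is why the pattern allows the optional colon via ":?=".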
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/g1/TestHumongousAllocInitialMark.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestHumongousAllocInitialMark
+ * @bug 7168848
+ * @summary G1: humongous object allocations should initiate marking cycles when necessary
+ * @library /testlibrary
+ */
+
+import com.oracle.java.testlibrary.*;
+
+public class TestHumongousAllocInitialMark {
+    private static final int heapSize                       = 200; // MB
+    private static final int heapRegionSize                 = 1;   // MB
+    private static final int initiatingHeapOccupancyPercent = 50;  // %
+
+    public static void main(String[] args) throws Exception {
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+            "-XX:+UseG1GC",
+            "-Xms" + heapSize + "m",
+            "-Xmx" + heapSize + "m",
+            "-XX:G1HeapRegionSize=" + heapRegionSize + "m",
+            "-XX:InitiatingHeapOccupancyPercent=" + initiatingHeapOccupancyPercent,
+            "-XX:+PrintGC",
+            HumongousObjectAllocator.class.getName());
+
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldContain("GC pause (G1 Humongous Allocation) (young) (initial-mark)");
+        output.shouldNotContain("Full GC");
+        output.shouldHaveExitValue(0);
+    }
+
+    static class HumongousObjectAllocator {
+        private static byte[] dummy;
+
+        public static void main(String [] args) throws Exception {
+            // Make object size 75% of region size
+            final int humongousObjectSize =
+                (int)(heapRegionSize * 1024 * 1024 * 0.75);
+
+            // Number of objects to allocate to go above IHOP
+            final int humongousObjectAllocations =
+                (int)((heapSize * initiatingHeapOccupancyPercent / 100.0) / heapRegionSize) + 1;
+
+            // Allocate
+            for (int i = 1; i <= humongousObjectAllocations; i++) {
+                System.out.println("Allocating humongous object " + i + "/" + humongousObjectAllocations +
+                                   " of size " + humongousObjectSize + " bytes");
+                dummy = new byte[humongousObjectSize];
+            }
+        }
+    }
+}
+
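
Working through the constants above: humongousObjectSize is
(int)(1 * 1024 * 1024 * 0.75) = 786432 bytes, which exceeds half of the 1 MB
region size and is therefore a humongous allocation in G1. The allocation
count is (int)((200 * 50 / 100.0) / 1) + 1 = 101, so the test allocates 101
such objects, enough to cross the 50% initiating heap occupancy threshold and
force at least one initial-mark pause without a Full GC.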
--- a/test/gc/g1/TestSummarizeRSetStats.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/test/gc/g1/TestSummarizeRSetStats.java	Tue Nov 05 17:38:04 2013 -0800
@@ -25,140 +25,61 @@
  * @test TestSummarizeRSetStats.java
  * @bug 8013895
  * @library /testlibrary
- * @build TestSummarizeRSetStats
+ * @build TestSummarizeRSetStatsTools TestSummarizeRSetStats
  * @summary Verify output of -XX:+G1SummarizeRSetStats
  * @run main TestSummarizeRSetStats
  *
  * Test the output of G1SummarizeRSetStats in conjunction with G1SummarizeRSetStatsPeriod.
  */
 
-import com.oracle.java.testlibrary.*;
-import java.lang.Thread;
-import java.util.ArrayList;
-import java.util.Arrays;
-
-class RunSystemGCs {
-    // 4M size, both are directly allocated into the old gen
-    static Object[] largeObject1 = new Object[1024 * 1024];
-    static Object[] largeObject2 = new Object[1024 * 1024];
-
-    static int[] temp;
-
-    public static void main(String[] args) {
-        // create some cross-references between these objects
-        for (int i = 0; i < largeObject1.length; i++) {
-            largeObject1[i] = largeObject2;
-        }
-
-        for (int i = 0; i < largeObject2.length; i++) {
-            largeObject2[i] = largeObject1;
-        }
-
-        int numGCs = Integer.parseInt(args[0]);
-
-        if (numGCs > 0) {
-            // try to force a minor collection: the young gen is 4M, the
-            // amount of data allocated below is roughly that (4*1024*1024 +
-            // some header data)
-            for (int i = 0; i < 1024 ; i++) {
-                temp = new int[1024];
-            }
-        }
-
-        for (int i = 0; i < numGCs - 1; i++) {
-            System.gc();
-        }
-    }
-}
-
 public class TestSummarizeRSetStats {
 
-    public static String runTest(String[] additionalArgs, int numGCs) throws Exception {
-        ArrayList<String> finalargs = new ArrayList<String>();
-        String[] defaultArgs = new String[] {
-            "-XX:+UseG1GC",
-            "-Xmn4m",
-            "-Xmx20m",
-            "-XX:InitiatingHeapOccupancyPercent=100", // we don't want the additional GCs due to initial marking
-            "-XX:+PrintGC",
-            "-XX:+UnlockDiagnosticVMOptions",
-            "-XX:G1HeapRegionSize=1M",
-        };
-
-        finalargs.addAll(Arrays.asList(defaultArgs));
-
-        if (additionalArgs != null) {
-            finalargs.addAll(Arrays.asList(additionalArgs));
-        }
-
-        finalargs.add(RunSystemGCs.class.getName());
-        finalargs.add(String.valueOf(numGCs));
-
-        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
-            finalargs.toArray(new String[0]));
-        OutputAnalyzer output = new OutputAnalyzer(pb.start());
-
-        output.shouldHaveExitValue(0);
-
-        String result = output.getStdout();
-        return result;
-    }
-
-    private static void expectStatistics(String result, int expectedCumulative, int expectedPeriodic) throws Exception {
-        int actualTotal = result.split("Concurrent RS processed").length - 1;
-        int actualCumulative = result.split("Cumulative RS summary").length - 1;
-
-        if (expectedCumulative != actualCumulative) {
-            throw new Exception("Incorrect amount of RSet summaries at the end. Expected " + expectedCumulative + ", got " + actualCumulative);
-        }
-
-        if (expectedPeriodic != (actualTotal - actualCumulative)) {
-            throw new Exception("Incorrect amount of per-period RSet summaries at the end. Expected " + expectedPeriodic + ", got " + (actualTotal - actualCumulative));
-        }
-    }
-
     public static void main(String[] args) throws Exception {
         String result;
 
-        // no RSet statistics output
-        result = runTest(null, 0);
-        expectStatistics(result, 0, 0);
+        if (!TestSummarizeRSetStatsTools.testingG1GC()) {
+            return;
+        }
 
-        // no RSet statistics output
-        result = runTest(null, 2);
-        expectStatistics(result, 0, 0);
+        // no remembered set summary output
+        result = TestSummarizeRSetStatsTools.runTest(null, 0);
+        TestSummarizeRSetStatsTools.expectRSetSummaries(result, 0, 0);
 
-        // no RSet statistics output
-        result = runTest(new String[] { "-XX:G1SummarizeRSetStatsPeriod=1" }, 3);
-        expectStatistics(result, 0, 0);
+        // no remembered set summary output
+        result = TestSummarizeRSetStatsTools.runTest(null, 2);
+        TestSummarizeRSetStatsTools.expectRSetSummaries(result, 0, 0);
 
-        // single RSet statistics output at the end
-        result = runTest(new String[] { "-XX:+G1SummarizeRSetStats" }, 0);
-        expectStatistics(result, 1, 0);
+        // no remembered set summary output
+        result = TestSummarizeRSetStatsTools.runTest(new String[] { "-XX:G1SummarizeRSetStatsPeriod=1" }, 3);
+        TestSummarizeRSetStatsTools.expectRSetSummaries(result, 0, 0);
 
-        // single RSet statistics output at the end
-        result = runTest(new String[] { "-XX:+G1SummarizeRSetStats" }, 2);
-        expectStatistics(result, 1, 0);
+        // single remembered set summary output at the end
+        result = TestSummarizeRSetStatsTools.runTest(new String[] { "-XX:+G1SummarizeRSetStats" }, 0);
+        TestSummarizeRSetStatsTools.expectRSetSummaries(result, 1, 0);
 
-        // single RSet statistics output
-        result = runTest(new String[] { "-XX:+G1SummarizeRSetStats", "-XX:G1SummarizeRSetStatsPeriod=1" }, 0);
-        expectStatistics(result, 1, 0);
+        // single remembered set summary output at the end
+        result = TestSummarizeRSetStatsTools.runTest(new String[] { "-XX:+G1SummarizeRSetStats" }, 2);
+        TestSummarizeRSetStatsTools.expectRSetSummaries(result, 1, 0);
 
-        // two times RSet statistics output
-        result = runTest(new String[] { "-XX:+G1SummarizeRSetStats", "-XX:G1SummarizeRSetStatsPeriod=1" }, 1);
-        expectStatistics(result, 1, 1);
+        // single remembered set summary output
+        result = TestSummarizeRSetStatsTools.runTest(new String[] { "-XX:+G1SummarizeRSetStats", "-XX:G1SummarizeRSetStatsPeriod=1" }, 0);
+        TestSummarizeRSetStatsTools.expectRSetSummaries(result, 1, 0);
+
+        // two times remembered set summary output
+        result = TestSummarizeRSetStatsTools.runTest(new String[] { "-XX:+G1SummarizeRSetStats", "-XX:G1SummarizeRSetStatsPeriod=1" }, 1);
+        TestSummarizeRSetStatsTools.expectRSetSummaries(result, 1, 2);
 
-        // four times RSet statistics output
-        result = runTest(new String[] { "-XX:+G1SummarizeRSetStats", "-XX:G1SummarizeRSetStatsPeriod=1" }, 3);
-        expectStatistics(result, 1, 3);
+        // four times remembered set summary output
+        result = TestSummarizeRSetStatsTools.runTest(new String[] { "-XX:+G1SummarizeRSetStats", "-XX:G1SummarizeRSetStatsPeriod=1" }, 3);
+        TestSummarizeRSetStatsTools.expectRSetSummaries(result, 1, 6);
 
-        // three times RSet statistics output
-        result = runTest(new String[] { "-XX:+G1SummarizeRSetStats", "-XX:G1SummarizeRSetStatsPeriod=2" }, 3);
-        expectStatistics(result, 1, 2);
+        // three times remembered set summary output
+        result = TestSummarizeRSetStatsTools.runTest(new String[] { "-XX:+G1SummarizeRSetStats", "-XX:G1SummarizeRSetStatsPeriod=2" }, 3);
+        TestSummarizeRSetStatsTools.expectRSetSummaries(result, 1, 4);
 
-        // single RSet statistics output
-        result = runTest(new String[] { "-XX:+G1SummarizeRSetStats", "-XX:G1SummarizeRSetStatsPeriod=100" }, 3);
-        expectStatistics(result, 1, 1);
+        // single remembered set summary output
+        result = TestSummarizeRSetStatsTools.runTest(new String[] { "-XX:+G1SummarizeRSetStats", "-XX:G1SummarizeRSetStatsPeriod=100" }, 3);
+        TestSummarizeRSetStatsTools.expectRSetSummaries(result, 1, 2);
     }
 }
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/g1/TestSummarizeRSetStatsPerRegion.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestSummarizeRSetStatsPerRegion.java
+ * @bug 8014078
+ * @library /testlibrary
+ * @build TestSummarizeRSetStatsTools TestSummarizeRSetStatsPerRegion
+ * @summary Verify output of -XX:+G1SummarizeRSetStats in regards to per-region type output
+ * @run main TestSummarizeRSetStatsPerRegion
+ */
+
+import com.oracle.java.testlibrary.*;
+import java.lang.Thread;
+import java.util.ArrayList;
+import java.util.Arrays;
+
+public class TestSummarizeRSetStatsPerRegion {
+
+    public static void main(String[] args) throws Exception {
+        String result;
+
+        if (!TestSummarizeRSetStatsTools.testingG1GC()) {
+            return;
+        }
+
+        // single remembered set summary output at the end
+        result = TestSummarizeRSetStatsTools.runTest(new String[] { "-XX:+G1SummarizeRSetStats" }, 0);
+        TestSummarizeRSetStatsTools.expectPerRegionRSetSummaries(result, 1, 0);
+
+        // two times remembered set summary output
+        result = TestSummarizeRSetStatsTools.runTest(new String[] { "-XX:+G1SummarizeRSetStats", "-XX:G1SummarizeRSetStatsPeriod=1" }, 1);
+        TestSummarizeRSetStatsTools.expectPerRegionRSetSummaries(result, 1, 2);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/g1/TestSummarizeRSetStatsThreads.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestSummarizeRSetStatsThreads
+ * @bug 8025441
+ * @summary Ensure that various values of worker threads/concurrent
+ * refinement threads do not crash the VM.
+ * @key gc
+ * @library /testlibrary
+ */
+
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import com.oracle.java.testlibrary.ProcessTools;
+import com.oracle.java.testlibrary.OutputAnalyzer;
+
+public class TestSummarizeRSetStatsThreads {
+
+  private static void runTest(int refinementThreads, int workerThreads) throws Exception {
+    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
+                                                              "-XX:+UnlockDiagnosticVMOptions",
+                                                              "-XX:+G1SummarizeRSetStats",
+                                                              "-XX:G1ConcRefinementThreads=" + refinementThreads,
+                                                              "-XX:ParallelGCThreads=" + workerThreads,
+                                                              "-version");
+
+    OutputAnalyzer output = new OutputAnalyzer(pb.start());
+
+    // Check that the output contains the string "Concurrent RS threads times (s)",
+    // followed by the correct number of values on the next line.
+
+    // A refinement thread count of zero means that the value of ParallelGCThreads
+    // is used; in any case, at least one thread is used.
+    int expectedNumRefinementThreads = refinementThreads == 0 ? workerThreads : refinementThreads;
+    expectedNumRefinementThreads = Math.max(1, expectedNumRefinementThreads);
+    // create the pattern made up of n copies of a floating point number pattern
+    String numberPattern = String.format("%0" + expectedNumRefinementThreads + "d", 0)
+      .replace("0", "\\s+\\d+\\.\\d+");
+    String pattern = "Concurrent RS threads times \\(s\\)$" + numberPattern + "$";
+    Matcher m = Pattern.compile(pattern, Pattern.MULTILINE).matcher(output.getStdout());
+
+    if (!m.find()) {
+      throw new Exception("Could not find correct output for concurrent RS threads times in stdout," +
+        " should match the pattern \"" + pattern + "\", but stdout is \n" + output.getStdout());
+    }
+    output.shouldHaveExitValue(0);
+  }
+
+  public static void main(String[] args) throws Exception {
+    if (!TestSummarizeRSetStatsTools.testingG1GC()) {
+      return;
+    }
+    // different valid combinations of number of refinement and gc worker threads
+    runTest(0, 0);
+    runTest(0, 5);
+    runTest(5, 0);
+    runTest(10, 10);
+    runTest(1, 2);
+    runTest(4, 3);
+  }
+}
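
The thread-count pattern above is built with a formatting trick: formatting zero with a field width of n yields a string of n '0' characters, each of which is then replaced by the floating-point sub-pattern. A standalone sketch (class name hypothetical), with an equivalent bounded-repetition form:

public class PatternRepeatDemo {
    public static void main(String[] args) {
        int n = 3; // example refinement thread count
        // "%03d" applied to 0 gives "000"; each '0' becomes one number pattern
        String repeated = String.format("%0" + n + "d", 0).replace("0", "\\s+\\d+\\.\\d+");
        // the same repetition expressed directly in the regex
        String bounded = "(?:\\s+\\d+\\.\\d+){" + n + "}";
        System.out.println(repeated);
        System.out.println(bounded);
    }
}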
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/g1/TestSummarizeRSetStatsTools.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * Common helpers for TestSummarizeRSetStats* tests
+ */
+
+import sun.management.ManagementFactoryHelper;
+import com.sun.management.HotSpotDiagnosticMXBean;
+import com.sun.management.VMOption;
+
+import com.oracle.java.testlibrary.*;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.lang.Thread;
+import java.util.ArrayList;
+import java.util.Arrays;
+
+class VerifySummaryOutput {
+    // 4M each; both are directly allocated into the old gen
+    static Object[] largeObject1 = new Object[1024 * 1024];
+    static Object[] largeObject2 = new Object[1024 * 1024];
+
+    static int[] temp;
+
+    public static void main(String[] args) {
+        // create some cross-references between these objects
+        for (int i = 0; i < largeObject1.length; i++) {
+            largeObject1[i] = largeObject2;
+        }
+
+        for (int i = 0; i < largeObject2.length; i++) {
+            largeObject2[i] = largeObject1;
+        }
+
+        int numGCs = Integer.parseInt(args[0]);
+
+        if (numGCs > 0) {
+            // try to force a minor collection: the young gen is 4M, the
+            // amount of data allocated below is roughly that (4*1024*1024 +
+            // some header data)
+            for (int i = 0; i < 1024 ; i++) {
+                temp = new int[1024];
+            }
+        }
+
+        for (int i = 0; i < numGCs - 1; i++) {
+            System.gc();
+        }
+    }
+}
+
+public class TestSummarizeRSetStatsTools {
+
+    // Returns whether the VM is currently running with G1GC, i.e. whether G1 functionality is actually under test.
+    public static boolean testingG1GC() {
+        HotSpotDiagnosticMXBean diagnostic = ManagementFactoryHelper.getDiagnosticMXBean();
+
+        VMOption option = diagnostic.getVMOption("UseG1GC");
+        if (option.getValue().equals("false")) {
+          System.out.println("Skipping this test. It is only a G1 test.");
+          return false;
+        }
+        return true;
+    }
+
+    public static String runTest(String[] additionalArgs, int numGCs) throws Exception {
+        ArrayList<String> finalargs = new ArrayList<String>();
+        String[] defaultArgs = new String[] {
+            "-XX:+UseG1GC",
+            "-XX:+UseCompressedOops",
+            "-Xmn4m",
+            "-Xmx20m",
+            "-XX:InitiatingHeapOccupancyPercent=100", // we don't want the additional GCs due to initial marking
+            "-XX:+PrintGC",
+            "-XX:+UnlockDiagnosticVMOptions",
+            "-XX:G1HeapRegionSize=1M",
+        };
+
+        finalargs.addAll(Arrays.asList(defaultArgs));
+
+        if (additionalArgs != null) {
+            finalargs.addAll(Arrays.asList(additionalArgs));
+        }
+
+        finalargs.add(VerifySummaryOutput.class.getName());
+        finalargs.add(String.valueOf(numGCs));
+
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+            finalargs.toArray(new String[0]));
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+
+        output.shouldHaveExitValue(0);
+
+        String result = output.getStdout();
+        return result;
+    }
+
+    private static void checkCounts(int expected, int actual, String which) throws Exception {
+        if (expected != actual) {
+            throw new Exception("RSet summaries mention " + which + " regions an incorrect number of times. Expected " + expected + ", got " + actual);
+        }
+    }
+
+    public static void expectPerRegionRSetSummaries(String result, int expectedCumulative, int expectedPeriodic) throws Exception {
+        expectRSetSummaries(result, expectedCumulative, expectedPeriodic);
+        int actualYoung = result.split("Young regions").length - 1;
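+        // Note: "Humonguous" (sic) below matches the region-type label exactly as the VM prints it.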
+        int actualHumonguous = result.split("Humonguous regions").length - 1;
+        int actualFree = result.split("Free regions").length - 1;
+        int actualOther = result.split("Old regions").length - 1;
+
+        // the strings we check for above are printed four times per summary
+        int expectedPerRegionTypeInfo = (expectedCumulative + expectedPeriodic) * 4;
+
+        checkCounts(expectedPerRegionTypeInfo, actualYoung, "Young");
+        checkCounts(expectedPerRegionTypeInfo, actualHumonguous, "Humonguous");
+        checkCounts(expectedPerRegionTypeInfo, actualFree, "Free");
+        checkCounts(expectedPerRegionTypeInfo, actualOther, "Old");
+    }
+
+    public static void expectRSetSummaries(String result, int expectedCumulative, int expectedPeriodic) throws Exception {
+        int actualTotal = result.split("concurrent refinement").length - 1;
+        int actualCumulative = result.split("Cumulative RS summary").length - 1;
+
+        if (expectedCumulative != actualCumulative) {
+            throw new Exception("Incorrect amount of RSet summaries at the end. Expected " + expectedCumulative + ", got " + actualCumulative);
+        }
+
+        if (expectedPeriodic != (actualTotal - actualCumulative)) {
+            throw new Exception("Incorrect amount of per-period RSet summaries at the end. Expected " + expectedPeriodic + ", got " + (actualTotal - actualCumulative));
+        }
+    }
+}
+
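
The helpers above count marker occurrences with split(marker).length - 1. That idiom works here because every marker is followed by further output, but String.split discards trailing empty strings, so it would undercount a marker sitting at the very end of the input. A more robust counter, as a hedged sketch (class and method names hypothetical):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class OccurrenceCounter {
    // Counts literal occurrences of needle in haystack, including one at the end.
    static int count(String haystack, String needle) {
        Matcher m = Pattern.compile(Pattern.quote(needle)).matcher(haystack);
        int c = 0;
        while (m.find()) {
            c++;
        }
        return c;
    }

    public static void main(String[] args) {
        System.out.println(count("Young regions ... Young regions", "Young regions")); // prints 2
    }
}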
--- a/test/gc/metaspace/ClassMetaspaceSizeInJmapHeap.java	Wed Oct 16 10:52:41 2013 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,79 +0,0 @@
-/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/*
- * @test ClassMetaspaceSizeInJmapHeap
- * @bug 8004924
- * @summary Checks that jmap -heap contains the flag ClassMetaspaceSize
- * @library /testlibrary
- * @run main/othervm -XX:ClassMetaspaceSize=50m ClassMetaspaceSizeInJmapHeap
- */
-
-import com.oracle.java.testlibrary.*;
-import java.nio.file.*;
-import java.io.File;
-import java.nio.charset.Charset;
-import java.util.List;
-
-public class ClassMetaspaceSizeInJmapHeap {
-    public static void main(String[] args) throws Exception {
-        String pid = Integer.toString(ProcessTools.getProcessId());
-
-        JDKToolLauncher jmap = JDKToolLauncher.create("jmap")
-                                              .addToolArg("-heap")
-                                              .addToolArg(pid);
-        ProcessBuilder pb = new ProcessBuilder(jmap.getCommand());
-
-        File out = new File("ClassMetaspaceSizeInJmapHeap.stdout.txt");
-        pb.redirectOutput(out);
-
-        File err = new File("ClassMetaspaceSizeInJmapHeap.stderr.txt");
-        pb.redirectError(err);
-
-        run(pb);
-
-        OutputAnalyzer output = new OutputAnalyzer(read(out));
-        output.shouldContain("ClassMetaspaceSize = 52428800 (50.0MB)");
-        out.delete();
-    }
-
-    private static void run(ProcessBuilder pb) throws Exception {
-        Process p = pb.start();
-        p.waitFor();
-        int exitValue = p.exitValue();
-        if (exitValue != 0) {
-            throw new Exception("jmap -heap exited with error code: " + exitValue);
-        }
-    }
-
-    private static String read(File f) throws Exception {
-        Path p = f.toPath();
-        List<String> lines = Files.readAllLines(p, Charset.defaultCharset());
-
-        StringBuilder sb = new StringBuilder();
-        for (String line : lines) {
-            sb.append(line).append('\n');
-        }
-        return sb.toString();
-    }
-}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/metaspace/CompressedClassSpaceSizeInJmapHeap.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test CompressedClassSpaceSizeInJmapHeap
+ * @bug 8004924
+ * @summary Checks that jmap -heap contains the flag CompressedClassSpaceSize
+ * @library /testlibrary
+ * @run main/othervm -XX:CompressedClassSpaceSize=50m CompressedClassSpaceSizeInJmapHeap
+ */
+
+import com.oracle.java.testlibrary.*;
+import java.nio.file.*;
+import java.io.File;
+import java.nio.charset.Charset;
+import java.util.List;
+
+public class CompressedClassSpaceSizeInJmapHeap {
+    public static void main(String[] args) throws Exception {
+        String pid = Integer.toString(ProcessTools.getProcessId());
+
+        JDKToolLauncher jmap = JDKToolLauncher.create("jmap")
+                                              .addToolArg("-heap")
+                                              .addToolArg(pid);
+        ProcessBuilder pb = new ProcessBuilder(jmap.getCommand());
+
+        File out = new File("CompressedClassSpaceSizeInJmapHeap.stdout.txt");
+        pb.redirectOutput(out);
+
+        File err = new File("CompressedClassSpaceSizeInJmapHeap.stderr.txt");
+        pb.redirectError(err);
+
+        run(pb);
+
+        OutputAnalyzer output = new OutputAnalyzer(read(out));
+        output.shouldContain("CompressedClassSpaceSize = 52428800 (50.0MB)");
+        out.delete();
+    }
+
+    private static void run(ProcessBuilder pb) throws Exception {
+        Process p = pb.start();
+        p.waitFor();
+        int exitValue = p.exitValue();
+        if (exitValue != 0) {
+            throw new Exception("jmap -heap exited with error code: " + exitValue);
+        }
+    }
+
+    private static String read(File f) throws Exception {
+        Path p = f.toPath();
+        List<String> lines = Files.readAllLines(p, Charset.defaultCharset());
+
+        StringBuilder sb = new StringBuilder();
+        for (String line : lines) {
+            sb.append(line).append('\n');
+        }
+        return sb.toString();
+    }
+}
--- a/test/gc/metaspace/G1AddMetaspaceDependency.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/test/gc/metaspace/G1AddMetaspaceDependency.java	Tue Nov 05 17:38:04 2013 -0800
@@ -107,7 +107,6 @@
     Loader f_loader = new Loader(b_name, b_bytes, a_name, a_loader);
     Loader g_loader = new Loader(b_name, b_bytes, a_name, a_loader);
 
-    byte[] b = new byte[20 * 2 << 20];
     Class<?> c;
     c = b_loader.loadClass(b_name);
     c = c_loader.loadClass(b_name);
--- a/test/gc/metaspace/TestMetaspaceMemoryPool.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/test/gc/metaspace/TestMetaspaceMemoryPool.java	Tue Nov 05 17:38:04 2013 -0800
@@ -22,55 +22,35 @@
  */
 
 import java.util.List;
-import java.lang.management.ManagementFactory;
-import java.lang.management.MemoryManagerMXBean;
-import java.lang.management.MemoryPoolMXBean;
-import java.lang.management.MemoryUsage;
-
-import java.lang.management.RuntimeMXBean;
-import java.lang.management.ManagementFactory;
+import java.lang.management.*;
+import com.oracle.java.testlibrary.*;
+import static com.oracle.java.testlibrary.Asserts.*;
 
 /* @test TestMetaspaceMemoryPool
  * @bug 8000754
  * @summary Tests that a MemoryPoolMXBeans is created for metaspace and that a
  *          MemoryManagerMXBean is created.
+ * @library /testlibrary
  * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops TestMetaspaceMemoryPool
  * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:MaxMetaspaceSize=60m TestMetaspaceMemoryPool
- * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedKlassPointers TestMetaspaceMemoryPool
- * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedKlassPointers -XX:ClassMetaspaceSize=60m TestMetaspaceMemoryPool
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers TestMetaspaceMemoryPool
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers -XX:CompressedClassSpaceSize=60m TestMetaspaceMemoryPool
  */
 public class TestMetaspaceMemoryPool {
     public static void main(String[] args) {
         verifyThatMetaspaceMemoryManagerExists();
-        verifyMemoryPool(getMemoryPool("Metaspace"), isFlagDefined("MaxMetaspaceSize"));
 
-        if (runsOn64bit()) {
-            if (usesCompressedOops()) {
+        boolean isMetaspaceMaxDefined = InputArguments.containsPrefix("-XX:MaxMetaspaceSize");
+        verifyMemoryPool(getMemoryPool("Metaspace"), isMetaspaceMaxDefined);
+
+        if (Platform.is64bit()) {
+            if (InputArguments.contains("-XX:+UseCompressedOops")) {
                 MemoryPoolMXBean cksPool = getMemoryPool("Compressed Class Space");
                 verifyMemoryPool(cksPool, true);
             }
         }
     }
 
-    private static boolean runsOn64bit() {
-        return !System.getProperty("sun.arch.data.model").equals("32");
-    }
-
-    private static boolean usesCompressedOops() {
-        return isFlagDefined("+UseCompressedOops");
-    }
-
-    private static boolean isFlagDefined(String name) {
-        RuntimeMXBean runtimeMxBean = ManagementFactory.getRuntimeMXBean();
-        List<String> args = runtimeMxBean.getInputArguments();
-        for (String arg : args) {
-            if (arg.startsWith("-XX:" + name)) {
-                return true;
-            }
-        }
-        return false;
-    }
-
     private static void verifyThatMetaspaceMemoryManagerExists() {
         List<MemoryManagerMXBean> managers = ManagementFactory.getMemoryManagerMXBeans();
         for (MemoryManagerMXBean manager : managers) {
@@ -95,32 +75,19 @@
 
     private static void verifyMemoryPool(MemoryPoolMXBean pool, boolean isMaxDefined) {
         MemoryUsage mu = pool.getUsage();
-        assertDefined(mu.getInit(), "init");
-        assertDefined(mu.getUsed(), "used");
-        assertDefined(mu.getCommitted(), "committed");
+        long init = mu.getInit();
+        long used = mu.getUsed();
+        long committed = mu.getCommitted();
+        long max = mu.getMax();
+
+        assertGTE(init, 0L);
+        assertGTE(used, init);
+        assertGTE(committed, used);
 
         if (isMaxDefined) {
-            assertDefined(mu.getMax(), "max");
+            assertGTE(max, committed);
         } else {
-            assertUndefined(mu.getMax(), "max");
-        }
-    }
-
-    private static void assertDefined(long value, String name) {
-        assertTrue(value != -1, "Expected " + name + " to be defined");
-    }
-
-    private static void assertUndefined(long value, String name) {
-        assertEquals(value, -1, "Expected " + name + " to be undefined");
-    }
-
-    private static void assertEquals(long actual, long expected, String msg) {
-        assertTrue(actual == expected, msg);
-    }
-
-    private static void assertTrue(boolean condition, String msg) {
-        if (!condition) {
-            throw new RuntimeException(msg);
+            assertEQ(max, -1L);
         }
     }
 }
--- a/test/gc/metaspace/TestMetaspacePerfCounters.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/test/gc/metaspace/TestMetaspacePerfCounters.java	Tue Nov 05 17:38:04 2013 -0800
@@ -33,13 +33,13 @@
  * @summary Tests that performance counters for metaspace and compressed class
  *          space exists and works.
  *
- * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedKlassPointers -XX:+UsePerfData -XX:+UseSerialGC TestMetaspacePerfCounters
- * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedKlassPointers -XX:+UsePerfData -XX:+UseParallelGC -XX:+UseParallelOldGC TestMetaspacePerfCounters
- * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedKlassPointers -XX:+UsePerfData -XX:+UseG1GC TestMetaspacePerfCounters
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedClassPointers -XX:+UsePerfData -XX:+UseSerialGC TestMetaspacePerfCounters
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedClassPointers -XX:+UsePerfData -XX:+UseParallelGC -XX:+UseParallelOldGC TestMetaspacePerfCounters
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedClassPointers -XX:+UsePerfData -XX:+UseG1GC TestMetaspacePerfCounters
  *
- * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedKlassPointers -XX:+UsePerfData -XX:+UseSerialGC TestMetaspacePerfCounters
- * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedKlassPointers -XX:+UsePerfData -XX:+UseParallelGC -XX:+UseParallelOldGC TestMetaspacePerfCounters
- * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedKlassPointers -XX:+UsePerfData -XX:+UseG1GC TestMetaspacePerfCounters
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers -XX:+UsePerfData -XX:+UseSerialGC TestMetaspacePerfCounters
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers -XX:+UsePerfData -XX:+UseParallelGC -XX:+UseParallelOldGC TestMetaspacePerfCounters
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers -XX:+UsePerfData -XX:+UseG1GC TestMetaspacePerfCounters
  */
 public class TestMetaspacePerfCounters {
     public static Class fooClass = null;
@@ -61,10 +61,15 @@
     }
 
     private static void checkPerfCounters(String ns) throws Exception {
-        for (PerfCounter counter : countersInNamespace(ns)) {
-            String msg = "Expected " + counter.getName() + " to be larger than 0";
-            assertGT(counter.longValue(), 0L, msg);
-        }
+        long minCapacity = getMinCapacity(ns);
+        long maxCapacity = getMaxCapacity(ns);
+        long capacity = getCapacity(ns);
+        long used = getUsed(ns);
+
+        assertGTE(minCapacity, 0L);
+        assertGTE(used, minCapacity);
+        assertGTE(capacity, used);
+        assertGTE(maxCapacity, capacity);
     }
 
     private static void checkEmptyPerfCounters(String ns) throws Exception {
@@ -75,12 +80,10 @@
     }
 
     private static void checkUsedIncreasesWhenLoadingClass(String ns) throws Exception {
-        PerfCounter used = PerfCounters.findByName(ns + ".used");
-
-        long before = used.longValue();
+        long before = getUsed(ns);
         fooClass = compileAndLoad("Foo", "public class Foo { }");
         System.gc();
-        long after = used.longValue();
+        long after = getUsed(ns);
 
         assertGT(after, before);
     }
@@ -99,6 +102,22 @@
     }
 
     private static boolean isUsingCompressedClassPointers() {
-        return Platform.is64bit() && InputArguments.contains("-XX:+UseCompressedKlassPointers");
+        return Platform.is64bit() && InputArguments.contains("-XX:+UseCompressedClassPointers");
+    }
+
+    private static long getMinCapacity(String ns) throws Exception {
+        return PerfCounters.findByName(ns + ".minCapacity").longValue();
+    }
+
+    private static long getCapacity(String ns) throws Exception {
+        return PerfCounters.findByName(ns + ".capacity").longValue();
+    }
+
+    private static long getMaxCapacity(String ns) throws Exception {
+        return PerfCounters.findByName(ns + ".maxCapacity").longValue();
+    }
+
+    private static long getUsed(String ns) throws Exception {
+        return PerfCounters.findByName(ns + ".used").longValue();
     }
 }
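
The four counters read above live under a per-space namespace; for metaspace that is "sun.gc.metaspace" (see TestPerfCountersAndMemoryPools below). A hedged sketch of dumping them with the same testlibrary API, assuming a VM started with -XX:+UsePerfData and /testlibrary on the class path:

import com.oracle.java.testlibrary.PerfCounter;
import com.oracle.java.testlibrary.PerfCounters;

public class MetaspaceCounterDump {
    public static void main(String[] args) throws Exception {
        for (String name : new String[] { "minCapacity", "capacity", "maxCapacity", "used" }) {
            PerfCounter counter = PerfCounters.findByName("sun.gc.metaspace." + name);
            System.out.println(counter.getName() + " = " + counter.longValue());
        }
    }
}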
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/metaspace/TestMetaspaceSizeFlags.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import com.oracle.java.testlibrary.Asserts;
+import com.oracle.java.testlibrary.OutputAnalyzer;
+import com.oracle.java.testlibrary.ProcessTools;
+
+/*
+ * @test TestMetaspaceSizeFlags
+ * @key gc
+ * @bug 8024650
+ * @summary Test that metaspace size flags can be set correctly
+ * @library /testlibrary
+ */
+public class TestMetaspaceSizeFlags {
+  public static final long K = 1024L;
+  public static final long M = 1024L * K;
+
+  // HotSpot uses a number of different values to align memory size flags.
+  // This is currently the largest alignment (unless huge large pages are used).
+  public static final long MAX_ALIGNMENT = 32 * M;
+
+  public static void main(String [] args) throws Exception {
+    testMaxMetaspaceSizeEQMetaspaceSize(MAX_ALIGNMENT, MAX_ALIGNMENT);
+    // 8024650: MaxMetaspaceSize was adjusted instead of MetaspaceSize.
+    testMaxMetaspaceSizeLTMetaspaceSize(MAX_ALIGNMENT, MAX_ALIGNMENT * 2);
+    testMaxMetaspaceSizeGTMetaspaceSize(MAX_ALIGNMENT * 2, MAX_ALIGNMENT);
+    testTooSmallInitialMetaspace(0, 0);
+    testTooSmallInitialMetaspace(0, MAX_ALIGNMENT);
+    testTooSmallInitialMetaspace(MAX_ALIGNMENT, 0);
+  }
+
+  private static void testMaxMetaspaceSizeEQMetaspaceSize(long maxMetaspaceSize, long metaspaceSize) throws Exception {
+    MetaspaceFlags mf = runAndGetValue(maxMetaspaceSize, metaspaceSize);
+    Asserts.assertEQ(maxMetaspaceSize, metaspaceSize);
+    Asserts.assertEQ(mf.maxMetaspaceSize, maxMetaspaceSize);
+    Asserts.assertEQ(mf.metaspaceSize, metaspaceSize);
+  }
+
+  private static void testMaxMetaspaceSizeLTMetaspaceSize(long maxMetaspaceSize, long metaspaceSize) throws Exception {
+    MetaspaceFlags mf = runAndGetValue(maxMetaspaceSize, metaspaceSize);
+    Asserts.assertEQ(mf.maxMetaspaceSize, maxMetaspaceSize);
+    Asserts.assertEQ(mf.metaspaceSize, maxMetaspaceSize);
+  }
+
+  private static void testMaxMetaspaceSizeGTMetaspaceSize(long maxMetaspaceSize, long metaspaceSize) throws Exception {
+    MetaspaceFlags mf = runAndGetValue(maxMetaspaceSize, metaspaceSize);
+    Asserts.assertGT(maxMetaspaceSize, metaspaceSize);
+    Asserts.assertGT(mf.maxMetaspaceSize, mf.metaspaceSize);
+    Asserts.assertEQ(mf.maxMetaspaceSize, maxMetaspaceSize);
+    Asserts.assertEQ(mf.metaspaceSize, metaspaceSize);
+  }
+
+  private static void testTooSmallInitialMetaspace(long maxMetaspaceSize, long metaspaceSize) throws Exception {
+    OutputAnalyzer output = run(maxMetaspaceSize, metaspaceSize);
+    output.shouldContain("Too small initial Metaspace size");
+  }
+
+  private static MetaspaceFlags runAndGetValue(long maxMetaspaceSize, long metaspaceSize) throws Exception {
+    OutputAnalyzer output = run(maxMetaspaceSize, metaspaceSize);
+    output.shouldNotMatch("Error occurred during initialization of VM\n.*");
+
+    String stringMaxMetaspaceSize = output.firstMatch(".* MaxMetaspaceSize .* := (\\d+).*", 1);
+    String stringMetaspaceSize = output.firstMatch(".* MetaspaceSize .* := (\\d+).*", 1);
+
+    return new MetaspaceFlags(Long.parseLong(stringMaxMetaspaceSize),
+                              Long.parseLong(stringMetaspaceSize));
+  }
+
+  private static OutputAnalyzer run(long maxMetaspaceSize, long metaspaceSize) throws Exception {
+    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+        "-XX:MaxMetaspaceSize=" + maxMetaspaceSize,
+        "-XX:MetaspaceSize=" + metaspaceSize,
+        "-XX:-UseLargePages", // Prevent us from using 2GB large pages on solaris + sparc.
+        "-XX:+PrintFlagsFinal",
+        "-version");
+    return new OutputAnalyzer(pb.start());
+  }
+
+  private static class MetaspaceFlags {
+    public long maxMetaspaceSize;
+    public long metaspaceSize;
+    public MetaspaceFlags(long maxMetaspaceSize, long metaspaceSize) {
+      this.maxMetaspaceSize = maxMetaspaceSize;
+      this.metaspaceSize = metaspaceSize;
+    }
+  }
+}
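
runAndGetValue() above scrapes flag values from -XX:+PrintFlagsFinal output. The ":=" in the patterns matters: PrintFlagsFinal prints ":=" for flags whose value differs from the default and "=" otherwise, so the regex only matches flags that were actually set. A standalone sketch of the same extraction (class name and sample line are illustrative assumptions):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class FlagsFinalParser {
    // Extracts the value of a size flag from a PrintFlagsFinal line such as
    // "    uintx MaxMetaspaceSize := 67108864 {product}".
    static long flagValue(String output, String flag) {
        Matcher m = Pattern.compile(".* " + flag + " .*:= (\\d+).*").matcher(output);
        if (!m.find()) {
            throw new IllegalArgumentException(flag + " not set in the output");
        }
        return Long.parseLong(m.group(1));
    }

    public static void main(String[] args) {
        String sample = "    uintx MaxMetaspaceSize := 67108864 {product}";
        System.out.println(flagValue(sample, "MaxMetaspaceSize")); // prints 67108864
    }
}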
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/metaspace/TestPerfCountersAndMemoryPools.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.util.List;
+import java.lang.management.*;
+
+import com.oracle.java.testlibrary.*;
+import static com.oracle.java.testlibrary.Asserts.*;
+
+/* @test TestPerfCountersAndMemoryPools
+ * @bug 8023476
+ * @library /testlibrary
+ * @summary Tests that a MemoryPoolMXBeans and PerfCounters for metaspace
+ *          report the same data.
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedClassPointers -XX:+UseSerialGC -XX:+UsePerfData -Xint TestPerfCountersAndMemoryPools
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers -XX:+UseSerialGC -XX:+UsePerfData -Xint TestPerfCountersAndMemoryPools
+ */
+public class TestPerfCountersAndMemoryPools {
+    public static void main(String[] args) throws Exception {
+        checkMemoryUsage("Metaspace", "sun.gc.metaspace");
+
+        if (InputArguments.contains("-XX:+UseCompressedClassPointers") && Platform.is64bit()) {
+            checkMemoryUsage("Compressed Class Space", "sun.gc.compressedclassspace");
+        }
+    }
+
+    private static MemoryPoolMXBean getMemoryPool(String memoryPoolName) {
+        List<MemoryPoolMXBean> pools = ManagementFactory.getMemoryPoolMXBeans();
+        for (MemoryPoolMXBean pool : pools) {
+            if (pool.getName().equals(memoryPoolName)) {
+                return pool;
+            }
+        }
+
+        throw new RuntimeException("Excpted to find a memory pool with name " +
+                                   memoryPoolName);
+    }
+
+    private static void checkMemoryUsage(String memoryPoolName, String perfNS)
+        throws Exception {
+        MemoryPoolMXBean pool = getMemoryPool(memoryPoolName);
+
+        // Must do a GC to update performance counters
+        System.gc();
+        assertEQ(getMinCapacity(perfNS), pool.getUsage().getInit());
+
+        // Must do a second GC to update the performance counters again, since
+        // the call pool.getUsage().getInit() could have allocated some
+        // metadata.
+        System.gc();
+        assertEQ(getUsed(perfNS), pool.getUsage().getUsed());
+        assertEQ(getCapacity(perfNS), pool.getUsage().getCommitted());
+    }
+
+    private static long getMinCapacity(String ns) throws Exception {
+        return PerfCounters.findByName(ns + ".minCapacity").longValue();
+    }
+
+    private static long getCapacity(String ns) throws Exception {
+        return PerfCounters.findByName(ns + ".capacity").longValue();
+    }
+
+    private static long getUsed(String ns) throws Exception {
+        return PerfCounters.findByName(ns + ".used").longValue();
+    }
+}
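
The double System.gc() above reflects that the metaspace performance counters are only brought up to date by a GC, whereas the MemoryPoolMXBean values are computed on demand. A minimal sketch of the same read-after-GC idiom on the MXBean side:

import java.lang.management.ManagementFactory;
import java.lang.management.MemoryPoolMXBean;

public class MetaspacePoolProbe {
    public static void main(String[] args) {
        for (MemoryPoolMXBean pool : ManagementFactory.getMemoryPoolMXBeans()) {
            if (pool.getName().equals("Metaspace")) {
                System.gc(); // bring the VM's counters up to date before sampling
                System.out.println("init      = " + pool.getUsage().getInit());
                System.out.println("used      = " + pool.getUsage().getUsed());
                System.out.println("committed = " + pool.getUsage().getCommitted());
            }
        }
    }
}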
--- a/test/gc/startup_warnings/TestCMS.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/test/gc/startup_warnings/TestCMS.java	Tue Nov 05 17:38:04 2013 -0800
@@ -38,7 +38,7 @@
   public static void main(String args[]) throws Exception {
     ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseConcMarkSweepGC", "-version");
     OutputAnalyzer output = new OutputAnalyzer(pb.start());
-    output.shouldNotContain("warning");
+    output.shouldNotContain("deprecated");
     output.shouldNotContain("error");
     output.shouldHaveExitValue(0);
   }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/startup_warnings/TestCMSForegroundFlags.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,52 @@
+/*
+* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+*
+* This code is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License version 2 only, as
+* published by the Free Software Foundation.
+*
+* This code is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+* version 2 for more details (a copy is included in the LICENSE file that
+* accompanied this code).
+*
+* You should have received a copy of the GNU General Public License version
+* 2 along with this work; if not, write to the Free Software Foundation,
+* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+*
+* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+* or visit www.oracle.com if you need additional information or have any
+* questions.
+*/
+
+/*
+* @test TestCMSForegroundFlags
+* @key gc
+* @bug 8027132
+* @summary Test that the deprecated CMS foreground collector flags print warning messages
+* @library /testlibrary
+* @run main TestCMSForegroundFlags -XX:-UseCMSCompactAtFullCollection UseCMSCompactAtFullCollection
+* @run main TestCMSForegroundFlags -XX:CMSFullGCsBeforeCompaction=4 CMSFullGCsBeforeCompaction
+* @run main TestCMSForegroundFlags -XX:-UseCMSCollectionPassing UseCMSCollectionPassing
+*/
+
+import com.oracle.java.testlibrary.OutputAnalyzer;
+import com.oracle.java.testlibrary.ProcessTools;
+
+public class TestCMSForegroundFlags {
+  public static void main(String[] args) throws Exception {
+    if (args.length != 2) {
+      throw new Exception("Expected two arguments,flagValue and flagName");
+    }
+    String flagValue = args[0];
+    String flagName = args[1];
+
+    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(flagValue, "-version");
+    OutputAnalyzer output = new OutputAnalyzer(pb.start());
+    output.shouldContain("warning: " + flagName + " is deprecated and will likely be removed in a future release.");
+    output.shouldNotContain("error");
+    output.shouldHaveExitValue(0);
+  }
+}
--- a/test/gc/startup_warnings/TestCMSNoIncrementalMode.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/test/gc/startup_warnings/TestCMSNoIncrementalMode.java	Tue Nov 05 17:38:04 2013 -0800
@@ -37,7 +37,7 @@
   public static void main(String args[]) throws Exception {
     ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseConcMarkSweepGC", "-XX:-CMSIncrementalMode", "-version");
     OutputAnalyzer output = new OutputAnalyzer(pb.start());
-    output.shouldNotContain("warning");
+    output.shouldNotContain("deprecated");
     output.shouldNotContain("error");
     output.shouldHaveExitValue(0);
   }
--- a/test/gc/startup_warnings/TestG1.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/test/gc/startup_warnings/TestG1.java	Tue Nov 05 17:38:04 2013 -0800
@@ -37,7 +37,7 @@
   public static void main(String args[]) throws Exception {
     ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC", "-version");
     OutputAnalyzer output = new OutputAnalyzer(pb.start());
-    output.shouldNotContain("warning");
+    output.shouldNotContain("deprecated");
     output.shouldNotContain("error");
     output.shouldHaveExitValue(0);
   }
--- a/test/gc/startup_warnings/TestParNewCMS.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/test/gc/startup_warnings/TestParNewCMS.java	Tue Nov 05 17:38:04 2013 -0800
@@ -38,7 +38,7 @@
   public static void main(String args[]) throws Exception {
     ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseParNewGC", "-XX:+UseConcMarkSweepGC", "-version");
     OutputAnalyzer output = new OutputAnalyzer(pb.start());
-    output.shouldNotContain("warning");
+    output.shouldNotContain("deprecated");
     output.shouldNotContain("error");
     output.shouldHaveExitValue(0);
   }
--- a/test/gc/startup_warnings/TestParallelGC.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/test/gc/startup_warnings/TestParallelGC.java	Tue Nov 05 17:38:04 2013 -0800
@@ -38,7 +38,7 @@
   public static void main(String args[]) throws Exception {
     ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseParallelGC", "-version");
     OutputAnalyzer output = new OutputAnalyzer(pb.start());
-    output.shouldNotContain("warning");
+    output.shouldNotContain("deprecated");
     output.shouldNotContain("error");
     output.shouldHaveExitValue(0);
   }
--- a/test/gc/startup_warnings/TestParallelScavengeSerialOld.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/test/gc/startup_warnings/TestParallelScavengeSerialOld.java	Tue Nov 05 17:38:04 2013 -0800
@@ -38,7 +38,7 @@
   public static void main(String args[]) throws Exception {
     ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseParallelGC", "-XX:-UseParallelOldGC", "-version");
     OutputAnalyzer output = new OutputAnalyzer(pb.start());
-    output.shouldNotContain("warning");
+    output.shouldNotContain("deprecated");
     output.shouldNotContain("error");
     output.shouldHaveExitValue(0);
   }
--- a/test/gc/startup_warnings/TestSerialGC.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/test/gc/startup_warnings/TestSerialGC.java	Tue Nov 05 17:38:04 2013 -0800
@@ -38,7 +38,7 @@
   public static void main(String args[]) throws Exception {
     ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseSerialGC", "-version");
     OutputAnalyzer output = new OutputAnalyzer(pb.start());
-    output.shouldNotContain("warning");
+    output.shouldNotContain("deprecated");
     output.shouldNotContain("error");
     output.shouldHaveExitValue(0);
   }
--- a/test/runtime/6878713/Test6878713.sh	Wed Oct 16 10:52:41 2013 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,137 +0,0 @@
-#!/bin/sh
-
-# 
-#  Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
-#  DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-# 
-#  This code is free software; you can redistribute it and/or modify it
-#  under the terms of the GNU General Public License version 2 only, as
-#  published by the Free Software Foundation.
-# 
-#  This code is distributed in the hope that it will be useful, but WITHOUT
-#  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-#  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-#  version 2 for more details (a copy is included in the LICENSE file that
-#  accompanied this code).
-# 
-#  You should have received a copy of the GNU General Public License version
-#  2 along with this work; if not, write to the Free Software Foundation,
-#  Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-# 
-#  Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-#  or visit www.oracle.com if you need additional information or have any
-#  questions.
-# 
-
- 
-
-##
-## @test
-## @bug 6878713
-## @bug 7030610
-## @bug 7037122
-## @bug 7123945
-## @summary Verifier heap corruption, relating to backward jsrs
-## @run shell Test6878713.sh
-##
-## some tests require path to find test source dir
-if [ "${TESTSRC}" = "" ]
-then
-  TESTSRC=${PWD}
-  echo "TESTSRC not set.  Using "${TESTSRC}" as default"
-fi
-echo "TESTSRC=${TESTSRC}"
-## Adding common setup Variables for running shell tests.
-. ${TESTSRC}/../../test_env.sh
-
-TARGET_CLASS=OOMCrashClass1960_2
-
-echo "INFO: extracting the target class."
-${COMPILEJAVA}${FS}bin${FS}jar xvf \
-    ${TESTSRC}${FS}testcase.jar ${TARGET_CLASS}.class
-
-# remove any hs_err_pid that might exist here
-rm -f hs_err_pid*.log
-
-echo "INFO: checking for 32-bit versus 64-bit VM."
-${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} -version 2>&1 \
-    | grep "64-Bit [^ ][^ ]* VM" > /dev/null 2>&1
-status="$?"
-if [ "$status" = 0 ]; then
-    echo "INFO: testing a 64-bit VM."
-    is_64_bit=true
-else
-    echo "INFO: testing a 32-bit VM."
-fi
-
-if [ "$is_64_bit" = true ]; then
-    # limit is 768MB in 8-byte words (1024 * 1024 * 768 / 8) == 100663296
-    MALLOC_MAX=100663296
-else
-    # limit is 768MB in 4-byte words (1024 * 1024 * 768 / 4) == 201326592
-    MALLOC_MAX=201326592
-fi
-echo "INFO: MALLOC_MAX=$MALLOC_MAX"
-
-echo "INFO: executing the target class."
-# -XX:+PrintCommandLineFlags for debugging purposes
-# -XX:+IgnoreUnrecognizedVMOptions so test will run on a VM without
-#     the new -XX:MallocMaxTestWords option
-# -XX:+UnlockDiagnosticVMOptions so we can use -XX:MallocMaxTestWords
-# -XX:MallocMaxTestWords limits malloc to $MALLOC_MAX
-${TESTJAVA}${FS}bin${FS}java \
-    -XX:+PrintCommandLineFlags \
-    -XX:+IgnoreUnrecognizedVMOptions \
-    -XX:+UnlockDiagnosticVMOptions \
-    -XX:MallocMaxTestWords=$MALLOC_MAX \
-    ${TESTVMOPTS} ${TARGET_CLASS} > test.out 2>&1
-
-echo "INFO: begin contents of test.out:"
-cat test.out
-echo "INFO: end contents of test.out."
-
-echo "INFO: checking for memory allocation error message."
-# We are looking for this specific memory allocation failure mesg so
-# we know we exercised the right allocation path with the test class:
-MESG1="Native memory allocation (malloc) failed to allocate 25696531[0-9][0-9] bytes"
-grep "$MESG1" test.out
-status="$?"
-if [ "$status" = 0 ]; then
-    echo "INFO: found expected memory allocation error message."
-else
-    echo "INFO: did not find expected memory allocation error message."
-
-    # If we didn't find MESG1 above, then there are several scenarios:
-    # 1) -XX:MallocMaxTestWords is not supported by the current VM and we
-    #    didn't fail TARGET_CLASS's memory allocation attempt; instead
-    #    we failed to find TARGET_CLASS's main() method. The TARGET_CLASS
-    #    is designed to provoke a memory allocation failure during class
-    #    loading; we actually don't care about running the class which is
-    #    why it doesn't have a main() method.
-    # 2) we failed a memory allocation, but not the one we were looking
-    #    so it might be that TARGET_CLASS no longer tickles the same
-    #    memory allocation code path
-    # 3) TARGET_CLASS reproduces the failure mode (SIGSEGV) fixed by
-    #    6878713 because the test is running on a pre-fix VM.
-    echo "INFO: checking for no main() method message."
-    MESG2="Error: Main method not found in class"
-    grep "$MESG2" test.out
-    status="$?"
-    if [ "$status" = 0 ]; then
-        echo "INFO: found no main() method message."
-    else
-        echo "FAIL: did not find no main() method message."
-        # status is non-zero for exit below
-
-        if [ -s hs_err_pid*.log ]; then
-            echo "INFO: begin contents of hs_err_pid file:"
-            cat hs_err_pid*.log
-            echo "INFO: end contents of hs_err_pid file."
-        fi
-    fi
-fi
-
-if [ "$status" = 0 ]; then
-    echo "PASS: test found one of the expected messages."
-fi
-exit "$status"
Binary file test/runtime/6878713/testcase.jar has changed
--- a/test/runtime/6888954/vmerrors.sh	Wed Oct 16 10:52:41 2013 +0200
+++ b/test/runtime/6888954/vmerrors.sh	Tue Nov 05 17:38:04 2013 -0800
@@ -1,3 +1,25 @@
+# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+
 # @test
 # @bug 6888954
 # @bug 8015884
@@ -63,6 +85,7 @@
     [ $i -lt 10 ] && i2=0$i
 
     "$TESTJAVA/bin/java" $TESTVMOPTS -XX:+IgnoreUnrecognizedVMOptions \
+        -XX:-TransmitErrorReport \
         -XX:ErrorHandlerTest=${i} -version > ${i2}.out 2>&1
 
     # If ErrorHandlerTest is ignored (product build), stop.
--- a/test/runtime/7020373/Test7020373.sh	Wed Oct 16 10:52:41 2013 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,43 +0,0 @@
-#!/bin/sh
-
-##
-## @test
-## @bug 7020373 7055247 7053586 7185550
-## @key cte_test
-## @summary JSR rewriting can overflow memory address size variables
-## @ignore Ignore it as 7053586 test uses lots of memory. See bug report for detail.
-## @run shell Test7020373.sh
-##
-
-if [ "${TESTSRC}" = "" ]
-then
-  TESTSRC=${PWD}
-  echo "TESTSRC not set.  Using "${TESTSRC}" as default"
-fi
-echo "TESTSRC=${TESTSRC}"
-## Adding common setup Variables for running shell tests.
-. ${TESTSRC}/../../test_env.sh
-
-${COMPILEJAVA}${FS}bin${FS}jar xvf ${TESTSRC}${FS}testcase.jar
-
-${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} OOMCrashClass4000_1 > test.out 2>&1
-
-cat test.out
-
-egrep "SIGSEGV|An unexpected error has been detected" test.out
-
-if [ $? = 0 ]
-then
-    echo "Test Failed"
-    exit 1
-else
-    egrep "java.lang.LinkageError|java.lang.NoSuchMethodError|Main method not found in class OOMCrashClass4000_1|insufficient memory" test.out
-    if [ $? = 0 ]
-    then
-        echo "Test Passed"
-        exit 0
-    else
-        echo "Test Failed"
-        exit 1
-    fi
-fi
Binary file test/runtime/7020373/testcase.jar has changed
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/8024804/RegisterNatives.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8024804
+ * @summary registerNatives() interface resolution should receive IAE
+ * @run main RegisterNatives
+ */
+public class RegisterNatives {
+  interface I { void registerNatives(); }
+  interface J extends I {}
+  static class B implements J { public void registerNatives() { System.out.println("B"); } }
+  public static void main(String... args) {
+    System.out.println("Regression test for JDK-8024804, crash when InterfaceMethodref resolves to Object.registerNatives\n");
+    J val = new B();
+    try {
+      val.registerNatives();
+    } catch (IllegalAccessError e) {
+      System.out.println("TEST PASSES - according to current JVM spec, IAE expected\n");
+      return;
+    }
+    System.out.println("TEST FAILS - no IAE resulted\n");
+    System.exit(1);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/8026365/InvokeSpecialAnonTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8026365
+ * @summary Test invokespecial of host class method from an anonymous class
+ * @author  Robert Field
+ * @library /testlibrary
+ * @compile -XDignore.symbol.file InvokeSpecialAnonTest.java
+ * @run main ClassFileInstaller InvokeSpecialAnonTest AnonTester
+ * @run main/othervm -Xbootclasspath/a:. -Xverify:all InvokeSpecialAnonTest
+ */
+import jdk.internal.org.objectweb.asm.*;
+import java.lang.reflect.Constructor;
+import sun.misc.Unsafe;
+
+public class InvokeSpecialAnonTest implements Opcodes {
+
+    static byte[] anonClassBytes() throws Exception {
+        ClassWriter cw = new ClassWriter(0);
+        MethodVisitor mv;
+
+        cw.visit(V1_8, ACC_FINAL + ACC_SUPER, "Anon", null, "java/lang/Object", null);
+
+        {
+            mv = cw.visitMethod(ACC_PUBLIC, "<init>", "()V", null, null);
+            mv.visitCode();
+            mv.visitVarInsn(ALOAD, 0);
+            mv.visitMethodInsn(INVOKESPECIAL, "java/lang/Object", "<init>", "()V");
+            mv.visitInsn(RETURN);
+            mv.visitMaxs(2, 2);
+            mv.visitEnd();
+        }
+        {
+            mv = cw.visitMethod(ACC_PUBLIC, "m", "(LInvokeSpecialAnonTest;)I", null, null);
+            mv.visitCode();
+            mv.visitVarInsn(ALOAD, 0);
+            mv.visitVarInsn(ALOAD, 1);
+            mv.visitMethodInsn(INVOKESPECIAL, "InvokeSpecialAnonTest", "privMethod", "()I");
+            mv.visitInsn(IRETURN);
+            mv.visitMaxs(2, 3);
+            mv.visitEnd();
+        }
+        cw.visitEnd();
+
+        return cw.toByteArray();
+    }
+
+    private int privMethod() { return 1234; }
+
+    public static void main(String[] args) throws Exception {
+        Class<?> klass = InvokeSpecialAnonTest.class;
+        try {
+           Class<?> result = AnonTester.defineTest(klass, anonClassBytes());
+           System.out.println("Passed.");
+        } catch (Exception e) {
+           e.printStackTrace();
+           throw e;
+        }
+    }
+}
+
+
+class AnonTester {
+    private static final Unsafe UNSAFE = Unsafe.getUnsafe();
+
+    public static Class<?> defineTest(Class<?> targetClass, byte[] classBytes) throws Exception {
+        return UNSAFE.defineAnonymousClass(targetClass, classBytes, null);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/8026394/InterfaceObjectTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8026394
+ * @summary clone() and finalize() interface resolution should not receive IAE
+ * @run main InterfaceObjectTest
+ */
+interface IClone extends Cloneable {
+    void finalize() throws Throwable;
+    Object clone();
+}
+
+interface ICloneExtend extends IClone { }
+
+public class InterfaceObjectTest implements ICloneExtend {
+
+    public Object clone() {
+        System.out.println("In InterfaceObjectTest's clone() method\n");
+        return null;
+    }
+
+    public void finalize() throws Throwable {
+        try {
+            System.out.println("In InterfaceObjectTest's finalize() method\n");
+        } catch (Throwable t) {
+            throw new AssertionError(t);
+        }
+    }
+
+    public static void tryIt(ICloneExtend o1) {
+        try {
+            Object o2 = o1.clone();
+            o1.finalize();
+        } catch (Throwable t) {
+            if (t instanceof IllegalAccessError) {
+                System.out.println("TEST FAILS - IAE resulted\n");
+                System.exit(1);
+            }
+        }
+    }
+
+    public static void main(String[] args) {
+        InterfaceObjectTest o1 = new InterfaceObjectTest();
+        tryIt(o1);
+        System.out.println("TEST PASSES - no IAE resulted\n");
+    }
+}
--- a/test/runtime/CDSCompressedKPtrs/CDSCompressedKPtrs.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/test/runtime/CDSCompressedKPtrs/CDSCompressedKPtrs.java	Tue Nov 05 17:38:04 2013 -0800
@@ -24,7 +24,7 @@
 /*
  * @test
  * @bug 8003424
- * @summary Testing UseCompressedKlassPointers with CDS
+ * @summary Testing UseCompressedClassPointers with CDS
  * @library /testlibrary
  * @run main CDSCompressedKPtrs
  */
@@ -36,7 +36,7 @@
     ProcessBuilder pb;
     if (Platform.is64bit()) {
       pb = ProcessTools.createJavaProcessBuilder(
-        "-XX:+UseCompressedKlassPointers", "-XX:+UseCompressedOops",
+        "-XX:+UseCompressedClassPointers", "-XX:+UseCompressedOops",
         "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
       OutputAnalyzer output = new OutputAnalyzer(pb.start());
       try {
@@ -44,16 +44,15 @@
         output.shouldHaveExitValue(0);
 
         pb = ProcessTools.createJavaProcessBuilder(
-          "-XX:+UseCompressedKlassPointers", "-XX:+UseCompressedOops",
+          "-XX:+UseCompressedClassPointers", "-XX:+UseCompressedOops",
           "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:on", "-version");
         output = new OutputAnalyzer(pb.start());
         output.shouldContain("sharing");
         output.shouldHaveExitValue(0);
 
       } catch (RuntimeException e) {
-        // Report 'passed' if CDS was turned off because we could not allocate
-        // the klass metaspace at an address that would work with CDS.
-        output.shouldContain("Could not allocate metaspace at a compatible address");
+        // Report 'passed' if CDS was turned off.
+        output.shouldContain("Unable to use shared archive");
         output.shouldHaveExitValue(1);
       }
     }
--- a/test/runtime/CDSCompressedKPtrs/CDSCompressedKPtrsError.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/test/runtime/CDSCompressedKPtrs/CDSCompressedKPtrsError.java	Tue Nov 05 17:38:04 2013 -0800
@@ -24,7 +24,7 @@
 /*
  * @test
  * @bug 8003424
- * @summary Test that cannot use CDS if UseCompressedKlassPointers is turned off.
+ * @summary Test that cannot use CDS if UseCompressedClassPointers is turned off.
  * @library /testlibrary
  * @run main CDSCompressedKPtrsError
  */
@@ -36,7 +36,7 @@
     ProcessBuilder pb;
     if (Platform.is64bit()) {
       pb = ProcessTools.createJavaProcessBuilder(
-        "-XX:+UseCompressedOops", "-XX:+UseCompressedKlassPointers", "-XX:+UnlockDiagnosticVMOptions",
+        "-XX:+UseCompressedOops", "-XX:+UseCompressedClassPointers", "-XX:+UnlockDiagnosticVMOptions",
         "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
       OutputAnalyzer output = new OutputAnalyzer(pb.start());
       try {
@@ -44,21 +44,21 @@
         output.shouldHaveExitValue(0);
 
         pb = ProcessTools.createJavaProcessBuilder(
-          "-XX:-UseCompressedKlassPointers", "-XX:-UseCompressedOops",
+          "-XX:-UseCompressedClassPointers", "-XX:-UseCompressedOops",
           "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:on", "-version");
         output = new OutputAnalyzer(pb.start());
         output.shouldContain("Unable to use shared archive");
         output.shouldHaveExitValue(0);
 
         pb = ProcessTools.createJavaProcessBuilder(
-          "-XX:-UseCompressedKlassPointers", "-XX:+UseCompressedOops",
+          "-XX:-UseCompressedClassPointers", "-XX:+UseCompressedOops",
           "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:on", "-version");
         output = new OutputAnalyzer(pb.start());
         output.shouldContain("Unable to use shared archive");
         output.shouldHaveExitValue(0);
 
         pb = ProcessTools.createJavaProcessBuilder(
-          "-XX:+UseCompressedKlassPointers", "-XX:-UseCompressedOops",
+          "-XX:+UseCompressedClassPointers", "-XX:-UseCompressedOops",
           "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:on", "-version");
         output = new OutputAnalyzer(pb.start());
         output.shouldContain("Unable to use shared archive");
@@ -71,19 +71,19 @@
 
       // Test bad options with -Xshare:dump.
       pb = ProcessTools.createJavaProcessBuilder(
-        "-XX:-UseCompressedOops", "-XX:+UseCompressedKlassPointers", "-XX:+UnlockDiagnosticVMOptions",
+        "-XX:-UseCompressedOops", "-XX:+UseCompressedClassPointers", "-XX:+UnlockDiagnosticVMOptions",
         "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
       output = new OutputAnalyzer(pb.start());
       output.shouldContain("Cannot dump shared archive");
 
       pb = ProcessTools.createJavaProcessBuilder(
-        "-XX:+UseCompressedOops", "-XX:-UseCompressedKlassPointers", "-XX:+UnlockDiagnosticVMOptions",
+        "-XX:+UseCompressedOops", "-XX:-UseCompressedClassPointers", "-XX:+UnlockDiagnosticVMOptions",
         "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
       output = new OutputAnalyzer(pb.start());
       output.shouldContain("Cannot dump shared archive");
 
       pb = ProcessTools.createJavaProcessBuilder(
-        "-XX:-UseCompressedOops", "-XX:-UseCompressedKlassPointers", "-XX:+UnlockDiagnosticVMOptions",
+        "-XX:-UseCompressedOops", "-XX:-UseCompressedClassPointers", "-XX:+UnlockDiagnosticVMOptions",
         "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
       output = new OutputAnalyzer(pb.start());
       output.shouldContain("Cannot dump shared archive");
--- a/test/runtime/CDSCompressedKPtrs/XShareAuto.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/test/runtime/CDSCompressedKPtrs/XShareAuto.java	Tue Nov 05 17:38:04 2013 -0800
@@ -33,16 +33,9 @@
 
 public class XShareAuto {
     public static void main(String[] args) throws Exception {
-        if (!Platform.is64bit()) {
-            System.out.println("ObjectAlignmentInBytes for CDS is only " +
-                "supported on 64bit platforms; this plaform is " +
-                System.getProperty("sun.arch.data.model"));
-            System.out.println("Skipping the test");
-            return;
-        }
         ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
-            "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa",
-            "-Xshare:dump");
+            "-server", "-XX:+UnlockDiagnosticVMOptions",
+            "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
         OutputAnalyzer output = new OutputAnalyzer(pb.start());
         output.shouldContain("Loading classes to share");
         output.shouldHaveExitValue(0);
@@ -69,7 +62,7 @@
                 "-server", "-Xshare:on", "-XX:+UnlockDiagnosticVMOptions",
                 "-XX:SharedArchiveFile=./sample.jsa", "-version");
             output = new OutputAnalyzer(pb.start());
-            output.shouldContain("Could not allocate metaspace at a compatible address");
+            output.shouldContain("Unable to use shared archive");
             output.shouldHaveExitValue(1);
         }
     }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/ClassFile/JsrRewriting.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+
+/*
+ * @test JsrRewriting
+ * @summary JSR (jump to local subroutine)
+ *      rewriting can overflow memory address size variables
+ * @bug 7020373
+ * @bug 7055247
+ * @bug 7053586
+ * @bug 7185550
+ * @bug 7149464
+ * @key cte_test
+ * @library /testlibrary
+ * @run main JsrRewriting
+ */
+
+import com.oracle.java.testlibrary.*;
+import java.io.File;
+
+public class JsrRewriting {
+
+    public static void main(String[] args) throws Exception {
+
+        // ======= Configure the test
+        String jarFile = System.getProperty("test.src") +
+            File.separator + "JsrRewritingTestCase.jar";
+        String className = "OOMCrashClass4000_1";
+
+        // limit is 768MB in native words
+        int mallocMaxTestWords = (1024 * 1024 * 768 / 4);
+        if (Platform.is64bit())
+            mallocMaxTestWords = (mallocMaxTestWords / 2);
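+        // Worked out: 768MB / 4-byte words = 201,326,592 words on 32-bit; on
+        // 64-bit a word is 8 bytes, so the count is halved to keep the same
+        // 768MB byte budget.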
+
+        // ======= extract the test class
+        ProcessBuilder pb = new ProcessBuilder(new String[] {
+            JDKToolFinder.getJDKTool("jar"),
+            "xvf", jarFile } );
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldHaveExitValue(0);
+
+        // ======= execute the test
+        pb = ProcessTools.createJavaProcessBuilder(
+            "-cp", ".",
+            "-XX:+UnlockDiagnosticVMOptions",
+            "-XX:MallocMaxTestWords=" + mallocMaxTestWords,
+            className);
+
+        output = new OutputAnalyzer(pb.start());
+        String[] expectedMsgs = {
+            "java.lang.LinkageError",
+            "java.lang.NoSuchMethodError",
+            "Main method not found in class " + className,
+            "insufficient memory"
+        };
+
+        multipleOrMatch(output, expectedMsgs);
+    }
+
+    private static void multipleOrMatch(OutputAnalyzer analyzer, String[] whatToMatch) {
+        String output = analyzer.getOutput();
+
+        for (String expected : whatToMatch)
+            if (output.contains(expected))
+                return;
+
+        String err =
+            " stdout: [" + analyzer.getOutput() + "];\n" +
+            " exitValue = " + analyzer.getExitValue() + "\n";
+        System.err.println(err);
+
+        StringBuilder msg = new StringBuilder("Output did not contain " +
+            "any of the following expected messages: \n");
+        for (String expected : whatToMatch)
+            msg.append(expected).append(System.lineSeparator());
+        throw new RuntimeException(msg.toString());
+    }
+}
+
Binary file test/runtime/ClassFile/JsrRewritingTestCase.jar has changed
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/ClassFile/OomWhileParsingRepeatedJsr.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+
+/*
+ * @test OomWhileParsingRepeatedJsr
+ * @summary Testing the class file parser; specifically, parsing
+ *          a file with a repeated JSR (jump to local subroutine)
+ *          bytecode instruction.
+ * @bug 6878713
+ * @bug 7030610
+ * @bug 7037122
+ * @bug 7123945
+ * @bug 8016029
+ * @library /testlibrary
+ * @run main OomWhileParsingRepeatedJsr
+ */
+
+import com.oracle.java.testlibrary.*;
+
+
+public class OomWhileParsingRepeatedJsr {
+
+    public static void main(String[] args) throws Exception {
+
+        // ======= Configure the test
+        String jarFile = System.getProperty("test.src") + "/testcase.jar";
+        String className = "OOMCrashClass1960_2";
+
+        // limit is 768MB in native words
+        int mallocMaxTestWords = (1024 * 1024 * 768 / 4);
+        if (Platform.is64bit())
+            mallocMaxTestWords = (mallocMaxTestWords / 2);
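+        // As in JsrRewriting.java: divide by the 4-byte word size, then halve
+        // on 64-bit where words are twice as large.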
+
+        // ======= extract the test class
+        ProcessBuilder pb = new ProcessBuilder(new String[] {
+            JDKToolFinder.getJDKTool("jar"),
+            "xvf", jarFile } );
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldHaveExitValue(0);
+
+        // ======= execute the test
+        pb = ProcessTools.createJavaProcessBuilder(
+            "-cp", ".",
+            "-XX:+UnlockDiagnosticVMOptions",
+            "-XX:MallocMaxTestWords=" + mallocMaxTestWords,
+            className );
+
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Cannot reserve enough memory");
+    }
+}
+
Binary file test/runtime/ClassFile/testcase.jar has changed
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/CommandLine/PrintGCApplicationConcurrentTime.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+ /*
+ * @test
+ * @bug 8026041
+ * @run main/othervm -XX:+PrintGCApplicationConcurrentTime -Xcomp PrintGCApplicationConcurrentTime
+ */
+
+public class PrintGCApplicationConcurrentTime {
+
+  public static void main(String args[]) throws Exception {
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/CompressedOops/CompressedClassPointers.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8024927
+ * @summary Testing the address of the compressed class pointer space as thoroughly as possible.
+ * @library /testlibrary
+ */
+
+import com.oracle.java.testlibrary.*;
+
+public class CompressedClassPointers {
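+    // The tests below parse -XX:+PrintCompressedOopsMode output: a narrow
+    // klass base of 0x0 means the class space was reserved low enough for
+    // zero-based encoding, and a shift of 0 means klass offsets are unscaled.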
+
+    public static void smallHeapTest() throws Exception {
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+            "-XX:+UnlockDiagnosticVMOptions",
+            "-XX:SharedBaseAddress=8g",
+            "-Xmx128m",
+            "-XX:+PrintCompressedOopsMode",
+            "-XX:+VerifyBeforeGC", "-version");
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Narrow klass base: 0x0000000000000000");
+        output.shouldHaveExitValue(0);
+    }
+
+    public static void smallHeapTestWith3G() throws Exception {
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+            "-XX:+UnlockDiagnosticVMOptions",
+            "-XX:CompressedClassSpaceSize=3g",
+            "-Xmx128m",
+            "-XX:+PrintCompressedOopsMode",
+            "-XX:+VerifyBeforeGC", "-version");
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Narrow klass base: 0x0000000000000000, Narrow klass shift: 3");
+        output.shouldHaveExitValue(0);
+    }
+
+    public static void largeHeapTest() throws Exception {
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+            "-XX:+UnlockDiagnosticVMOptions",
+            "-Xmx30g",
+            "-XX:+PrintCompressedOopsMode",
+            "-XX:+VerifyBeforeGC", "-version");
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldNotContain("Narrow klass base: 0x0000000000000000");
+        output.shouldContain("Narrow klass shift: 0");
+        output.shouldHaveExitValue(0);
+    }
+
+    public static void largePagesTest() throws Exception {
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+            "-XX:+UnlockDiagnosticVMOptions",
+            "-Xmx128m",
+            "-XX:+UseLargePages",
+            "-XX:+PrintCompressedOopsMode",
+            "-XX:+VerifyBeforeGC", "-version");
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Narrow klass base:");
+        output.shouldHaveExitValue(0);
+    }
+
+    public static void sharingTest() throws Exception {
+        // Test small heaps
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+            "-XX:+UnlockDiagnosticVMOptions",
+            "-XX:SharedArchiveFile=./sample.jsa",
+            "-Xmx128m",
+            "-XX:SharedBaseAddress=8g",
+            "-XX:+PrintCompressedOopsMode",
+            "-XX:+VerifyBeforeGC",
+            "-Xshare:dump");
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        try {
+          output.shouldContain("Loading classes to share");
+          output.shouldHaveExitValue(0);
+
+          pb = ProcessTools.createJavaProcessBuilder(
+            "-XX:+UnlockDiagnosticVMOptions",
+            "-XX:SharedArchiveFile=./sample.jsa",
+            "-Xmx128m",
+            "-XX:SharedBaseAddress=8g",
+            "-XX:+PrintCompressedOopsMode",
+            "-Xshare:on",
+            "-version");
+          output = new OutputAnalyzer(pb.start());
+          output.shouldContain("sharing");
+          output.shouldHaveExitValue(0);
+
+        } catch (RuntimeException e) {
+          output.shouldContain("Unable to use shared archive");
+          output.shouldHaveExitValue(1);
+        }
+    }
+
+  public static void main(String[] args) throws Exception {
+      if (!Platform.is64bit()) {
+          // Can't test this on 32 bit, just pass
+          System.out.println("Skipping test on 32bit");
+          return;
+      }
+      // Solaris 10 can't mmap compressed oops space without a base
+      if (Platform.isSolaris()) {
+           String name = System.getProperty("os.version");
+           if (name.equals("5.10")) {
+               System.out.println("Skipping test on Solaris 10");
+               return;
+           }
+      }
+      smallHeapTest();
+      smallHeapTestWith3G();
+      largeHeapTest();
+      largePagesTest();
+      sharingTest();
+  }
+}
--- a/test/runtime/CompressedOops/CompressedKlassPointerAndOops.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/test/runtime/CompressedOops/CompressedKlassPointerAndOops.java	Tue Nov 05 17:38:04 2013 -0800
@@ -25,7 +25,7 @@
  * @test
  * @bug 8000968
  * @key regression
- * @summary NPG: UseCompressedKlassPointers asserts with ObjectAlignmentInBytes=32
+ * @summary NPG: UseCompressedClassPointers asserts with ObjectAlignmentInBytes=32
  * @library /testlibrary
  */
 
@@ -52,7 +52,7 @@
         OutputAnalyzer output;
 
         pb = ProcessTools.createJavaProcessBuilder(
-            "-XX:+UseCompressedKlassPointers",
+            "-XX:+UseCompressedClassPointers",
             "-XX:+UseCompressedOops",
             "-XX:ObjectAlignmentInBytes=" + alignment,
             "-version");
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/InitialThreadOverflow/DoOverflow.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+public class DoOverflow {
+
+    static int count;
+
+    public void overflow() {
+        count+=1;
+        overflow();
+    }
+
+    public static void printIt() {
+        System.out.println("Going to overflow stack");
+        try {
+            new DoOverflow().overflow();
+        } catch(java.lang.StackOverflowError e) {
+            System.out.println("Overflow OK " + count);
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/InitialThreadOverflow/invoke.cxx	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include <assert.h>
+#include <jni.h>
+
+#include <pthread.h>
+
+JavaVM* jvm;
+
+void *
+floobydust (void *p) {
+  JNIEnv *env;
+
+  jvm->AttachCurrentThread((void**)&env, NULL);
+
+  jclass class_id = env->FindClass ("DoOverflow");
+  assert (class_id);
+
+  jmethodID method_id = env->GetStaticMethodID(class_id, "printIt", "()V");
+  assert (method_id);
+
+  env->CallStaticVoidMethod(class_id, method_id, NULL);
+
+  jvm->DetachCurrentThread();
+
+  // A pthread start routine must return a void*; the result is never
+  // inspected, so NULL will do.
+  return NULL;
+}
+
+int
+main (int argc, const char** argv) {
+  JavaVMOption options[1];
+  options[0].optionString = (char*) "-Xss320k";
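+  // -Xss320k keeps Java stacks small so DoOverflow reaches StackOverflowError
+  // quickly on both the attached pthread and the initial thread.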
+
+  JavaVMInitArgs vm_args;
+  vm_args.version = JNI_VERSION_1_2;
+  vm_args.ignoreUnrecognized = JNI_TRUE;
+  vm_args.options = options;
+  vm_args.nOptions = 1;
+
+  JNIEnv* env;
+  jint result = JNI_CreateJavaVM(&jvm, (void**)&env, &vm_args);
+  assert(result >= 0);
+
+  pthread_t thr;
+  pthread_create(&thr, NULL, floobydust, NULL);
+  pthread_join(thr, NULL);
+
+  floobydust(NULL);
+
+  return 0;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/InitialThreadOverflow/testme.sh	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,73 @@
+#!/bin/sh
+
+# Copyright (c) 2013 Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+
+# @test testme.sh
+# @bug 8009062
+# @summary Poor performance of JNI AttachCurrentThread after fix for 7017193
+# @compile DoOverflow.java
+# @run shell testme.sh
+
+set -x
+if [ "${TESTSRC}" = "" ]
+then
+  TESTSRC=${PWD}
+  echo "TESTSRC not set.  Using "${TESTSRC}" as default"
+fi
+echo "TESTSRC=${TESTSRC}"
+## Adding common setup Variables for running shell tests.
+. ${TESTSRC}/../../test_env.sh
+
+if [ "${VM_OS}" != "linux" ]
+then
+  echo "Test only valid for Linux"
+  exit 0
+fi
+
+gcc_cmd=`which g++`
+if [ "x$gcc_cmd" = "x" ]; then
+    echo "WARNING: g++ not found. Cannot execute test." 2>&1
+    exit 0;
+fi
+
+CFLAGS="-m${VM_BITS}"
+
+LD_LIBRARY_PATH=.:${COMPILEJAVA}/jre/lib/${VM_CPU}/${VM_TYPE}:/usr/lib:$LD_LIBRARY_PATH
+export LD_LIBRARY_PATH
+
+cp ${TESTSRC}${FS}invoke.cxx .
+
+# Copy the result of our @compile action:
+cp ${TESTCLASSES}${FS}DoOverflow.class .
+
+echo "Compilation flag: ${COMP_FLAG}"
+# Note pthread may not be found thus invoke creation will fail to be created.
+# Check to ensure you have a /usr/lib/libpthread.so if you don't please look
+# for /usr/lib/`uname -m`-linux-gnu version ensure to add that path to below compilation.
+
+$gcc_cmd -DLINUX ${CFLAGS} -o invoke \
+    -I${COMPILEJAVA}/include -I${COMPILEJAVA}/include/linux \
+    -L${COMPILEJAVA}/jre/lib/${VM_CPU}/${VM_TYPE} \
+    -ljvm -lpthread invoke.cxx
+
+./invoke
+exit $?
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/LoadClass/LoadClassNegative.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @key regression
+ * @bug 8020675
+ * @summary make sure there is no fatal error if a class is loaded from an invalid jar file which is in the bootclasspath
+ * @library /testlibrary
+ * @build TestForName
+ * @build LoadClassNegative
+ * @run main LoadClassNegative
+ */
+
+import java.io.File;
+import com.oracle.java.testlibrary.*;
+
+public class LoadClassNegative {
+
+  public static void main(String args[]) throws Exception {
+    String bootCP = "-Xbootclasspath/a:" + System.getProperty("test.src")
+                       + File.separator + "dummy.jar";
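+    // dummy.jar is intentionally not a valid jar file; loading a class through
+    // it from the boot class path must end in ClassNotFoundException rather
+    // than a VM fatal error.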
+    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+        bootCP,
+        "TestForName");
+
+    OutputAnalyzer output = new OutputAnalyzer(pb.start());
+    output.shouldContain("ClassNotFoundException");
+    output.shouldHaveExitValue(0);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/LoadClass/TestForName.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+public class TestForName {
+    public static void main(String[] args) {
+        try {
+            Class cls = Class.forName("xxx");
+            System.out.println("Class = " + cls.getName());
+        } catch (ClassNotFoundException cnfe) {
+            cnfe.printStackTrace();
+        }
+    }
+}
--- a/test/runtime/SharedArchiveFile/CdsSameObjectAlignment.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/test/runtime/SharedArchiveFile/CdsSameObjectAlignment.java	Tue Nov 05 17:38:04 2013 -0800
@@ -84,7 +84,7 @@
             // there is a chance such reservation will fail
             // If it does, it is NOT considered a failure of the feature,
             // rather a possible expected outcome, though not likely
-            output.shouldContain("Could not allocate metaspace at a compatible address");
+            output.shouldContain("Unable to use shared archive");
             output.shouldHaveExitValue(1);
         }
     }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/contended/Options.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import com.oracle.java.testlibrary.*;
+
+/*
+ * @test
+ * @bug     8006997
+ * @summary ContendedPaddingWidth should be range-checked
+ *
+ * @library /testlibrary
+ * @run main Options
+ */
+public class Options {
+
+    public static void main(String[] args) throws Exception {
+        ProcessBuilder pb;
+        OutputAnalyzer output;
+
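+        // Taken together, the cases below imply that the accepted values are
+        // multiples of 8 in the range [0, 8192].
+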
+        pb = ProcessTools.createJavaProcessBuilder("-XX:ContendedPaddingWidth=-128", "-version");
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("ContendedPaddingWidth");
+        output.shouldContain("must be in between");
+        output.shouldHaveExitValue(1);
+
+        pb = ProcessTools.createJavaProcessBuilder("-XX:ContendedPaddingWidth=-8", "-version");
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("ContendedPaddingWidth");
+        output.shouldContain("must be in between");
+        output.shouldHaveExitValue(1);
+
+        pb = ProcessTools.createJavaProcessBuilder("-XX:ContendedPaddingWidth=-1", "-version");
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("ContendedPaddingWidth");
+        output.shouldContain("must be in between");
+        output.shouldContain("must be a multiple of 8");
+        output.shouldHaveExitValue(1);
+
+        pb = ProcessTools.createJavaProcessBuilder("-XX:ContendedPaddingWidth=0", "-version");
+        output = new OutputAnalyzer(pb.start());
+        output.shouldHaveExitValue(0);
+
+        pb = ProcessTools.createJavaProcessBuilder("-XX:ContendedPaddingWidth=1", "-version");
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("ContendedPaddingWidth");
+        output.shouldContain("must be a multiple of 8");
+        output.shouldHaveExitValue(1);
+
+        pb = ProcessTools.createJavaProcessBuilder("-XX:ContendedPaddingWidth=8", "-version");
+        output = new OutputAnalyzer(pb.start());
+        output.shouldHaveExitValue(0);
+
+        pb = ProcessTools.createJavaProcessBuilder("-XX:ContendedPaddingWidth=8184", "-version"); // 8192-8 = 8184
+        output = new OutputAnalyzer(pb.start());
+        output.shouldHaveExitValue(0);
+
+        pb = ProcessTools.createJavaProcessBuilder("-XX:ContendedPaddingWidth=8191", "-version");
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("ContendedPaddingWidth");
+        output.shouldContain("must be a multiple of 8");
+        output.shouldHaveExitValue(1);
+
+        pb = ProcessTools.createJavaProcessBuilder("-XX:ContendedPaddingWidth=8192", "-version");
+        output = new OutputAnalyzer(pb.start());
+        output.shouldHaveExitValue(0);
+
+        pb = ProcessTools.createJavaProcessBuilder("-XX:ContendedPaddingWidth=8193", "-version");
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("ContendedPaddingWidth");
+        output.shouldContain("must be in between");
+        output.shouldContain("must be a multiple of 8");
+        output.shouldHaveExitValue(1);
+
+        pb = ProcessTools.createJavaProcessBuilder("-XX:ContendedPaddingWidth=8200", "-version"); // 8192+8 = 8200
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("ContendedPaddingWidth");
+        output.shouldContain("must be in between");
+        output.shouldHaveExitValue(1);
+
+    }
+
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/memory/LargePages/TestLargePagesFlags.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,389 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/* @test TestLargePagesFlags
+ * @summary Tests how large pages are chosen depending on the given large pages flag combinations.
+ * @library /testlibrary
+ * @run main TestLargePagesFlags
+ */
+
+import com.oracle.java.testlibrary.OutputAnalyzer;
+import com.oracle.java.testlibrary.Platform;
+import com.oracle.java.testlibrary.ProcessTools;
+import java.util.ArrayList;
+
+public class TestLargePagesFlags {
+
+  public static void main(String [] args) throws Exception {
+    if (!Platform.isLinux()) {
+      System.out.println("Skipping. TestLargePagesFlags has only been implemented for Linux.");
+      return;
+    }
+
+    testUseTransparentHugePages();
+    testUseHugeTLBFS();
+    testUseSHM();
+    testCombinations();
+  }
+
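+  // Overall precedence exercised below: UseTransparentHugePages wins over
+  // UseHugeTLBFS, which wins over UseSHM, and -XX:-UseLargePages turns them
+  // all off.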
+  public static void testUseTransparentHugePages() throws Exception {
+    if (!canUse(UseTransparentHugePages(true))) {
+      System.out.println("Skipping testUseTransparentHugePages");
+      return;
+    }
+
+    // -XX:-UseLargePages overrides all other flags.
+    new FlagTester()
+      .use(UseLargePages(false),
+           UseTransparentHugePages(true))
+      .expect(
+           UseLargePages(false),
+           UseTransparentHugePages(false),
+           UseHugeTLBFS(false),
+           UseSHM(false));
+
+    // Explicitly turn on UseTransparentHugePages.
+    new FlagTester()
+      .use(UseTransparentHugePages(true))
+      .expect(
+           UseLargePages(true),
+           UseTransparentHugePages(true),
+           UseHugeTLBFS(false),
+           UseSHM(false));
+
+    new FlagTester()
+      .use(UseLargePages(true),
+           UseTransparentHugePages(true))
+      .expect(
+           UseLargePages(true),
+           UseTransparentHugePages(true),
+           UseHugeTLBFS(false),
+           UseSHM(false));
+
+    // Setting a specific large pages flag will turn
+    // off heuristics to choose large pages type.
+    new FlagTester()
+      .use(UseLargePages(true),
+           UseTransparentHugePages(false))
+      .expect(
+           UseLargePages(false),
+           UseTransparentHugePages(false),
+           UseHugeTLBFS(false),
+           UseSHM(false));
+
+    // Don't turn on UseTransparentHugePages
+    // unless the user explicitly asks for them.
+    new FlagTester()
+      .use(UseLargePages(true))
+      .expect(
+           UseTransparentHugePages(false));
+  }
+
+  public static void testUseHugeTLBFS() throws Exception {
+    if (!canUse(UseHugeTLBFS(true))) {
+      System.out.println("Skipping testUseHugeTLBFS");
+      return;
+    }
+
+    // -XX:-UseLargePages overrides all other flags.
+    new FlagTester()
+      .use(UseLargePages(false),
+           UseHugeTLBFS(true))
+      .expect(
+           UseLargePages(false),
+           UseTransparentHugePages(false),
+           UseHugeTLBFS(false),
+           UseSHM(false));
+
+    // Explicitly turn on UseHugeTLBFS.
+    new FlagTester()
+      .use(UseHugeTLBFS(true))
+      .expect(
+           UseLargePages(true),
+           UseTransparentHugePages(false),
+           UseHugeTLBFS(true),
+           UseSHM(false));
+
+    new FlagTester()
+      .use(UseLargePages(true),
+           UseHugeTLBFS(true))
+      .expect(
+           UseLargePages(true),
+           UseTransparentHugePages(false),
+           UseHugeTLBFS(true),
+           UseSHM(false));
+
+    // Setting a specific large pages flag will turn
+    // off heuristics to choose large pages type.
+    new FlagTester()
+      .use(UseLargePages(true),
+           UseHugeTLBFS(false))
+      .expect(
+           UseLargePages(false),
+           UseTransparentHugePages(false),
+           UseHugeTLBFS(false),
+           UseSHM(false));
+
+    // Using UseLargePages will default to UseHugeTLBFS large pages.
+    new FlagTester()
+      .use(UseLargePages(true))
+      .expect(
+           UseLargePages(true),
+           UseTransparentHugePages(false),
+           UseHugeTLBFS(true),
+           UseSHM(false));
+  }
+
+  public static void testUseSHM() throws Exception {
+    if (!canUse(UseSHM(true))) {
+      System.out.println("Skipping testUseSHM");
+      return;
+    }
+
+    // -XX:-UseLargePages overrides all other flags.
+    new FlagTester()
+      .use(UseLargePages(false),
+           UseSHM(true))
+      .expect(
+           UseLargePages(false),
+           UseTransparentHugePages(false),
+           UseHugeTLBFS(false),
+           UseSHM(false));
+
+    // Explicitly turn on UseSHM.
+    new FlagTester()
+      .use(UseSHM(true))
+      .expect(
+           UseLargePages(true),
+           UseTransparentHugePages(false),
+           UseHugeTLBFS(false),
+           UseSHM(true));
+
+    new FlagTester()
+      .use(UseLargePages(true),
+           UseSHM(true))
+      .expect(
+           UseLargePages(true),
+           UseTransparentHugePages(false),
+           UseHugeTLBFS(false),
+           UseSHM(true));
+
+    // Setting a specific large pages flag will turn
+    // off heuristics to choose large pages type.
+    new FlagTester()
+      .use(UseLargePages(true),
+           UseSHM(false))
+      .expect(
+           UseLargePages(false),
+           UseTransparentHugePages(false),
+           UseHugeTLBFS(false),
+           UseSHM(false));
+
+    // Setting UseLargePages can allow the system to choose
+    // UseHugeTLBFS instead of UseSHM, but never UseTransparentHugePages.
+    new FlagTester()
+      .use(UseLargePages(true))
+      .expect(
+           UseLargePages(true),
+           UseTransparentHugePages(false));
+  }
+
+  public static void testCombinations() throws Exception {
+    if (!canUse(UseSHM(true)) || !canUse(UseHugeTLBFS(true))) {
+      System.out.println("Skipping testUseHugeTLBFSAndUseSHMCombination");
+      return;
+    }
+
+    // UseHugeTLBFS takes precedence over SHM.
+
+    new FlagTester()
+      .use(UseLargePages(true),
+           UseHugeTLBFS(true),
+           UseSHM(true))
+      .expect(
+           UseLargePages(true),
+           UseTransparentHugePages(false),
+           UseHugeTLBFS(true),
+           UseSHM(false));
+
+    new FlagTester()
+      .use(UseLargePages(true),
+           UseHugeTLBFS(false),
+           UseSHM(true))
+      .expect(
+           UseLargePages(true),
+           UseTransparentHugePages(false),
+           UseHugeTLBFS(false),
+           UseSHM(true));
+
+    new FlagTester()
+      .use(UseLargePages(true),
+           UseHugeTLBFS(true),
+           UseSHM(false))
+      .expect(
+           UseLargePages(true),
+           UseTransparentHugePages(false),
+           UseHugeTLBFS(true),
+           UseSHM(false));
+
+    new FlagTester()
+      .use(UseLargePages(true),
+           UseHugeTLBFS(false),
+           UseSHM(false))
+      .expect(
+           UseLargePages(false),
+           UseTransparentHugePages(false),
+           UseHugeTLBFS(false),
+           UseSHM(false));
+
+
+    if (!canUse(UseTransparentHugePages(true))) {
+      return;
+    }
+
+    // UseTransparentHugePages takes precedence.
+
+    new FlagTester()
+      .use(UseLargePages(true),
+           UseTransparentHugePages(true),
+           UseHugeTLBFS(true),
+           UseSHM(true))
+      .expect(
+           UseLargePages(true),
+           UseTransparentHugePages(true),
+           UseHugeTLBFS(false),
+           UseSHM(false));
+
+    new FlagTester()
+      .use(UseTransparentHugePages(true),
+           UseHugeTLBFS(true),
+           UseSHM(true))
+      .expect(
+           UseLargePages(true),
+           UseTransparentHugePages(true),
+           UseHugeTLBFS(false),
+           UseSHM(false));
+  }
+
+  private static class FlagTester {
+    private Flag [] useFlags;
+
+    public FlagTester use(Flag... useFlags) {
+      this.useFlags = useFlags;
+      return this;
+    }
+
+    public void expect(Flag... expectedFlags) throws Exception {
+      if (useFlags == null) {
+        throw new IllegalStateException("Must run use() before expect()");
+      }
+
+      OutputAnalyzer output = executeNewJVM(useFlags);
+
+      for (Flag flag : expectedFlags) {
+        System.out.println("Looking for: " + flag.flagString());
+        String strValue = output.firstMatch(".* " + flag.name() +  " .* :?= (\\S+).*", 1);
+
+        if (strValue == null) {
+          throw new RuntimeException("Flag " + flag.name() + " couldn't be found");
+        }
+
+        if (!flag.value().equals(strValue)) {
+          throw new RuntimeException("Wrong value for: " + flag.name()
+                                     + " expected: " + flag.value()
+                                     + " got: " + strValue);
+        }
+      }
+
+      output.shouldHaveExitValue(0);
+    }
+  }
+
+  private static OutputAnalyzer executeNewJVM(Flag... flags) throws Exception {
+    ArrayList<String> args = new ArrayList<>();
+    for (Flag flag : flags) {
+      args.add(flag.flagString());
+    }
+    args.add("-XX:+PrintFlagsFinal");
+    args.add("-version");
+
+    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(args.toArray(new String[args.size()]));
+    OutputAnalyzer output = new OutputAnalyzer(pb.start());
+
+    return output;
+  }
+
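+  // Probe whether this large-page flavor works on the current machine: if the
+  // JVM rejects or silently disables the flag, expect() throws and the feature
+  // is treated as unavailable.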
+  private static boolean canUse(Flag flag) {
+    try {
+      new FlagTester().use(flag).expect(flag);
+    } catch (Exception e) {
+      return false;
+    }
+
+    return true;
+  }
+
+  private static Flag UseLargePages(boolean value) {
+    return new BooleanFlag("UseLargePages", value);
+  }
+
+  private static Flag UseTransparentHugePages(boolean value) {
+    return new BooleanFlag("UseTransparentHugePages", value);
+  }
+
+  private static Flag UseHugeTLBFS(boolean value) {
+    return new BooleanFlag("UseHugeTLBFS", value);
+  }
+
+  private static Flag UseSHM(boolean value) {
+    return new BooleanFlag("UseSHM", value);
+  }
+
+  private static class BooleanFlag implements Flag {
+    private String name;
+    private boolean value;
+
+    BooleanFlag(String name, boolean value) {
+      this.name = name;
+      this.value = value;
+    }
+
+    public String flagString() {
+      return "-XX:" + (value ? "+" : "-") + name;
+    }
+
+    public String name() {
+      return name;
+    }
+
+    public String value() {
+      return Boolean.toString(value);
+    }
+  }
+
+  private static interface Flag {
+    public String flagString();
+    public String name();
+    public String value();
+  }
+}
--- a/test/runtime/memory/ReserveMemory.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/test/runtime/memory/ReserveMemory.java	Tue Nov 05 17:38:04 2013 -0800
@@ -56,6 +56,7 @@
           "-Xbootclasspath/a:.",
           "-XX:+UnlockDiagnosticVMOptions",
           "-XX:+WhiteBoxAPI",
+          "-XX:-TransmitErrorReport",
           "ReserveMemory",
           "test");
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/serviceability/sa/jmap-hprof/JMapHProfLargeHeapProc.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.lang.management.ManagementFactory;
+import java.lang.management.RuntimeMXBean;
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.List;
+
+import sun.management.VMManagement;
+
+public class JMapHProfLargeHeapProc {
+    private static final List<byte[]> heapGarbage = new ArrayList<>();
+
+    public static void main(String[] args) throws Exception {
+
+        buildLargeHeap(args);
+
+        // Print our pid on stdout
+        System.out.println("PID[" + getProcessId() + "]");
+
+        // Wait for input before termination
+        System.in.read();
+    }
+
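+    // args[0] is the number of 1 KB arrays to allocate, so the retained heap
+    // grows to roughly args[0] KB.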
+    private static void buildLargeHeap(String[] args) {
+        for (long i = 0; i < Integer.parseInt(args[0]); i++) {
+            heapGarbage.add(new byte[1024]);
+        }
+    }
+
+    public static int getProcessId() throws Exception {
+
+        // Get the current process id using a reflection hack
+        RuntimeMXBean runtime = ManagementFactory.getRuntimeMXBean();
+        Field jvm = runtime.getClass().getDeclaredField("jvm");
+
+        jvm.setAccessible(true);
+        VMManagement mgmt = (sun.management.VMManagement) jvm.get(runtime);
+
+        Method pid_method = mgmt.getClass().getDeclaredMethod("getProcessId");
+
+        pid_method.setAccessible(true);
+
+        int pid = (Integer) pid_method.invoke(mgmt);
+
+        return pid;
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/serviceability/sa/jmap-hprof/JMapHProfLargeHeapTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileReader;
+import java.io.IOException;
+import java.io.Reader;
+import java.nio.CharBuffer;
+import java.util.Arrays;
+import java.util.Scanner;
+
+import com.oracle.java.testlibrary.Asserts;
+import com.oracle.java.testlibrary.JDKToolFinder;
+import com.oracle.java.testlibrary.JDKToolLauncher;
+import com.oracle.java.testlibrary.OutputAnalyzer;
+import com.oracle.java.testlibrary.Platform;
+import com.oracle.java.testlibrary.ProcessTools;
+
+/*
+ * @test
+ * @bug 6313383
+ * @key regression
+ * @summary Regression test for hprof export issue due to large heaps (>2G)
+ * @library /testlibrary
+ * @compile JMapHProfLargeHeapProc.java
+ * @run main JMapHProfLargeHeapTest
+ */
+
+public class JMapHProfLargeHeapTest {
+    private static final String HEAP_DUMP_FILE_NAME = "heap.hprof";
+    private static final String HPROF_HEADER_1_0_1 = "JAVA PROFILE 1.0.1";
+    private static final String HPROF_HEADER_1_0_2 = "JAVA PROFILE 1.0.2";
+    private static final long M = 1024L;
+    private static final long G = 1024L * M;
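+    // M and G count the 1 KB arrays allocated by JMapHProfLargeHeapProc, so
+    // M arrays is roughly 1 MB of garbage and G arrays roughly 1 GB.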
+
+    public static void main(String[] args) throws Exception {
+        // If we are on MacOSX, check whether the jmap tool is signed; if it is
+        // not, return, since the test would fail with a privilege error.
+        if (Platform.isOSX()) {
+            String jmapToolPath = JDKToolFinder.getTestJDKTool("jmap");
+            ProcessBuilder codesignProcessBuilder = new ProcessBuilder(
+                    "codesign", "-v", jmapToolPath);
+            Process codesignProcess = codesignProcessBuilder.start();
+            OutputAnalyzer analyser = new OutputAnalyzer(codesignProcess);
+            try {
+                analyser.shouldNotContain("code object is not signed at all");
+                System.out.println("Signed jmap found at: " + jmapToolPath);
+            } catch (Exception e) {
+                // Abort since we can't know if the test will work
+                System.out
+                        .println("Test aborted since we are on MacOSX and the jmap tool is not signed.");
+                return;
+            }
+        }
+
+        // Small heap 22 megabytes, should create 1.0.1 file format
+        testHProfFileFormat("-Xmx1g", 22 * M, HPROF_HEADER_1_0_1);
+
+        /**
+         * This test case is deliberately commented out because the test system
+         * lacks support for heaps of this size. If or when it becomes possible
+         * to run this kind of test in the test environment, the case should be
+         * enabled again.
+         */
+        // Large heap 2.2 gigabytes, should create 1.0.2 file format
+        // testHProfFileFormat("-Xmx4g", 2 * G + 2 * M, HPROF_HEADER_1_0_2);
+    }
+
+    private static void testHProfFileFormat(String vmArgs, long heapSize,
+            String expectedFormat) throws Exception, IOException,
+            InterruptedException, FileNotFoundException {
+        ProcessBuilder procBuilder = ProcessTools.createJavaProcessBuilder(
+                vmArgs, "JMapHProfLargeHeapProc", String.valueOf(heapSize));
+        procBuilder.redirectError(ProcessBuilder.Redirect.INHERIT);
+        Process largeHeapProc = procBuilder.start();
+
+        try (Scanner largeHeapScanner = new Scanner(
+                largeHeapProc.getInputStream())) {
+            String pidstring = null;
+            while ((pidstring = largeHeapScanner.findInLine("PID\\[[0-9].*\\]")) == null) {
+                Thread.sleep(500);
+            }
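+            // pidstring has the form "PID[12345]"; strip the "PID[" prefix
+            // and the trailing "]" to recover the pid.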
+            int pid = Integer.parseInt(pidstring.substring(4,
+                    pidstring.length() - 1));
+            System.out.println("Extracted pid: " + pid);
+
+            JDKToolLauncher jMapLauncher = JDKToolLauncher
+                    .createUsingTestJDK("jmap");
+            jMapLauncher.addToolArg("-dump:format=b,file=" + pid + "-"
+                    + HEAP_DUMP_FILE_NAME);
+            jMapLauncher.addToolArg(String.valueOf(pid));
+
+            ProcessBuilder jMapProcessBuilder = new ProcessBuilder(
+                    jMapLauncher.getCommand());
+            System.out.println("jmap command: "
+                    + Arrays.toString(jMapLauncher.getCommand()));
+
+            Process jMapProcess = jMapProcessBuilder.start();
+            OutputAnalyzer analyzer = new OutputAnalyzer(jMapProcess);
+            analyzer.shouldHaveExitValue(0);
+            analyzer.shouldContain(pid + "-" + HEAP_DUMP_FILE_NAME);
+            analyzer.shouldContain("Heap dump file created");
+
+            largeHeapProc.getOutputStream().write('\n');
+
+            File dumpFile = new File(pid + "-" + HEAP_DUMP_FILE_NAME);
+            Asserts.assertTrue(dumpFile.exists(), "Heap dump file not found.");
+
+            try (Reader reader = new BufferedReader(new FileReader(dumpFile))) {
+                CharBuffer buf = CharBuffer.allocate(expectedFormat.length());
+                reader.read(buf);
+                buf.clear();
+                Asserts.assertEQ(buf.toString(), expectedFormat,
+                        "Wrong file format. Expected '" + expectedFormat
+                                + "', but found '" + buf.toString() + "'");
+            }
+
+            System.out.println("Success!");
+
+        } finally {
+            largeHeapProc.destroyForcibly();
+        }
+    }
+}
--- a/test/testlibrary/AssertsTest.java	Wed Oct 16 10:52:41 2013 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,237 +0,0 @@
-/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-import static com.oracle.java.testlibrary.Asserts.*;
-
-/* @test
- * @summary Tests the different assertions in the Assert class
- * @library /testlibrary
- */
-public class AssertsTest {
-    private static class Foo implements Comparable<Foo> {
-        final int id;
-        public Foo(int id) {
-            this.id = id;
-        }
-
-        public int compareTo(Foo f) {
-            return new Integer(id).compareTo(new Integer(f.id));
-        }
-    }
-
-    public static void main(String[] args) throws Exception {
-        testLessThan();
-        testLessThanOrEqual();
-        testEquals();
-        testGreaterThanOrEqual();
-        testGreaterThan();
-        testNotEquals();
-        testNull();
-        testNotNull();
-        testTrue();
-        testFalse();
-    }
-
-    private static void testLessThan() throws Exception {
-        expectPass(Assertion.LT, 1, 2);
-
-        expectFail(Assertion.LT, 2, 2);
-        expectFail(Assertion.LT, 2, 1);
-        expectFail(Assertion.LT, null, 2);
-        expectFail(Assertion.LT, 2, null);
-    }
-
-    private static void testLessThanOrEqual() throws Exception {
-        expectPass(Assertion.LTE, 1, 2);
-        expectPass(Assertion.LTE, 2, 2);
-
-        expectFail(Assertion.LTE, 3, 2);
-        expectFail(Assertion.LTE, null, 2);
-        expectFail(Assertion.LTE, 2, null);
-    }
-
-    private static void testEquals() throws Exception {
-        expectPass(Assertion.EQ, 1, 1);
-        expectPass(Assertion.EQ, null, null);
-
-        Foo f1 = new Foo(1);
-        expectPass(Assertion.EQ, f1, f1);
-
-        Foo f2 = new Foo(1);
-        expectFail(Assertion.EQ, f1, f2);
-        expectFail(Assertion.LTE, null, 2);
-        expectFail(Assertion.LTE, 2, null);
-    }
-
-    private static void testGreaterThanOrEqual() throws Exception {
-        expectPass(Assertion.GTE, 1, 1);
-        expectPass(Assertion.GTE, 2, 1);
-
-        expectFail(Assertion.GTE, 1, 2);
-        expectFail(Assertion.GTE, null, 2);
-        expectFail(Assertion.GTE, 2, null);
-    }
-
-    private static void testGreaterThan() throws Exception {
-        expectPass(Assertion.GT, 2, 1);
-
-        expectFail(Assertion.GT, 1, 1);
-        expectFail(Assertion.GT, 1, 2);
-        expectFail(Assertion.GT, null, 2);
-        expectFail(Assertion.GT, 2, null);
-    }
-
-    private static void testNotEquals() throws Exception {
-        expectPass(Assertion.NE, null, 1);
-        expectPass(Assertion.NE, 1, null);
-
-        Foo f1 = new Foo(1);
-        Foo f2 = new Foo(1);
-        expectPass(Assertion.NE, f1, f2);
-
-        expectFail(Assertion.NE, null, null);
-        expectFail(Assertion.NE, f1, f1);
-        expectFail(Assertion.NE, 1, 1);
-    }
-
-    private static void testNull() throws Exception {
-        expectPass(Assertion.NULL, null);
-
-        expectFail(Assertion.NULL, 1);
-    }
-
-    private static void testNotNull() throws Exception {
-        expectPass(Assertion.NOTNULL, 1);
-
-        expectFail(Assertion.NOTNULL, null);
-    }
-
-    private static void testTrue() throws Exception {
-        expectPass(Assertion.TRUE, true);
-
-        expectFail(Assertion.TRUE, false);
-    }
-
-    private static void testFalse() throws Exception {
-        expectPass(Assertion.FALSE, false);
-
-        expectFail(Assertion.FALSE, true);
-    }
-
-    private static <T extends Comparable<T>> void expectPass(Assertion assertion, T ... args)
-        throws Exception {
-        Assertion.run(assertion, args);
-    }
-
-    private static <T extends Comparable<T>> void expectFail(Assertion assertion, T ... args)
-        throws Exception {
-        try {
-            Assertion.run(assertion, args);
-        } catch (RuntimeException e) {
-            return;
-        }
-        throw new Exception("Expected " + Assertion.format(assertion, (Object[]) args) +
-                            " to throw a RuntimeException");
-    }
-
-}
-
-enum Assertion {
-    LT, LTE, EQ, GTE, GT, NE, NULL, NOTNULL, FALSE, TRUE;
-
-    public static <T extends Comparable<T>> void run(Assertion assertion, T ... args) {
-        String msg = "Expected " + format(assertion, args) + " to pass";
-        switch (assertion) {
-            case LT:
-                assertLessThan(args[0], args[1], msg);
-                break;
-            case LTE:
-                assertLessThanOrEqual(args[0], args[1], msg);
-                break;
-            case EQ:
-                assertEquals(args[0], args[1], msg);
-                break;
-            case GTE:
-                assertGreaterThanOrEqual(args[0], args[1], msg);
-                break;
-            case GT:
-                assertGreaterThan(args[0], args[1], msg);
-                break;
-            case NE:
-                assertNotEquals(args[0], args[1], msg);
-                break;
-            case NULL:
-                assertNull(args == null ? args : args[0], msg);
-                break;
-            case NOTNULL:
-                assertNotNull(args == null ? args : args[0], msg);
-                break;
-            case FALSE:
-                assertFalse((Boolean) args[0], msg);
-                break;
-            case TRUE:
-                assertTrue((Boolean) args[0], msg);
-                break;
-            default:
-                // do nothing
-        }
-    }
-
-    public static String format(Assertion assertion, Object ... args) {
-        switch (assertion) {
-            case LT:
-                return asString("assertLessThan", args);
-            case LTE:
-                return asString("assertLessThanOrEqual", args);
-            case EQ:
-                return asString("assertEquals", args);
-            case GTE:
-                return asString("assertGreaterThanOrEquals", args);
-            case GT:
-                return asString("assertGreaterThan", args);
-            case NE:
-                return asString("assertNotEquals", args);
-            case NULL:
-                return asString("assertNull", args);
-            case NOTNULL:
-                return asString("assertNotNull", args);
-            case FALSE:
-                return asString("assertFalse", args);
-            case TRUE:
-                return asString("assertTrue", args);
-            default:
-                return "";
-        }
-    }
-
-    private static String asString(String assertion, Object ... args) {
-        if (args == null) {
-            return String.format("%s(null)", assertion);
-        }
-        if (args.length == 1) {
-            return String.format("%s(%s)", assertion, args[0]);
-        } else {
-            return String.format("%s(%s, %s)", assertion, args[0], args[1]);
-        }
-    }
-}
--- a/test/testlibrary/OutputAnalyzerReportingTest.java	Wed Oct 16 10:52:41 2013 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,124 +0,0 @@
-/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-
-/*
- * @test
- * @summary Test the OutputAnalyzer reporting functionality,
- *     such as printing additional diagnostic info
- *     (exit code, stdout, stderr, command line, etc.)
- * @library /testlibrary
- */
-
-import java.io.ByteArrayOutputStream;
-import java.io.PrintStream;
-
-import com.oracle.java.testlibrary.OutputAnalyzer;
-import com.oracle.java.testlibrary.ProcessTools;
-
-
-public class OutputAnalyzerReportingTest {
-
-    public static void main(String[] args) throws Exception {
-        // Create the output analyzer under test
-        String stdout = "aaaaaa";
-        String stderr = "bbbbbb";
-        OutputAnalyzer output = new OutputAnalyzer(stdout, stderr);
-
-        // Expected summary values should be the same for all cases,
-        // since the outputAnalyzer object is the same
-        String expectedExitValue = "-1";
-        String expectedSummary =
-                " stdout: [" + stdout + "];\n" +
-                " stderr: [" + stderr + "]\n" +
-                " exitValue = " + expectedExitValue + "\n";
-
-
-        DiagnosticSummaryTestRunner testRunner =
-                new DiagnosticSummaryTestRunner();
-
-        // should have exit value
-        testRunner.init(expectedSummary);
-        int unexpectedExitValue = 2;
-        try {
-            output.shouldHaveExitValue(unexpectedExitValue);
-        } catch (RuntimeException e) { }
-        testRunner.closeAndCheckResults();
-
-        // should not contain
-        testRunner.init(expectedSummary);
-        try {
-            output.shouldNotContain(stdout);
-        } catch (RuntimeException e) { }
-        testRunner.closeAndCheckResults();
-
-        // should contain
-        testRunner.init(expectedSummary);
-        try {
-            output.shouldContain("unexpected-stuff");
-        } catch (RuntimeException e) { }
-        testRunner.closeAndCheckResults();
-
-        // should not match
-        testRunner.init(expectedSummary);
-        try {
-            output.shouldNotMatch("[a]");
-        } catch (RuntimeException e) { }
-        testRunner.closeAndCheckResults();
-
-        // should match
-        testRunner.init(expectedSummary);
-        try {
-            output.shouldMatch("[qwerty]");
-        } catch (RuntimeException e) { }
-        testRunner.closeAndCheckResults();
-
-    }
-
-    private static class DiagnosticSummaryTestRunner {
-        private ByteArrayOutputStream byteStream =
-                new ByteArrayOutputStream(10000);
-
-        private String expectedSummary = "";
-        private PrintStream errStream;
-
-
-        public void init(String expectedSummary) {
-            this.expectedSummary = expectedSummary;
-            byteStream.reset();
-            errStream = new PrintStream(byteStream);
-            System.setErr(errStream);
-        }
-
-        public void closeAndCheckResults() {
-            // check results
-            errStream.close();
-            String stdErrStr = byteStream.toString();
-            if (!stdErrStr.contains(expectedSummary)) {
-                throw new RuntimeException("The output does not contain "
-                    + "the diagnostic message, or the message is incorrect");
-            }
-        }
-    }
-
-}
--- a/test/testlibrary/OutputAnalyzerTest.java	Wed Oct 16 10:52:41 2013 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,176 +0,0 @@
-/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/*
- * @test
- * @summary Test the OutputAnalyzer utility class
- * @library /testlibrary
- */
-
-import com.oracle.java.testlibrary.OutputAnalyzer;
-
-public class OutputAnalyzerTest {
-
-  public static void main(String args[]) throws Exception {
-
-    String stdout = "aaaaaa";
-    String stderr = "bbbbbb";
-
-    // Regexps used for testing pattern matching of the test input
-    String stdoutPattern = "[a]";
-    String stderrPattern = "[b]";
-    String nonExistingPattern = "[c]";
-
-    OutputAnalyzer output = new OutputAnalyzer(stdout, stderr);
-
-    if (!stdout.equals(output.getStdout())) {
-      throw new Exception("getStdout() returned '" + output.getStdout() + "', expected '" + stdout + "'");
-    }
-
-    if (!stderr.equals(output.getStderr())) {
-      throw new Exception("getStderr() returned '" + output.getStderr() + "', expected '" + stderr + "'");
-    }
-
-    try {
-      output.shouldContain(stdout);
-      output.stdoutShouldContain(stdout);
-      output.shouldContain(stderr);
-      output.stderrShouldContain(stderr);
-    } catch (RuntimeException e) {
-      throw new Exception("shouldContain() failed", e);
-    }
-
-    try {
-      output.shouldContain("cccc");
-      throw new Exception("shouldContain() failed to throw exception");
-    } catch (RuntimeException e) {
-      // expected
-    }
-
-    try {
-      output.stdoutShouldContain(stderr);
-      throw new Exception("stdoutShouldContain() failed to throw exception");
-    } catch (RuntimeException e) {
-      // expected
-    }
-
-    try {
-      output.stderrShouldContain(stdout);
-      throw new Exception("stdoutShouldContain() failed to throw exception");
-    } catch (RuntimeException e) {
-      // expected
-    }
-
-    try {
-      output.shouldNotContain("cccc");
-      output.stdoutShouldNotContain("cccc");
-      output.stderrShouldNotContain("cccc");
-    } catch (RuntimeException e) {
-      throw new Exception("shouldNotContain() failed", e);
-    }
-
-    try {
-      output.shouldNotContain(stdout);
-      throw new Exception("shouldContain() failed to throw exception");
-    } catch (RuntimeException e) {
-      // expected
-    }
-
-    try {
-      output.stdoutShouldNotContain(stdout);
-      throw new Exception("shouldContain() failed to throw exception");
-    } catch (RuntimeException e) {
-      // expected
-    }
-
-    try {
-        output.stderrShouldNotContain(stderr);
-        throw new Exception("shouldContain() failed to throw exception");
-    } catch (RuntimeException e) {
-        // expected
-    }
-
-    // Should match
-    try {
-        output.shouldMatch(stdoutPattern);
-        output.stdoutShouldMatch(stdoutPattern);
-        output.shouldMatch(stderrPattern);
-        output.stderrShouldMatch(stderrPattern);
-    } catch (RuntimeException e) {
-        throw new Exception("shouldMatch() failed", e);
-    }
-
-    try {
-        output.shouldMatch(nonExistingPattern);
-        throw new Exception("shouldMatch() failed to throw exception");
-    } catch (RuntimeException e) {
-        // expected
-    }
-
-    try {
-        output.stdoutShouldMatch(stderrPattern);
-        throw new Exception(
-                "stdoutShouldMatch() failed to throw exception");
-    } catch (RuntimeException e) {
-        // expected
-    }
-
-    try {
-        output.stderrShouldMatch(stdoutPattern);
-        throw new Exception(
-                "stderrShouldMatch() failed to throw exception");
-    } catch (RuntimeException e) {
-        // expected
-    }
-
-    // Should not match
-    try {
-        output.shouldNotMatch(nonExistingPattern);
-        output.stdoutShouldNotMatch(nonExistingPattern);
-        output.stderrShouldNotMatch(nonExistingPattern);
-    } catch (RuntimeException e) {
-        throw new Exception("shouldNotMatch() failed", e);
-    }
-
-    try {
-        output.shouldNotMatch(stdoutPattern);
-        throw new Exception("shouldNotMatch() failed to throw exception");
-    } catch (RuntimeException e) {
-        // expected
-    }
-
-    try {
-        output.stdoutShouldNotMatch(stdoutPattern);
-        throw new Exception("shouldNotMatch() failed to throw exception");
-    } catch (RuntimeException e) {
-        // expected
-    }
-
-    try {
-        output.stderrShouldNotMatch(stderrPattern);
-        throw new Exception("shouldNotMatch() failed to throw exception");
-    } catch (RuntimeException e) {
-        // expected
-    }
-  }
-}
--- a/test/testlibrary/com/oracle/java/testlibrary/InputArguments.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/test/testlibrary/com/oracle/java/testlibrary/InputArguments.java	Tue Nov 05 17:38:04 2013 -0800
@@ -41,6 +41,9 @@
     /**
      * Returns true if {@code arg} is an input argument to the VM.
      *
+     * This is useful for checking boolean flags such as -XX:+UseSerialGC or
+     * -XX:-UsePerfData.
+     *
      * @param arg The name of the argument.
      * @return {@code true} if the given argument is an input argument,
      *         otherwise {@code false}.
@@ -48,4 +51,26 @@
     public static boolean contains(String arg) {
         return args.contains(arg);
     }
+
+    /**
+     * Returns true if {@code prefix} is the start of an input argument to the
+     * VM.
+     *
+     * This is useful for checking whether a flag describing a quantity, such
+     * as -XX:MaxMetaspaceSize=100m, is set, without having to know the
+     * quantity. To check if the flag -XX:MaxMetaspaceSize is set, use
+     * {@code InputArguments.containsPrefix("-XX:MaxMetaspaceSize")}.
+     *
+     * @param prefix The start of the argument.
+     * @return {@code true} if the given argument is the start of an input
+     *         argument, otherwise {@code false}.
+     */
+    public static boolean containsPrefix(String prefix) {
+        for (String arg : args) {
+            if (arg.startsWith(prefix)) {
+                return true;
+            }
+        }
+        return false;
+    }
 }
--- a/test/testlibrary/com/oracle/java/testlibrary/JDKToolFinder.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/test/testlibrary/com/oracle/java/testlibrary/JDKToolFinder.java	Tue Nov 05 17:38:04 2013 -0800
@@ -23,28 +23,84 @@
 
 package com.oracle.java.testlibrary;
 
-import java.io.File;
+import java.io.FileNotFoundException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
 
 public final class JDKToolFinder {
 
-  private JDKToolFinder() {
-  }
+    private JDKToolFinder() {
+    }
+
+    /**
+     * Returns the full path to an executable in jdk/bin based on the System
+     * property {@code test.jdk} or {@code compile.jdk} (both are set by the
+     * jtreg test suite).
+     *
+     * @return Full path to an executable in jdk/bin
+     */
+    public static String getJDKTool(String tool) {
 
-  /**
-   * Returns the full path to an executable in jdk/bin based on System property
-   * test.jdk (set by jtreg test suite)
-   *
-   * @return Full path to an executable in jdk/bin
-   */
-  public static String getJDKTool(String tool) {
-    String binPath = System.getProperty("test.jdk");
-    if (binPath == null) {
-      throw new RuntimeException("System property 'test.jdk' not set. This property is normally set by jtreg. "
-          + "When running test separately, set this property using '-Dtest.jdk=/path/to/jdk'.");
+        // First try to find the executable in test.jdk
+        try {
+            return getTool(tool, "test.jdk");
+        } catch (FileNotFoundException e) {
+            // Not found in test.jdk; fall through and try compile.jdk.
+        }
+
+        // Now see if it's available in compile.jdk
+        try {
+            return getTool(tool, "compile.jdk");
+        } catch (FileNotFoundException e) {
+            throw new RuntimeException("Failed to find " + tool +
+                    ", looked in test.jdk (" + System.getProperty("test.jdk") +
+                    ") and compile.jdk (" + System.getProperty("compile.jdk") + ")");
+        }
     }
 
-    binPath += File.separatorChar + "bin" + File.separatorChar + tool;
+    /**
+     * Returns the full path to an executable in jdk/bin based on System
+     * property {@code compile.jdk}
+     *
+     * @return Full path to an executable in jdk/bin
+     */
+    public static String getCompileJDKTool(String tool) {
+        try {
+            return getTool(tool, "compile.jdk");
+        } catch (FileNotFoundException e) {
+            throw new RuntimeException(e);
+        }
+    }
 
-    return binPath;
-  }
+    /**
+     * Returns the full path to an executable in jdk/bin based on System
+     * property {@code test.jdk}
+     *
+     * @return Full path to an executable in jdk/bin
+     */
+    public static String getTestJDKTool(String tool) {
+        try {
+            return getTool(tool, "test.jdk");
+        } catch (FileNotFoundException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    private static String getTool(String tool, String property) throws FileNotFoundException {
+        String jdkPath = System.getProperty(property);
+
+        if (jdkPath == null) {
+            throw new RuntimeException(
+                    "System property '" + property + "' not set. This property is normally set by jtreg. "
+                    + "When running test separately, set this property using '-D" + property + "=/path/to/jdk'.");
+        }
+
+        Path toolName = Paths.get("bin", tool + (Platform.isWindows() ? ".exe" : ""));
+
+        Path jdkTool = Paths.get(jdkPath, toolName.toString());
+        if (!jdkTool.toFile().exists()) {
+            throw new FileNotFoundException("Could not find file " + jdkTool.toAbsolutePath());
+        }
+
+        return jdkTool.toAbsolutePath().toString();
+    }
 }
--- a/test/testlibrary/com/oracle/java/testlibrary/JDKToolLauncher.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/test/testlibrary/com/oracle/java/testlibrary/JDKToolLauncher.java	Tue Nov 05 17:38:04 2013 -0800
@@ -23,20 +23,17 @@
 
 package com.oracle.java.testlibrary;
 
-import java.util.List;
 import java.util.ArrayList;
 import java.util.Arrays;
-
-import com.oracle.java.testlibrary.JDKToolFinder;
-import com.oracle.java.testlibrary.ProcessTools;
+import java.util.List;
 
 /**
  * A utility for constructing command lines for starting JDK tool processes.
  *
  * The JDKToolLauncher can in particular be combined with a
- * java.lang.ProcessBuilder to easily run a JDK tool. For example, the
- * following code run {@code jmap -heap} against a process with GC logging
- * turned on for the {@code jmap} process:
+ * java.lang.ProcessBuilder to easily run a JDK tool. For example, the following
+ * code runs {@code jmap -heap} against a process with GC logging turned on for
+ * the {@code jmap} process:
  *
  * <pre>
  * {@code
@@ -55,19 +52,37 @@
     private final List<String> vmArgs = new ArrayList<String>();
     private final List<String> toolArgs = new ArrayList<String>();
 
-    private JDKToolLauncher(String tool) {
-        executable = JDKToolFinder.getJDKTool(tool);
+    private JDKToolLauncher(String tool, boolean useCompilerJDK) {
+        if (useCompilerJDK) {
+            executable = JDKToolFinder.getJDKTool(tool);
+        } else {
+            executable = JDKToolFinder.getTestJDKTool(tool);
+        }
         vmArgs.addAll(Arrays.asList(ProcessTools.getPlatformSpecificVMArgs()));
     }
 
     /**
-     * Creates a new JDKToolLauncher for the specified tool.
+     * Creates a new JDKToolLauncher for the specified tool, using the tool
+     * path from the compiler JDK.
      *
-     * @param tool The name of the tool
+     * @param tool
+     *            The name of the tool
      * @return A new JDKToolLauncher
      */
     public static JDKToolLauncher create(String tool) {
-        return new JDKToolLauncher(tool);
+        return new JDKToolLauncher(tool, true);
+    }
+
+    /**
+     * Creates a new JDKToolLauncher for the specified tool in the tested JDK.
+     *
+     * @param tool
+     *            The name of the tool
+     *
+     * @return A new JDKToolLauncher
+     */
+    public static JDKToolLauncher createUsingTestJDK(String tool) {
+        return new JDKToolLauncher(tool, false);
     }
 
     /**
@@ -80,18 +95,20 @@
      * automatically added.
      *
      *
-     * @param arg The argument to VM running the tool
+     * @param arg
+     *            The argument to VM running the tool
      * @return The JDKToolLauncher instance
      */
     public JDKToolLauncher addVMArg(String arg) {
-        vmArgs.add("-J" + arg);
+        vmArgs.add(arg);
         return this;
     }
 
     /**
      * Adds an argument to the tool.
      *
-     * @param arg The argument to the tool
+     * @param arg
+     *            The argument to the tool
      * @return The JDKToolLauncher instance
      */
     public JDKToolLauncher addToolArg(String arg) {
@@ -107,7 +124,10 @@
     public String[] getCommand() {
         List<String> command = new ArrayList<String>();
         command.add(executable);
-        command.addAll(vmArgs);
+        // Add -J in front of all vmArgs
+        for (String arg : vmArgs) {
+            command.add("-J" + arg);
+        }
         command.addAll(toolArgs);
         return command.toArray(new String[command.size()]);
     }
--- a/test/testlibrary/com/oracle/java/testlibrary/OutputAnalyzer.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/test/testlibrary/com/oracle/java/testlibrary/OutputAnalyzer.java	Tue Nov 05 17:38:04 2013 -0800
@@ -211,13 +211,13 @@
       if (matcher.find()) {
           reportDiagnosticSummary();
           throw new RuntimeException("'" + pattern
-                  + "' found in stdout \n");
+                  + "' found in stdout: '" + matcher.group() + "' \n");
       }
       matcher = Pattern.compile(pattern, Pattern.MULTILINE).matcher(stderr);
       if (matcher.find()) {
           reportDiagnosticSummary();
           throw new RuntimeException("'" + pattern
-                  + "' found in stderr \n");
+                  + "' found in stderr: '" + matcher.group() + "' \n");
       }
   }
 
@@ -254,6 +254,37 @@
   }
 
   /**
+   * Get the captured group of the first string matching the pattern.
+   * stderr is searched before stdout.
+   *
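+   * For example, {@code firstMatch("pid: ([0-9]+)", 1)} returns the digits
+   * captured by group 1 on the first matching line.
+   *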
+   * @param pattern The multi-line pattern to match
+   * @param group The group to capture
+   * @return The matched string or null if no match was found
+   */
+  public String firstMatch(String pattern, int group) {
+    Matcher stderrMatcher = Pattern.compile(pattern, Pattern.MULTILINE).matcher(stderr);
+    Matcher stdoutMatcher = Pattern.compile(pattern, Pattern.MULTILINE).matcher(stdout);
+    if (stderrMatcher.find()) {
+      return stderrMatcher.group(group);
+    }
+    if (stdoutMatcher.find()) {
+      return stdoutMatcher.group(group);
+    }
+    return null;
+  }
+
+  /**
+   * Get the first string matching the pattern.
+   * stderr is searched before stdout.
+   *
+   * @param pattern The multi-line pattern to match
+   * @return The matched string or null if no match was found
+   */
+  public String firstMatch(String pattern) {
+    return firstMatch(pattern, 0);
+  }
+
+  /**
    * Verify the exit value of the process
    *
    * @param expectedExitValue Expected exit value from process
--- a/test/testlibrary/com/oracle/java/testlibrary/Platform.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/test/testlibrary/com/oracle/java/testlibrary/Platform.java	Tue Nov 05 17:38:04 2013 -0800
@@ -24,50 +24,80 @@
 package com.oracle.java.testlibrary;
 
 public class Platform {
-  private static final String osName = System.getProperty("os.name");
-  private static final String dataModel = System.getProperty("sun.arch.data.model");
-  private static final String vmVersion = System.getProperty("java.vm.version");
-  private static final String osArch = System.getProperty("os.arch");
+    private static final String osName      = System.getProperty("os.name");
+    private static final String dataModel   = System.getProperty("sun.arch.data.model");
+    private static final String vmVersion   = System.getProperty("java.vm.version");
+    private static final String osArch      = System.getProperty("os.arch");
 
-  public static boolean is64bit() {
-    return dataModel.equals("64");
-  }
+    public static boolean is32bit() {
+        return dataModel.equals("32");
+    }
+
+    public static boolean is64bit() {
+        return dataModel.equals("64");
+    }
+
+    public static boolean isSolaris() {
+        return isOs("sunos");
+    }
 
-  public static boolean isSolaris() {
-    return osName.toLowerCase().startsWith("sunos");
-  }
+    public static boolean isWindows() {
+        return isOs("win");
+    }
+
+    public static boolean isOSX() {
+        return isOs("mac");
+    }
 
-  public static boolean isWindows() {
-    return osName.toLowerCase().startsWith("win");
-  }
+    public static boolean isLinux() {
+        return isOs("linux");
+    }
 
-  public static boolean isOSX() {
-    return osName.toLowerCase().startsWith("mac");
-  }
+    private static boolean isOs(String osname) {
+        return osName.toLowerCase().startsWith(osname.toLowerCase());
+    }
+
+    public static String getOsName() {
+        return osName;
+    }
 
-  public static boolean isLinux() {
-    return osName.toLowerCase().startsWith("linux");
-  }
+    public static boolean isDebugBuild() {
+        return vmVersion.toLowerCase().contains("debug");
+    }
+
+    public static String getVMVersion() {
+        return vmVersion;
+    }
 
-  public static String getOsName() {
-    return osName;
-  }
+    // Returns true for sparc and sparcv9.
+    public static boolean isSparc() {
+        return isArch("sparc");
+    }
 
-  public static boolean isDebugBuild() {
-    return vmVersion.toLowerCase().contains("debug");
-  }
+    public static boolean isARM() {
+        return isArch("arm");
+    }
 
-  public static String getVMVersion() {
-    return vmVersion;
-  }
+    public static boolean isPPC() {
+        return isArch("ppc");
+    }
+
+    public static boolean isX86() {
+        // On Linux it's 'i386', Windows 'x86'
+        return (isArch("i386") || isArch("x86"));
+    }
 
-  // Returns true for sparc and sparcv9.
-  public static boolean isSparc() {
-    return osArch.toLowerCase().startsWith("sparc");
-  }
+    public static boolean isX64() {
+        // On OSX it's 'x86_64' and on other (Linux, Windows and Solaris) platforms it's 'amd64'
+        return (isArch("amd64") || isArch("x86_64"));
+    }
 
-  public static String getOsArch() {
-    return osArch;
-  }
+    private static boolean isArch(String archname) {
+        return osArch.toLowerCase().startsWith(archname.toLowerCase());
+    }
+
+    public static String getOsArch() {
+        return osArch;
+    }
 
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/testlibrary/ctw/Makefile	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,73 @@
+#
+# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+#
+
+ifneq "x$(ALT_BOOTDIR)" "x"
+	BOOTDIR := $(ALT_BOOTDIR)
+endif
+
+ifeq "x$(BOOTDIR)" "x"
+	JDK_HOME := $(shell dirname $(shell which java))/..
+else
+	JDK_HOME := $(BOOTDIR)
+endif
+
+SRC_DIR = src
+BUILD_DIR = build
+OUTPUT_DIR = $(BUILD_DIR)/classes
+WHITEBOX_DIR = ../whitebox
+
+JAVAC = $(JDK_HOME)/bin/javac
+JAR = $(JDK_HOME)/bin/jar
+
+SRC_FILES = $(shell find $(SRC_DIR) -name '*.java')
+
+MAIN_CLASS = sun.hotspot.tools.ctw.CompileTheWorld
+
+.PHONY: clean cleantmp
+
+all: ctw.jar cleantmp
+
+clean: cleantmp
+	@rm -rf ctw.jar wb.jar
+
+cleantmp:
+	@rm -rf filelist manifest.mf
+	@rm -rf $(BUILD_DIR)
+
+ctw.jar: filelist wb.jar manifest.mf
+	@mkdir -p $(OUTPUT_DIR)
+	$(JAVAC) -sourcepath $(SRC_DIR) -d $(OUTPUT_DIR) -cp wb.jar @filelist
+	$(JAR) cfm ctw.jar manifest.mf -C $(OUTPUT_DIR) .
+
+wb.jar:
+	$(MAKE) -C $(WHITEBOX_DIR) wb.jar
+	cp $(WHITEBOX_DIR)/wb.jar ./
+	$(MAKE) -C $(WHITEBOX_DIR) clean
+
+filelist: $(SRC_FILES)
+	@rm -f $@
+	@echo $(SRC_FILES) > $@
+
+manifest.mf:
+	@echo "Main-Class: ${MAIN_CLASS}" > manifest.mf
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/testlibrary/ctw/README	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,93 @@
+#
+# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+#
+
+DESCRIPTION
+
+This is a replacement for CompileTheWorld (CTW) written in Java. Its purpose
+is to make it possible to use CTW in product builds.
+
+DEPENDENCIES
+
+The tool depends on the WhiteBox API. The whitebox sources are assumed to be
+located in the '../whitebox' directory.
+
+BUILDING
+
+The simplest way to build is to just type 'make'.
+
+The Makefile uses the environment variables 'ALT_BOOTDIR' and 'BOOTDIR' as the
+root directory of the JDK used for compilation and for creating the jar.
+
+On a successful build, 'ctw.jar' will be created.
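+
+For example, assuming the JDK lives at '/path/to/jdk' (a placeholder path):
+  $ BOOTDIR=/path/to/jdk make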
+
+RUNNING
+
+Since the tool uses the WhiteBox API, the options 'UnlockDiagnosticVMOptions'
+and 'WhiteBoxAPI' must be specified, and 'wb.jar' must be added to the
+boot classpath:
+  $ java -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:wb.jar -jar ctw.jar 
+
+Arguments can be paths to '.jar', '.zip', or '.lst' files, or directories with
+classes; they define which classes will be compiled:
+  - '.jar', '.zip' files and directories are interpreted as in a classpath
+(including the '<dir>/*' syntax)
+  - '.lst' files -- files with class names (in Java notation) to compile.
+CTW will try to find these classes with the default class loader, so they
+should be located on the classpath.
+
+Without arguments, it works like the old version of CTW: all classes on the
+boot classpath will be compiled, excluding classes in 'rt.jar' if 'rt.jar'
+is not first on the boot classpath.
+
+Since CTW's flags are also not available in product builds, the tool uses
+properties with the same names:
+  - 'CompileTheWorldPreloadClasses' -- type:boolean, default:true, description:
+Preload all classes used by a class before compiling it
+  - 'CompileTheWorldStartAt' -- type:long, default:1, description: First class
+to consider
+  - 'CompileTheWorldStopAt' -- type:long, default:Long.MAX_VALUE, description:
+Last class to consider
+
+It also uses these additional properties:
+  - 'sun.hotspot.tools.ctw.verbose' -- type:boolean, default:false,
+description: Verbose output, adds additional information about compilation
+  - 'sun.hotspot.tools.ctw.logfile' -- type:string, default:null,
+description: Path to the log file; if it is null, stdout will be used.
+
+EXAMPLES
+
+compile classes from 'rt.jar':
+  $ java -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:wb.jar -jar ctw.jar ${JAVA_HOME}/jre/lib/rt.jar
+
+compile classes from all '.jar' in './testjars' directory:
+  $ java -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:wb.jar -jar ctw.jar ./testjars/*
+
+compile classes from './build/classes' directory:
+  $ java -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:wb.jar -jar ctw.jar ./build/classes
+
+compile only the java.lang.String and java.lang.Object classes:
+  $ echo java.lang.String > classes.lst
+  $ echo java.lang.Object >> classes.lst
+  $ java -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:wb.jar -jar ctw.jar classes.lst
+
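+run with verbose output written to a log file (an illustrative combination of
+the properties described above; 'ctw.log' is just an example name):
+  $ java -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:wb.jar \
+        -Dsun.hotspot.tools.ctw.verbose=true -Dsun.hotspot.tools.ctw.logfile=ctw.log \
+        -jar ctw.jar ./build/classes
+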
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/testlibrary/ctw/src/sun/hotspot/tools/ctw/ClassPathDirEntry.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package sun.hotspot.tools.ctw;
+
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.net.URLClassLoader;
+import java.util.Set;
+import java.util.EnumSet;
+import java.util.HashSet;
+import java.util.concurrent.Executor;
+
+import java.io.*;
+import java.nio.file.*;
+import java.nio.file.attribute.*;
+
+/**
+ * Handler for directories containing classes to compile.
+ */
+public class ClassPathDirEntry extends PathHandler {
+
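+    // Cached length of the root path; used to strip the root prefix from
+    // file paths when converting them to class names in pathToClassName().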
+    private final int rootLength = root.toString().length();
+
+    public ClassPathDirEntry(Path root, Executor executor) {
+        super(root, executor);
+        try {
+            URL url = root.toUri().toURL();
+            setLoader(new URLClassLoader(new URL[]{url}));
+        } catch (MalformedURLException e) {
+            e.printStackTrace();
+        }
+    }
+
+    @Override
+    public void process() {
+        System.out.println("# dir: " + root);
+        if (!Files.exists(root)) {
+            return;
+        }
+        try {
+            Files.walkFileTree(root, EnumSet.of(FileVisitOption.FOLLOW_LINKS),
+                    Integer.MAX_VALUE, new CompileFileVisitor());
+        } catch (IOException ioe) {
+            ioe.printStackTrace();
+        }
+    }
+
+    private void processFile(Path file) {
+        if (Utils.isClassFile(file.toString())) {
+            processClass(pathToClassName(file));
+        }
+    }
+
+    private String pathToClassName(Path file) {
+        String fileString;
+        if (root == file) {
+            fileString = file.normalize().toString();
+        } else {
+            fileString = file.normalize().toString().substring(rootLength + 1);
+        }
+        return Utils.fileNameToClassName(fileString);
+    }
+
+    private class CompileFileVisitor extends SimpleFileVisitor<Path> {
+
+        private final Set<Path> ready = new HashSet<>();
+
+        @Override
+        public FileVisitResult preVisitDirectory(Path dir,
+                BasicFileAttributes attrs) throws IOException {
+            if (ready.contains(dir)) {
+                return FileVisitResult.SKIP_SUBTREE;
+            }
+            ready.add(dir);
+            return super.preVisitDirectory(dir, attrs);
+        }
+
+        @Override
+        public FileVisitResult visitFile(Path file,
+                BasicFileAttributes attrs) throws IOException {
+            if (!ready.contains(file)) {
+                processFile(file);
+            }
+            return isFinished() ? FileVisitResult.TERMINATE
+                    : FileVisitResult.CONTINUE;
+        }
+
+        @Override
+        public FileVisitResult visitFileFailed(Path file,
+                IOException exc) throws IOException {
+            return FileVisitResult.CONTINUE;
+        }
+    }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/testlibrary/ctw/src/sun/hotspot/tools/ctw/ClassPathJarEntry.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package sun.hotspot.tools.ctw;
+
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.net.URLClassLoader;
+import java.util.*;
+import java.util.jar.*;
+import java.util.concurrent.Executor;
+
+import java.io.*;
+import java.nio.file.*;
+
+/**
+ * Handler for jar-files containing classes to compile.
+ */
+public class ClassPathJarEntry extends PathHandler {
+
+    public ClassPathJarEntry(Path root, Executor executor) {
+        super(root, executor);
+        try {
+            URL url = root.toUri().toURL();
+            setLoader(new URLClassLoader(new URL[]{url}));
+        } catch (MalformedURLException e) {
+            e.printStackTrace();
+        }
+    }
+
+    @Override
+    public void process() {
+        System.out.println("# jar: " + root);
+        if (!Files.exists(root)) {
+            return;
+        }
+        try (JarFile jarFile = new JarFile(root.toFile())) {
+            JarEntry entry;
+            for (Enumeration<JarEntry> e = jarFile.entries();
+                    e.hasMoreElements(); ) {
+                entry = e.nextElement();
+                processJarEntry(entry);
+                if (isFinished()) {
+                    return;
+                }
+            }
+        } catch (IOException ioe) {
+            ioe.printStackTrace();
+        }
+    }
+
+    private void processJarEntry(JarEntry entry) {
+        String filename = entry.getName();
+        if (Utils.isClassFile(filename)) {
+            processClass(Utils.fileNameToClassName(filename));
+        }
+    }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/testlibrary/ctw/src/sun/hotspot/tools/ctw/ClassPathJarInDirEntry.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package sun.hotspot.tools.ctw;
+
+import java.io.IOException;
+import java.nio.file.DirectoryStream;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.concurrent.Executor;
+
+/**
+ * Handler for directories containing jar files with classes to compile.
+ */
+public class ClassPathJarInDirEntry extends PathHandler {
+
+    public ClassPathJarInDirEntry(Path root, Executor executor) {
+        super(root, executor);
+    }
+
+    @Override
+    public void process() {
+        System.out.println("# jar_in_dir: " + root);
+        if (!Files.exists(root)) {
+            return;
+        }
+        try (DirectoryStream<Path> ds
+                = Files.newDirectoryStream(root, "*.jar")) {
+            for (Path p : ds) {
+                new ClassPathJarEntry(p, executor).process();
+                if (isFinished()) {
+                    return;
+                }
+            }
+        } catch (IOException ioe) {
+            ioe.printStackTrace();
+        }
+    }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/testlibrary/ctw/src/sun/hotspot/tools/ctw/ClassesListInFile.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package sun.hotspot.tools.ctw;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.concurrent.Executor;
+
+/**
+ * Handler for files containing a list of classes to compile.
+ */
+public class ClassesListInFile extends PathHandler {
+    public ClassesListInFile(Path root, Executor executor) {
+        super(root, executor);
+    }
+
+    @Override
+    public void process() {
+        System.out.println("# list: " + root);
+        if (!Files.exists(root)) {
+            return;
+        }
+        try (BufferedReader reader = Files.newBufferedReader(root,
+                StandardCharsets.UTF_8)) {
+            String line;
+            while (!isFinished() && ((line = reader.readLine()) != null)) {
+                processClass(line);
+            }
+        } catch (IOException e) {
+            e.printStackTrace();
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/testlibrary/ctw/src/sun/hotspot/tools/ctw/CompileTheWorld.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package sun.hotspot.tools.ctw;
+
+import sun.management.ManagementFactoryHelper;
+
+import java.io.*;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.*;
+
+public class CompileTheWorld {
+    /**
+     * Entry point. Compiles the classes in {@code args}, or all classes on
+     * the boot classpath if {@code args} is empty.
+     *
+     * @param args paths to jar/zip files, directories containing classes, or
+     *             .lst files containing names of classes to compile
+     */
+    public static void main(String[] args) {
+        String logfile = Utils.LOG_FILE;
+        PrintStream os = null;
+        if (logfile != null) {
+            try {
+                os = new PrintStream(Files.newOutputStream(Paths.get(logfile)));
+            } catch (IOException io) {
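+                // Could not open the log file; fall back to stdout below.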
+            }
+        }
+        if (os != null) {
+            System.setOut(os);
+        }
+
+        try {
+            try {
+                if (ManagementFactoryHelper.getCompilationMXBean() == null) {
+                    throw new RuntimeException(
+                            "CTW can not work in interpreted mode");
+                }
+            } catch (java.lang.NoClassDefFoundError e) {
+                // compact1, compact2 support
+            }
+            String[] paths = args;
+            boolean skipRtJar = false;
+            if (args.length == 0) {
+                paths = getDefaultPaths();
+                skipRtJar = true;
+            }
+            ExecutorService executor = createExecutor();
+            long start = System.currentTimeMillis();
+            try {
+                String path;
+                for (int i = 0, n = paths.length; i < n
+                        && !PathHandler.isFinished(); ++i) {
+                    path = paths[i];
+                    if (skipRtJar && i > 0 && isRtJar(path)) {
+                        // rt.jar is not first, so skip it
+                        continue;
+                    }
+                    PathHandler.create(path, executor).process();
+                }
+            } finally {
+                await(executor);
+            }
+            System.out.printf("Done (%d classes, %d methods, %d ms)%n",
+                    Compiler.getClassCount(),
+                    Compiler.getMethodCount(),
+                    System.currentTimeMillis() - start);
+        } finally {
+            if (os != null) {
+                os.close();
+            }
+        }
+    }
+
+    private static ExecutorService createExecutor() {
+        final int threadsCount = Math.min(
+                Runtime.getRuntime().availableProcessors(),
+                Utils.CI_COMPILER_COUNT);
+        ExecutorService result;
+        if (threadsCount > 1) {
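+            // A bounded queue plus CallerRunsPolicy throttles submission:
+            // when the queue is full, the submitting thread runs the task.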
+            result = new ThreadPoolExecutor(threadsCount, threadsCount,
+                    /* keepAliveTime */ 0L, TimeUnit.MILLISECONDS,
+                    new ArrayBlockingQueue<>(threadsCount),
+                    new ThreadPoolExecutor.CallerRunsPolicy());
+        } else {
+            result = new CurrentThreadExecutor();
+        }
+        return result;
+    }
+
+    private static String[] getDefaultPaths() {
+        String property = System.getProperty("sun.boot.class.path");
+        System.out.println(
+                "# use 'sun.boot.class.path' as args: " + property);
+        return Utils.PATH_SEPARATOR.split(property);
+    }
+
+    private static void await(ExecutorService executor) {
+        executor.shutdown();
+        while (!executor.isTerminated()) {
+            try {
+                executor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
+            } catch (InterruptedException ie) {
+                Thread.currentThread().interrupt();
+                break;
+            }
+        }
+    }
+
+    private static boolean isRtJar(String path) {
+        return Utils.endsWithIgnoreCase(path, File.separator + "rt.jar");
+    }
+
+    private static class CurrentThreadExecutor extends AbstractExecutorService {
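+        // Runs every task synchronously on the submitting thread; used when
+        // only a single compiler thread is requested.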
+        private boolean isShutdown;
+
+        @Override
+        public void shutdown() {
+            this.isShutdown = true;
+        }
+
+        @Override
+        public List<Runnable> shutdownNow() {
+            // Nothing is ever queued; return an empty list per the
+            // ExecutorService contract (null could break callers).
+            return Collections.emptyList();
+        }
+
+        @Override
+        public boolean isShutdown() {
+            return isShutdown;
+        }
+
+        @Override
+        public boolean isTerminated() {
+            return isShutdown;
+        }
+
+        @Override
+        public boolean awaitTermination(long timeout, TimeUnit unit)
+                throws InterruptedException {
+            return isShutdown;
+        }
+
+        @Override
+        public void execute(Runnable command) {
+            command.run();
+        }
+    }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/testlibrary/ctw/src/sun/hotspot/tools/ctw/Compiler.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,233 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package sun.hotspot.tools.ctw;
+
+import sun.hotspot.WhiteBox;
+import sun.misc.SharedSecrets;
+import sun.reflect.ConstantPool;
+
+import java.lang.reflect.Executable;
+
+import java.util.Objects;
+import java.util.concurrent.Executor;
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * Provides a method to compile a whole class.
+ * Also contains counters of compiled classes and methods.
+ */
+public class Compiler {
+    private Compiler() { }
+    private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
+    private static final AtomicLong CLASS_COUNT = new AtomicLong(0L);
+    private static final AtomicLong METHOD_COUNT = new AtomicLong(0L);
+    private static volatile boolean CLASSES_LIMIT_REACHED = false;
+
+    /**
+     * @return count of processed classes
+     */
+    public static long getClassCount() {
+        return CLASS_COUNT.get();
+    }
+
+    /**
+     * @return count of processed methods
+     */
+    public static long getMethodCount() {
+        return METHOD_COUNT.get();
+    }
+
+    /**
+     * @return {@code true} if classes limit is reached
+     */
+    public static boolean isLimitReached() {
+        return CLASSES_LIMIT_REACHED;
+    }
+
+    /**
+     * Compiles all methods and constructors.
+     *
+     * @param aClass class to compile
+     * @param executor executor used for compile task invocation
+     * @throws NullPointerException if {@code aClass} or {@code executor}
+     *                              is {@code null}
+     */
+    public static void compileClass(Class<?> aClass, Executor executor) {
+        Objects.requireNonNull(aClass);
+        Objects.requireNonNull(executor);
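+        // Class ids are 1-based; only classes whose id falls within the
+        // [CompileTheWorldStartAt, CompileTheWorldStopAt] bounds are compiled.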
+        long id = CLASS_COUNT.incrementAndGet();
+        if (id > Utils.COMPILE_THE_WORLD_STOP_AT) {
+            CLASS_COUNT.decrementAndGet();
+            CLASSES_LIMIT_REACHED = true;
+            return;
+        }
+
+        if (id >= Utils.COMPILE_THE_WORLD_START_AT) {
+            String name = aClass.getName();
+            try {
+                System.out.printf("[%d]\t%s%n", id, name);
+                ConstantPool constantPool = SharedSecrets.getJavaLangAccess().
+                        getConstantPool(aClass);
+                if (Utils.COMPILE_THE_WORLD_PRELOAD_CLASSES) {
+                    preloadClasses(name, id, constantPool);
+                }
+                long methodCount = 0;
+                for (Executable e : aClass.getDeclaredConstructors()) {
+                    ++methodCount;
+                    executor.execute(new CompileMethodCommand(id, name, e));
+                }
+                for (Executable e : aClass.getDeclaredMethods()) {
+                    ++methodCount;
+                    executor.execute(new CompileMethodCommand(id, name, e));
+                }
+                METHOD_COUNT.addAndGet(methodCount);
+
+                if (Utils.DEOPTIMIZE_ALL_CLASSES_RATE > 0
+                        && (id % Utils.DEOPTIMIZE_ALL_CLASSES_RATE == 0)) {
+                    WHITE_BOX.deoptimizeAll();
+                }
+            } catch (Throwable t) {
+                System.out.printf("[%d]\t%s\tskipping %s%n", id, name, t);
+                t.printStackTrace();
+            }
+        }
+    }
+
+    private static void preloadClasses(String className, long id,
+            ConstantPool constantPool) {
+        try {
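+            // Resolving every constant-pool entry forces loading of all
+            // classes the compiled class references; entries that are not
+            // classes throw IllegalArgumentException and are skipped.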
+            for (int i = 0, n = constantPool.getSize(); i < n; ++i) {
+                try {
+                    constantPool.getClassAt(i);
+                } catch (IllegalArgumentException ignore) {
+                }
+            }
+        } catch (Throwable t) {
+            System.out.printf("[%d]\t%s\tpreloading failed : %s%n", id,
+                    className, t);
+        }
+    }
+
+
+    /**
+     * Compiles a method on all available compilation levels.
+     */
+    private static class CompileMethodCommand implements Runnable {
+        private final long classId;
+        private final String className;
+        private final Executable method;
+
+        /**
+         * @param classId   id of class
+         * @param className name of class
+         * @param method    method to compile
+         */
+        public CompileMethodCommand(long classId, String className,
+                Executable method) {
+            this.classId = classId;
+            this.className = className;
+            this.method = method;
+        }
+
+        @Override
+        public final void run() {
+            int compLevel = Utils.INITIAL_COMP_LEVEL;
+            if (Utils.TIERED_COMPILATION) {
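+                // In tiered mode, walk every level up to TieredStopAtLevel,
+                // deoptimizing first so each recompilation actually happens.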
+                for (int i = compLevel; i <= Utils.TIERED_STOP_AT_LEVEL; ++i) {
+                    WHITE_BOX.deoptimizeMethod(method);
+                    compileMethod(method, i);
+                }
+            } else {
+                compileMethod(method, compLevel);
+            }
+        }
+
+        private void waitCompilation() {
+            if (!Utils.BACKGROUND_COMPILATION) {
+                return;
+            }
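+            // Poll for up to ~10 seconds while the method is still queued.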
+            final Object obj = new Object();
+            synchronized (obj) {
+                for (int i = 0;
+                     i < 10 && WHITE_BOX.isMethodQueuedForCompilation(method);
+                     ++i) {
+                    try {
+                        obj.wait(1000);
+                    } catch (InterruptedException e) {
+                        Thread.currentThread().interrupt();
+                    }
+                }
+            }
+        }
+
+        private void compileMethod(Executable method, int compLevel) {
+            if (WHITE_BOX.isMethodCompilable(method, compLevel)) {
+                try {
+                    WHITE_BOX.enqueueMethodForCompilation(method, compLevel);
+                    waitCompilation();
+                    int tmp = WHITE_BOX.getMethodCompilationLevel(method);
+                    if (tmp != compLevel) {
+                        logMethod(method, "compilation level = " + tmp
+                                + ", but not " + compLevel);
+                    } else if (Utils.IS_VERBOSE) {
+                        logMethod(method, "compilation level = " + tmp + ". OK");
+                    }
+                } catch (Throwable t) {
+                    logMethod(method, "error on compile at " + compLevel
+                            + " level");
+                    t.printStackTrace();
+                }
+            } else if (Utils.IS_VERBOSE) {
+                logMethod(method, "not compilable at " + compLevel);
+            }
+        }
+
+        private void logMethod(Executable method, String message) {
+            StringBuilder builder = new StringBuilder("[");
+            builder.append(classId);
+            builder.append("]\t");
+            builder.append(className);
+            builder.append("::");
+            builder.append(method.getName());
+            builder.append('(');
+            Class<?>[] params = method.getParameterTypes();
+            for (int i = 0, n = params.length - 1; i < n; ++i) {
+                builder.append(params[i].getName());
+                builder.append(", ");
+            }
+            if (params.length != 0) {
+                builder.append(params[params.length - 1].getName());
+            }
+            builder.append(')');
+            if (message != null) {
+                builder.append('\t');
+                builder.append(message);
+            }
+            System.err.println(builder);
+        }
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/testlibrary/ctw/src/sun/hotspot/tools/ctw/PathHandler.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package sun.hotspot.tools.ctw;
+
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.io.File;
+
+import java.util.Objects;
+import java.util.regex.Pattern;
+import java.util.regex.Matcher;
+import java.util.concurrent.Executor;
+
+/**
+ * Abstract handler for a path.
+ * Concrete subclasses should implement the method {@link #process()}.
+ */
+public abstract class PathHandler {
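+    // Matches a path whose last element is '*' (e.g. "jars/*" or a bare "*"),
+    // meaning "all jar files in that directory".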
+    private static final Pattern JAR_IN_DIR_PATTERN
+            = Pattern.compile("^(.*[/\\\\])?\\*$");
+    protected final Path root;
+    protected final Executor executor;
+    private ClassLoader loader;
+
+    /**
+     * @param root     root path to process
+     * @param executor executor used for process task invocation
+     * @throws NullPointerException if {@code root} or {@code executor} is
+     *                              {@code null}
+     */
+    protected PathHandler(Path root, Executor executor) {
+        Objects.requireNonNull(root);
+        Objects.requireNonNull(executor);
+        this.root = root.normalize();
+        this.executor = executor;
+        this.loader = ClassLoader.getSystemClassLoader();
+    }
+
+    /**
+     * Factory method. Constructs a concrete handler depending on {@code path}.
+     *
+     * @param path     the path to process
+     * @param executor executor used for compile task invocation
+     * @return         the created handler
+     * @throws NullPointerException if {@code path} or {@code executor} is
+     *                              {@code null}
+     */
+    public static PathHandler create(String path, Executor executor) {
+        Objects.requireNonNull(path);
+        Objects.requireNonNull(executor);
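+        // Dispatch on the shape of the path: "dir/*" handles all jars in a
+        // directory, a .jar/.zip file is a single jar, a .lst file is a list
+        // of class names, anything else is treated as a directory of classes.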
+        Matcher matcher = JAR_IN_DIR_PATTERN.matcher(path);
+        if (matcher.matches()) {
+            path = matcher.group(1);
+            path = path.isEmpty() ? "." : path;
+            return new ClassPathJarInDirEntry(Paths.get(path), executor);
+        } else {
+            path = path.isEmpty() ? "." : path;
+            Path p = Paths.get(path);
+            if (isJarFile(p)) {
+                return new ClassPathJarEntry(p, executor);
+            } else if (isListFile(p)) {
+                return new ClassesListInFile(p, executor);
+            } else {
+                return new ClassPathDirEntry(p, executor);
+            }
+        }
+    }
+
+    private static boolean isJarFile(Path path) {
+        if (Files.isRegularFile(path)) {
+            String name = path.toString();
+            return Utils.endsWithIgnoreCase(name, ".zip")
+                    || Utils.endsWithIgnoreCase(name, ".jar");
+        }
+        return false;
+    }
+
+    private static boolean isListFile(Path path) {
+        if (Files.isRegularFile(path)) {
+            String name = path.toString();
+            return Utils.endsWithIgnoreCase(name, ".lst");
+        }
+        return false;
+    }
+
+    /**
+     * Processes all classes in the specified path.
+     */
+    public abstract void process();
+
+    /**
+     * Sets the class loader that will be used to load classes in
+     * {@link #processClass(String)}.
+     *
+     * @param loader class loader
+     * @throws NullPointerException if {@code loader} is {@code null}
+     */
+    protected final void setLoader(ClassLoader loader) {
+        Objects.requireNonNull(loader);
+        this.loader = loader;
+    }
+
+    /**
+     * Processes the specified class.
+     * @param name fully qualified name of class to process
+     */
+    protected final void processClass(String name) {
+        try {
+            Class<?> aClass = Class.forName(name, true, loader);
+            Compiler.compileClass(aClass, executor);
+        } catch (ClassNotFoundException | LinkageError e) {
+            System.out.printf("Class %s loading failed : %s%n", name,
+                e.getMessage());
+        }
+    }
+
+    /**
+     * @return {@code true} if processing should be stopped
+     */
+    public static boolean isFinished() {
+        return Compiler.isLimitReached();
+    }
+
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/testlibrary/ctw/src/sun/hotspot/tools/ctw/Utils.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,213 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package sun.hotspot.tools.ctw;
+
+import com.sun.management.HotSpotDiagnosticMXBean;
+import sun.management.ManagementFactoryHelper;
+
+import java.io.File;
+import java.util.regex.Pattern;
+
+/**
+ * Auxiliary methods.
+ */
+public class Utils {
+    /**
+     * Value of {@code -XX:TieredCompilation}
+     */
+    public static final boolean TIERED_COMPILATION
+            = Boolean.parseBoolean(getVMOption("TieredCompilation", "false"));
+    /**
+     * Value of {@code -XX:BackgroundCompilation}
+     */
+    public static final boolean BACKGROUND_COMPILATION
+            = Boolean.parseBoolean(getVMOption("BackgroundCompilation",
+            "false"));
+    /**
+     * Value of {@code -XX:TieredStopAtLevel}
+     */
+    public static final int TIERED_STOP_AT_LEVEL;
+    /**
+     * Value of {@code -XX:CICompilerCount}
+     */
+    public static final Integer CI_COMPILER_COUNT
+            = Integer.valueOf(getVMOption("CICompilerCount", "1"));
+    /**
+     * Initial compilation level.
+     */
+    public static final int INITIAL_COMP_LEVEL;
+    /**
+     * Compiled path-separator regexp.
+     */
+    public static final Pattern PATH_SEPARATOR = Pattern.compile(
+            File.pathSeparator, Pattern.LITERAL);
+    /**
+     * Value of {@code -DDeoptimizeAllClassesRate}. Frequency of
+     * {@code WB.deoptimizeAll()} invocation. If it is not positive,
+     * {@code WB.deoptimizeAll()} will not be invoked.
+     */
+    public static final int DEOPTIMIZE_ALL_CLASSES_RATE
+            = Integer.getInteger("DeoptimizeAllClassesRate", -1);
+    /**
+     * Value of {@code -DCompileTheWorldStopAt}. Last class to consider.
+     */
+    public static final long COMPILE_THE_WORLD_STOP_AT
+            = Long.getLong("CompileTheWorldStopAt", Long.MAX_VALUE);
+    /**
+     * Value of {@code -DCompileTheWorldStartAt}. First class to consider.
+     */
+    public static final long COMPILE_THE_WORLD_START_AT
+            = Long.getLong("CompileTheWorldStartAt", 1);
+    /**
+     * Value of {@code -DCompileTheWorldPreloadClasses}. Preload all classes
+     * used by a class before it is compiled.
+     */
+    public static final boolean COMPILE_THE_WORLD_PRELOAD_CLASSES;
+    /**
+     * Value of {@code -Dsun.hotspot.tools.ctw.verbose}. Verbose output,
+     * adds additional information about compilation.
+     */
+    public static final boolean IS_VERBOSE
+            = Boolean.getBoolean("sun.hotspot.tools.ctw.verbose");
+    /**
+     * Value of {@code -Dsun.hotspot.tools.ctw.logfile}. Path to the log file;
+     * if it is {@code null}, standard output will be used.
+     */
+    public static final String LOG_FILE
+            = System.getProperty("sun.hotspot.tools.ctw.logfile");
+    static {
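+        // Without tiered compilation the single compilation level depends on
+        // the VM flavor: 4 (C2) for the Server VM, 1 (C1) for Client and
+        // Minimal VMs.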
+        if (Utils.TIERED_COMPILATION) {
+            INITIAL_COMP_LEVEL = 1;
+        } else {
+            String vmName = System.getProperty("java.vm.name");
+            if (Utils.endsWithIgnoreCase(vmName, " Server VM")) {
+                INITIAL_COMP_LEVEL = 4;
+            } else if (Utils.endsWithIgnoreCase(vmName, " Client VM")
+                    || Utils.endsWithIgnoreCase(vmName, " Minimal VM")) {
+                INITIAL_COMP_LEVEL = 1;
+            } else {
+                throw new RuntimeException("Unknown VM: " + vmName);
+            }
+        }
+
+        TIERED_STOP_AT_LEVEL = Integer.parseInt(getVMOption("TieredStopAtLevel",
+                String.valueOf(INITIAL_COMP_LEVEL)));
+    }
+
+    static {
+        String tmp = System.getProperty("CompileTheWorldPreloadClasses");
+        if (tmp == null) {
+            COMPILE_THE_WORLD_PRELOAD_CLASSES = true;
+        } else {
+            COMPILE_THE_WORLD_PRELOAD_CLASSES = Boolean.parseBoolean(tmp);
+        }
+    }
+
+    public static final String CLASSFILE_EXT = ".class";
+
+    private Utils() {
+    }
+
+    /**
+     * Tests if the string ends with the suffix, ignoring case
+     * considerations.
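+     * <p>For example, {@code endsWithIgnoreCase("rt.JAR", ".jar")} returns
+     * {@code true}.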
+     *
+     * @param string the tested string
+     * @param suffix the suffix
+     * @return {@code true} if {@code string} ends with the {@code suffix}
+     * @see String#endsWith(String)
+     */
+    public static boolean endsWithIgnoreCase(String string, String suffix) {
+        if (string == null || suffix == null) {
+            return false;
+        }
+        int length = suffix.length();
+        int toffset = string.length() - length;
+        if (toffset < 0) {
+            return false;
+        }
+        return string.regionMatches(true, toffset, suffix, 0, length);
+    }
+
+    /**
+     * Returns value of VM option.
+     *
+     * @param name option's name
+     * @return value of option or {@code null}, if option doesn't exist
+     * @throws NullPointerException if name is null
+     */
+    public static String getVMOption(String name) {
+        String result;
+        HotSpotDiagnosticMXBean diagnostic
+                = ManagementFactoryHelper.getDiagnosticMXBean();
+        result = diagnostic.getVMOption(name).getValue();
+        return result;
+    }
+
+    /**
+     * Returns value of VM option or default value.
+     *
+     * @param name         option's name
+     * @param defaultValue default value
+     * @return value of option or {@code defaultValue}, if option doesn't exist
+     * @throws NullPointerException if name is null
+     * @see #getVMOption(String)
+     */
+    public static String getVMOption(String name, String defaultValue) {
+        String result;
+        try {
+            result = getVMOption(name);
+        } catch (NoClassDefFoundError e) {
+            // compact1, compact2 support
+            result = defaultValue;
+        }
+        return result == null ? defaultValue : result;
+    }
+
+    /**
+     * Tests if the filename is a valid filename for a class file.
+     *
+     * @param filename tested filename
+     * @return {@code true} if {@code filename} names a class file
+     */
+    public static boolean isClassFile(String filename) {
+        // The only '.' allowed is the one starting the '.class' extension,
+        // so "Foo.class" is valid while "Foo.bar.class" is not.
+        return endsWithIgnoreCase(filename, CLASSFILE_EXT)
+                && (filename.indexOf('.')
+                == (filename.length() - CLASSFILE_EXT.length()));
+    }
+
+    /**
+     * Converts the filename to classname.
+     *
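+     * For example, {@code "sun/misc/Foo.class"} becomes {@code "sun.misc.Foo"}
+     * (with {@code '/'} standing for the platform separator).
+     *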
+     * @param filename filename to convert
+     * @return corresponding classname.
+     * @throws AssertionError if {@code filename} isn't a valid class-file name -
+     *                        {@link #isClassFile(String)}
+     */
+    public static String fileNameToClassName(String filename) {
+        assert isClassFile(filename);
+        return filename.substring(0, filename.length() - CLASSFILE_EXT.length())
+                       .replace(File.separatorChar, '.');
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/testlibrary/ctw/test/Bar.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,5 @@
+public class Bar {
+  private static void staticMethod() { }
+  public void method() { }
+  protected Bar() { }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/testlibrary/ctw/test/ClassesDirTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test ClassesDirTest
+ * @bug 8012447
+ * @library /testlibrary /testlibrary/whitebox /testlibrary/ctw/src
+ * @build sun.hotspot.tools.ctw.CompileTheWorld sun.hotspot.WhiteBox ClassesDirTest Foo Bar
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox Foo Bar
+ * @run main ClassesDirTest prepare
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Dsun.hotspot.tools.ctw.logfile=ctw.log sun.hotspot.tools.ctw.CompileTheWorld classes
+ * @run main ClassesDirTest check ctw.log
+ * @summary testing of CompileTheWorld :: classes in directory
+ * @author igor.ignatyev@oracle.com
+ */
+
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.nio.file.StandardCopyOption;
+
+public class ClassesDirTest extends CtwTest {
+    private static final String[] SHOULD_CONTAIN
+            = {"# dir: classes", "Done (2 classes, 6 methods, "};
+
+    private ClassesDirTest() {
+        super(SHOULD_CONTAIN);
+    }
+
+    public static void main(String[] args) throws Exception {
+        new ClassesDirTest().run(args);
+    }
+
+    protected void prepare() throws Exception {
+        String path = "classes";
+        Files.createDirectory(Paths.get(path));
+        Files.move(Paths.get("Foo.class"), Paths.get(path, "Foo.class"),
+                StandardCopyOption.REPLACE_EXISTING);
+        Files.move(Paths.get("Bar.class"), Paths.get(path, "Bar.class"),
+                StandardCopyOption.REPLACE_EXISTING);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/testlibrary/ctw/test/ClassesListTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test ClassesListTest
+ * @bug 8012447
+ * @library /testlibrary /testlibrary/whitebox /testlibrary/ctw/src
+ * @build sun.hotspot.tools.ctw.CompileTheWorld sun.hotspot.WhiteBox ClassesListTest Foo Bar
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox Foo Bar
+ * @run main ClassesListTest prepare
+ * @run main/othervm/timeout=600 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Dsun.hotspot.tools.ctw.logfile=ctw.log sun.hotspot.tools.ctw.CompileTheWorld classes.lst
+ * @run main ClassesListTest check ctw.log
+ * @summary testing of CompileTheWorld :: list of classes in file
+ * @author igor.ignatyev@oracle.com
+ */
+
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.nio.file.StandardCopyOption;
+
+public class ClassesListTest extends CtwTest {
+    private static final String[] SHOULD_CONTAIN
+            = {"# list: classes.lst", "Done (4 classes, "};
+
+    private ClassesListTest() {
+        super(SHOULD_CONTAIN);
+    }
+
+    public static void main(String[] args) throws Exception {
+        new ClassesListTest().run(args);
+    }
+
+    protected void prepare() throws Exception {
+        String path = "classes.lst";
+        Files.copy(Paths.get(System.getProperty("test.src"), path),
+                Paths.get(path), StandardCopyOption.REPLACE_EXISTING);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/testlibrary/ctw/test/CtwTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.util.List;
+import java.util.Collections;
+import java.util.ArrayList;
+
+import java.io.File;
+import java.io.Writer;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.BufferedReader;
+
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.nio.file.StandardCopyOption;
+import java.nio.charset.Charset;
+
+import com.oracle.java.testlibrary.JDKToolFinder;
+import com.oracle.java.testlibrary.OutputAnalyzer;
+
+public abstract class CtwTest {
+    protected final String[] shouldContain;
+    protected CtwTest(String[] shouldContain) {
+        this.shouldContain = shouldContain;
+    }
+
+    public void run(String[] args) throws Exception {
+        if (args.length == 0) {
+            throw new Error("args is empty");
+        }
+        switch (args[0]) {
+            case "prepare":
+                prepare();
+                break;
+            case "check":
+                check(args);
+                break;
+            default:
+                throw new Error("unregonized action -- " + args[0]);
+        }
+    }
+
+    protected void prepare() throws Exception { }
+
+    protected void check(String[] args) throws Exception {
+        if (args.length < 2) {
+            throw new Error("logfile isn't specified");
+        }
+        String logfile = args[1];
+        try (BufferedReader r = Files.newBufferedReader(Paths.get(logfile),
+                Charset.defaultCharset())) {
+            OutputAnalyzer output = readOutput(r);
+            for (String test : shouldContain) {
+                output.shouldContain(test);
+            }
+        }
+    }
+
+    private static OutputAnalyzer readOutput(BufferedReader reader)
+            throws IOException {
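+        // Rebuild the logged stdout line by line; the resulting analyzer
+        // has an empty stderr.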
+        StringBuilder builder = new StringBuilder();
+        String eol = String.format("%n");
+        String line;
+
+        while ((line = reader.readLine()) != null) {
+            builder.append(line);
+            builder.append(eol);
+        }
+        return new OutputAnalyzer(builder.toString(), "");
+    }
+
+    protected void dump(OutputAnalyzer output, String name) {
+        try (Writer w = new FileWriter(name + ".out")) {
+            String s = output.getStdout();
+            w.write(s, 0, s.length());
+        } catch (IOException io) {
+            io.printStackTrace();
+        }
+        try (Writer w = new FileWriter(name + ".err")) {
+            String s = output.getStderr();
+            w.write(s, 0, s.length());
+        } catch (IOException io) {
+            io.printStackTrace();
+        }
+    }
+
+    protected ProcessBuilder createJarProcessBuilder(String... command)
+            throws Exception {
+        String jarPath = JDKToolFinder.getJDKTool("jar");
+
+        ArrayList<String> args = new ArrayList<>();
+        args.add(jarPath);
+        Collections.addAll(args, command);
+
+        return new ProcessBuilder(args.toArray(new String[args.size()]));
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/testlibrary/ctw/test/Foo.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,5 @@
+public class Foo {
+  private static void staticMethod() { }
+  public void method() { }
+  protected Foo() { }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/testlibrary/ctw/test/JarDirTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test JarDirTest
+ * @bug 8012447
+ * @library /testlibrary /testlibrary/whitebox /testlibrary/ctw/src
+ * @build sun.hotspot.tools.ctw.CompileTheWorld sun.hotspot.WhiteBox JarDirTest Foo Bar
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox Foo Bar
+ * @run main JarDirTest prepare
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Dsun.hotspot.tools.ctw.logfile=ctw.log sun.hotspot.tools.ctw.CompileTheWorld jars/*
+ * @run main JarDirTest check ctw.log
+ * @summary testing of CompileTheWorld :: jars in directory
+ * @author igor.ignatyev@oracle.com
+ */
+
+import java.io.File;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+
+import com.oracle.java.testlibrary.OutputAnalyzer;
+
+public class JarDirTest extends CtwTest {
+    private static final String[] SHOULD_CONTAIN
+            = {"# jar_in_dir: jars",
+                    "# jar: jars" + File.separator +"foo.jar",
+                    "# jar: jars" + File.separator +"bar.jar",
+                    "Done (4 classes, 12 methods, "};
+
+    private JarDirTest() {
+        super(SHOULD_CONTAIN);
+    }
+
+    public static void main(String[] args) throws Exception {
+        new JarDirTest().run(args);
+    }
+
+    protected void prepare() throws Exception {
+        String path = "jars";
+        Files.createDirectory(Paths.get(path));
+
+        ProcessBuilder pb = createJarProcessBuilder("cf", "jars/foo.jar",
+                "Foo.class", "Bar.class");
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        dump(output, "ctw-foo.jar");
+        output.shouldHaveExitValue(0);
+
+        pb = createJarProcessBuilder("cf", "jars/bar.jar", "Foo.class",
+                "Bar.class");
+        output = new OutputAnalyzer(pb.start());
+        dump(output, "ctw-bar.jar");
+        output.shouldHaveExitValue(0);
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/testlibrary/ctw/test/JarsTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test JarsTest
+ * @bug 8012447
+ * @library /testlibrary /testlibrary/whitebox /testlibrary/ctw/src
+ * @build sun.hotspot.tools.ctw.CompileTheWorld sun.hotspot.WhiteBox JarsTest Foo Bar
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox Foo Bar
+ * @run main JarsTest prepare
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Dsun.hotspot.tools.ctw.logfile=ctw.log sun.hotspot.tools.ctw.CompileTheWorld foo.jar bar.jar
+ * @run main JarsTest check ctw.log
+ * @summary testing of CompileTheWorld :: jars
+ * @author igor.ignatyev@oracle.com
+ */
+
+import com.oracle.java.testlibrary.OutputAnalyzer;
+
+public class JarsTest extends CtwTest {
+    private static final String[] SHOULD_CONTAIN
+            = {"# jar: foo.jar", "# jar: bar.jar",
+                    "Done (4 classes, 12 methods, "};
+
+    private JarsTest() {
+        super(SHOULD_CONTAIN);
+    }
+
+    public static void main(String[] args) throws Exception {
+        new JarsTest().run(args);
+    }
+
+    protected void prepare() throws Exception {
+        ProcessBuilder pb = createJarProcessBuilder("cf", "foo.jar",
+                "Foo.class", "Bar.class");
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        dump(output, "ctw-foo.jar");
+        output.shouldHaveExitValue(0);
+
+        pb = createJarProcessBuilder("cf", "bar.jar", "Foo.class", "Bar.class");
+        output = new OutputAnalyzer(pb.start());
+        dump(output, "ctw-bar.jar");
+        output.shouldHaveExitValue(0);
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/testlibrary/ctw/test/classes.lst	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,4 @@
+java.lang.String
+java.lang.Object
+Foo
+Bar
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/testlibrary/whitebox/Makefile	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,63 @@
+#
+# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.	See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+#
+
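+# Locate a JDK: prefer ALT_BOOTDIR, then BOOTDIR, otherwise fall back to
+# the java found on the PATH.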
+ifneq "x$(ALT_BOOTDIR)" "x"
+	BOOTDIR := $(ALT_BOOTDIR)
+endif
+
+ifeq "x$(BOOTDIR)" "x"
+	JDK_HOME := $(shell dirname $(shell which java))/..
+else
+	JDK_HOME := $(BOOTDIR)
+endif
+
+SRC_DIR = ./
+BUILD_DIR = build
+OUTPUT_DIR = $(BUILD_DIR)/classes
+
+JAVAC = $(JDK_HOME)/bin/javac
+JAR = $(JDK_HOME)/bin/jar
+
+SRC_FILES = $(shell find $(SRC_DIR) -name '*.java')
+
+.PHONY: filelist clean cleantmp
+
+all: wb.jar cleantmp
+
+wb.jar: filelist
+	@mkdir -p $(OUTPUT_DIR)
+	$(JAVAC) -sourcepath $(SRC_DIR) -d $(OUTPUT_DIR) -cp $(OUTPUT_DIR) @filelist
+	$(JAR) cf wb.jar -C $(OUTPUT_DIR) .
+	@rm -rf $(OUTPUT_DIR)
+
+filelist: $(SRC_FILES)
+	@rm -f $@
+	@echo $(SRC_FILES) > $@
+
+clean: cleantmp
+	@rm -rf wb.jar
+
+cleantmp:
+	@rm -rf filelist
+	@rm -rf $(BUILD_DIR)
--- a/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java	Wed Oct 16 10:52:41 2013 +0200
+++ b/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java	Tue Nov 05 17:38:04 2013 -0800
@@ -61,6 +61,8 @@
     registerNatives();
   }
 
+  // Get the maximum heap size supporting COOPs
+  public native long getCompressedOopsMaxHeapSize();
   // Arguments
   public native void printHeapSizes();
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/testlibrary_tests/AssertsTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import static com.oracle.java.testlibrary.Asserts.*;
+
+/* @test
+ * @summary Tests the different assertions in the Assert class
+ * @library /testlibrary
+ */
+public class AssertsTest {
+    private static class Foo implements Comparable<Foo> {
+        final int id;
+        public Foo(int id) {
+            this.id = id;
+        }
+
+        public int compareTo(Foo f) {
+            return Integer.compare(id, f.id);
+        }
+    }
+
+    public static void main(String[] args) throws Exception {
+        testLessThan();
+        testLessThanOrEqual();
+        testEquals();
+        testGreaterThanOrEqual();
+        testGreaterThan();
+        testNotEquals();
+        testNull();
+        testNotNull();
+        testTrue();
+        testFalse();
+    }
+
+    private static void testLessThan() throws Exception {
+        expectPass(Assertion.LT, 1, 2);
+
+        expectFail(Assertion.LT, 2, 2);
+        expectFail(Assertion.LT, 2, 1);
+        expectFail(Assertion.LT, null, 2);
+        expectFail(Assertion.LT, 2, null);
+    }
+
+    private static void testLessThanOrEqual() throws Exception {
+        expectPass(Assertion.LTE, 1, 2);
+        expectPass(Assertion.LTE, 2, 2);
+
+        expectFail(Assertion.LTE, 3, 2);
+        expectFail(Assertion.LTE, null, 2);
+        expectFail(Assertion.LTE, 2, null);
+    }
+
+    private static void testEquals() throws Exception {
+        expectPass(Assertion.EQ, 1, 1);
+        expectPass(Assertion.EQ, null, null);
+
+        Foo f1 = new Foo(1);
+        expectPass(Assertion.EQ, f1, f1);
+
+        Foo f2 = new Foo(1);
+        expectFail(Assertion.EQ, f1, f2);
+        expectFail(Assertion.EQ, null, 2);
+        expectFail(Assertion.EQ, 2, null);
+    }
+
+    private static void testGreaterThanOrEqual() throws Exception {
+        expectPass(Assertion.GTE, 1, 1);
+        expectPass(Assertion.GTE, 2, 1);
+
+        expectFail(Assertion.GTE, 1, 2);
+        expectFail(Assertion.GTE, null, 2);
+        expectFail(Assertion.GTE, 2, null);
+    }
+
+    private static void testGreaterThan() throws Exception {
+        expectPass(Assertion.GT, 2, 1);
+
+        expectFail(Assertion.GT, 1, 1);
+        expectFail(Assertion.GT, 1, 2);
+        expectFail(Assertion.GT, null, 2);
+        expectFail(Assertion.GT, 2, null);
+    }
+
+    private static void testNotEquals() throws Exception {
+        expectPass(Assertion.NE, null, 1);
+        expectPass(Assertion.NE, 1, null);
+
+        Foo f1 = new Foo(1);
+        Foo f2 = new Foo(1);
+        expectPass(Assertion.NE, f1, f2);
+
+        expectFail(Assertion.NE, null, null);
+        expectFail(Assertion.NE, f1, f1);
+        expectFail(Assertion.NE, 1, 1);
+    }
+
+    private static void testNull() throws Exception {
+        expectPass(Assertion.NULL, null);
+
+        expectFail(Assertion.NULL, 1);
+    }
+
+    private static void testNotNull() throws Exception {
+        expectPass(Assertion.NOTNULL, 1);
+
+        expectFail(Assertion.NOTNULL, null);
+    }
+
+    private static void testTrue() throws Exception {
+        expectPass(Assertion.TRUE, true);
+
+        expectFail(Assertion.TRUE, false);
+    }
+
+    private static void testFalse() throws Exception {
+        expectPass(Assertion.FALSE, false);
+
+        expectFail(Assertion.FALSE, true);
+    }
+
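+    // expectPass propagates any assertion failure; expectFail succeeds only
+    // if the assertion throws a RuntimeException.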
+    private static <T extends Comparable<T>> void expectPass(Assertion assertion, T ... args)
+        throws Exception {
+        Assertion.run(assertion, args);
+    }
+
+    private static <T extends Comparable<T>> void expectFail(Assertion assertion, T ... args)
+        throws Exception {
+        try {
+            Assertion.run(assertion, args);
+        } catch (RuntimeException e) {
+            return;
+        }
+        throw new Exception("Expected " + Assertion.format(assertion, (Object[]) args) +
+                            " to throw a RuntimeException");
+    }
+
+}
+
+enum Assertion {
+    LT, LTE, EQ, GTE, GT, NE, NULL, NOTNULL, FALSE, TRUE;
+
+    public static <T extends Comparable<T>> void run(Assertion assertion, T ... args) {
+        String msg = "Expected " + format(assertion, args) + " to pass";
+        switch (assertion) {
+            case LT:
+                assertLessThan(args[0], args[1], msg);
+                break;
+            case LTE:
+                assertLessThanOrEqual(args[0], args[1], msg);
+                break;
+            case EQ:
+                assertEquals(args[0], args[1], msg);
+                break;
+            case GTE:
+                assertGreaterThanOrEqual(args[0], args[1], msg);
+                break;
+            case GT:
+                assertGreaterThan(args[0], args[1], msg);
+                break;
+            case NE:
+                assertNotEquals(args[0], args[1], msg);
+                break;
+            case NULL:
+                assertNull(args == null ? args : args[0], msg);
+                break;
+            case NOTNULL:
+                assertNotNull(args == null ? args : args[0], msg);
+                break;
+            case FALSE:
+                assertFalse((Boolean) args[0], msg);
+                break;
+            case TRUE:
+                assertTrue((Boolean) args[0], msg);
+                break;
+            default:
+                // do nothing
+        }
+    }
+
+    public static String format(Assertion assertion, Object ... args) {
+        switch (assertion) {
+            case LT:
+                return asString("assertLessThan", args);
+            case LTE:
+                return asString("assertLessThanOrEqual", args);
+            case EQ:
+                return asString("assertEquals", args);
+            case GTE:
+                return asString("assertGreaterThanOrEquals", args);
+            case GT:
+                return asString("assertGreaterThan", args);
+            case NE:
+                return asString("assertNotEquals", args);
+            case NULL:
+                return asString("assertNull", args);
+            case NOTNULL:
+                return asString("assertNotNull", args);
+            case FALSE:
+                return asString("assertFalse", args);
+            case TRUE:
+                return asString("assertTrue", args);
+            default:
+                return "";
+        }
+    }
+
+    private static String asString(String assertion, Object ... args) {
+        if (args == null) {
+            return String.format("%s(null)", assertion);
+        }
+        if (args.length == 1) {
+            return String.format("%s(%s)", assertion, args[0]);
+        } else {
+            return String.format("%s(%s, %s)", assertion, args[0], args[1]);
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/testlibrary_tests/OutputAnalyzerReportingTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @summary Test the OutputAnalyzer reporting functionality,
+ *     such as printing additional diagnostic info
+ *     (exit code, stdout, stderr, command line, etc.)
+ * @library /testlibrary
+ */
+
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
+
+import com.oracle.java.testlibrary.OutputAnalyzer;
+import com.oracle.java.testlibrary.ProcessTools;
+
+
+public class OutputAnalyzerReportingTest {
+
+    public static void main(String[] args) throws Exception {
+        // Create the output analyzer under test
+        String stdout = "aaaaaa";
+        String stderr = "bbbbbb";
+        OutputAnalyzer output = new OutputAnalyzer(stdout, stderr);
+
+        // Expected summary values should be the same for all cases,
+        // since the outputAnalyzer object is the same
+        String expectedExitValue = "-1";
+        String expectedSummary =
+                " stdout: [" + stdout + "];\n" +
+                " stderr: [" + stderr + "]\n" +
+                " exitValue = " + expectedExitValue + "\n";
+
+
+        DiagnosticSummaryTestRunner testRunner =
+                new DiagnosticSummaryTestRunner();
+
+        // should have exit value
+        testRunner.init(expectedSummary);
+        int unexpectedExitValue = 2;
+        try {
+            output.shouldHaveExitValue(unexpectedExitValue);
+        } catch (RuntimeException e) { }
+        testRunner.closeAndCheckResults();
+
+        // should not contain
+        testRunner.init(expectedSummary);
+        try {
+            output.shouldNotContain(stdout);
+        } catch (RuntimeException e) { }
+        testRunner.closeAndCheckResults();
+
+        // should contain
+        testRunner.init(expectedSummary);
+        try {
+            output.shouldContain("unexpected-stuff");
+        } catch (RuntimeException e) { }
+        testRunner.closeAndCheckResults();
+
+        // should not match
+        testRunner.init(expectedSummary);
+        try {
+            output.shouldNotMatch("[a]");
+        } catch (RuntimeException e) { }
+        testRunner.closeAndCheckResults();
+
+        // should match
+        testRunner.init(expectedSummary);
+        try {
+            output.shouldMatch("[qwerty]");
+        } catch (RuntimeException e) { }
+        testRunner.closeAndCheckResults();
+
+    }
+
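+    // Redirects System.err, lets a failing OutputAnalyzer check print its
+    // diagnostic summary there, and verifies the captured summary.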
+    private static class DiagnosticSummaryTestRunner {
+        private ByteArrayOutputStream byteStream =
+                new ByteArrayOutputStream(10000);
+
+        private String expectedSummary = "";
+        private PrintStream errStream;
+
+
+        public void init(String expectedSummary) {
+            this.expectedSummary = expectedSummary;
+            byteStream.reset();
+            errStream = new PrintStream(byteStream);
+            System.setErr(errStream);
+        }
+
+        public void closeAndCheckResults() {
+            // check results
+            errStream.close();
+            String stdErrStr = byteStream.toString();
+            if (!stdErrStr.contains(expectedSummary)) {
+                throw new RuntimeException("The output does not contain "
+                    + "the diagnostic message, or the message is incorrect");
+            }
+        }
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/testlibrary_tests/OutputAnalyzerTest.java	Tue Nov 05 17:38:04 2013 -0800
@@ -0,0 +1,193 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @summary Test the OutputAnalyzer utility class
+ * @library /testlibrary
+ */
+
+import com.oracle.java.testlibrary.OutputAnalyzer;
+
+public class OutputAnalyzerTest {
+
+  public static void main(String args[]) throws Exception {
+
+    String stdout = "aaaaaa";
+    String stderr = "bbbbbb";
+
+    // Regexps used for testing pattern matching of the test input
+    String stdoutPattern = "[a]";
+    String stderrPattern = "[b]";
+    String nonExistingPattern = "[c]";
+
+    OutputAnalyzer output = new OutputAnalyzer(stdout, stderr);
+
+    if (!stdout.equals(output.getStdout())) {
+      throw new Exception("getStdout() returned '" + output.getStdout() + "', expected '" + stdout + "'");
+    }
+
+    if (!stderr.equals(output.getStderr())) {
+      throw new Exception("getStderr() returned '" + output.getStderr() + "', expected '" + stderr + "'");
+    }
+
+    try {
+      output.shouldContain(stdout);
+      output.stdoutShouldContain(stdout);
+      output.shouldContain(stderr);
+      output.stderrShouldContain(stderr);
+    } catch (RuntimeException e) {
+      throw new Exception("shouldContain() failed", e);
+    }
+
+    try {
+      output.shouldContain("cccc");
+      throw new Exception("shouldContain() failed to throw exception");
+    } catch (RuntimeException e) {
+      // expected
+    }
+
+    try {
+      output.stdoutShouldContain(stderr);
+      throw new Exception("stdoutShouldContain() failed to throw exception");
+    } catch (RuntimeException e) {
+      // expected
+    }
+
+    try {
+      output.stderrShouldContain(stdout);
+      throw new Exception("stdoutShouldContain() failed to throw exception");
+    } catch (RuntimeException e) {
+      // expected
+    }
+
+    try {
+      output.shouldNotContain("cccc");
+      output.stdoutShouldNotContain("cccc");
+      output.stderrShouldNotContain("cccc");
+    } catch (RuntimeException e) {
+      throw new Exception("shouldNotContain() failed", e);
+    }
+
+    try {
+      output.shouldNotContain(stdout);
+      throw new Exception("shouldContain() failed to throw exception");
+    } catch (RuntimeException e) {
+      // expected
+    }
+
+    try {
+      output.stdoutShouldNotContain(stdout);
+      throw new Exception("shouldContain() failed to throw exception");
+    } catch (RuntimeException e) {
+      // expected
+    }
+
+    try {
+        output.stderrShouldNotContain(stderr);
+        throw new Exception("shouldContain() failed to throw exception");
+    } catch (RuntimeException e) {
+        // expected
+    }
+
+    // Should match
+    try {
+        output.shouldMatch(stdoutPattern);
+        output.stdoutShouldMatch(stdoutPattern);
+        output.shouldMatch(stderrPattern);
+        output.stderrShouldMatch(stderrPattern);
+    } catch (RuntimeException e) {
+        throw new Exception("shouldMatch() failed", e);
+    }
+
+    try {
+        output.shouldMatch(nonExistingPattern);
+        throw new Exception("shouldMatch() failed to throw exception");
+    } catch (RuntimeException e) {
+        // expected
+    }
+
+    try {
+        output.stdoutShouldMatch(stderrPattern);
+        throw new Exception(
+                "stdoutShouldMatch() failed to throw exception");
+    } catch (RuntimeException e) {
+        // expected
+    }
+
+    try {
+        output.stderrShouldMatch(stdoutPattern);
+        throw new Exception(
+                "stderrShouldMatch() failed to throw exception");
+    } catch (RuntimeException e) {
+        // expected
+    }
+
+    // Should not match
+    try {
+        output.shouldNotMatch(nonExistingPattern);
+        output.stdoutShouldNotMatch(nonExistingPattern);
+        output.stderrShouldNotMatch(nonExistingPattern);
+    } catch (RuntimeException e) {
+        throw new Exception("shouldNotMatch() failed", e);
+    }
+
+    try {
+        output.shouldNotMatch(stdoutPattern);
+        throw new Exception("shouldNotMatch() failed to throw exception");
+    } catch (RuntimeException e) {
+        // expected
+    }
+
+    try {
+        output.stdoutShouldNotMatch(stdoutPattern);
+        throw new Exception("shouldNotMatch() failed to throw exception");
+    } catch (RuntimeException e) {
+        // expected
+    }
+
+    try {
+        output.stderrShouldNotMatch(stderrPattern);
+        throw new Exception("shouldNotMatch() failed to throw exception");
+    } catch (RuntimeException e) {
+        // expected
+    }
+
+    {
+      String aaaa = "aaaa";
+      String result = output.firstMatch(aaaa);
+      if (!aaaa.equals(result)) {
+        throw new Exception("firstMatch(String) faild to match. Expected: " + aaaa + " got: " + result);
+      }
+    }
+
+    {
+      String aa = "aa";
+      String aa_grouped_aa = aa + "(" + aa + ")";
+      String result = output.firstMatch(aa_grouped_aa, 1);
+      if (!aa.equals(result)) {
+        throw new Exception("firstMatch(String, int) failed to match. Expected: " + aa + " got: " + result);
+      }
+    }
+  }
+}