changeset 12342:ccef6e165e8b

Merge
author tschatzl
date Fri, 27 Sep 2013 13:41:07 +0200
parents bc918fd1e584 (current diff) 4fa18058548e (diff)
children d55c004e1d4d
files
diffstat 41 files changed, 679 insertions(+), 444 deletions(-)
line diff
--- a/.hgtags	Fri Sep 27 10:23:12 2013 +0200
+++ b/.hgtags	Fri Sep 27 13:41:07 2013 +0200
@@ -379,3 +379,5 @@
 a09fe9d1e016c285307507a5793bc4fa6215e9c9 hs25-b50
 85072013aad46050a362d10ab78e963121c8014c jdk8-b108
 566db1b0e6efca31f181456e54c8911d0192410d hs25-b51
+c81dd5393a5e333df7cb1f6621f5897ada6522b5 jdk8-b109
+58043478c26d4e8bf48700acea5f97aba8b417d4 hs25-b52
--- a/agent/src/os/linux/ps_core.c	Fri Sep 27 10:23:12 2013 +0200
+++ b/agent/src/os/linux/ps_core.c	Fri Sep 27 13:41:07 2013 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -698,29 +698,58 @@
 
 // read segments of a shared object
 static bool read_lib_segments(struct ps_prochandle* ph, int lib_fd, ELF_EHDR* lib_ehdr, uintptr_t lib_base) {
-   int i = 0;
-   ELF_PHDR* phbuf;
-   ELF_PHDR* lib_php = NULL;
+  int i = 0;
+  ELF_PHDR* phbuf;
+  ELF_PHDR* lib_php = NULL;
+
+  int page_size = sysconf(_SC_PAGE_SIZE);
 
-   if ((phbuf = read_program_header_table(lib_fd, lib_ehdr)) == NULL)
-      return false;
+  if ((phbuf = read_program_header_table(lib_fd, lib_ehdr)) == NULL) {
+    return false;
+  }
+
+  // we want to process only PT_LOAD segments that are not writable.
+  // i.e., text segments. The read/write/exec (data) segments would
+  // have been already added from core file segments.
+  for (lib_php = phbuf, i = 0; i < lib_ehdr->e_phnum; i++) {
+    if ((lib_php->p_type == PT_LOAD) && !(lib_php->p_flags & PF_W) && (lib_php->p_filesz != 0)) {
+
+      uintptr_t target_vaddr = lib_php->p_vaddr + lib_base;
+      map_info *existing_map = core_lookup(ph, target_vaddr);
 
-   // we want to process only PT_LOAD segments that are not writable.
-   // i.e., text segments. The read/write/exec (data) segments would
-   // have been already added from core file segments.
-   for (lib_php = phbuf, i = 0; i < lib_ehdr->e_phnum; i++) {
-      if ((lib_php->p_type == PT_LOAD) && !(lib_php->p_flags & PF_W) && (lib_php->p_filesz != 0)) {
-         if (add_map_info(ph, lib_fd, lib_php->p_offset, lib_php->p_vaddr + lib_base, lib_php->p_filesz) == NULL)
-            goto err;
+      if (existing_map == NULL) {
+        if (add_map_info(ph, lib_fd, lib_php->p_offset,
+                          target_vaddr, lib_php->p_filesz) == NULL) {
+          goto err;
+        }
+      } else {
+        if ((existing_map->memsz != page_size) &&
+            (existing_map->fd != lib_fd) &&
+            (existing_map->memsz != lib_php->p_filesz)) {
+
+          print_debug("address conflict @ 0x%lx (size = %ld, flags = %d)\n",
+                        target_vaddr, lib_php->p_filesz, lib_php->p_flags);
+          goto err;
+        }
+
+        /* replace PT_LOAD segment with library segment */
+        print_debug("overwrote with new address mapping (memsz %ld -> %ld)\n",
+                     existing_map->memsz, lib_php->p_filesz);
+
+        existing_map->fd = lib_fd;
+        existing_map->offset = lib_php->p_offset;
+        existing_map->memsz = lib_php->p_filesz;
       }
-      lib_php++;
-   }
+    }
+
+    lib_php++;
+  }
 
-   free(phbuf);
-   return true;
+  free(phbuf);
+  return true;
 err:
-   free(phbuf);
-   return false;
+  free(phbuf);
+  return false;
 }
 
 // process segments from interpreter (ld.so or ld-linux.so)
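
The hunk above only adds a library text segment when the core file did not already map its address; otherwise it overwrites the existing (typically page-sized) placeholder mapping so that the bytes are read from the library file instead. A minimal standalone sketch of that merge decision, with hypothetical types standing in for the agent's map_info/core_lookup/add_map_info:

  #include <cstddef>
  #include <cstdio>
  #include <vector>

  // Hypothetical stand-in for the agent's map_info entries.
  struct Mapping {
    int    fd;       // file the bytes come from
    long   offset;   // offset within that file
    size_t vaddr;    // virtual address in the target process
    size_t memsz;    // size of the mapping
  };

  // Return the mapping that covers addr, or NULL (like core_lookup()).
  static Mapping* lookup(std::vector<Mapping>& maps, size_t addr) {
    for (size_t i = 0; i < maps.size(); i++) {
      if (addr >= maps[i].vaddr && addr < maps[i].vaddr + maps[i].memsz) {
        return &maps[i];
      }
    }
    return NULL;
  }

  // Merge one read-only PT_LOAD segment of a shared library.
  static bool merge_text_segment(std::vector<Mapping>& maps, int lib_fd,
                                 long offset, size_t vaddr, size_t filesz,
                                 size_t page_size) {
    Mapping* existing = lookup(maps, vaddr);
    if (existing == NULL) {
      // The core file did not map this address: add a new mapping.
      Mapping m = { lib_fd, offset, vaddr, filesz };
      maps.push_back(m);
      return true;
    }
    if (existing->memsz != page_size && existing->fd != lib_fd &&
        existing->memsz != filesz) {
      // Unexpected overlap with a mapping we cannot explain.
      fprintf(stderr, "address conflict @ 0x%zx\n", vaddr);
      return false;
    }
    // Replace the core file's placeholder with the library segment.
    existing->fd     = lib_fd;
    existing->offset = offset;
    existing->memsz  = filesz;
    return true;
  }
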
--- a/make/hotspot_version	Fri Sep 27 10:23:12 2013 +0200
+++ b/make/hotspot_version	Fri Sep 27 13:41:07 2013 +0200
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=25
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=52
+HS_BUILD_NUMBER=53
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
--- a/make/jprt.properties	Fri Sep 27 10:23:12 2013 +0200
+++ b/make/jprt.properties	Fri Sep 27 13:41:07 2013 +0200
@@ -120,13 +120,13 @@
 jprt.my.macosx.x64.jdk7u8=${jprt.my.macosx.x64.jdk7}
 jprt.my.macosx.x64=${jprt.my.macosx.x64.${jprt.tools.default.release}}
 
-jprt.my.windows.i586.jdk8=windows_i586_5.1
-jprt.my.windows.i586.jdk7=windows_i586_5.1
+jprt.my.windows.i586.jdk8=windows_i586_6.1
+jprt.my.windows.i586.jdk7=windows_i586_6.1
 jprt.my.windows.i586.jdk7u8=${jprt.my.windows.i586.jdk7}
 jprt.my.windows.i586=${jprt.my.windows.i586.${jprt.tools.default.release}}
 
-jprt.my.windows.x64.jdk8=windows_x64_5.2
-jprt.my.windows.x64.jdk7=windows_x64_5.2
+jprt.my.windows.x64.jdk8=windows_x64_6.1
+jprt.my.windows.x64.jdk7=windows_x64_6.1
 jprt.my.windows.x64.jdk7u8=${jprt.my.windows.x64.jdk7}
 jprt.my.windows.x64=${jprt.my.windows.x64.${jprt.tools.default.release}}
 
--- a/src/cpu/sparc/vm/vtableStubs_sparc.cpp	Fri Sep 27 10:23:12 2013 +0200
+++ b/src/cpu/sparc/vm/vtableStubs_sparc.cpp	Fri Sep 27 13:41:07 2013 +0200
@@ -52,6 +52,11 @@
 VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
   const int sparc_code_length = VtableStub::pd_code_size_limit(true);
   VtableStub* s = new(sparc_code_length) VtableStub(true, vtable_index);
+  // Can be NULL if there is no free space in the code cache.
+  if (s == NULL) {
+    return NULL;
+  }
+
   ResourceMark rm;
   CodeBuffer cb(s->entry_point(), sparc_code_length);
   MacroAssembler* masm = new MacroAssembler(&cb);
@@ -125,6 +130,11 @@
 VtableStub* VtableStubs::create_itable_stub(int itable_index) {
   const int sparc_code_length = VtableStub::pd_code_size_limit(false);
   VtableStub* s = new(sparc_code_length) VtableStub(false, itable_index);
+  // Can be NULL if there is no free space in the code cache.
+  if (s == NULL) {
+    return NULL;
+  }
+
   ResourceMark rm;
   CodeBuffer cb(s->entry_point(), sparc_code_length);
   MacroAssembler* masm = new MacroAssembler(&cb);
--- a/src/cpu/x86/vm/vtableStubs_x86_32.cpp	Fri Sep 27 10:23:12 2013 +0200
+++ b/src/cpu/x86/vm/vtableStubs_x86_32.cpp	Fri Sep 27 13:41:07 2013 +0200
@@ -58,6 +58,11 @@
 VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
   const int i486_code_length = VtableStub::pd_code_size_limit(true);
   VtableStub* s = new(i486_code_length) VtableStub(true, vtable_index);
+  // Can be NULL if there is no free space in the code cache.
+  if (s == NULL) {
+    return NULL;
+  }
+
   ResourceMark rm;
   CodeBuffer cb(s->entry_point(), i486_code_length);
   MacroAssembler* masm = new MacroAssembler(&cb);
@@ -132,6 +137,11 @@
   //            add code here, bump the code stub size returned by pd_code_size_limit!
   const int i486_code_length = VtableStub::pd_code_size_limit(false);
   VtableStub* s = new(i486_code_length) VtableStub(false, itable_index);
+  // Can be NULL if there is no free space in the code cache.
+  if (s == NULL) {
+    return NULL;
+  }
+
   ResourceMark rm;
   CodeBuffer cb(s->entry_point(), i486_code_length);
   MacroAssembler* masm = new MacroAssembler(&cb);
--- a/src/cpu/x86/vm/vtableStubs_x86_64.cpp	Fri Sep 27 10:23:12 2013 +0200
+++ b/src/cpu/x86/vm/vtableStubs_x86_64.cpp	Fri Sep 27 13:41:07 2013 +0200
@@ -49,6 +49,11 @@
 VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
   const int amd64_code_length = VtableStub::pd_code_size_limit(true);
   VtableStub* s = new(amd64_code_length) VtableStub(true, vtable_index);
+  // Can be NULL if there is no free space in the code cache.
+  if (s == NULL) {
+    return NULL;
+  }
+
   ResourceMark rm;
   CodeBuffer cb(s->entry_point(), amd64_code_length);
   MacroAssembler* masm = new MacroAssembler(&cb);
@@ -126,6 +131,11 @@
   // returned by pd_code_size_limit!
   const int amd64_code_length = VtableStub::pd_code_size_limit(false);
   VtableStub* s = new(amd64_code_length) VtableStub(false, itable_index);
+  // Can be NULL if there is no free space in the code cache.
+  if (s == NULL) {
+    return NULL;
+  }
+
   ResourceMark rm;
   CodeBuffer cb(s->entry_point(), amd64_code_length);
   MacroAssembler* masm = new MacroAssembler(&cb);
--- a/src/share/vm/c1/c1_GraphBuilder.cpp	Fri Sep 27 10:23:12 2013 +0200
+++ b/src/share/vm/c1/c1_GraphBuilder.cpp	Fri Sep 27 13:41:07 2013 +0200
@@ -4219,7 +4219,9 @@
     }
   }
 
-  if (!PrintInlining)  return;
+  if (!PrintInlining && !compilation()->method()->has_option("PrintInlining")) {
+    return;
+  }
   CompileTask::print_inlining(callee, scope()->level(), bci(), msg);
   if (success && CIPrintMethodCodes) {
     callee->print_codes();
--- a/src/share/vm/classfile/javaClasses.cpp	Fri Sep 27 10:23:12 2013 +0200
+++ b/src/share/vm/classfile/javaClasses.cpp	Fri Sep 27 13:41:07 2013 +0200
@@ -438,6 +438,29 @@
   return true;
 }
 
+bool java_lang_String::equals(oop str1, oop str2) {
+  assert(str1->klass() == SystemDictionary::String_klass(),
+         "must be java String");
+  assert(str2->klass() == SystemDictionary::String_klass(),
+         "must be java String");
+  typeArrayOop value1  = java_lang_String::value(str1);
+  int          offset1 = java_lang_String::offset(str1);
+  int          length1 = java_lang_String::length(str1);
+  typeArrayOop value2  = java_lang_String::value(str2);
+  int          offset2 = java_lang_String::offset(str2);
+  int          length2 = java_lang_String::length(str2);
+
+  if (length1 != length2) {
+    return false;
+  }
+  for (int i = 0; i < length1; i++) {
+    if (value1->char_at(i + offset1) != value2->char_at(i + offset2)) {
+      return false;
+    }
+  }
+  return true;
+}
+
 void java_lang_String::print(Handle java_string, outputStream* st) {
   oop          obj    = java_string();
   assert(obj->klass() == SystemDictionary::String_klass(), "must be java_string");
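
The new java_lang_String::equals(oop, oop) above compares the two Strings through their value array, offset and length fields. A standalone sketch of the same comparison over plain arrays (hypothetical helper, not the VM's oop accessors):

  #include <cstdint>

  // Compare two "string views" given as (chars, offset, length) triples,
  // mirroring the value/offset/length fields read from the String oops.
  static bool string_equals(const uint16_t* value1, int offset1, int length1,
                            const uint16_t* value2, int offset2, int length2) {
    if (length1 != length2) {
      return false;
    }
    for (int i = 0; i < length1; i++) {
      if (value1[offset1 + i] != value2[offset2 + i]) {
        return false;
      }
    }
    return true;
  }
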
--- a/src/share/vm/classfile/javaClasses.hpp	Fri Sep 27 10:23:12 2013 +0200
+++ b/src/share/vm/classfile/javaClasses.hpp	Fri Sep 27 13:41:07 2013 +0200
@@ -182,6 +182,7 @@
   static unsigned int hash_string(oop java_string);
 
   static bool equals(oop java_string, jchar* chars, int len);
+  static bool equals(oop str1, oop str2);
 
   // Conversion between '.' and '/' formats
   static Handle externalize_classname(Handle java_string, TRAPS) { return char_converter(java_string, '/', '.', THREAD); }
--- a/src/share/vm/classfile/symbolTable.cpp	Fri Sep 27 10:23:12 2013 +0200
+++ b/src/share/vm/classfile/symbolTable.cpp	Fri Sep 27 13:41:07 2013 +0200
@@ -807,6 +807,8 @@
   }
 }
 
+// This verification is part of Universe::verify() and needs to be quick.
+// See StringTable::verify_and_compare() below for exhaustive verification.
 void StringTable::verify() {
   for (int i = 0; i < the_table()->table_size(); ++i) {
     HashtableEntry<oop, mtSymbol>* p = the_table()->bucket(i);
@@ -825,6 +827,162 @@
   the_table()->dump_table(st, "StringTable");
 }
 
+StringTable::VerifyRetTypes StringTable::compare_entries(
+                                      int bkt1, int e_cnt1,
+                                      HashtableEntry<oop, mtSymbol>* e_ptr1,
+                                      int bkt2, int e_cnt2,
+                                      HashtableEntry<oop, mtSymbol>* e_ptr2) {
+  // These entries are sanity checked by verify_and_compare_entries()
+  // before this function is called.
+  oop str1 = e_ptr1->literal();
+  oop str2 = e_ptr2->literal();
+
+  if (str1 == str2) {
+    tty->print_cr("ERROR: identical oop values (0x" PTR_FORMAT ") "
+                  "in entry @ bucket[%d][%d] and entry @ bucket[%d][%d]",
+                  str1, bkt1, e_cnt1, bkt2, e_cnt2);
+    return _verify_fail_continue;
+  }
+
+  if (java_lang_String::equals(str1, str2)) {
+    tty->print_cr("ERROR: identical String values in entry @ "
+                  "bucket[%d][%d] and entry @ bucket[%d][%d]",
+                  bkt1, e_cnt1, bkt2, e_cnt2);
+    return _verify_fail_continue;
+  }
+
+  return _verify_pass;
+}
+
+StringTable::VerifyRetTypes StringTable::verify_entry(int bkt, int e_cnt,
+                                      HashtableEntry<oop, mtSymbol>* e_ptr,
+                                      StringTable::VerifyMesgModes mesg_mode) {
+
+  VerifyRetTypes ret = _verify_pass;  // be optimistic
+
+  oop str = e_ptr->literal();
+  if (str == NULL) {
+    if (mesg_mode == _verify_with_mesgs) {
+      tty->print_cr("ERROR: NULL oop value in entry @ bucket[%d][%d]", bkt,
+                    e_cnt);
+    }
+    // NULL oop means no more verifications are possible
+    return _verify_fail_done;
+  }
+
+  if (str->klass() != SystemDictionary::String_klass()) {
+    if (mesg_mode == _verify_with_mesgs) {
+      tty->print_cr("ERROR: oop is not a String in entry @ bucket[%d][%d]",
+                    bkt, e_cnt);
+    }
+    // not a String means no more verifications are possible
+    return _verify_fail_done;
+  }
+
+  unsigned int h = java_lang_String::hash_string(str);
+  if (e_ptr->hash() != h) {
+    if (mesg_mode == _verify_with_mesgs) {
+      tty->print_cr("ERROR: broken hash value in entry @ bucket[%d][%d], "
+                    "bkt_hash=%d, str_hash=%d", bkt, e_cnt, e_ptr->hash(), h);
+    }
+    ret = _verify_fail_continue;
+  }
+
+  if (the_table()->hash_to_index(h) != bkt) {
+    if (mesg_mode == _verify_with_mesgs) {
+      tty->print_cr("ERROR: wrong index value for entry @ bucket[%d][%d], "
+                    "str_hash=%d, hash_to_index=%d", bkt, e_cnt, h,
+                    the_table()->hash_to_index(h));
+    }
+    ret = _verify_fail_continue;
+  }
+
+  return ret;
+}
+
+// See StringTable::verify() above for the quick verification that is
+// part of Universe::verify(). This verification is exhaustive and
+// reports on every issue that is found. StringTable::verify() only
+// reports on the first issue that is found.
+//
+// StringTable::verify_entry() checks:
+// - oop value != NULL (same as verify())
+// - oop value is a String
+// - hash(String) == hash in entry (same as verify())
+// - index for hash == index of entry (same as verify())
+//
+// StringTable::compare_entries() checks:
+// - oops are unique across all entries
+// - String values are unique across all entries
+//
+int StringTable::verify_and_compare_entries() {
+  assert(StringTable_lock->is_locked(), "sanity check");
+
+  int  fail_cnt = 0;
+
+  // first, verify all the entries individually:
+  for (int bkt = 0; bkt < the_table()->table_size(); bkt++) {
+    HashtableEntry<oop, mtSymbol>* e_ptr = the_table()->bucket(bkt);
+    for (int e_cnt = 0; e_ptr != NULL; e_ptr = e_ptr->next(), e_cnt++) {
+      VerifyRetTypes ret = verify_entry(bkt, e_cnt, e_ptr, _verify_with_mesgs);
+      if (ret != _verify_pass) {
+        fail_cnt++;
+      }
+    }
+  }
+
+  // Optimization: if the above check did not find any failures, then
+  // the comparison loop below does not need to call verify_entry()
+  // before calling compare_entries(). If there were failures, then we
+  // have to call verify_entry() to see if the entry can be passed to
+  // compare_entries() safely. When we call verify_entry() in the loop
+  // below, we do so quietly to avoid duplicate messages and we don't
+  // increment fail_cnt because the failures have already been counted.
+  bool need_entry_verify = (fail_cnt != 0);
+
+  // second, verify all entries relative to each other:
+  for (int bkt1 = 0; bkt1 < the_table()->table_size(); bkt1++) {
+    HashtableEntry<oop, mtSymbol>* e_ptr1 = the_table()->bucket(bkt1);
+    for (int e_cnt1 = 0; e_ptr1 != NULL; e_ptr1 = e_ptr1->next(), e_cnt1++) {
+      if (need_entry_verify) {
+        VerifyRetTypes ret = verify_entry(bkt1, e_cnt1, e_ptr1,
+                                          _verify_quietly);
+        if (ret == _verify_fail_done) {
+          // cannot use the current entry to compare against other entries
+          continue;
+        }
+      }
+
+      for (int bkt2 = bkt1; bkt2 < the_table()->table_size(); bkt2++) {
+        HashtableEntry<oop, mtSymbol>* e_ptr2 = the_table()->bucket(bkt2);
+        int e_cnt2;
+        for (e_cnt2 = 0; e_ptr2 != NULL; e_ptr2 = e_ptr2->next(), e_cnt2++) {
+          if (bkt1 == bkt2 && e_cnt2 <= e_cnt1) {
+            // skip the entries up to and including the one that
+            // we're comparing against
+            continue;
+          }
+
+          if (need_entry_verify) {
+            VerifyRetTypes ret = verify_entry(bkt2, e_cnt2, e_ptr2,
+                                              _verify_quietly);
+            if (ret == _verify_fail_done) {
+              // cannot compare against this entry
+              continue;
+            }
+          }
+
+          // compare two entries, report and count any failures:
+          if (compare_entries(bkt1, e_cnt1, e_ptr1, bkt2, e_cnt2, e_ptr2)
+              != _verify_pass) {
+            fail_cnt++;
+          }
+        }
+      }
+    }
+  }
+  return fail_cnt;
+}
 
 // Create a new table and using alternate hash code, populate the new table
 // with the existing strings.   Set flag to use the alternate hash code afterwards.
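
The nested loops in verify_and_compare_entries() visit each unordered pair of entries exactly once: the inner scan starts at the outer bucket and skips entries up to and including the outer entry. A compact standalone model of that triangular iteration over a bucketed table (hypothetical types, not the StringTable itself):

  #include <string>
  #include <vector>

  typedef std::vector<std::vector<std::string> > Table;  // buckets of entries

  // Count duplicate values across the whole table, comparing each unordered
  // pair of entries exactly once (same shape as verify_and_compare_entries()).
  static int count_duplicates(const Table& table) {
    int fail_cnt = 0;
    for (size_t bkt1 = 0; bkt1 < table.size(); bkt1++) {
      for (size_t e1 = 0; e1 < table[bkt1].size(); e1++) {
        for (size_t bkt2 = bkt1; bkt2 < table.size(); bkt2++) {
          for (size_t e2 = 0; e2 < table[bkt2].size(); e2++) {
            if (bkt1 == bkt2 && e2 <= e1) {
              continue;  // skip self and already-compared entries
            }
            if (table[bkt1][e1] == table[bkt2][e2]) {
              fail_cnt++;  // report/count the failure
            }
          }
        }
      }
    }
    return fail_cnt;
  }
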
--- a/src/share/vm/classfile/symbolTable.hpp	Fri Sep 27 10:23:12 2013 +0200
+++ b/src/share/vm/classfile/symbolTable.hpp	Fri Sep 27 13:41:07 2013 +0200
@@ -311,6 +311,26 @@
   static void verify();
   static void dump(outputStream* st);
 
+  enum VerifyMesgModes {
+    _verify_quietly    = 0,
+    _verify_with_mesgs = 1
+  };
+
+  enum VerifyRetTypes {
+    _verify_pass          = 0,
+    _verify_fail_continue = 1,
+    _verify_fail_done     = 2
+  };
+
+  static VerifyRetTypes compare_entries(int bkt1, int e_cnt1,
+                                        HashtableEntry<oop, mtSymbol>* e_ptr1,
+                                        int bkt2, int e_cnt2,
+                                        HashtableEntry<oop, mtSymbol>* e_ptr2);
+  static VerifyRetTypes verify_entry(int bkt, int e_cnt,
+                                     HashtableEntry<oop, mtSymbol>* e_ptr,
+                                     VerifyMesgModes mesg_mode);
+  static int verify_and_compare_entries();
+
   // Sharing
   static void copy_buckets(char** top, char*end) {
     the_table()->Hashtable<oop, mtSymbol>::copy_buckets(top, end);
--- a/src/share/vm/code/compiledIC.cpp	Fri Sep 27 10:23:12 2013 +0200
+++ b/src/share/vm/code/compiledIC.cpp	Fri Sep 27 13:41:07 2013 +0200
@@ -160,7 +160,7 @@
 // High-level access to an inline cache. Guaranteed to be MT-safe.
 
 
-void CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
+bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
   assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
   assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic");
   assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?");
@@ -170,8 +170,10 @@
     assert(bytecode == Bytecodes::_invokeinterface, "");
     int itable_index = call_info->itable_index();
     entry = VtableStubs::find_itable_stub(itable_index);
+    if (entry == NULL) {
+      return false;
+    }
 #ifdef ASSERT
-    assert(entry != NULL, "entry not computed");
     int index = call_info->resolved_method()->itable_index();
     assert(index == itable_index, "CallInfo pre-computes this");
 #endif //ASSERT
@@ -184,6 +186,9 @@
     int vtable_index = call_info->vtable_index();
     assert(call_info->resolved_klass()->verify_vtable_index(vtable_index), "sanity check");
     entry = VtableStubs::find_vtable_stub(vtable_index);
+    if (entry == NULL) {
+      return false;
+    }
     InlineCacheBuffer::create_transition_stub(this, NULL, entry);
   }
 
@@ -200,6 +205,7 @@
   // race because the IC entry was complete when we safepointed so
   // cleaning it immediately is harmless.
   // assert(is_megamorphic(), "sanity check");
+  return true;
 }
 
 
--- a/src/share/vm/code/compiledIC.hpp	Fri Sep 27 10:23:12 2013 +0200
+++ b/src/share/vm/code/compiledIC.hpp	Fri Sep 27 13:41:07 2013 +0200
@@ -226,7 +226,10 @@
   //
   void set_to_clean();  // Can only be called during a safepoint operation
   void set_to_monomorphic(CompiledICInfo& info);
-  void set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);
+
+  // Returns true if successful and false otherwise. The call can fail if memory
+  // allocation in the code cache fails.
+  bool set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);
 
   static void compute_monomorphic_entry(methodHandle method, KlassHandle receiver_klass,
                                         bool is_optimized, bool static_bound, CompiledICInfo& info, TRAPS);
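
Since set_to_megamorphic() now returns bool, a caller is expected to treat false (the itable/vtable stub could not be allocated because the code cache is full) as recoverable: keep the current inline cache state and let the call be resolved again later. A standalone sketch of that report-instead-of-assert pattern, with hypothetical stand-ins for the stub lookup:

  #include <cstddef>
  #include <iostream>

  // Stand-in for VtableStubs::find_vtable_stub(): may return NULL when the
  // code cache has no room for a new stub.
  static void* find_stub(bool code_cache_full) {
    return code_cache_full ? NULL : reinterpret_cast<void*>(0x1000);
  }

  // Report failure to the caller instead of asserting, so the call site can
  // stay in its current state and be re-resolved later.
  static bool set_to_megamorphic_like(bool code_cache_full) {
    void* entry = find_stub(code_cache_full);
    if (entry == NULL) {
      return false;               // no stub: caller keeps the old IC state
    }
    // ... install the transition stub using 'entry' ...
    return true;
  }

  int main() {
    std::cout << set_to_megamorphic_like(false) << "\n";  // 1: stub installed
    std::cout << set_to_megamorphic_like(true)  << "\n";  // 0: caller must cope
    return 0;
  }
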
--- a/src/share/vm/code/vtableStubs.cpp	Fri Sep 27 10:23:12 2013 +0200
+++ b/src/share/vm/code/vtableStubs.cpp	Fri Sep 27 13:41:07 2013 +0200
@@ -46,12 +46,9 @@
 address VtableStub::_chunk_end         = NULL;
 VMReg   VtableStub::_receiver_location = VMRegImpl::Bad();
 
-static int num_vtable_chunks = 0;
-
 
 void* VtableStub::operator new(size_t size, int code_size) throw() {
   assert(size == sizeof(VtableStub), "mismatched size");
-  num_vtable_chunks++;
   // compute real VtableStub size (rounded to nearest word)
   const int real_size = round_to(code_size + sizeof(VtableStub), wordSize);
   // malloc them in chunks to minimize header overhead
@@ -60,7 +57,7 @@
     const int bytes = chunk_factor * real_size + pd_code_alignment();
     BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
     if (blob == NULL) {
-      vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "CodeCache: no room for vtable chunks");
+      return NULL;
     }
     _chunk = blob->content_begin();
     _chunk_end = _chunk + bytes;
@@ -121,6 +118,12 @@
     } else {
       s = create_itable_stub(vtable_index);
     }
+
+    // Creation of vtable or itable can fail if there is not enough free space in the code cache.
+    if (s == NULL) {
+      return NULL;
+    }
+
     enter(is_vtable_stub, vtable_index, s);
     if (PrintAdapterHandlers) {
       tty->print_cr("Decoding VtableStub %s[%d]@%d",
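
VtableStub::operator new now returns NULL instead of calling vm_exit_out_of_memory(), which is why create_vtable_stub()/create_itable_stub() and their callers above all gained NULL checks. Because the allocation function is declared throw(), a NULL return makes the whole new-expression evaluate to NULL and skips the constructor, so checking the result of new is enough. A minimal standalone sketch of the pattern (hypothetical Stub type, malloc standing in for the code cache chunk):

  #include <cstddef>
  #include <cstdlib>

  struct Stub {
    int index;
    explicit Stub(int i) : index(i) {}

    // Non-throwing placement-style allocator: may return NULL when the
    // backing store (here plain malloc) is exhausted. Because it is declared
    // throw(), a NULL return makes 'new (code_size) Stub(i)' yield NULL
    // without running the constructor.
    void* operator new(size_t size, int code_size) throw() {
      return std::malloc(size + code_size);
    }
    // Matching placement delete (only used if the constructor throws).
    void operator delete(void* p, int) { std::free(p); }
    void operator delete(void* p)      { std::free(p); }
  };

  Stub* create_stub(int index, int code_size) {
    Stub* s = new (code_size) Stub(index);
    if (s == NULL) {
      return NULL;   // propagate the allocation failure to the caller
    }
    return s;
  }
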
--- a/src/share/vm/memory/metaspaceShared.cpp	Fri Sep 27 10:23:12 2013 +0200
+++ b/src/share/vm/memory/metaspaceShared.cpp	Fri Sep 27 13:41:07 2013 +0200
@@ -103,9 +103,10 @@
     if (k->oop_is_instance()) {
       InstanceKlass* ik = InstanceKlass::cast(k);
       for (int i = 0; i < ik->methods()->length(); i++) {
-        ResourceMark rm;
         Method* m = ik->methods()->at(i);
-        (new Fingerprinter(m))->fingerprint();
+        Fingerprinter fp(m);
+        // The side effect of this call sets the method's fingerprint field.
+        fp.fingerprint();
       }
     }
   }
--- a/src/share/vm/oops/constantPool.cpp	Fri Sep 27 10:23:12 2013 +0200
+++ b/src/share/vm/oops/constantPool.cpp	Fri Sep 27 13:41:07 2013 +0200
@@ -108,16 +108,16 @@
 void ConstantPool::initialize_resolved_references(ClassLoaderData* loader_data,
                                                   intStack reference_map,
                                                   int constant_pool_map_length,
-                                                   TRAPS) {
+                                                  TRAPS) {
   // Initialized the resolved object cache.
   int map_length = reference_map.length();
   if (map_length > 0) {
     // Only need mapping back to constant pool entries.  The map isn't used for
-    // invokedynamic resolved_reference entries.  The constant pool cache index
-    // has the mapping back to both the constant pool and to the resolved
-    // reference index.
+    // invokedynamic resolved_reference entries.  For invokedynamic entries,
+    // the constant pool cache index has the mapping back to both the constant
+    // pool and to the resolved reference index.
     if (constant_pool_map_length > 0) {
-      Array<u2>* om = MetadataFactory::new_array<u2>(loader_data, map_length, CHECK);
+      Array<u2>* om = MetadataFactory::new_array<u2>(loader_data, constant_pool_map_length, CHECK);
 
       for (int i = 0; i < constant_pool_map_length; i++) {
         int x = reference_map.at(i);
@@ -182,16 +182,9 @@
 
 int ConstantPool::cp_to_object_index(int cp_index) {
   // this is harder don't do this so much.
-  for (int i = 0; i< reference_map()->length(); i++) {
-    if (reference_map()->at(i) == cp_index) return i;
-    // Zero entry is divider between constant pool indices for strings,
-    // method handles and method types. After that the index is a constant
-    // pool cache index for invokedynamic.  Stop when zero (which can never
-    // be a constant pool index)
-    if (reference_map()->at(i) == 0) break;
-  }
-  // We might not find the index.
-  return _no_index_sentinel;
+  int i = reference_map()->find(cp_index);
+  // We might not find the index for jsr292 call.
+  return (i < 0) ? _no_index_sentinel : i;
 }
 
 Klass* ConstantPool::klass_at_impl(constantPoolHandle this_oop, int which, TRAPS) {
@@ -840,8 +833,7 @@
   // If the string has already been interned, this entry will be non-null
   oop str = this_oop->resolved_references()->obj_at(obj_index);
   if (str != NULL) return str;
-
-      Symbol* sym = this_oop->unresolved_string_at(which);
+  Symbol* sym = this_oop->unresolved_string_at(which);
   str = StringTable::intern(sym, CHECK_(NULL));
   this_oop->string_at_put(which, obj_index, str);
   assert(java_lang_String::is_instance(str), "must be string");
@@ -1619,9 +1611,11 @@
     case JVM_CONSTANT_UnresolvedClassInError:
     case JVM_CONSTANT_StringIndex:
     case JVM_CONSTANT_MethodType:
+    case JVM_CONSTANT_MethodTypeInError:
       return 3;
 
     case JVM_CONSTANT_MethodHandle:
+    case JVM_CONSTANT_MethodHandleInError:
       return 4; //tag, ref_kind, ref_index
 
     case JVM_CONSTANT_Integer:
@@ -1802,8 +1796,8 @@
       case JVM_CONSTANT_MethodHandle:
       case JVM_CONSTANT_MethodHandleInError: {
         *bytes = JVM_CONSTANT_MethodHandle;
-        int kind = method_handle_ref_kind_at(idx);
-        idx1 = method_handle_index_at(idx);
+        int kind = method_handle_ref_kind_at_error_ok(idx);
+        idx1 = method_handle_index_at_error_ok(idx);
         *(bytes+1) = (unsigned char) kind;
         Bytes::put_Java_u2((address) (bytes+2), idx1);
         DBG(printf("JVM_CONSTANT_MethodHandle: %d %hd", kind, idx1));
@@ -1812,7 +1806,7 @@
       case JVM_CONSTANT_MethodType:
       case JVM_CONSTANT_MethodTypeInError: {
         *bytes = JVM_CONSTANT_MethodType;
-        idx1 = method_type_index_at(idx);
+        idx1 = method_type_index_at_error_ok(idx);
         Bytes::put_Java_u2((address) (bytes+1), idx1);
         DBG(printf("JVM_CONSTANT_MethodType: %hd", idx1));
         break;
@@ -2000,12 +1994,12 @@
       break;
     case JVM_CONSTANT_MethodHandle :
     case JVM_CONSTANT_MethodHandleInError :
-      st->print("ref_kind=%d", method_handle_ref_kind_at(index));
-      st->print(" ref_index=%d", method_handle_index_at(index));
+      st->print("ref_kind=%d", method_handle_ref_kind_at_error_ok(index));
+      st->print(" ref_index=%d", method_handle_index_at_error_ok(index));
       break;
     case JVM_CONSTANT_MethodType :
     case JVM_CONSTANT_MethodTypeInError :
-      st->print("signature_index=%d", method_type_index_at(index));
+      st->print("signature_index=%d", method_type_index_at_error_ok(index));
       break;
     case JVM_CONSTANT_InvokeDynamic :
       {
--- a/src/share/vm/oops/constantPool.hpp	Fri Sep 27 10:23:12 2013 +0200
+++ b/src/share/vm/oops/constantPool.hpp	Fri Sep 27 13:41:07 2013 +0200
@@ -231,7 +231,6 @@
   static int cache_offset_in_bytes()        { return offset_of(ConstantPool, _cache); }
   static int pool_holder_offset_in_bytes()  { return offset_of(ConstantPool, _pool_holder); }
   static int resolved_references_offset_in_bytes() { return offset_of(ConstantPool, _resolved_references); }
-  static int reference_map_offset_in_bytes() { return offset_of(ConstantPool, _reference_map); }
 
   // Storing constants
 
@@ -475,18 +474,42 @@
     return *int_at_addr(which);
   }
 
+ private:
+  int method_handle_ref_kind_at(int which, bool error_ok) {
+    assert(tag_at(which).is_method_handle() ||
+           (error_ok && tag_at(which).is_method_handle_in_error()), "Corrupted constant pool");
+    return extract_low_short_from_int(*int_at_addr(which));  // mask out unwanted ref_index bits
+  }
+  int method_handle_index_at(int which, bool error_ok) {
+    assert(tag_at(which).is_method_handle() ||
+           (error_ok && tag_at(which).is_method_handle_in_error()), "Corrupted constant pool");
+    return extract_high_short_from_int(*int_at_addr(which));  // shift out unwanted ref_kind bits
+  }
+  int method_type_index_at(int which, bool error_ok) {
+    assert(tag_at(which).is_method_type() ||
+           (error_ok && tag_at(which).is_method_type_in_error()), "Corrupted constant pool");
+    return *int_at_addr(which);
+  }
+ public:
   int method_handle_ref_kind_at(int which) {
-    assert(tag_at(which).is_method_handle(), "Corrupted constant pool");
-    return extract_low_short_from_int(*int_at_addr(which));  // mask out unwanted ref_index bits
+    return method_handle_ref_kind_at(which, false);
+  }
+  int method_handle_ref_kind_at_error_ok(int which) {
+    return method_handle_ref_kind_at(which, true);
   }
   int method_handle_index_at(int which) {
-    assert(tag_at(which).is_method_handle(), "Corrupted constant pool");
-    return extract_high_short_from_int(*int_at_addr(which));  // shift out unwanted ref_kind bits
+    return method_handle_index_at(which, false);
+  }
+  int method_handle_index_at_error_ok(int which) {
+    return method_handle_index_at(which, true);
   }
   int method_type_index_at(int which) {
-    assert(tag_at(which).is_method_type(), "Corrupted constant pool");
-    return *int_at_addr(which);
+    return method_type_index_at(which, false);
   }
+  int method_type_index_at_error_ok(int which) {
+    return method_type_index_at(which, true);
+  }
+
   // Derived queries:
   Symbol* method_handle_name_ref_at(int which) {
     int member = method_handle_index_at(which);
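
The accessors above are funneled through private helpers taking an error_ok flag: the strict asserting variants remain the default, while the printing and constant-pool-to-bytes paths changed in constantPool.cpp use the *_error_ok wrappers to also accept entries whose tag is already *InError. A standalone sketch of that accessor layout (hypothetical class, not ConstantPool):

  #include <cassert>

  class PoolLike {
   private:
    int  _value;
    bool _in_error;   // resolution of this entry failed earlier

    int value_at(bool error_ok) const {
      // Strict by default; callers that can tolerate in-error entries opt in.
      assert(!_in_error || error_ok);
      return _value;
    }

   public:
    PoolLike(int v, bool in_error) : _value(v), _in_error(in_error) {}

    int value_at()          const { return value_at(false); }  // normal path
    int value_at_error_ok() const { return value_at(true);  }  // printing etc.
  };
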
--- a/src/share/vm/oops/instanceKlass.cpp	Fri Sep 27 10:23:12 2013 +0200
+++ b/src/share/vm/oops/instanceKlass.cpp	Fri Sep 27 13:41:07 2013 +0200
@@ -2769,24 +2769,17 @@
   st->print(BULLET"field annotations:       "); fields_annotations()->print_value_on(st); st->cr();
   st->print(BULLET"field type annotations:  "); fields_type_annotations()->print_value_on(st); st->cr();
   {
-    ResourceMark rm;
-    // PreviousVersionInfo objects returned via PreviousVersionWalker
-    // contain a GrowableArray of handles. We have to clean up the
-    // GrowableArray _after_ the PreviousVersionWalker destructor
-    // has destroyed the handles.
-    {
-      bool have_pv = false;
-      PreviousVersionWalker pvw((InstanceKlass*)this);
-      for (PreviousVersionInfo * pv_info = pvw.next_previous_version();
-           pv_info != NULL; pv_info = pvw.next_previous_version()) {
-        if (!have_pv)
-          st->print(BULLET"previous version:  ");
-        have_pv = true;
-        pv_info->prev_constant_pool_handle()()->print_value_on(st);
-      }
-      if (have_pv)  st->cr();
-    } // pvw is cleaned up
-  } // rm is cleaned up
+    bool have_pv = false;
+    PreviousVersionWalker pvw(Thread::current(), (InstanceKlass*)this);
+    for (PreviousVersionNode * pv_node = pvw.next_previous_version();
+         pv_node != NULL; pv_node = pvw.next_previous_version()) {
+      if (!have_pv)
+        st->print(BULLET"previous version:  ");
+      have_pv = true;
+      pv_node->prev_constant_pool()->print_value_on(st);
+    }
+    if (have_pv) st->cr();
+  } // pvw is cleaned up
 
   if (generic_signature() != NULL) {
     st->print(BULLET"generic signature: ");
@@ -3317,34 +3310,34 @@
   Array<Method*>* old_methods = ikh->methods();
 
   if (cp_ref->on_stack()) {
-  PreviousVersionNode * pv_node = NULL;
-  if (emcp_method_count == 0) {
+    PreviousVersionNode * pv_node = NULL;
+    if (emcp_method_count == 0) {
       // non-shared ConstantPool gets a reference
-      pv_node = new PreviousVersionNode(cp_ref, !cp_ref->is_shared(), NULL);
-    RC_TRACE(0x00000400,
-        ("add: all methods are obsolete; flushing any EMCP refs"));
-  } else {
-    int local_count = 0;
+      pv_node = new PreviousVersionNode(cp_ref, NULL);
+      RC_TRACE(0x00000400,
+          ("add: all methods are obsolete; flushing any EMCP refs"));
+    } else {
+      int local_count = 0;
       GrowableArray<Method*>* method_refs = new (ResourceObj::C_HEAP, mtClass)
-        GrowableArray<Method*>(emcp_method_count, true);
-    for (int i = 0; i < old_methods->length(); i++) {
-      if (emcp_methods->at(i)) {
-          // this old method is EMCP. Save it only if it's on the stack
-          Method* old_method = old_methods->at(i);
-          if (old_method->on_stack()) {
-            method_refs->append(old_method);
+          GrowableArray<Method*>(emcp_method_count, true);
+      for (int i = 0; i < old_methods->length(); i++) {
+        if (emcp_methods->at(i)) {
+            // this old method is EMCP. Save it only if it's on the stack
+            Method* old_method = old_methods->at(i);
+            if (old_method->on_stack()) {
+              method_refs->append(old_method);
+            }
+          if (++local_count >= emcp_method_count) {
+            // no more EMCP methods so bail out now
+            break;
           }
-        if (++local_count >= emcp_method_count) {
-          // no more EMCP methods so bail out now
-          break;
         }
       }
-    }
       // non-shared ConstantPool gets a reference
-      pv_node = new PreviousVersionNode(cp_ref, !cp_ref->is_shared(), method_refs);
+      pv_node = new PreviousVersionNode(cp_ref, method_refs);
     }
     // append new previous version.
-  _previous_versions->append(pv_node);
+    _previous_versions->append(pv_node);
   }
 
   // Since the caller is the VMThread and we are at a safepoint, this
@@ -3445,6 +3438,8 @@
         return m;
       }
     }
+    // None found, return null for the caller to handle.
+    return NULL;
   }
   return m;
 }
@@ -3461,10 +3456,9 @@
 // Construct a PreviousVersionNode entry for the array hung off
 // the InstanceKlass.
 PreviousVersionNode::PreviousVersionNode(ConstantPool* prev_constant_pool,
-  bool prev_cp_is_weak, GrowableArray<Method*>* prev_EMCP_methods) {
+  GrowableArray<Method*>* prev_EMCP_methods) {
 
   _prev_constant_pool = prev_constant_pool;
-  _prev_cp_is_weak = prev_cp_is_weak;
   _prev_EMCP_methods = prev_EMCP_methods;
 }
 
@@ -3480,99 +3474,38 @@
   }
 }
 
-
-// Construct a PreviousVersionInfo entry
-PreviousVersionInfo::PreviousVersionInfo(PreviousVersionNode *pv_node) {
-  _prev_constant_pool_handle = constantPoolHandle();  // NULL handle
-  _prev_EMCP_method_handles = NULL;
-
-  ConstantPool* cp = pv_node->prev_constant_pool();
-  assert(cp != NULL, "constant pool ref was unexpectedly cleared");
-  if (cp == NULL) {
-    return;  // robustness
-  }
-
-  // make the ConstantPool* safe to return
-  _prev_constant_pool_handle = constantPoolHandle(cp);
-
-  GrowableArray<Method*>* method_refs = pv_node->prev_EMCP_methods();
-  if (method_refs == NULL) {
-    // the InstanceKlass did not have any EMCP methods
-    return;
-  }
-
-  _prev_EMCP_method_handles = new GrowableArray<methodHandle>(10);
-
-  int n_methods = method_refs->length();
-  for (int i = 0; i < n_methods; i++) {
-    Method* method = method_refs->at(i);
-    assert (method != NULL, "method has been cleared");
-    if (method == NULL) {
-      continue;  // robustness
-    }
-    // make the Method* safe to return
-    _prev_EMCP_method_handles->append(methodHandle(method));
-  }
-}
-
-
-// Destroy a PreviousVersionInfo
-PreviousVersionInfo::~PreviousVersionInfo() {
-  // Since _prev_EMCP_method_handles is not C-heap allocated, we
-  // don't have to delete it.
-}
-
-
 // Construct a helper for walking the previous versions array
-PreviousVersionWalker::PreviousVersionWalker(InstanceKlass *ik) {
+PreviousVersionWalker::PreviousVersionWalker(Thread* thread, InstanceKlass *ik) {
+  _thread = thread;
   _previous_versions = ik->previous_versions();
   _current_index = 0;
-  // _hm needs no initialization
   _current_p = NULL;
-}
-
-
-// Destroy a PreviousVersionWalker
-PreviousVersionWalker::~PreviousVersionWalker() {
-  // Delete the current info just in case the caller didn't walk to
-  // the end of the previous versions list. No harm if _current_p is
-  // already NULL.
-  delete _current_p;
-
-  // When _hm is destroyed, all the Handles returned in
-  // PreviousVersionInfo objects will be destroyed.
-  // Also, after this destructor is finished it will be
-  // safe to delete the GrowableArray allocated in the
-  // PreviousVersionInfo objects.
+  _current_constant_pool_handle = constantPoolHandle(thread, ik->constants());
 }
 
 
 // Return the interesting information for the next previous version
 // of the klass. Returns NULL if there are no more previous versions.
-PreviousVersionInfo* PreviousVersionWalker::next_previous_version() {
+PreviousVersionNode* PreviousVersionWalker::next_previous_version() {
   if (_previous_versions == NULL) {
     // no previous versions so nothing to return
     return NULL;
   }
 
-  delete _current_p;  // cleanup the previous info for the caller
-  _current_p = NULL;  // reset to NULL so we don't delete same object twice
+  _current_p = NULL;  // reset to NULL
+  _current_constant_pool_handle = NULL;
 
   int length = _previous_versions->length();
 
   while (_current_index < length) {
     PreviousVersionNode * pv_node = _previous_versions->at(_current_index++);
-    PreviousVersionInfo * pv_info = new (ResourceObj::C_HEAP, mtClass)
-                                          PreviousVersionInfo(pv_node);
-
-    constantPoolHandle cp_h = pv_info->prev_constant_pool_handle();
-    assert (!cp_h.is_null(), "null cp found in previous version");
-
-    // The caller will need to delete pv_info when they are done with it.
-    _current_p = pv_info;
-    return pv_info;
+
+    // Save a handle to the constant pool for this previous version,
+    // which keeps all the methods from being deallocated.
+    _current_constant_pool_handle = constantPoolHandle(_thread, pv_node->prev_constant_pool());
+    _current_p = pv_node;
+    return pv_node;
   }
 
-  // all of the underlying nodes' info has been deleted
   return NULL;
 } // end next_previous_version()
--- a/src/share/vm/oops/instanceKlass.hpp	Fri Sep 27 10:23:12 2013 +0200
+++ b/src/share/vm/oops/instanceKlass.hpp	Fri Sep 27 13:41:07 2013 +0200
@@ -1126,21 +1126,11 @@
 
 
 // A collection point for interesting information about the previous
-// version(s) of an InstanceKlass. This class uses weak references to
-// the information so that the information may be collected as needed
-// by the system. If the information is shared, then a regular
-// reference must be used because a weak reference would be seen as
-// collectible. A GrowableArray of PreviousVersionNodes is attached
-// to the InstanceKlass as needed. See PreviousVersionWalker below.
+// version(s) of an InstanceKlass.  A GrowableArray of PreviousVersionNodes
+// is attached to the InstanceKlass as needed. See PreviousVersionWalker below.
 class PreviousVersionNode : public CHeapObj<mtClass> {
  private:
-  // A shared ConstantPool is never collected so we'll always have
-  // a reference to it so we can update items in the cache. We'll
-  // have a weak reference to a non-shared ConstantPool until all
-  // of the methods (EMCP or obsolete) have been collected; the
-  // non-shared ConstantPool becomes collectible at that point.
-  ConstantPool*    _prev_constant_pool;  // regular or weak reference
-  bool    _prev_cp_is_weak;     // true if not a shared ConstantPool
+  ConstantPool*    _prev_constant_pool;
 
   // If the previous version of the InstanceKlass doesn't have any
   // EMCP methods, then _prev_EMCP_methods will be NULL. If all the
@@ -1149,8 +1139,8 @@
   GrowableArray<Method*>* _prev_EMCP_methods;
 
 public:
-  PreviousVersionNode(ConstantPool* prev_constant_pool, bool prev_cp_is_weak,
-    GrowableArray<Method*>* prev_EMCP_methods);
+  PreviousVersionNode(ConstantPool* prev_constant_pool,
+                      GrowableArray<Method*>* prev_EMCP_methods);
   ~PreviousVersionNode();
   ConstantPool* prev_constant_pool() const {
     return _prev_constant_pool;
@@ -1161,59 +1151,26 @@
 };
 
 
-// A Handle-ized version of PreviousVersionNode.
-class PreviousVersionInfo : public ResourceObj {
- private:
-  constantPoolHandle   _prev_constant_pool_handle;
-  // If the previous version of the InstanceKlass doesn't have any
-  // EMCP methods, then _prev_EMCP_methods will be NULL. Since the
-  // methods cannot be collected while we hold a handle,
-  // _prev_EMCP_methods should never have a length of zero.
-  GrowableArray<methodHandle>* _prev_EMCP_method_handles;
-
-public:
-  PreviousVersionInfo(PreviousVersionNode *pv_node);
-  ~PreviousVersionInfo();
-  constantPoolHandle prev_constant_pool_handle() const {
-    return _prev_constant_pool_handle;
-  }
-  GrowableArray<methodHandle>* prev_EMCP_method_handles() const {
-    return _prev_EMCP_method_handles;
-  }
-};
-
-
-// Helper object for walking previous versions. This helper cleans up
-// the Handles that it allocates when the helper object is destroyed.
-// The PreviousVersionInfo object returned by next_previous_version()
-// is only valid until a subsequent call to next_previous_version() or
-// the helper object is destroyed.
+// Helper object for walking previous versions.
 class PreviousVersionWalker : public StackObj {
  private:
+  Thread*                               _thread;
   GrowableArray<PreviousVersionNode *>* _previous_versions;
   int                                   _current_index;
-  // Fields for cleaning up when we are done walking the previous versions:
-  // A HandleMark for the PreviousVersionInfo handles:
-  HandleMark                            _hm;
+
+  // A pointer to the current node object so we can handle the deletes.
+  PreviousVersionNode*                  _current_p;
 
-  // It would be nice to have a ResourceMark field in this helper also,
-  // but the ResourceMark code says to be careful to delete handles held
-  // in GrowableArrays _before_ deleting the GrowableArray. Since we
-  // can't guarantee the order in which the fields are destroyed, we
-  // have to let the creator of the PreviousVersionWalker object do
-  // the right thing. Also, adding a ResourceMark here causes an
-  // include loop.
-
-  // A pointer to the current info object so we can handle the deletes.
-  PreviousVersionInfo *                 _current_p;
+  // The constant pool handle keeps all the methods in this class from being
+  // deallocated from the metaspace during class unloading.
+  constantPoolHandle                    _current_constant_pool_handle;
 
  public:
-  PreviousVersionWalker(InstanceKlass *ik);
-  ~PreviousVersionWalker();
+  PreviousVersionWalker(Thread* thread, InstanceKlass *ik);
 
   // Return the interesting information for the next previous version
   // of the klass. Returns NULL if there are no more previous versions.
-  PreviousVersionInfo* next_previous_version();
+  PreviousVersionNode* next_previous_version();
 };
 
 
--- a/src/share/vm/opto/bytecodeInfo.cpp	Fri Sep 27 10:23:12 2013 +0200
+++ b/src/share/vm/opto/bytecodeInfo.cpp	Fri Sep 27 13:41:07 2013 +0200
@@ -123,7 +123,7 @@
   // Allows targeted inlining
   if(callee_method->should_inline()) {
     *wci_result = *(WarmCallInfo::always_hot());
-    if (PrintInlining && Verbose) {
+    if (C->print_inlining() && Verbose) {
       CompileTask::print_inline_indent(inline_level());
       tty->print_cr("Inlined method is hot: ");
     }
@@ -137,7 +137,7 @@
   if(callee_method->interpreter_throwout_count() > InlineThrowCount &&
      size < InlineThrowMaxSize ) {
     wci_result->set_profit(wci_result->profit() * 100);
-    if (PrintInlining && Verbose) {
+    if (C->print_inlining() && Verbose) {
       CompileTask::print_inline_indent(inline_level());
       tty->print_cr("Inlined method with many throws (throws=%d):", callee_method->interpreter_throwout_count());
     }
@@ -491,7 +491,7 @@
       C->log()->inline_fail(inline_msg);
     }
   }
-  if (PrintInlining) {
+  if (C->print_inlining()) {
     C->print_inlining(callee_method, inline_level(), caller_bci, inline_msg);
     if (callee_method == NULL) tty->print(" callee not monotonic or profiled");
     if (Verbose && callee_method) {
@@ -540,7 +540,7 @@
 
 #ifndef PRODUCT
   if (UseOldInlining && InlineWarmCalls
-      && (PrintOpto || PrintOptoInlining || PrintInlining)) {
+      && (PrintOpto || C->print_inlining())) {
     bool cold = wci.is_cold();
     bool hot  = !cold && wci.is_hot();
     bool old_cold = !success;
@@ -617,7 +617,7 @@
              callee_method->is_compiled_lambda_form()) {
       max_inline_level_adjust += 1;  // don't count method handle calls from java.lang.invoke implem
     }
-    if (max_inline_level_adjust != 0 && PrintInlining && (Verbose || WizardMode)) {
+    if (max_inline_level_adjust != 0 && C->print_inlining() && (Verbose || WizardMode)) {
       CompileTask::print_inline_indent(inline_level());
       tty->print_cr(" \\-> discounting inline depth");
     }
--- a/src/share/vm/opto/callGenerator.hpp	Fri Sep 27 10:23:12 2013 +0200
+++ b/src/share/vm/opto/callGenerator.hpp	Fri Sep 27 13:41:07 2013 +0200
@@ -159,8 +159,9 @@
   virtual void print_inlining_late(const char* msg) { ShouldNotReachHere(); }
 
   static void print_inlining(Compile* C, ciMethod* callee, int inline_level, int bci, const char* msg) {
-    if (PrintInlining)
+    if (C->print_inlining()) {
       C->print_inlining(callee, inline_level, bci, msg);
+    }
   }
 };
 
--- a/src/share/vm/opto/compile.cpp	Fri Sep 27 10:23:12 2013 +0200
+++ b/src/share/vm/opto/compile.cpp	Fri Sep 27 13:41:07 2013 +0200
@@ -654,7 +654,7 @@
                   _inlining_progress(false),
                   _inlining_incrementally(false),
                   _print_inlining_list(NULL),
-                  _print_inlining(0) {
+                  _print_inlining_idx(0) {
   C = this;
 
   CompileWrapper cw(this);
@@ -679,6 +679,8 @@
   set_print_assembly(print_opto_assembly);
   set_parsed_irreducible_loop(false);
 #endif
+  set_print_inlining(PrintInlining || method()->has_option("PrintInlining") NOT_PRODUCT( || PrintOptoInlining));
+  set_print_intrinsics(PrintIntrinsics || method()->has_option("PrintIntrinsics"));
 
   if (ProfileTraps) {
     // Make sure the method being compiled gets its own MDO,
@@ -710,7 +712,7 @@
   PhaseGVN gvn(node_arena(), estimated_size);
   set_initial_gvn(&gvn);
 
-  if (PrintInlining  || PrintIntrinsics NOT_PRODUCT( || PrintOptoInlining)) {
+  if (print_inlining() || print_intrinsics()) {
     _print_inlining_list = new (comp_arena())GrowableArray<PrintInliningBuffer>(comp_arena(), 1, 1, PrintInliningBuffer());
   }
   { // Scope for timing the parser
@@ -937,7 +939,7 @@
     _inlining_progress(false),
     _inlining_incrementally(false),
     _print_inlining_list(NULL),
-    _print_inlining(0) {
+    _print_inlining_idx(0) {
   C = this;
 
 #ifndef PRODUCT
@@ -3611,7 +3613,7 @@
 }
 
 void Compile::dump_inlining() {
-  if (PrintInlining || PrintIntrinsics NOT_PRODUCT( || PrintOptoInlining)) {
+  if (print_inlining() || print_intrinsics()) {
     // Print inlining message for candidates that we couldn't inline
     // for lack of space or non constant receiver
     for (int i = 0; i < _late_inlines.length(); i++) {
@@ -3635,7 +3637,7 @@
       }
     }
     for (int i = 0; i < _print_inlining_list->length(); i++) {
-      tty->print(_print_inlining_list->at(i).ss()->as_string());
+      tty->print(_print_inlining_list->adr_at(i)->ss()->as_string());
     }
   }
 }
--- a/src/share/vm/opto/compile.hpp	Fri Sep 27 10:23:12 2013 +0200
+++ b/src/share/vm/opto/compile.hpp	Fri Sep 27 13:41:07 2013 +0200
@@ -312,6 +312,8 @@
   bool                  _do_method_data_update; // True if we generate code to update MethodData*s
   int                   _AliasLevel;            // Locally-adjusted version of AliasLevel flag.
   bool                  _print_assembly;        // True if we should dump assembly code for this compilation
+  bool                  _print_inlining;        // True if we should print inlining for this compilation
+  bool                  _print_intrinsics;      // True if we should print intrinsics for this compilation
 #ifndef PRODUCT
   bool                  _trace_opto_output;
   bool                  _parsed_irreducible_loop; // True if ciTypeFlow detected irreducible loops during parsing
@@ -414,7 +416,7 @@
   };
 
   GrowableArray<PrintInliningBuffer>* _print_inlining_list;
-  int _print_inlining;
+  int _print_inlining_idx;
 
   // Only keep nodes in the expensive node list that need to be optimized
   void cleanup_expensive_nodes(PhaseIterGVN &igvn);
@@ -426,24 +428,24 @@
  public:
 
   outputStream* print_inlining_stream() const {
-    return _print_inlining_list->at(_print_inlining).ss();
+    return _print_inlining_list->adr_at(_print_inlining_idx)->ss();
   }
 
   void print_inlining_skip(CallGenerator* cg) {
-    if (PrintInlining) {
-      _print_inlining_list->at(_print_inlining).set_cg(cg);
-      _print_inlining++;
-      _print_inlining_list->insert_before(_print_inlining, PrintInliningBuffer());
+    if (_print_inlining) {
+      _print_inlining_list->adr_at(_print_inlining_idx)->set_cg(cg);
+      _print_inlining_idx++;
+      _print_inlining_list->insert_before(_print_inlining_idx, PrintInliningBuffer());
     }
   }
 
   void print_inlining_insert(CallGenerator* cg) {
-    if (PrintInlining) {
+    if (_print_inlining) {
       for (int i = 0; i < _print_inlining_list->length(); i++) {
-        if (_print_inlining_list->at(i).cg() == cg) {
+        if (_print_inlining_list->adr_at(i)->cg() == cg) {
           _print_inlining_list->insert_before(i+1, PrintInliningBuffer());
-          _print_inlining = i+1;
-          _print_inlining_list->at(i).set_cg(NULL);
+          _print_inlining_idx = i+1;
+          _print_inlining_list->adr_at(i)->set_cg(NULL);
           return;
         }
       }
@@ -572,6 +574,10 @@
   int               AliasLevel() const          { return _AliasLevel; }
   bool              print_assembly() const       { return _print_assembly; }
   void          set_print_assembly(bool z)       { _print_assembly = z; }
+  bool              print_inlining() const       { return _print_inlining; }
+  void          set_print_inlining(bool z)       { _print_inlining = z; }
+  bool              print_intrinsics() const     { return _print_intrinsics; }
+  void          set_print_intrinsics(bool z)     { _print_intrinsics = z; }
   // check the CompilerOracle for special behaviours for this compile
   bool          method_has_option(const char * option) {
     return method() != NULL && method()->has_option(option);
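
Two related changes run through Compile above: PrintInlining/PrintIntrinsics are cached per compilation as print_inlining()/print_intrinsics(), so a CompilerOracle option on the method (what method()->has_option("PrintInlining") checks) can enable them for a single method, and accesses to _print_inlining_list switch from at(i), which returns the PrintInliningBuffer by value, to adr_at(i), which returns a pointer to the stored element, so mutations like set_cg() affect the list rather than a temporary copy. A tiny standalone illustration of the at()-versus-adr_at() difference, using std::vector in place of the GrowableArray:

  #include <iostream>
  #include <vector>

  struct PrintBuffer { int cg_id; };

  int main() {
    std::vector<PrintBuffer> list(1);
    list[0].cg_id = 0;

    PrintBuffer copy = list[0];    // like GrowableArray<T>::at(i): a copy
    copy.cg_id = 42;               // lost: only the temporary copy changes

    PrintBuffer* p = &list[0];     // like GrowableArray<T>::adr_at(i): a pointer
    p->cg_id = 42;                 // sticks: the stored element changes

    std::cout << list[0].cg_id << std::endl;   // prints 42
    return 0;
  }
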
--- a/src/share/vm/opto/doCall.cpp	Fri Sep 27 10:23:12 2013 +0200
+++ b/src/share/vm/opto/doCall.cpp	Fri Sep 27 13:41:07 2013 +0200
@@ -41,9 +41,9 @@
 #include "runtime/sharedRuntime.hpp"
 
 void trace_type_profile(Compile* C, ciMethod *method, int depth, int bci, ciMethod *prof_method, ciKlass *prof_klass, int site_count, int receiver_count) {
-  if (TraceTypeProfile || PrintInlining NOT_PRODUCT(|| PrintOptoInlining)) {
+  if (TraceTypeProfile || C->print_inlining()) {
     outputStream* out = tty;
-    if (!PrintInlining) {
+    if (!C->print_inlining()) {
       if (NOT_PRODUCT(!PrintOpto &&) !PrintCompilation) {
         method->print_short_name();
         tty->cr();
--- a/src/share/vm/opto/library_call.cpp	Fri Sep 27 10:23:12 2013 +0200
+++ b/src/share/vm/opto/library_call.cpp	Fri Sep 27 13:41:07 2013 +0200
@@ -543,7 +543,7 @@
   Compile* C = kit.C;
   int nodes = C->unique();
 #ifndef PRODUCT
-  if ((PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) && Verbose) {
+  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
     char buf[1000];
     const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
     tty->print_cr("Intrinsic %s", str);
@@ -554,7 +554,7 @@
 
   // Try to inline the intrinsic.
   if (kit.try_to_inline()) {
-    if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
+    if (C->print_intrinsics() || C->print_inlining()) {
       C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
     }
     C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
@@ -570,7 +570,7 @@
   }
 
   // The intrinsic bailed out
-  if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
+  if (C->print_intrinsics() || C->print_inlining()) {
     if (jvms->has_method()) {
       // Not a root compile.
       const char* msg = is_virtual() ? "failed to inline (intrinsic, virtual)" : "failed to inline (intrinsic)";
@@ -592,7 +592,7 @@
   int nodes = C->unique();
 #ifndef PRODUCT
   assert(is_predicted(), "sanity");
-  if ((PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) && Verbose) {
+  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
     char buf[1000];
     const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
     tty->print_cr("Predicate for intrinsic %s", str);
@@ -603,7 +603,7 @@
 
   Node* slow_ctl = kit.try_to_predicate();
   if (!kit.failing()) {
-    if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
+    if (C->print_intrinsics() || C->print_inlining()) {
       C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
     }
     C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
@@ -617,7 +617,7 @@
   }
 
   // The intrinsic bailed out
-  if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
+  if (C->print_intrinsics() || C->print_inlining()) {
     if (jvms->has_method()) {
       // Not a root compile.
       const char* msg = "failed to generate predicate for intrinsic";
@@ -2299,7 +2299,7 @@
     const TypeOopPtr* tjp = TypeOopPtr::make_from_klass(sharpened_klass);
 
 #ifndef PRODUCT
-    if (PrintIntrinsics || PrintInlining || PrintOptoInlining) {
+    if (C->print_intrinsics() || C->print_inlining()) {
       tty->print("  from base type: ");  adr_type->dump();
       tty->print("  sharpened value: ");  tjp->dump();
     }
@@ -3260,7 +3260,7 @@
   if (mirror_con == NULL)  return false;  // cannot happen?
 
 #ifndef PRODUCT
-  if (PrintIntrinsics || PrintInlining || PrintOptoInlining) {
+  if (C->print_intrinsics() || C->print_inlining()) {
     ciType* k = mirror_con->java_mirror_type();
     if (k) {
       tty->print("Inlining %s on constant Class ", vmIntrinsics::name_at(intrinsic_id()));
@@ -3952,14 +3952,14 @@
 // caller sensitive methods.
 bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
 #ifndef PRODUCT
-  if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
     tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
   }
 #endif
 
   if (!jvms()->has_method()) {
 #ifndef PRODUCT
-    if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+    if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
       tty->print_cr("  Bailing out because intrinsic was inlined at top level");
     }
 #endif
@@ -3983,7 +3983,7 @@
       // Frame 0 and 1 must be caller sensitive (see JVM_GetCallerClass).
       if (!m->caller_sensitive()) {
 #ifndef PRODUCT
-        if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+        if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
           tty->print_cr("  Bailing out: CallerSensitive annotation expected at frame %d", n);
         }
 #endif
@@ -3999,7 +3999,7 @@
         set_result(makecon(TypeInstPtr::make(caller_mirror)));
 
 #ifndef PRODUCT
-        if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+        if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
           tty->print_cr("  Succeeded: caller = %d) %s.%s, JVMS depth = %d", n, caller_klass->name()->as_utf8(), caller_jvms->method()->name()->as_utf8(), jvms()->depth());
           tty->print_cr("  JVM state at this point:");
           for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
@@ -4015,7 +4015,7 @@
   }
 
 #ifndef PRODUCT
-  if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
     tty->print_cr("  Bailing out because caller depth exceeded inlining depth = %d", jvms()->depth());
     tty->print_cr("  JVM state at this point:");
     for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
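[Editor's note] The library_call.cpp hunks above replace repeated flag expressions with C->print_intrinsics() / C->print_inlining(). Below is a minimal standalone sketch of that idea, with hypothetical names (CompileTask, PrintIntrinsicsFlag, PrintInliningFlag) that are not the HotSpot API: the per-compilation object samples the global flags once and call sites test a single predicate.

    // Standalone sketch, assuming hypothetical flag and class names.
    #include <iostream>

    // Hypothetical stand-ins for -XX command-line flags.
    static bool PrintIntrinsicsFlag = false;
    static bool PrintInliningFlag   = true;

    class CompileTask {            // stands in for the per-compilation object "C"
      bool _print_intrinsics;
      bool _print_inlining;
     public:
      CompileTask()
        : _print_intrinsics(PrintIntrinsicsFlag),
          _print_inlining(PrintInliningFlag) {}
      bool print_intrinsics() const { return _print_intrinsics; }
      bool print_inlining()   const { return _print_inlining; }
    };

    int main() {
      CompileTask C;
      // One accessor-based predicate instead of an ad-hoc list of flags.
      if (C.print_intrinsics() || C.print_inlining()) {
        std::cout << "(intrinsic) example inlining message\n";
      }
      return 0;
    }

The design choice mirrors the diff: once the decision lives behind accessors, product and non-product flag combinations are handled in one place rather than at every call site.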
--- a/src/share/vm/prims/jvm.cpp	Fri Sep 27 10:23:12 2013 +0200
+++ b/src/share/vm/prims/jvm.cpp	Fri Sep 27 13:41:07 2013 +0200
@@ -1835,16 +1835,27 @@
 }
 JVM_END
 
-JVM_ENTRY(jobjectArray, JVM_GetClassDeclaredMethods(JNIEnv *env, jclass ofClass, jboolean publicOnly))
-{
-  JVMWrapper("JVM_GetClassDeclaredMethods");
+static bool select_method(methodHandle method, bool want_constructor) {
+  if (want_constructor) {
+    return (method->is_initializer() && !method->is_static());
+  } else {
+    return (!method->is_initializer() && !method->is_overpass());
+  }
+}
+
+static jobjectArray get_class_declared_methods_helper(
+                                  JNIEnv *env,
+                                  jclass ofClass, jboolean publicOnly,
+                                  bool want_constructor,
+                                  Klass* klass, TRAPS) {
+
   JvmtiVMObjectAllocEventCollector oam;
 
   // Exclude primitive types and array types
   if (java_lang_Class::is_primitive(JNIHandles::resolve_non_null(ofClass))
       || java_lang_Class::as_Klass(JNIHandles::resolve_non_null(ofClass))->oop_is_array()) {
     // Return empty array
-    oop res = oopFactory::new_objArray(SystemDictionary::reflect_Method_klass(), 0, CHECK_NULL);
+    oop res = oopFactory::new_objArray(klass, 0, CHECK_NULL);
     return (jobjectArray) JNIHandles::make_local(env, res);
   }
 
@@ -1855,87 +1866,67 @@
 
   Array<Method*>* methods = k->methods();
   int methods_length = methods->length();
+
+  // Save original method_idnum in case of redefinition, which can change
+  // the idnum of obsolete methods.  The new method will have the same idnum
+  // but if we refresh the methods array, the counts will be wrong.
+  ResourceMark rm(THREAD);
+  GrowableArray<int>* idnums = new GrowableArray<int>(methods_length);
   int num_methods = 0;
 
-  int i;
-  for (i = 0; i < methods_length; i++) {
+  for (int i = 0; i < methods_length; i++) {
     methodHandle method(THREAD, methods->at(i));
-    if (!method->is_initializer() && !method->is_overpass()) {
+    if (select_method(method, want_constructor)) {
       if (!publicOnly || method->is_public()) {
+        idnums->push(method->method_idnum());
         ++num_methods;
       }
     }
   }
 
   // Allocate result
-  objArrayOop r = oopFactory::new_objArray(SystemDictionary::reflect_Method_klass(), num_methods, CHECK_NULL);
+  objArrayOop r = oopFactory::new_objArray(klass, num_methods, CHECK_NULL);
   objArrayHandle result (THREAD, r);
 
-  int out_idx = 0;
-  for (i = 0; i < methods_length; i++) {
-    methodHandle method(THREAD, methods->at(i));
-    if (!method->is_initializer() && !method->is_overpass()) {
-      if (!publicOnly || method->is_public()) {
-        oop m = Reflection::new_method(method, UseNewReflection, false, CHECK_NULL);
-        result->obj_at_put(out_idx, m);
-        ++out_idx;
+  // Now store the methods selected above, but look them up by their idnum
+  // in case of redefinition.  The methods can be redefined at any safepoint,
+  // both above when allocating the oop array and below when creating the
+  // reflect objects.
+  for (int i = 0; i < num_methods; i++) {
+    methodHandle method(THREAD, k->method_with_idnum(idnums->at(i)));
+    if (method.is_null()) {
+      // The method may have been deleted; this API seems able to handle null.
+      // Otherwise we should probably store a placeholder method that throws NSME.
+      result->obj_at_put(i, NULL);
+    } else {
+      oop m;
+      if (want_constructor) {
+        m = Reflection::new_constructor(method, CHECK_NULL);
+      } else {
+        m = Reflection::new_method(method, UseNewReflection, false, CHECK_NULL);
       }
+      result->obj_at_put(i, m);
     }
   }
-  assert(out_idx == num_methods, "just checking");
+
   return (jobjectArray) JNIHandles::make_local(env, result());
 }
+
+JVM_ENTRY(jobjectArray, JVM_GetClassDeclaredMethods(JNIEnv *env, jclass ofClass, jboolean publicOnly))
+{
+  JVMWrapper("JVM_GetClassDeclaredMethods");
+  return get_class_declared_methods_helper(env, ofClass, publicOnly,
+                                           /*want_constructor*/ false,
+                                           SystemDictionary::reflect_Method_klass(), THREAD);
+}
 JVM_END
 
 JVM_ENTRY(jobjectArray, JVM_GetClassDeclaredConstructors(JNIEnv *env, jclass ofClass, jboolean publicOnly))
 {
   JVMWrapper("JVM_GetClassDeclaredConstructors");
-  JvmtiVMObjectAllocEventCollector oam;
-
-  // Exclude primitive types and array types
-  if (java_lang_Class::is_primitive(JNIHandles::resolve_non_null(ofClass))
-      || java_lang_Class::as_Klass(JNIHandles::resolve_non_null(ofClass))->oop_is_array()) {
-    // Return empty array
-    oop res = oopFactory::new_objArray(SystemDictionary::reflect_Constructor_klass(), 0 , CHECK_NULL);
-    return (jobjectArray) JNIHandles::make_local(env, res);
-  }
-
-  instanceKlassHandle k(THREAD, java_lang_Class::as_Klass(JNIHandles::resolve_non_null(ofClass)));
-
-  // Ensure class is linked
-  k->link_class(CHECK_NULL);
-
-  Array<Method*>* methods = k->methods();
-  int methods_length = methods->length();
-  int num_constructors = 0;
-
-  int i;
-  for (i = 0; i < methods_length; i++) {
-    methodHandle method(THREAD, methods->at(i));
-    if (method->is_initializer() && !method->is_static()) {
-      if (!publicOnly || method->is_public()) {
-        ++num_constructors;
-      }
-    }
-  }
-
-  // Allocate result
-  objArrayOop r = oopFactory::new_objArray(SystemDictionary::reflect_Constructor_klass(), num_constructors, CHECK_NULL);
-  objArrayHandle result(THREAD, r);
-
-  int out_idx = 0;
-  for (i = 0; i < methods_length; i++) {
-    methodHandle method(THREAD, methods->at(i));
-    if (method->is_initializer() && !method->is_static()) {
-      if (!publicOnly || method->is_public()) {
-        oop m = Reflection::new_constructor(method, CHECK_NULL);
-        result->obj_at_put(out_idx, m);
-        ++out_idx;
-      }
-    }
-  }
-  assert(out_idx == num_constructors, "just checking");
-  return (jobjectArray) JNIHandles::make_local(env, result());
+  return get_class_declared_methods_helper(env, ofClass, publicOnly,
+                                           /*want_constructor*/ true,
+                                           SystemDictionary::reflect_Constructor_klass(), THREAD);
 }
 JVM_END
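[Editor's note] The refactoring above selects methods by recording their method_idnum first and re-resolving by idnum when filling the result, so a redefinition at a safepoint cannot skew the counts. A standalone sketch of that two-pass pattern follows, using hypothetical types (KlassModel, Method) rather than the HotSpot classes; the tolerance for entries that vanish between passes is the point being illustrated.

    // Standalone sketch, assuming hypothetical types; not the VM implementation.
    #include <cstdio>
    #include <map>
    #include <string>
    #include <vector>

    struct Method { std::string name; bool is_public; };

    // Stand-in for a class whose method set can change underneath us.
    class KlassModel {
      std::map<int, Method> _methods_by_idnum;
     public:
      void add(int idnum, Method m) { _methods_by_idnum[idnum] = m; }
      void remove(int idnum)        { _methods_by_idnum.erase(idnum); }
      const std::map<int, Method>& all() const { return _methods_by_idnum; }
      const Method* with_idnum(int id) const {
        auto it = _methods_by_idnum.find(id);
        return (it == _methods_by_idnum.end()) ? nullptr : &it->second;
      }
    };

    int main() {
      KlassModel k;
      k.add(1, {"run", true});
      k.add(2, {"<init>", true});
      k.add(3, {"helper", false});

      // Pass 1: select by predicate, but remember stable ids, not raw pointers.
      std::vector<int> idnums;
      for (const auto& e : k.all()) {
        if (e.second.is_public && e.second.name != "<init>") idnums.push_back(e.first);
      }

      k.remove(1);  // simulate a concurrent redefinition deleting a method

      // Pass 2: resolve by id; a missing method yields a null slot, not a crash.
      for (int id : idnums) {
        const Method* m = k.with_idnum(id);
        std::printf("idnum %d -> %s\n", id, m ? m->name.c_str() : "(null)");
      }
      return 0;
    }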
 
--- a/src/share/vm/prims/jvmtiEnvBase.hpp	Fri Sep 27 10:23:12 2013 +0200
+++ b/src/share/vm/prims/jvmtiEnvBase.hpp	Fri Sep 27 13:41:07 2013 +0200
@@ -406,7 +406,11 @@
   VMOp_Type type() const { return VMOp_GetCurrentContendedMonitor; }
   jvmtiError result() { return _result; }
   void doit() {
-    _result = ((JvmtiEnvBase *)_env)->get_current_contended_monitor(_calling_thread,_java_thread,_owned_monitor_ptr);
+    _result = JVMTI_ERROR_THREAD_NOT_ALIVE;
+    if (Threads::includes(_java_thread) && !_java_thread->is_exiting() &&
+        _java_thread->threadObj() != NULL) {
+      _result = ((JvmtiEnvBase *)_env)->get_current_contended_monitor(_calling_thread,_java_thread,_owned_monitor_ptr);
+    }
   }
 };
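[Editor's note] The doit() change above presets the result to the "thread not alive" error and performs the JVMTI operation only after confirming the target thread is still registered, not exiting, and has a Java thread object. A compact standalone sketch of that guard pattern, with hypothetical types (TargetThread, get_contended_monitor):

    // Standalone sketch of the liveness guard; hypothetical types and stub call.
    #include <cstdio>

    enum Error { OK = 0, THREAD_NOT_ALIVE = 15 };  // 15 mirrors JVMTI_ERROR_THREAD_NOT_ALIVE

    struct TargetThread { bool registered; bool exiting; bool has_thread_obj; };

    static Error get_contended_monitor(const TargetThread&) { return OK; }  // stand-in

    static Error do_operation(const TargetThread& t) {
      Error result = THREAD_NOT_ALIVE;                      // pessimistic default
      if (t.registered && !t.exiting && t.has_thread_obj) { // liveness checks first
        result = get_contended_monitor(t);                  // only touch a live thread
      }
      return result;
    }

    int main() {
      TargetThread live  = {true, false, true};
      TargetThread dying = {true, true,  true};
      std::printf("live: %d, dying: %d\n", do_operation(live), do_operation(dying));
      return 0;
    }

Defaulting to the error value means any path that skips the guarded block reports a well-defined failure rather than dereferencing a thread that has already exited.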
 
--- a/src/share/vm/prims/jvmtiImpl.cpp	Fri Sep 27 10:23:12 2013 +0200
+++ b/src/share/vm/prims/jvmtiImpl.cpp	Fri Sep 27 13:41:07 2013 +0200
@@ -273,59 +273,49 @@
 
   // add/remove breakpoint to/from versions of the method that
   // are EMCP. Directly or transitively obsolete methods are
-  // not saved in the PreviousVersionInfo.
+  // not saved in the PreviousVersionNodes.
   Thread *thread = Thread::current();
   instanceKlassHandle ikh = instanceKlassHandle(thread, _method->method_holder());
   Symbol* m_name = _method->name();
   Symbol* m_signature = _method->signature();
 
-  {
-    ResourceMark rm(thread);
-    // PreviousVersionInfo objects returned via PreviousVersionWalker
-    // contain a GrowableArray of handles. We have to clean up the
-    // GrowableArray _after_ the PreviousVersionWalker destructor
-    // has destroyed the handles.
-    {
-      // search previous versions if they exist
-      PreviousVersionWalker pvw((InstanceKlass *)ikh());
-      for (PreviousVersionInfo * pv_info = pvw.next_previous_version();
-           pv_info != NULL; pv_info = pvw.next_previous_version()) {
-        GrowableArray<methodHandle>* methods =
-          pv_info->prev_EMCP_method_handles();
+  // search previous versions if they exist
+  PreviousVersionWalker pvw(thread, (InstanceKlass *)ikh());
+  for (PreviousVersionNode * pv_node = pvw.next_previous_version();
+       pv_node != NULL; pv_node = pvw.next_previous_version()) {
+    GrowableArray<Method*>* methods = pv_node->prev_EMCP_methods();
 
-        if (methods == NULL) {
-          // We have run into a PreviousVersion generation where
-          // all methods were made obsolete during that generation's
-          // RedefineClasses() operation. At the time of that
-          // operation, all EMCP methods were flushed so we don't
-          // have to go back any further.
-          //
-          // A NULL methods array is different than an empty methods
-          // array. We cannot infer any optimizations about older
-          // generations from an empty methods array for the current
-          // generation.
-          break;
-        }
+    if (methods == NULL) {
+      // We have run into a PreviousVersion generation where
+      // all methods were made obsolete during that generation's
+      // RedefineClasses() operation. At the time of that
+      // operation, all EMCP methods were flushed so we don't
+      // have to go back any further.
+      //
+      // A NULL methods array is different than an empty methods
+      // array. We cannot infer any optimizations about older
+      // generations from an empty methods array for the current
+      // generation.
+      break;
+    }
 
-        for (int i = methods->length() - 1; i >= 0; i--) {
-          methodHandle method = methods->at(i);
-          // obsolete methods that are running are not deleted from
-          // previous version array, but they are skipped here.
-          if (!method->is_obsolete() &&
-              method->name() == m_name &&
-              method->signature() == m_signature) {
-            RC_TRACE(0x00000800, ("%sing breakpoint in %s(%s)",
-              meth_act == &Method::set_breakpoint ? "sett" : "clear",
-              method->name()->as_C_string(),
-              method->signature()->as_C_string()));
+    for (int i = methods->length() - 1; i >= 0; i--) {
+      Method* method = methods->at(i);
+      // obsolete methods that are running are not deleted from the
+      // previous version array, but they are skipped here.
+      if (!method->is_obsolete() &&
+          method->name() == m_name &&
+          method->signature() == m_signature) {
+        RC_TRACE(0x00000800, ("%sing breakpoint in %s(%s)",
+          meth_act == &Method::set_breakpoint ? "sett" : "clear",
+          method->name()->as_C_string(),
+          method->signature()->as_C_string()));
 
-            ((Method*)method()->*meth_act)(_bci);
-            break;
-          }
-        }
+        (method->*meth_act)(_bci);
+        break;
       }
-    } // pvw is cleaned up
-  } // rm is cleaned up
+    }
+  }
 }
 
 void JvmtiBreakpoint::set() {
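[Editor's note] The loop above dispatches through a pointer to member function, (method->*meth_act)(_bci), so the same previous-version walk can either set or clear a breakpoint. A standalone sketch of that C++ construct, with hypothetical types (the Method class here is a toy, not HotSpot's):

    // Standalone sketch of pointer-to-member-function dispatch; hypothetical types.
    #include <cstdio>
    #include <vector>

    class Method {
      std::vector<int> _bcis;
     public:
      void set_breakpoint(int bci)   { _bcis.push_back(bci); }
      void clear_breakpoint(int bci) {
        for (size_t i = 0; i < _bcis.size(); ++i) {
          if (_bcis[i] == bci) { _bcis.erase(_bcis.begin() + i); break; }
        }
      }
      size_t count() const { return _bcis.size(); }
    };

    typedef void (Method::*method_action)(int bci);

    static void each_method_do(std::vector<Method*>& methods,
                               method_action meth_act, int bci) {
      for (Method* m : methods) {
        (m->*meth_act)(bci);   // same syntax as the code in the hunk above
      }
    }

    int main() {
      Method a, b;
      std::vector<Method*> methods = {&a, &b};
      each_method_do(methods, &Method::set_breakpoint, 42);
      std::printf("after set:   %zu %zu\n", a.count(), b.count());
      each_method_do(methods, &Method::clear_breakpoint, 42);
      std::printf("after clear: %zu %zu\n", a.count(), b.count());
      return 0;
    }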
--- a/src/share/vm/prims/jvmtiRedefineClasses.cpp	Fri Sep 27 10:23:12 2013 +0200
+++ b/src/share/vm/prims/jvmtiRedefineClasses.cpp	Fri Sep 27 13:41:07 2013 +0200
@@ -2807,28 +2807,20 @@
                                         &trace_name_printed);
       }
     }
-    {
-      ResourceMark rm(_thread);
-      // PreviousVersionInfo objects returned via PreviousVersionWalker
-      // contain a GrowableArray of handles. We have to clean up the
-      // GrowableArray _after_ the PreviousVersionWalker destructor
-      // has destroyed the handles.
-      {
-        // the previous versions' constant pool caches may need adjustment
-        PreviousVersionWalker pvw(ik);
-        for (PreviousVersionInfo * pv_info = pvw.next_previous_version();
-             pv_info != NULL; pv_info = pvw.next_previous_version()) {
-          other_cp = pv_info->prev_constant_pool_handle();
-          cp_cache = other_cp->cache();
-          if (cp_cache != NULL) {
-            cp_cache->adjust_method_entries(_matching_old_methods,
-                                            _matching_new_methods,
-                                            _matching_methods_length,
-                                            &trace_name_printed);
-          }
-        }
-      } // pvw is cleaned up
-    } // rm is cleaned up
+
+    // the previous versions' constant pool caches may need adjustment
+    PreviousVersionWalker pvw(_thread, ik);
+    for (PreviousVersionNode * pv_node = pvw.next_previous_version();
+         pv_node != NULL; pv_node = pvw.next_previous_version()) {
+      other_cp = pv_node->prev_constant_pool();
+      cp_cache = other_cp->cache();
+      if (cp_cache != NULL) {
+        cp_cache->adjust_method_entries(_matching_old_methods,
+                                        _matching_new_methods,
+                                        _matching_methods_length,
+                                        &trace_name_printed);
+      }
+    }
   }
 }
 
@@ -2942,10 +2934,9 @@
       // obsolete methods need a unique idnum
       u2 num = InstanceKlass::cast(_the_class_oop)->next_method_idnum();
       if (num != ConstMethod::UNSET_IDNUM) {
-//      u2 old_num = old_method->method_idnum();
         old_method->set_method_idnum(num);
-// TO DO: attach obsolete annotations to obsolete method's new idnum
       }
+
       // With tracing we try not to "yack" too much. The position of
       // this trace assumes there are fewer obsolete methods than
       // EMCP methods.
--- a/src/share/vm/runtime/arguments.cpp	Fri Sep 27 10:23:12 2013 +0200
+++ b/src/share/vm/runtime/arguments.cpp	Fri Sep 27 13:41:07 2013 +0200
@@ -1100,6 +1100,7 @@
   }
 }
 
+#if defined(COMPILER2) || defined(_LP64) || !INCLUDE_CDS
 // Conflict: required to use shared spaces (-Xshare:on), but
 // incompatible command line options were chosen.
 
@@ -1112,6 +1113,7 @@
     FLAG_SET_DEFAULT(UseSharedSpaces, false);
   }
 }
+#endif
 
 void Arguments::set_tiered_flags() {
   // With tiered, set default policy to AdvancedThresholdPolicy, which is 3.
@@ -1520,16 +1522,18 @@
         FLAG_SET_ERGO(bool, UseParallelGC, true);
       }
     }
-    // Shared spaces work fine with other GCs but causes bytecode rewriting
-    // to be disabled, which hurts interpreter performance and decreases
-    // server performance.   On server class machines, keep the default
-    // off unless it is asked for.  Future work: either add bytecode rewriting
-    // at link time, or rewrite bytecodes in non-shared methods.
-    if (!DumpSharedSpaces && !RequireSharedSpaces &&
-        (FLAG_IS_DEFAULT(UseSharedSpaces) || !UseSharedSpaces)) {
-      no_shared_spaces();
-    }
   }
+#ifdef COMPILER2
+  // Shared spaces work fine with other GCs but cause bytecode rewriting
+  // to be disabled, which hurts interpreter performance and decreases
+  // server performance.  When -server is specified, keep the default off
+  // unless it is asked for.  Future work: either add bytecode rewriting
+  // at link time, or rewrite bytecodes in non-shared methods.
+  if (!DumpSharedSpaces && !RequireSharedSpaces &&
+      (FLAG_IS_DEFAULT(UseSharedSpaces) || !UseSharedSpaces)) {
+    no_shared_spaces();
+  }
+#endif
 
   set_conservative_max_heap_alignment();
 
@@ -2439,21 +2443,6 @@
     return result;
   }
 
-  if (AggressiveOpts) {
-    // Insert alt-rt.jar between user-specified bootclasspath
-    // prefix and the default bootclasspath.  os::set_boot_path()
-    // uses meta_index_dir as the default bootclasspath directory.
-    const char* altclasses_jar = "alt-rt.jar";
-    size_t altclasses_path_len = strlen(get_meta_index_dir()) + 1 +
-                                 strlen(altclasses_jar);
-    char* altclasses_path = NEW_C_HEAP_ARRAY(char, altclasses_path_len, mtInternal);
-    strcpy(altclasses_path, get_meta_index_dir());
-    strcat(altclasses_path, altclasses_jar);
-    scp.add_suffix_to_prefix(altclasses_path);
-    scp_assembly_required = true;
-    FREE_C_HEAP_ARRAY(char, altclasses_path, mtInternal);
-  }
-
   // Parse _JAVA_OPTIONS environment variable (if present) (mimics classic VM)
   result = parse_java_options_environment_variable(&scp, &scp_assembly_required);
   if (result != JNI_OK) {
--- a/src/share/vm/runtime/globals.hpp	Fri Sep 27 10:23:12 2013 +0200
+++ b/src/share/vm/runtime/globals.hpp	Fri Sep 27 13:41:07 2013 +0200
@@ -2526,6 +2526,9 @@
   product(bool, PrintStringTableStatistics, false,                          \
           "print statistics about the StringTable and SymbolTable")         \
                                                                             \
+  diagnostic(bool, VerifyStringTableAtExit, false,                          \
+          "verify StringTable contents at exit")                            \
+                                                                            \
   notproduct(bool, PrintSymbolTableSizeHistogram, false,                    \
           "print histogram of the symbol table")                            \
                                                                             \
--- a/src/share/vm/runtime/handles.hpp	Fri Sep 27 10:23:12 2013 +0200
+++ b/src/share/vm/runtime/handles.hpp	Fri Sep 27 13:41:07 2013 +0200
@@ -136,7 +136,7 @@
 // Specific Handles for different oop types
 #define DEF_METADATA_HANDLE(name, type)          \
   class name##Handle;                            \
-  class name##Handle {                           \
+  class name##Handle : public StackObj {         \
     type*     _value;                            \
     Thread*   _thread;                           \
    protected:                                    \
@@ -175,7 +175,7 @@
 // Writing this class explicitly, since DEF_METADATA_HANDLE(klass) doesn't
 // provide the necessary Klass* <-> Klass* conversions. This Klass
 // could be removed when we don't have the Klass* typedef anymore.
-class KlassHandle {
+class KlassHandle : public StackObj {
   Klass* _value;
  protected:
    Klass* obj() const          { return _value; }
--- a/src/share/vm/runtime/handles.inline.hpp	Fri Sep 27 10:23:12 2013 +0200
+++ b/src/share/vm/runtime/handles.inline.hpp	Fri Sep 27 13:41:07 2013 +0200
@@ -79,6 +79,7 @@
     } else {                                                           \
       _thread = Thread::current();                                     \
     }                                                                  \
+    assert (_thread->is_in_stack((address)this), "not on stack?");     \
     _thread->metadata_handles()->push((Metadata*)_value);              \
   } else {                                                             \
     _thread = NULL;                                                    \
@@ -95,6 +96,7 @@
     } else {                                                           \
       _thread = Thread::current();                                     \
     }                                                                  \
+    assert (_thread->is_in_stack((address)this), "not on stack?");     \
     _thread->metadata_handles()->push((Metadata*)_value);              \
   } else {                                                             \
     _thread = NULL;                                                    \
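[Editor's note] The assertions added above require that metadata handles live on the owning thread's stack (matching the StackObj base classes added in handles.hpp). Below is a rough standalone sketch of the idea, with hypothetical types (ThreadModel, ScopedHandle); the stack bounds here are fabricated around a known stack address purely for illustration, whereas a VM records exact per-thread bounds at thread start.

    // Standalone sketch, assuming hypothetical types and fabricated stack bounds.
    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    struct ThreadModel {
      uintptr_t stack_base;   // highest stack address (stack assumed to grow down)
      size_t    stack_size;
      bool is_in_stack(const void* addr) const {
        uintptr_t a = (uintptr_t)addr;
        return a <= stack_base && a > stack_base - stack_size;
      }
    };

    class ScopedHandle {              // intended to be used as a stack object only
     public:
      ScopedHandle(const ThreadModel* t, int* value) : _value(value) {
        if (!t->is_in_stack(this)) {            // assert(...) in a debug VM build
          std::fprintf(stderr, "handle not on stack?\n");
          std::abort();
        }
      }
      int* value() const { return _value; }
     private:
      int* _value;
    };

    int main() {
      // Fabricate generous bounds around a local; illustration only.
      char probe;
      ThreadModel t = { (uintptr_t)&probe + 64 * 1024, 8u * 1024 * 1024 };

      int metadata = 7;
      ScopedHandle ok(&t, &metadata);           // constructed on main's stack: accepted
      std::printf("stack-allocated handle holds %d\n", *ok.value());

      // new ScopedHandle(&t, &metadata);       // a heap allocation would usually abort
      return 0;
    }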
--- a/src/share/vm/runtime/java.cpp	Fri Sep 27 10:23:12 2013 +0200
+++ b/src/share/vm/runtime/java.cpp	Fri Sep 27 13:41:07 2013 +0200
@@ -544,6 +544,19 @@
   // it will run into trouble when system destroys static variables.
   MemTracker::shutdown(MemTracker::NMT_normal);
 
+  if (VerifyStringTableAtExit) {
+    int fail_cnt = 0;
+    {
+      MutexLocker ml(StringTable_lock);
+      fail_cnt = StringTable::verify_and_compare_entries();
+    }
+
+    if (fail_cnt != 0) {
+      tty->print_cr("ERROR: fail_cnt=%d", fail_cnt);
+      guarantee(fail_cnt == 0, "unexpected StringTable verification failures");
+    }
+  }
+
   #undef BEFORE_EXIT_NOT_RUN
   #undef BEFORE_EXIT_RUNNING
   #undef BEFORE_EXIT_DONE
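[Editor's note] The hunk above adds an optional shutdown check: when the new diagnostic flag is set, the StringTable is verified under its lock just before exit and the VM fails loudly on any mismatch (in a product build this would typically be enabled with -XX:+UnlockDiagnosticVMOptions -XX:+VerifyStringTableAtExit). A standalone sketch of the shape of that hook, with hypothetical names (StringTableModel, VerifyTableAtExit):

    // Standalone sketch; hypothetical names, stubbed verification.
    #include <cstdio>
    #include <cstdlib>
    #include <mutex>

    static bool VerifyTableAtExit = true;      // stand-in for the new diagnostic flag

    namespace StringTableModel {               // stand-in for the VM's StringTable
      std::mutex lock;
      int verify_and_compare_entries() {       // returns the number of failed checks
        int fail_cnt = 0;
        // ... walk the buckets, re-check each entry, count mismatches ...
        return fail_cnt;
      }
    }

    static void before_exit() {
      if (VerifyTableAtExit) {
        int fail_cnt = 0;
        {
          std::lock_guard<std::mutex> ml(StringTableModel::lock);  // MutexLocker above
          fail_cnt = StringTableModel::verify_and_compare_entries();
        }
        if (fail_cnt != 0) {
          std::fprintf(stderr, "ERROR: fail_cnt=%d\n", fail_cnt);
          std::abort();                        // guarantee(...) in the VM
        }
      }
    }

    int main() {
      before_exit();
      std::printf("string table verified at exit\n");
      return 0;
    }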
--- a/src/share/vm/runtime/sharedRuntime.cpp	Fri Sep 27 10:23:12 2013 +0200
+++ b/src/share/vm/runtime/sharedRuntime.cpp	Fri Sep 27 13:41:07 2013 +0200
@@ -1506,8 +1506,11 @@
                                                 info, CHECK_(methodHandle()));
         inline_cache->set_to_monomorphic(info);
       } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
-        // Change to megamorphic
-        inline_cache->set_to_megamorphic(&call_info, bc, CHECK_(methodHandle()));
+        // Potential change to megamorphic
+        bool successful = inline_cache->set_to_megamorphic(&call_info, bc, CHECK_(methodHandle()));
+        if (!successful) {
+          inline_cache->set_to_clean();
+        }
       } else {
         // Either clean or megamorphic
       }
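[Editor's note] The sharedRuntime.cpp change above makes the megamorphic transition fallible: if set_to_megamorphic() reports failure, the inline cache is reset to the clean state so the next call simply re-resolves. A standalone sketch of that fallback, with hypothetical types (InlineCacheModel); the stated failure cause (resources for the transition being unavailable) is an assumption here, not taken from the diff.

    // Standalone sketch of the "failed transition -> reset to clean" fallback.
    #include <cstdio>

    enum ICState { clean, monomorphic, megamorphic };

    struct InlineCacheModel {
      ICState state = monomorphic;
      bool set_to_megamorphic(bool can_allocate_resources) {
        if (!can_allocate_resources) return false;  // transition may legitimately fail
        state = megamorphic;
        return true;
      }
      void set_to_clean() { state = clean; }
    };

    int main() {
      InlineCacheModel ic;
      bool successful = ic.set_to_megamorphic(/*can_allocate_resources=*/false);
      if (!successful) {
        ic.set_to_clean();                          // next call re-resolves the target
      }
      std::printf("state=%d\n", (int)ic.state);     // 0 == clean
      return 0;
    }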
--- a/src/share/vm/runtime/vm_version.hpp	Fri Sep 27 10:23:12 2013 +0200
+++ b/src/share/vm/runtime/vm_version.hpp	Fri Sep 27 13:41:07 2013 +0200
@@ -78,7 +78,13 @@
   static const char* jre_release_version();
 
   // does HW support an 8-byte compare-exchange operation?
-  static bool supports_cx8()  {return _supports_cx8;}
+  static bool supports_cx8()  {
+#ifdef SUPPORTS_NATIVE_CX8
+    return true;
+#else
+    return _supports_cx8;
+#endif
+  }
   // does HW support atomic get-and-set or atomic get-and-add?  Used
   // to guide intrinsification decisions for Unsafe atomic ops
   static bool supports_atomic_getset4()  {return _supports_atomic_getset4;}
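[Editor's note] The supports_cx8() change above lets a platform promise native 8-byte compare-and-exchange at build time: with SUPPORTS_NATIVE_CX8 defined the query is a compile-time constant true, so fallback paths can be discarded by the compiler; otherwise the answer comes from runtime CPU detection. A standalone sketch of the same shape:

    // Standalone sketch of a build-time vs. run-time capability query.
    #include <cstdio>

    // #define SUPPORTS_NATIVE_CX8   // a platform build would define this

    static bool _supports_cx8 = false;    // filled in by runtime CPU detection

    static bool supports_cx8() {
    #ifdef SUPPORTS_NATIVE_CX8
      return true;                        // constant: fallback code folds away
    #else
      return _supports_cx8;               // decided per machine at startup
    #endif
    }

    int main() {
      if (supports_cx8()) {
        std::printf("use native 8-byte cmpxchg\n");
      } else {
        std::printf("use a lock-based fallback for 64-bit atomics\n");
      }
      return 0;
    }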
--- a/src/share/vm/services/attachListener.cpp	Fri Sep 27 10:23:12 2013 +0200
+++ b/src/share/vm/services/attachListener.cpp	Fri Sep 27 13:41:07 2013 +0200
@@ -470,7 +470,17 @@
                        vmSymbols::threadgroup_string_void_signature(),
                        thread_group,
                        string,
-                       CHECK);
+                       THREAD);
+
+  if (HAS_PENDING_EXCEPTION) {
+    tty->print_cr("Exception in VM (AttachListener::init) : ");
+    java_lang_Throwable::print(PENDING_EXCEPTION, tty);
+    tty->cr();
+
+    CLEAR_PENDING_EXCEPTION;
+
+    return;
+  }
 
   KlassHandle group(THREAD, SystemDictionary::ThreadGroup_klass());
   JavaCalls::call_special(&result,
@@ -479,7 +489,17 @@
                         vmSymbols::add_method_name(),
                         vmSymbols::thread_void_signature(),
                         thread_oop,             // ARG 1
-                        CHECK);
+                        THREAD);
+
+  if (HAS_PENDING_EXCEPTION) {
+    tty->print_cr("Exception in VM (AttachListener::init) : ");
+    java_lang_Throwable::print(PENDING_EXCEPTION, tty);
+    tty->cr();
+
+    CLEAR_PENDING_EXCEPTION;
+
+    return;
+  }
 
   { MutexLocker mu(Threads_lock);
     JavaThread* listener_thread = new JavaThread(&attach_listener_thread_entry);
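[Editor's note] The attachListener.cpp hunks above replace CHECK (which propagates a pending exception out of the caller) with THREAD, then explicitly test, print, and clear the pending exception and back out of initialization. A standalone sketch of that pattern, with hypothetical types standing in for the HotSpot TRAPS machinery:

    // Standalone sketch; hypothetical ThreadModel, not the HotSpot TRAPS macros.
    #include <cstdio>
    #include <string>

    struct ThreadModel {
      bool        has_pending_exception = false;
      std::string pending_exception;
      void clear_pending_exception() { has_pending_exception = false; pending_exception.clear(); }
    };

    // Stand-in for a Java upcall that may leave an exception pending on the thread.
    static void call_special(ThreadModel* thread, bool will_throw) {
      if (will_throw) {
        thread->has_pending_exception = true;
        thread->pending_exception = "java.lang.OutOfMemoryError";
      }
    }

    static void init(ThreadModel* thread) {
      call_special(thread, /*will_throw=*/true);   // pass THREAD, not CHECK
      if (thread->has_pending_exception) {         // HAS_PENDING_EXCEPTION above
        std::fprintf(stderr, "Exception in VM (AttachListener::init) : %s\n",
                     thread->pending_exception.c_str());
        thread->clear_pending_exception();         // CLEAR_PENDING_EXCEPTION above
        return;                                    // back out instead of crashing
      }
      std::printf("listener thread started\n");
    }

    int main() {
      ThreadModel t;
      init(&t);
      return 0;
    }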
--- a/src/share/vm/services/diagnosticArgument.cpp	Fri Sep 27 10:23:12 2013 +0200
+++ b/src/share/vm/services/diagnosticArgument.cpp	Fri Sep 27 13:41:07 2013 +0200
@@ -61,7 +61,7 @@
 }
 
 void GenDCmdArgument::to_string(char* c, char* buf, size_t len) {
-  jio_snprintf(buf, len, "%s", c);
+  jio_snprintf(buf, len, "%s", (c != NULL) ? c : "");
 }
 
 void GenDCmdArgument::to_string(StringArrayArgument* f, char* buf, size_t len) {
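[Editor's note] The one-line fix above guards a printf-style "%s" against a null argument, which is undefined behavior with some C libraries. A standalone sketch of the same guard:

    // Standalone sketch of a null-safe "%s" formatter.
    #include <cstddef>
    #include <cstdio>

    static void to_string(const char* c, char* buf, std::size_t len) {
      std::snprintf(buf, len, "%s", (c != nullptr) ? c : "");  // never hand %s a null
    }

    int main() {
      char buf[16];
      to_string(nullptr, buf, sizeof(buf));
      std::printf("[%s]\n", buf);     // prints [] instead of risking a crash
      to_string("gc", buf, sizeof(buf));
      std::printf("[%s]\n", buf);     // prints [gc]
      return 0;
    }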
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/print/PrintInlining.java	Fri Sep 27 13:41:07 2013 +0200
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8022585
+ * @summary VM crashes when run with -XX:+PrintInlining
+ * @run main/othervm -Xcomp -XX:+PrintInlining PrintInlining
+ *
+ */
+
+public class PrintInlining {
+  public static void main(String[] args) {
+    System.out.println("Passed");
+  }
+}
--- a/test/runtime/CDSCompressedKPtrs/XShareAuto.java	Fri Sep 27 10:23:12 2013 +0200
+++ b/test/runtime/CDSCompressedKPtrs/XShareAuto.java	Fri Sep 27 13:41:07 2013 +0200
@@ -33,16 +33,9 @@
 
 public class XShareAuto {
     public static void main(String[] args) throws Exception {
-        if (!Platform.is64bit()) {
-            System.out.println("ObjectAlignmentInBytes for CDS is only " +
-                "supported on 64bit platforms; this plaform is " +
-                System.getProperty("sun.arch.data.model"));
-            System.out.println("Skipping the test");
-            return;
-        }
         ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
-            "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa",
-            "-Xshare:dump");
+            "-server", "-XX:+UnlockDiagnosticVMOptions",
+            "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
         OutputAnalyzer output = new OutputAnalyzer(pb.start());
         output.shouldContain("Loading classes to share");
         output.shouldHaveExitValue(0);