changeset 13070:755c423791ab

Merge
author ehelin
date Thu, 14 Nov 2013 21:05:16 +0100
parents ee527493b36d (current diff) 41cb10cbfb3c (diff)
children 6f206b5d258f
files src/share/vm/prims/whitebox.cpp
diffstat 14 files changed, 632 insertions(+), 111 deletions(-)
--- a/src/share/vm/classfile/classFileParser.cpp	Fri Nov 08 17:46:53 2013 +0100
+++ b/src/share/vm/classfile/classFileParser.cpp	Thu Nov 14 21:05:16 2013 +0100
@@ -4080,7 +4080,7 @@
 
     // Generate any default methods - default methods are interface methods
     // that have a default implementation.  This is new with Lambda project.
-    if (has_default_methods && !access_flags.is_interface() ) {
+    if (has_default_methods) {
       DefaultMethods::generate_default_methods(
           this_klass(), &all_mirandas, CHECK_(nullHandle));
     }
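
With the is_interface() guard removed above, default-method processing now also runs
when the class being parsed is itself an interface. A minimal Java sketch of the kind
of hierarchy this covers (names are illustrative, not taken from the changeset):

    interface Greeter {
        default String greet() { return "hello"; }   // concrete (default) interface method
    }

    // Walker is itself an interface; after this change the parser runs
    // DefaultMethods::generate_default_methods for it as well, not only for classes.
    interface Walker extends Greeter {
        // inherits greet() as a default method
    }
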
--- a/src/share/vm/classfile/defaultMethods.cpp	Fri Nov 08 17:46:53 2013 +0100
+++ b/src/share/vm/classfile/defaultMethods.cpp	Thu Nov 14 21:05:16 2013 +0100
@@ -171,8 +171,12 @@
   }
   bool is_cancelled() const { return _cancelled; }
 
+  // This code used to skip interface classes because their only
+  // superclass was j.l.Object, which would also be covered by the class
+  // superclass hierarchy walk. Now that the starting point can be
+  // an interface, we must ensure we catch j.l.Object as the super.
   static bool has_super(InstanceKlass* cls) {
-    return cls->super() != NULL && !cls->is_interface();
+    return cls->super() != NULL;
   }
 
   Node* node_at_depth(int i) const {
@@ -391,16 +395,21 @@
       return;
     }
 
+    // Qualified methods are maximally-specific methods.
+    // These include public instance methods that are either concrete (i.e., default) or abstract.
     GrowableArray<Method*> qualified_methods;
     int num_defaults = 0;
     int default_index = -1;
+    int qualified_index = -1;
     for (int i = 0; i < _members.length(); ++i) {
       Pair<Method*,QualifiedState> entry = _members.at(i);
       if (entry.second == QUALIFIED) {
         qualified_methods.append(entry.first);
-        default_index++;
+        qualified_index++;
         if (entry.first->is_default_method()) {
           num_defaults++;
+          default_index = qualified_index;
+
         }
       }
     }
@@ -408,16 +417,10 @@
     if (qualified_methods.length() == 0) {
       _exception_message = generate_no_defaults_message(CHECK);
       _exception_name = vmSymbols::java_lang_AbstractMethodError();
-    } else if (qualified_methods.length() == 1) {
-      // leave abstract methods alone, they will be found via normal search path
-      Method* method = qualified_methods.at(0);
-      if (!method->is_abstract()) {
-        _selected_target = qualified_methods.at(0);
-      }
-      // If only one qualified method is default, select that
+    // If only one qualified method is default, select that
     } else if (num_defaults == 1) {
         _selected_target = qualified_methods.at(default_index);
-    } else {
+    } else if (num_defaults > 1) {
       _exception_message = generate_conflicts_message(&qualified_methods,CHECK);
       _exception_name = vmSymbols::java_lang_IncompatibleClassChangeError();
       if (TraceDefaultMethods) {
@@ -425,6 +428,7 @@
         tty->print_cr("");
       }
     }
+    // leave abstract methods alone, they will be found via normal search path
   }
 
   bool contains_signature(Symbol* query) {
@@ -704,8 +708,10 @@
     Method* m = iklass->find_method(_method_name, _method_signature);
     // private interface methods are not candidates for default methods
     // invokespecial to private interface methods doesn't use default method logic
+    // The overpasses are your supertypes' errors, so we do not include them
     // future: take access controls into account for superclass methods
-    if (m != NULL && !m->is_static() && (!iklass->is_interface() || m->is_public())) {
+    if (m != NULL && !m->is_static() && !m->is_overpass() &&
+         (!iklass->is_interface() || m->is_public())) {
       if (_family == NULL) {
         _family = new StatefulMethodFamily();
       }
@@ -781,7 +787,8 @@
 #ifndef PRODUCT
   if (TraceDefaultMethods) {
     ResourceMark rm;  // be careful with these!
-    tty->print_cr("Class %s requires default method processing",
+    tty->print_cr("%s %s requires default method processing",
+        klass->is_interface() ? "Interface" : "Class",
         klass->name()->as_klass_external_name());
     PrintHierarchy printer;
     printer.run(klass);
@@ -806,7 +813,7 @@
  }
 #ifndef PRODUCT
   if (TraceDefaultMethods) {
-    tty->print_cr("Creating overpasses...");
+    tty->print_cr("Creating defaults and overpasses...");
   }
 #endif // ndef PRODUCT
 
@@ -1076,7 +1083,9 @@
   klass->set_initial_method_idnum(new_size);
 
   ClassLoaderData* cld = klass->class_loader_data();
-  MetadataFactory::free_array(cld, original_methods);
+  if (original_methods->length() > 0) {
+    MetadataFactory::free_array(cld, original_methods);
+  }
   if (original_ordering->length() > 0) {
     klass->set_method_ordering(merged_ordering);
     MetadataFactory::free_array(cld, original_ordering);
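
The selection logic above now reduces to: no qualified methods yields an
AbstractMethodError message, exactly one default among the qualified methods is
selected, more than one default yields an IncompatibleClassChangeError message, and
abstract-only candidates are left to the normal search path. A hedged Java sketch of
those shapes, with illustrative names:

    interface Base        { int m(); }                                   // abstract only
    interface WithDefault extends Base { default int m() { return 1; } }
    interface OtherDefault             { default int m() { return 2; } }

    // Exactly one maximally-specific default: WithDefault.m() is selected for C.
    class C implements WithDefault, Base { }

    // Two unrelated defaults: javac rejects this shape outright; with separately
    // compiled interfaces the VM reports the conflict as
    // IncompatibleClassChangeError when m() is invoked.
    // class Conflict implements WithDefault, OtherDefault { }

    // Abstract only: nothing is selected; a call that reaches such a slot at
    // run time ends in AbstractMethodError.
    abstract class AbstractOnly implements Base { }
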
--- a/src/share/vm/interpreter/linkResolver.cpp	Fri Nov 08 17:46:53 2013 +0100
+++ b/src/share/vm/interpreter/linkResolver.cpp	Thu Nov 14 21:05:16 2013 +0100
@@ -152,11 +152,13 @@
     // Could be an Object method inherited into an interface, but still a vtable call.
     kind = CallInfo::vtable_call;
   } else if (!resolved_klass->is_interface()) {
-    // A miranda method.  Compute the vtable index.
+    // A default or miranda method.  Compute the vtable index.
     ResourceMark rm;
     klassVtable* vt = InstanceKlass::cast(resolved_klass)->vtable();
-    index = vt->index_of_miranda(resolved_method->name(),
-                                 resolved_method->signature());
+    index = LinkResolver::vtable_index_of_interface_method(resolved_klass,
+                           resolved_method);
+    assert(index >= 0 , "we should have valid vtable index at this point");
+
     kind = CallInfo::vtable_call;
   } else if (resolved_method->has_vtable_index()) {
     // Can occur if an interface redeclares a method of Object.
@@ -279,7 +281,7 @@
 }
 
 int LinkResolver::vtable_index_of_interface_method(KlassHandle klass,
-                                          methodHandle resolved_method, TRAPS) {
+                                          methodHandle resolved_method) {
 
   int vtable_index = Method::invalid_vtable_index;
   Symbol* name = resolved_method->name();
@@ -295,7 +297,7 @@
   }
   if (vtable_index == Method::invalid_vtable_index) {
     // get vtable_index for miranda methods
-    ResourceMark rm(THREAD);
+    ResourceMark rm;
     klassVtable *vt = InstanceKlass::cast(klass())->vtable();
     vtable_index = vt->index_of_miranda(name, signature);
   }
@@ -691,7 +693,7 @@
                   );
     resolved_method->access_flags().print_on(tty);
     if (resolved_method->is_default_method()) {
-      tty->print("default");
+      tty->print("default ");
     }
     if (resolved_method->is_overpass()) {
       tty->print("overpass");
@@ -937,7 +939,7 @@
                );
     resolved_method->access_flags().print_on(tty);
     if (resolved_method->is_default_method()) {
-      tty->print("default");
+      tty->print("default ");
     }
     if (resolved_method->is_overpass()) {
       tty->print("overpass");
@@ -1017,7 +1019,7 @@
                 );
     sel_method->access_flags().print_on(tty);
     if (sel_method->is_default_method()) {
-      tty->print("default");
+      tty->print("default ");
     }
     if (sel_method->is_overpass()) {
       tty->print("overpass");
@@ -1081,7 +1083,7 @@
                   );
     resolved_method->access_flags().print_on(tty);
     if (resolved_method->is_default_method()) {
-      tty->print("default");
+      tty->print("default ");
     }
     if (resolved_method->is_overpass()) {
       tty->print("overpass");
@@ -1118,7 +1120,7 @@
   // do lookup based on receiver klass using the vtable index
   if (resolved_method->method_holder()->is_interface()) { // miranda method
     vtable_index = vtable_index_of_interface_method(resolved_klass,
-                           resolved_method, CHECK);
+                           resolved_method);
     assert(vtable_index >= 0 , "we should have valid vtable index at this point");
 
     InstanceKlass* inst = InstanceKlass::cast(recv_klass());
@@ -1175,7 +1177,7 @@
                   );
     selected_method->access_flags().print_on(tty);
     if (selected_method->is_default_method()) {
-      tty->print("default");
+      tty->print("default ");
     }
     if (selected_method->is_overpass()) {
       tty->print("overpass");
@@ -1268,14 +1270,6 @@
                                                       sel_method->name(),
                                                       sel_method->signature()));
   }
-  // setup result
-  if (!resolved_method->has_itable_index()) {
-    int vtable_index = resolved_method->vtable_index();
-    assert(vtable_index == sel_method->vtable_index(), "sanity check");
-    result.set_virtual(resolved_klass, recv_klass, resolved_method, sel_method, vtable_index, CHECK);
-    return;
-  }
-  int itable_index = resolved_method()->itable_index();
 
   if (TraceItables && Verbose) {
     ResourceMark rm(THREAD);
@@ -1289,14 +1283,22 @@
                   );
     sel_method->access_flags().print_on(tty);
     if (sel_method->is_default_method()) {
-      tty->print("default");
+      tty->print("default ");
     }
     if (sel_method->is_overpass()) {
       tty->print("overpass");
     }
     tty->cr();
   }
-  result.set_interface(resolved_klass, recv_klass, resolved_method, sel_method, itable_index, CHECK);
+  // setup result
+  if (!resolved_method->has_itable_index()) {
+    int vtable_index = resolved_method->vtable_index();
+    assert(vtable_index == sel_method->vtable_index(), "sanity check");
+    result.set_virtual(resolved_klass, recv_klass, resolved_method, sel_method, vtable_index, CHECK);
+  } else {
+    int itable_index = resolved_method()->itable_index();
+    result.set_interface(resolved_klass, recv_klass, resolved_method, sel_method, itable_index, CHECK);
+  }
 }
 
 
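
The "default or miranda method" branch above covers resolution of a class-typed
symbolic reference that lands on an interface-declared method. A minimal Java sketch
of the bytecode shape that reaches it (illustrative names):

    interface Greeter {
        default String greet() { return "hello"; }
    }

    class Impl implements Greeter { }        // inherits greet() as a default

    public class Call {
        public static void main(String[] args) {
            Impl obj = new Impl();
            // javac emits invokevirtual Impl.greet() here.  The resolved method's
            // holder is the interface Greeter (a default method) while the resolved
            // class Impl is not an interface, so the vtable index is computed via
            // vtable_index_of_interface_method as in the hunk above.
            System.out.println(obj.greet());
        }
    }
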
--- a/src/share/vm/interpreter/linkResolver.hpp	Fri Nov 08 17:46:53 2013 +0100
+++ b/src/share/vm/interpreter/linkResolver.hpp	Thu Nov 14 21:05:16 2013 +0100
@@ -130,7 +130,6 @@
   static void lookup_polymorphic_method         (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature,
                                                  KlassHandle current_klass, Handle *appendix_result_or_null, Handle *method_type_result, TRAPS);
 
-  static int vtable_index_of_interface_method(KlassHandle klass, methodHandle resolved_method, TRAPS);
   static void resolve_klass           (KlassHandle& result, constantPoolHandle  pool, int index, TRAPS);
 
   static void resolve_pool  (KlassHandle& resolved_klass, Symbol*& method_name, Symbol*& method_signature, KlassHandle& current_klass, constantPoolHandle pool, int index, TRAPS);
@@ -186,6 +185,7 @@
   static methodHandle resolve_interface_call_or_null(KlassHandle receiver_klass, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass);
   static methodHandle resolve_static_call_or_null   (KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass);
   static methodHandle resolve_special_call_or_null  (KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass);
+  static int vtable_index_of_interface_method(KlassHandle klass, methodHandle resolved_method);
 
   // same as above for compile-time resolution; returns vtable_index if current_klass if linked
   static int resolve_virtual_vtable_index  (KlassHandle receiver_klass, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass);
--- a/src/share/vm/interpreter/rewriter.cpp	Fri Nov 08 17:46:53 2013 +0100
+++ b/src/share/vm/interpreter/rewriter.cpp	Thu Nov 14 21:05:16 2013 +0100
@@ -70,21 +70,21 @@
 }
 
 // Unrewrite the bytecodes if an error occurs.
-void Rewriter::restore_bytecodes() {
+void Rewriter::restore_bytecodes(TRAPS) {
   int len = _methods->length();
 
   for (int i = len-1; i >= 0; i--) {
     Method* method = _methods->at(i);
-    scan_method(method, true);
+    scan_method(method, true, CHECK);
   }
 }
 
 // Creates a constant pool cache given a CPC map
 void Rewriter::make_constant_pool_cache(TRAPS) {
-  const int length = _cp_cache_map.length();
   ClassLoaderData* loader_data = _pool->pool_holder()->class_loader_data();
   ConstantPoolCache* cache =
-      ConstantPoolCache::allocate(loader_data, length, _cp_cache_map,
+      ConstantPoolCache::allocate(loader_data, _cp_cache_map,
+                                  _invokedynamic_cp_cache_map,
                                   _invokedynamic_references_map, CHECK);
 
   // initialize object cache in constant pool
@@ -154,6 +154,31 @@
   }
 }
 
+// If the constant pool entry for invokespecial is InterfaceMethodref,
+// we need to add a separate cpCache entry for its resolution, because it is
+// different from the resolution for invokeinterface with InterfaceMethodref.
+// These cannot share cpCache entries.  It's unclear whether all invokespecial
+// references to InterfaceMethodrefs would resolve to the same thing, so a new
+// cpCache entry is created for each one.  This was added with lambda.
+void Rewriter::rewrite_invokespecial(address bcp, int offset, bool reverse, TRAPS) {
+  static int count = 0;
+  address p = bcp + offset;
+  if (!reverse) {
+    int cp_index = Bytes::get_Java_u2(p);
+    int cache_index = add_invokespecial_cp_cache_entry(cp_index);
+    if (cache_index != (int)(jushort) cache_index) {
+      THROW_MSG(vmSymbols::java_lang_InternalError(),
+                "This classfile overflows invokespecial for interfaces "
+                "and cannot be loaded");
+    }
+    Bytes::put_native_u2(p, cache_index);
+  } else {
+    int cache_index = Bytes::get_native_u2(p);
+    int cp_index = cp_cache_entry_pool_index(cache_index);
+    Bytes::put_Java_u2(p, cp_index);
+  }
+}
+
 
 // Adjust the invocation bytecode for a signature-polymorphic method (MethodHandle.invoke, etc.)
 void Rewriter::maybe_rewrite_invokehandle(address opc, int cp_index, int cache_index, bool reverse) {
@@ -203,7 +228,7 @@
   if (!reverse) {
     int cp_index = Bytes::get_Java_u2(p);
     int cache_index = add_invokedynamic_cp_cache_entry(cp_index);
-    add_invokedynamic_resolved_references_entries(cp_index, cache_index);
+    int resolved_index = add_invokedynamic_resolved_references_entries(cp_index, cache_index);
     // Replace the trailing four bytes with a CPC index for the dynamic
     // call site.  Unlike other CPC entries, there is one per bytecode,
     // not just one per distinct CP entry.  In other words, the
@@ -212,13 +237,20 @@
     // all these entries.  That is the main reason invokedynamic
     // must have a five-byte instruction format.  (Of course, other JVM
     // implementations can use the bytes for other purposes.)
+    // Note: We use native_u4 format exclusively for 4-byte indexes.
     Bytes::put_native_u4(p, ConstantPool::encode_invokedynamic_index(cache_index));
-    // Note: We use native_u4 format exclusively for 4-byte indexes.
+    // add the bcp in case we need to patch this bytecode if we also find an
+    // invokespecial/InterfaceMethodref in the bytecode stream
+    _patch_invokedynamic_bcps->push(p);
+    _patch_invokedynamic_refs->push(resolved_index);
   } else {
-    // callsite index
     int cache_index = ConstantPool::decode_invokedynamic_index(
                         Bytes::get_native_u4(p));
-    int cp_index = cp_cache_entry_pool_index(cache_index);
+    // We will reverse the bytecode rewriting _after_ adjusting them.
+    // Adjust the cache index by offset to the invokedynamic entries in the
+    // cpCache plus the delta if the invokedynamic bytecodes were adjusted.
+    cache_index = cp_cache_delta() + _first_iteration_cp_cache_limit;
+    int cp_index = invokedynamic_cp_cache_entry_pool_index(cache_index);
     assert(_pool->tag_at(cp_index).is_invoke_dynamic(), "wrong index");
     // zero out 4 bytes
     Bytes::put_Java_u4(p, 0);
@@ -226,6 +258,34 @@
   }
 }
 
+void Rewriter::patch_invokedynamic_bytecodes() {
+  // If the end of the cp_cache is the same as after initializing with the
+  // cpool, nothing needs to be done.  Invokedynamic bytecodes are at the
+  // correct offsets. ie. no invokespecials added
+  int delta = cp_cache_delta();
+  if (delta > 0) {
+    int length = _patch_invokedynamic_bcps->length();
+    assert(length == _patch_invokedynamic_refs->length(),
+           "lengths should match");
+    for (int i = 0; i < length; i++) {
+      address p = _patch_invokedynamic_bcps->at(i);
+      int cache_index = ConstantPool::decode_invokedynamic_index(
+                          Bytes::get_native_u4(p));
+      Bytes::put_native_u4(p, ConstantPool::encode_invokedynamic_index(cache_index + delta));
+
+      // The invokedynamic resolved references map also points into the cp cache,
+      // so the delta must be added to each of its entries as well.
+      int resolved_index = _patch_invokedynamic_refs->at(i);
+      for (int entry = 0; entry < ConstantPoolCacheEntry::_indy_resolved_references_entries; entry++) {
+        assert(_invokedynamic_references_map[resolved_index+entry] == cache_index,
+             "should be the same index");
+        _invokedynamic_references_map.at_put(resolved_index+entry,
+                                             cache_index + delta);
+      }
+    }
+  }
+}
+
 
 // Rewrite some ldc bytecodes to _fast_aldc
 void Rewriter::maybe_rewrite_ldc(address bcp, int offset, bool is_wide,
@@ -269,7 +329,7 @@
 
 
 // Rewrites a method given the index_map information
-void Rewriter::scan_method(Method* method, bool reverse) {
+void Rewriter::scan_method(Method* method, bool reverse, TRAPS) {
 
   int nof_jsrs = 0;
   bool has_monitor_bytecodes = false;
@@ -329,12 +389,25 @@
 #endif
           break;
         }
+
+        case Bytecodes::_invokespecial  : {
+          int offset = prefix_length + 1;
+          address p = bcp + offset;
+          int cp_index = Bytes::get_Java_u2(p);
+          // InterfaceMethodref
+          if (_pool->tag_at(cp_index).is_interface_method()) {
+            rewrite_invokespecial(bcp, offset, reverse, CHECK);
+          } else {
+            rewrite_member_reference(bcp, offset, reverse);
+          }
+          break;
+        }
+
         case Bytecodes::_getstatic      : // fall through
         case Bytecodes::_putstatic      : // fall through
         case Bytecodes::_getfield       : // fall through
         case Bytecodes::_putfield       : // fall through
         case Bytecodes::_invokevirtual  : // fall through
-        case Bytecodes::_invokespecial  : // fall through
         case Bytecodes::_invokestatic   :
         case Bytecodes::_invokeinterface:
         case Bytecodes::_invokehandle   : // if reverse=true
@@ -426,16 +499,21 @@
 
   for (int i = len-1; i >= 0; i--) {
     Method* method = _methods->at(i);
-    scan_method(method);
+    scan_method(method, false, CHECK);  // If an exception is thrown here,
+                                        // the bytecodes are not reversed
   }
 
+  // May have to fix invokedynamic bytecodes if invokespecial/InterfaceMethodref
+  // entries had to be added.
+  patch_invokedynamic_bytecodes();
+
   // allocate constant pool cache, now that we've seen all the bytecodes
   make_constant_pool_cache(THREAD);
 
   // Restore bytecodes to their unrewritten state if there are exceptions
   // rewriting bytecodes or allocating the cpCache
   if (HAS_PENDING_EXCEPTION) {
-    restore_bytecodes();
+    restore_bytecodes(CATCH);
     return;
   }
 
@@ -452,7 +530,7 @@
       // relocating bytecodes.  If some are relocated, that is ok because that
       // doesn't affect constant pool to cpCache rewriting.
       if (HAS_PENDING_EXCEPTION) {
-        restore_bytecodes();
+        restore_bytecodes(CATCH);
         return;
       }
       // Method might have gotten rewritten.
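
For reference, the source shape that produces an invokespecial with an
InterfaceMethodref operand, which is what rewrite_invokespecial above gives its own
cpCache entry. A minimal Java sketch (illustrative names):

    interface Logger {
        default void log(String msg) { System.out.println("Logger: " + msg); }
    }

    class ConsoleLogger implements Logger {
        @Override
        public void log(String msg) {
            // A superinterface call compiles to invokespecial with an
            // InterfaceMethodref (Logger.log), unlike the invokeinterface used for
            // ordinary interface calls, so the two cannot share a cpCache entry.
            Logger.super.log(msg);
        }
    }
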
--- a/src/share/vm/interpreter/rewriter.hpp	Fri Nov 08 17:46:53 2013 +0100
+++ b/src/share/vm/interpreter/rewriter.hpp	Thu Nov 14 21:05:16 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -46,55 +46,102 @@
   intArray            _method_handle_invokers;
   int                 _resolved_reference_limit;
 
+  // For mapping invokedynamic bytecodes, which are discovered during method
+  // scanning.  The invokedynamic entries are added at the end of the cpCache.
+  // If there are any invokespecial/InterfaceMethodref special case bytecodes,
+  // these entries are added before invokedynamic entries so that the
+  // invokespecial bytecode's 16-bit index doesn't overflow.
+  intStack            _invokedynamic_cp_cache_map;
+
+  // For patching.
+  GrowableArray<address>* _patch_invokedynamic_bcps;
+  GrowableArray<int>*     _patch_invokedynamic_refs;
+
   void init_maps(int length) {
     _cp_map.initialize(length, -1);
     // Choose an initial value large enough that we don't get frequent
     // calls to grow().
-    _cp_cache_map.initialize(length / 2);
+    _cp_cache_map.initialize(length/2);
     // Also cache resolved objects, in another different cache.
     _reference_map.initialize(length, -1);
-    _resolved_references_map.initialize(length / 2);
-    _invokedynamic_references_map.initialize(length / 2);
+    _resolved_references_map.initialize(length/2);
+    _invokedynamic_references_map.initialize(length/2);
     _resolved_reference_limit = -1;
-    DEBUG_ONLY(_cp_cache_index_limit = -1);
+    _first_iteration_cp_cache_limit = -1;
+
+    // invokedynamic specific fields
+    _invokedynamic_cp_cache_map.initialize(length/4);
+    _patch_invokedynamic_bcps = new GrowableArray<address>(length/4);
+    _patch_invokedynamic_refs = new GrowableArray<int>(length/4);
   }
 
-  int _cp_cache_index_limit;
+  int _first_iteration_cp_cache_limit;
   void record_map_limits() {
-#ifdef ASSERT
-    // Record initial size of the two arrays generated for the CP cache:
-    _cp_cache_index_limit = _cp_cache_map.length();
-#endif //ASSERT
+    // Record initial size of the two arrays generated for the CP cache
+    // relative to walking the constant pool.
+    _first_iteration_cp_cache_limit = _cp_cache_map.length();
     _resolved_reference_limit = _resolved_references_map.length();
   }
 
+  int cp_cache_delta() {
+    // How many cp cache entries were added since recording map limits after
+    // cp cache initialization?
+    assert(_first_iteration_cp_cache_limit != -1, "only valid after first iteration");
+    return _cp_cache_map.length() - _first_iteration_cp_cache_limit;
+  }
+
   int  cp_entry_to_cp_cache(int i) { assert(has_cp_cache(i), "oob"); return _cp_map[i]; }
   bool has_cp_cache(int i) { return (uint)i < (uint)_cp_map.length() && _cp_map[i] >= 0; }
 
+  int add_map_entry(int cp_index, intArray* cp_map, intStack* cp_cache_map) {
+    assert(cp_map->at(cp_index) == -1, "not twice on same cp_index");
+    int cache_index = cp_cache_map->append(cp_index);
+    cp_map->at_put(cp_index, cache_index);
+    return cache_index;
+  }
+
   int add_cp_cache_entry(int cp_index) {
     assert(_pool->tag_at(cp_index).value() != JVM_CONSTANT_InvokeDynamic, "use indy version");
-    assert(_cp_map[cp_index] == -1, "not twice on same cp_index");
-    assert(_cp_cache_index_limit == -1, "do not add cache entries after first iteration");
-    int cache_index = _cp_cache_map.append(cp_index);
-    _cp_map.at_put(cp_index, cache_index);
+    assert(_first_iteration_cp_cache_limit == -1, "do not add cache entries after first iteration");
+    int cache_index = add_map_entry(cp_index, &_cp_map, &_cp_cache_map);
     assert(cp_entry_to_cp_cache(cp_index) == cache_index, "");
     assert(cp_cache_entry_pool_index(cache_index) == cp_index, "");
     return cache_index;
   }
 
-  // add a new CP cache entry beyond the normal cache (for invokedynamic only)
   int add_invokedynamic_cp_cache_entry(int cp_index) {
     assert(_pool->tag_at(cp_index).value() == JVM_CONSTANT_InvokeDynamic, "use non-indy version");
-    assert(_cp_map[cp_index] == -1, "do not map from cp_index");
-    assert(_cp_cache_index_limit >= 0, "add indy cache entries after first iteration");
+    assert(_first_iteration_cp_cache_limit >= 0, "add indy cache entries after first iteration");
+    // add to the invokedynamic index map.
+    int cache_index = _invokedynamic_cp_cache_map.append(cp_index);
+    // do not update _cp_map, since the mapping is one-to-many
+    assert(invokedynamic_cp_cache_entry_pool_index(cache_index) == cp_index, "");
+    // this map's indices start at zero; the value written into the bytecode is
+    // offset so these entries appear appended after the regular cp cache entries.
+    return cache_index + _first_iteration_cp_cache_limit;
+  }
+
+  int invokedynamic_cp_cache_entry_pool_index(int cache_index) {
+    int cp_index = _invokedynamic_cp_cache_map[cache_index];
+    return cp_index;
+  }
+
+  // add a new CP cache entry beyond the normal cache for the special case of
+  // invokespecial with InterfaceMethodref as cpool operand.
+  int add_invokespecial_cp_cache_entry(int cp_index) {
+    assert(_first_iteration_cp_cache_limit >= 0, "add these special cache entries after first iteration");
+    // Don't add InterfaceMethodref if it already exists at the end.
+    for (int i = _first_iteration_cp_cache_limit; i < _cp_cache_map.length(); i++) {
+      if (cp_cache_entry_pool_index(i) == cp_index) {
+        return i;
+      }
+    }
     int cache_index = _cp_cache_map.append(cp_index);
-    assert(cache_index >= _cp_cache_index_limit, "");
+    assert(cache_index >= _first_iteration_cp_cache_limit, "");
     // do not update _cp_map, since the mapping is one-to-many
     assert(cp_cache_entry_pool_index(cache_index) == cp_index, "");
     return cache_index;
   }
 
-  // fix duplicated code later
   int  cp_entry_to_resolved_references(int cp_index) const {
     assert(has_entry_in_resolved_references(cp_index), "oob");
     return _reference_map[cp_index];
@@ -105,10 +152,7 @@
 
   // add a new entry to the resolved_references map
   int add_resolved_references_entry(int cp_index) {
-    assert(_reference_map[cp_index] == -1, "not twice on same cp_index");
-    assert(_resolved_reference_limit == -1, "do not add CP refs after first iteration");
-    int ref_index = _resolved_references_map.append(cp_index);
-    _reference_map.at_put(cp_index, ref_index);
+    int ref_index = add_map_entry(cp_index, &_reference_map, &_resolved_references_map);
     assert(cp_entry_to_resolved_references(cp_index) == ref_index, "");
     return ref_index;
   }
@@ -137,7 +181,7 @@
   // Access the contents of _cp_cache_map to determine CP cache layout.
   int cp_cache_entry_pool_index(int cache_index) {
     int cp_index = _cp_cache_map[cache_index];
-      return cp_index;
+    return cp_index;
   }
 
   // All the work goes in here:
@@ -145,14 +189,18 @@
 
   void compute_index_maps();
   void make_constant_pool_cache(TRAPS);
-  void scan_method(Method* m, bool reverse = false);
+  void scan_method(Method* m, bool reverse, TRAPS);
   void rewrite_Object_init(methodHandle m, TRAPS);
-  void rewrite_member_reference(address bcp, int offset, bool reverse = false);
-  void maybe_rewrite_invokehandle(address opc, int cp_index, int cache_index, bool reverse = false);
-  void rewrite_invokedynamic(address bcp, int offset, bool reverse = false);
-  void maybe_rewrite_ldc(address bcp, int offset, bool is_wide, bool reverse = false);
+  void rewrite_member_reference(address bcp, int offset, bool reverse);
+  void maybe_rewrite_invokehandle(address opc, int cp_index, int cache_index, bool reverse);
+  void rewrite_invokedynamic(address bcp, int offset, bool reverse);
+  void maybe_rewrite_ldc(address bcp, int offset, bool is_wide, bool reverse);
+  void rewrite_invokespecial(address bcp, int offset, bool reverse, TRAPS);
+
+  void patch_invokedynamic_bytecodes();
+
   // Revert bytecodes in case of an exception.
-  void restore_bytecodes();
+  void restore_bytecodes(TRAPS);
 
   static methodHandle rewrite_jsrs(methodHandle m, TRAPS);
  public:
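
A worked example of the index arithmetic, with illustrative sizes: suppose the first
iteration over the constant pool appends 10 regular entries, so
_first_iteration_cp_cache_limit is 10. When scanning finds an invokedynamic,
add_invokedynamic_cp_cache_entry records it at index 0 of _invokedynamic_cp_cache_map
and writes 0 + 10 = 10 into the bytecode. If two invokespecial/InterfaceMethodref
entries are later appended to _cp_cache_map (indices 10 and 11), cp_cache_delta()
is 2 and patch_invokedynamic_bytecodes bumps the invokedynamic operand from 10 to 12.
The final layout, matching ConstantPoolCache::initialize, is: regular entries 0-9,
invokespecial entries 10-11, invokedynamic entries from 12 onward.
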
--- a/src/share/vm/oops/cpCache.cpp	Fri Nov 08 17:46:53 2013 +0100
+++ b/src/share/vm/oops/cpCache.cpp	Thu Nov 14 21:05:16 2013 +0100
@@ -554,24 +554,37 @@
 // Implementation of ConstantPoolCache
 
 ConstantPoolCache* ConstantPoolCache::allocate(ClassLoaderData* loader_data,
-                                     int length,
                                      const intStack& index_map,
+                                     const intStack& invokedynamic_index_map,
                                      const intStack& invokedynamic_map, TRAPS) {
+
+  const int length = index_map.length() + invokedynamic_index_map.length();
   int size = ConstantPoolCache::size(length);
 
   return new (loader_data, size, false, MetaspaceObj::ConstantPoolCacheType, THREAD)
-    ConstantPoolCache(length, index_map, invokedynamic_map);
+    ConstantPoolCache(length, index_map, invokedynamic_index_map, invokedynamic_map);
 }
 
 void ConstantPoolCache::initialize(const intArray& inverse_index_map,
+                                   const intArray& invokedynamic_inverse_index_map,
                                    const intArray& invokedynamic_references_map) {
-  assert(inverse_index_map.length() == length(), "inverse index map must have same length as cache");
-  for (int i = 0; i < length(); i++) {
+  for (int i = 0; i < inverse_index_map.length(); i++) {
     ConstantPoolCacheEntry* e = entry_at(i);
     int original_index = inverse_index_map[i];
     e->initialize_entry(original_index);
     assert(entry_at(i) == e, "sanity");
   }
+
+  // Append invokedynamic entries at the end
+  int invokedynamic_offset = inverse_index_map.length();
+  for (int i = 0; i < invokedynamic_inverse_index_map.length(); i++) {
+    int offset = i + invokedynamic_offset;
+    ConstantPoolCacheEntry* e = entry_at(offset);
+    int original_index = invokedynamic_inverse_index_map[i];
+    e->initialize_entry(original_index);
+    assert(entry_at(offset) == e, "sanity");
+  }
+
   for (int ref = 0; ref < invokedynamic_references_map.length(); ref++) {
     const int cpci = invokedynamic_references_map[ref];
     if (cpci >= 0) {
--- a/src/share/vm/oops/cpCache.hpp	Fri Nov 08 17:46:53 2013 +0100
+++ b/src/share/vm/oops/cpCache.hpp	Thu Nov 14 21:05:16 2013 +0100
@@ -31,6 +31,10 @@
 
 class PSPromotionManager;
 
+// The ConstantPoolCache is not a cache! It is the resolution table that the
+// interpreter uses to avoid going into the runtime, and a way to access
+// resolved values.
+
 // A ConstantPoolCacheEntry describes an individual entry of the constant
 // pool cache. There's 2 principal kinds of entries: field entries for in-
 // stance & static field access, and method entries for invokes. Some of
@@ -392,26 +396,33 @@
   friend class MetadataFactory;
  private:
   int             _length;
-  ConstantPool* _constant_pool;                // the corresponding constant pool
+  ConstantPool*   _constant_pool;          // the corresponding constant pool
 
   // Sizing
   debug_only(friend class ClassVerifier;)
 
   // Constructor
-  ConstantPoolCache(int length, const intStack& inverse_index_map,
+  ConstantPoolCache(int length,
+                    const intStack& inverse_index_map,
+                    const intStack& invokedynamic_inverse_index_map,
                     const intStack& invokedynamic_references_map) :
-                                        _length(length), _constant_pool(NULL) {
-    initialize(inverse_index_map, invokedynamic_references_map);
+                          _length(length),
+                          _constant_pool(NULL) {
+    initialize(inverse_index_map, invokedynamic_inverse_index_map,
+               invokedynamic_references_map);
     for (int i = 0; i < length; i++) {
       assert(entry_at(i)->is_f1_null(), "Failed to clear?");
     }
   }
 
   // Initialization
-  void initialize(const intArray& inverse_index_map, const intArray& invokedynamic_references_map);
+  void initialize(const intArray& inverse_index_map,
+                  const intArray& invokedynamic_inverse_index_map,
+                  const intArray& invokedynamic_references_map);
  public:
-  static ConstantPoolCache* allocate(ClassLoaderData* loader_data, int length,
-                                     const intStack& inverse_index_map,
+  static ConstantPoolCache* allocate(ClassLoaderData* loader_data,
+                                     const intStack& cp_cache_map,
+                                     const intStack& invokedynamic_cp_cache_map,
                                      const intStack& invokedynamic_references_map, TRAPS);
   bool is_constantPoolCache() const { return true; }
 
--- a/src/share/vm/oops/klassVtable.cpp	Fri Nov 08 17:46:53 2013 +0100
+++ b/src/share/vm/oops/klassVtable.cpp	Thu Nov 14 21:05:16 2013 +0100
@@ -86,7 +86,11 @@
   get_mirandas(&new_mirandas, all_mirandas, super, methods, NULL, local_interfaces);
   *num_new_mirandas = new_mirandas.length();
 
-  vtable_length += *num_new_mirandas * vtableEntry::size();
+  // Interfaces do not need interface methods in their vtables.
+  // This includes miranda methods and, during later processing, default methods.
+  if (!class_flags.is_interface()) {
+    vtable_length += *num_new_mirandas * vtableEntry::size();
+  }
 
   if (Universe::is_bootstrapping() && vtable_length == 0) {
     // array classes don't have their superclass set correctly during
@@ -224,7 +228,11 @@
     }
 
     // add miranda methods; it will also return the updated initialized
-    initialized = fill_in_mirandas(initialized);
+    // Interfaces do not need interface methods in their vtables.
+    // This includes miranda methods and, during later processing, default methods.
+    if (!ik()->is_interface()) {
+      initialized = fill_in_mirandas(initialized);
+    }
 
     // In class hierarchies where the accessibility is not increasing (i.e., going from private ->
     // package_private -> public/protected), the vtable might actually be smaller than our initial
@@ -264,12 +272,12 @@
            _klass->internal_name(), sig, vtable_index);
            super_method->access_flags().print_on(tty);
            if (super_method->is_default_method()) {
-             tty->print("default");
+             tty->print("default ");
            }
            tty->print("overriders flags: ");
            target_method->access_flags().print_on(tty);
            if (target_method->is_default_method()) {
-             tty->print("default");
+             tty->print("default ");
            }
         }
 #endif /*PRODUCT*/
@@ -332,9 +340,15 @@
     // An interface never allocates new vtable slots, only inherits old ones.
     // This method will either be assigned its own itable index later,
     // or be assigned an inherited vtable index in the loop below.
-    // default methods store their vtable indices in the inheritors default_vtable_indices
-    assert (default_index == -1, "interfaces don't store resolved default methods");
-    target_method()->set_vtable_index(Method::pending_itable_index);
+    // default methods inherited by classes store their vtable indices
+    // in the inheritor's default_vtable_indices
+    // default methods inherited by interfaces may already have a
+    // valid itable index; if so, don't change it
+    // overpass methods in an interface will be assigned an itable index later
+    // by an inheriting class
+    if (!is_default || !target_method()->has_itable_index()) {
+      target_method()->set_vtable_index(Method::pending_itable_index);
+    }
   }
 
   // we need a new entry if there is no superclass
@@ -441,7 +455,7 @@
            target_klass->internal_name(), sig, i);
            super_method->access_flags().print_on(tty);
            if (super_method->is_default_method()) {
-             tty->print("default");
+             tty->print("default ");
            }
            if (super_method->is_overpass()) {
              tty->print("overpass");
@@ -449,7 +463,7 @@
            tty->print("overriders flags: ");
            target_method->access_flags().print_on(tty);
            if (target_method->is_default_method()) {
-             tty->print("default");
+             tty->print("default ");
            }
            if (target_method->is_overpass()) {
              tty->print("overpass");
@@ -468,7 +482,7 @@
            target_klass->internal_name(), sig,i);
            super_method->access_flags().print_on(tty);
            if (super_method->is_default_method()) {
-             tty->print("default");
+             tty->print("default ");
            }
            if (super_method->is_overpass()) {
              tty->print("overpass");
@@ -476,7 +490,7 @@
            tty->print("overriders flags: ");
            target_method->access_flags().print_on(tty);
            if (target_method->is_default_method()) {
-             tty->print("default");
+             tty->print("default ");
            }
            if (target_method->is_overpass()) {
              tty->print("overpass");
@@ -494,8 +508,18 @@
 #ifndef PRODUCT
   if (PrintVtables && Verbose) {
     ResourceMark rm;
-    tty->print_cr("adding %s::%s at index %d", _klass->internal_name(),
-      (m != NULL) ? m->name()->as_C_string() : "<NULL>", index);
+    const char* sig = (m != NULL) ? m->name_and_sig_as_C_string() : "<NULL>";
+    tty->print("adding %s at index %d, flags: ", sig, index);
+    if (m != NULL) {
+      m->access_flags().print_on(tty);
+      if (m->is_default_method()) {
+        tty->print("default ");
+      }
+      if (m->is_overpass()) {
+        tty->print("overpass");
+      }
+    }
+    tty->cr();
   }
 #endif
   table()[index].set(m);
@@ -631,8 +655,10 @@
   if (mhk->is_interface()) {
     assert(m->is_public(), "should be public");
     assert(ik()->implements_interface(method_holder) , "this class should implement the interface");
-    assert(is_miranda(m, ik()->methods(), ik()->default_methods(), ik()->super()), "should be a miranda_method");
-    return true;
+    // the search could find a miranda or a default method
+    if (is_miranda(m, ik()->methods(), ik()->default_methods(), ik()->super())) {
+      return true;
+    }
   }
   return false;
 }
@@ -644,9 +670,10 @@
 // the caller must make sure that the method belongs to an interface implemented by the class
 // Miranda methods only include public interface instance methods
 // Not private methods, not static methods, not default == concrete abstract
+// Miranda methods also do not include overpass methods in interfaces
 bool klassVtable::is_miranda(Method* m, Array<Method*>* class_methods,
                              Array<Method*>* default_methods, Klass* super) {
-  if (m->is_static() || m->is_private()) {
+  if (m->is_static() || m->is_private() || m->is_overpass()) {
     return false;
   }
   Symbol* name = m->name();
@@ -744,6 +771,8 @@
 // Discover miranda methods ("miranda" = "interface abstract, no binding"),
 // and append them into the vtable starting at index initialized,
 // return the new value of initialized.
+// Miranda methods use vtable entries, but do not get assigned a vtable_index
+// The vtable_index is discovered by searching from the end of the vtable
 int klassVtable::fill_in_mirandas(int initialized) {
   GrowableArray<Method*> mirandas(20);
   get_mirandas(&mirandas, NULL, ik()->super(), ik()->methods(),
@@ -758,7 +787,7 @@
           sig, initialized);
         meth->access_flags().print_on(tty);
         if (meth->is_default_method()) {
-          tty->print("default");
+          tty->print("default ");
         }
         tty->cr();
       }
@@ -858,7 +887,7 @@
       tty->print("      (%5d)  ", i);
       m->access_flags().print_on(tty);
       if (m->is_default_method()) {
-        tty->print("default");
+        tty->print("default ");
       }
       if (m->is_overpass()) {
         tty->print("overpass");
@@ -977,6 +1006,25 @@
     if (interface_method_needs_itable_index(m)) {
       assert(!m->is_final_method(), "no final interface methods");
       // If m is already assigned a vtable index, do not disturb it.
+      if (TraceItables && Verbose) {
+        ResourceMark rm;
+        const char* sig = (m != NULL) ? m->name_and_sig_as_C_string() : "<NULL>";
+        if (m->has_vtable_index()) {
+          tty->print("itable index %d for method: %s, flags: ", m->vtable_index(), sig);
+        } else {
+          tty->print("itable index %d for method: %s, flags: ", ime_num, sig);
+        }
+        if (m != NULL) {
+          m->access_flags().print_on(tty);
+          if (m->is_default_method()) {
+            tty->print("default ");
+          }
+          if (m->is_overpass()) {
+            tty->print("overpass");
+          }
+        }
+        tty->cr();
+      }
       if (!m->has_vtable_index()) {
         assert(m->vtable_index() == Method::pending_itable_index, "set by initialize_vtable");
         m->set_itable_index(ime_num);
@@ -1079,7 +1127,7 @@
           tty->print("target_method flags: ");
           target()->access_flags().print_on(tty);
           if (target()->is_default_method()) {
-            tty->print("default");
+            tty->print("default ");
           }
           tty->cr();
         }
@@ -1158,7 +1206,7 @@
       tty->print("      (%5d)  ", i);
       m->access_flags().print_on(tty);
       if (m->is_default_method()) {
-        tty->print("default");
+        tty->print("default ");
       }
       tty->print(" --  ");
       m->print_name(tty);
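
For reference, a small Java sketch of the two method kinds the vtable code above
distinguishes (illustrative names): a miranda method is an interface method with no
implementation anywhere in the class hierarchy, while an inherited default method
fills the corresponding slot with real code.

    interface Shape {
        double area();                                    // abstract: miranda candidate
        default String describe() { return "a shape"; }   // default method
    }

    // AbstractShape gets a vtable slot for area() even though it provides no body
    // (a miranda method); describe() is satisfied by the inherited default.
    // Interfaces themselves, per the change above, no longer reserve vtable slots
    // for either kind.
    abstract class AbstractShape implements Shape { }
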
--- a/src/share/vm/prims/whitebox.cpp	Fri Nov 08 17:46:53 2013 +0100
+++ b/src/share/vm/prims/whitebox.cpp	Thu Nov 14 21:05:16 2013 +0100
@@ -53,6 +53,8 @@
 #include "compiler/compileBroker.hpp"
 #include "runtime/compilationPolicy.hpp"
 
+#define SIZE_T_MAX_VALUE ((size_t) -1)
+
 bool WhiteBox::_used = false;
 
 WB_ENTRY(jlong, WB_GetObjectAddress(JNIEnv* env, jobject o, jobject obj))
@@ -109,6 +111,112 @@
 }
 WB_END
 
+#ifndef PRODUCT
+// Forward declaration
+void TestReservedSpace_test();
+void TestReserveMemorySpecial_test();
+void TestVirtualSpace_test();
+void TestMetaspaceAux_test();
+#endif
+
+WB_ENTRY(void, WB_RunMemoryUnitTests(JNIEnv* env, jobject o))
+#ifndef PRODUCT
+  TestReservedSpace_test();
+  TestReserveMemorySpecial_test();
+  TestVirtualSpace_test();
+  TestMetaspaceAux_test();
+#endif
+WB_END
+
+WB_ENTRY(void, WB_ReadFromNoaccessArea(JNIEnv* env, jobject o))
+  size_t granularity = os::vm_allocation_granularity();
+  ReservedHeapSpace rhs(100 * granularity, granularity, false, NULL);
+  VirtualSpace vs;
+  vs.initialize(rhs, 50 * granularity);
+
+  // Check that the constraints are satisfied
+  if (!( UseCompressedOops && rhs.base() != NULL &&
+         Universe::narrow_oop_base() != NULL &&
+         Universe::narrow_oop_use_implicit_null_checks() )) {
+    tty->print_cr("WB_ReadFromNoaccessArea method is useless:\n "
+                  "\tUseCompressedOops is %d\n"
+                  "\trhs.base() is "PTR_FORMAT"\n"
+                  "\tUniverse::narrow_oop_base() is "PTR_FORMAT"\n"
+                  "\tUniverse::narrow_oop_use_implicit_null_checks() is %d",
+                  UseCompressedOops,
+                  rhs.base(),
+                  Universe::narrow_oop_base(),
+                  Universe::narrow_oop_use_implicit_null_checks());
+    return;
+  }
+  tty->print_cr("Reading from no access area... ");
+  tty->print_cr("*(vs.low_boundary() - rhs.noaccess_prefix() / 2 ) = %c",
+                *(vs.low_boundary() - rhs.noaccess_prefix() / 2 ));
+WB_END
+
+static jint wb_stress_virtual_space_resize(size_t reserved_space_size,
+                                           size_t magnitude, size_t iterations) {
+  size_t granularity = os::vm_allocation_granularity();
+  ReservedHeapSpace rhs(reserved_space_size * granularity, granularity, false, NULL);
+  VirtualSpace vs;
+  if (!vs.initialize(rhs, 0)) {
+    tty->print_cr("Failed to initialize VirtualSpace. Can't proceed.");
+    return 3;
+  }
+
+  long seed = os::random();
+  tty->print_cr("Random seed is %ld", seed);
+  os::init_random(seed);
+
+  for (size_t i = 0; i < iterations; i++) {
+
+    // Whether we will shrink or grow
+    bool shrink = os::random() % 2L == 0;
+
+    // Get random delta to resize virtual space
+    size_t delta = (size_t)os::random() % magnitude;
+
+    // If we are about to shrink virtual space below zero, then expand instead
+    if (shrink && vs.committed_size() < delta) {
+      shrink = false;
+    }
+
+    // Resizing by delta
+    if (shrink) {
+      vs.shrink_by(delta);
+    } else {
+      // If expanding fails expand_by will silently return false
+      vs.expand_by(delta, true);
+    }
+  }
+  return 0;
+}
+
+WB_ENTRY(jint, WB_StressVirtualSpaceResize(JNIEnv* env, jobject o,
+        jlong reserved_space_size, jlong magnitude, jlong iterations))
+  tty->print_cr("reservedSpaceSize="JLONG_FORMAT", magnitude="JLONG_FORMAT", "
+                "iterations="JLONG_FORMAT"\n", reserved_space_size, magnitude,
+                iterations);
+  if (reserved_space_size < 0 || magnitude < 0 || iterations < 0) {
+    tty->print_cr("One of the variables printed above is negative. Can't proceed.\n");
+    return 1;
+  }
+
+  // sizeof(size_t) depends on whether the OS is 32-bit or 64-bit. sizeof(jlong) is
+  // always 8 bytes, so we must guard against overflow on 32-bit platforms.
+  if (sizeof(size_t) < sizeof(jlong)) {
+    jlong size_t_max_value = (jlong) SIZE_T_MAX_VALUE;
+    if (reserved_space_size > size_t_max_value || magnitude > size_t_max_value
+        || iterations > size_t_max_value) {
+      tty->print_cr("One of the variables printed above overflows size_t. Can't proceed.\n");
+      return 2;
+    }
+  }
+
+  return wb_stress_virtual_space_resize((size_t) reserved_space_size,
+                                        (size_t) magnitude, (size_t) iterations);
+WB_END
+
 #if INCLUDE_ALL_GCS
 WB_ENTRY(jboolean, WB_G1IsHumongous(JNIEnv* env, jobject o, jobject obj))
   G1CollectedHeap* g1 = G1CollectedHeap::heap();
@@ -445,6 +553,9 @@
   {CC"getCompressedOopsMaxHeapSize", CC"()J",
       (void*)&WB_GetCompressedOopsMaxHeapSize},
   {CC"printHeapSizes",     CC"()V",                   (void*)&WB_PrintHeapSizes    },
+  {CC"runMemoryUnitTests", CC"()V",                   (void*)&WB_RunMemoryUnitTests},
+  {CC"readFromNoaccessArea",CC"()V",                  (void*)&WB_ReadFromNoaccessArea},
+  {CC"stressVirtualSpaceResize",CC"(JJJ)I",           (void*)&WB_StressVirtualSpaceResize},
 #if INCLUDE_ALL_GCS
   {CC"g1InConcurrentMark", CC"()Z",                   (void*)&WB_G1InConcurrentMark},
   {CC"g1IsHumongous",      CC"(Ljava/lang/Object;)Z", (void*)&WB_G1IsHumongous     },
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/memory/ReadFromNoaccessArea.java	Thu Nov 14 21:05:16 2013 +0100
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @summary Test that touching noaccess area in class ReservedHeapSpace results in SIGSEGV/ACCESS_VIOLATION
+ * @library /testlibrary /testlibrary/whitebox
+ * @build ReadFromNoaccessArea
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main ReadFromNoaccessArea
+ */
+
+import com.oracle.java.testlibrary.*;
+import sun.hotspot.WhiteBox;
+
+public class ReadFromNoaccessArea {
+
+  public static void main(String args[]) throws Exception {
+    if (!Platform.is64bit()) {
+      System.out.println("ReadFromNoaccessArea test is useful only on 64-bit architectures. Passing silently.");
+      return;
+    }
+
+    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+          "-Xbootclasspath/a:.",
+          "-XX:+UnlockDiagnosticVMOptions",
+          "-XX:+WhiteBoxAPI",
+          "-XX:+UseCompressedOops",
+          "-XX:HeapBaseMinAddress=33G",
+          DummyClassWithMainTryingToReadFromNoaccessArea.class.getName());
+
+    OutputAnalyzer output = new OutputAnalyzer(pb.start());
+    System.out.println("******* Printing stdout for analysis in case of failure *******");
+    System.out.println(output.getStdout());
+    System.out.println("******* Printing stderr for analysis in case of failure *******");
+    System.out.println(output.getStderr());
+    System.out.println("***************************************************************");
+    if (output.getStdout() != null && output.getStdout().contains("WB_ReadFromNoaccessArea method is useless")) {
+      // Test conditions broken. There is no protected page in ReservedHeapSpace in these circumstances. Silently passing test.
+      return;
+    }
+    if (Platform.isWindows()) {
+      output.shouldContain("EXCEPTION_ACCESS_VIOLATION");
+    } else if (Platform.isOSX()) {
+      output.shouldContain("SIGBUS");
+    } else {
+      output.shouldContain("SIGSEGV");
+    }
+  }
+
+  public static class DummyClassWithMainTryingToReadFromNoaccessArea {
+
+    // This method calls the whitebox method that reads from the no-access area
+    public static void main(String args[]) throws Exception {
+      WhiteBox.getWhiteBox().readFromNoaccessArea();
+      throw new Exception("Call of readFromNoaccessArea succeeded! This is wrong. Crash expected. Test failed.");
+    }
+  }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/memory/RunUnitTestsConcurrently.java	Thu Nov 14 21:05:16 2013 +0100
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @summary Test launches unit tests inside the VM concurrently
+ * @library /testlibrary /testlibrary/whitebox
+ * @build RunUnitTestsConcurrently
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI RunUnitTestsConcurrently 30 15000
+ */
+
+import com.oracle.java.testlibrary.*;
+import sun.hotspot.WhiteBox;
+
+public class RunUnitTestsConcurrently {
+
+  private static WhiteBox wb;
+  private static long timeout;
+  private static long timeStamp;
+
+  public static class Worker implements Runnable {
+    @Override
+    public void run() {
+      while (System.currentTimeMillis() - timeStamp < timeout) {
+        WhiteBox.getWhiteBox().runMemoryUnitTests();
+      }
+    }
+  }
+
+  public static void main(String[] args) throws InterruptedException {
+    if (!Platform.isDebugBuild() || !Platform.is64bit()) {
+      return;
+    }
+    wb = WhiteBox.getWhiteBox();
+    System.out.println("Starting threads");
+
+    int threads = Integer.valueOf(args[0]);
+    timeout = Long.valueOf(args[1]);
+
+    timeStamp = System.currentTimeMillis();
+
+    Thread[] threadsArray = new Thread[threads];
+    for (int i = 0; i < threads; i++) {
+      threadsArray[i] = new Thread(new Worker());
+      threadsArray[i].start();
+    }
+    for (int i = 0; i < threads; i++) {
+      threadsArray[i].join();
+    }
+
+    System.out.println("Quitting test.");
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/memory/StressVirtualSpaceResize.java	Thu Nov 14 21:05:16 2013 +0100
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @summary Stress test that expands/shrinks VirtualSpace
+ * @library /testlibrary /testlibrary/whitebox
+ * @build StressVirtualSpaceResize
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI StressVirtualSpaceResize
+ */
+
+import sun.hotspot.WhiteBox;
+
+public class StressVirtualSpaceResize {
+
+  public static void main(String args[]) throws Exception {
+    if (WhiteBox.getWhiteBox().stressVirtualSpaceResize(1000, 0xffffL, 0xffffL) != 0)
+      throw new RuntimeException("Whitebox method stressVirtualSpaceResize returned non zero exit code");
+  }
+}
--- a/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java	Fri Nov 08 17:46:53 2013 +0100
+++ b/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java	Thu Nov 14 21:05:16 2013 +0100
@@ -144,4 +144,10 @@
 
   // force Full GC
   public native void fullGC();
+
+  // Tests on ReservedSpace/VirtualSpace classes
+  public native int stressVirtualSpaceResize(long reservedSpaceSize, long magnitude, long iterations);
+  public native void runMemoryUnitTests();
+  public native void readFromNoaccessArea();
+
 }