changeset 10346:cd83e1d98347

Merge
author dcubed
date Fri, 24 May 2013 10:21:12 -0700
parents 194b27b865bc b7fa10a3a69a
children 6c138b9851fb
files
diffstat 49 files changed, 446 insertions(+), 212 deletions(-)
--- a/.hgtags	Thu May 23 23:04:33 2013 -0700
+++ b/.hgtags	Fri May 24 10:21:12 2013 -0700
@@ -343,3 +343,5 @@
 69494caf57908ba2c8efa9eaaa472b4d1875588a hs25-b32
 1ae0472ff3a0117b5b019d380ad59fface2fde14 jdk8-b90
 b19517cecc2e91636d7c16ba2f35e3d3dc628099 hs25-b33
+7cbdf0e3725c0c56a2ff7540fc70b6d4b5890d04 jdk8-b91
+38da9f4f67096745f851318d792d6468aa1f6cf8 hs25-b34
--- a/make/bsd/makefiles/arm.make	Thu May 23 23:04:33 2013 -0700
+++ b/make/bsd/makefiles/arm.make	Fri May 24 10:21:12 2013 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,8 @@
 
 Obj_Files += bsd_arm.o
 
-LIBS += $(EXT_LIBS_PATH)/sflt_glibc.a 
+ifneq ($(EXT_LIBS_PATH),)
+  LIBS += $(EXT_LIBS_PATH)/sflt_glibc.a 
+endif
 
 CFLAGS += -DVM_LITTLE_ENDIAN
--- a/make/hotspot_version	Thu May 23 23:04:33 2013 -0700
+++ b/make/hotspot_version	Fri May 24 10:21:12 2013 -0700
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=25
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=34
+HS_BUILD_NUMBER=35
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
--- a/make/linux/makefiles/arm.make	Thu May 23 23:04:33 2013 -0700
+++ b/make/linux/makefiles/arm.make	Fri May 24 10:21:12 2013 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,8 @@
 
 Obj_Files += linux_arm.o
 
-LIBS += $(EXT_LIBS_PATH)/sflt_glibc.a 
+ifneq ($(EXT_LIBS_PATH),)
+  LIBS += $(EXT_LIBS_PATH)/sflt_glibc.a 
+endif
 
 CFLAGS += -DVM_LITTLE_ENDIAN
--- a/make/linux/makefiles/jsig.make	Thu May 23 23:04:33 2013 -0700
+++ b/make/linux/makefiles/jsig.make	Fri May 24 10:21:12 2013 -0700
@@ -54,7 +54,7 @@
 $(LIBJSIG): $(JSIGSRCDIR)/jsig.c $(LIBJSIG_MAPFILE)
 	@echo Making signal interposition lib...
 	$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \
-                         $(LFLAGS_JSIG) $(JSIG_DEBUG_CFLAGS) -o $@ $< -ldl
+                         $(LFLAGS_JSIG) $(JSIG_DEBUG_CFLAGS) $(EXTRA_CFLAGS) -o $@ $< -ldl
 ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
 	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJSIG_DEBUGINFO)
 	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJSIG_DEBUGINFO) $@
--- a/make/linux/makefiles/saproc.make	Thu May 23 23:04:33 2013 -0700
+++ b/make/linux/makefiles/saproc.make	Fri May 24 10:21:12 2013 -0700
@@ -92,6 +92,7 @@
 	           $(SASRCFILES)                                        \
 	           $(SA_LFLAGS)                                         \
 	           $(SA_DEBUG_CFLAGS)                                   \
+	           $(EXTRA_CFLAGS)                                      \
 	           -o $@                                                \
 	           -lthread_db
 ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
--- a/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Thu May 23 23:04:33 2013 -0700
+++ b/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Fri May 24 10:21:12 2013 -0700
@@ -1498,27 +1498,29 @@
     __ movptr(elem_klass, elem_klass_addr); // query the object klass
     generate_type_check(elem_klass, ckoff_arg, ckval_arg, temp,
                         &L_store_element, NULL);
-      // (On fall-through, we have failed the element type check.)
+    // (On fall-through, we have failed the element type check.)
     // ======== end loop ========
 
     // It was a real error; we must depend on the caller to finish the job.
     // Register "count" = -1 * number of *remaining* oops, length_arg = *total* oops.
     // Emit GC store barriers for the oops we have copied (length_arg + count),
     // and report their number to the caller.
+    assert_different_registers(to, count, rax);
+    Label L_post_barrier;
     __ addl(count, length_arg);         // transfers = (length - remaining)
     __ movl2ptr(rax, count);            // save the value
-    __ notptr(rax);                     // report (-1^K) to caller
-    __ movptr(to, to_arg);              // reload
-    assert_different_registers(to, count, rax);
-    gen_write_ref_array_post_barrier(to, count);
-    __ jmpb(L_done);
+    __ notptr(rax);                     // report (-1^K) to caller (does not affect flags)
+    __ jccb(Assembler::notZero, L_post_barrier);
+    __ jmp(L_done); // K == 0, nothing was copied, skip post barrier
 
     // Come here on success only.
     __ BIND(L_do_card_marks);
+    __ xorptr(rax, rax);                // return 0 on success
     __ movl2ptr(count, length_arg);
-    __ movptr(to, to_arg);                // reload
+
+    __ BIND(L_post_barrier);
+    __ movptr(to, to_arg);              // reload
     gen_write_ref_array_post_barrier(to, count);
-    __ xorptr(rax, rax);                  // return 0 on success
 
     // Common exit point (success or failure).
     __ BIND(L_done);
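
The hunk above reworks the exit paths of the x86_32 checkcast_arraycopy stub so that the success and failure paths share a single post-barrier site, and the barrier is skipped entirely when the failure happened before any oop was copied. A minimal C-level sketch of that control flow, with illustrative names rather than HotSpot macro-assembler calls:

    #include <cstdio>

    // Stand-in for gen_write_ref_array_post_barrier (illustration only).
    static void post_barrier(int copied) {
      std::printf("card-mark %d oops\n", copied);
    }

    // C-level model of the reworked exit paths: one shared barrier site,
    // skipped on the failure path when no oops were copied (K == 0).
    static int checkcast_copy_exit(int total, int remaining, bool success) {
      int copied = success ? total : total - remaining;  // K
      int ret    = success ? 0 : ~copied;                // rax: 0, or -1 ^ K
      if (!success && copied == 0) {
        return ret;          // K == 0: nothing was copied, skip the post barrier
      }
      post_barrier(copied);  // L_post_barrier: the single shared barrier site
      return ret;
    }

    int main() {
      checkcast_copy_exit(8, 0, true);   // success: barrier covers all 8 oops
      checkcast_copy_exit(8, 5, false);  // failure after 3: barrier covers 3
      return 0;
    }

The x86_64 hunk further below makes the same structural change around its L_post_barrier label.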
--- a/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Thu May 23 23:04:33 2013 -0700
+++ b/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Fri May 24 10:21:12 2013 -0700
@@ -1217,27 +1217,28 @@
   //
   //  Input:
   //     start    - register containing starting address of destination array
-  //     end      - register containing ending address of destination array
+  //     count    - elements count
   //     scratch  - scratch register
   //
   //  The input registers are overwritten.
-  //  The ending address is inclusive.
-  void  gen_write_ref_array_post_barrier(Register start, Register end, Register scratch) {
-    assert_different_registers(start, end, scratch);
+  //
+  void  gen_write_ref_array_post_barrier(Register start, Register count, Register scratch) {
+    assert_different_registers(start, count, scratch);
     BarrierSet* bs = Universe::heap()->barrier_set();
     switch (bs->kind()) {
       case BarrierSet::G1SATBCT:
       case BarrierSet::G1SATBCTLogging:
-
         {
-          __ pusha();                      // push registers (overkill)
-          // must compute element count unless barrier set interface is changed (other platforms supply count)
-          assert_different_registers(start, end, scratch);
-          __ lea(scratch, Address(end, BytesPerHeapOop));
-          __ subptr(scratch, start);               // subtract start to get #bytes
-          __ shrptr(scratch, LogBytesPerHeapOop);  // convert to element count
-          __ mov(c_rarg0, start);
-          __ mov(c_rarg1, scratch);
+          __ pusha();             // push registers (overkill)
+          if (c_rarg0 == count) { // On win64 c_rarg0 == rcx
+            assert_different_registers(c_rarg1, start);
+            __ mov(c_rarg1, count);
+            __ mov(c_rarg0, start);
+          } else {
+            assert_different_registers(c_rarg0, count);
+            __ mov(c_rarg0, start);
+            __ mov(c_rarg1, count);
+          }
           __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), 2);
           __ popa();
         }
@@ -1249,22 +1250,16 @@
           assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
 
           Label L_loop;
-
-           __ shrptr(start, CardTableModRefBS::card_shift);
-           __ addptr(end, BytesPerHeapOop);
-           __ shrptr(end, CardTableModRefBS::card_shift);
-           __ subptr(end, start); // number of bytes to copy
-
-          intptr_t disp = (intptr_t) ct->byte_map_base;
-          if (Assembler::is_simm32(disp)) {
-            Address cardtable(noreg, noreg, Address::no_scale, disp);
-            __ lea(scratch, cardtable);
-          } else {
-            ExternalAddress cardtable((address)disp);
-            __ lea(scratch, cardtable);
-          }
-
-          const Register count = end; // 'end' register contains bytes count now
+          const Register end = count;
+
+          __ leaq(end, Address(start, count, TIMES_OOP, 0));  // end == start+count*oop_size
+          __ subptr(end, BytesPerHeapOop); // end - 1 to make inclusive
+          __ shrptr(start, CardTableModRefBS::card_shift);
+          __ shrptr(end,   CardTableModRefBS::card_shift);
+          __ subptr(end, start); // end --> cards count
+
+          int64_t disp = (int64_t) ct->byte_map_base;
+          __ mov64(scratch, disp);
           __ addptr(start, scratch);
         __ BIND(L_loop);
           __ movb(Address(start, count, Address::times_1), 0);
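
The rewritten barrier above takes (start, count) and derives the inclusive card range itself instead of receiving an end pointer. A small sketch of that arithmetic, assuming 512-byte cards (card_shift 9) and 8-byte heap oops:

    #include <cstdint>
    #include <cstdio>

    // Model of the count-based card range computed by the new code: the
    // inclusive end address is derived from (start, count) rather than
    // being passed in by the caller.
    int main() {
      const uintptr_t start    = 0x10000;  // address of the first copied oop
      const size_t    count    = 300;      // oops copied
      const size_t    oop_size = 8;        // BytesPerHeapOop (assumed)
      uintptr_t end  = start + count * oop_size - oop_size;  // last oop, inclusive
      uintptr_t from = start >> 9;         // CardTableModRefBS::card_shift (assumed 9)
      uintptr_t to   = end   >> 9;
      // The stub's loop then dirties cards [from, to], i.e. (to - from) + 1 cards.
      std::printf("cards %#jx..%#jx, %ju in total\n",
                  (uintmax_t)from, (uintmax_t)to, (uintmax_t)(to - from + 1));
      return 0;
    }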
@@ -1916,8 +1911,7 @@
 
   __ BIND(L_exit);
     if (is_oop) {
-      __ leaq(end_to, Address(saved_to, dword_count, Address::times_4, -4));
-      gen_write_ref_array_post_barrier(saved_to, end_to, rax);
+      gen_write_ref_array_post_barrier(saved_to, dword_count, rax);
     }
     restore_arg_regs();
     inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
@@ -2012,12 +2006,10 @@
     // Copy in multi-bytes chunks
     copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
 
-   __ bind(L_exit);
-     if (is_oop) {
-       Register end_to = rdx;
-       __ leaq(end_to, Address(to, dword_count, Address::times_4, -4));
-       gen_write_ref_array_post_barrier(to, end_to, rax);
-     }
+  __ BIND(L_exit);
+    if (is_oop) {
+      gen_write_ref_array_post_barrier(to, dword_count, rax);
+    }
     restore_arg_regs();
     inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
     __ xorptr(rax, rax); // return 0
@@ -2055,6 +2047,7 @@
     const Register end_from    = from; // source array end address
     const Register end_to      = rcx;  // destination array end address
     const Register saved_to    = to;
+    const Register saved_count = r11;
     // End pointers are inclusive, and if count is not zero they point
     // to the last unit copied:  end_to[0] := end_from[0]
 
@@ -2072,6 +2065,8 @@
                       // r9 and r10 may be used to save non-volatile registers
     // 'from', 'to' and 'qword_count' are now valid
     if (is_oop) {
+      // Save to and count for store barrier
+      __ movptr(saved_count, qword_count);
       // no registers are destroyed by this call
       gen_write_ref_array_pre_barrier(to, qword_count, dest_uninitialized);
     }
@@ -2104,7 +2099,7 @@
 
     if (is_oop) {
     __ BIND(L_exit);
-      gen_write_ref_array_post_barrier(saved_to, end_to, rax);
+      gen_write_ref_array_post_barrier(saved_to, saved_count, rax);
     }
     restore_arg_regs();
     if (is_oop) {
@@ -2187,8 +2182,7 @@
 
     if (is_oop) {
     __ BIND(L_exit);
-      __ lea(rcx, Address(to, saved_count, Address::times_8, -8));
-      gen_write_ref_array_post_barrier(to, rcx, rax);
+      gen_write_ref_array_post_barrier(to, saved_count, rax);
     }
     restore_arg_regs();
     if (is_oop) {
@@ -2375,20 +2369,20 @@
     // Register rdx = -1 * number of *remaining* oops, r14 = *total* oops.
     // Emit GC store barriers for the oops we have copied (r14 + rdx),
     // and report their number to the caller.
-    assert_different_registers(rax, r14_length, count, to, end_to, rcx);
-    __ lea(end_to, to_element_addr);
-    __ addptr(end_to, -heapOopSize);      // make an inclusive end pointer
-    gen_write_ref_array_post_barrier(to, end_to, rscratch1);
-    __ movptr(rax, r14_length);           // original oops
-    __ addptr(rax, count);                // K = (original - remaining) oops
-    __ notptr(rax);                       // report (-1^K) to caller
-    __ jmp(L_done);
+    assert_different_registers(rax, r14_length, count, to, end_to, rcx, rscratch1);
+    Label L_post_barrier;
+    __ addptr(r14_length, count);     // K = (original - remaining) oops
+    __ movptr(rax, r14_length);       // save the value
+    __ notptr(rax);                   // report (-1^K) to caller (does not affect flags)
+    __ jccb(Assembler::notZero, L_post_barrier);
+    __ jmp(L_done); // K == 0, nothing was copied, skip post barrier
 
     // Come here on success only.
     __ BIND(L_do_card_marks);
-    __ addptr(end_to, -heapOopSize);         // make an inclusive end pointer
-    gen_write_ref_array_post_barrier(to, end_to, rscratch1);
-    __ xorptr(rax, rax);                  // return 0 on success
+    __ xorptr(rax, rax);              // return 0 on success
+
+    __ BIND(L_post_barrier);
+    gen_write_ref_array_post_barrier(to, r14_length, rscratch1);
 
     // Common exit point (success or failure).
     __ BIND(L_done);
--- a/src/share/tools/hsdis/hsdis.c	Thu May 23 23:04:33 2013 -0700
+++ b/src/share/tools/hsdis/hsdis.c	Fri May 24 10:21:12 2013 -0700
@@ -27,6 +27,7 @@
    HotSpot PrintAssembly option.
 */
 
+#include <config.h> /* required by bfd.h */
 #include <libiberty.h>
 #include <bfd.h>
 #include <dis-asm.h>
--- a/src/share/vm/c1/c1_Compiler.cpp	Thu May 23 23:04:33 2013 -0700
+++ b/src/share/vm/c1/c1_Compiler.cpp	Fri May 24 10:21:12 2013 -0700
@@ -77,30 +77,42 @@
 }
 
 
-BufferBlob* Compiler::build_buffer_blob() {
+BufferBlob* Compiler::get_buffer_blob(ciEnv* env) {
+  // Allocate buffer blob once at startup since allocation for each
+  // compilation seems to be too expensive (at least on Intel win32).
+  BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
+  if (buffer_blob != NULL) {
+    return buffer_blob;
+  }
+
   // setup CodeBuffer.  Preallocate a BufferBlob of size
   // NMethodSizeLimit plus some extra space for constants.
   int code_buffer_size = Compilation::desired_max_code_buffer_size() +
     Compilation::desired_max_constant_size();
-  BufferBlob* blob = BufferBlob::create("Compiler1 temporary CodeBuffer",
-                                        code_buffer_size);
-  guarantee(blob != NULL, "must create initial code buffer");
-  return blob;
+
+  buffer_blob = BufferBlob::create("Compiler1 temporary CodeBuffer",
+                                   code_buffer_size);
+  if (buffer_blob == NULL) {
+    CompileBroker::handle_full_code_cache();
+    env->record_failure("CodeCache is full");
+  } else {
+    CompilerThread::current()->set_buffer_blob(buffer_blob);
+  }
+
+  return buffer_blob;
 }
 
 
 void Compiler::compile_method(ciEnv* env, ciMethod* method, int entry_bci) {
-  // Allocate buffer blob once at startup since allocation for each
-  // compilation seems to be too expensive (at least on Intel win32).
-  BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
+  BufferBlob* buffer_blob = Compiler::get_buffer_blob(env);
   if (buffer_blob == NULL) {
-    buffer_blob = build_buffer_blob();
-    CompilerThread::current()->set_buffer_blob(buffer_blob);
+    return;
   }
 
   if (!is_initialized()) {
     initialize();
   }
+
   // invoke compilation
   {
     // We are nested here because we need for the destructor
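
The new get_buffer_blob() turns the old unconditional build_buffer_blob() into a cached, failure-tolerant lookup: the blob is allocated once per compiler thread, and an allocation failure now fails the compilation instead of aborting the VM. A hedged sketch of the pattern, with stand-in types instead of BufferBlob and ciEnv:

    #include <new>

    struct BufferBlobStub { char payload[64 * 1024]; };  // stand-in for BufferBlob

    static thread_local BufferBlobStub* cached_blob = nullptr;

    BufferBlobStub* get_buffer_blob(bool* code_cache_full) {
      if (cached_blob != nullptr) {
        return cached_blob;              // fast path: reuse the per-thread blob
      }
      BufferBlobStub* blob = new (std::nothrow) BufferBlobStub();
      if (blob == nullptr) {
        *code_cache_full = true;         // env->record_failure("CodeCache is full")
        return nullptr;                  // caller returns without compiling
      }
      cached_blob = blob;                // CompilerThread::set_buffer_blob()
      return blob;
    }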
--- a/src/share/vm/c1/c1_Compiler.hpp	Thu May 23 23:04:33 2013 -0700
+++ b/src/share/vm/c1/c1_Compiler.hpp	Fri May 24 10:21:12 2013 -0700
@@ -46,7 +46,7 @@
 
   virtual bool is_c1()                           { return true; };
 
-  BufferBlob* build_buffer_blob();
+  BufferBlob* get_buffer_blob(ciEnv* env);
 
   // Missing feature tests
   virtual bool supports_native()                 { return true; }
--- a/src/share/vm/code/codeCache.cpp	Thu May 23 23:04:33 2013 -0700
+++ b/src/share/vm/code/codeCache.cpp	Fri May 24 10:21:12 2013 -0700
@@ -622,6 +622,15 @@
   return (address)_heap->high();
 }
 
+/**
+ * Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache
+ * is free, reverse_free_ratio() returns 4.
+ */
+double CodeCache::reverse_free_ratio() {
+  double unallocated_capacity = (double)(CodeCache::unallocated_capacity() - CodeCacheMinimumFreeSpace);
+  double max_capacity = (double)CodeCache::max_capacity();
+  return max_capacity / unallocated_capacity;
+}
 
 void icache_init();
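
A short worked example of reverse_free_ratio() with made-up sizes; the result is slightly above the naive inverse of the free fraction because CodeCacheMinimumFreeSpace is deducted from the usable free space:

    #include <cstdio>

    // Worked example for reverse_free_ratio(); all byte values are assumptions.
    int main() {
      double max_capacity = 48.0 * 1024 * 1024;   // 48M code cache
      double min_free     = 500.0 * 1024;         // CodeCacheMinimumFreeSpace
      double unallocated  = 12.0 * 1024 * 1024;   // 25% of the cache is unallocated
      double ratio = max_capacity / (unallocated - min_free);
      std::printf("reverse free ratio = %.2f\n", ratio);   // ~4.17, not exactly 4
      return 0;
    }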
 
--- a/src/share/vm/code/codeCache.hpp	Thu May 23 23:04:33 2013 -0700
+++ b/src/share/vm/code/codeCache.hpp	Fri May 24 10:21:12 2013 -0700
@@ -163,6 +163,7 @@
   static size_t  max_capacity()                  { return _heap->max_capacity(); }
   static size_t  unallocated_capacity()          { return _heap->unallocated_capacity(); }
   static bool    needs_flushing()                { return unallocated_capacity() < CodeCacheFlushingMinimumFreeSpace; }
+  static double  reverse_free_ratio();
 
   static bool needs_cache_clean()                { return _needs_cache_clean; }
   static void set_needs_cache_clean(bool v)      { _needs_cache_clean = v;    }
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.cpp	Thu May 23 23:04:33 2013 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.cpp	Fri May 24 10:21:12 2013 -0700
@@ -51,14 +51,6 @@
 }
 
 template <class Chunk>
-AdaptiveFreeList<Chunk>::AdaptiveFreeList(Chunk* fc) : FreeList<Chunk>(fc), _hint(0) {
-  init_statistics();
-#ifndef PRODUCT
-  _allocation_stats.set_returned_bytes(size() * HeapWordSize);
-#endif
-}
-
-template <class Chunk>
 void AdaptiveFreeList<Chunk>::initialize() {
   FreeList<Chunk>::initialize();
   set_hint(0);
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp	Thu May 23 23:04:33 2013 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp	Fri May 24 10:21:12 2013 -0700
@@ -55,7 +55,6 @@
  public:
 
   AdaptiveFreeList();
-  AdaptiveFreeList(Chunk* fc);
 
   using FreeList<Chunk>::assert_proper_lock_protection;
 #ifdef ASSERT
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Thu May 23 23:04:33 2013 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Fri May 24 10:21:12 2013 -0700
@@ -153,8 +153,6 @@
       _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
                                               "a freelist par lock",
                                               true);
-      if (_indexedFreeListParLocks[i] == NULL)
-        vm_exit_during_initialization("Could not allocate a par lock");
       DEBUG_ONLY(
         _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
       )
@@ -285,6 +283,7 @@
       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
       _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
     }
+    coalBirth(mr.word_size());
   }
   _promoInfo.reset();
   _smallLinearAllocBlock._ptr = NULL;
@@ -1762,7 +1761,7 @@
   }
   ec->set_size(size);
   debug_only(ec->mangleFreed(size));
-  if (size < SmallForDictionary) {
+  if (size < SmallForDictionary && ParallelGCThreads != 0) {
     lock = _indexedFreeListParLocks[size];
   }
   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Thu May 23 23:04:33 2013 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri May 24 10:21:12 2013 -0700
@@ -3381,7 +3381,6 @@
   assert_locked_or_safepoint(Heap_lock);
   bool result = _virtual_space.expand_by(bytes);
   if (result) {
-    HeapWord* old_end = _cmsSpace->end();
     size_t new_word_size =
       heap_word_size(_virtual_space.committed_size());
     MemRegion mr(_cmsSpace->bottom(), new_word_size);
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Thu May 23 23:04:33 2013 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Fri May 24 10:21:12 2013 -0700
@@ -485,10 +485,6 @@
     assert(!span.is_empty(), "Empty span could spell trouble");
   }
 
-  void do_object(oop obj) {
-    assert(false, "not to be invoked");
-  }
-
   bool do_object_b(oop obj);
 };
 
@@ -1536,9 +1532,6 @@
     _bit_map(bit_map),
     _par_scan_closure(cl) { }
 
-  void do_object(oop obj) {
-    guarantee(false, "Call do_object_b(oop, MemRegion) instead");
-  }
   bool do_object_b(oop obj) {
     guarantee(false, "Call do_object_b(oop, MemRegion) form instead");
     return false;
--- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Thu May 23 23:04:33 2013 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Fri May 24 10:21:12 2013 -0700
@@ -44,9 +44,6 @@
  public:
   G1CMIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) { }
 
-  void do_object(oop obj) {
-    ShouldNotCallThis();
-  }
   bool do_object_b(oop obj);
 };
 
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu May 23 23:04:33 2013 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri May 24 10:21:12 2013 -0700
@@ -5090,7 +5090,6 @@
   G1CollectedHeap* _g1;
 public:
   G1AlwaysAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
-  void do_object(oop p) { assert(false, "Do not call."); }
   bool do_object_b(oop p) {
     if (p != NULL) {
       return true;
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu May 23 23:04:33 2013 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri May 24 10:21:12 2013 -0700
@@ -165,7 +165,6 @@
   G1CollectedHeap* _g1;
 public:
   G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
-  void do_object(oop p) { assert(false, "Do not call."); }
   bool do_object_b(oop p);
 };
 
--- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Thu May 23 23:04:33 2013 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Fri May 24 10:21:12 2013 -0700
@@ -242,11 +242,13 @@
     PerRegionTable* cur = _free_list;
     size_t res = 0;
     while (cur != NULL) {
-      res += sizeof(PerRegionTable);
+      res += cur->mem_size();
       cur = cur->next();
     }
     return res;
   }
+
+  static void test_fl_mem_size();
 };
 
 PerRegionTable* PerRegionTable::_free_list = NULL;
@@ -1149,6 +1151,19 @@
 }
 
 #ifndef PRODUCT
+void PerRegionTable::test_fl_mem_size() {
+  PerRegionTable* dummy = alloc(NULL);
+  free(dummy);
+  guarantee(dummy->mem_size() == fl_mem_size(), "fl_mem_size() does not return the correct element size");
+  // try to reset the state
+  _free_list = NULL;
+  delete dummy;
+}
+
+void HeapRegionRemSet::test_prt() {
+  PerRegionTable::test_fl_mem_size();
+}
+
 void HeapRegionRemSet::test() {
   os::sleep(Thread::current(), (jlong)5000, false);
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
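
The fl_mem_size() fix above charges each free-list element its real footprint, which the new test_fl_mem_size() unit test then checks against a freshly allocated element. A simplified stand-alone model of the corrected accounting (the types are stand-ins for the G1 originals):

    #include <cstddef>

    struct PRT {
      PRT*   next;
      size_t bitmap_bytes;                     // per-region bitmap trailing the header
      size_t mem_size() const { return sizeof(*this) + bitmap_bytes; }
    };

    static size_t fl_mem_size(const PRT* free_list) {
      size_t res = 0;
      for (const PRT* cur = free_list; cur != nullptr; cur = cur->next) {
        res += cur->mem_size();                // was: res += sizeof(PerRegionTable)
      }
      return res;
    }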
--- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Thu May 23 23:04:33 2013 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Fri May 24 10:21:12 2013 -0700
@@ -338,6 +338,7 @@
 
   // Run unit tests.
 #ifndef PRODUCT
+  static void test_prt();
   static void test();
 #endif
 };
--- a/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp	Thu May 23 23:04:33 2013 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp	Fri May 24 10:21:12 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -54,18 +54,18 @@
   const size_t raw_bytes = words * sizeof(idx_t);
   const size_t page_sz = os::page_size_for_region(raw_bytes, raw_bytes, 10);
   const size_t granularity = os::vm_allocation_granularity();
-  const size_t bytes = align_size_up(raw_bytes, MAX2(page_sz, granularity));
+  _reserved_byte_size = align_size_up(raw_bytes, MAX2(page_sz, granularity));
 
   const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
     MAX2(page_sz, granularity);
-  ReservedSpace rs(bytes, rs_align, rs_align > 0);
+  ReservedSpace rs(_reserved_byte_size, rs_align, rs_align > 0);
   os::trace_page_sizes("par bitmap", raw_bytes, raw_bytes, page_sz,
                        rs.base(), rs.size());
 
   MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
 
   _virtual_space = new PSVirtualSpace(rs, page_sz);
-  if (_virtual_space != NULL && _virtual_space->expand_by(bytes)) {
+  if (_virtual_space != NULL && _virtual_space->expand_by(_reserved_byte_size)) {
     _region_start = covered_region.start();
     _region_size = covered_region.word_size();
     idx_t* map = (idx_t*)_virtual_space->reserved_low_addr();
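
Keeping the rounded-up reservation in _reserved_byte_size lets the initialization failure message (see the psParallelCompact.cpp hunk below) report how much memory was actually requested. A sketch of that rounding, with example alignment values rather than any particular platform's:

    #include <cstddef>
    #include <cstdio>

    static size_t align_size_up(size_t size, size_t alignment) {
      return (size + alignment - 1) & ~(alignment - 1);  // alignment: power of two
    }

    int main() {
      size_t raw_bytes   = 1234567;   // raw bitmap payload in bytes
      size_t page_sz     = 4096;      // os::page_size_for_region(...) (assumed)
      size_t granularity = 65536;     // os::vm_allocation_granularity() (assumed)
      size_t reserved = align_size_up(raw_bytes, page_sz > granularity ? page_sz
                                                                       : granularity);
      std::printf("raw %zu -> reserved %zu bytes\n", raw_bytes, reserved);
      return 0;
    }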
--- a/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.hpp	Thu May 23 23:04:33 2013 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.hpp	Fri May 24 10:21:12 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -131,6 +131,8 @@
   inline size_t    region_size() const;
   inline size_t    size() const;
 
+  size_t reserved_byte_size() const { return _reserved_byte_size; }
+
   // Convert a heap address to/from a bit index.
   inline idx_t     addr_to_bit(HeapWord* addr) const;
   inline HeapWord* bit_to_addr(idx_t bit) const;
@@ -176,10 +178,11 @@
   BitMap          _beg_bits;
   BitMap          _end_bits;
   PSVirtualSpace* _virtual_space;
+  size_t          _reserved_byte_size;
 };
 
 inline ParMarkBitMap::ParMarkBitMap():
-  _beg_bits(), _end_bits(), _region_start(NULL), _region_size(0), _virtual_space(NULL)
+  _beg_bits(), _end_bits(), _region_start(NULL), _region_size(0), _virtual_space(NULL), _reserved_byte_size(0)
 { }
 
 inline void ParMarkBitMap::clear_range(idx_t beg, idx_t end)
--- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Thu May 23 23:04:33 2013 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Fri May 24 10:21:12 2013 -0700
@@ -580,7 +580,6 @@
 // This should be moved to the shared markSweep code!
 class PSAlwaysTrueClosure: public BoolObjectClosure {
 public:
-  void do_object(oop p) { ShouldNotReachHere(); }
   bool do_object_b(oop p) { return true; }
 };
 static PSAlwaysTrueClosure always_true;
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Thu May 23 23:04:33 2013 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Fri May 24 10:21:12 2013 -0700
@@ -356,6 +356,7 @@
   _region_start = 0;
 
   _region_vspace = 0;
+  _reserved_byte_size = 0;
   _region_data = 0;
   _region_count = 0;
 }
@@ -382,11 +383,11 @@
   const size_t raw_bytes = count * element_size;
   const size_t page_sz = os::page_size_for_region(raw_bytes, raw_bytes, 10);
   const size_t granularity = os::vm_allocation_granularity();
-  const size_t bytes = align_size_up(raw_bytes, MAX2(page_sz, granularity));
+  _reserved_byte_size = align_size_up(raw_bytes, MAX2(page_sz, granularity));
 
   const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
     MAX2(page_sz, granularity);
-  ReservedSpace rs(bytes, rs_align, rs_align > 0);
+  ReservedSpace rs(_reserved_byte_size, rs_align, rs_align > 0);
   os::trace_page_sizes("par compact", raw_bytes, raw_bytes, page_sz, rs.base(),
                        rs.size());
 
@@ -394,7 +395,7 @@
 
   PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz);
   if (vspace != 0) {
-    if (vspace->expand_by(bytes)) {
+    if (vspace->expand_by(_reserved_byte_size)) {
       return vspace;
     }
     delete vspace;
@@ -781,7 +782,6 @@
 
 PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;
 
-void PSParallelCompact::IsAliveClosure::do_object(oop p)   { ShouldNotReachHere(); }
 bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }
 
 void PSParallelCompact::KeepAliveClosure::do_oop(oop* p)       { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
@@ -841,14 +841,18 @@
   initialize_dead_wood_limiter();
 
   if (!_mark_bitmap.initialize(mr)) {
-    vm_shutdown_during_initialization("Unable to allocate bit map for "
-      "parallel garbage collection for the requested heap size.");
+    vm_shutdown_during_initialization(
+      err_msg("Unable to allocate " SIZE_FORMAT "KB bitmaps for parallel "
+      "garbage collection for the requested " SIZE_FORMAT "KB heap.",
+      _mark_bitmap.reserved_byte_size()/K, mr.byte_size()/K));
     return false;
   }
 
   if (!_summary_data.initialize(mr)) {
-    vm_shutdown_during_initialization("Unable to allocate tables for "
-      "parallel garbage collection for the requested heap size.");
+    vm_shutdown_during_initialization(
+      err_msg("Unable to allocate " SIZE_FORMAT "KB card tables for parallel "
+      "garbage collection for the requested " SIZE_FORMAT "KB heap.",
+      _summary_data.reserved_byte_size()/K, mr.byte_size()/K));
     return false;
   }
 
@@ -2413,7 +2417,6 @@
 // This should be moved to the shared markSweep code!
 class PSAlwaysTrueClosure: public BoolObjectClosure {
 public:
-  void do_object(oop p) { ShouldNotReachHere(); }
   bool do_object_b(oop p) { return true; }
 };
 static PSAlwaysTrueClosure always_true;
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Thu May 23 23:04:33 2013 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Fri May 24 10:21:12 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -347,6 +347,7 @@
   bool initialize(MemRegion covered_region);
 
   size_t region_count() const { return _region_count; }
+  size_t reserved_byte_size() const { return _reserved_byte_size; }
 
   // Convert region indices to/from RegionData pointers.
   inline RegionData* region(size_t region_idx) const;
@@ -420,6 +421,7 @@
 #endif  // #ifdef ASSERT
 
   PSVirtualSpace* _region_vspace;
+  size_t          _reserved_byte_size;
   RegionData*     _region_data;
   size_t          _region_count;
 };
@@ -784,7 +786,6 @@
   //
   class IsAliveClosure: public BoolObjectClosure {
    public:
-    virtual void do_object(oop p);
     virtual bool do_object_b(oop p);
   };
 
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Thu May 23 23:04:33 2013 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Fri May 24 10:21:12 2013 -0700
@@ -70,9 +70,6 @@
 // Define before use
 class PSIsAliveClosure: public BoolObjectClosure {
 public:
-  void do_object(oop p) {
-    assert(false, "Do not call.");
-  }
   bool do_object_b(oop p) {
     return (!PSScavenge::is_obj_in_young((HeapWord*) p)) || p->is_forwarded();
   }
--- a/src/share/vm/gc_implementation/shared/markSweep.cpp	Thu May 23 23:04:33 2013 -0700
+++ b/src/share/vm/gc_implementation/shared/markSweep.cpp	Fri May 24 10:21:12 2013 -0700
@@ -166,7 +166,6 @@
 
 MarkSweep::IsAliveClosure   MarkSweep::is_alive;
 
-void MarkSweep::IsAliveClosure::do_object(oop p)   { ShouldNotReachHere(); }
 bool MarkSweep::IsAliveClosure::do_object_b(oop p) { return p->is_gc_marked(); }
 
 MarkSweep::KeepAliveClosure MarkSweep::keep_alive;
--- a/src/share/vm/gc_implementation/shared/markSweep.hpp	Thu May 23 23:04:33 2013 -0700
+++ b/src/share/vm/gc_implementation/shared/markSweep.hpp	Fri May 24 10:21:12 2013 -0700
@@ -88,7 +88,6 @@
   // Used for java/lang/ref handling
   class IsAliveClosure: public BoolObjectClosure {
    public:
-    virtual void do_object(oop p);
     virtual bool do_object_b(oop p);
   };
 
--- a/src/share/vm/memory/defNewGeneration.cpp	Thu May 23 23:04:33 2013 -0700
+++ b/src/share/vm/memory/defNewGeneration.cpp	Fri May 24 10:21:12 2013 -0700
@@ -50,9 +50,6 @@
 DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) {
   assert(g->level() == 0, "Optimized for youngest gen.");
 }
-void DefNewGeneration::IsAliveClosure::do_object(oop p) {
-  assert(false, "Do not call.");
-}
 bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
   return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded();
 }
--- a/src/share/vm/memory/defNewGeneration.hpp	Thu May 23 23:04:33 2013 -0700
+++ b/src/share/vm/memory/defNewGeneration.hpp	Fri May 24 10:21:12 2013 -0700
@@ -150,7 +150,6 @@
     Generation* _g;
   public:
     IsAliveClosure(Generation* g);
-    void do_object(oop p);
     bool do_object_b(oop p);
   };
 
--- a/src/share/vm/memory/freeList.cpp	Thu May 23 23:04:33 2013 -0700
+++ b/src/share/vm/memory/freeList.cpp	Fri May 24 10:21:12 2013 -0700
@@ -55,17 +55,6 @@
 }
 
 template <class Chunk>
-FreeList<Chunk>::FreeList(Chunk* fc) :
-  _head(fc), _tail(fc)
-#ifdef ASSERT
-  , _protecting_lock(NULL)
-#endif
-{
-  _size         = fc->size();
-  _count        = 1;
-}
-
-template <class Chunk>
 void FreeList<Chunk>::link_head(Chunk* v) {
   assert_proper_lock_protection();
   set_head(v);
--- a/src/share/vm/memory/freeList.hpp	Thu May 23 23:04:33 2013 -0700
+++ b/src/share/vm/memory/freeList.hpp	Fri May 24 10:21:12 2013 -0700
@@ -80,8 +80,6 @@
   // Constructor
   // Construct a list without any entries.
   FreeList();
-  // Construct a list with "fc" as the first (and lone) entry in the list.
-  FreeList(Chunk_t* fc);
 
   // Do initialization
   void initialize();
@@ -177,9 +175,6 @@
   // found.  Return NULL if "fc" is not found.
   bool verify_chunk_in_free_list(Chunk_t* fc) const;
 
-  // Stats verification
-//  void verify_stats() const { ShouldNotReachHere(); };
-
   // Printing support
   static void print_labels_on(outputStream* st, const char* c);
   void print_on(outputStream* st, const char* c = NULL) const;
--- a/src/share/vm/memory/iterator.hpp	Thu May 23 23:04:33 2013 -0700
+++ b/src/share/vm/memory/iterator.hpp	Fri May 24 10:21:12 2013 -0700
@@ -158,7 +158,7 @@
 };
 
 
-class BoolObjectClosure : public ObjectClosure {
+class BoolObjectClosure : public Closure {
  public:
   virtual bool do_object_b(oop obj) = 0;
 };
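
This one-line base-class change is what allows all the do_object() stubs to be deleted throughout this changeset: BoolObjectClosure no longer inherits ObjectClosure's pure virtual do_object(), so predicates stop needing "Do not call" asserts. A minimal model of the hierarchy, with void* standing in for oop:

    struct Closure {
      virtual ~Closure() {}
    };

    struct ObjectClosure : public Closure {
      virtual void do_object(void* obj) = 0;   // still required of object closures
    };

    // Before this change, BoolObjectClosure derived from ObjectClosure, so
    // every predicate had to stub out do_object() with an assert. Deriving
    // directly from Closure leaves only the predicate itself:
    struct BoolObjectClosure : public Closure {
      virtual bool do_object_b(void* obj) = 0;
    };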
--- a/src/share/vm/memory/metaspace.cpp	Thu May 23 23:04:33 2013 -0700
+++ b/src/share/vm/memory/metaspace.cpp	Fri May 24 10:21:12 2013 -0700
@@ -562,6 +562,9 @@
   // protects allocations and contains.
   Mutex* const _lock;
 
+  // Type of metadata allocated.
+  Metaspace::MetadataType _mdtype;
+
   // Chunk related size
   size_t _medium_chunk_bunch;
 
@@ -606,6 +609,7 @@
     return (BlockFreelist*) &_block_freelists;
   }
 
+  Metaspace::MetadataType mdtype() { return _mdtype; }
   VirtualSpaceList* vs_list() const    { return _vs_list; }
 
   Metachunk* current_chunk() const { return _current_chunk; }
@@ -626,7 +630,8 @@
   void initialize();
 
  public:
-  SpaceManager(Mutex* lock,
+  SpaceManager(Metaspace::MetadataType mdtype,
+               Mutex* lock,
                VirtualSpaceList* vs_list);
   ~SpaceManager();
 
@@ -2032,9 +2037,11 @@
   }
 }
 
-SpaceManager::SpaceManager(Mutex* lock,
+SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
+                           Mutex* lock,
                            VirtualSpaceList* vs_list) :
   _vs_list(vs_list),
+  _mdtype(mdtype),
   _allocated_blocks_words(0),
   _allocated_chunks_words(0),
   _allocated_chunks_count(0),
@@ -2050,27 +2057,27 @@
   _allocated_chunks_words = _allocated_chunks_words + words;
   _allocated_chunks_count++;
   // Global total of capacity in allocated Metachunks
-  MetaspaceAux::inc_capacity(words);
+  MetaspaceAux::inc_capacity(mdtype(), words);
   // Global total of allocated Metablocks.
   // used_words_slow() includes the overhead in each
   // Metachunk so include it in the used when the
   // Metachunk is first added (so only added once per
   // Metachunk).
-  MetaspaceAux::inc_used(Metachunk::overhead());
+  MetaspaceAux::inc_used(mdtype(), Metachunk::overhead());
 }
 
 void SpaceManager::inc_used_metrics(size_t words) {
   // Add to the per SpaceManager total
   Atomic::add_ptr(words, &_allocated_blocks_words);
   // Add to the global total
-  MetaspaceAux::inc_used(words);
+  MetaspaceAux::inc_used(mdtype(), words);
 }
 
 void SpaceManager::dec_total_from_size_metrics() {
-  MetaspaceAux::dec_capacity(allocated_chunks_words());
-  MetaspaceAux::dec_used(allocated_blocks_words());
+  MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words());
+  MetaspaceAux::dec_used(mdtype(), allocated_blocks_words());
   // Also deduct the overhead per Metachunk
-  MetaspaceAux::dec_used(allocated_chunks_count() * Metachunk::overhead());
+  MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
 }
 
 void SpaceManager::initialize() {
@@ -2470,8 +2477,8 @@
 // MetaspaceAux
 
 
-size_t MetaspaceAux::_allocated_capacity_words = 0;
-size_t MetaspaceAux::_allocated_used_words = 0;
+size_t MetaspaceAux::_allocated_capacity_words[] = {0, 0};
+size_t MetaspaceAux::_allocated_used_words[] = {0, 0};
 
 size_t MetaspaceAux::free_bytes() {
   size_t result = 0;
@@ -2484,40 +2491,40 @@
   return result;
 }
 
-void MetaspaceAux::dec_capacity(size_t words) {
+void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
   assert_lock_strong(SpaceManager::expand_lock());
-  assert(words <= _allocated_capacity_words,
+  assert(words <= allocated_capacity_words(mdtype),
     err_msg("About to decrement below 0: words " SIZE_FORMAT
-            " is greater than _allocated_capacity_words " SIZE_FORMAT,
-            words, _allocated_capacity_words));
-  _allocated_capacity_words = _allocated_capacity_words - words;
+            " is greater than _allocated_capacity_words[%u] " SIZE_FORMAT,
+            words, mdtype, allocated_capacity_words(mdtype)));
+  _allocated_capacity_words[mdtype] -= words;
 }
 
-void MetaspaceAux::inc_capacity(size_t words) {
+void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
   assert_lock_strong(SpaceManager::expand_lock());
   // Needs to be atomic
-  _allocated_capacity_words = _allocated_capacity_words + words;
+  _allocated_capacity_words[mdtype] += words;
 }
 
-void MetaspaceAux::dec_used(size_t words) {
-  assert(words <= _allocated_used_words,
+void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
+  assert(words <= allocated_used_words(mdtype),
     err_msg("About to decrement below 0: words " SIZE_FORMAT
-            " is greater than _allocated_used_words " SIZE_FORMAT,
-            words, _allocated_used_words));
+            " is greater than _allocated_used_words[%u] " SIZE_FORMAT,
+            words, mdtype, allocated_used_words(mdtype)));
   // For CMS deallocation of the Metaspaces occurs during the
   // sweep which is a concurrent phase.  Protection by the expand_lock()
   // is not enough since allocation is on a per Metaspace basis
   // and protected by the Metaspace lock.
   jlong minus_words = (jlong) - (jlong) words;
-  Atomic::add_ptr(minus_words, &_allocated_used_words);
+  Atomic::add_ptr(minus_words, &_allocated_used_words[mdtype]);
 }
 
-void MetaspaceAux::inc_used(size_t words) {
+void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
   // _allocated_used_words tracks allocations for
   // each piece of metadata.  Those allocations are
   // generally done concurrently by different application
   // threads so must be done atomically.
-  Atomic::add_ptr(words, &_allocated_used_words);
+  Atomic::add_ptr(words, &_allocated_used_words[mdtype]);
 }
 
 size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
@@ -2619,21 +2626,19 @@
                 SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
                 " reserved " SIZE_FORMAT "K",
                 allocated_capacity_bytes()/K, allocated_used_bytes()/K, reserved_in_bytes()/K);
-#if 0
-// The calls to capacity_bytes_slow() and used_bytes_slow() cause
-// lock ordering assertion failures with some collectors.  Do
-// not include this code until the lock ordering is fixed.
-  if (PrintGCDetails && Verbose) {
-    out->print_cr("  data space     "
-                  SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
-                  " reserved " SIZE_FORMAT "K",
-                  capacity_bytes_slow(nct)/K, used_bytes_slow(nct)/K, reserved_in_bytes(nct)/K);
-    out->print_cr("  class space    "
-                  SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
-                  " reserved " SIZE_FORMAT "K",
-                  capacity_bytes_slow(ct)/K, used_bytes_slow(ct)/K, reserved_in_bytes(ct)/K);
-  }
-#endif
+
+  out->print_cr("  data space     "
+                SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
+                " reserved " SIZE_FORMAT "K",
+                allocated_capacity_bytes(nct)/K,
+                allocated_used_bytes(nct)/K,
+                reserved_in_bytes(nct)/K);
+  out->print_cr("  class space    "
+                SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
+                " reserved " SIZE_FORMAT "K",
+                allocated_capacity_bytes(ct)/K,
+                allocated_used_bytes(ct)/K,
+                reserved_in_bytes(ct)/K);
 }
 
 // Print information for class space and data space separately.
@@ -2717,24 +2722,42 @@
 void MetaspaceAux::verify_capacity() {
 #ifdef ASSERT
   size_t running_sum_capacity_bytes = allocated_capacity_bytes();
-  // For purposes of the running sum of used, verify against capacity
+  // For purposes of the running sum of capacity, verify against capacity
   size_t capacity_in_use_bytes = capacity_bytes_slow();
   assert(running_sum_capacity_bytes == capacity_in_use_bytes,
     err_msg("allocated_capacity_words() * BytesPerWord " SIZE_FORMAT
             " capacity_bytes_slow()" SIZE_FORMAT,
             running_sum_capacity_bytes, capacity_in_use_bytes));
+  for (Metaspace::MetadataType i = Metaspace::ClassType;
+       i < Metaspace::MetadataTypeCount;
+       i = (Metaspace::MetadataType)(i + 1)) {
+    size_t capacity_in_use_bytes = capacity_bytes_slow(i);
+    assert(allocated_capacity_bytes(i) == capacity_in_use_bytes,
+      err_msg("allocated_capacity_bytes(%u) " SIZE_FORMAT
+              " capacity_bytes_slow(%u)" SIZE_FORMAT,
+              i, allocated_capacity_bytes(i), i, capacity_in_use_bytes));
+  }
 #endif
 }
 
 void MetaspaceAux::verify_used() {
 #ifdef ASSERT
   size_t running_sum_used_bytes = allocated_used_bytes();
-  // For purposes of the running sum of used, verify against capacity
+  // For purposes of the running sum of used, verify against used
   size_t used_in_use_bytes = used_bytes_slow();
   assert(allocated_used_bytes() == used_in_use_bytes,
     err_msg("allocated_used_bytes() " SIZE_FORMAT
-            " used_bytes_slow()()" SIZE_FORMAT,
+            " used_bytes_slow()" SIZE_FORMAT,
             allocated_used_bytes(), used_in_use_bytes));
+  for (Metaspace::MetadataType i = Metaspace::ClassType;
+       i < Metaspace::MetadataTypeCount;
+       i = (Metaspace::MetadataType)(i + 1)) {
+    size_t used_in_use_bytes = used_bytes_slow(i);
+    assert(allocated_used_bytes(i) == used_in_use_bytes,
+      err_msg("allocated_used_bytes(%u) " SIZE_FORMAT
+              " used_bytes_slow(%u)" SIZE_FORMAT,
+              i, allocated_used_bytes(i), i, used_in_use_bytes));
+  }
 #endif
 }
 
@@ -2835,7 +2858,7 @@
   assert(space_list() != NULL,
     "Metadata VirtualSpaceList has not been initialized");
 
-  _vsm = new SpaceManager(lock, space_list());
+  _vsm = new SpaceManager(Metaspace::NonClassType, lock, space_list());
   if (_vsm == NULL) {
     return;
   }
@@ -2849,7 +2872,7 @@
     "Class VirtualSpaceList has not been initialized");
 
   // Allocate SpaceManager for classes.
-  _class_vsm = new SpaceManager(lock, class_space_list());
+  _class_vsm = new SpaceManager(Metaspace::ClassType, lock, class_space_list());
   if (_class_vsm == NULL) {
     return;
   }
--- a/src/share/vm/memory/metaspace.hpp	Thu May 23 23:04:33 2013 -0700
+++ b/src/share/vm/memory/metaspace.hpp	Fri May 24 10:21:12 2013 -0700
@@ -86,7 +86,10 @@
   friend class MetaspaceAux;
 
  public:
-  enum MetadataType {ClassType, NonClassType};
+  enum MetadataType {ClassType = 0,
+                     NonClassType = ClassType + 1,
+                     MetadataTypeCount = ClassType + 2
+  };
   enum MetaspaceType {
     StandardMetaspaceType,
     BootMetaspaceType,
@@ -184,20 +187,22 @@
  public:
   // Running sum of space in all Metachunks that has been
   // allocated to a Metaspace.  This is used instead of
-  // iterating over all the classloaders
-  static size_t _allocated_capacity_words;
+  // iterating over all the classloaders. One for each
+  // type of Metadata
+  static size_t _allocated_capacity_words[Metaspace::MetadataTypeCount];
   // Running sum of space in all Metachunks that have
-  // are being used for metadata.
-  static size_t _allocated_used_words;
+  // are being used for metadata. One for each
+  // type of Metadata.
+  static size_t _allocated_used_words[Metaspace::MetadataTypeCount];
 
  public:
   // Decrement and increment _allocated_capacity_words
-  static void dec_capacity(size_t words);
-  static void inc_capacity(size_t words);
+  static void dec_capacity(Metaspace::MetadataType type, size_t words);
+  static void inc_capacity(Metaspace::MetadataType type, size_t words);
 
   // Decrement and increment _allocated_used_words
-  static void dec_used(size_t words);
-  static void inc_used(size_t words);
+  static void dec_used(Metaspace::MetadataType type, size_t words);
+  static void inc_used(Metaspace::MetadataType type, size_t words);
 
   // Total of space allocated to metadata in all Metaspaces.
   // This sums the space used in each Metachunk by
@@ -211,18 +216,32 @@
   static size_t free_chunks_total();
   static size_t free_chunks_total_in_bytes();
 
+  static size_t allocated_capacity_words(Metaspace::MetadataType mdtype) {
+    return _allocated_capacity_words[mdtype];
+  }
   static size_t allocated_capacity_words() {
-    return _allocated_capacity_words;
+    return _allocated_capacity_words[Metaspace::ClassType] +
+           _allocated_capacity_words[Metaspace::NonClassType];
+  }
+  static size_t allocated_capacity_bytes(Metaspace::MetadataType mdtype) {
+    return allocated_capacity_words(mdtype) * BytesPerWord;
   }
   static size_t allocated_capacity_bytes() {
-    return _allocated_capacity_words * BytesPerWord;
+    return allocated_capacity_words() * BytesPerWord;
   }
 
+  static size_t allocated_used_words(Metaspace::MetadataType mdtype) {
+    return _allocated_used_words[mdtype];
+  }
   static size_t allocated_used_words() {
-    return _allocated_used_words;
+    return _allocated_used_words[Metaspace::ClassType] +
+           _allocated_used_words[Metaspace::NonClassType];
+  }
+  static size_t allocated_used_bytes(Metaspace::MetadataType mdtype) {
+    return allocated_used_words(mdtype) * BytesPerWord;
   }
   static size_t allocated_used_bytes() {
-    return _allocated_used_words * BytesPerWord;
+    return allocated_used_words() * BytesPerWord;
   }
 
   static size_t free_bytes();
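
The accessors above split the running totals by metadata type, with the old single-counter view recovered by summing the two slots. A compact sketch of the indexed-counter pattern, with the real code's locking and atomics elided:

    #include <cstddef>

    enum MetadataType { ClassType = 0, NonClassType = 1, MetadataTypeCount = 2 };

    static size_t allocated_capacity_words[MetadataTypeCount] = {0, 0};

    static void inc_capacity(MetadataType t, size_t words) {
      allocated_capacity_words[t] += words;        // real code holds expand_lock()
    }

    static size_t total_allocated_capacity_words() {
      return allocated_capacity_words[ClassType] +
             allocated_capacity_words[NonClassType];  // old aggregate view
    }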
--- a/src/share/vm/memory/referenceProcessor.cpp	Thu May 23 23:04:33 2013 -0700
+++ b/src/share/vm/memory/referenceProcessor.cpp	Fri May 24 10:21:12 2013 -0700
@@ -252,7 +252,6 @@
   class AlwaysAliveClosure: public BoolObjectClosure {
   public:
     virtual bool do_object_b(oop obj) { return true; }
-    virtual void do_object(oop obj) { assert(false, "Don't call"); }
   };
 
   class CountHandleClosure: public OopClosure {
--- a/src/share/vm/memory/sharedHeap.cpp	Thu May 23 23:04:33 2013 -0700
+++ b/src/share/vm/memory/sharedHeap.cpp	Fri May 24 10:21:12 2013 -0700
@@ -212,7 +212,6 @@
 
 class AlwaysTrueClosure: public BoolObjectClosure {
 public:
-  void do_object(oop p) { ShouldNotReachHere(); }
   bool do_object_b(oop p) { return true; }
 };
 static AlwaysTrueClosure always_true;
--- a/src/share/vm/opto/loopnode.hpp	Thu May 23 23:04:33 2013 -0700
+++ b/src/share/vm/opto/loopnode.hpp	Fri May 24 10:21:12 2013 -0700
@@ -965,7 +965,7 @@
   // Has use internal to the vector set (ie. not in a phi at the loop head)
   bool has_use_internal_to_set( Node* n, VectorSet& vset, IdealLoopTree *loop );
   // clone "n" for uses that are outside of loop
-  void clone_for_use_outside_loop( IdealLoopTree *loop, Node* n, Node_List& worklist );
+  int  clone_for_use_outside_loop( IdealLoopTree *loop, Node* n, Node_List& worklist );
   // clone "n" for special uses that are in the not_peeled region
   void clone_for_special_use_inside_loop( IdealLoopTree *loop, Node* n,
                                           VectorSet& not_peel, Node_List& sink_list, Node_List& worklist );
--- a/src/share/vm/opto/loopopts.cpp	Thu May 23 23:04:33 2013 -0700
+++ b/src/share/vm/opto/loopopts.cpp	Fri May 24 10:21:12 2013 -0700
@@ -1939,8 +1939,8 @@
 
 //------------------------------ clone_for_use_outside_loop -------------------------------------
 // clone "n" for uses that are outside of loop
-void PhaseIdealLoop::clone_for_use_outside_loop( IdealLoopTree *loop, Node* n, Node_List& worklist ) {
-
+int PhaseIdealLoop::clone_for_use_outside_loop( IdealLoopTree *loop, Node* n, Node_List& worklist ) {
+  int cloned = 0;
   assert(worklist.size() == 0, "should be empty");
   for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
     Node* use = n->fast_out(j);
@@ -1960,6 +1960,7 @@
     // clone "n" and insert it between the inputs of "n" and the use outside the loop
     Node* n_clone = n->clone();
     _igvn.replace_input_of(use, j, n_clone);
+    cloned++;
     Node* use_c;
     if (!use->is_Phi()) {
       use_c = has_ctrl(use) ? get_ctrl(use) : use->in(0);
@@ -1977,6 +1978,7 @@
     }
 #endif
   }
+  return cloned;
 }
 
 
@@ -2495,6 +2497,7 @@
 
   // Evacuate nodes in peel region into the not_peeled region if possible
   uint new_phi_cnt = 0;
+  uint cloned_for_outside_use = 0;
   for (i = 0; i < peel_list.size();) {
     Node* n = peel_list.at(i);
 #if !defined(PRODUCT)
@@ -2513,8 +2516,7 @@
           // if not pinned and not a load (which maybe anti-dependent on a store)
           // and not a CMove (Matcher expects only bool->cmove).
           if ( n->in(0) == NULL && !n->is_Load() && !n->is_CMove() ) {
-            clone_for_use_outside_loop( loop, n, worklist );
-
+            cloned_for_outside_use += clone_for_use_outside_loop( loop, n, worklist );
             sink_list.push(n);
             peel     >>= n->_idx; // delete n from peel set.
             not_peel <<= n->_idx; // add n to not_peel set.
@@ -2551,6 +2553,12 @@
     // Inhibit more partial peeling on this loop
     assert(!head->is_partial_peel_loop(), "not partial peeled");
     head->mark_partial_peel_failed();
+    if (cloned_for_outside_use > 0) {
+      // Terminate this round of loop opts because
+      // the graph outside this loop was changed.
+      C->set_major_progress();
+      return true;
+    }
     return false;
   }
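
clone_for_use_outside_loop() now returns its clone count so that an abandoned partial peel can still report that the graph changed. A hedged sketch of the contract (illustrative, not the Ideal-graph API):

    static int clone_for_use_outside_loop(int uses_outside_loop) {
      // ... clone the node once per use outside the loop (elided) ...
      return uses_outside_loop;            // number of clones created
    }

    static bool partial_peel(int uses_outside_loop) {
      int cloned_for_outside_use = clone_for_use_outside_loop(uses_outside_loop);
      bool peeled = false;                 // suppose this attempt is abandoned
      if (!peeled && cloned_for_outside_use > 0) {
        // C->set_major_progress(): the graph outside the loop changed even
        // though peeling failed, so terminate this round and re-run loop opts.
        return true;
      }
      return peeled;
    }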
 
--- a/src/share/vm/prims/jni.cpp	Thu May 23 23:04:33 2013 -0700
+++ b/src/share/vm/prims/jni.cpp	Fri May 24 10:21:12 2013 -0700
@@ -5015,6 +5015,9 @@
 #ifndef PRODUCT
 
 #include "gc_interface/collectedHeap.hpp"
+#if INCLUDE_ALL_GCS
+#include "gc_implementation/g1/heapRegionRemSet.hpp"
+#endif
 #include "utilities/quickSort.hpp"
 #if INCLUDE_VM_STRUCTS
 #include "runtime/vmStructs.hpp"
@@ -5035,6 +5038,9 @@
 #if INCLUDE_VM_STRUCTS
     run_unit_test(VMStructs::test());
 #endif
+#if INCLUDE_ALL_GCS
+    run_unit_test(HeapRegionRemSet::test_prt());
+#endif
     tty->print_cr("All internal VM tests passed");
   }
 }
--- a/src/share/vm/runtime/advancedThresholdPolicy.cpp	Thu May 23 23:04:33 2013 -0700
+++ b/src/share/vm/runtime/advancedThresholdPolicy.cpp	Fri May 24 10:21:12 2013 -0700
@@ -68,7 +68,7 @@
   }
 #endif
 
-
+  set_increase_threshold_at_ratio();
   set_start_time(os::javaTimeMillis());
 }
 
@@ -205,6 +205,17 @@
   double queue_size = CompileBroker::queue_size(level);
   int comp_count = compiler_count(level);
   double k = queue_size / (feedback_k * comp_count) + 1;
+
+  // Increase C1 compile threshold when the code cache is filled more
+  // than specified by IncreaseFirstTierCompileThresholdAt percentage.
+  // The main intention is to keep enough free space for C2 compiled code
+  // to achieve peak performance if the code cache is under stress.
+  if ((TieredStopAtLevel == CompLevel_full_optimization) && (level != CompLevel_full_optimization))  {
+    double current_reverse_free_ratio = CodeCache::reverse_free_ratio();
+    if (current_reverse_free_ratio > _increase_threshold_at_ratio) {
+      k *= exp(current_reverse_free_ratio - _increase_threshold_at_ratio);
+    }
+  }
   return k;
 }
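
A worked example of the new scaling of the k factor above, using the default IncreaseFirstTierCompileThresholdAt=50 (so the ratio computed by set_increase_threshold_at_ratio() below is 2) and an assumed code cache that is only 25% free:

    #include <cmath>
    #include <cstdio>

    int main() {
      double at_percent = 50.0;                               // flag value (default)
      double threshold_ratio = 100.0 / (100.0 - at_percent);  // 2.0: kicks in below 50% free
      double reverse_free_ratio = 4.0;                        // only 25% of the cache is free
      double k = 1.0;                                         // base threshold scale
      if (reverse_free_ratio > threshold_ratio) {
        k *= std::exp(reverse_free_ratio - threshold_ratio);  // e^2: ~7.39x higher threshold
      }
      std::printf("C1 compile threshold scaled by %.2f\n", k);
      return 0;
    }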
 
--- a/src/share/vm/runtime/advancedThresholdPolicy.hpp	Thu May 23 23:04:33 2013 -0700
+++ b/src/share/vm/runtime/advancedThresholdPolicy.hpp	Fri May 24 10:21:12 2013 -0700
@@ -201,9 +201,12 @@
   // Is method profiled enough?
   bool is_method_profiled(Method* method);
 
+  double _increase_threshold_at_ratio;
+
 protected:
   void print_specific(EventType type, methodHandle mh, methodHandle imh, int bci, CompLevel level);
 
+  void set_increase_threshold_at_ratio() { _increase_threshold_at_ratio = 100 / (100 - (double)IncreaseFirstTierCompileThresholdAt); }
   void set_start_time(jlong t) { _start_time = t;    }
   jlong start_time() const     { return _start_time; }
 
--- a/src/share/vm/runtime/arguments.cpp	Thu May 23 23:04:33 2013 -0700
+++ b/src/share/vm/runtime/arguments.cpp	Fri May 24 10:21:12 2013 -0700
@@ -2629,6 +2629,16 @@
         return JNI_EINVAL;
       }
       FLAG_SET_CMDLINE(uintx, ReservedCodeCacheSize, (uintx)long_ReservedCodeCacheSize);
+      //-XX:IncreaseFirstTierCompileThresholdAt=
+      } else if (match_option(option, "-XX:IncreaseFirstTierCompileThresholdAt=", &tail)) {
+        uintx uint_IncreaseFirstTierCompileThresholdAt = 0;
+        if (!parse_uintx(tail, &uint_IncreaseFirstTierCompileThresholdAt, 0) || uint_IncreaseFirstTierCompileThresholdAt > 99) {
+          jio_fprintf(defaultStream::error_stream(),
+                      "Invalid value for IncreaseFirstTierCompileThresholdAt: %s. Should be between 0 and 99.\n",
+                      option->optionString);
+          return JNI_EINVAL;
+        }
+        FLAG_SET_CMDLINE(uintx, IncreaseFirstTierCompileThresholdAt, (uintx)uint_IncreaseFirstTierCompileThresholdAt);
     // -green
     } else if (match_option(option, "-green", &tail)) {
       jio_fprintf(defaultStream::error_stream(),
--- a/src/share/vm/runtime/globals.hpp	Thu May 23 23:04:33 2013 -0700
+++ b/src/share/vm/runtime/globals.hpp	Fri May 24 10:21:12 2013 -0700
@@ -3436,6 +3436,10 @@
           "Start profiling in interpreter if the counters exceed tier 3"    \
           "thresholds by the specified percentage")                         \
                                                                             \
+  product(uintx, IncreaseFirstTierCompileThresholdAt, 50,                   \
+          "Increase the compile threshold for C1 compilation if the code"   \
+          "cache is filled by the specified percentage.")                   \
+                                                                            \
   product(intx, TieredRateUpdateMinTime, 1,                                 \
           "Minimum rate sampling interval (in milliseconds)")               \
                                                                             \
--- a/src/share/vm/runtime/jniHandles.cpp	Thu May 23 23:04:33 2013 -0700
+++ b/src/share/vm/runtime/jniHandles.cpp	Fri May 24 10:21:12 2013 -0700
@@ -188,7 +188,6 @@
 class AlwaysAliveClosure: public BoolObjectClosure {
 public:
   bool do_object_b(oop obj) { return true; }
-  void do_object(oop obj) { assert(false, "Don't call"); }
 };
 
 class CountHandleClosure: public OopClosure {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/8010927/Test8010927.java	Fri May 24 10:21:12 2013 -0700
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8010927
+ * @summary Kitchensink crashed with SIGSEGV, Problematic frame: v ~StubRoutines::checkcast_arraycopy
+ * @library /testlibrary/whitebox /testlibrary
+ * @build Test8010927
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:. -Xmx64m -XX:NewSize=20971520 -XX:MaxNewSize=32m -XX:-UseTLAB -XX:-UseParNewGC -XX:-UseAdaptiveSizePolicy Test8010927
+ */
+
+import sun.hotspot.WhiteBox;
+import java.lang.reflect.Field;
+import sun.misc.Unsafe;
+
+/**
+ * The test creates uncommitted space between the old gen and the young gen
+ * by specifying a MaxNewSize bigger than NewSize.
+ * NewSize = 20971520 = (512*4K) * 10 for 4k pages.
+ * It then executes arraycopy() with an element type check into an array
+ * at the end of the survivor space, near the unused space.
+ */
+
+public class Test8010927 {
+
+  private static final Unsafe U;
+
+  static {
+    try {
+      Field unsafe = Unsafe.class.getDeclaredField("theUnsafe");
+      unsafe.setAccessible(true);
+      U = (Unsafe) unsafe.get(null);
+    } catch (Exception e) {
+      throw new Error(e);
+    }
+  }
+
+  public static Object[] o;
+
+  public static final boolean debug = Boolean.getBoolean("debug");
+
+  // 2 different object arrays with the same element type
+  static Test8010927[] masterA;
+  static Object[] masterB;
+  static final Test8010927 elem = new Test8010927();
+  static final WhiteBox wb = WhiteBox.getWhiteBox();
+
+  static final int obj_header_size = U.ARRAY_OBJECT_BASE_OFFSET;
+  static final int heap_oop_size = wb.getHeapOopSize();
+  static final int card_size = 512;
+  static final int one_card = (card_size - obj_header_size)/heap_oop_size;
+
+  static final int surv_size = 2112 * 1024;
+
+  // The array is too big to fit into the survivor space.
+  static final Object[] cache = new Object[(surv_size / card_size)];
+
+  public static void main(String[] args) {
+    masterA = new Test8010927[one_card];
+    masterB = new Object[one_card];
+    for (int i = 0; i < one_card; ++i) {
+      masterA[i] = elem;
+      masterB[i] = elem;
+    }
+
+    // Record the address of cache[] before the GC.
+    long low_limit = wb.getObjectAddress(cache);
+    System.gc();
+    // System.gc() above has moved 'cache' to the old gen.
+    long upper_limit = wb.getObjectAddress(cache);
+    if ((low_limit - upper_limit) > 0) { // subtraction works with unsigned values
+      // The old gen is placed before the young gen for ParallelOldGC.
+      upper_limit = low_limit + 21000000l; // +20971520
+    }
+    // Each A[one_card] size is 512 bytes,
+    // it will take about 40000 allocations to trigger GC.
+    // cache[] has 8192 elements so GC should happen
+    // each 5th iteration.
+    for(long l = 0; l < 20; l++) {
+      fill_heap();
+      if (debug) {
+        System.out.println("test oop_disjoint_arraycopy");
+      }
+      testA_arraycopy();
+      if (debug) {
+        System.out.println("test checkcast_arraycopy");
+      }
+      testB_arraycopy();
+      // Execute arraycopy to the topmost array in young gen
+      if (debug) {
+        int top_index = get_top_address(low_limit, upper_limit);
+        if (top_index >= 0) {
+          long addr = wb.getObjectAddress(cache[top_index]);
+          System.out.println("top_addr: 0x" + Long.toHexString(addr) + ", 0x" + Long.toHexString(addr + 512));
+        }
+      }
+    }
+  }
+  static void fill_heap() {
+    for (int i = 0; i < cache.length; ++i) {
+      o = new Test8010927[one_card];
+      System.arraycopy(masterA, 0, o, 0, masterA.length);
+      cache[i] = o;
+    }
+    for (long j = 0; j < 256; ++j) {
+      o = new Long[10000]; // to trigger GC
+    }
+  }
+  static void testA_arraycopy() {
+    for (int i = 0; i < cache.length; ++i) {
+      System.arraycopy(masterA, 0, cache[i], 0, masterA.length);
+    }
+  }
+  static void testB_arraycopy() {
+    for (int i = 0; i < cache.length; ++i) {
+      System.arraycopy(masterB, 0, cache[i], 0, masterB.length);
+    }
+  }
+  static int get_top_address(long min, long max) {
+    int index = -1;
+    long addr = min;
+    for (int i = 0; i < cache.length; ++i) {
+      long test = wb.getObjectAddress(cache[i]);
+      if (((test - addr) > 0) && ((max - test) > 0)) { // subtraction works with unsigned values
+        addr = test;
+        index = i;
+      }
+    }
+    return index;
+  }
+}