changeset 20525:e193bbae24ef hs25.40-b14

Merge
author amurillo
date Fri, 10 Oct 2014 03:07:08 -0700
parents e11846cf32bf (current diff) 85f4c4ecc963 (diff)
children 584df6794686
files test/gc/class_unloading/AllocateBeyondMetaspaceSize.java
diffstat 12 files changed, 212 insertions(+), 96 deletions(-)
--- a/make/hotspot_version	Wed Oct 08 12:38:41 2014 -0700
+++ b/make/hotspot_version	Fri Oct 10 03:07:08 2014 -0700
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=25
 HS_MINOR_VER=40
-HS_BUILD_NUMBER=13
+HS_BUILD_NUMBER=14
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
--- a/src/share/vm/c1/c1_LIRGenerator.cpp	Wed Oct 08 12:38:41 2014 -0700
+++ b/src/share/vm/c1/c1_LIRGenerator.cpp	Fri Oct 10 03:07:08 2014 -0700
@@ -2066,14 +2066,14 @@
   LIR_Opr base_op = base.result();
   LIR_Opr index_op = idx.result();
 #ifndef _LP64
-  if (x->base()->type()->tag() == longTag) {
+  if (base_op->type() == T_LONG) {
     base_op = new_register(T_INT);
     __ convert(Bytecodes::_l2i, base.result(), base_op);
   }
   if (x->has_index()) {
-    if (x->index()->type()->tag() == longTag) {
+    if (index_op->type() == T_LONG) {
       LIR_Opr long_index_op = index_op;
-      if (x->index()->type()->is_constant()) {
+      if (index_op->is_constant()) {
         long_index_op = new_register(T_LONG);
         __ move(index_op, long_index_op);
       }
@@ -2088,14 +2088,14 @@
   assert(!x->has_index() || index_op->type() == T_INT, "index should be an int");
 #else
   if (x->has_index()) {
-    if (x->index()->type()->tag() == intTag) {
-      if (!x->index()->type()->is_constant()) {
+    if (index_op->type() == T_INT) {
+      if (!index_op->is_constant()) {
         index_op = new_register(T_LONG);
         __ convert(Bytecodes::_i2l, idx.result(), index_op);
       }
     } else {
-      assert(x->index()->type()->tag() == longTag, "must be");
-      if (x->index()->type()->is_constant()) {
+      assert(index_op->type() == T_LONG, "must be");
+      if (index_op->is_constant()) {
         index_op = new_register(T_LONG);
         __ move(idx.result(), index_op);
       }
@@ -2176,12 +2176,12 @@
   LIR_Opr index_op = idx.result();
 
 #ifndef _LP64
-  if (x->base()->type()->tag() == longTag) {
+  if (base_op->type() == T_LONG) {
     base_op = new_register(T_INT);
     __ convert(Bytecodes::_l2i, base.result(), base_op);
   }
   if (x->has_index()) {
-    if (x->index()->type()->tag() == longTag) {
+    if (index_op->type() == T_LONG) {
       index_op = new_register(T_INT);
       __ convert(Bytecodes::_l2i, idx.result(), index_op);
     }
@@ -2191,7 +2191,7 @@
   assert(!x->has_index() || (index_op->type() == T_INT && !index_op->is_constant()), "index should be an non-constant int");
 #else
   if (x->has_index()) {
-    if (x->index()->type()->tag() == intTag) {
+    if (index_op->type() == T_INT) {
       index_op = new_register(T_LONG);
       __ convert(Bytecodes::_i2l, idx.result(), index_op);
     }
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Wed Oct 08 12:38:41 2014 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Fri Oct 10 03:07:08 2014 -0700
@@ -2733,10 +2733,12 @@
   }
 }
 
-void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
-  assert(fl->count() == 0, "Precondition.");
-  assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
-         "Precondition");
+// Used by par_get_chunk_of_blocks() for the chunks from the
+// indexed_free_lists.  Looks for a chunk with size that is a multiple
+// of "word_sz" and if found, splits it into "word_sz" chunks and add
+// to the free list "fl".  "n" is the maximum number of chunks to
+// be added to "fl".
+bool CompactibleFreeListSpace:: par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
 
   // We'll try all multiples of word_sz in the indexed set, starting with
   // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
@@ -2817,11 +2819,15 @@
                         Mutex::_no_safepoint_check_flag);
         ssize_t births = _indexedFreeList[word_sz].split_births() + num;
         _indexedFreeList[word_sz].set_split_births(births);
-        return;
+        return true;
       }
     }
+    return found;
   }
-  // Otherwise, we'll split a block from the dictionary.
+}
+
+FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) {
+
   FreeChunk* fc = NULL;
   FreeChunk* rem_fc = NULL;
   size_t rem;
@@ -2832,16 +2838,12 @@
       fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
                                   FreeBlockDictionary<FreeChunk>::atLeast);
       if (fc != NULL) {
-        _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
-        dictionary()->dict_census_update(fc->size(),
-                                       true /*split*/,
-                                       false /*birth*/);
         break;
       } else {
         n--;
       }
     }
-    if (fc == NULL) return;
+    if (fc == NULL) return NULL;
     // Otherwise, split up that block.
     assert((ssize_t)n >= 1, "Control point invariant");
     assert(fc->is_free(), "Error: should be a free block");
@@ -2863,10 +2865,14 @@
     // dictionary and return, leaving "fl" empty.
     if (n == 0) {
       returnChunkToDictionary(fc);
-      assert(fl->count() == 0, "We never allocated any blocks");
-      return;
+      return NULL;
     }
 
+    _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
+    dictionary()->dict_census_update(fc->size(),
+                                     true /*split*/,
+                                     false /*birth*/);
+
     // First return the remainder, if any.
     // Note that we hold the lock until we decide if we're going to give
     // back the remainder to the dictionary, since a concurrent allocation
@@ -2899,7 +2905,24 @@
     _indexedFreeList[rem].return_chunk_at_head(rem_fc);
     smallSplitBirth(rem);
   }
-  assert((ssize_t)n > 0 && fc != NULL, "Consistency");
+  assert(n * word_sz == fc->size(),
+    err_msg("Chunk size " SIZE_FORMAT " is not exactly splittable by "
+    SIZE_FORMAT " sized chunks of size " SIZE_FORMAT,
+    fc->size(), n, word_sz));
+  return fc;
+}
+
+void CompactibleFreeListSpace:: par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targetted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) {
+
+  FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targetted_number_of_chunks);
+
+  if (fc == NULL) {
+    return;
+  }
+
+  size_t n = fc->size() / word_sz;
+
+  assert((ssize_t)n > 0, "Consistency");
   // Now do the splitting up.
   // Must do this in reverse order, so that anybody attempting to
   // access the main chunk sees it as a single free block until we
@@ -2947,6 +2970,20 @@
   assert(fl->tail()->next() == NULL, "List invariant.");
 }
 
+void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
+  assert(fl->count() == 0, "Precondition.");
+  assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
+         "Precondition");
+
+  if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) {
+    // Got it
+    return;
+  }
+
+  // Otherwise, we'll split a block from the dictionary.
+  par_get_chunk_of_blocks_dictionary(word_sz, n, fl);
+}
+
 // Set up the space's par_seq_tasks structure for work claiming
 // for parallel rescan. See CMSParRemarkTask where this is currently used.
 // XXX Need to suitably abstract and generalize this and the next
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Wed Oct 08 12:38:41 2014 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Fri Oct 10 03:07:08 2014 -0700
@@ -172,6 +172,20 @@
   // list of size "word_sz", and must now be decremented.
   void par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);
 
+  // Used by par_get_chunk_of_blocks() for the chunks from the
+  // indexed_free_lists.
+  bool par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);
+
+  // Used by par_get_chunk_of_blocks_dictionary() to get a chunk
+  // evenly splittable into "n" "word_sz" chunks.  Returns that
+  // evenly splittable chunk.  May split a larger chunk to get the
+  // evenly splittable chunk.
+  FreeChunk* get_n_way_chunk_to_split(size_t word_sz, size_t n);
+
+  // Used by par_get_chunk_of_blocks() for the chunks from the
+  // dictionary.
+  void par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);
+
   // Allocation helper functions
   // Allocate using a strategy that takes from the indexed free lists
   // first.  This allocation strategy assumes a companion sweeping
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Oct 08 12:38:41 2014 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Oct 10 03:07:08 2014 -0700
@@ -2343,6 +2343,7 @@
     case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;
     case GCCause::_java_lang_system_gc:     return ExplicitGCInvokesConcurrent;
     case GCCause::_g1_humongous_allocation: return true;
+    case GCCause::_update_allocation_context_stats_inc: return true;
     default:                                return false;
   }
 }
--- a/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Wed Oct 08 12:38:41 2014 -0700
+++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Fri Oct 10 03:07:08 2014 -0700
@@ -95,8 +95,9 @@
   assert(!_should_initiate_conc_mark ||
   ((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
    (_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent) ||
-    _gc_cause == GCCause::_g1_humongous_allocation),
-         "only a GC locker, a System.gc() or a hum allocation induced GC should start a cycle");
+    _gc_cause == GCCause::_g1_humongous_allocation ||
+    _gc_cause == GCCause::_update_allocation_context_stats_inc),
+      "only a GC locker, a System.gc(), stats update or a hum allocation induced GC should start a cycle");
 
   if (_word_size > 0) {
     // An allocation has been requested. So, try to do that first.
--- a/src/share/vm/gc_interface/gcCause.cpp	Wed Oct 08 12:38:41 2014 -0700
+++ b/src/share/vm/gc_interface/gcCause.cpp	Fri Oct 10 03:07:08 2014 -0700
@@ -54,7 +54,8 @@
     case _wb_young_gc:
       return "WhiteBox Initiated Young GC";
 
-    case _update_allocation_context_stats:
+    case _update_allocation_context_stats_inc:
+    case _update_allocation_context_stats_full:
       return "Update Allocation Context Stats";
 
     case _no_gc:
--- a/src/share/vm/gc_interface/gcCause.hpp	Wed Oct 08 12:38:41 2014 -0700
+++ b/src/share/vm/gc_interface/gcCause.hpp	Fri Oct 10 03:07:08 2014 -0700
@@ -47,7 +47,8 @@
     _heap_inspection,
     _heap_dump,
     _wb_young_gc,
-    _update_allocation_context_stats,
+    _update_allocation_context_stats_inc,
+    _update_allocation_context_stats_full,
 
     /* implementation independent, but reserved for GC use */
     _no_gc,
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/osr/TestRangeCheck.java	Fri Oct 10 03:07:08 2014 -0700
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestRangeCheck
+ * @bug 8054883
+ * @summary Tests that range check is not skipped
+ */
+
+public class TestRangeCheck {
+    public static void main(String args[]) {
+        try {
+            test();
+            throw new AssertionError("Expected ArrayIndexOutOfBoundsException was not thrown");
+        } catch (ArrayIndexOutOfBoundsException e) {
+            System.out.println("Expected ArrayIndexOutOfBoundsException was thrown");
+        }
+    }
+
+    private static void test() {
+        int arr[] = new int[1];
+        int result = 1;
+
+        // provoke OSR compilation
+        for (int i = 0; i < Integer.MAX_VALUE; i++) {
+        }
+
+        if (result > 0 && arr[~result] > 0) {
+            arr[~result] = 0;
+        }
+    }
+}
--- a/test/gc/class_unloading/AllocateBeyondMetaspaceSize.java	Wed Oct 08 12:38:41 2014 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-import sun.hotspot.WhiteBox;
-
-class AllocateBeyondMetaspaceSize {
-  public static Object dummy;
-
-  public static void main(String [] args) {
-    if (args.length != 2) {
-      throw new IllegalArgumentException("Usage: <MetaspaceSize> <YoungGenSize>");
-    }
-
-    long metaspaceSize = Long.parseLong(args[0]);
-    long youngGenSize = Long.parseLong(args[1]);
-
-    run(metaspaceSize, youngGenSize);
-  }
-
-  private static void run(long metaspaceSize, long youngGenSize) {
-    WhiteBox wb = WhiteBox.getWhiteBox();
-
-    long allocationBeyondMetaspaceSize  = metaspaceSize * 2;
-    long metaspace = wb.allocateMetaspace(null, allocationBeyondMetaspaceSize);
-
-    triggerYoungGC(youngGenSize);
-
-    wb.freeMetaspace(null, metaspace, metaspace);
-  }
-
-  private static void triggerYoungGC(long youngGenSize) {
-    long approxAllocSize = 32 * 1024;
-    long numAllocations  = 2 * youngGenSize / approxAllocSize;
-
-    for (long i = 0; i < numAllocations; i++) {
-      dummy = new byte[(int)approxAllocSize];
-    }
-  }
-}
--- a/test/gc/class_unloading/TestCMSClassUnloadingEnabledHWM.java	Wed Oct 08 12:38:41 2014 -0700
+++ b/test/gc/class_unloading/TestCMSClassUnloadingEnabledHWM.java	Fri Oct 10 03:07:08 2014 -0700
@@ -26,7 +26,7 @@
  * @key gc
  * @bug 8049831
  * @library /testlibrary /testlibrary/whitebox
- * @build TestCMSClassUnloadingEnabledHWM AllocateBeyondMetaspaceSize
+ * @build TestCMSClassUnloadingEnabledHWM
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
  * @run driver TestCMSClassUnloadingEnabledHWM
  * @summary Test that -XX:-CMSClassUnloadingEnabled will trigger a Full GC when more than MetaspaceSize metadata is allocated.
@@ -34,9 +34,11 @@
 
 import com.oracle.java.testlibrary.OutputAnalyzer;
 import com.oracle.java.testlibrary.ProcessTools;
-
+import java.lang.management.GarbageCollectorMXBean;
+import java.lang.management.ManagementFactory;
 import java.util.ArrayList;
 import java.util.Arrays;
+import sun.hotspot.WhiteBox;
 
 public class TestCMSClassUnloadingEnabledHWM {
   private static long MetaspaceSize = 32 * 1024 * 1024;
@@ -47,15 +49,18 @@
       "-Xbootclasspath/a:.",
       "-XX:+UnlockDiagnosticVMOptions",
       "-XX:+WhiteBoxAPI",
+      "-Xmx128m",
+      "-XX:CMSMaxAbortablePrecleanTime=1",
+      "-XX:CMSWaitDuration=50",
       "-XX:MetaspaceSize=" + MetaspaceSize,
       "-Xmn" + YoungGenSize,
       "-XX:+UseConcMarkSweepGC",
       "-XX:" + (enableUnloading ? "+" : "-") + "CMSClassUnloadingEnabled",
       "-XX:+PrintHeapAtGC",
       "-XX:+PrintGCDetails",
-      "AllocateBeyondMetaspaceSize",
-      "" + MetaspaceSize,
-      "" + YoungGenSize);
+      "-XX:+PrintGCTimeStamps",
+      TestCMSClassUnloadingEnabledHWM.AllocateBeyondMetaspaceSize.class.getName(),
+      "" + MetaspaceSize);
     return new OutputAnalyzer(pb.start());
   }
 
@@ -87,5 +92,37 @@
     testWithCMSClassUnloading();
     testWithoutCMSClassUnloading();
   }
+
+  public static class AllocateBeyondMetaspaceSize {
+    public static void main(String [] args) throws Exception {
+      if (args.length != 1) {
+        throw new IllegalArgumentException("Usage: <MetaspaceSize>");
+      }
+
+      WhiteBox wb = WhiteBox.getWhiteBox();
+
+      // Allocate past the MetaspaceSize limit.
+      long metaspaceSize = Long.parseLong(args[0]);
+      long allocationBeyondMetaspaceSize  = metaspaceSize * 2;
+      long metaspace = wb.allocateMetaspace(null, allocationBeyondMetaspaceSize);
+
+      // Wait for at least one GC to occur. The caller will parse the log files produced.
+      GarbageCollectorMXBean cmsGCBean = getCMSGCBean();
+      while (cmsGCBean.getCollectionCount() == 0) {
+        Thread.sleep(100);
+      }
+
+      wb.freeMetaspace(null, metaspace, metaspace);
+    }
+
+    private static GarbageCollectorMXBean getCMSGCBean() {
+      for (GarbageCollectorMXBean gcBean : ManagementFactory.getGarbageCollectorMXBeans()) {
+        if (gcBean.getObjectName().toString().equals("java.lang:type=GarbageCollector,name=ConcurrentMarkSweep")) {
+          return gcBean;
+        }
+      }
+      return null;
+    }
+  }
 }
 
--- a/test/gc/class_unloading/TestG1ClassUnloadingHWM.java	Wed Oct 08 12:38:41 2014 -0700
+++ b/test/gc/class_unloading/TestG1ClassUnloadingHWM.java	Fri Oct 10 03:07:08 2014 -0700
@@ -26,7 +26,7 @@
  * @key gc
  * @bug 8049831
  * @library /testlibrary /testlibrary/whitebox
- * @build TestG1ClassUnloadingHWM AllocateBeyondMetaspaceSize
+ * @build TestG1ClassUnloadingHWM
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
  * @run driver TestG1ClassUnloadingHWM
  * @summary Test that -XX:-ClassUnloadingWithConcurrentMark will trigger a Full GC when more than MetaspaceSize metadata is allocated.
@@ -34,9 +34,9 @@
 
 import com.oracle.java.testlibrary.OutputAnalyzer;
 import com.oracle.java.testlibrary.ProcessTools;
-
 import java.util.ArrayList;
 import java.util.Arrays;
+import sun.hotspot.WhiteBox;
 
 public class TestG1ClassUnloadingHWM {
   private static long MetaspaceSize = 32 * 1024 * 1024;
@@ -53,7 +53,7 @@
       "-XX:" + (enableUnloading ? "+" : "-") + "ClassUnloadingWithConcurrentMark",
       "-XX:+PrintHeapAtGC",
       "-XX:+PrintGCDetails",
-      "AllocateBeyondMetaspaceSize",
+      TestG1ClassUnloadingHWM.AllocateBeyondMetaspaceSize.class.getName(),
       "" + MetaspaceSize,
       "" + YoungGenSize);
     return new OutputAnalyzer(pb.start());
@@ -87,5 +87,36 @@
     testWithG1ClassUnloading();
     testWithoutG1ClassUnloading();
   }
+
+  public static class AllocateBeyondMetaspaceSize {
+    public static Object dummy;
+
+    public static void main(String [] args) throws Exception {
+      if (args.length != 2) {
+        throw new IllegalArgumentException("Usage: <MetaspaceSize> <YoungGenSize>");
+      }
+
+      WhiteBox wb = WhiteBox.getWhiteBox();
+
+      // Allocate past the MetaspaceSize limit
+      long metaspaceSize = Long.parseLong(args[0]);
+      long allocationBeyondMetaspaceSize  = metaspaceSize * 2;
+      long metaspace = wb.allocateMetaspace(null, allocationBeyondMetaspaceSize);
+
+      long youngGenSize = Long.parseLong(args[1]);
+      triggerYoungGCs(youngGenSize);
+
+      wb.freeMetaspace(null, metaspace, metaspace);
+    }
+
+    public static void triggerYoungGCs(long youngGenSize) {
+      long approxAllocSize = 32 * 1024;
+      long numAllocations  = 2 * youngGenSize / approxAllocSize;
+
+      for (long i = 0; i < numAllocations; i++) {
+        dummy = new byte[(int)approxAllocSize];
+      }
+    }
+  }
 }