changeset 1088:3fc996d4edd2

6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
Summary: ScavengeALot now causes an incremental (but possibly partially
young, in the G1 sense) collection. Some such collections may be abandoned
on account of MMU specs. Band-aided a native leak associated with abandoned
pauses, as well as an MMU tracker overflow related to frequent scavenge
events in the face of a large MMU denominator interval; the latter is
protected by a product flag that defaults to false.
Reviewed-by: tonyp
author ysr
date Thu, 19 Nov 2009 13:43:25 -0800
parents 23b9a8d315fc
children db0d5eba9d20
files src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp src/share/vm/gc_implementation/g1/g1MMUTracker.cpp src/share/vm/gc_implementation/g1/g1MMUTracker.hpp src/share/vm/gc_implementation/g1/g1_globals.hpp src/share/vm/gc_implementation/g1/vm_operations_g1.cpp src/share/vm/gc_implementation/g1/vm_operations_g1.hpp src/share/vm/memory/sharedHeap.hpp
diffstat 8 files changed, 74 insertions(+), 37 deletions(-)
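
A hypothetical debug-build invocation that exercises the new code paths
(ScavengeALot is a develop flag, so product builds reject it; MyApp is a
placeholder application class):

    java -XX:+UseG1GC -XX:+ScavengeALot -XX:+G1ForgetfulMMUTracker MyApp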
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Nov 19 10:19:19 2009 -0800
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Nov 19 13:43:25 2009 -0800
@@ -1732,13 +1732,6 @@
   return car->free();
 }
 
-void G1CollectedHeap::collect(GCCause::Cause cause) {
-  // The caller doesn't have the Heap_lock
-  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
-  MutexLocker ml(Heap_lock);
-  collect_locked(cause);
-}
-
 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
   assert(Thread::current()->is_VM_thread(), "Precondition#1");
   assert(Heap_lock->is_locked(), "Precondition#2");
@@ -1755,17 +1748,31 @@
   }
 }
 
-
-void G1CollectedHeap::collect_locked(GCCause::Cause cause) {
-  // Don't want to do a GC until cleanup is completed.
-  wait_for_cleanup_complete();
-
-  // Read the GC count while holding the Heap_lock
-  int gc_count_before = SharedHeap::heap()->total_collections();
+void G1CollectedHeap::collect(GCCause::Cause cause) {
+  // The caller doesn't have the Heap_lock
+  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
+
+  int gc_count_before;
   {
-    MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
-    VM_G1CollectFull op(gc_count_before, cause);
-    VMThread::execute(&op);
+    MutexLocker ml(Heap_lock);
+    // Read the GC count while holding the Heap_lock
+    gc_count_before = SharedHeap::heap()->total_collections();
+
+    // Don't want to do a GC until cleanup is completed.
+    wait_for_cleanup_complete();
+  } // We give up heap lock; VMThread::execute gets it back below
+  switch (cause) {
+    case GCCause::_scavenge_alot: {
+      // Do an incremental pause, which might sometimes be abandoned.
+      VM_G1IncCollectionPause op(gc_count_before, cause);
+      VMThread::execute(&op);
+      break;
+    }
+    default: {
+      // In all other cases, we currently do a full gc.
+      VM_G1CollectFull op(gc_count_before, cause);
+      VMThread::execute(&op);
+    }
   }
 }
 
@@ -2649,8 +2656,6 @@
     if (g1_policy()->should_initiate_conc_mark())
       strcat(verbose_str, " (initial-mark)");
 
-    GCCauseSetter x(this, GCCause::_g1_inc_collection_pause);
-
     // if PrintGCDetails is on, we'll print long statistics information
     // in the collector policy code, so let's not print this as the output
     // is messy if we do.
@@ -2802,6 +2807,22 @@
           _young_list->reset_auxilary_lists();
         }
       } else {
+        if (_in_cset_fast_test != NULL) {
+          assert(_in_cset_fast_test_base != NULL, "Since _in_cset_fast_test isn't");
+          FREE_C_HEAP_ARRAY(bool, _in_cset_fast_test_base);
+          // This is more for peace of mind; we're nulling them here and
+          // we're expecting them to be null at the beginning of the next GC.
+          _in_cset_fast_test = NULL;
+          _in_cset_fast_test_base = NULL;
+        }
+        // This looks confusing, because the DPT should really be empty
+        // at this point -- since we have not done any collection work,
+        // there should not be any derived pointers in the table to update;
+        // however, there is some additional state in the DPT which is
+        // reset at the end of the (null) "gc" here via the following call.
+        // A better approach might be to split off that state resetting work
+        // into a separate method that asserts that the DPT is empty and call
+        // that here. That is deferred for now.
         COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
       }
 
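The reworked G1CollectedHeap::collect() above follows the usual HotSpot
pattern of sampling the collection count under Heap_lock, releasing the
lock, and submitting a VM operation that can recognise stale requests. A
minimal self-contained sketch of that pattern, with std::mutex standing in
for Heap_lock and all names purely illustrative (not HotSpot code):

  #include <mutex>

  struct Heap {
    std::mutex lock;           // stands in for Heap_lock
    int total_collections;     // bumped by every completed collection
    Heap() : total_collections(0) {}
  };

  // The executor re-takes the lock and drops requests that raced with
  // another, already-completed collection.
  void execute_gc_op(Heap& h, int gc_count_before) {
    std::lock_guard<std::mutex> ml(h.lock);
    if (h.total_collections != gc_count_before) return;  // stale request
    ++h.total_collections;     // ... the collection itself would go here ...
  }

  void request_gc(Heap& h) {
    int gc_count_before;
    {
      std::lock_guard<std::mutex> ml(h.lock);   // like MutexLocker ml(Heap_lock)
      gc_count_before = h.total_collections;    // read the count under the lock
    }                                           // give up the lock ...
    execute_gc_op(h, gc_count_before);          // ... the executor re-takes it
  }
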
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Thu Nov 19 10:19:19 2009 -0800
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Thu Nov 19 13:43:25 2009 -0800
@@ -2847,8 +2847,15 @@
   double non_young_start_time_sec;
   start_recording_regions();
 
-  guarantee(_target_pause_time_ms > -1.0,
+  guarantee(_target_pause_time_ms > -1.0
+            NOT_PRODUCT(|| Universe::heap()->gc_cause() == GCCause::_scavenge_alot),
             "_target_pause_time_ms should have been set!");
+#ifndef PRODUCT
+  if (_target_pause_time_ms <= -1.0) {
+    assert(ScavengeALot && Universe::heap()->gc_cause() == GCCause::_scavenge_alot, "Error");
+    _target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
+  }
+#endif
   assert(_collection_set == NULL, "Precondition");
 
   double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
@@ -2994,7 +3001,3 @@
   G1CollectorPolicy::record_collection_pause_end(abandoned);
   assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");
 }
-
-// Local Variables: ***
-// c-indentation-style: gnu ***
-// End: ***
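
The fallback above derives the target pause time from the MMU tracker's
budget (max_gc_time() converted to milliseconds). For context, a
self-contained sketch of the standard MMU arithmetic (illustrative code,
not the HotSpot tracker):

  #include <cstdio>

  // Minimum mutator utilisation for a window of time_slice_ms in which
  // the collector may consume at most max_gc_time_ms.
  double min_mutator_utilisation(double time_slice_ms, double max_gc_time_ms) {
    return (time_slice_ms - max_gc_time_ms) / time_slice_ms;
  }

  int main() {
    // e.g. a 200 ms budget inside a 1000 ms slice leaves MMU = 0.80; a
    // large denominator interval keeps many pauses "live" in the tracker
    // at once, which is what overflows it under ScavengeALot.
    std::printf("MMU = %.2f\n", min_mutator_utilisation(1000.0, 200.0));
    return 0;
  }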
--- a/src/share/vm/gc_implementation/g1/g1MMUTracker.cpp	Thu Nov 19 10:19:19 2009 -0800
+++ b/src/share/vm/gc_implementation/g1/g1MMUTracker.cpp	Thu Nov 19 13:43:25 2009 -0800
@@ -86,12 +86,22 @@
     //   increase the array size (:-)
     //   remove the oldest entry (this might allow more GC time for
     //     the time slice than what's allowed)
-    //   concolidate the two entries with the minimum gap between them
-    //     (this mighte allow less GC time than what's allowed)
-    guarantee(0, "array full, currently we can't recover");
+    //   consolidate the two entries with the minimum gap between them
+    //     (this might allow less GC time than what's allowed)
+    guarantee(NOT_PRODUCT(ScavengeALot ||) G1ForgetfulMMUTracker,
+              "array full, currently we can't recover unless +G1ForgetfulMMUTracker");
+    // In the case where ScavengeALot is true, such overflow is not
+    // uncommon; in such cases, we can, without much loss of precision
+    // or performance (we are GC'ing most of the time anyway!),
+    // simply overwrite the oldest entry in the tracker: this
+    // is also the behaviour when G1ForgetfulMMUTracker is enabled.
+    _head_index = trim_index(_head_index + 1);
+    assert(_head_index == _tail_index, "Because we have a full circular buffer");
+    _tail_index = trim_index(_tail_index + 1);
+  } else {
+    _head_index = trim_index(_head_index + 1);
+    ++_no_entries;
   }
-  _head_index = trim_index(_head_index + 1);
-  ++_no_entries;
   _array[_head_index] = G1MMUTrackerQueueElem(start, end);
 }
 
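A self-contained sketch of the "forgetful" overwrite behaviour adopted
above (a generic ring buffer with illustrative names, not the actual
G1MMUTrackerQueue): when the buffer is full, the oldest entry is dropped
instead of tripping the guarantee:

  template <typename E, int N>
  class ForgetfulQueue {
    E   _array[N];
    int _head;     // next free slot
    int _tail;     // oldest entry
    int _count;    // number of valid entries

   public:
    ForgetfulQueue() : _head(0), _tail(0), _count(0) {}

    void add(const E& e) {
      _array[_head] = e;
      _head = (_head + 1) % N;
      if (_count == N) {
        _tail = (_tail + 1) % N;  // full: forget the oldest entry
      } else {
        ++_count;
      }
    }

    const E& oldest() const { return _array[_tail]; }
    int count() const { return _count; }
  };
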
--- a/src/share/vm/gc_implementation/g1/g1MMUTracker.hpp	Thu Nov 19 10:19:19 2009 -0800
+++ b/src/share/vm/gc_implementation/g1/g1MMUTracker.hpp	Thu Nov 19 13:43:25 2009 -0800
@@ -99,7 +99,10 @@
   // The array is of fixed size and I don't think we'll need more than
   // two or three entries with the current behaviour of G1 pauses.
   // If the array is full, an easy fix is to look for the pauses with
-  // the shortest gap between them and concolidate them.
+  // the shortest gap between them and consolidate them.
+  // For now, we have taken the expedient alternative of forgetting
+  // the oldest entry in the event that +G1ForgetfulMMUTracker is set, thus
+  // potentially violating MMU specs for some time thereafter.
 
   G1MMUTrackerQueueElem _array[QueueLength];
   int                   _head_index;
--- a/src/share/vm/gc_implementation/g1/g1_globals.hpp	Thu Nov 19 10:19:19 2009 -0800
+++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp	Thu Nov 19 13:43:25 2009 -0800
@@ -256,6 +256,9 @@
           "If non-0 is the size of the G1 survivor space, "                 \
           "otherwise SurvivorRatio is used to determine the size")          \
                                                                             \
+  product(bool, G1ForgetfulMMUTracker, false,                               \
+          "If the MMU tracker's memory is full, forget the oldest entry")   \
+                                                                            \
   product(uintx, G1HeapRegionSize, 0,                                       \
           "Size of the G1 regions.")                                        \
                                                                             \
--- a/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Thu Nov 19 10:19:19 2009 -0800
+++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Thu Nov 19 13:43:25 2009 -0800
@@ -42,7 +42,7 @@
 void VM_G1IncCollectionPause::doit() {
   JvmtiGCForAllocationMarker jgcm;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  GCCauseSetter x(g1h, GCCause::_g1_inc_collection_pause);
+  GCCauseSetter x(g1h, _gc_cause);
   g1h->do_collection_pause_at_safepoint();
 }
 
--- a/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp	Thu Nov 19 10:19:19 2009 -0800
+++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp	Thu Nov 19 13:43:25 2009 -0800
@@ -68,8 +68,9 @@
 
 class VM_G1IncCollectionPause: public VM_GC_Operation {
  public:
-  VM_G1IncCollectionPause(int gc_count_before) :
-    VM_GC_Operation(gc_count_before) {}
+  VM_G1IncCollectionPause(int gc_count_before,
+                          GCCause::Cause gc_cause = GCCause::_g1_inc_collection_pause) :
+    VM_GC_Operation(gc_count_before) { _gc_cause = gc_cause; }
   virtual VMOp_Type type() const { return VMOp_G1IncCollectionPause; }
   virtual void doit();
   virtual const char* name() const {
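
The defaulted gc_cause parameter above keeps pre-existing construction
sites source-compatible while letting the ScavengeALot path supply its own
cause. A self-contained illustration of the same pattern (mock types, not
the HotSpot classes):

  #include <cstdio>

  enum Cause { inc_collection_pause, scavenge_alot };

  class IncPauseOp {
    int   _gc_count_before;
    Cause _cause;
   public:
    // The defaulted argument means existing call sites need no change.
    IncPauseOp(int gc_count_before, Cause cause = inc_collection_pause)
      : _gc_count_before(gc_count_before), _cause(cause) {}
    void doit() const {
      std::printf("pause: count=%d cause=%d\n", _gc_count_before, _cause);
    }
  };

  int main() {
    IncPauseOp op1(7);                 // existing callers: default cause
    IncPauseOp op2(7, scavenge_alot);  // new ScavengeALot path
    op1.doit();
    op2.doit();
    return 0;
  }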
--- a/src/share/vm/memory/sharedHeap.hpp	Thu Nov 19 10:19:19 2009 -0800
+++ b/src/share/vm/memory/sharedHeap.hpp	Thu Nov 19 13:43:25 2009 -0800
@@ -224,10 +224,6 @@
                           CodeBlobClosure* code_roots,
                           OopClosure* non_root_closure);
 
-
-  // Like CollectedHeap::collect, but assume that the caller holds the Heap_lock.
-  virtual void collect_locked(GCCause::Cause cause) = 0;
-
   // The functions below are helper functions that a subclass of
   // "SharedHeap" can use in the implementation of its virtual
   // functions.