changeset 1890:f5c8d6e5bfee

Merge
author jcoomes
date Mon, 01 Nov 2010 10:49:14 -0700
parents ee0d26abaad3 (current diff) c766bae6c14d (diff)
children 9de67bf4244d
files src/share/vm/runtime/arguments.cpp src/share/vm/runtime/globals.hpp
diffstat 27 files changed, 298 insertions(+), 311 deletions(-)
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Tue Oct 26 16:48:28 2010 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -354,12 +354,8 @@
 double CMSStats::time_until_cms_gen_full() const {
   size_t cms_free = _cms_gen->cmsSpace()->free();
   GenCollectedHeap* gch = GenCollectedHeap::heap();
-  size_t expected_promotion = gch->get_gen(0)->capacity();
-  if (HandlePromotionFailure) {
-    expected_promotion = MIN2(
-        (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average(),
-        expected_promotion);
-  }
+  size_t expected_promotion = MIN2(gch->get_gen(0)->capacity(),
+                                   (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
   if (cms_free > expected_promotion) {
     // Start a cms collection if there isn't enough space to promote
     // for the next minor collection.  Use the padded average as
@@ -865,57 +861,18 @@
   return free() + _virtual_space.uncommitted_size();
 }
 
-bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(
-    size_t max_promotion_in_bytes,
-    bool younger_handles_promotion_failure) const {
-
-  // This is the most conservative test.  Full promotion is
-  // guaranteed if this is used. The multiplicative factor is to
-  // account for the worst case "dilatation".
-  double adjusted_max_promo_bytes = _dilatation_factor * max_promotion_in_bytes;
-  if (adjusted_max_promo_bytes > (double)max_uintx) { // larger than size_t
-    adjusted_max_promo_bytes = (double)max_uintx;
-  }
-  bool result = (max_contiguous_available() >= (size_t)adjusted_max_promo_bytes);
-
-  if (younger_handles_promotion_failure && !result) {
-    // Full promotion is not guaranteed because fragmentation
-    // of the cms generation can prevent the full promotion.
-    result = (max_available() >= (size_t)adjusted_max_promo_bytes);
-
-    if (!result) {
-      // With promotion failure handling the test for the ability
-      // to support the promotion does not have to be guaranteed.
-      // Use an average of the amount promoted.
-      result = max_available() >= (size_t)
-        gc_stats()->avg_promoted()->padded_average();
-      if (PrintGC && Verbose && result) {
-        gclog_or_tty->print_cr(
-          "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
-          " max_available: " SIZE_FORMAT
-          " avg_promoted: " SIZE_FORMAT,
-          max_available(), (size_t)
-          gc_stats()->avg_promoted()->padded_average());
-      }
-    } else {
-      if (PrintGC && Verbose) {
-        gclog_or_tty->print_cr(
-          "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
-          " max_available: " SIZE_FORMAT
-          " adj_max_promo_bytes: " SIZE_FORMAT,
-          max_available(), (size_t)adjusted_max_promo_bytes);
-      }
-    }
-  } else {
-    if (PrintGC && Verbose) {
-      gclog_or_tty->print_cr(
-        "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
-        " contiguous_available: " SIZE_FORMAT
-        " adj_max_promo_bytes: " SIZE_FORMAT,
-        max_contiguous_available(), (size_t)adjusted_max_promo_bytes);
-    }
-  }
-  return result;
+bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
+  size_t available = max_available();
+  size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
+  bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
+  if (PrintGC && Verbose) {
+    gclog_or_tty->print_cr(
+      "CMS: promo attempt is%s safe: available("SIZE_FORMAT") %s av_promo("SIZE_FORMAT"),"
+      "max_promo("SIZE_FORMAT")",
+      res? "":" not", available, res? ">=":"<",
+      av_promo, max_promotion_in_bytes);
+  }
+  return res;
 }
 
 // At a promotion failure dump information on block layout in heap
@@ -6091,23 +6048,14 @@
   assert(_collectorState == Resizing, "Change of collector state to"
     " Resizing must be done under the freelistLocks (plural)");
 
-  // Now that sweeping has been completed, if the GCH's
-  // incremental_collection_will_fail flag is set, clear it,
+  // Now that sweeping has been completed, we clear
+  // the incremental_collection_failed flag,
   // thus inviting a younger gen collection to promote into
   // this generation. If such a promotion may still fail,
   // the flag will be set again when a young collection is
   // attempted.
-  // I think the incremental_collection_will_fail flag's use
-  // is specific to a 2 generation collection policy, so i'll
-  // assert that that's the configuration we are operating within.
-  // The use of the flag can and should be generalized appropriately
-  // in the future to deal with a general n-generation system.
-
   GenCollectedHeap* gch = GenCollectedHeap::heap();
-  assert(gch->collector_policy()->is_two_generation_policy(),
-         "Resetting of incremental_collection_will_fail flag"
-         " may be incorrect otherwise");
-  gch->clear_incremental_collection_will_fail();
+  gch->clear_incremental_collection_failed();  // Worth retrying as fresh space may have been freed up
   gch->update_full_collections_completed(_collection_count_start);
 }
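
The two hunks above share one idea: with HandlePromotionFailure gone, the collector always tolerates promotion failure, so the conservative dilatation-factor test disappears and both the start-of-cycle estimate and the safety test lean on the padded average of past promotions. Below is a minimal, self-contained sketch of that arithmetic; PaddedAverage is an assumed stand-in for HotSpot's AdaptivePaddedAverage, not the real class.

#include <algorithm>
#include <cstddef>

struct PaddedAverage {
  double avg, dev, pad;          // running average, deviation, padding factor
  double padded_average() const { return avg + pad * dev; }
};

// Expected promotion: capped by the young gen capacity, as in the new
// CMSStats::time_until_cms_gen_full().
size_t expected_promotion(size_t young_capacity, const PaddedAverage& promoted) {
  return std::min(young_capacity, (size_t)promoted.padded_average());
}

// Promotion is "safe" if free space covers either the padded average or
// the stated worst case, mirroring the new promotion_attempt_is_safe().
bool promotion_attempt_is_safe(size_t available, size_t max_promotion,
                               const PaddedAverage& promoted) {
  return available >= (size_t)promoted.padded_average() ||
         available >= max_promotion;
}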
 
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Tue Oct 26 16:48:28 2010 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Mon Nov 01 10:49:14 2010 -0700
@@ -1185,8 +1185,7 @@
   virtual void par_promote_alloc_done(int thread_num);
   virtual void par_oop_since_save_marks_iterate_done(int thread_num);
 
-  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes,
-    bool younger_handles_promotion_failure) const;
+  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes) const;
 
   // Inform this (non-young) generation that a promotion failure was
   // encountered during a collection of a younger generation that
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp	Tue Oct 26 16:48:28 2010 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -272,12 +272,16 @@
   }
 }
 
-// Wait until the next synchronous GC or a timeout, whichever is earlier.
-void ConcurrentMarkSweepThread::wait_on_cms_lock(long t) {
+// Wait until the next synchronous GC, a concurrent full gc request,
+// or a timeout, whichever is earlier.
+void ConcurrentMarkSweepThread::wait_on_cms_lock(long t_millis) {
   MutexLockerEx x(CGC_lock,
                   Mutex::_no_safepoint_check_flag);
+  if (_should_terminate || _collector->_full_gc_requested) {
+    return;
+  }
   set_CMS_flag(CMS_cms_wants_token);   // to provoke notifies
-  CGC_lock->wait(Mutex::_no_safepoint_check_flag, t);
+  CGC_lock->wait(Mutex::_no_safepoint_check_flag, t_millis);
   clear_CMS_flag(CMS_cms_wants_token);
   assert(!CMS_flag_is_set(CMS_cms_has_token | CMS_cms_wants_token),
          "Should not be set");
@@ -289,7 +293,8 @@
       icms_wait();
       return;
     } else {
-      // Wait until the next synchronous GC or a timeout, whichever is earlier
+      // Wait until the next synchronous GC, a concurrent full gc
+      // request, or a timeout, whichever is earlier.
       wait_on_cms_lock(CMSWaitDuration);
     }
     // Check if we should start a CMS collection cycle
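
The early return added above closes a window in which a full-gc request arriving just before the wait would be slept through. Here is a sketch of the same check-then-wait pattern in portable C++; the names and the condition variable are illustrative, since HotSpot uses CGC_lock and flag bits instead.

#include <chrono>
#include <condition_variable>
#include <mutex>

struct CmsWaiter {
  std::mutex lock;                       // stand-in for CGC_lock
  std::condition_variable cv;
  bool should_terminate  = false;
  bool full_gc_requested = false;

  // t_millis == 0 means no upper bound on the wait, per the new comment.
  void wait_on_cms_lock(long t_millis) {
    std::unique_lock<std::mutex> x(lock);
    if (should_terminate || full_gc_requested) {
      return;                            // re-check under the lock; don't block
    }
    auto woken = [this] { return should_terminate || full_gc_requested; };
    if (t_millis == 0) {
      cv.wait(x, woken);
    } else {
      cv.wait_for(x, std::chrono::milliseconds(t_millis), woken);
    }
  }
};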
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp	Tue Oct 26 16:48:28 2010 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp	Mon Nov 01 10:49:14 2010 -0700
@@ -120,8 +120,10 @@
   }
 
   // Wait on CMS lock until the next synchronous GC
-  // or given timeout, whichever is earlier.
-  void    wait_on_cms_lock(long t); // milliseconds
+  // or given timeout, whichever is earlier. A timeout value
+  // of 0 indicates that there is no upper bound on the wait time.
+  // A concurrent full gc request terminates the wait.
+  void wait_on_cms_lock(long t_millis);
 
   // The CMS thread will yield during the work portion of its cycle
   // only when requested to.  Both synchronous and asynchronous requests
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Tue Oct 26 16:48:28 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -2418,6 +2418,8 @@
   for (int i = 0; i < (int)_max_task_num; ++i) {
     OopTaskQueue* queue = _task_queues->queue(i);
     queue->set_empty();
+    // Clear any partial regions from the CMTasks
+    _tasks[i]->clear_aborted_region();
   }
 }
 
@@ -2706,7 +2708,6 @@
   clear_marking_state();
   for (int i = 0; i < (int)_max_task_num; ++i) {
     _tasks[i]->clear_region_fields();
-    _tasks[i]->clear_aborted_region();
   }
   _has_aborted = true;
 
@@ -2985,7 +2986,7 @@
 
   _nextMarkBitMap                = nextMarkBitMap;
   clear_region_fields();
-  clear_aborted_region();
+  assert(_aborted_region.is_empty(), "should have been cleared");
 
   _calls                         = 0;
   _elapsed_time_ms               = 0.0;
--- a/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp	Tue Oct 26 16:48:28 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -175,7 +175,7 @@
   }
   assert(start_card > _array->index_for(_bottom), "Cannot be first card");
   assert(_array->offset_array(start_card-1) <= N_words,
-    "Offset card has an unexpected value");
+         "Offset card has an unexpected value");
   size_t start_card_for_region = start_card;
   u_char offset = max_jubyte;
   for (int i = 0; i < BlockOffsetArray::N_powers; i++) {
@@ -577,6 +577,16 @@
 #endif
 }
 
+void
+G1BlockOffsetArray::set_for_starts_humongous(HeapWord* new_end) {
+  assert(_end == new_end, "_end should have already been updated");
+
+  // The first BOT entry should have offset 0.
+  _array->set_offset_array(_array->index_for(_bottom), 0);
+  // The rest should point to the first one.
+  set_remainder_to_point_to_start(_bottom + N_words, new_end);
+}
+
 //////////////////////////////////////////////////////////////////////
 // G1BlockOffsetArrayContigSpace
 //////////////////////////////////////////////////////////////////////
@@ -626,3 +636,12 @@
          "Precondition of call");
   _array->set_offset_array(bottom_index, 0);
 }
+
+void
+G1BlockOffsetArrayContigSpace::set_for_starts_humongous(HeapWord* new_end) {
+  G1BlockOffsetArray::set_for_starts_humongous(new_end);
+
+  // Make sure _next_offset_threshold and _next_offset_index point to new_end.
+  _next_offset_threshold = new_end;
+  _next_offset_index     = _array->index_for(new_end);
+}
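
What set_for_starts_humongous() establishes is an invariant rather than a computation: the first table entry says "the block starts here" and every later entry points back, so a block-start query anywhere in [bottom, new_end) resolves to bottom. The toy model below illustrates that invariant with one plain size_t per card, instead of HotSpot's u_char entries and logarithmic back-skip encoding.

#include <cassert>
#include <cstddef>
#include <vector>

const size_t N_words = 64;                 // words covered by one card (assumed)

struct ToyBOT {
  std::vector<size_t> back;                // words from card boundary back to block start

  void set_for_starts_humongous(size_t cards) {
    back.assign(cards, 0);
    back[0] = 0;                           // first card: the object starts at bottom
    for (size_t i = 1; i < cards; ++i) {
      back[i] = i * N_words;               // the rest point to the first one
    }
  }

  // word index of the start of the block containing card c
  size_t block_start(size_t c) const { return c * N_words - back[c]; }
};

int main() {
  ToyBOT bot;
  bot.set_for_starts_humongous(8);         // one object spanning 8 cards
  for (size_t c = 0; c < 8; ++c) {
    assert(bot.block_start(c) == 0);       // every query resolves to bottom
  }
  return 0;
}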
--- a/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp	Tue Oct 26 16:48:28 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp	Mon Nov 01 10:49:14 2010 -0700
@@ -436,6 +436,8 @@
   }
 
   void check_all_cards(size_t left_card, size_t right_card) const;
+
+  virtual void set_for_starts_humongous(HeapWord* new_end);
 };
 
 // A subtype of BlockOffsetArray that takes advantage of the fact
@@ -484,4 +486,6 @@
 
   HeapWord* block_start_unsafe(const void* addr);
   HeapWord* block_start_unsafe_const(const void* addr) const;
+
+  virtual void set_for_starts_humongous(HeapWord* new_end);
 };
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Oct 26 16:48:28 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -4118,10 +4118,14 @@
     while (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) {
       assert(pss->verify_task(stolen_task), "sanity");
       if (stolen_task.is_narrow()) {
-        pss->push_on_queue((narrowOop*) stolen_task);
+        pss->deal_with_reference((narrowOop*) stolen_task);
       } else {
-        pss->push_on_queue((oop*) stolen_task);
+        pss->deal_with_reference((oop*) stolen_task);
       }
+
+      // We've just processed a reference and we might have made
+      // available new entries on the queues. So we have to make sure
+      // we drain the queues as necessary.
       pss->trim_queue();
     }
   } while (!offer_termination());
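
The comment above is the whole fix: deal_with_reference() can push follow-up entries, so the local queue must be drained after every stolen task, not once per steal round. The toy work-stealing loop below has the same shape, with plain deques standing in for G1's task queues.

#include <deque>

struct Worker {
  std::deque<int> queue;                   // local queue (toy)

  void deal_with_reference(int task) {
    if (task > 0) {
      queue.push_back(task - 1);           // processing may create new work
    }
  }

  void trim_queue() {
    while (!queue.empty()) {
      int t = queue.front();
      queue.pop_front();
      deal_with_reference(t);
    }
  }

  bool steal(std::deque<int>& victim, int& t) {
    if (victim.empty()) return false;
    t = victim.back();
    victim.pop_back();
    return true;
  }

  void steal_loop(std::deque<int>& victim) {
    int t;
    while (steal(victim, t)) {
      deal_with_reference(t);
      trim_queue();                        // drain before the next steal
    }
  }
};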
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Tue Oct 26 16:48:28 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Mon Nov 01 10:49:14 2010 -0700
@@ -1772,7 +1772,6 @@
     }
   }
 
-private:
   template <class T> void deal_with_reference(T* ref_to_scan) {
     if (has_partial_array_mask(ref_to_scan)) {
       _partial_scan_cl->do_oop_nv(ref_to_scan);
--- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Tue Oct 26 16:48:28 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -377,10 +377,26 @@
 }
 // </PREDICTION>
 
-void HeapRegion::set_startsHumongous() {
+void HeapRegion::set_startsHumongous(HeapWord* new_end) {
+  assert(end() == _orig_end,
+         "Should be normal before the humongous object allocation");
+  assert(top() == bottom(), "should be empty");
+
   _humongous_type = StartsHumongous;
   _humongous_start_region = this;
-  assert(end() == _orig_end, "Should be normal before alloc.");
+
+  set_end(new_end);
+  _offsets.set_for_starts_humongous(new_end);
+}
+
+void HeapRegion::set_continuesHumongous(HeapRegion* start) {
+  assert(end() == _orig_end,
+         "Should be normal before the humongous object allocation");
+  assert(top() == bottom(), "should be empty");
+  assert(start->startsHumongous(), "pre-condition");
+
+  _humongous_type = ContinuesHumongous;
+  _humongous_start_region = start;
 }
 
 bool HeapRegion::claimHeapRegion(jint claimValue) {
@@ -500,23 +516,6 @@
   return blk.result();
 }
 
-void HeapRegion::set_continuesHumongous(HeapRegion* start) {
-  // The order is important here.
-  start->add_continuingHumongousRegion(this);
-  _humongous_type = ContinuesHumongous;
-  _humongous_start_region = start;
-}
-
-void HeapRegion::add_continuingHumongousRegion(HeapRegion* cont) {
-  // Must join the blocks of the current H region seq with the block of the
-  // added region.
-  offsets()->join_blocks(bottom(), cont->bottom());
-  arrayOop obj = (arrayOop)(bottom());
-  obj->set_length((int) (obj->length() + cont->capacity()/jintSize));
-  set_end(cont->end());
-  set_top(cont->end());
-}
-
 void HeapRegion::save_marks() {
   set_saved_mark();
 }
--- a/src/share/vm/gc_implementation/g1/heapRegion.hpp	Tue Oct 26 16:48:28 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp	Mon Nov 01 10:49:14 2010 -0700
@@ -395,14 +395,12 @@
 
   // Causes the current region to represent a humongous object spanning "n"
   // regions.
-  virtual void set_startsHumongous();
+  void set_startsHumongous(HeapWord* new_end);
 
   // The regions that continue a humongous sequence should be added using
   // this method, in increasing address order.
   void set_continuesHumongous(HeapRegion* start);
 
-  void add_continuingHumongousRegion(HeapRegion* cont);
-
   // If the region has a remembered set, return a pointer to it.
   HeapRegionRemSet* rem_set() const {
     return _rem_set;
@@ -733,13 +731,6 @@
                                    FilterOutOfRegionClosure* cl,
                                    bool filter_young);
 
-  // The region "mr" is entirely in "this", and starts and ends at block
-  // boundaries. The caller declares that all the contained blocks are
-  // coalesced into one.
-  void declare_filled_region_to_BOT(MemRegion mr) {
-    _offsets.single_block(mr.start(), mr.end());
-  }
-
   // A version of block start that is guaranteed to find *some* block
  // boundary at or before "p", but does not do object iteration, and may
   // therefore be used safely when the heap is unparseable.
--- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Tue Oct 26 16:48:28 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -1159,9 +1159,7 @@
   _hrrs(NULL),
   _g1h(G1CollectedHeap::heap()),
   _bosa(NULL),
-  _sparse_iter(size_t(G1CollectedHeap::heap()->reserved_region().start())
-               >> CardTableModRefBS::card_shift)
-{}
+  _sparse_iter() { }
 
 void HeapRegionRemSetIterator::initialize(const HeapRegionRemSet* hrrs) {
   _hrrs = hrrs;
--- a/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Tue Oct 26 16:48:28 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -91,34 +91,118 @@
   }
   if (sumSizes >= word_size) {
     _alloc_search_start = cur;
-    // Mark the allocated regions as allocated.
+
+    // We need to initialize the region(s) we just discovered. This is
+    // a bit tricky given that it can happen concurrently with
+    // refinement threads refining cards on these regions and
+    // potentially wanting to refine the BOT as they are scanning
+    // those cards (this can happen shortly after a cleanup; see CR
+    // 6991377). So we have to set up the region(s) carefully and in
+    // a specific order.
+
+    // Currently, allocs_are_zero_filled() returns false. The zero
+    // filling infrastructure will be going away soon (see CR 6977804).
+    // So no need to do anything else here.
     bool zf = G1CollectedHeap::heap()->allocs_are_zero_filled();
+    assert(!zf, "not supported");
+
+    // This will be the "starts humongous" region.
     HeapRegion* first_hr = _regions.at(first);
-    for (int i = first; i < cur; i++) {
-      HeapRegion* hr = _regions.at(i);
-      if (zf)
-        hr->ensure_zero_filled();
+    {
+      MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
+      first_hr->set_zero_fill_allocated();
+    }
+    // The header of the new object will be placed at the bottom of
+    // the first region.
+    HeapWord* new_obj = first_hr->bottom();
+    // This will be the new end of the first region in the series, which
+    // should also match the end of the last region in the series.
+    // (Note: sumSizes = "region size" x "number of regions we found").
+    HeapWord* new_end = new_obj + sumSizes;
+    // This will be the new top of the first region that will reflect
+    // this allocation.
+    HeapWord* new_top = new_obj + word_size;
+
+    // First, we need to zero the header of the space that we will be
+    // allocating. When we update top further down, some refinement
+    // threads might try to scan the region. By zeroing the header we
+    // ensure that any thread that will try to scan the region will
+    // come across the zero klass word and bail out.
+    //
+    // NOTE: It would not have been correct to have used
+    // CollectedHeap::fill_with_object() and make the space look like
+    // an int array. The thread that is doing the allocation will
+    // later update the object header to a potentially different array
+    // type and, for a very short period of time, the klass and length
+    // fields will be inconsistent. This could cause a refinement
+    // thread to calculate the object size incorrectly.
+    Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
+
+    // We will set up the first region as "starts humongous". This
+    // will also update the BOT covering all the regions to reflect
+    // that there is a single object that starts at the bottom of the
+    // first region.
+    first_hr->set_startsHumongous(new_end);
+
+    // Then, if there are any, we will set up the "continues
+    // humongous" regions.
+    HeapRegion* hr = NULL;
+    for (int i = first + 1; i < cur; ++i) {
+      hr = _regions.at(i);
       {
         MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
         hr->set_zero_fill_allocated();
       }
-      size_t sz = hr->capacity() / HeapWordSize;
-      HeapWord* tmp = hr->allocate(sz);
-      assert(tmp != NULL, "Humongous allocation failure");
-      MemRegion mr = MemRegion(tmp, sz);
-      CollectedHeap::fill_with_object(mr);
-      hr->declare_filled_region_to_BOT(mr);
-      if (i == first) {
-        first_hr->set_startsHumongous();
+      hr->set_continuesHumongous(first_hr);
+    }
+    // If we have "continues humongous" regions (hr != NULL), then the
+    // end of the last one should match new_end.
+    assert(hr == NULL || hr->end() == new_end, "sanity");
+
+    // Up to this point no concurrent thread would have been able to
+    // do any scanning on any region in this series. All the top
+    // fields still point to bottom, so the intersection between
+    // [bottom,top] and [card_start,card_end] will be empty. Before we
+    // update the top fields, we'll do a storestore to make sure that
+    // no thread sees the update to top before the zeroing of the
+    // object header and the BOT initialization.
+    OrderAccess::storestore();
+
+    // Now that the BOT and the object header have been initialized,
+    // we can update top of the "starts humongous" region.
+    assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
+           "new_top should be in this region");
+    first_hr->set_top(new_top);
+
+    // Now, we will update the top fields of the "continues humongous"
+    // regions. The reason we need to do this is that, otherwise,
+    // these regions would look empty and this would confuse parts of
+    // G1. For example, the code that looks for a consecutive number
+    // of empty regions would consider them empty and try to
+    // re-allocate them. We could extend is_empty() to also include
+    // !continuesHumongous(), but it is easier to just update the top
+    // fields here.
+    hr = NULL;
+    for (int i = first + 1; i < cur; ++i) {
+      hr = _regions.at(i);
+      if ((i + 1) == cur) {
+        // last continues humongous region
+        assert(hr->bottom() < new_top && new_top <= hr->end(),
+               "new_top should fall on this region");
+        hr->set_top(new_top);
       } else {
-        assert(i > first, "sanity");
-        hr->set_continuesHumongous(first_hr);
+        // not last one
+        assert(new_top > hr->end(), "new_top should be above this region");
+        hr->set_top(hr->end());
       }
     }
-    HeapWord* first_hr_bot = first_hr->bottom();
-    HeapWord* obj_end = first_hr_bot + word_size;
-    first_hr->set_top(obj_end);
-    return first_hr_bot;
+    // If we have "continues humongous" regions (hr != NULL), then the
+    // end of the last one should match new_end and its top should
+    // match new_top.
+    assert(hr == NULL ||
+           (hr->end() == new_end && hr->top() == new_top), "sanity");
+
+    return new_obj;
   } else {
     // If we started from the beginning, we want to know why we can't alloc.
     return NULL;
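
The ordering constraints spelled out in the comments above boil down to a classic publication protocol: initialize everything a concurrent reader may inspect, fence, then publish. A condensed sketch follows, with an invented Region type and std::atomic_thread_fence(release) playing the role of OrderAccess::storestore().

#include <atomic>
#include <cstddef>
#include <cstring>

struct Region {
  char* bottom;
  std::atomic<char*> top;                  // read concurrently by refinement threads
};

void publish_humongous(Region& r, size_t header_bytes, size_t obj_bytes) {
  // 1. Zero the header so any early reader sees a zero klass word and bails.
  std::memset(r.bottom, 0, header_bytes);
  // 2. (BOT initialization would happen here.)
  // 3. Store-store barrier: no reader may see top advanced before steps 1-2.
  std::atomic_thread_fence(std::memory_order_release);
  // 4. Publish: only now does [bottom, top] intersect any scanned cards.
  r.top.store(r.bottom + obj_bytes, std::memory_order_relaxed);
}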
--- a/src/share/vm/gc_implementation/g1/sparsePRT.cpp	Tue Oct 26 16:48:28 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/sparsePRT.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -308,7 +308,7 @@
   assert(e2->num_valid_cards() > 0, "Postcondition.");
 }
 
-CardIdx_t /* RSHashTable:: */ RSHashTableIter::find_first_card_in_list() {
+CardIdx_t RSHashTableIter::find_first_card_in_list() {
   CardIdx_t res;
   while (_bl_ind != RSHashTable::NullEntry) {
     res = _rsht->entry(_bl_ind)->card(0);
@@ -322,14 +322,11 @@
   return SparsePRTEntry::NullEntry;
 }
 
-size_t /* RSHashTable:: */ RSHashTableIter::compute_card_ind(CardIdx_t ci) {
-  return
-    _heap_bot_card_ind
-    + (_rsht->entry(_bl_ind)->r_ind() * HeapRegion::CardsPerRegion)
-    + ci;
+size_t RSHashTableIter::compute_card_ind(CardIdx_t ci) {
+  return (_rsht->entry(_bl_ind)->r_ind() * HeapRegion::CardsPerRegion) + ci;
 }
 
-bool /* RSHashTable:: */ RSHashTableIter::has_next(size_t& card_index) {
+bool RSHashTableIter::has_next(size_t& card_index) {
   _card_ind++;
   CardIdx_t ci;
   if (_card_ind < SparsePRTEntry::cards_num() &&
--- a/src/share/vm/gc_implementation/g1/sparsePRT.hpp	Tue Oct 26 16:48:28 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/sparsePRT.hpp	Mon Nov 01 10:49:14 2010 -0700
@@ -169,7 +169,6 @@
   int _bl_ind;          // [-1, 0.._rsht->_capacity)
   short _card_ind;      // [0..SparsePRTEntry::cards_num())
   RSHashTable* _rsht;
-  size_t _heap_bot_card_ind;
 
   // If the bucket list pointed to by _bl_ind contains a card, sets
   // _bl_ind to the index of that entry, and returns the card.
@@ -183,13 +182,11 @@
   size_t compute_card_ind(CardIdx_t ci);
 
 public:
-  RSHashTableIter(size_t heap_bot_card_ind) :
+  RSHashTableIter() :
     _tbl_ind(RSHashTable::NullEntry),
     _bl_ind(RSHashTable::NullEntry),
     _card_ind((SparsePRTEntry::cards_num() - 1)),
-    _rsht(NULL),
-    _heap_bot_card_ind(heap_bot_card_ind)
-  {}
+    _rsht(NULL) {}
 
   void init(RSHashTable* rsht) {
     _rsht = rsht;
@@ -280,20 +277,11 @@
   bool contains_card(RegionIdx_t region_id, CardIdx_t card_index) const {
     return _next->contains_card(region_id, card_index);
   }
-
-#if 0
-  void verify_is_cleared();
-  void print();
-#endif
 };
 
 
-class SparsePRTIter: public /* RSHashTable:: */RSHashTableIter {
+class SparsePRTIter: public RSHashTableIter {
 public:
-  SparsePRTIter(size_t heap_bot_card_ind) :
-    /* RSHashTable:: */RSHashTableIter(heap_bot_card_ind)
-  {}
-
   void init(const SparsePRT* sprt) {
     RSHashTableIter::init(sprt->cur());
   }
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Tue Oct 26 16:48:28 2010 -0700
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -846,7 +846,7 @@
   // from this generation, pass on collection; let the next generation
   // do it.
   if (!collection_attempt_is_safe()) {
-    gch->set_incremental_collection_will_fail();
+    gch->set_incremental_collection_failed();  // slight lie, in that we did not even attempt one
     return;
   }
   assert(to()->is_empty(), "Else not collection_attempt_is_safe");
@@ -935,8 +935,6 @@
 
     assert(to()->is_empty(), "to space should be empty now");
   } else {
-    assert(HandlePromotionFailure,
-      "Should only be here if promotion failure handling is on");
     assert(_promo_failure_scan_stack.is_empty(), "post condition");
     _promo_failure_scan_stack.clear(true); // Clear cached segments.
 
@@ -947,7 +945,7 @@
     // All the spaces are in play for mark-sweep.
     swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
     from()->set_next_compaction_space(to());
-    gch->set_incremental_collection_will_fail();
+    gch->set_incremental_collection_failed();
     // Inform the next generation that a promotion failure occurred.
     _next_gen->promotion_failure_occurred();
 
@@ -1092,11 +1090,6 @@
                                        old, m, sz);
 
     if (new_obj == NULL) {
-      if (!HandlePromotionFailure) {
-        // A failed promotion likely means the MaxLiveObjectEvacuationRatio flag
-        // is incorrectly set. In any case, its seriously wrong to be here!
-        vm_exit_out_of_memory(sz*wordSize, "promotion");
-      }
       // promotion failed, forward to self
       _promotion_failed = true;
       new_obj = old;
@@ -1206,12 +1199,6 @@
                                        old, m, sz);
 
     if (new_obj == NULL) {
-      if (!HandlePromotionFailure) {
-        // A failed promotion likely means the MaxLiveObjectEvacuationRatio
-        // flag is incorrectly set. In any case, its seriously wrong to be
-        // here!
-        vm_exit_out_of_memory(sz*wordSize, "promotion");
-      }
       // promotion failed, forward to self
       forward_ptr = old->forward_to_atomic(old);
       new_obj = old;
--- a/src/share/vm/memory/collectorPolicy.cpp	Tue Oct 26 16:48:28 2010 -0700
+++ b/src/share/vm/memory/collectorPolicy.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -659,9 +659,6 @@
     }
     return result;   // could be null if we are out of space
   } else if (!gch->incremental_collection_will_fail()) {
-    // The gc_prologues have not executed yet.  The value
-    // for incremental_collection_will_fail() is the remanent
-    // of the last collection.
     // Do an incremental collection.
     gch->do_collection(false            /* full */,
                        false            /* clear_all_soft_refs */,
@@ -739,9 +736,8 @@
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   size_t gen0_capacity = gch->get_gen(0)->capacity_before_gc();
   return    (word_size > heap_word_size(gen0_capacity))
-         || (GC_locker::is_active_and_needs_gc())
-         || (   gch->last_incremental_collection_failed()
-             && gch->incremental_collection_will_fail());
+         || GC_locker::is_active_and_needs_gc()
+         || gch->incremental_collection_failed();
 }
 
 
--- a/src/share/vm/memory/defNewGeneration.cpp	Tue Oct 26 16:48:28 2010 -0700
+++ b/src/share/vm/memory/defNewGeneration.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -510,7 +510,7 @@
   // from this generation, pass on collection; let the next generation
   // do it.
   if (!collection_attempt_is_safe()) {
-    gch->set_incremental_collection_will_fail();
+    gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
     return;
   }
   assert(to()->is_empty(), "Else not collection_attempt_is_safe");
@@ -596,9 +596,8 @@
     if (PrintGC && !PrintGCDetails) {
       gch->print_heap_change(gch_prev_used);
     }
+    assert(!gch->incremental_collection_failed(), "Should be clear");
   } else {
-    assert(HandlePromotionFailure,
-      "Should not be here unless promotion failure handling is on");
     assert(_promo_failure_scan_stack.is_empty(), "post condition");
     _promo_failure_scan_stack.clear(true); // Clear cached segments.
 
@@ -613,7 +612,7 @@
     // and from-space.
     swap_spaces();   // For uniformity wrt ParNewGeneration.
     from()->set_next_compaction_space(to());
-    gch->set_incremental_collection_will_fail();
+    gch->set_incremental_collection_failed();
 
     // Inform the next generation that a promotion failure occurred.
     _next_gen->promotion_failure_occurred();
@@ -700,12 +699,6 @@
   if (obj == NULL) {
     obj = _next_gen->promote(old, s);
     if (obj == NULL) {
-      if (!HandlePromotionFailure) {
-        // A failed promotion likely means the MaxLiveObjectEvacuationRatio flag
-        // is incorrectly set. In any case, its seriously wrong to be here!
-        vm_exit_out_of_memory(s*wordSize, "promotion");
-      }
-
       handle_promotion_failure(old);
       return old;
     }
@@ -812,47 +805,43 @@
     assert(_next_gen != NULL,
            "This must be the youngest gen, and not the only gen");
   }
-
-  // Decide if there's enough room for a full promotion
-  // When using extremely large edens, we effectively lose a
-  // large amount of old space.  Use the "MaxLiveObjectEvacuationRatio"
-  // flag to reduce the minimum evacuation space requirements. If
-  // there is not enough space to evacuate eden during a scavenge,
-  // the VM will immediately exit with an out of memory error.
-  // This flag has not been tested
-  // with collectors other than simple mark & sweep.
-  //
-  // Note that with the addition of promotion failure handling, the
-  // VM will not immediately exit but will undo the young generation
-  // collection.  The parameter is left here for compatibility.
-  const double evacuation_ratio = MaxLiveObjectEvacuationRatio / 100.0;
-
-  // worst_case_evacuation is based on "used()".  For the case where this
-  // method is called after a collection, this is still appropriate because
-  // the case that needs to be detected is one in which a full collection
-  // has been done and has overflowed into the young generation.  In that
-  // case a minor collection will fail (the overflow of the full collection
-  // means there is no space in the old generation for any promotion).
-  size_t worst_case_evacuation = (size_t)(used() * evacuation_ratio);
-
-  return _next_gen->promotion_attempt_is_safe(worst_case_evacuation,
-                                              HandlePromotionFailure);
+  return _next_gen->promotion_attempt_is_safe(used());
 }
 
 void DefNewGeneration::gc_epilogue(bool full) {
+  DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)
+
+  assert(!GC_locker::is_active(), "We should not be executing here");
   // Check if the heap is approaching full after a collection has
   // been done.  Generally the young generation is empty at
   // a minimum at the end of a collection.  If it is not, then
   // the heap is approaching full.
   GenCollectedHeap* gch = GenCollectedHeap::heap();
-  clear_should_allocate_from_space();
-  if (collection_attempt_is_safe()) {
-    gch->clear_incremental_collection_will_fail();
+  if (full) {
+    DEBUG_ONLY(seen_incremental_collection_failed = false;)
+    if (!collection_attempt_is_safe()) {
+      gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
+      set_should_allocate_from_space(); // we seem to be running out of space
+    } else {
+      gch->clear_incremental_collection_failed(); // We just did a full collection
+      clear_should_allocate_from_space(); // if set
+    }
   } else {
-    gch->set_incremental_collection_will_fail();
-    if (full) { // we seem to be running out of space
-      set_should_allocate_from_space();
+#ifdef ASSERT
+    // It is possible that incremental_collection_failed() == true
+    // here, because an attempted scavenge did not succeed. The policy
+    // is normally expected to cause a full collection which should
+    // clear that condition, so we should not be here twice in a row
+    // with incremental_collection_failed() == true without having done
+    // a full collection in between.
+    if (!seen_incremental_collection_failed &&
+        gch->incremental_collection_failed()) {
+      seen_incremental_collection_failed = true;
+    } else if (seen_incremental_collection_failed) {
+      assert(!gch->incremental_collection_failed(), "Twice in a row");
+      seen_incremental_collection_failed = false;
     }
+#endif // ASSERT
   }
 
   if (ZapUnusedHeapArea) {
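
The epilogue rewrite above encodes a small state machine: only a full collection may set or clear the sticky flag from a fresh safety estimate, and in debug builds the history check ensures two young collections never fail back to back without a full collection between them. A condensed model, with invented names and a static local mirroring the DEBUG_ONLY variable:

#include <cassert>

struct Heap { bool incremental_collection_failed = false; };

void gc_epilogue(Heap& gch, bool full, bool collection_attempt_is_safe) {
  static bool seen_failed = false;             // debug-only history
  if (full) {
    seen_failed = false;
    // A full collection either re-asserts or clears the condition.
    gch.incremental_collection_failed = !collection_attempt_is_safe;
  } else if (!seen_failed && gch.incremental_collection_failed) {
    seen_failed = true;                        // first sighting after a young gc
  } else if (seen_failed) {
    assert(!gch.incremental_collection_failed && "Twice in a row");
    seen_failed = false;
  }
}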
--- a/src/share/vm/memory/defNewGeneration.hpp	Tue Oct 26 16:48:28 2010 -0700
+++ b/src/share/vm/memory/defNewGeneration.hpp	Mon Nov 01 10:49:14 2010 -0700
@@ -82,12 +82,6 @@
   Stack<oop>     _objs_with_preserved_marks;
   Stack<markOop> _preserved_marks_of_objs;
 
-  // Returns true if the collection can be safely attempted.
-  // If this method returns false, a collection is not
-  // guaranteed to fail but the system may not be able
-  // to recover from the failure.
-  bool collection_attempt_is_safe();
-
   // Promotion failure handling
   OopClosure *_promo_failure_scan_stack_closure;
   void set_promo_failure_scan_stack_closure(OopClosure *scan_stack_closure) {
@@ -304,6 +298,14 @@
 
   // GC support
   virtual void compute_new_size();
+
+  // Returns true if the collection is likely to be safely
+  // completed. Even if this method returns true, a collection
+  // may not be guaranteed to succeed, and the system should be
+  // able to safely unwind and recover from that failure, albeit
+  // at some additional cost. Override superclass's implementation.
+  virtual bool collection_attempt_is_safe();
+
   virtual void collect(bool   full,
                        bool   clear_all_soft_refs,
                        size_t size,
--- a/src/share/vm/memory/genCollectedHeap.cpp	Tue Oct 26 16:48:28 2010 -0700
+++ b/src/share/vm/memory/genCollectedHeap.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -142,8 +142,7 @@
   }
   _perm_gen = perm_gen_spec->init(heap_rs, PermSize, rem_set());
 
-  clear_incremental_collection_will_fail();
-  clear_last_incremental_collection_failed();
+  clear_incremental_collection_failed();
 
 #ifndef SERIALGC
   // If we are running CMS, create the collector responsible
@@ -1347,17 +1346,6 @@
 };
 
 void GenCollectedHeap::gc_epilogue(bool full) {
-  // Remember if a partial collection of the heap failed, and
-  // we did a complete collection.
-  if (full && incremental_collection_will_fail()) {
-    set_last_incremental_collection_failed();
-  } else {
-    clear_last_incremental_collection_failed();
-  }
-  // Clear the flag, if set; the generation gc_epilogues will set the
-  // flag again if the condition persists despite the collection.
-  clear_incremental_collection_will_fail();
-
 #ifdef COMPILER2
   assert(DerivedPointerTable::is_empty(), "derived pointer present");
   size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr()));
--- a/src/share/vm/memory/genCollectedHeap.hpp	Tue Oct 26 16:48:28 2010 -0700
+++ b/src/share/vm/memory/genCollectedHeap.hpp	Mon Nov 01 10:49:14 2010 -0700
@@ -62,11 +62,10 @@
   // The generational collector policy.
   GenCollectorPolicy* _gen_policy;
 
-  // If a generation would bail out of an incremental collection,
-  // it sets this flag.  If the flag is set, satisfy_failed_allocation
-  // will attempt allocating in all generations before doing a full GC.
-  bool _incremental_collection_will_fail;
-  bool _last_incremental_collection_failed;
+  // Indicates that the most recent incremental collection failed.
+  // The flag is cleared when an action is taken that might clear the
+  // condition that caused that incremental collection to fail.
+  bool _incremental_collection_failed;
 
   // In support of ExplicitGCInvokesConcurrent functionality
   unsigned int _full_collections_completed;
@@ -469,26 +468,26 @@
   // call to "save_marks".
   bool no_allocs_since_save_marks(int level);
 
+  // Returns true if an incremental collection is likely to fail.
+  bool incremental_collection_will_fail() {
+    // Assumes a 2-generation system; the first disjunct remembers if an
+    // incremental collection failed, even when we thought (second disjunct)
+    // that it would not.
+    assert(heap()->collector_policy()->is_two_generation_policy(),
+           "the following definition may not be suitable for an n(>2)-generation system");
+    return incremental_collection_failed() || !get_gen(0)->collection_attempt_is_safe();
+  }
+
   // If a generation bails out of an incremental collection,
   // it sets this flag.
-  bool incremental_collection_will_fail() {
-    return _incremental_collection_will_fail;
-  }
-  void set_incremental_collection_will_fail() {
-    _incremental_collection_will_fail = true;
-  }
-  void clear_incremental_collection_will_fail() {
-    _incremental_collection_will_fail = false;
+  bool incremental_collection_failed() const {
+    return _incremental_collection_failed;
   }
-
-  bool last_incremental_collection_failed() const {
-    return _last_incremental_collection_failed;
+  void set_incremental_collection_failed() {
+    _incremental_collection_failed = true;
   }
-  void set_last_incremental_collection_failed() {
-    _last_incremental_collection_failed = true;
-  }
-  void clear_last_incremental_collection_failed() {
-    _last_incremental_collection_failed = false;
+  void clear_incremental_collection_failed() {
+    _incremental_collection_failed = false;
   }
 
   // Promotion of obj into gen failed.  Try to promote obj to higher non-perm
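
A hypothetical caller (behavior invented for illustration, not taken from this patch) showing the intended division of labor: incremental_collection_will_fail() folds a fresh safety estimate into the sticky flag, and a full collection is the natural point at which the flag clears.

#include <cstdio>

struct MiniHeap {
  bool incremental_collection_failed = false;
  bool young_attempt_is_safe = true;       // stand-in for collection_attempt_is_safe()

  bool incremental_collection_will_fail() {
    return incremental_collection_failed || !young_attempt_is_safe;
  }
  void full_collection() {
    incremental_collection_failed = false; // worth retrying: space was freed
  }
  void failed_young_collection() {
    incremental_collection_failed = true;  // sticky until a full collection
  }
};

int main() {
  MiniHeap h;
  h.failed_young_collection();
  std::printf("will fail: %d\n", h.incremental_collection_will_fail()); // 1
  h.full_collection();
  std::printf("will fail: %d\n", h.incremental_collection_will_fail()); // 0
  return 0;
}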
--- a/src/share/vm/memory/generation.cpp	Tue Oct 26 16:48:28 2010 -0700
+++ b/src/share/vm/memory/generation.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -165,15 +165,16 @@
   return max;
 }
 
-bool Generation::promotion_attempt_is_safe(size_t promotion_in_bytes,
-                                           bool not_used) const {
+bool Generation::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
+  size_t available = max_contiguous_available();
+  bool   res = (available >= max_promotion_in_bytes);
   if (PrintGC && Verbose) {
-    gclog_or_tty->print_cr("Generation::promotion_attempt_is_safe"
-                " contiguous_available: " SIZE_FORMAT
-                " promotion_in_bytes: " SIZE_FORMAT,
-                max_contiguous_available(), promotion_in_bytes);
+    gclog_or_tty->print_cr(
+      "Generation: promo attempt is%s safe: available("SIZE_FORMAT") %s max_promo("SIZE_FORMAT")",
+      res? "":" not", available, res? ">=":"<",
+      max_promotion_in_bytes);
   }
-  return max_contiguous_available() >= promotion_in_bytes;
+  return res;
 }
 
 // Ignores "ref" and calls allocate().
--- a/src/share/vm/memory/generation.hpp	Tue Oct 26 16:48:28 2010 -0700
+++ b/src/share/vm/memory/generation.hpp	Mon Nov 01 10:49:14 2010 -0700
@@ -173,15 +173,11 @@
   // The largest number of contiguous free bytes in this or any higher generation.
   virtual size_t max_contiguous_available() const;
 
-  // Returns true if promotions of the specified amount can
-  // be attempted safely (without a vm failure).
+  // Returns true if promotions of the specified amount are
+  // likely to succeed without a promotion failure.
   // Promotion of the full amount is not guaranteed but
-  // can be attempted.
-  //   younger_handles_promotion_failure
-  // is true if the younger generation handles a promotion
-  // failure.
-  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes,
-    bool younger_handles_promotion_failure) const;
+  // might be attempted in the worst case.
+  virtual bool promotion_attempt_is_safe(size_t max_promotion_in_bytes) const;
 
   // For a non-young generation, this interface can be used to inform a
   // generation that a promotion attempt into that generation failed.
@@ -358,6 +354,16 @@
     return (full || should_allocate(word_size, is_tlab));
   }
 
+  // Returns true if the collection is likely to be safely
+  // completed. Even if this method returns true, a collection
+  // is not guaranteed to succeed, and the system should be
+  // able to safely unwind and recover from that failure, albeit
+  // at some additional cost.
+  virtual bool collection_attempt_is_safe() {
+    guarantee(false, "Are you sure you want to call this method?");
+    return true;
+  }
+
   // Perform a garbage collection.
   // If full is true attempt a full garbage collection of this generation.
   // Otherwise, attempting to (at least) free enough space to support an
--- a/src/share/vm/memory/tenuredGeneration.cpp	Tue Oct 26 16:48:28 2010 -0700
+++ b/src/share/vm/memory/tenuredGeneration.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -419,29 +419,16 @@
 void TenuredGeneration::verify_alloc_buffers_clean() {}
 #endif // SERIALGC
 
-bool TenuredGeneration::promotion_attempt_is_safe(
-    size_t max_promotion_in_bytes,
-    bool younger_handles_promotion_failure) const {
-
-  bool result = max_contiguous_available() >= max_promotion_in_bytes;
-
-  if (younger_handles_promotion_failure && !result) {
-    result = max_contiguous_available() >=
-      (size_t) gc_stats()->avg_promoted()->padded_average();
-    if (PrintGC && Verbose && result) {
-      gclog_or_tty->print_cr("TenuredGeneration::promotion_attempt_is_safe"
-                  " contiguous_available: " SIZE_FORMAT
-                  " avg_promoted: " SIZE_FORMAT,
-                  max_contiguous_available(),
-                  gc_stats()->avg_promoted()->padded_average());
-    }
-  } else {
-    if (PrintGC && Verbose) {
-      gclog_or_tty->print_cr("TenuredGeneration::promotion_attempt_is_safe"
-                  " contiguous_available: " SIZE_FORMAT
-                  " promotion_in_bytes: " SIZE_FORMAT,
-                  max_contiguous_available(), max_promotion_in_bytes);
-    }
+bool TenuredGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
+  size_t available = max_contiguous_available();
+  size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
+  bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
+  if (PrintGC && Verbose) {
+    gclog_or_tty->print_cr(
+      "Tenured: promo attempt is%s safe: available("SIZE_FORMAT") %s av_promo("SIZE_FORMAT"),"
+      "max_promo("SIZE_FORMAT")",
+      res? "":" not", available, res? ">=":"<",
+      av_promo, max_promotion_in_bytes);
   }
-  return result;
+  return res;
 }
--- a/src/share/vm/memory/tenuredGeneration.hpp	Tue Oct 26 16:48:28 2010 -0700
+++ b/src/share/vm/memory/tenuredGeneration.hpp	Mon Nov 01 10:49:14 2010 -0700
@@ -101,8 +101,7 @@
 
   virtual void update_gc_stats(int level, bool full);
 
-  virtual bool promotion_attempt_is_safe(size_t max_promoted_in_bytes,
-    bool younger_handles_promotion_failure) const;
+  virtual bool promotion_attempt_is_safe(size_t max_promoted_in_bytes) const;
 
   void verify_alloc_buffers_clean();
 };
--- a/src/share/vm/runtime/arguments.cpp	Tue Oct 26 16:48:28 2010 -0700
+++ b/src/share/vm/runtime/arguments.cpp	Mon Nov 01 10:49:14 2010 -0700
@@ -190,6 +190,10 @@
                            JDK_Version::jdk_update(6,18), JDK_Version::jdk(7) },
   { "UseDepthFirstScavengeOrder",
                            JDK_Version::jdk_update(6,22), JDK_Version::jdk(7) },
+  { "HandlePromotionFailure",
+                           JDK_Version::jdk_update(6,24), JDK_Version::jdk(8) },
+  { "MaxLiveObjectEvacuationRatio",
+                           JDK_Version::jdk_update(6,24), JDK_Version::jdk(8) },
   { NULL, JDK_Version(0), JDK_Version(0) }
 };
 
@@ -1728,8 +1732,6 @@
     status = false;
   }
 
-  status = status && verify_percentage(MaxLiveObjectEvacuationRatio,
-                              "MaxLiveObjectEvacuationRatio");
   status = status && verify_percentage(AdaptiveSizePolicyWeight,
                               "AdaptiveSizePolicyWeight");
   status = status && verify_percentage(AdaptivePermSizeWeight, "AdaptivePermSizeWeight");
--- a/src/share/vm/runtime/globals.hpp	Tue Oct 26 16:48:28 2010 -0700
+++ b/src/share/vm/runtime/globals.hpp	Mon Nov 01 10:49:14 2010 -0700
@@ -1588,7 +1588,7 @@
           "(Temporary, subject to experimentation)"                         \
           "Nominal minimum work per abortable preclean iteration")          \
                                                                             \
-  product(intx, CMSAbortablePrecleanWaitMillis, 100,                        \
+  manageable(intx, CMSAbortablePrecleanWaitMillis, 100,                     \
           "(Temporary, subject to experimentation)"                         \
           " Time that we sleep between iterations when not given"           \
           " enough work per iteration")                                     \
@@ -1680,7 +1680,7 @@
   product(uintx, CMSWorkQueueDrainThreshold, 10,                            \
           "Don't drain below this size per parallel worker/thief")          \
                                                                             \
-  product(intx, CMSWaitDuration, 2000,                                      \
+  manageable(intx, CMSWaitDuration, 2000,                                   \
           "Time in milliseconds that CMS thread waits for young GC")        \
                                                                             \
   product(bool, CMSYield, true,                                             \
@@ -1789,10 +1789,6 @@
   notproduct(bool, GCALotAtAllSafepoints, false,                            \
           "Enforce ScavengeALot/GCALot at all potential safepoints")        \
                                                                             \
-  product(bool, HandlePromotionFailure, true,                               \
-          "The youngest generation collection does not require "            \
-          "a guarantee of full promotion of all live objects.")             \
-                                                                            \
   product(bool, PrintPromotionFailure, false,                               \
           "Print additional diagnostic information following "              \
           " promotion failure")                                             \
@@ -3006,9 +3002,6 @@
   product(intx, NewRatio, 2,                                                \
           "Ratio of new/old generation sizes")                              \
                                                                             \
-  product(uintx, MaxLiveObjectEvacuationRatio, 100,                         \
-          "Max percent of eden objects that will be live at scavenge")      \
-                                                                            \
   product_pd(uintx, NewSizeThreadIncrease,                                  \
           "Additional size added to desired new generation size per "       \
           "non-daemon thread (in bytes)")                                   \