changeset 20336:6701abbc4441

8054818: Refactor HeapRegionSeq to manage heap region and auxiliary data
Summary: Let HeapRegionSeq manage the heap region and auxiliary data to decrease the amount of responsibilities of G1CollectedHeap, and encapsulate this work from other code.
Reviewed-by: jwilhelm, jmasa, mgerdin, brutisso
author tschatzl
date Tue, 19 Aug 2014 10:50:27 +0200
parents eec72fa4b108
children 1f1d373cd044
files
      agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1CollectedHeap.java
      agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1HeapRegionTable.java
      agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSeq.java
      src/share/vm/gc_implementation/g1/concurrentMark.cpp
      src/share/vm/gc_implementation/g1/concurrentMark.hpp
      src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp
      src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp
      src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
      src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
      src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp
      src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
      src/share/vm/gc_implementation/g1/heapRegion.cpp
      src/share/vm/gc_implementation/g1/heapRegion.hpp
      src/share/vm/gc_implementation/g1/heapRegionSeq.cpp
      src/share/vm/gc_implementation/g1/heapRegionSeq.hpp
      src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp
      src/share/vm/gc_implementation/g1/heapRegionSet.cpp
      src/share/vm/gc_implementation/g1/heapRegionSet.hpp
      src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp
      src/share/vm/gc_implementation/g1/vmStructs_g1.hpp
      src/share/vm/prims/whitebox.cpp
diffstat 21 files changed, 834 insertions(+), 956 deletions(-)
--- a/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1CollectedHeap.java	Tue Aug 19 12:39:06 2014 +0200
+++ b/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1CollectedHeap.java	Tue Aug 19 10:50:27 2014 +0200
@@ -45,8 +45,8 @@
 public class G1CollectedHeap extends SharedHeap {
     // HeapRegionSeq _seq;
     static private long hrsFieldOffset;
-    // MemRegion _g1_committed;
-    static private long g1CommittedFieldOffset;
+    // MemRegion _g1_reserved;
+    static private long g1ReservedFieldOffset;
     // size_t _summary_bytes_used;
     static private CIntegerField summaryBytesUsedField;
     // G1MonitoringSupport* _g1mm;
@@ -68,7 +68,6 @@
         Type type = db.lookupType("G1CollectedHeap");
 
         hrsFieldOffset = type.getField("_hrs").getOffset();
-        g1CommittedFieldOffset = type.getField("_g1_committed").getOffset();
         summaryBytesUsedField = type.getCIntegerField("_summary_bytes_used");
         g1mmField = type.getAddressField("_g1mm");
         oldSetFieldOffset = type.getField("_old_set").getOffset();
@@ -76,9 +75,7 @@
     }
 
     public long capacity() {
-        Address g1CommittedAddr = addr.addOffsetTo(g1CommittedFieldOffset);
-        MemRegion g1Committed = new MemRegion(g1CommittedAddr);
-        return g1Committed.byteSize();
+        return hrs().capacity();
     }
 
     public long used() {
--- a/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1HeapRegionTable.java	Tue Aug 19 12:39:06 2014 +0200
+++ b/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1HeapRegionTable.java	Tue Aug 19 10:50:27 2014 +0200
@@ -93,19 +93,35 @@
     private class HeapRegionIterator implements Iterator<HeapRegion> {
         private long index;
         private long length;
+        private HeapRegion next;
 
-        @Override
-        public boolean hasNext() { return index < length; }
+        public HeapRegion positionToNext() {
+          HeapRegion result = next;
+          while (index < length && at(index) == null) {
+            index++;
+          }
+          if (index < length) {
+            next = at(index);
+            index++; // restart search at next element
+          } else {
+            next = null;
+          }
+          return result;
+        }
 
         @Override
-        public HeapRegion next() { return at(index++);    }
+        public boolean hasNext() { return next != null;     }
+
+        @Override
+        public HeapRegion next() { return positionToNext(); }
 
         @Override
-        public void remove()     { /* not supported */    }
+        public void remove()     { /* not supported */      }
 
-        HeapRegionIterator(long committedLength) {
+        HeapRegionIterator(long totalLength) {
             index = 0;
-            length = committedLength;
+            length = totalLength;
+            positionToNext();
         }
     }
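
Note on the iterator change above: after this patch the SA region table spans the whole reserved heap, so slots for uncommitted regions hold null and the iterator must skip them (hence the at(index) == null loop). Below is a minimal standalone C++ model of the same pre-positioning pattern; SparseIter and its contents are hypothetical names, not HotSpot or SA code.

    // Pre-positioning, null-skipping iteration: prime "next_elem" in the
    // constructor, then each next() returns it and searches ahead.
    #include <cstdio>
    #include <vector>

    struct SparseIter {
      const std::vector<const char*>& table;
      size_t index;
      const char* next_elem;

      explicit SparseIter(const std::vector<const char*>& t)
          : table(t), index(0), next_elem(nullptr) {
        position_to_next();                  // prime next_elem, as above
      }
      const char* position_to_next() {
        const char* result = next_elem;
        while (index < table.size() && table[index] == nullptr) index++;
        if (index < table.size()) { next_elem = table[index]; index++; }
        else                      { next_elem = nullptr; }
        return result;
      }
      bool has_next() const { return next_elem != nullptr; }
      const char* next()    { return position_to_next(); }
    };

    int main() {
      std::vector<const char*> regions = {"r0", nullptr, "r2", nullptr};
      SparseIter it(regions);
      while (it.has_next()) std::printf("%s\n", it.next());  // prints r0, r2
      return 0;
    }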
 
--- a/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSeq.java	Tue Aug 19 12:39:06 2014 +0200
+++ b/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSeq.java	Tue Aug 19 10:50:27 2014 +0200
@@ -43,7 +43,7 @@
     // G1HeapRegionTable _regions
     static private long regionsFieldOffset;
     // uint _committed_length
-    static private CIntegerField committedLengthField;
+    static private CIntegerField numCommittedField;
 
     static {
         VM.registerVMInitializedObserver(new Observer() {
@@ -57,7 +57,7 @@
         Type type = db.lookupType("HeapRegionSeq");
 
         regionsFieldOffset = type.getField("_regions").getOffset();
-        committedLengthField = type.getCIntegerField("_committed_length");
+        numCommittedField = type.getCIntegerField("_num_committed");
     }
 
     private G1HeapRegionTable regions() {
@@ -66,16 +66,20 @@
                                                              regionsAddr);
     }
 
+    public long capacity() {
+        return length() * HeapRegion.grainBytes();
+    }
+
     public long length() {
         return regions().length();
     }
 
     public long committedLength() {
-        return committedLengthField.getValue(addr);
+        return numCommittedField.getValue(addr);
     }
 
     public Iterator<HeapRegion> heapRegionIterator() {
-        return regions().heapRegionIterator(committedLength());
+        return regions().heapRegionIterator(length());
     }
 
     public HeapRegionSeq(Address addr) {
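
The new SA capacity() above derives heap capacity arithmetically from the region count and the fixed region size, instead of reading a committed MemRegion from the heap. Purely illustrative arithmetic follows; the grain size and count are assumptions, not values taken from this change.

    // capacity = number_of_regions * GrainBytes, worked with sample numbers.
    #include <cstdio>
    #include <cstdint>

    int main() {
      const uint64_t grain_bytes = 1024 * 1024;  // assume 1 MiB G1 regions
      const uint64_t num_regions = 2048;         // hypothetical region count
      const uint64_t capacity    = num_regions * grain_bytes;
      std::printf("capacity = %llu bytes\n", (unsigned long long)capacity);
      return 0;
    }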
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Tue Aug 19 12:39:06 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Tue Aug 19 10:50:27 2014 +0200
@@ -728,14 +728,13 @@
   _completed_initialization = true;
 }
 
-void ConcurrentMark::update_g1_committed(bool force) {
+void ConcurrentMark::update_heap_boundaries(MemRegion bounds, bool force) {
   // If concurrent marking is not in progress, then we do not need to
   // update _heap_end.
   if (!concurrent_marking_in_progress() && !force) return;
 
-  MemRegion committed = _g1h->g1_committed();
-  assert(committed.start() == _heap_start, "start shouldn't change");
-  HeapWord* new_end = committed.end();
+  assert(bounds.start() == _heap_start, "start shouldn't change");
+  HeapWord* new_end = bounds.end();
   if (new_end > _heap_end) {
     // The heap has been expanded.
 
@@ -826,7 +825,7 @@
     assert(out_of_regions(),
            err_msg("only way to get here: _finger: "PTR_FORMAT", _heap_end: "PTR_FORMAT,
                    p2i(_finger), p2i(_heap_end)));
-    update_g1_committed(true);
+    update_heap_boundaries(_g1h->g1_committed(), true);
   }
 }
 
@@ -2194,7 +2193,7 @@
   // Noone else should be accessing the _cleanup_list at this point,
   // so it's not necessary to take any locks
   while (!_cleanup_list.is_empty()) {
-    HeapRegion* hr = _cleanup_list.remove_head();
+    HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
     assert(hr != NULL, "Got NULL from a non-empty list");
     hr->par_clear();
     tmp_free_list.add_ordered(hr);
--- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Tue Aug 19 12:39:06 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Tue Aug 19 10:50:27 2014 +0200
@@ -798,7 +798,7 @@
   // that CM is notified of where the new end of the heap is. It
   // doesn't do anything if concurrent_marking_in_progress() is false,
   // unless the force parameter is true.
-  void update_g1_committed(bool force = false);
+  void update_heap_boundaries(MemRegion bounds, bool force = false);
 
   bool isMarked(oop p) const {
     assert(p != NULL && p->is_oop(), "expected an oop");
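
The renamed hook no longer reads the committed region from the heap itself; the caller passes the bounds in, which keeps HeapRegionSeq the single owner of that information. A standalone sketch of the grow-only contract, with addresses modeled as plain integers (not HotSpot code):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    struct MarkerBounds {
      uintptr_t heap_start, heap_end;
      bool marking_in_progress;

      void update_heap_boundaries(uintptr_t start, uintptr_t end, bool force) {
        if (!marking_in_progress && !force) return;  // nothing to refresh
        assert(start == heap_start && "start shouldn't change");
        if (end > heap_end) heap_end = end;          // the heap was expanded
      }
    };

    int main() {
      MarkerBounds cm = {0x1000, 0x9000, true};
      cm.update_heap_boundaries(0x1000, 0xb000, false);  // observe expansion
      std::printf("new end: %#lx\n", (unsigned long)cm.heap_end);
      return 0;
    }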
--- a/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp	Tue Aug 19 12:39:06 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp	Tue Aug 19 10:50:27 2014 +0200
@@ -675,8 +675,7 @@
   assert(new_top <= _end, "_end should have already been updated");
 
   // The first BOT entry should have offset 0.
-  zero_bottom_entry();
-  initialize_threshold();
+  reset_bot();
   alloc_block(_bottom, new_top);
  }
 
--- a/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp	Tue Aug 19 12:39:06 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp	Tue Aug 19 10:50:27 2014 +0200
@@ -231,10 +231,6 @@
 
   void set_bottom(HeapWord* new_bottom);
 
-  // Updates all the BlockOffsetArray's sharing this shared array to
-  // reflect the current "top"'s of their spaces.
-  void update_offset_arrays();
-
   // Return the appropriate index into "_offset_array" for "p".
   inline size_t index_for(const void* p) const;
 
@@ -480,6 +476,8 @@
                       blk_start, blk_end);
   }
 
+  // Zero out the entry for _bottom (offset will be zero).
+  void zero_bottom_entry();
  public:
   G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array, MemRegion mr);
 
@@ -487,8 +485,10 @@
   // bottom of the covered region.
   HeapWord* initialize_threshold();
 
-  // Zero out the entry for _bottom (offset will be zero).
-  void      zero_bottom_entry();
+  void reset_bot() {
+    zero_bottom_entry();
+    initialize_threshold();
+  }
 
   // Return the next threshold, the point at which the table should be
   // updated.
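
zero_bottom_entry() and initialize_threshold() were only ever meaningful as a pair, so the patch hides them behind reset_bot(). The refactoring pattern in isolation, with Bot and its fields as hypothetical stand-ins:

    #include <cstddef>

    class Bot {
      size_t offsets_[16];
      size_t threshold_;
      void zero_bottom_entry()    { offsets_[0] = 0; }  // first entry: offset 0
      void initialize_threshold() { threshold_ = 1; }   // next update point
    public:
      // Callers can no longer run the two steps separately or out of order.
      void reset_bot() {
        zero_bottom_entry();
        initialize_threshold();
      }
    };

    int main() { Bot b; b.reset_bot(); return 0; }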
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Aug 19 12:39:06 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Aug 19 10:50:27 2014 +0200
@@ -48,7 +48,7 @@
 #include "gc_implementation/g1/g1YCTypes.hpp"
 #include "gc_implementation/g1/heapRegion.inline.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
-#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/g1/heapRegionSet.inline.hpp"
 #include "gc_implementation/g1/vm_operations_g1.hpp"
 #include "gc_implementation/shared/gcHeapSummary.hpp"
 #include "gc_implementation/shared/gcTimer.hpp"
@@ -519,9 +519,9 @@
       // again to allocate from it.
       append_secondary_free_list();
 
-      assert(!_free_list.is_empty(), "if the secondary_free_list was not "
+      assert(_hrs.num_free_regions() > 0, "if the secondary_free_list was not "
              "empty we should have moved at least one entry to the free_list");
-      HeapRegion* res = _free_list.remove_region(is_old);
+      HeapRegion* res = _hrs.allocate_free_region(is_old);
       if (G1ConcRegionFreeingVerbose) {
         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
                                "allocated "HR_FORMAT" from secondary_free_list",
@@ -562,7 +562,7 @@
     }
   }
 
-  res = _free_list.remove_region(is_old);
+  res = _hrs.allocate_free_region(is_old);
 
   if (res == NULL) {
     if (G1ConcRegionFreeingVerbose) {
@@ -587,8 +587,8 @@
       // Given that expand() succeeded in expanding the heap, and we
       // always expand the heap by an amount aligned to the heap
       // region size, the free list should in theory not be empty.
-      // In either case remove_region() will check for NULL.
-      res = _free_list.remove_region(is_old);
+      // In either case allocate_free_region() will check for NULL.
+      res = _hrs.allocate_free_region(is_old);
     } else {
       _expand_heap_after_alloc_failure = false;
     }
@@ -596,55 +596,11 @@
   return res;
 }
 
-uint G1CollectedHeap::humongous_obj_allocate_find_first(uint num_regions,
-                                                        size_t word_size) {
-  assert(isHumongous(word_size), "word_size should be humongous");
-  assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
-
-  uint first = G1_NULL_HRS_INDEX;
-  if (num_regions == 1) {
-    // Only one region to allocate, no need to go through the slower
-    // path. The caller will attempt the expansion if this fails, so
-    // let's not try to expand here too.
-    HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
-    if (hr != NULL) {
-      first = hr->hrs_index();
-    } else {
-      first = G1_NULL_HRS_INDEX;
-    }
-  } else {
-    // We can't allocate humongous regions while cleanupComplete() is
-    // running, since some of the regions we find to be empty might not
-    // yet be added to the free list and it is not straightforward to
-    // know which list they are on so that we can remove them. Note
-    // that we only need to do this if we need to allocate more than
-    // one region to satisfy the current humongous allocation
-    // request. If we are only allocating one region we use the common
-    // region allocation code (see above).
-    wait_while_free_regions_coming();
-    append_secondary_free_list_if_not_empty_with_lock();
-
-    if (free_regions() >= num_regions) {
-      first = _hrs.find_contiguous(num_regions);
-      if (first != G1_NULL_HRS_INDEX) {
-        for (uint i = first; i < first + num_regions; ++i) {
-          HeapRegion* hr = region_at(i);
-          assert(hr->is_empty(), "sanity");
-          assert(is_on_master_free_list(hr), "sanity");
-          hr->set_pending_removal(true);
-        }
-        _free_list.remove_all_pending(num_regions);
-      }
-    }
-  }
-  return first;
-}
-
 HeapWord*
 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
                                                            uint num_regions,
                                                            size_t word_size) {
-  assert(first != G1_NULL_HRS_INDEX, "pre-condition");
+  assert(first != G1_NO_HRS_INDEX, "pre-condition");
   assert(isHumongous(word_size), "word_size should be humongous");
   assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
 
@@ -782,42 +738,69 @@
 
   verify_region_sets_optional();
 
-  size_t word_size_rounded = round_to(word_size, HeapRegion::GrainWords);
-  uint num_regions = (uint) (word_size_rounded / HeapRegion::GrainWords);
-  uint x_num = expansion_regions();
-  uint fs = _hrs.free_suffix();
-  uint first = humongous_obj_allocate_find_first(num_regions, word_size);
-  if (first == G1_NULL_HRS_INDEX) {
-    // The only thing we can do now is attempt expansion.
-    if (fs + x_num >= num_regions) {
-      // If the number of regions we're trying to allocate for this
-      // object is at most the number of regions in the free suffix,
-      // then the call to humongous_obj_allocate_find_first() above
-      // should have succeeded and we wouldn't be here.
-      //
-      // We should only be trying to expand when the free suffix is
-      // not sufficient for the object _and_ we have some expansion
-      // room available.
-      assert(num_regions > fs, "earlier allocation should have succeeded");
-
+  uint first = G1_NO_HRS_INDEX;
+  uint obj_regions = (uint)(align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords);
+
+  if (obj_regions == 1) {
+    // Only one region to allocate, try to use a fast path by directly allocating
+    // from the free lists. Do not try to expand here, we will potentially do that
+    // later.
+    HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
+    if (hr != NULL) {
+      first = hr->hrs_index();
+    }
+  } else {
+    // We can't allocate humongous regions spanning more than one region while
+    // cleanupComplete() is running, since some of the regions we find to be
+    // empty might not yet be added to the free list. It is not straightforward
+    // to know which list they are on so that we can remove them. We only
+    // need to do this if we need to allocate more than one region to satisfy the
+    // current humongous allocation request. If we are only allocating one region
+    // we use the one-region region allocation code (see above), or end up here.
+    wait_while_free_regions_coming();
+    append_secondary_free_list_if_not_empty_with_lock();
+
+    // Policy: Try only empty regions (i.e. already committed first). Maybe we
+    // are lucky enough to find some.
+    first = _hrs.find_contiguous(obj_regions, true);
+    if (first != G1_NO_HRS_INDEX) {
+      _hrs.allocate_free_regions_starting_at(first, obj_regions);
+    }
+  }
+
+  if (first == G1_NO_HRS_INDEX) {
+    // Policy: We could not find enough regions for the humongous object in the
+    // free list. Look through the heap to find a mix of free and uncommitted regions.
+    // If so, try expansion.
+    first = _hrs.find_contiguous(obj_regions, false);
+    if (first != G1_NO_HRS_INDEX) {
+      // We found something. Make sure these regions are committed, i.e. expand
+      // the heap. Alternatively we could do a defragmentation GC.
       ergo_verbose1(ErgoHeapSizing,
                     "attempt heap expansion",
                     ergo_format_reason("humongous allocation request failed")
                     ergo_format_byte("allocation request"),
                     word_size * HeapWordSize);
-      if (expand((num_regions - fs) * HeapRegion::GrainBytes)) {
-        // Even though the heap was expanded, it might not have
-        // reached the desired size. So, we cannot assume that the
-        // allocation will succeed.
-        first = humongous_obj_allocate_find_first(num_regions, word_size);
+
+      _hrs.expand_at(first, obj_regions);
+      g1_policy()->record_new_heap_size(num_regions());
+
+#ifdef ASSERT
+      for (uint i = first; i < first + obj_regions; ++i) {
+        HeapRegion* hr = region_at(i);
+        assert(hr->is_empty(), "sanity");
+        assert(is_on_master_free_list(hr), "sanity");
       }
+#endif
+      _hrs.allocate_free_regions_starting_at(first, obj_regions);
+    } else {
+      // Policy: Potentially trigger a defragmentation GC.
     }
   }
 
   HeapWord* result = NULL;
-  if (first != G1_NULL_HRS_INDEX) {
-    result =
-      humongous_obj_allocate_initialize_regions(first, num_regions, word_size);
+  if (first != G1_NO_HRS_INDEX) {
+    result = humongous_obj_allocate_initialize_regions(first, obj_regions, word_size);
     assert(result != NULL, "it should always return a valid result");
 
     // A successful humongous object allocation changes the used space
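
The control flow above amounts to a two-pass contiguous search: first look for a run of already-committed free regions, and only if that fails look again allowing uncommitted slots, then commit (expand) the run that was found. A self-contained model of that policy; RegionState and find_contiguous are illustrative names, not the HeapRegionSeq API:

    #include <cstdio>
    #include <vector>

    enum RegionState { Uncommitted, FreeCommitted, Used };
    static const int NO_INDEX = -1;

    // Find 'num' adjacent regions that are FreeCommitted, or, when
    // empty_only == false, FreeCommitted or Uncommitted.
    int find_contiguous(const std::vector<RegionState>& r, int num, bool empty_only) {
      int run_start = NO_INDEX, run_len = 0;
      for (int i = 0; i < (int)r.size(); i++) {
        bool ok = (r[i] == FreeCommitted) || (!empty_only && r[i] == Uncommitted);
        if (!ok)            { run_len = 0; continue; }
        if (run_len++ == 0) { run_start = i; }
        if (run_len == num) { return run_start; }
      }
      return NO_INDEX;
    }

    int main() {
      std::vector<RegionState> heap = {Used, FreeCommitted, Used,
                                       FreeCommitted, Uncommitted, Uncommitted};
      int first = find_contiguous(heap, 3, true);    // pass 1: committed only
      if (first == NO_INDEX) {
        first = find_contiguous(heap, 3, false);     // pass 2: allow expansion
        if (first != NO_INDEX) {
          for (int i = first; i < first + 3; i++)    // "expand_at": commit run
            if (heap[i] == Uncommitted) heap[i] = FreeCommitted;
        }
      }
      std::printf("first = %d\n", first);            // prints 3
      return 0;
    }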
@@ -1380,7 +1363,7 @@
         G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
       }
 
-      assert(free_regions() == 0, "we should not have added any free regions");
+      assert(num_free_regions() == 0, "we should not have added any free regions");
       rebuild_region_sets(false /* free_list_only */);
 
       // Enqueue any discovered reference objects that have
@@ -1745,21 +1728,6 @@
   return NULL;
 }
 
-void G1CollectedHeap::update_committed_space(HeapWord* old_end,
-                                             HeapWord* new_end) {
-  assert(old_end != new_end, "don't call this otherwise");
-  assert((HeapWord*) _g1_storage.high() == new_end, "invariant");
-
-  // Update the committed mem region.
-  _g1_committed.set_end(new_end);
-  // Tell the card table about the update.
-  Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
-  // Tell the BOT about the update.
-  _bot_shared->resize(_g1_committed.word_size());
-  // Tell the hot card cache about the update
-  _cg1r->hot_card_cache()->resize_card_counts(capacity());
-}
-
 bool G1CollectedHeap::expand(size_t expand_bytes) {
   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
   aligned_expand_bytes = align_size_up(aligned_expand_bytes,
@@ -1770,55 +1738,22 @@
                 ergo_format_byte("attempted expansion amount"),
                 expand_bytes, aligned_expand_bytes);
 
-  if (_g1_storage.uncommitted_size() == 0) {
+  if (is_maximal_no_gc()) {
     ergo_verbose0(ErgoHeapSizing,
                       "did not expand the heap",
                       ergo_format_reason("heap already fully expanded"));
     return false;
   }
 
-  // First commit the memory.
-  HeapWord* old_end = (HeapWord*) _g1_storage.high();
-  bool successful = _g1_storage.expand_by(aligned_expand_bytes);
-  if (successful) {
-    // Then propagate this update to the necessary data structures.
-    HeapWord* new_end = (HeapWord*) _g1_storage.high();
-    update_committed_space(old_end, new_end);
-
-    FreeRegionList expansion_list("Local Expansion List");
-    MemRegion mr = _hrs.expand_by(old_end, new_end, &expansion_list);
-    assert(mr.start() == old_end, "post-condition");
-    // mr might be a smaller region than what was requested if
-    // expand_by() was unable to allocate the HeapRegion instances
-    assert(mr.end() <= new_end, "post-condition");
-
-    size_t actual_expand_bytes = mr.byte_size();
+  uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
+  assert(regions_to_expand > 0, "Must expand by at least one region");
+
+  uint expanded_by = _hrs.expand_by(regions_to_expand);
+
+  if (expanded_by > 0) {
+    size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
     assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
-    assert(actual_expand_bytes == expansion_list.total_capacity_bytes(),
-           "post-condition");
-    if (actual_expand_bytes < aligned_expand_bytes) {
-      // We could not expand _hrs to the desired size. In this case we
-      // need to shrink the committed space accordingly.
-      assert(mr.end() < new_end, "invariant");
-
-      size_t diff_bytes = aligned_expand_bytes - actual_expand_bytes;
-      // First uncommit the memory.
-      _g1_storage.shrink_by(diff_bytes);
-      // Then propagate this update to the necessary data structures.
-      update_committed_space(new_end, mr.end());
-    }
-    _free_list.add_as_tail(&expansion_list);
-
-    if (_hr_printer.is_active()) {
-      HeapWord* curr = mr.start();
-      while (curr < mr.end()) {
-        HeapWord* curr_end = curr + HeapRegion::GrainWords;
-        _hr_printer.commit(curr, curr_end);
-        curr = curr_end;
-      }
-      assert(curr == mr.end(), "post-condition");
-    }
-    g1_policy()->record_new_heap_size(n_regions());
+    g1_policy()->record_new_heap_size(num_regions());
   } else {
     ergo_verbose0(ErgoHeapSizing,
                   "did not expand the heap",
@@ -1826,12 +1761,12 @@
     // The expansion of the virtual storage space was unsuccessful.
     // Let's see if it was because we ran out of swap.
     if (G1ExitOnExpansionFailure &&
-        _g1_storage.uncommitted_size() >= aligned_expand_bytes) {
+        _hrs.available() >= regions_to_expand) {
       // We had head room...
       vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
     }
   }
-  return successful;
+  return regions_to_expand > 0;
 }
 
 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
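
expand() now deals in whole regions: the byte request is aligned up, converted to a region count, and _hrs.expand_by() reports how many regions it actually managed to commit. Worked arithmetic with assumed numbers (nothing below is HotSpot API):

    #include <cstdio>
    #include <cstdint>

    int main() {
      const uint64_t grain = 1u << 20;                     // assume 1 MiB regions
      uint64_t expand_bytes = 5 * grain + 12345;           // raw request
      uint64_t aligned = (expand_bytes + grain - 1) / grain * grain;  // align up
      uint64_t regions_to_expand = aligned / grain;        // 6 regions
      uint64_t expanded_by = 4;                            // suppose only 4 fit
      std::printf("requested %llu regions, committed %llu (= %llu bytes)\n",
                  (unsigned long long)regions_to_expand,
                  (unsigned long long)expanded_by,
                  (unsigned long long)(expanded_by * grain));
      return 0;
    }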
@@ -1842,7 +1777,6 @@
   uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
 
   uint num_regions_removed = _hrs.shrink_by(num_regions_to_remove);
-  HeapWord* old_end = (HeapWord*) _g1_storage.high();
   size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
 
   ergo_verbose3(ErgoHeapSizing,
@@ -1852,22 +1786,7 @@
                 ergo_format_byte("attempted shrinking amount"),
                 shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
   if (num_regions_removed > 0) {
-    _g1_storage.shrink_by(shrunk_bytes);
-    HeapWord* new_end = (HeapWord*) _g1_storage.high();
-
-    if (_hr_printer.is_active()) {
-      HeapWord* curr = old_end;
-      while (curr > new_end) {
-        HeapWord* curr_end = curr;
-        curr -= HeapRegion::GrainWords;
-        _hr_printer.uncommit(curr, curr_end);
-      }
-    }
-
-    _expansion_regions += num_regions_removed;
-    update_committed_space(old_end, new_end);
-    HeapRegionRemSet::shrink_heap(n_regions());
-    g1_policy()->record_new_heap_size(n_regions());
+    g1_policy()->record_new_heap_size(num_regions());
   } else {
     ergo_verbose0(ErgoHeapSizing,
                   "did not shrink the heap",
@@ -1918,7 +1837,6 @@
   _g1mm(NULL),
   _refine_cte_cl(NULL),
   _full_collection(false),
-  _free_list("Master Free List", new MasterFreeRegionListMtSafeChecker()),
   _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
   _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
@@ -2047,14 +1965,9 @@
 
   // Carve out the G1 part of the heap.
 
-  ReservedSpace g1_rs   = heap_rs.first_part(max_byte_size);
-  _g1_reserved = MemRegion((HeapWord*)g1_rs.base(),
-                           g1_rs.size()/HeapWordSize);
-
-  _g1_storage.initialize(g1_rs, 0);
-  _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
-  _hrs.initialize((HeapWord*) _g1_reserved.start(),
-                  (HeapWord*) _g1_reserved.end());
+  ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
+  _hrs.initialize(g1_rs);
+
   assert(_hrs.max_length() == _expansion_regions,
          err_msg("max length: %u expansion regions: %u",
                  _hrs.max_length(), _expansion_regions));
@@ -2079,8 +1992,8 @@
 
   _g1h = this;
 
-  _in_cset_fast_test.initialize(_g1_reserved.start(), _g1_reserved.end(), HeapRegion::GrainBytes);
-  _humongous_is_live.initialize(_g1_reserved.start(), _g1_reserved.end(), HeapRegion::GrainBytes);
+  _in_cset_fast_test.initialize(_hrs.reserved().start(), _hrs.reserved().end(), HeapRegion::GrainBytes);
+  _humongous_is_live.initialize(_hrs.reserved().start(), _hrs.reserved().end(), HeapRegion::GrainBytes);
 
   // Create the ConcurrentMark data structure and thread.
   // (Must do this late, so that "max_regions" is defined.)
@@ -2139,12 +2052,10 @@
   // counts and that mechanism.
   SpecializationStats::clear();
 
-  // Here we allocate the dummy full region that is required by the
-  // G1AllocRegion class. If we don't pass an address in the reserved
-  // space here, lots of asserts fire.
-
-  HeapRegion* dummy_region = new_heap_region(0 /* index of bottom region */,
-                                             _g1_reserved.start());
+  // Here we allocate the dummy HeapRegion that is required by the
+  // G1AllocRegion class.
+
+  HeapRegion* dummy_region = _hrs.get_dummy_region();
   // We'll re-use the same region whether the alloc region will
   // require BOT updates or not and, if it doesn't, then a non-young
   // region will complain that it cannot support allocations without
@@ -2260,7 +2171,7 @@
 }
 
 size_t G1CollectedHeap::capacity() const {
-  return _g1_committed.byte_size();
+  return _hrs.length() * HeapRegion::GrainBytes;
 }
 
 void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
@@ -2369,7 +2280,7 @@
 }
 
 size_t G1CollectedHeap::unsafe_max_alloc() {
-  if (free_regions() > 0) return HeapRegion::GrainBytes;
+  if (num_free_regions() > 0) return HeapRegion::GrainBytes;
   // otherwise, is there space in the current allocation region?
 
   // We need to store the current allocation region in a local variable
@@ -2584,7 +2495,7 @@
 }
 
 bool G1CollectedHeap::is_in(const void* p) const {
-  if (_g1_committed.contains(p)) {
+  if (_hrs.committed().contains(p)) {
     // Given that we know that p is in the committed space,
     // heap_region_containing_raw() should successfully
     // return the containing region.
@@ -2659,83 +2570,9 @@
 void
 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
                                                  uint worker_id,
-                                                 uint no_of_par_workers,
-                                                 jint claim_value) {
-  const uint regions = n_regions();
-  const uint max_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
-                             no_of_par_workers :
-                             1);
-  assert(UseDynamicNumberOfGCThreads ||
-         no_of_par_workers == workers()->total_workers(),
-         "Non dynamic should use fixed number of workers");
-  // try to spread out the starting points of the workers
-  const HeapRegion* start_hr =
-                        start_region_for_worker(worker_id, no_of_par_workers);
-  const uint start_index = start_hr->hrs_index();
-
-  // each worker will actually look at all regions
-  for (uint count = 0; count < regions; ++count) {
-    const uint index = (start_index + count) % regions;
-    assert(0 <= index && index < regions, "sanity");
-    HeapRegion* r = region_at(index);
-    // we'll ignore "continues humongous" regions (we'll process them
-    // when we come across their corresponding "start humongous"
-    // region) and regions already claimed
-    if (r->claim_value() == claim_value || r->continuesHumongous()) {
-      continue;
-    }
-    // OK, try to claim it
-    if (r->claimHeapRegion(claim_value)) {
-      // success!
-      assert(!r->continuesHumongous(), "sanity");
-      if (r->startsHumongous()) {
-        // If the region is "starts humongous" we'll iterate over its
-        // "continues humongous" first; in fact we'll do them
-        // first. The order is important. In on case, calling the
-        // closure on the "starts humongous" region might de-allocate
-        // and clear all its "continues humongous" regions and, as a
-        // result, we might end up processing them twice. So, we'll do
-        // them first (notice: most closures will ignore them anyway) and
-        // then we'll do the "starts humongous" region.
-        for (uint ch_index = index + 1; ch_index < regions; ++ch_index) {
-          HeapRegion* chr = region_at(ch_index);
-
-          // if the region has already been claimed or it's not
-          // "continues humongous" we're done
-          if (chr->claim_value() == claim_value ||
-              !chr->continuesHumongous()) {
-            break;
-          }
-
-          // No one should have claimed it directly. We can given
-          // that we claimed its "starts humongous" region.
-          assert(chr->claim_value() != claim_value, "sanity");
-          assert(chr->humongous_start_region() == r, "sanity");
-
-          if (chr->claimHeapRegion(claim_value)) {
-            // we should always be able to claim it; no one else should
-            // be trying to claim this region
-
-            bool res2 = cl->doHeapRegion(chr);
-            assert(!res2, "Should not abort");
-
-            // Right now, this holds (i.e., no closure that actually
-            // does something with "continues humongous" regions
-            // clears them). We might have to weaken it in the future,
-            // but let's leave these two asserts here for extra safety.
-            assert(chr->continuesHumongous(), "should still be the case");
-            assert(chr->humongous_start_region() == r, "sanity");
-          } else {
-            guarantee(false, "we should not reach here");
-          }
-        }
-      }
-
-      assert(!r->continuesHumongous(), "sanity");
-      bool res = cl->doHeapRegion(r);
-      assert(!res, "Should not abort");
-    }
-  }
+                                                 uint num_workers,
+                                                 jint claim_value) const {
+  _hrs.par_iterate(cl, worker_id, num_workers, claim_value);
 }
 
 class ResetClaimValuesClosure: public HeapRegionClosure {
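
The roughly eighty lines removed above move, nearly unchanged, into HeapRegionSeq::par_iterate. The heart of the protocol is simple: stagger each worker's starting index, walk all regions modulo the length, and claim each region atomically so exactly one worker processes it. A minimal model with std::atomic follows; all names are hypothetical, and the real code additionally special-cases humongous regions.

    #include <atomic>
    #include <cstdio>
    #include <thread>
    #include <vector>

    int main() {
      const unsigned num_regions = 16, num_workers = 4;
      std::vector<std::atomic<int>> claimed(num_regions);  // 0 = unclaimed
      std::atomic<unsigned> processed(0);

      auto worker = [&](unsigned id) {
        // Stagger start points so workers do not contend on the same regions.
        unsigned start = num_regions * id / num_workers;
        for (unsigned count = 0; count < num_regions; count++) {
          unsigned index = (start + count) % num_regions;
          int expected = 0;
          if (claimed[index].compare_exchange_strong(expected, 1)) {
            processed.fetch_add(1);                        // we own this region
          }
        }
      };

      std::vector<std::thread> pool;
      for (unsigned i = 0; i < num_workers; i++) pool.emplace_back(worker, i);
      for (auto& t : pool) t.join();
      std::printf("processed %u of %u regions exactly once\n",
                  processed.load(), num_regions);
      return 0;
    }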
@@ -2913,17 +2750,6 @@
   return result;
 }
 
-HeapRegion* G1CollectedHeap::start_region_for_worker(uint worker_i,
-                                                     uint no_of_par_workers) {
-  uint worker_num =
-           G1CollectedHeap::use_parallel_gc_threads() ? no_of_par_workers : 1U;
-  assert(UseDynamicNumberOfGCThreads ||
-         no_of_par_workers == workers()->total_workers(),
-         "Non dynamic should use fixed number of workers");
-  const uint start_index = n_regions() * worker_i / worker_num;
-  return region_at(start_index);
-}
-
 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
   HeapRegion* r = g1_policy()->collection_set();
   while (r != NULL) {
@@ -2966,15 +2792,11 @@
 }
 
 HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
-  // We're not using an iterator given that it will wrap around when
-  // it reaches the last region and this is not what we want here.
-  for (uint index = from->hrs_index() + 1; index < n_regions(); index++) {
-    HeapRegion* hr = region_at(index);
-    if (!hr->isHumongous()) {
-      return hr;
-    }
-  }
-  return NULL;
+  HeapRegion* result = _hrs.next_region_in_heap(from);
+  while (result != NULL && result->isHumongous()) {
+    result = _hrs.next_region_in_heap(result);
+  }
+  return result;
 }
 
 Space* G1CollectedHeap::space_containing(const void* addr) const {
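
next_compaction_region() collapses to a filtered-successor walk: take the next region in the heap, skipping humongous ones. The same shape in a standalone sketch (Region and the helpers are hypothetical):

    #include <cstdio>
    #include <vector>

    struct Region { int index; bool humongous; };

    // Returns the next region after 'from', or nullptr at the end of the heap.
    const Region* next_region(const std::vector<Region>& heap, const Region* from) {
      size_t i = from->index + 1;
      return i < heap.size() ? &heap[i] : nullptr;
    }

    const Region* next_compaction_region(const std::vector<Region>& heap,
                                         const Region* from) {
      const Region* r = next_region(heap, from);
      while (r != nullptr && r->humongous) {   // skip humongous regions
        r = next_region(heap, r);
      }
      return r;
    }

    int main() {
      std::vector<Region> heap = {{0,false},{1,true},{2,true},{3,false}};
      const Region* r = next_compaction_region(heap, &heap[0]);
      std::printf("next compaction region: %d\n", r ? r->index : -1);  // 3
      return 0;
    }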
@@ -3032,7 +2854,7 @@
 }
 
 size_t G1CollectedHeap::max_capacity() const {
-  return _g1_reserved.byte_size();
+  return _hrs.reserved().byte_size();
 }
 
 jlong G1CollectedHeap::millis_since_last_gc() {
@@ -3561,9 +3383,9 @@
   st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
             capacity()/K, used_unlocked()/K);
   st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
-            _g1_storage.low_boundary(),
-            _g1_storage.high(),
-            _g1_storage.high_boundary());
+            _hrs.committed().start(),
+            _hrs.committed().end(),
+            _hrs.reserved().end());
   st->cr();
   st->print("  region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
   uint young_regions = _young_list->length();
@@ -4253,10 +4075,7 @@
             // No need for an ergo verbose message here,
             // expansion_amount() does this when it returns a value > 0.
             if (!expand(expand_bytes)) {
-              // We failed to expand the heap so let's verify that
-              // committed/uncommitted amount match the backing store
-              assert(capacity() == _g1_storage.committed_size(), "committed size mismatch");
-              assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch");
+              // We failed to expand the heap. Cannot do anything about it.
             }
           }
         }
@@ -4317,7 +4136,7 @@
       _hr_printer.end_gc(false /* full */, (size_t) total_collections());
 
       if (mark_in_progress()) {
-        concurrent_mark()->update_g1_committed();
+        concurrent_mark()->update_heap_boundaries(_hrs.committed());
       }
 
 #ifdef TRACESPINNING
@@ -6154,6 +5973,7 @@
                                   bool locked) {
   assert(!hr->isHumongous(), "this is only for non-humongous regions");
   assert(!hr->is_empty(), "the region should not be empty");
+  assert(_hrs.is_available(hr->hrs_index()), "region should be committed");
   assert(free_list != NULL, "pre-condition");
 
   if (G1VerifyBitmaps) {
@@ -6208,7 +6028,7 @@
   assert(list != NULL, "list can't be null");
   if (!list->is_empty()) {
     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
-    _free_list.add_ordered(list);
+    _hrs.insert_list_into_free_list(list);
   }
 }
 
@@ -6816,22 +6636,22 @@
     // this is that during a full GC string deduplication needs to know if
     // a collected region was young or old when the full GC was initiated.
   }
-  _free_list.remove_all();
+  _hrs.remove_all_free_regions();
 }
 
 class RebuildRegionSetsClosure : public HeapRegionClosure {
 private:
   bool            _free_list_only;
   HeapRegionSet*   _old_set;
-  FreeRegionList* _free_list;
+  HeapRegionSeq*   _hrs;
   size_t          _total_used;
 
 public:
   RebuildRegionSetsClosure(bool free_list_only,
-                           HeapRegionSet* old_set, FreeRegionList* free_list) :
+                           HeapRegionSet* old_set, HeapRegionSeq* hrs) :
     _free_list_only(free_list_only),
-    _old_set(old_set), _free_list(free_list), _total_used(0) {
-    assert(_free_list->is_empty(), "pre-condition");
+    _old_set(old_set), _hrs(hrs), _total_used(0) {
+    assert(_hrs->num_free_regions() == 0, "pre-condition");
     if (!free_list_only) {
       assert(_old_set->is_empty(), "pre-condition");
     }
@@ -6844,7 +6664,7 @@
 
     if (r->is_empty()) {
       // Add free regions to the free list
-      _free_list->add_as_tail(r);
+      _hrs->insert_into_free_list(r);
     } else if (!_free_list_only) {
       assert(!r->is_young(), "we should not come across young regions");
 
@@ -6872,7 +6692,7 @@
     _young_list->empty_list();
   }
 
-  RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_free_list);
+  RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrs);
   heap_region_iterate(&cl);
 
   if (!free_list_only) {
@@ -7062,7 +6882,7 @@
 private:
   HeapRegionSet*   _old_set;
   HeapRegionSet*   _humongous_set;
-  FreeRegionList*  _free_list;
+  HeapRegionSeq*   _hrs;
 
 public:
   HeapRegionSetCount _old_count;
@@ -7071,8 +6891,8 @@
 
   VerifyRegionListsClosure(HeapRegionSet* old_set,
                            HeapRegionSet* humongous_set,
-                           FreeRegionList* free_list) :
-    _old_set(old_set), _humongous_set(humongous_set), _free_list(free_list),
+                           HeapRegionSeq* hrs) :
+    _old_set(old_set), _humongous_set(humongous_set), _hrs(hrs),
     _old_count(), _humongous_count(), _free_count(){ }
 
   bool doHeapRegion(HeapRegion* hr) {
@@ -7086,7 +6906,7 @@
       assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u is starts humongous but not in humongous set.", hr->hrs_index()));
       _humongous_count.increment(1u, hr->capacity());
     } else if (hr->is_empty()) {
-      assert(hr->containing_set() == _free_list, err_msg("Heap region %u is empty but not on the free list.", hr->hrs_index()));
+      assert(_hrs->is_free(hr), err_msg("Heap region %u is empty but not on the free list.", hr->hrs_index()));
       _free_count.increment(1u, hr->capacity());
     } else {
       assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->hrs_index()));
@@ -7095,7 +6915,7 @@
     return false;
   }
 
-  void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, FreeRegionList* free_list) {
+  void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionSeq* free_list) {
     guarantee(old_set->length() == _old_count.length(), err_msg("Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count.length()));
     guarantee(old_set->total_capacity_bytes() == _old_count.capacity(), err_msg("Old set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
         old_set->total_capacity_bytes(), _old_count.capacity()));
@@ -7104,26 +6924,17 @@
     guarantee(humongous_set->total_capacity_bytes() == _humongous_count.capacity(), err_msg("Hum set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
         humongous_set->total_capacity_bytes(), _humongous_count.capacity()));
 
-    guarantee(free_list->length() == _free_count.length(), err_msg("Free list count mismatch. Expected %u, actual %u.", free_list->length(), _free_count.length()));
+    guarantee(free_list->num_free_regions() == _free_count.length(), err_msg("Free list count mismatch. Expected %u, actual %u.", free_list->num_free_regions(), _free_count.length()));
     guarantee(free_list->total_capacity_bytes() == _free_count.capacity(), err_msg("Free list capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
         free_list->total_capacity_bytes(), _free_count.capacity()));
   }
 };
 
-HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
-                                             HeapWord* bottom) {
-  HeapWord* end = bottom + HeapRegion::GrainWords;
-  MemRegion mr(bottom, end);
-  assert(_g1_reserved.contains(mr), "invariant");
-  // This might return NULL if the allocation fails
-  return new HeapRegion(hrs_index, _bot_shared, mr);
-}
-
 void G1CollectedHeap::verify_region_sets() {
   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
 
   // First, check the explicit lists.
-  _free_list.verify_list();
+  _hrs.verify();
   {
     // Given that a concurrent operation might be adding regions to
     // the secondary free list we have to take the lock before
@@ -7154,9 +6965,9 @@
   // Finally, make sure that the region accounting in the lists is
   // consistent with what we see in the heap.
 
-  VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_free_list);
+  VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_hrs);
   heap_region_iterate(&cl);
-  cl.verify_counts(&_old_set, &_humongous_set, &_free_list);
+  cl.verify_counts(&_old_set, &_humongous_set, &_hrs);
 }
 
 // Optimized nmethod scanning
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Tue Aug 19 12:39:06 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Tue Aug 19 10:50:27 2014 +0200
@@ -244,19 +244,9 @@
 
   static size_t _humongous_object_threshold_in_words;
 
-  // Storage for the G1 heap.
-  VirtualSpace _g1_storage;
-  MemRegion    _g1_reserved;
-
-  // The part of _g1_storage that is currently committed.
-  MemRegion _g1_committed;
-
-  // The master free list. It will satisfy all new region allocations.
-  FreeRegionList _free_list;
-
   // The secondary free list which contains regions that have been
-  // freed up during the cleanup process. This will be appended to the
-  // master free list when appropriate.
+  // freed up during the cleanup process. This will be appended to
+  // the master free list when appropriate.
   FreeRegionList _secondary_free_list;
 
   // It keeps track of the old regions.
@@ -520,14 +510,6 @@
   // humongous object, set is_old to true. If not, to false.
   HeapRegion* new_region(size_t word_size, bool is_old, bool do_expand);
 
-  // Attempt to satisfy a humongous allocation request of the given
-  // size by finding a contiguous set of free regions of num_regions
-  // length and remove them from the master free list. Return the
-  // index of the first region or G1_NULL_HRS_INDEX if the search
-  // was unsuccessful.
-  uint humongous_obj_allocate_find_first(uint num_regions,
-                                         size_t word_size);
-
   // Initialize a contiguous set of free regions of length num_regions
   // and starting at index first so that they appear as a single
   // humongous region.
@@ -1193,27 +1175,20 @@
   virtual size_t unsafe_max_alloc();
 
   virtual bool is_maximal_no_gc() const {
-    return _g1_storage.uncommitted_size() == 0;
+    return _hrs.available() == 0;
   }
 
-  // The total number of regions in the heap.
-  uint n_regions() const { return _hrs.length(); }
+  // The current number of regions in the heap.
+  uint num_regions() const { return _hrs.length(); }
 
   // The max number of regions in the heap.
   uint max_regions() const { return _hrs.max_length(); }
 
   // The number of regions that are completely free.
-  uint free_regions() const { return _free_list.length(); }
+  uint num_free_regions() const { return _hrs.num_free_regions(); }
 
   // The number of regions that are not completely free.
-  uint used_regions() const { return n_regions() - free_regions(); }
-
-  // The number of regions available for "regular" expansion.
-  uint expansion_regions() const { return _expansion_regions; }
-
-  // Factory method for HeapRegion instances. It will return NULL if
-  // the allocation fails.
-  HeapRegion* new_heap_region(uint hrs_index, HeapWord* bottom);
+  uint num_used_regions() const { return num_regions() - num_free_regions(); }
 
   void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
   void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
@@ -1262,7 +1237,7 @@
 
 #ifdef ASSERT
   bool is_on_master_free_list(HeapRegion* hr) {
-    return hr->containing_set() == &_free_list;
+    return _hrs.is_free(hr);
   }
 #endif // ASSERT
 
@@ -1274,7 +1249,7 @@
   }
 
   void append_secondary_free_list() {
-    _free_list.add_ordered(&_secondary_free_list);
+    _hrs.insert_list_into_free_list(&_secondary_free_list);
   }
 
   void append_secondary_free_list_if_not_empty_with_lock() {
@@ -1380,19 +1355,19 @@
   // Return "TRUE" iff the given object address is in the reserved
   // region of g1.
   bool is_in_g1_reserved(const void* p) const {
-    return _g1_reserved.contains(p);
+    return _hrs.reserved().contains(p);
   }
 
   // Returns a MemRegion that corresponds to the space that has been
   // reserved for the heap
-  MemRegion g1_reserved() {
-    return _g1_reserved;
+  MemRegion g1_reserved() const {
+    return _hrs.reserved();
   }
 
   // Returns a MemRegion that corresponds to the space that has been
   // committed in the heap
   MemRegion g1_committed() {
-    return _g1_committed;
+    return _hrs.committed();
   }
 
   virtual bool is_in_closed_subset(const void* p) const;
@@ -1432,6 +1407,8 @@
   // within the heap.
   inline uint addr_to_region(HeapWord* addr) const;
 
+  inline HeapWord* bottom_addr_for_region(uint index) const;
+
   // Divide the heap region sequence into "chunks" of some size (the number
   // of regions divided by the number of parallel threads times some
   // overpartition factor, currently 4).  Assumes that this will be called
@@ -1445,10 +1422,10 @@
   // setting the claim value of the second and subsequent regions of the
   // chunk.)  For now requires that "doHeapRegion" always returns "false",
   // i.e., that a closure never attempt to abort a traversal.
-  void heap_region_par_iterate_chunked(HeapRegionClosure* blk,
-                                       uint worker,
-                                       uint no_of_par_workers,
-                                       jint claim_value);
+  void heap_region_par_iterate_chunked(HeapRegionClosure* cl,
+                                       uint worker_id,
+                                       uint num_workers,
+                                       jint claim_value) const;
 
   // It resets all the region claim values to the default.
   void reset_heap_region_claim_values();
@@ -1473,11 +1450,6 @@
   // starting region for iterating over the current collection set.
   HeapRegion* start_cset_region_for_worker(uint worker_i);
 
-  // This is a convenience method that is used by the
-  // HeapRegionIterator classes to calculate the starting region for
-  // each worker so that they do not all start from the same region.
-  HeapRegion* start_region_for_worker(uint worker_i, uint no_of_par_workers);
-
   // Iterate over the regions (if any) in the current collection set.
   void collection_set_iterate(HeapRegionClosure* blk);
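
The counting accessors above are renamed onto a uniform num_ prefix and routed through _hrs. Their relationship is worth stating once: used regions are whatever is not free. A tiny sketch mirroring the renamed methods (Heap here is hypothetical, not G1CollectedHeap):

    #include <cassert>
    #include <cstdio>

    struct Heap {
      unsigned length_, free_;
      unsigned num_regions() const      { return length_; }
      unsigned num_free_regions() const { return free_; }
      unsigned num_used_regions() const { return num_regions() - num_free_regions(); }
    };

    int main() {
      Heap h{128, 40};
      assert(h.num_used_regions() == 88);
      std::printf("used = %u\n", h.num_used_regions());
      return 0;
    }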
 
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Tue Aug 19 12:39:06 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Tue Aug 19 10:50:27 2014 +0200
@@ -47,19 +47,21 @@
   return (uint)(pointer_delta(addr, _reserved.start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
 }
 
+inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
+  return _hrs.reserved().start() + index * HeapRegion::GrainWords;
+}
+
 template <class T>
-inline HeapRegion*
-G1CollectedHeap::heap_region_containing_raw(const T addr) const {
+inline HeapRegion* G1CollectedHeap::heap_region_containing_raw(const T addr) const {
   assert(addr != NULL, "invariant");
-  assert(_g1_reserved.contains((const void*) addr),
+  assert(is_in_g1_reserved((const void*) addr),
       err_msg("Address "PTR_FORMAT" is outside of the heap ranging from ["PTR_FORMAT" to "PTR_FORMAT")",
-          p2i((void*)addr), p2i(_g1_reserved.start()), p2i(_g1_reserved.end())));
+          p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end())));
   return _hrs.addr_to_region((HeapWord*) addr);
 }
 
 template <class T>
-inline HeapRegion*
-G1CollectedHeap::heap_region_containing(const T addr) const {
+inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
   HeapRegion* hr = heap_region_containing_raw(addr);
   if (hr->continuesHumongous()) {
     return hr->humongous_start_region();
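
bottom_addr_for_region() works because every region has the same grain: a region's bottom is pure arithmetic off the reserved base, so it can be computed before any HeapRegion object exists for that slot. The same computation with assumed numbers (the real code adds index * GrainWords to a HeapWord*; bytes are used here for readability):

    #include <cstdio>
    #include <cstdint>

    int main() {
      const uint64_t heap_base   = 0x80000000ull;  // hypothetical reserved start
      const uint64_t grain_bytes = 1u << 20;       // assume 1 MiB regions
      for (uint64_t index = 0; index < 3; index++) {
        uint64_t bottom = heap_base + index * grain_bytes;
        std::printf("region %llu bottom = %#llx\n",
                    (unsigned long long)index, (unsigned long long)bottom);
      }
      return 0;
    }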
@@ -89,10 +91,9 @@
   return r != NULL && r->in_collection_set();
 }
 
-inline HeapWord*
-G1CollectedHeap::attempt_allocation(size_t word_size,
-                                    unsigned int* gc_count_before_ret,
-                                    int* gclocker_retry_count_ret) {
+inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
+                                                     unsigned int* gc_count_before_ret,
+                                                     int* gclocker_retry_count_ret) {
   assert_heap_not_locked_and_not_at_safepoint();
   assert(!isHumongous(word_size), "attempt_allocation() should not "
          "be called for humongous allocation requests");
@@ -252,8 +253,7 @@
   }
 }
 
-inline bool
-G1CollectedHeap::evacuation_should_fail() {
+inline bool G1CollectedHeap::evacuation_should_fail() {
   if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) {
     return false;
   }
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Tue Aug 19 12:39:06 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Tue Aug 19 10:50:27 2014 +0200
@@ -455,7 +455,7 @@
   } else {
     _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
   }
-  _free_regions_at_end_of_collection = _g1->free_regions();
+  _free_regions_at_end_of_collection = _g1->num_free_regions();
   update_young_list_target_length();
 
   // We may immediately start allocating regions and placing them on the
@@ -828,7 +828,7 @@
 
   record_survivor_regions(0, NULL, NULL);
 
-  _free_regions_at_end_of_collection = _g1->free_regions();
+  _free_regions_at_end_of_collection = _g1->num_free_regions();
   // Reset survivors SurvRateGroup.
   _survivor_surv_rate_group->reset();
   update_young_list_target_length();
@@ -1180,7 +1180,7 @@
 
   _in_marking_window = new_in_marking_window;
   _in_marking_window_im = new_in_marking_window_im;
-  _free_regions_at_end_of_collection = _g1->free_regions();
+  _free_regions_at_end_of_collection = _g1->num_free_regions();
   update_young_list_target_length();
 
   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
@@ -1202,7 +1202,7 @@
   _survivor_used_bytes_before_gc = young_list->survivor_used_bytes();
   _heap_capacity_bytes_before_gc = _g1->capacity();
   _heap_used_bytes_before_gc = _g1->used();
-  _cur_collection_pause_used_regions_at_start = _g1->used_regions();
+  _cur_collection_pause_used_regions_at_start = _g1->num_used_regions();
 
   _eden_capacity_bytes_before_gc =
          (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc;
@@ -1617,7 +1617,7 @@
 G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
   _collectionSetChooser->clear();
 
-  uint region_num = _g1->n_regions();
+  uint region_num = _g1->num_regions();
   if (G1CollectedHeap::use_parallel_gc_threads()) {
     const uint OverpartitionFactor = 4;
     uint WorkUnit;
@@ -1638,7 +1638,7 @@
         MAX2(region_num / (uint) (ParallelGCThreads * OverpartitionFactor),
              MinWorkUnit);
     }
-    _collectionSetChooser->prepare_for_par_region_addition(_g1->n_regions(),
+    _collectionSetChooser->prepare_for_par_region_addition(_g1->num_regions(),
                                                            WorkUnit);
     ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
                                             (int) WorkUnit);
@@ -1935,7 +1935,7 @@
   // of them are available.
 
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  const size_t region_num = g1h->n_regions();
+  const size_t region_num = g1h->num_regions();
   const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
   size_t result = region_num * perc / 100;
   // emulate ceiling
--- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Tue Aug 19 12:39:06 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Tue Aug 19 10:50:27 2014 +0200
@@ -344,11 +344,6 @@
   return low;
 }
 
-#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
-#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
-#endif // _MSC_VER
-
-
 HeapRegion::HeapRegion(uint hrs_index,
                        G1BlockOffsetSharedArray* sharedOffsetArray,
                        MemRegion mr) :
@@ -360,7 +355,7 @@
     _claimed(InitialClaimValue), _evacuation_failed(false),
     _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
     _young_type(NotYoung), _next_young_region(NULL),
-    _next_dirty_cards_region(NULL), _next(NULL), _prev(NULL), _pending_removal(false),
+    _next_dirty_cards_region(NULL), _next(NULL), _prev(NULL),
 #ifdef ASSERT
     _containing_set(NULL),
 #endif // ASSERT
@@ -369,14 +364,20 @@
     _predicted_bytes_to_copy(0)
 {
   _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
+  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
+
+  initialize(mr);
+}
+
+void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
+  assert(_rem_set->is_empty(), "Remembered set must be empty");
+
+  G1OffsetTableContigSpace::initialize(mr, clear_space, mangle_space);
+
   _orig_end = mr.end();
-  // Note that initialize() will set the start of the unmarked area of the
-  // region.
   hr_clear(false /*par*/, false /*clear_space*/);
   set_top(bottom());
   record_top_and_timestamp();
-
-  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
 }
 
 CompactibleSpace* HeapRegion::next_compaction_space() const {
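
The constructor/initialize split above is the load-bearing change in heapRegion.cpp: the HeapRegion object is built once per slot, while initialize(mr) can be re-run whenever the slot's memory is committed again, provided the remembered set is empty. The allocate-once, re-initialize-many pattern in isolation (Slot is hypothetical):

    #include <cassert>
    #include <cstdio>

    struct Slot {
      long bottom, end, top;
      int  remset_entries;

      Slot() : bottom(0), end(0), top(0), remset_entries(0) {}  // built once

      // Called each time the slot's memory range is (re)committed.
      void initialize(long new_bottom, long new_end) {
        assert(remset_entries == 0 && "remembered set must be empty");
        bottom = new_bottom;
        end    = new_end;
        top    = bottom;          // region starts out empty
      }
    };

    int main() {
      Slot s;                        // constructed once, up front
      s.initialize(0x1000, 0x2000);
      s.initialize(0x3000, 0x4000);  // safely re-targeted after uncommit/commit
      std::printf("bottom=%#lx top=%#lx\n",
                  (unsigned long)s.bottom, (unsigned long)s.top);
      return 0;
    }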
@@ -907,7 +908,7 @@
     }
 
     // If it returns false, verify_for_object() will output the
-    // appropriate messasge.
+    // appropriate message.
     if (do_bot_verify &&
         !g1->is_obj_dead(obj, this) &&
         !_offsets.verify_for_object(p, obj_size)) {
@@ -1038,8 +1039,7 @@
   set_top(bottom());
   set_saved_mark_word(bottom());
   CompactibleSpace::clear(mangle_space);
-  _offsets.zero_bottom_entry();
-  _offsets.initialize_threshold();
+  reset_bot();
 }
 
 void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
@@ -1129,9 +1129,11 @@
   _gc_time_stamp(0)
 {
   _offsets.set_space(this);
-  // false ==> we'll do the clearing if there's clearing to be done.
-  CompactibleSpace::initialize(mr, false, SpaceDecorator::Mangle);
+}
+
+void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
+  CompactibleSpace::initialize(mr, clear_space, mangle_space);
   _top = bottom();
-  _offsets.zero_bottom_entry();
-  _offsets.initialize_threshold();
+  reset_bot();
 }
+
--- a/src/share/vm/gc_implementation/g1/heapRegion.hpp	Tue Aug 19 12:39:06 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp	Tue Aug 19 10:50:27 2014 +0200
@@ -62,7 +62,7 @@
                 p2i((_hr_)->bottom()), p2i((_hr_)->top()), p2i((_hr_)->end())
 
 // sentinel value for hrs_index
-#define G1_NULL_HRS_INDEX ((uint) -1)
+#define G1_NO_HRS_INDEX ((uint) -1)
 
 // A dirty card to oop closure for heap regions. It
 // knows how to get the G1 heap and how to use the bitmap
@@ -146,6 +146,9 @@
   HeapWord* top() const { return _top; }
 
  protected:
+  // Reset the G1OffsetTableContigSpace.
+  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
+
   HeapWord** top_addr() { return &_top; }
   // Allocation helpers (return NULL if full).
   inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
@@ -200,8 +203,7 @@
   virtual void print() const;
 
   void reset_bot() {
-    _offsets.zero_bottom_entry();
-    _offsets.initialize_threshold();
+    _offsets.reset_bot();
   }
 
   void update_bot_for_object(HeapWord* start, size_t word_size) {
@@ -264,7 +266,6 @@
 #ifdef ASSERT
   HeapRegionSetBase* _containing_set;
 #endif // ASSERT
-  bool _pending_removal;
 
   // For parallel heapRegion traversal.
   jint _claimed;
@@ -333,6 +334,12 @@
              G1BlockOffsetSharedArray* sharedOffsetArray,
              MemRegion mr);
 
+  // Initializing the HeapRegion not only resets the data structure, but also
+  // resets the BOT for that heap region.
+  // The default values for clear_space mean that we will do the clearing if
+  // there's clearing to be done ourselves. We also always mangle the space.
+  virtual void initialize(MemRegion mr, bool clear_space = false, bool mangle_space = SpaceDecorator::Mangle);
+
   static int    LogOfHRGrainBytes;
   static int    LogOfHRGrainWords;
 
@@ -553,26 +560,6 @@
   // to provide a dummy version of it.
 #endif // ASSERT
 
-  // If we want to remove regions from a list in bulk we can simply tag
-  // them with the pending_removal tag and call the
-  // remove_all_pending() method on the list.
-
-  bool pending_removal() { return _pending_removal; }
-
-  void set_pending_removal(bool pending_removal) {
-    if (pending_removal) {
-      assert(!_pending_removal && containing_set() != NULL,
-             "can only set pending removal to true if it's false and "
-             "the region belongs to a region set");
-    } else {
-      assert( _pending_removal && containing_set() == NULL,
-              "can only set pending removal to false if it's true and "
-              "the region does not belong to a region set");
-    }
-
-    _pending_removal = pending_removal;
-  }
-
   HeapRegion* get_next_young_region() { return _next_young_region; }
   void set_next_young_region(HeapRegion* hr) {
     _next_young_region = hr;
--- a/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Tue Aug 19 12:39:06 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Tue Aug 19 10:50:27 2014 +0200
@@ -25,163 +25,189 @@
 #include "precompiled.hpp"
 #include "gc_implementation/g1/heapRegion.hpp"
 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
-#include "gc_implementation/g1/heapRegionSet.hpp"
+#include "gc_implementation/g1/heapRegionSet.inline.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+#include "gc_implementation/g1/concurrentG1Refine.hpp"
 #include "memory/allocation.hpp"
 
-// Private
+void HeapRegionSeq::initialize(ReservedSpace reserved) {
+  _reserved = reserved;
+  _storage.initialize(reserved, 0);
+
+  _num_committed = 0;
+
+  _allocated_heapregions_length = 0;
+
+  _regions.initialize((HeapWord*)_storage.low_boundary(), (HeapWord*)_storage.high_boundary(), HeapRegion::GrainBytes);
+}
+
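+// Regions are committed in index order from the bottom of the heap, so the
+// committed regions always form a prefix of the region array.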
+bool HeapRegionSeq::is_available(uint region) const {
+  return region < _num_committed;
+}
+
+#ifdef ASSERT
+bool HeapRegionSeq::is_free(HeapRegion* hr) const {
+  return _free_list.contains(hr);
+}
+#endif
 
-uint HeapRegionSeq::find_contiguous_from(uint from, uint num) {
-  uint len = length();
-  assert(num > 1, "use this only for sequences of length 2 or greater");
-  assert(from <= len,
-         err_msg("from: %u should be valid and <= than %u", from, len));
+HeapRegion* HeapRegionSeq::new_heap_region(uint hrs_index) {
+  HeapWord* bottom = G1CollectedHeap::heap()->bottom_addr_for_region(hrs_index);
+  MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
+  assert(reserved().contains(mr), "invariant");
+  return new HeapRegion(hrs_index, G1CollectedHeap::heap()->bot_shared(), mr);
+}
+
+void HeapRegionSeq::update_committed_space(HeapWord* old_end,
+                                           HeapWord* new_end) {
+  assert(old_end != new_end, "don't call this otherwise");
+  // The committed area may not have been updated yet, so construct the new
+  // committed MemRegion explicitly and use it below.
+  MemRegion new_committed(heap_bottom(), new_end);
+  // Tell the card table about the update.
+  Universe::heap()->barrier_set()->resize_covered_region(new_committed);
+  // Tell the BOT about the update.
+  G1CollectedHeap::heap()->bot_shared()->resize(new_committed.word_size());
+  // Tell the hot card cache about the update
+  G1CollectedHeap::heap()->concurrent_g1_refine()->hot_card_cache()->resize_card_counts(new_committed.byte_size());
+}
+
+void HeapRegionSeq::commit_regions(uint index, size_t num_regions) {
+  guarantee(num_regions > 0, "Must commit more than zero regions");
+  guarantee(_num_committed + num_regions <= max_length(), "Cannot commit more than the maximum number of regions");
 
-  uint curr = from;
-  uint first = G1_NULL_HRS_INDEX;
-  uint num_so_far = 0;
-  while (curr < len && num_so_far < num) {
-    if (at(curr)->is_empty()) {
-      if (first == G1_NULL_HRS_INDEX) {
-        first = curr;
-        num_so_far = 1;
-      } else {
-        num_so_far += 1;
-      }
-    } else {
-      first = G1_NULL_HRS_INDEX;
-      num_so_far = 0;
+  _storage.expand_by(num_regions * HeapRegion::GrainBytes);
+  update_committed_space(heap_top(), heap_top() + num_regions * HeapRegion::GrainWords);
+}
+
+void HeapRegionSeq::uncommit_regions(uint start, size_t num_regions) {
+  guarantee(num_regions >= 1, "Need to specify at least one region to uncommit");
+  guarantee(_num_committed >= num_regions, "pre-condition");
+
+  // Print before uncommitting.
+  if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
+    for (uint i = start; i < start + num_regions; i++) {
+      HeapRegion* hr = at(i);
+      G1CollectedHeap::heap()->hr_printer()->uncommit(hr->bottom(), hr->end());
     }
-    curr += 1;
   }
-  assert(num_so_far <= num, "post-condition");
-  if (num_so_far == num) {
-    // we found enough space for the humongous object
-    assert(from <= first && first < len, "post-condition");
-    assert(first < curr && (curr - first) == num, "post-condition");
-    for (uint i = first; i < first + num; ++i) {
-      assert(at(i)->is_empty(), "post-condition");
+
+  HeapWord* old_end = heap_top();
+  _num_committed -= (uint)num_regions;
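+  // Make the reduced committed count visible before the underlying memory is
+  // actually uncommitted below.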
+  OrderAccess::fence();
+
+  _storage.shrink_by(num_regions * HeapRegion::GrainBytes);
+  update_committed_space(old_end, heap_top());
+}
+
+void HeapRegionSeq::make_regions_available(uint start, uint num_regions) {
+  guarantee(num_regions > 0, "No point in calling this for zero regions");
+  commit_regions(start, num_regions);
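+  // HeapRegion instances are allocated lazily the first time a region index
+  // is committed; they are kept and reused across later uncommit/commit cycles.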
+  for (uint i = start; i < start + num_regions; i++) {
+    if (_regions.get_by_index(i) == NULL) {
+      HeapRegion* new_hr = new_heap_region(i);
+      _regions.set_by_index(i, new_hr);
+      _allocated_heapregions_length = MAX2(_allocated_heapregions_length, i + 1);
     }
-    return first;
-  } else {
-    // we failed to find enough space for the humongous object
-    return G1_NULL_HRS_INDEX;
+  }
+
+  _num_committed += num_regions;
+
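+  // Ensure the updated committed count is visible before the new regions are
+  // exposed through the free list below.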
+  OrderAccess::fence();
+
+  for (uint i = start; i < start + num_regions; i++) {
+    assert(is_available(i), err_msg("Just made region %u available but it is apparently not.", i));
+    HeapRegion* hr = at(i);
+    if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
+      G1CollectedHeap::heap()->hr_printer()->commit(hr->bottom(), hr->end());
+    }
+    HeapWord* bottom = G1CollectedHeap::heap()->bottom_addr_for_region(i);
+    MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
+
+    hr->initialize(mr);
+    insert_into_free_list(at(i));
   }
 }
 
-// Public
+uint HeapRegionSeq::expand_by(uint num_regions) {
+  // Only ever expand from the end of the heap.
+  return expand_at(_num_committed, num_regions);
+}
+
+uint HeapRegionSeq::expand_at(uint start, uint num_regions) {
+  if (num_regions == 0) {
+    return 0;
+  }
 
-void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end) {
-  assert((uintptr_t) bottom % HeapRegion::GrainBytes == 0,
-         "bottom should be heap region aligned");
-  assert((uintptr_t) end % HeapRegion::GrainBytes == 0,
-         "end should be heap region aligned");
+  uint cur = start;
+  uint idx_last_found = 0;
+  uint num_last_found = 0;
+
+  uint expanded = 0;
 
-  _next_search_index = 0;
-  _allocated_length = 0;
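+  // Repeatedly find the next run of uncommitted regions at or after 'start'
+  // and commit from it until the request is satisfied or no runs remain.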
+  while (expanded < num_regions &&
+         (num_last_found = find_unavailable_from_idx(cur, &idx_last_found)) > 0) {
+    uint to_expand = MIN2(num_regions - expanded, num_last_found);
+    make_regions_available(idx_last_found, to_expand);
+    expanded += to_expand;
+    cur = idx_last_found + num_last_found + 1;
+  }
 
-  _regions.initialize(bottom, end, HeapRegion::GrainBytes);
+  verify_optional();
+  return expanded;
 }
 
-MemRegion HeapRegionSeq::expand_by(HeapWord* old_end,
-                                   HeapWord* new_end,
-                                   FreeRegionList* list) {
-  assert(old_end < new_end, "don't call it otherwise");
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-
-  HeapWord* next_bottom = old_end;
-  assert(heap_bottom() <= next_bottom, "invariant");
-  while (next_bottom < new_end) {
-    assert(next_bottom < heap_end(), "invariant");
-    uint index = length();
-
-    assert(index < max_length(), "otherwise we cannot expand further");
-    if (index == 0) {
-      // We have not allocated any regions so far
-      assert(next_bottom == heap_bottom(), "invariant");
-    } else {
-      // next_bottom should match the end of the last/previous region
-      assert(next_bottom == at(index - 1)->end(), "invariant");
-    }
+uint HeapRegionSeq::find_contiguous(size_t num, bool empty_only) {
+  uint found = 0;
+  size_t length_found = 0;
+  uint cur = 0;
 
-    if (index == _allocated_length) {
-      // We have to allocate a new HeapRegion.
-      HeapRegion* new_hr = g1h->new_heap_region(index, next_bottom);
-      if (new_hr == NULL) {
-        // allocation failed, we bail out and return what we have done so far
-        return MemRegion(old_end, next_bottom);
-      }
-      assert(_regions.get_by_index(index) == NULL, "invariant");
-      _regions.set_by_index(index, new_hr);
-      increment_allocated_length();
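+  // First-fit scan from the bottom of the heap: extend the current candidate
+  // run while regions qualify, and restart it just past the first region that
+  // does not.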
+  while (length_found < num && cur < max_length()) {
+    HeapRegion* hr = _regions.get_by_index(cur);
+    if ((!empty_only && !is_available(cur)) || (is_available(cur) && hr != NULL && hr->is_empty())) {
+      // This region is a potential candidate for allocation into.
+      length_found++;
+    } else {
+      // This region is not a candidate. The next region is the next possible one.
+      found = cur + 1;
+      length_found = 0;
     }
-    // Have to increment the length first, otherwise we will get an
-    // assert failure at(index) below.
-    increment_length();
-    HeapRegion* hr = at(index);
-    list->add_as_tail(hr);
+    cur++;
+  }
 
-    next_bottom = hr->end();
+  if (length_found == num) {
+    for (uint i = found; i < (found + num); i++) {
+      HeapRegion* hr = _regions.get_by_index(i);
+      // sanity check
+      guarantee((!empty_only && !is_available(i)) || (is_available(i) && hr != NULL && hr->is_empty()),
+                err_msg("Found region sequence starting at " UINT32_FORMAT ", length " SIZE_FORMAT
+                        " that is not empty at " UINT32_FORMAT ". Hr is " PTR_FORMAT, found, num, i, p2i(hr)));
+    }
+    return found;
+  } else {
+    return G1_NO_HRS_INDEX;
   }
-  assert(next_bottom == new_end, "post-condition");
-  return MemRegion(old_end, next_bottom);
 }
 
-uint HeapRegionSeq::free_suffix() {
-  uint res = 0;
-  uint index = length();
-  while (index > 0) {
-    index -= 1;
-    if (!at(index)->is_empty()) {
-      break;
+HeapRegion* HeapRegionSeq::next_region_in_heap(const HeapRegion* r) const {
+  guarantee(r != NULL, "Start region must be a valid region");
+  guarantee(is_available(r->hrs_index()), err_msg("Trying to iterate starting from region %u which is not in the heap", r->hrs_index()));
+  for (uint i = r->hrs_index() + 1; i < _allocated_heapregions_length; i++) {
+    HeapRegion* hr = _regions.get_by_index(i);
+    if (is_available(i)) {
+      return hr;
     }
-    res += 1;
   }
-  return res;
-}
-
-uint HeapRegionSeq::find_contiguous(uint num) {
-  assert(num > 1, "use this only for sequences of length 2 or greater");
-  assert(_next_search_index <= length(),
-         err_msg("_next_search_index: %u should be valid and <= than %u",
-                 _next_search_index, length()));
-
-  uint start = _next_search_index;
-  uint res = find_contiguous_from(start, num);
-  if (res == G1_NULL_HRS_INDEX && start > 0) {
-    // Try starting from the beginning. If _next_search_index was 0,
-    // no point in doing this again.
-    res = find_contiguous_from(0, num);
-  }
-  if (res != G1_NULL_HRS_INDEX) {
-    assert(res < length(), err_msg("res: %u should be valid", res));
-    _next_search_index = res + num;
-    assert(_next_search_index <= length(),
-           err_msg("_next_search_index: %u should be valid and <= than %u",
-                   _next_search_index, length()));
-  }
-  return res;
+  return NULL;
 }
 
 void HeapRegionSeq::iterate(HeapRegionClosure* blk) const {
-  iterate_from((HeapRegion*) NULL, blk);
-}
-
-void HeapRegionSeq::iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const {
-  uint hr_index = 0;
-  if (hr != NULL) {
-    hr_index = hr->hrs_index();
-  }
+  uint len = max_length();
 
-  uint len = length();
-  for (uint i = hr_index; i < len; i += 1) {
-    bool res = blk->doHeapRegion(at(i));
-    if (res) {
-      blk->incomplete();
-      return;
+  for (uint i = 0; i < len; i++) {
+    if (!is_available(i)) {
+      continue;
     }
-  }
-  for (uint i = 0; i < hr_index; i += 1) {
+    guarantee(at(i) != NULL, err_msg("Tried to access region %u that has a NULL HeapRegion*", i));
     bool res = blk->doHeapRegion(at(i));
     if (res) {
       blk->incomplete();
@@ -190,71 +216,219 @@
   }
 }
 
+uint HeapRegionSeq::find_unavailable_from_idx(uint start_idx, uint* res_idx) const {
+  guarantee(res_idx != NULL, "checking");
+  guarantee(start_idx <= (max_length() + 1), "checking");
+
+  uint num_regions = 0;
+
+  uint cur = start_idx;
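+  // Skip over the leading run of available regions, then measure the run of
+  // unavailable regions that follows.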
+  while (cur < max_length() && is_available(cur)) {
+    cur++;
+  }
+  if (cur == max_length()) {
+    return num_regions;
+  }
+  *res_idx = cur;
+  while (cur < max_length() && !is_available(cur)) {
+    cur++;
+  }
+  num_regions = cur - *res_idx;
+#ifdef ASSERT
+  for (uint i = *res_idx; i < (*res_idx + num_regions); i++) {
+    assert(!is_available(i), "just checking");
+  }
+  assert(cur == max_length() || num_regions == 0 || is_available(cur),
+         err_msg("The region at the current position %u must be available or at the end of the heap.", cur));
+#endif
+  return num_regions;
+}
+
+uint HeapRegionSeq::start_region_for_worker(uint worker_i, uint num_workers, uint num_regions) const {
+  return num_regions * worker_i / num_workers;
+}
+
+void HeapRegionSeq::par_iterate(HeapRegionClosure* blk, uint worker_id, uint num_workers, jint claim_value) const {
+  const uint start_index = start_region_for_worker(worker_id, num_workers, _allocated_heapregions_length);
+
+  // Every worker will actually look at all regions, skipping over regions that
+  // are currently not committed.
+  // This also (potentially) iterates over regions newly allocated during GC. This
+  // is no problem except for some extra work.
+  for (uint count = 0; count < _allocated_heapregions_length; count++) {
+    const uint index = (start_index + count) % _allocated_heapregions_length;
+    assert(0 <= index && index < _allocated_heapregions_length, "sanity");
+    // Skip over unavailable regions
+    if (!is_available(index)) {
+      continue;
+    }
+    HeapRegion* r = _regions.get_by_index(index);
+    // We'll ignore "continues humongous" regions (we'll process them
+    // when we come across their corresponding "start humongous"
+    // region) and regions already claimed.
+    if (r->claim_value() == claim_value || r->continuesHumongous()) {
+      continue;
+    }
+    // OK, try to claim it
+    if (!r->claimHeapRegion(claim_value)) {
+      continue;
+    }
+    // Success!
+    if (r->startsHumongous()) {
+      // If the region is "starts humongous" we'll iterate over its
+      // "continues humongous" first; in fact we'll do them
+      // first. The order is important. In one case, calling the
+      // closure on the "starts humongous" region might de-allocate
+      // and clear all its "continues humongous" regions and, as a
+      // result, we might end up processing them twice. So, we'll do
+      // them first (note: most closures will ignore them anyway) and
+      // then we'll do the "starts humongous" region.
+      for (uint ch_index = index + 1; ch_index < index + r->region_num(); ch_index++) {
+        HeapRegion* chr = _regions.get_by_index(ch_index);
+
+        assert(chr->continuesHumongous(), "Must be humongous region");
+        assert(chr->humongous_start_region() == r,
+               err_msg("Must work on humongous continuation of the original start region "
+                       PTR_FORMAT ", but is " PTR_FORMAT, p2i(r), p2i(chr)));
+        assert(chr->claim_value() != claim_value,
+               "Must not have been claimed yet because claiming of humongous continuation first claims the start region");
+
+        bool claim_result = chr->claimHeapRegion(claim_value);
+        // We should always be able to claim it; no one else should
+        // be trying to claim this region.
+        guarantee(claim_result, "We should always be able to claim the continuesHumongous part of the humongous object");
+
+        bool res2 = blk->doHeapRegion(chr);
+        if (res2) {
+          return;
+        }
+
+        // Right now, this holds (i.e., no closure that actually
+        // does something with "continues humongous" regions
+        // clears them). We might have to weaken it in the future,
+        // but let's leave these two asserts here for extra safety.
+        assert(chr->continuesHumongous(), "should still be the case");
+        assert(chr->humongous_start_region() == r, "sanity");
+      }
+    }
+
+    bool res = blk->doHeapRegion(r);
+    if (res) {
+      return;
+    }
+  }
+}
+
 uint HeapRegionSeq::shrink_by(uint num_regions_to_remove) {
-  // Reset this in case it's currently pointing into the regions that
-  // we just removed.
-  _next_search_index = 0;
-
   assert(length() > 0, "the region sequence should not be empty");
-  assert(length() <= _allocated_length, "invariant");
-  assert(_allocated_length > 0, "we should have at least one region committed");
+  assert(length() <= _allocated_heapregions_length, "invariant");
+  assert(_allocated_heapregions_length > 0, "we should have at least one region committed");
   assert(num_regions_to_remove < length(), "We should never remove all regions");
 
-  uint i = 0;
-  for (; i < num_regions_to_remove; i++) {
-    HeapRegion* cur = at(length() - 1);
+  if (num_regions_to_remove == 0) {
+    return 0;
+  }
+
+  uint removed = 0;
+  uint cur = _allocated_heapregions_length - 1;
+  uint idx_last_found = 0;
+  uint num_last_found = 0;
 
-    if (!cur->is_empty()) {
-      // We have to give up if the region can not be moved
-      break;
+  if ((num_last_found = find_empty_from_idx_reverse(cur, &idx_last_found)) > 0) {
+    // Only allow uncommit from the end of the heap.
+    if ((idx_last_found + num_last_found) != _allocated_heapregions_length) {
+      return 0;
+    }
+    uint to_remove = MIN2(num_regions_to_remove - removed, num_last_found);
+
+    uncommit_regions(idx_last_found + num_last_found - to_remove, to_remove);
+
+    cur -= num_last_found;
+    removed += to_remove;
   }
-    assert(!cur->isHumongous(), "Humongous regions should not be empty");
 
-    decrement_length();
-  }
-  return i;
+  verify_optional();
+
+  return removed;
 }
 
-#ifndef PRODUCT
-void HeapRegionSeq::verify_optional() {
-  guarantee(length() <= _allocated_length,
+uint HeapRegionSeq::find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const {
+  guarantee(start_idx < _allocated_heapregions_length, "checking");
+  guarantee(res_idx != NULL, "checking");
+
+  uint num_regions_found = 0;
+
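+  // Walk backwards with a signed index so that -1 can serve as the sentinel
+  // for running off the bottom of the heap.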
+  jlong cur = start_idx;
+  while (cur != -1 && !(is_available(cur) && at(cur)->is_empty())) {
+    cur--;
+  }
+  if (cur == -1) {
+    return num_regions_found;
+  }
+  jlong old_cur = cur;
+  // cur indexes the first empty region
+  while (cur != -1 && is_available(cur) && at(cur)->is_empty()) {
+    cur--;
+  }
+  *res_idx = cur + 1;
+  num_regions_found = old_cur - cur;
+
+#ifdef ASSERT
+  for (uint i = *res_idx; i < (*res_idx + num_regions_found); i++) {
+    assert(at(i)->is_empty(), "just checking");
+  }
+#endif
+  return num_regions_found;
+}
+
+void HeapRegionSeq::verify() {
+  guarantee(length() <= _allocated_heapregions_length,
             err_msg("invariant: _length: %u _allocated_length: %u",
-                    length(), _allocated_length));
-  guarantee(_allocated_length <= max_length(),
+                    length(), _allocated_heapregions_length));
+  guarantee(_allocated_heapregions_length <= max_length(),
             err_msg("invariant: _allocated_length: %u _max_length: %u",
-                    _allocated_length, max_length()));
-  guarantee(_next_search_index <= length(),
-            err_msg("invariant: _next_search_index: %u _length: %u",
-                    _next_search_index, length()));
+                    _allocated_heapregions_length, max_length()));
 
+  bool prev_committed = true;
+  uint num_committed = 0;
   HeapWord* prev_end = heap_bottom();
-  for (uint i = 0; i < _allocated_length; i += 1) {
+  for (uint i = 0; i < _allocated_heapregions_length; i++) {
+    if (!is_available(i)) {
+      prev_committed = false;
+      continue;
+    }
+    num_committed++;
     HeapRegion* hr = _regions.get_by_index(i);
     guarantee(hr != NULL, err_msg("invariant: i: %u", i));
-    guarantee(hr->bottom() == prev_end,
+    guarantee(!prev_committed || hr->bottom() == prev_end,
               err_msg("invariant i: %u "HR_FORMAT" prev_end: "PTR_FORMAT,
                       i, HR_FORMAT_PARAMS(hr), p2i(prev_end)));
     guarantee(hr->hrs_index() == i,
               err_msg("invariant: i: %u hrs_index(): %u", i, hr->hrs_index()));
-    if (i < length()) {
-      // Asserts will fire if i is >= _length
-      HeapWord* addr = hr->bottom();
-      guarantee(addr_to_region(addr) == hr, "sanity");
-    } else {
-      guarantee(hr->is_empty(), "sanity");
-      guarantee(!hr->isHumongous(), "sanity");
-      // using assert instead of guarantee here since containing_set()
-      // is only available in non-product builds.
-      assert(hr->containing_set() == NULL, "sanity");
-    }
+    // Asserts will fire if i is >= _length
+    HeapWord* addr = hr->bottom();
+    guarantee(addr_to_region(addr) == hr, "sanity");
+    // We cannot check whether the region is part of a particular set: at the time
+    // this method may be called, we have only completed allocation of the regions,
+    // but not put into a region set.
+    prev_committed = true;
     if (hr->startsHumongous()) {
       prev_end = hr->orig_end();
     } else {
       prev_end = hr->end();
     }
   }
-  for (uint i = _allocated_length; i < max_length(); i += 1) {
+  for (uint i = _allocated_heapregions_length; i < max_length(); i++) {
     guarantee(_regions.get_by_index(i) == NULL, err_msg("invariant i: %u", i));
   }
+
+  guarantee(num_committed == _num_committed, err_msg("Found %u committed regions, but should be %u", num_committed, _num_committed));
+  _free_list.verify();
+}
+
+#ifndef PRODUCT
+void HeapRegionSeq::verify_optional() {
+  verify();
 }
 #endif // PRODUCT
--- a/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Tue Aug 19 12:39:06 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Tue Aug 19 10:50:27 2014 +0200
@@ -26,6 +26,7 @@
 #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
 
 #include "gc_implementation/g1/g1BiasedArray.hpp"
+#include "gc_implementation/g1/heapRegionSet.hpp"
 
 class HeapRegion;
 class HeapRegionClosure;
@@ -33,7 +34,7 @@
 
 class G1HeapRegionTable : public G1BiasedMappedArray<HeapRegion*> {
  protected:
-   virtual HeapRegion* default_value() const { return NULL; }
+  virtual HeapRegion* default_value() const { return NULL; }
 };
 
 // This class keeps track of the region metadata (i.e., HeapRegion
@@ -64,43 +65,64 @@
 
   G1HeapRegionTable _regions;
 
-  // The number of regions committed in the heap.
-  uint _committed_length;
+  ReservedSpace _reserved;
+  VirtualSpace _storage;
+
+  FreeRegionList _free_list;
 
-  // A hint for which index to start searching from for humongous
-  // allocations.
-  uint _next_search_index;
+  // The number of regions committed in the heap.
+  uint _num_committed;
+
+  // Internal only. The highest heap region +1 we allocated a HeapRegion instance for.
+  uint _allocated_heapregions_length;
 
-  // The number of regions for which we have allocated HeapRegions for.
-  uint _allocated_length;
+  HeapWord* heap_bottom() const { return _regions.bottom_address_mapped(); }
+  HeapWord* heap_top() const { return heap_bottom() + _num_committed * HeapRegion::GrainWords; }
+  HeapWord* heap_end() const { return _regions.end_address_mapped(); }
+
+  void make_regions_available(uint index, uint num_regions = 1);
 
-  // Find a contiguous set of empty regions of length num, starting
-  // from the given index.
-  uint find_contiguous_from(uint from, uint num);
+  // Pass down commit calls to the VirtualSpace.
+  void commit_regions(uint index, size_t num_regions = 1);
+  void uncommit_regions(uint index, size_t num_regions = 1);
 
-  void increment_allocated_length() {
-    assert(_allocated_length < max_length(), "pre-condition");
-    _allocated_length++;
-  }
+  // Notify other data structures about change in the heap layout.
+  void update_committed_space(HeapWord* old_end, HeapWord* new_end);
+  // Calculate the starting region for each worker during parallel iteration so
+  // that they do not all start from the same region.
+  uint start_region_for_worker(uint worker_i, uint num_workers, uint num_regions) const;
 
-  void increment_length() {
-    assert(length() < max_length(), "pre-condition");
-    _committed_length++;
-  }
+  // Finds the next sequence of unavailable regions starting from start_idx. Returns the
+  // length of the sequence found. If this result is zero, no such sequence could be found,
+  // otherwise res_idx indicates the start index of these regions.
+  uint find_unavailable_from_idx(uint start_idx, uint* res_idx) const;
+  // Finds the next sequence of empty regions starting from start_idx, going backwards in
+  // the heap. Returns the length of the sequence found. If this value is zero, no
+  // sequence could be found, otherwise res_idx contains the start index of this range.
+  uint find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const;
+#ifdef ASSERT
+ public:
+  bool is_free(HeapRegion* hr) const;
+#endif
+  // Returns whether the given region is available for allocation.
+  bool is_available(uint region) const;
 
-  void decrement_length() {
-    assert(length() > 0, "pre-condition");
-    _committed_length--;
-  }
+  // Allocate a new HeapRegion for the given index.
+  HeapRegion* new_heap_region(uint hrs_index);
+ public:
+  // Empty constructor, we'll initialize it with the initialize() method.
+  HeapRegionSeq() : _regions(), _reserved(), _storage(), _num_committed(0),
+          _free_list("Master Free List", new MasterFreeRegionListMtSafeChecker()),
+          _allocated_heapregions_length(0)
+  { }
 
-  HeapWord* heap_bottom() const { return _regions.bottom_address_mapped(); }
-  HeapWord* heap_end() const {return _regions.end_address_mapped(); }
+  void initialize(ReservedSpace reserved);
 
- public:
-  // Empty contructor, we'll initialize it with the initialize() method.
-  HeapRegionSeq() : _regions(), _committed_length(0), _next_search_index(0), _allocated_length(0) { }
-
-  void initialize(HeapWord* bottom, HeapWord* end);
+  // Return the "dummy" region used for G1AllocRegion. This is currently a hardwired
+  // new HeapRegion that owns HeapRegion at index 0. Since at the moment we commit
+  // the heap from the lowest address, this region (and its associated data
+  // structures) are available and we do not need to check further.
+  HeapRegion* get_dummy_region() { return new_heap_region(0); }
 
   // Return the HeapRegion at the given index. Assume that the index
   // is valid.
@@ -110,43 +132,84 @@
   // HeapRegion, otherwise return NULL.
   inline HeapRegion* addr_to_region(HeapWord* addr) const;
 
+  // Insert the given region into the free region list.
+  inline void insert_into_free_list(HeapRegion* hr);
+
+  // Insert the given region list into the global free region list.
+  void insert_list_into_free_list(FreeRegionList* list) {
+    _free_list.add_ordered(list);
+  }
+
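+  // Remove a region from the free list for allocation, or return NULL if the
+  // free list is empty.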
+  HeapRegion* allocate_free_region(bool is_old) {
+    HeapRegion* hr = _free_list.remove_region(is_old);
+
+    if (hr != NULL) {
+      assert(hr->next() == NULL, "Single region should not have next");
+      assert(is_available(hr->hrs_index()), "Must be committed");
+    }
+    return hr;
+  }
+
+  inline void allocate_free_regions_starting_at(uint first, uint num_regions);
+
+  // Remove all regions from the free list.
+  void remove_all_free_regions() {
+    _free_list.remove_all();
+  }
+
+  // Return the number of committed free regions in the heap.
+  uint num_free_regions() const {
+    return _free_list.length();
+  }
+
+  size_t total_capacity_bytes() const {
+    return num_free_regions() * HeapRegion::GrainBytes;
+  }
+
+  // Return the number of available (uncommitted) regions.
+  uint available() const { return max_length() - length(); }
+
   // Return the number of regions that have been committed in the heap.
-  uint length() const { return _committed_length; }
+  uint length() const { return _num_committed; }
 
   // Return the maximum number of regions in the heap.
   uint max_length() const { return (uint)_regions.length(); }
 
-  // Expand the sequence to reflect that the heap has grown from
-  // old_end to new_end. Either create new HeapRegions, or re-use
-  // existing ones, and return them in the given list. Returns the
-  // memory region that covers the newly-created regions. If a
-  // HeapRegion allocation fails, the result memory region might be
-  // smaller than the desired one.
-  MemRegion expand_by(HeapWord* old_end, HeapWord* new_end,
-                      FreeRegionList* list);
+  MemRegion committed() const { return MemRegion(heap_bottom(), heap_top()); }
+
+  MemRegion reserved() const { return MemRegion(heap_bottom(), heap_end()); }
+
+  // Expand the sequence to reflect that the heap has grown. Either create new
+  // HeapRegions, or re-use existing ones. Returns the number of regions the
+  // sequence was expanded by. If a HeapRegion allocation fails, the resulting
+  // number of regions might be smaller than what's desired.
+  uint expand_by(uint num_regions);
 
-  // Return the number of contiguous regions at the end of the sequence
-  // that are available for allocation.
-  uint free_suffix();
+  // Makes sure that the regions from start to start+num_regions-1 are available
+  // for allocation. Returns the number of regions that were committed to achieve
+  // this.
+  uint expand_at(uint start, uint num_regions);
 
-  // Find a contiguous set of empty regions of length num and return
-  // the index of the first region or G1_NULL_HRS_INDEX if the
-  // search was unsuccessful.
-  uint find_contiguous(uint num);
+  // Find a contiguous set of empty or uncommitted regions of length num and return
+  // the index of the first region or G1_NO_HRS_INDEX if the search was unsuccessful.
+  // If empty_only is true, only empty regions are considered.
+  // Searches from bottom to top of the heap, doing a first-fit.
+  uint find_contiguous(size_t num, bool empty_only);
+
+  HeapRegion* next_region_in_heap(const HeapRegion* r) const;
 
   // Apply blk->doHeapRegion() on all committed regions in address order,
   // terminating the iteration early if doHeapRegion() returns true.
   void iterate(HeapRegionClosure* blk) const;
 
-  // As above, but start the iteration from hr and loop around. If hr
-  // is NULL, we start from the first region in the heap.
-  void iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const;
+  void par_iterate(HeapRegionClosure* blk, uint worker_id, uint num_workers, jint claim_value) const;
 
-  // Tag as uncommitted as many regions that are completely free as
-  // possible, up to num_regions_to_remove, from the suffix of the committed
-  // sequence. Return the actual number of removed regions.
+  // Uncommit up to num_regions_to_remove regions that are completely free.
+  // Return the actual number of uncommitted regions.
   uint shrink_by(uint num_regions_to_remove);
 
+  void verify();
+
   // Do some sanity checking.
   void verify_optional() PRODUCT_RETURN;
 };
--- a/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp	Tue Aug 19 12:39:06 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp	Tue Aug 19 10:50:27 2014 +0200
@@ -40,11 +40,19 @@
 }
 
 inline HeapRegion* HeapRegionSeq::at(uint index) const {
-  assert(index < length(), "pre-condition");
+  assert(is_available(index), "pre-condition");
   HeapRegion* hr = _regions.get_by_index(index);
   assert(hr != NULL, "sanity");
   assert(hr->hrs_index() == index, "sanity");
   return hr;
 }
 
+inline void HeapRegionSeq::insert_into_free_list(HeapRegion* hr) {
+  _free_list.add_ordered(hr);
+}
+
+inline void HeapRegionSeq::allocate_free_regions_starting_at(uint first, uint num_regions) {
+  _free_list.remove_starting_at(at(first), num_regions);
+}
+
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_INLINE_HPP
--- a/src/share/vm/gc_implementation/g1/heapRegionSet.cpp	Tue Aug 19 12:39:06 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/heapRegionSet.cpp	Tue Aug 19 10:50:27 2014 +0200
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #include "gc_implementation/g1/heapRegionSet.inline.hpp"
 
@@ -67,7 +68,7 @@
   // Do the basic verification first before we do the checks over the regions.
   HeapRegionSetBase::verify();
 
-  _verify_in_progress        = true;
+  _verify_in_progress = true;
 }
 
 void HeapRegionSetBase::verify_end() {
@@ -103,62 +104,7 @@
 }
 
 void FreeRegionList::fill_in_ext_msg_extra(hrs_ext_msg* msg) {
-  msg->append(" hd: "PTR_FORMAT" tl: "PTR_FORMAT, head(), tail());
-}
-
-void FreeRegionList::add_as_head_or_tail(FreeRegionList* from_list, bool as_head) {
-  check_mt_safety();
-  from_list->check_mt_safety();
-
-  verify_optional();
-  from_list->verify_optional();
-
-  if (from_list->is_empty()) {
-    return;
-  }
-
-#ifdef ASSERT
-  FreeRegionListIterator iter(from_list);
-  while (iter.more_available()) {
-    HeapRegion* hr = iter.get_next();
-    // In set_containing_set() we check that we either set the value
-    // from NULL to non-NULL or vice versa to catch bugs. So, we have
-    // to NULL it first before setting it to the value.
-    hr->set_containing_set(NULL);
-    hr->set_containing_set(this);
-  }
-#endif // ASSERT
-
-  if (_head == NULL) {
-    assert(length() == 0 && _tail == NULL, hrs_ext_msg(this, "invariant"));
-    _head = from_list->_head;
-    _tail = from_list->_tail;
-  } else {
-    assert(length() > 0 && _tail != NULL, hrs_ext_msg(this, "invariant"));
-    if (as_head) {
-      from_list->_tail->set_next(_head);
-      _head->set_prev(from_list->_tail);
-      _head = from_list->_head;
-    } else {
-      _tail->set_next(from_list->_head);
-      from_list->_head->set_prev(_tail);
-      _tail = from_list->_tail;
-    }
-  }
-
-  _count.increment(from_list->length(), from_list->total_capacity_bytes());
-  from_list->clear();
-
-  verify_optional();
-  from_list->verify_optional();
-}
-
-void FreeRegionList::add_as_head(FreeRegionList* from_list) {
-  add_as_head_or_tail(from_list, true /* as_head */);
-}
-
-void FreeRegionList::add_as_tail(FreeRegionList* from_list) {
-  add_as_head_or_tail(from_list, false /* as_head */);
+  msg->append(" hd: "PTR_FORMAT" tl: "PTR_FORMAT, _head, _tail);
 }
 
 void FreeRegionList::remove_all() {
@@ -191,11 +137,6 @@
     return;
   }
 
-  if (is_empty()) {
-    add_as_head(from_list);
-    return;
-  }
-
   #ifdef ASSERT
   FreeRegionListIterator iter(from_list);
   while (iter.more_available()) {
@@ -208,39 +149,45 @@
   }
   #endif // ASSERT
 
-  HeapRegion* curr_to = _head;
-  HeapRegion* curr_from = from_list->_head;
+  if (is_empty()) {
+    assert(length() == 0 && _tail == NULL, hrs_ext_msg(this, "invariant"));
+    _head = from_list->_head;
+    _tail = from_list->_tail;
+  } else {
+    HeapRegion* curr_to = _head;
+    HeapRegion* curr_from = from_list->_head;
+
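+    // Merge the incoming list into this one, keeping the result ordered by
+    // hrs_index: advance curr_to past smaller indices, then splice the
+    // current from-region in just before it.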
+    while (curr_from != NULL) {
+      while (curr_to != NULL && curr_to->hrs_index() < curr_from->hrs_index()) {
+        curr_to = curr_to->next();
+      }
 
-  while (curr_from != NULL) {
-    while (curr_to != NULL && curr_to->hrs_index() < curr_from->hrs_index()) {
-      curr_to = curr_to->next();
+      if (curr_to == NULL) {
+        // The rest of the from list should be added as tail
+        _tail->set_next(curr_from);
+        curr_from->set_prev(_tail);
+        curr_from = NULL;
+      } else {
+        HeapRegion* next_from = curr_from->next();
+
+        curr_from->set_next(curr_to);
+        curr_from->set_prev(curr_to->prev());
+        if (curr_to->prev() == NULL) {
+          _head = curr_from;
+        } else {
+          curr_to->prev()->set_next(curr_from);
+        }
+        curr_to->set_prev(curr_from);
+
+        curr_from = next_from;
+      }
     }
 
-    if (curr_to == NULL) {
-      // The rest of the from list should be added as tail
-      _tail->set_next(curr_from);
-      curr_from->set_prev(_tail);
-      curr_from = NULL;
-    } else {
-      HeapRegion* next_from = curr_from->next();
-
-      curr_from->set_next(curr_to);
-      curr_from->set_prev(curr_to->prev());
-      if (curr_to->prev() == NULL) {
-        _head = curr_from;
-      } else {
-        curr_to->prev()->set_next(curr_from);
-      }
-      curr_to->set_prev(curr_from);
-
-      curr_from = next_from;
+    if (_tail->hrs_index() < from_list->_tail->hrs_index()) {
+      _tail = from_list->_tail;
     }
   }
 
-  if (_tail->hrs_index() < from_list->_tail->hrs_index()) {
-    _tail = from_list->_tail;
-  }
-
   _count.increment(from_list->length(), from_list->total_capacity_bytes());
   from_list->clear();
 
@@ -248,68 +195,59 @@
   from_list->verify_optional();
 }
 
-void FreeRegionList::remove_all_pending(uint target_count) {
+void FreeRegionList::remove_starting_at(HeapRegion* first, uint num_regions) {
   check_mt_safety();
-  assert(target_count > 1, hrs_ext_msg(this, "pre-condition"));
+  assert(num_regions >= 1, hrs_ext_msg(this, "pre-condition"));
   assert(!is_empty(), hrs_ext_msg(this, "pre-condition"));
 
   verify_optional();
   DEBUG_ONLY(uint old_length = length();)
 
-  HeapRegion* curr = _head;
+  HeapRegion* curr = first;
   uint count = 0;
-  while (curr != NULL) {
+  while (count < num_regions) {
     verify_region(curr);
     HeapRegion* next = curr->next();
     HeapRegion* prev = curr->prev();
 
-    if (curr->pending_removal()) {
-      assert(count < target_count,
-             hrs_err_msg("[%s] should not come across more regions "
-                         "pending for removal than target_count: %u",
-                         name(), target_count));
+    assert(count < num_regions,
+           hrs_err_msg("[%s] should not come across more regions "
+                       "to remove than num_regions: %u",
+                       name(), num_regions));
 
-      if (prev == NULL) {
-        assert(_head == curr, hrs_ext_msg(this, "invariant"));
-        _head = next;
-      } else {
-        assert(_head != curr, hrs_ext_msg(this, "invariant"));
-        prev->set_next(next);
-      }
-      if (next == NULL) {
-        assert(_tail == curr, hrs_ext_msg(this, "invariant"));
-        _tail = prev;
-      } else {
-        assert(_tail != curr, hrs_ext_msg(this, "invariant"));
-        next->set_prev(prev);
-      }
-      if (_last = curr) {
-        _last = NULL;
-      }
+    if (prev == NULL) {
+      assert(_head == curr, hrs_ext_msg(this, "invariant"));
+      _head = next;
+    } else {
+      assert(_head != curr, hrs_ext_msg(this, "invariant"));
+      prev->set_next(next);
+    }
+    if (next == NULL) {
+      assert(_tail == curr, hrs_ext_msg(this, "invariant"));
+      _tail = prev;
+    } else {
+      assert(_tail != curr, hrs_ext_msg(this, "invariant"));
+      next->set_prev(prev);
+    }
+    if (_last == curr) {
+      _last = NULL;
+    }
 
-      curr->set_next(NULL);
-      curr->set_prev(NULL);
-      remove(curr);
-      curr->set_pending_removal(false);
-
-      count += 1;
+    curr->set_next(NULL);
+    curr->set_prev(NULL);
+    remove(curr);
 
-      // If we have come across the target number of regions we can
-      // just bail out. However, for debugging purposes, we can just
-      // carry on iterating to make sure there are not more regions
-      // tagged with pending removal.
-      DEBUG_ONLY(if (count == target_count) break;)
-    }
+    count++;
     curr = next;
   }
 
-  assert(count == target_count,
-         hrs_err_msg("[%s] count: %u should be == target_count: %u",
-                     name(), count, target_count));
-  assert(length() + target_count == old_length,
+  assert(count == num_regions,
+         hrs_err_msg("[%s] count: %u should be == num_regions: %u",
+                     name(), count, num_regions));
+  assert(length() + num_regions == old_length,
          hrs_err_msg("[%s] new length should be consistent "
-                     "new length: %u old length: %u target_count: %u",
-                     name(), length(), old_length, target_count));
+                     "new length: %u old length: %u num_regions: %u",
+                     name(), length(), old_length, num_regions));
 
   verify_optional();
 }
@@ -348,10 +286,12 @@
       hr->print_on(out);
     }
   }
+
+  out->cr();
 }
 
 void FreeRegionList::verify_list() {
-  HeapRegion* curr = head();
+  HeapRegion* curr = _head;
   HeapRegion* prev1 = NULL;
   HeapRegion* prev0 = NULL;
   uint count = 0;
@@ -379,7 +319,7 @@
     curr = curr->next();
   }
 
-  guarantee(tail() == prev0, err_msg("Expected %s to end with %u but it ended with %u.", name(), tail()->hrs_index(), prev0->hrs_index()));
+  guarantee(_tail == prev0, err_msg("Expected %s to end with %u but it ended with %u.", name(), _tail->hrs_index(), prev0->hrs_index()));
   guarantee(_tail == NULL || _tail->next() == NULL, "_tail should not have a next");
   guarantee(length() == count, err_msg("%s count mismatch. Expected %u, actual %u.", name(), length(), count));
   guarantee(total_capacity_bytes() == capacity, err_msg("%s capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
--- a/src/share/vm/gc_implementation/g1/heapRegionSet.hpp	Tue Aug 19 12:39:06 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/heapRegionSet.hpp	Tue Aug 19 10:50:27 2014 +0200
@@ -162,7 +162,7 @@
 // diagnosing failures.
 class hrs_ext_msg : public hrs_err_msg {
 public:
-  hrs_ext_msg(HeapRegionSetBase* set, const char* message) : hrs_err_msg("%s","") {
+  hrs_ext_msg(HeapRegionSetBase* set, const char* message) : hrs_err_msg("%s", "") {
     set->fill_in_ext_msg(this, message);
   }
 };
@@ -192,13 +192,9 @@
 };
 
 // A set that links all the regions added to it in a doubly-linked
-// list. We should try to avoid doing operations that iterate over
+// sorted list. We should try to avoid doing operations that iterate over
 // such lists in performance critical paths. Typically we should
-// add / remove one region at a time or concatenate two lists. There are
-// two ways to treat your lists, ordered and un-ordered. All un-ordered
-// operations are done in constant time. To keep a list ordered only use
-// add_ordered() to add elements to the list. If a list is not ordered
-// from start, there is no way to sort it later.
+// add / remove one region at a time or concatenate two lists.
 
 class FreeRegionListIterator;
 
@@ -210,13 +206,13 @@
   HeapRegion* _tail;
 
   // _last is used to keep track of where we added an element the last
-  // time in ordered lists. It helps to improve performance when adding
-  // several ordered items in a row.
+  // time. It helps to improve performance when adding several ordered items in a row.
   HeapRegion* _last;
 
   static uint _unrealistically_long_length;
 
-  void add_as_head_or_tail(FreeRegionList* from_list, bool as_head);
+  inline HeapRegion* remove_from_head_impl();
+  inline HeapRegion* remove_from_tail_impl();
 
 protected:
   virtual void fill_in_ext_msg_extra(hrs_ext_msg* msg);
@@ -232,8 +228,11 @@
 
   void verify_list();
 
-  HeapRegion* head() { return _head; }
-  HeapRegion* tail() { return _tail; }
+#ifdef ASSERT
+  bool contains(HeapRegion* hr) const {
+    return hr->containing_set() == this;
+  }
+#endif
 
   static void set_unrealistically_long_length(uint len);
 
@@ -242,55 +241,20 @@
   // is determined by hrs_index.
   inline void add_ordered(HeapRegion* hr);
 
-  // It adds hr to the list as the new head. The region should not be
-  // a member of another set.
-  inline void add_as_head(HeapRegion* hr);
-
-  // It adds hr to the list as the new tail. The region should not be
-  // a member of another set.
-  inline void add_as_tail(HeapRegion* hr);
-
-  // It removes and returns the head of the list. It assumes that the
-  // list is not empty so it will return a non-NULL value.
-  inline HeapRegion* remove_head();
-
-  // Convenience method.
-  inline HeapRegion* remove_head_or_null();
-
-  // Removes and returns the last element (_tail) of the list. It assumes
-  // that the list isn't empty so that it can return a non-NULL value.
-  inline HeapRegion* remove_tail();
-
-  // Convenience method
-  inline HeapRegion* remove_tail_or_null();
-
   // Removes from head or tail based on the given argument.
-  inline HeapRegion* remove_region(bool from_head);
+  HeapRegion* remove_region(bool from_head);
 
   // Merge two ordered lists. The result is also ordered. The order is
   // determined by hrs_index.
   void add_ordered(FreeRegionList* from_list);
 
-  // It moves the regions from from_list to this list and empties
-  // from_list. The new regions will appear in the same order as they
-  // were in from_list and be linked in the beginning of this list.
-  void add_as_head(FreeRegionList* from_list);
-
-  // It moves the regions from from_list to this list and empties
-  // from_list. The new regions will appear in the same order as they
-  // were in from_list and be linked in the end of this list.
-  void add_as_tail(FreeRegionList* from_list);
-
   // It empties the list by removing all regions from it.
   void remove_all();
 
-  // It removes all regions in the list that are pending for removal
-  // (i.e., they have been tagged with "pending_removal"). The list
-  // must not be empty, target_count should reflect the exact number
-  // of regions that are pending for removal in the list, and
-  // target_count should be > 1 (currently, we never need to remove a
-  // single region using this).
-  void remove_all_pending(uint target_count);
+  // Remove all (contiguous) regions from first to first + num_regions - 1 from
+  // this list.
+  // num_regions must be >= 1.
+  void remove_starting_at(HeapRegion* first, uint num_regions);
 
   virtual void verify();
 
@@ -298,7 +262,7 @@
 };
 
 // Iterator class that provides a convenient way to iterate over the
-// regions of a HeapRegionLinkedList instance.
+// regions of a FreeRegionList.
 
 class FreeRegionListIterator : public StackObj {
 private:
@@ -324,7 +288,7 @@
   }
 
   FreeRegionListIterator(FreeRegionList* list) : _curr(NULL), _list(list) {
-    _curr = list->head();
+    _curr = list->_head;
   }
 };
 
--- a/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp	Tue Aug 19 12:39:06 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp	Tue Aug 19 10:50:27 2014 +0200
@@ -30,7 +30,8 @@
 inline void HeapRegionSetBase::add(HeapRegion* hr) {
   check_mt_safety();
   assert(hr->containing_set() == NULL, hrs_ext_msg(this, "should not already have a containing set %u"));
-  assert(hr->next() == NULL && hr->prev() == NULL, hrs_ext_msg(this, "should not already be linked"));
+  assert(hr->next() == NULL, hrs_ext_msg(this, "should not already be linked"));
+  assert(hr->prev() == NULL, hrs_ext_msg(this, "should not already be linked"));
 
   _count.increment(1u, hr->capacity());
   hr->set_containing_set(this);
@@ -40,7 +41,8 @@
 inline void HeapRegionSetBase::remove(HeapRegion* hr) {
   check_mt_safety();
   verify_region(hr);
-  assert(hr->next() == NULL && hr->prev() == NULL, hrs_ext_msg(this, "should already be unlinked"));
+  assert(hr->next() == NULL, hrs_ext_msg(this, "should already be unlinked"));
+  assert(hr->prev() == NULL, hrs_ext_msg(this, "should already be unlinked"));
 
   hr->set_containing_set(NULL);
   assert(_count.length() > 0, hrs_ext_msg(this, "pre-condition"));
@@ -48,8 +50,7 @@
 }
 
 inline void FreeRegionList::add_ordered(HeapRegion* hr) {
-  check_mt_safety();
-  assert((length() == 0 && _head == NULL && _tail == NULL) ||
+  assert((length() == 0 && _head == NULL && _tail == NULL && _last == NULL) ||
          (length() >  0 && _head != NULL && _tail != NULL),
          hrs_ext_msg(this, "invariant"));
   // add() will verify the region and check mt safety.
@@ -95,55 +96,48 @@
   _last = hr;
 }
 
-inline void FreeRegionList::add_as_head(HeapRegion* hr) {
-  assert((length() == 0 && _head == NULL && _tail == NULL) ||
-         (length() >  0 && _head != NULL && _tail != NULL),
-         hrs_ext_msg(this, "invariant"));
-  // add() will verify the region and check mt safety.
-  add(hr);
-
-  // Now link the region.
-  if (_head != NULL) {
-    hr->set_next(_head);
-    _head->set_prev(hr);
-  } else {
-    _tail = hr;
-  }
-  _head = hr;
-}
-
-inline void FreeRegionList::add_as_tail(HeapRegion* hr) {
-  check_mt_safety();
-  assert((length() == 0 && _head == NULL && _tail == NULL) ||
-         (length() >  0 && _head != NULL && _tail != NULL),
-         hrs_ext_msg(this, "invariant"));
-  // add() will verify the region and check mt safety.
-  add(hr);
-
-  // Now link the region.
-  if (_tail != NULL) {
-    _tail->set_next(hr);
-    hr->set_prev(_tail);
-  } else {
-    _head = hr;
-  }
-  _tail = hr;
-}
-
-inline HeapRegion* FreeRegionList::remove_head() {
-  assert(!is_empty(), hrs_ext_msg(this, "the list should not be empty"));
-  assert(length() > 0 && _head != NULL && _tail != NULL,
-         hrs_ext_msg(this, "invariant"));
-
-  // We need to unlink it first.
-  HeapRegion* hr = _head;
-  _head = hr->next();
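+// Unlink and return the list head; the caller (remove_region()) handles the
+// count bookkeeping and clears the cached _last.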
+inline HeapRegion* FreeRegionList::remove_from_head_impl() {
+  HeapRegion* result = _head;
+  _head = result->next();
   if (_head == NULL) {
     _tail = NULL;
   } else {
     _head->set_prev(NULL);
   }
-  hr->set_next(NULL);
+  result->set_next(NULL);
+  return result;
+}
+
+inline HeapRegion* FreeRegionList::remove_from_tail_impl() {
+  HeapRegion* result = _tail;
+
+  _tail = result->prev();
+  if (_tail == NULL) {
+    _head = NULL;
+  } else {
+    _tail->set_next(NULL);
+  }
+  result->set_prev(NULL);
+  return result;
+}
+
+inline HeapRegion* FreeRegionList::remove_region(bool from_head) {
+  check_mt_safety();
+  verify_optional();
+
+  if (is_empty()) {
+    return NULL;
+  }
+  assert(length() > 0 && _head != NULL && _tail != NULL,
+         hrs_ext_msg(this, "invariant"));
+
+  HeapRegion* hr;
+
+  if (from_head) {
+    hr = remove_from_head_impl();
+  } else {
+    hr = remove_from_tail_impl();
+  }
 
   if (_last == hr) {
     _last = NULL;
@@ -154,56 +148,5 @@
   return hr;
 }
 
-inline HeapRegion* FreeRegionList::remove_head_or_null() {
-  check_mt_safety();
-  if (!is_empty()) {
-    return remove_head();
-  } else {
-    return NULL;
-  }
-}
-
-inline HeapRegion* FreeRegionList::remove_tail() {
-  assert(!is_empty(), hrs_ext_msg(this, "The list should not be empty"));
-  assert(length() > 0 && _head != NULL && _tail != NULL,
-         hrs_ext_msg(this, "invariant"));
-
-  // We need to unlink it first
-  HeapRegion* hr = _tail;
-
-  _tail = hr->prev();
-  if (_tail == NULL) {
-    _head = NULL;
-  } else {
-    _tail->set_next(NULL);
-  }
-  hr->set_prev(NULL);
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSET_INLINE_HPP
 
-  if (_last == hr) {
-    _last = NULL;
-  }
-
-  // remove() will verify the region and check mt safety.
-  remove(hr);
-  return hr;
-}
-
-inline HeapRegion* FreeRegionList::remove_tail_or_null() {
-  check_mt_safety();
-
-  if (!is_empty()) {
-    return remove_tail();
-  } else {
-    return NULL;
-  }
-}
-
-inline HeapRegion* FreeRegionList::remove_region(bool from_head) {
-  if (from_head) {
-    return remove_head_or_null();
-  } else {
-    return remove_tail_or_null();
-  }
-}
-
-#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSET_INLINE_HPP
--- a/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp	Tue Aug 19 12:39:06 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp	Tue Aug 19 10:50:27 2014 +0200
@@ -43,10 +43,9 @@
   nonstatic_field(G1HeapRegionTable, _shift_by,         uint)                 \
                                                                               \
   nonstatic_field(HeapRegionSeq,   _regions,            G1HeapRegionTable)    \
-  nonstatic_field(HeapRegionSeq,   _committed_length,   uint)                 \
+  nonstatic_field(HeapRegionSeq,   _num_committed,      uint)                 \
                                                                               \
   nonstatic_field(G1CollectedHeap, _hrs,                HeapRegionSeq)        \
-  nonstatic_field(G1CollectedHeap, _g1_committed,       MemRegion)            \
   nonstatic_field(G1CollectedHeap, _summary_bytes_used, size_t)               \
   nonstatic_field(G1CollectedHeap, _g1mm,               G1MonitoringSupport*) \
   nonstatic_field(G1CollectedHeap, _old_set,            HeapRegionSetBase)    \
--- a/src/share/vm/prims/whitebox.cpp	Tue Aug 19 12:39:06 2014 +0200
+++ b/src/share/vm/prims/whitebox.cpp	Tue Aug 19 10:50:27 2014 +0200
@@ -231,7 +231,7 @@
 
 WB_ENTRY(jlong, WB_G1NumFreeRegions(JNIEnv* env, jobject o))
   G1CollectedHeap* g1 = G1CollectedHeap::heap();
-  size_t nr = g1->free_regions();
+  size_t nr = g1->num_free_regions();
   return (jlong)nr;
 WB_END