changeset 20337:1f1d373cd044

8038423: G1: Decommit memory within heap
Summary: Allow G1 to decommit memory of arbitrary regions within the heap and their associated auxiliary data structures (card table, BOT, hot card cache, and mark bitmaps).
Reviewed-by: mgerdin, brutisso, jwilhelm
author tschatzl
date Thu, 21 Aug 2014 11:47:10 +0200
parents 6701abbc4441
children 8d5f66b42c53
files src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp src/share/vm/gc_implementation/g1/concurrentMark.cpp src/share/vm/gc_implementation/g1/concurrentMark.hpp src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp src/share/vm/gc_implementation/g1/g1BlockOffsetTable.inline.hpp src/share/vm/gc_implementation/g1/g1CardCounts.cpp src/share/vm/gc_implementation/g1/g1CardCounts.hpp src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp src/share/vm/gc_implementation/g1/g1HotCardCache.cpp src/share/vm/gc_implementation/g1/g1HotCardCache.hpp src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.cpp src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.hpp src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.cpp src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.hpp src/share/vm/gc_implementation/g1/g1RemSet.cpp src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp src/share/vm/gc_implementation/g1/heapRegionSeq.cpp src/share/vm/gc_implementation/g1/heapRegionSeq.hpp src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp src/share/vm/gc_implementation/g1/heapRegionSet.cpp src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp src/share/vm/memory/cardTableModRefBS.cpp src/share/vm/memory/cardTableModRefBS.hpp src/share/vm/memory/cardTableRS.cpp src/share/vm/prims/jni.cpp
diffstat 32 files changed, 1326 insertions(+), 583 deletions(-)
--- a/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Tue Aug 19 10:50:27 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Thu Aug 21 11:47:10 2014 +0200
@@ -81,8 +81,8 @@
   }
 }
 
-void ConcurrentG1Refine::init() {
-  _hot_card_cache.initialize();
+void ConcurrentG1Refine::init(G1RegionToSpaceMapper* card_counts_storage) {
+  _hot_card_cache.initialize(card_counts_storage);
 }
 
 void ConcurrentG1Refine::stop() {
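The G1RegionToSpaceMapper parameter threaded through init() above is the pattern this changeset applies to every auxiliary structure: the owner of region-indexed storage registers a listener with the mapper, and the mapper calls back whenever it commits backing pages for a range of regions, so the owner can (re)initialize exactly the affected slice. A minimal standalone sketch of that callback shape follows; the types and bodies are illustrative stand-ins, not the HotSpot classes.

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Stand-in for G1MappingChangedListener: implementors react when backing
    // memory for regions [start_idx, start_idx + num_regions) is committed.
    class MappingChangedListener {
     public:
      virtual ~MappingChangedListener() {}
      virtual void on_commit(uint32_t start_idx, size_t num_regions) = 0;
    };

    // Stand-in for G1RegionToSpaceMapper: commits pages on demand, then
    // notifies the registered listener.
    class RegionToSpaceMapper {
      MappingChangedListener* _listener;
     public:
      RegionToSpaceMapper() : _listener(NULL) {}
      void set_mapping_changed_listener(MappingChangedListener* l) { _listener = l; }
      void commit_regions(uint32_t start_idx, size_t num_regions) {
        // ... commit the backing pages for these regions here ...
        if (_listener != NULL) {
          _listener->on_commit(start_idx, num_regions);  // owner clears its slice
        }
      }
    };

    // Example owner, analogous to the card counts table.
    class CountTable : public MappingChangedListener {
     public:
      virtual void on_commit(uint32_t start_idx, size_t num_regions) {
        printf("clearing counts for regions %u..%u\n",
               start_idx, (uint32_t)(start_idx + num_regions - 1));
      }
    };

    int main() {
      RegionToSpaceMapper mapper;
      CountTable counts;
      mapper.set_mapping_changed_listener(&counts);
      mapper.commit_regions(4, 2);  // prints: clearing counts for regions 4..5
      return 0;
    }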
--- a/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp	Tue Aug 19 10:50:27 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp	Thu Aug 21 11:47:10 2014 +0200
@@ -34,6 +34,7 @@
 class ConcurrentG1RefineThread;
 class G1CollectedHeap;
 class G1HotCardCache;
+class G1RegionToSpaceMapper;
 class G1RemSet;
 class DirtyCardQueue;
 
@@ -74,7 +75,7 @@
   ConcurrentG1Refine(G1CollectedHeap* g1h, CardTableEntryClosure* refine_closure);
   ~ConcurrentG1Refine();
 
-  void init(); // Accomplish some initialization that has to wait.
+  void init(G1RegionToSpaceMapper* card_counts_storage);
   void stop();
 
   void reinitialize_threads();
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Tue Aug 19 10:50:27 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu Aug 21 11:47:10 2014 +0200
@@ -36,6 +36,7 @@
 #include "gc_implementation/g1/heapRegion.inline.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/g1/heapRegionSet.inline.hpp"
 #include "gc_implementation/shared/vmGCOperations.hpp"
 #include "gc_implementation/shared/gcTimer.hpp"
 #include "gc_implementation/shared/gcTrace.hpp"
@@ -98,12 +99,12 @@
 }
 
 #ifndef PRODUCT
-bool CMBitMapRO::covers(ReservedSpace heap_rs) const {
+bool CMBitMapRO::covers(MemRegion heap_rs) const {
   // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
   assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
          "size inconsistency");
-  return _bmStartWord == (HeapWord*)(heap_rs.base()) &&
-         _bmWordSize  == heap_rs.size()>>LogHeapWordSize;
+  return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
+         _bmWordSize  == heap_rs.word_size();
 }
 #endif
 
@@ -111,33 +112,73 @@
   _bm.print_on_error(st, prefix);
 }
 
-bool CMBitMap::allocate(ReservedSpace heap_rs) {
-  _bmStartWord = (HeapWord*)(heap_rs.base());
-  _bmWordSize  = heap_rs.size()/HeapWordSize;    // heap_rs.size() is in bytes
-  ReservedSpace brs(ReservedSpace::allocation_align_size_up(
-                     (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
-  if (!brs.is_reserved()) {
-    warning("ConcurrentMark marking bit map allocation failure");
+size_t CMBitMap::compute_size(size_t heap_size) {
+  return heap_size / mark_distance();
+}
+
+size_t CMBitMap::mark_distance() {
+  return MinObjAlignmentInBytes * BitsPerByte;
+}
+
+void CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
+  _bmStartWord = heap.start();
+  _bmWordSize = heap.word_size();
+
+  _bm.set_map((BitMap::bm_word_t*) storage->reserved().start());
+  _bm.set_size(_bmWordSize >> _shifter);
+
+  storage->set_mapping_changed_listener(&_listener);
+}
+
+void CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions) {
+  // We need to clear the bitmap on commit, removing any existing information.
+  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
+  _bm->clearRange(mr);
+}
+
+// Closure used for clearing the given mark bitmap.
+class ClearBitmapHRClosure : public HeapRegionClosure {
+ private:
+  ConcurrentMark* _cm;
+  CMBitMap* _bitmap;
+  bool _may_yield;      // The closure may yield during iteration. If yielded, abort the iteration.
+ public:
+  ClearBitmapHRClosure(ConcurrentMark* cm, CMBitMap* bitmap, bool may_yield) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(may_yield) {
+    assert(!may_yield || cm != NULL, "CM must be non-NULL if this closure is expected to yield.");
+  }
+
+  virtual bool doHeapRegion(HeapRegion* r) {
+    size_t const chunk_size_in_words = M / HeapWordSize;
+
+    HeapWord* cur = r->bottom();
+    HeapWord* const end = r->end();
+
+    while (cur < end) {
+      MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
+      _bitmap->clearRange(mr);
+
+      cur += chunk_size_in_words;
+
+      // Abort iteration if after yielding the marking has been aborted.
+      if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) {
+        return true;
+      }
+      // Repeat the asserts from before the start of the closure. We will do them
+      // as asserts here to minimize their overhead on the product. However, we
+      // will have them as guarantees at the beginning / end of the bitmap
+      // clearing to get some checking in the product.
+      assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
+      assert(!_may_yield || !G1CollectedHeap::heap()->mark_in_progress(), "invariant");
+    }
+
     return false;
   }
-  MemTracker::record_virtual_memory_type((address)brs.base(), mtGC);
-  // For now we'll just commit all of the bit map up front.
-  // Later on we'll try to be more parsimonious with swap.
-  if (!_virtual_space.initialize(brs, brs.size())) {
-    warning("ConcurrentMark marking bit map backing store failure");
-    return false;
-  }
-  assert(_virtual_space.committed_size() == brs.size(),
-         "didn't reserve backing store for all of concurrent marking bit map?");
-  _bm.set_map((BitMap::bm_word_t*)_virtual_space.low());
-  assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
-         _bmWordSize, "inconsistency in bit map sizing");
-  _bm.set_size(_bmWordSize >> _shifter);
-  return true;
-}
+};
 
 void CMBitMap::clearAll() {
-  _bm.clear();
+  ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
+  G1CollectedHeap::heap()->heap_region_iterate(&cl);
+  guarantee(cl.complete(), "Must have completed iteration.");
   return;
 }
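compute_size() and mark_distance() above make the bitmap geometry explicit: one mark bit per possible object start, so each bitmap byte covers MinObjAlignmentInBytes * BitsPerByte bytes of heap. A standalone check of that arithmetic, assuming the usual 8-byte minimum object alignment:

    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    int main() {
      // Assumed values: 8-byte minimum object alignment, 8 bits per byte.
      const size_t MinObjAlignmentInBytes = 8;
      const size_t BitsPerByte            = 8;

      // One bit per possible object start => heap bytes per bitmap byte.
      const size_t mark_distance = MinObjAlignmentInBytes * BitsPerByte;  // 64

      const size_t heap_size   = (size_t)1 << 30;           // 1 GB heap
      const size_t bitmap_size = heap_size / mark_distance; // CMBitMap::compute_size

      assert(bitmap_size == ((size_t)1 << 24));              // 16 MB per bitmap
      printf("1 GB heap -> %zu MB per mark bitmap (x2: prev and next)\n",
             bitmap_size >> 20);
      return 0;
    }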
 
@@ -482,10 +523,10 @@
   return MAX2((n_par_threads + 2) / 4, 1U);
 }
 
-ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
+ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
   _g1h(g1h),
-  _markBitMap1(log2_intptr(MinObjAlignment)),
-  _markBitMap2(log2_intptr(MinObjAlignment)),
+  _markBitMap1(),
+  _markBitMap2(),
   _parallel_marking_threads(0),
   _max_parallel_marking_threads(0),
   _sleep_factor(0.0),
@@ -494,7 +535,7 @@
   _cleanup_task_overhead(1.0),
   _cleanup_list("Cleanup List"),
   _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
-  _card_bm((heap_rs.size() + CardTableModRefBS::card_size - 1) >>
+  _card_bm((g1h->reserved_region().byte_size() + CardTableModRefBS::card_size - 1) >>
             CardTableModRefBS::card_shift,
             false /* in_resource_area*/),
 
@@ -544,14 +585,8 @@
                            "heap end = " INTPTR_FORMAT, p2i(_heap_start), p2i(_heap_end));
   }
 
-  if (!_markBitMap1.allocate(heap_rs)) {
-    warning("Failed to allocate first CM bit map");
-    return;
-  }
-  if (!_markBitMap2.allocate(heap_rs)) {
-    warning("Failed to allocate second CM bit map");
-    return;
-  }
+  _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
+  _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);
 
   // Create & start a ConcurrentMark thread.
   _cmThread = new ConcurrentMarkThread(this);
@@ -562,8 +597,8 @@
   }
 
   assert(CGC_lock != NULL, "Where's the CGC_lock?");
-  assert(_markBitMap1.covers(heap_rs), "_markBitMap1 inconsistency");
-  assert(_markBitMap2.covers(heap_rs), "_markBitMap2 inconsistency");
+  assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
+  assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");
 
   SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
   satb_qs.set_buffer_size(G1SATBBufferSize);
@@ -723,37 +758,17 @@
   clear_all_count_data();
 
   // so that the call below can read a sensible value
-  _heap_start = (HeapWord*) heap_rs.base();
+  _heap_start = g1h->reserved_region().start();
   set_non_marking_state();
   _completed_initialization = true;
 }
 
-void ConcurrentMark::update_heap_boundaries(MemRegion bounds, bool force) {
-  // If concurrent marking is not in progress, then we do not need to
-  // update _heap_end.
-  if (!concurrent_marking_in_progress() && !force) return;
-
-  assert(bounds.start() == _heap_start, "start shouldn't change");
-  HeapWord* new_end = bounds.end();
-  if (new_end > _heap_end) {
-    // The heap has been expanded.
-
-    _heap_end = new_end;
-  }
-  // Notice that the heap can also shrink. However, this only happens
-  // during a Full GC (at least currently) and the entire marking
-  // phase will bail out and the task will not be restarted. So, let's
-  // do nothing.
-}
-
 void ConcurrentMark::reset() {
   // Starting values for these two. This should be called in a STW
-  // phase. CM will be notified of any future g1_committed expansions
-  // will be at the end of evacuation pauses, when tasks are
-  // inactive.
-  MemRegion committed = _g1h->g1_committed();
-  _heap_start = committed.start();
-  _heap_end   = committed.end();
+  // phase.
+  MemRegion reserved = _g1h->g1_reserved();
+  _heap_start = reserved.start();
+  _heap_end   = reserved.end();
 
   // Separated the asserts so that we know which one fires.
   assert(_heap_start != NULL, "heap bounds should look ok");
@@ -825,7 +840,6 @@
     assert(out_of_regions(),
            err_msg("only way to get here: _finger: "PTR_FORMAT", _heap_end: "PTR_FORMAT,
                    p2i(_finger), p2i(_heap_end)));
-    update_heap_boundaries(_g1h->g1_committed(), true);
   }
 }
 
@@ -844,7 +858,6 @@
 
 void ConcurrentMark::clearNextBitmap() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  G1CollectorPolicy* g1p = g1h->g1_policy();
 
   // Make sure that the concurrent mark thread looks to still be in
   // the current cycle.
@@ -856,41 +869,36 @@
   // is the case.
   guarantee(!g1h->mark_in_progress(), "invariant");
 
-  // clear the mark bitmap (no grey objects to start with).
-  // We need to do this in chunks and offer to yield in between
-  // each chunk.
-  HeapWord* start  = _nextMarkBitMap->startWord();
-  HeapWord* end    = _nextMarkBitMap->endWord();
-  HeapWord* cur    = start;
-  size_t chunkSize = M;
-  while (cur < end) {
-    HeapWord* next = cur + chunkSize;
-    if (next > end) {
-      next = end;
-    }
-    MemRegion mr(cur,next);
-    _nextMarkBitMap->clearRange(mr);
-    cur = next;
-    do_yield_check();
-
-    // Repeat the asserts from above. We'll do them as asserts here to
-    // minimize their overhead on the product. However, we'll have
-    // them as guarantees at the beginning / end of the bitmap
-    // clearing to get some checking in the product.
-    assert(cmThread()->during_cycle(), "invariant");
-    assert(!g1h->mark_in_progress(), "invariant");
+  ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
+  g1h->heap_region_iterate(&cl);
+
+  // Clear the liveness counting data. If the marking has been aborted, the abort()
+  // call already did that.
+  if (cl.complete()) {
+    clear_all_count_data();
   }
 
-  // Clear the liveness counting data
-  clear_all_count_data();
-
   // Repeat the asserts from above.
   guarantee(cmThread()->during_cycle(), "invariant");
   guarantee(!g1h->mark_in_progress(), "invariant");
 }
 
+class CheckBitmapClearHRClosure : public HeapRegionClosure {
+  CMBitMap* _bitmap;
+  bool _error;
+ public:
+  CheckBitmapClearHRClosure(CMBitMap* bitmap) : _bitmap(bitmap) {
+  }
+
+  virtual bool doHeapRegion(HeapRegion* r) {
+    return _bitmap->getNextMarkedWordAddress(r->bottom(), r->end()) != r->end();
+  }
+};
+
 bool ConcurrentMark::nextMarkBitmapIsClear() {
-  return _nextMarkBitMap->getNextMarkedWordAddress(_heap_start, _heap_end) == _heap_end;
+  CheckBitmapClearHRClosure cl(_nextMarkBitMap);
+  _g1h->heap_region_iterate(&cl);
+  return cl.complete();
 }
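Both ClearBitmapHRClosure and CheckBitmapClearHRClosure lean on the HeapRegionClosure protocol: doHeapRegion() returning true aborts the iteration, and complete() afterwards reports whether the walk reached the end, so nextMarkBitmapIsClear() is true exactly when no region contained a marked word. A minimal sketch of that protocol with simplified stand-in types:

    #include <cstdio>
    #include <vector>

    struct Region { bool has_mark; };

    // Stand-in for HeapRegionClosure: returning true from do_region() aborts
    // the iteration, and complete() reports whether it ran to the end.
    class RegionClosure {
      bool _complete;
     public:
      RegionClosure() : _complete(true) {}
      virtual ~RegionClosure() {}
      virtual bool do_region(Region* r) = 0;
      void set_incomplete() { _complete = false; }
      bool complete() const { return _complete; }
    };

    // Stand-in for heap_region_iterate(): stop as soon as a closure asks to.
    void iterate(std::vector<Region>& heap, RegionClosure* cl) {
      for (size_t i = 0; i < heap.size(); i++) {
        if (cl->do_region(&heap[i])) { cl->set_incomplete(); return; }
      }
    }

    // Mirrors CheckBitmapClearHRClosure: abort on the first marked region.
    class CheckClear : public RegionClosure {
     public:
      virtual bool do_region(Region* r) { return r->has_mark; }
    };

    int main() {
      Region r0 = { false }, r1 = { false }, r2 = { true };
      std::vector<Region> heap;
      heap.push_back(r0); heap.push_back(r1); heap.push_back(r2);

      CheckClear cl;
      iterate(heap, &cl);
      printf("bitmap clear: %s\n", cl.complete() ? "yes" : "no");  // "no"
      return 0;
    }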
 
 class NoteStartOfMarkHRClosure: public HeapRegionClosure {
@@ -2190,8 +2198,8 @@
                            _cleanup_list.length());
   }
 
-  // Noone else should be accessing the _cleanup_list at this point,
-  // so it's not necessary to take any locks
+  // No one else should be accessing the _cleanup_list at this point,
+  // so it is not necessary to take any locks
   while (!_cleanup_list.is_empty()) {
     HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
     assert(hr != NULL, "Got NULL from a non-empty list");
@@ -2977,22 +2985,25 @@
     // claim_region() and a humongous object allocation might force us
     // to do a bit of unnecessary work (due to some unnecessary bitmap
     // iterations) but it should not introduce any correctness issues.
-    HeapRegion* curr_region   = _g1h->heap_region_containing_raw(finger);
-    HeapWord*   bottom        = curr_region->bottom();
-    HeapWord*   end           = curr_region->end();
-    HeapWord*   limit         = curr_region->next_top_at_mark_start();
-
-    if (verbose_low()) {
-      gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" "
-                             "["PTR_FORMAT", "PTR_FORMAT"), "
-                             "limit = "PTR_FORMAT,
-                             worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
-    }
+    HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);
+
+    // heap_region_containing_raw above may return NULL because we always scan and
+    // claim until the end of the heap. In this case, just jump to the next region.
+    HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
 
     // Is the gap between reading the finger and doing the CAS too long?
     HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
-    if (res == finger) {
+    if (res == finger && curr_region != NULL) {
       // we succeeded
+      HeapWord*   bottom        = curr_region->bottom();
+      HeapWord*   limit         = curr_region->next_top_at_mark_start();
+
+      if (verbose_low()) {
+        gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" "
+                               "["PTR_FORMAT", "PTR_FORMAT"), "
+                               "limit = "PTR_FORMAT,
+                               worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
+      }
 
       // notice that _finger == end cannot be guaranteed here since,
       // someone else might have moved the finger even further
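The claim protocol above is the standard lock-free pattern: read the shared finger, compute the end of the region it points into (or one region stride further when the region is uncommitted and heap_region_containing_raw returned NULL), then CAS the finger forward; only the thread whose CAS observes the old value owns the claimed range. A condensed sketch using std::atomic in place of Atomic::cmpxchg_ptr, with an assumed region size:

    #include <atomic>
    #include <cstdint>

    typedef uintptr_t HeapWord;               // stand-in: word-sized address
    static const size_t GrainWords = 1024;    // assumed region size in words

    std::atomic<HeapWord> g_finger;           // the shared global finger

    // Try to claim [finger, finger + GrainWords). Returns the claimed start,
    // or 0 when another thread moved the finger first (caller re-reads, retries).
    HeapWord try_claim() {
      HeapWord finger = g_finger.load();
      HeapWord end    = finger + GrainWords;  // region end, or the skip width
                                              // for an uncommitted (NULL) region
      HeapWord expected = finger;
      if (g_finger.compare_exchange_strong(expected, end)) {
        return finger;                        // CAS saw the old value: we own it
      }
      return 0;                               // somebody else moved the finger
    }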
@@ -3023,10 +3034,17 @@
     } else {
       assert(_finger > finger, "the finger should have moved forward");
       if (verbose_low()) {
-        gclog_or_tty->print_cr("[%u] somebody else moved the finger, "
-                               "global finger = "PTR_FORMAT", "
-                               "our finger = "PTR_FORMAT,
-                               worker_id, p2i(_finger), p2i(finger));
+        if (curr_region == NULL) {
+          gclog_or_tty->print_cr("[%u] found uncommitted region, moving finger, "
+                                 "global finger = "PTR_FORMAT", "
+                                 "our finger = "PTR_FORMAT,
+                                 worker_id, p2i(_finger), p2i(finger));
+        } else {
+          gclog_or_tty->print_cr("[%u] somebody else moved the finger, "
+                                 "global finger = "PTR_FORMAT", "
+                                 "our finger = "PTR_FORMAT,
+                                 worker_id, p2i(_finger), p2i(finger));
+        }
       }
 
       // read it again
@@ -3141,8 +3159,10 @@
       // happens, heap_region_containing() will return the bottom of the
       // corresponding starts humongous region and the check below will
       // not hold any more.
+      // Since we always iterate over all regions, we might get a NULL HeapRegion
+      // here.
       HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
-      guarantee(global_finger == global_hr->bottom(),
+      guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
                 err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT,
                         p2i(global_finger), HR_FORMAT_PARAMS(global_hr)));
     }
@@ -3155,7 +3175,7 @@
       if (task_finger != NULL && task_finger < _heap_end) {
         // See above note on the global finger verification.
         HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
-        guarantee(task_finger == task_hr->bottom() ||
+        guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
                   !task_hr->in_collection_set(),
                   err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT,
                           p2i(task_finger), HR_FORMAT_PARAMS(task_hr)));
@@ -4671,7 +4691,6 @@
     _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
     _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  MemRegion g1_committed = g1h->g1_committed();
   MemRegion g1_reserved = g1h->g1_reserved();
   double now = os::elapsedTime();
 
@@ -4679,10 +4698,8 @@
   _out->cr();
   _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
   _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
-                 G1PPRL_SUM_ADDR_FORMAT("committed")
                  G1PPRL_SUM_ADDR_FORMAT("reserved")
                  G1PPRL_SUM_BYTE_FORMAT("region-size"),
-                 p2i(g1_committed.start()), p2i(g1_committed.end()),
                  p2i(g1_reserved.start()), p2i(g1_reserved.end()),
                  HeapRegion::GrainBytes);
   _out->print_cr(G1PPRL_LINE_PREFIX);
--- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Tue Aug 19 10:50:27 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Thu Aug 21 11:47:10 2014 +0200
@@ -27,10 +27,12 @@
 
 #include "classfile/javaClasses.hpp"
 #include "gc_implementation/g1/heapRegionSet.hpp"
+#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
 #include "gc_implementation/shared/gcId.hpp"
 #include "utilities/taskqueue.hpp"
 
 class G1CollectedHeap;
+class CMBitMap;
 class CMTask;
 typedef GenericTaskQueue<oop, mtGC>            CMTaskQueue;
 typedef GenericTaskQueueSet<CMTaskQueue, mtGC> CMTaskQueueSet;
@@ -57,7 +59,6 @@
   HeapWord* _bmStartWord;      // base address of range covered by map
   size_t    _bmWordSize;       // map size (in #HeapWords covered)
   const int _shifter;          // map to char or bit
-  VirtualSpace _virtual_space; // underlying the bit map
   BitMap    _bm;               // the bit map itself
 
  public:
@@ -115,42 +116,41 @@
   void print_on_error(outputStream* st, const char* prefix) const;
 
   // debugging
-  NOT_PRODUCT(bool covers(ReservedSpace rs) const;)
+  NOT_PRODUCT(bool covers(MemRegion rs) const;)
+};
+
+class CMBitMapMappingChangedListener : public G1MappingChangedListener {
+ private:
+  CMBitMap* _bm;
+ public:
+  CMBitMapMappingChangedListener() : _bm(NULL) {}
+
+  void set_bitmap(CMBitMap* bm) { _bm = bm; }
+
+  virtual void on_commit(uint start_idx, size_t num_regions);
 };
 
 class CMBitMap : public CMBitMapRO {
+ private:
+  CMBitMapMappingChangedListener _listener;
 
  public:
-  // constructor
-  CMBitMap(int shifter) :
-    CMBitMapRO(shifter) {}
+  static size_t compute_size(size_t heap_size);
+  // Returns the number of bytes on the heap between two marks in the bitmap.
+  static size_t mark_distance();
 
-  // Allocates the back store for the marking bitmap
-  bool allocate(ReservedSpace heap_rs);
+  CMBitMap() : CMBitMapRO(LogMinObjAlignment), _listener() { _listener.set_bitmap(this); }
 
-  // write marks
-  void mark(HeapWord* addr) {
-    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
-           "outside underlying space?");
-    _bm.set_bit(heapWordToOffset(addr));
-  }
-  void clear(HeapWord* addr) {
-    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
-           "outside underlying space?");
-    _bm.clear_bit(heapWordToOffset(addr));
-  }
-  bool parMark(HeapWord* addr) {
-    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
-           "outside underlying space?");
-    return _bm.par_set_bit(heapWordToOffset(addr));
-  }
-  bool parClear(HeapWord* addr) {
-    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
-           "outside underlying space?");
-    return _bm.par_clear_bit(heapWordToOffset(addr));
-  }
+  // Initializes the underlying BitMap to cover the given area.
+  void initialize(MemRegion heap, G1RegionToSpaceMapper* storage);
+
+  // Write marks.
+  inline void mark(HeapWord* addr);
+  inline void clear(HeapWord* addr);
+  inline bool parMark(HeapWord* addr);
+  inline bool parClear(HeapWord* addr);
+
   void markRange(MemRegion mr);
-  void clearAll();
   void clearRange(MemRegion mr);
 
   // Starting at the bit corresponding to "addr" (inclusive), find the next
@@ -161,6 +161,9 @@
   // the run.  If there is no "1" bit at or after "addr", return an empty
   // MemRegion.
   MemRegion getAndClearMarkedRegion(HeapWord* addr, HeapWord* end_addr);
+
+  // Clear the whole mark bitmap.
+  void clearAll();
 };
 
 // Represents a marking stack used by ConcurrentMarking in the G1 collector.
@@ -680,7 +683,7 @@
     return _task_queues->steal(worker_id, hash_seed, obj);
   }
 
-  ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs);
+  ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage);
   ~ConcurrentMark();
 
   ConcurrentMarkThread* cmThread() { return _cmThread; }
@@ -736,7 +739,8 @@
   // Clear the next marking bitmap (will be called concurrently).
   void clearNextBitmap();
 
-  // Return whether the next mark bitmap has no marks set.
+  // Return whether the next mark bitmap has no marks set. To be used for assertions
+  // only. Will not yield to pause requests.
   bool nextMarkBitmapIsClear();
 
   // These two do the work that needs to be done before and after the
@@ -794,12 +798,6 @@
                            bool verify_thread_buffers,
                            bool verify_fingers) PRODUCT_RETURN;
 
-  // It is called at the end of an evacuation pause during marking so
-  // that CM is notified of where the new end of the heap is. It
-  // doesn't do anything if concurrent_marking_in_progress() is false,
-  // unless the force parameter is true.
-  void update_heap_boundaries(MemRegion bounds, bool force = false);
-
   bool isMarked(oop p) const {
     assert(p != NULL && p->is_oop(), "expected an oop");
     HeapWord* addr = (HeapWord*)p;
--- a/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp	Tue Aug 19 10:50:27 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp	Thu Aug 21 11:47:10 2014 +0200
@@ -268,6 +268,36 @@
   return iterate(cl, mr);
 }
 
+#define check_mark(addr)                                                       \
+  assert(_bmStartWord <= (addr) && (addr) < (_bmStartWord + _bmWordSize),      \
+         "outside underlying space?");                                         \
+  assert(G1CollectedHeap::heap()->is_in_exact(addr),                           \
+         err_msg("Trying to access not available bitmap "PTR_FORMAT            \
+                 " corresponding to "PTR_FORMAT" (%u)",                        \
+                 p2i(this), p2i(addr), G1CollectedHeap::heap()->addr_to_region(addr)));
+
+inline void CMBitMap::mark(HeapWord* addr) {
+  check_mark(addr);
+  _bm.set_bit(heapWordToOffset(addr));
+}
+
+inline void CMBitMap::clear(HeapWord* addr) {
+  check_mark(addr);
+  _bm.clear_bit(heapWordToOffset(addr));
+}
+
+inline bool CMBitMap::parMark(HeapWord* addr) {
+  check_mark(addr);
+  return _bm.par_set_bit(heapWordToOffset(addr));
+}
+
+inline bool CMBitMap::parClear(HeapWord* addr) {
+  check_mark(addr);
+  return _bm.par_clear_bit(heapWordToOffset(addr));
+}
+
+#undef check_mark
+
 inline void CMTask::push(oop obj) {
   HeapWord* objAddr = (HeapWord*) obj;
   assert(_g1h->is_in_g1_reserved(objAddr), "invariant");
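The check_mark macro above is deliberately file-local: it is defined just before the four inline accessors that share it and #undef'd right after, so the fairly expensive asserts live in one place without leaking a macro to every includer. The same pattern in miniature:

    #include <cassert>
    #include <cstddef>

    // A file-local assertion macro shared by several inline functions,
    // defined immediately before them and removed immediately after,
    // so it never leaks into other translation units.
    #define check_index(i, len) assert((i) < (len) && "index out of range")

    inline int get(const int* a, size_t len, size_t i) {
      check_index(i, len);
      return a[i];
    }

    inline void set(int* a, size_t len, size_t i, int v) {
      check_index(i, len);
      a[i] = v;
    }

    #undef check_index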
--- a/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp	Tue Aug 19 10:50:27 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp	Thu Aug 21 11:47:10 2014 +0200
@@ -32,64 +32,37 @@
 
 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
 
+void G1BlockOffsetSharedArrayMappingChangedListener::on_commit(uint start_idx, size_t num_regions) {
+  // Nothing to do. The BOT is hard-wired to be part of the HeapRegion, and we cannot
+  // retrieve it here since this would cause firing of several asserts. The code
+  // executed after commit of a region already needs to do some re-initialization of
+  // the HeapRegion, so we combine that.
+}
+
 //////////////////////////////////////////////////////////////////////
 // G1BlockOffsetSharedArray
 //////////////////////////////////////////////////////////////////////
 
-G1BlockOffsetSharedArray::G1BlockOffsetSharedArray(MemRegion reserved,
-                                                   size_t init_word_size) :
-  _reserved(reserved), _end(NULL)
-{
-  size_t size = compute_size(reserved.word_size());
-  ReservedSpace rs(ReservedSpace::allocation_align_size_up(size));
-  if (!rs.is_reserved()) {
-    vm_exit_during_initialization("Could not reserve enough space for heap offset array");
-  }
-  if (!_vs.initialize(rs, 0)) {
-    vm_exit_during_initialization("Could not reserve enough space for heap offset array");
-  }
+G1BlockOffsetSharedArray::G1BlockOffsetSharedArray(MemRegion heap, G1RegionToSpaceMapper* storage) :
+  _reserved(), _end(NULL), _listener(), _offset_array(NULL) {
+
+  _reserved = heap;
+  _end = NULL;
 
-  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
+  MemRegion bot_reserved = storage->reserved();
 
-  _offset_array = (u_char*)_vs.low_boundary();
-  resize(init_word_size);
+  _offset_array = (u_char*)bot_reserved.start();
+  _end = _reserved.end();
+
+  storage->set_mapping_changed_listener(&_listener);
+
   if (TraceBlockOffsetTable) {
     gclog_or_tty->print_cr("G1BlockOffsetSharedArray::G1BlockOffsetSharedArray: ");
     gclog_or_tty->print_cr("  "
                   "  rs.base(): " INTPTR_FORMAT
                   "  rs.size(): " INTPTR_FORMAT
                   "  rs end(): " INTPTR_FORMAT,
-                  rs.base(), rs.size(), rs.base() + rs.size());
-    gclog_or_tty->print_cr("  "
-                  "  _vs.low_boundary(): " INTPTR_FORMAT
-                  "  _vs.high_boundary(): " INTPTR_FORMAT,
-                  _vs.low_boundary(),
-                  _vs.high_boundary());
-  }
-}
-
-void G1BlockOffsetSharedArray::resize(size_t new_word_size) {
-  assert(new_word_size <= _reserved.word_size(), "Resize larger than reserved");
-  size_t new_size = compute_size(new_word_size);
-  size_t old_size = _vs.committed_size();
-  size_t delta;
-  char* high = _vs.high();
-  _end = _reserved.start() + new_word_size;
-  if (new_size > old_size) {
-    delta = ReservedSpace::page_align_size_up(new_size - old_size);
-    assert(delta > 0, "just checking");
-    if (!_vs.expand_by(delta)) {
-      // Do better than this for Merlin
-      vm_exit_out_of_memory(delta, OOM_MMAP_ERROR, "offset table expansion");
-    }
-    assert(_vs.high() == high + delta, "invalid expansion");
-    // Initialization of the contents is left to the
-    // G1BlockOffsetArray that uses it.
-  } else {
-    delta = ReservedSpace::page_align_size_down(old_size - new_size);
-    if (delta == 0) return;
-    _vs.shrink_by(delta);
-    assert(_vs.high() == high - delta, "invalid expansion");
+                  bot_reserved.start(), bot_reserved.byte_size(), bot_reserved.end());
   }
 }
 
@@ -100,18 +73,7 @@
 }
 
 void G1BlockOffsetSharedArray::set_offset_array(HeapWord* left, HeapWord* right, u_char offset) {
-  check_index(index_for(right - 1), "right address out of range");
-  assert(left  < right, "Heap addresses out of order");
-  size_t num_cards = pointer_delta(right, left) >> LogN_words;
-  if (UseMemSetInBOT) {
-    memset(&_offset_array[index_for(left)], offset, num_cards);
-  } else {
-    size_t i = index_for(left);
-    const size_t end = i + num_cards;
-    for (; i < end; i++) {
-      _offset_array[i] = offset;
-    }
-  }
+  set_offset_array(index_for(left), index_for(right -1), offset);
 }
 
 //////////////////////////////////////////////////////////////////////
@@ -651,6 +613,25 @@
   _next_offset_index = 0;
 }
 
+HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold_raw() {
+  assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
+         "just checking");
+  _next_offset_index = _array->index_for_raw(_bottom);
+  _next_offset_index++;
+  _next_offset_threshold =
+    _array->address_for_index_raw(_next_offset_index);
+  return _next_offset_threshold;
+}
+
+void G1BlockOffsetArrayContigSpace::zero_bottom_entry_raw() {
+  assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
+         "just checking");
+  size_t bottom_index = _array->index_for_raw(_bottom);
+  assert(_array->address_for_index_raw(bottom_index) == _bottom,
+         "Precondition of call");
+  _array->set_offset_array_raw(bottom_index, 0);
+}
+
 HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold() {
   assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
          "just checking");
--- a/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp	Tue Aug 19 10:50:27 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp	Thu Aug 21 11:47:10 2014 +0200
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP
 
+#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
 #include "memory/memRegion.hpp"
 #include "runtime/virtualspace.hpp"
 #include "utilities/globalDefinitions.hpp"
@@ -106,6 +107,11 @@
   inline HeapWord* block_start_const(const void* addr) const;
 };
 
+class G1BlockOffsetSharedArrayMappingChangedListener : public G1MappingChangedListener {
+ public:
+  virtual void on_commit(uint start_idx, size_t num_regions);
+};
+
 // This implementation of "G1BlockOffsetTable" divides the covered region
 // into "N"-word subregions (where "N" = 2^"LogN".  An array with an entry
 // for each such subregion indicates how far back one must go to find the
@@ -125,6 +131,7 @@
   friend class VMStructs;
 
 private:
+  G1BlockOffsetSharedArrayMappingChangedListener _listener;
   // The reserved region covered by the shared array.
   MemRegion _reserved;
 
@@ -133,16 +140,8 @@
 
   // Array for keeping offsets for retrieving object start fast given an
   // address.
-  VirtualSpace _vs;
   u_char* _offset_array;          // byte array keeping backwards offsets
 
-  void check_index(size_t index, const char* msg) const {
-    assert(index < _vs.committed_size(),
-           err_msg("%s - "
-                   "index: " SIZE_FORMAT ", _vs.committed_size: " SIZE_FORMAT,
-                   msg, index, _vs.committed_size()));
-  }
-
   void check_offset(size_t offset, const char* msg) const {
     assert(offset <= N_words,
            err_msg("%s - "
@@ -152,63 +151,33 @@
 
   // Bounds checking accessors:
   // For performance these have to devolve to array accesses in product builds.
-  u_char offset_array(size_t index) const {
-    check_index(index, "index out of range");
-    return _offset_array[index];
-  }
+  inline u_char offset_array(size_t index) const;
 
   void set_offset_array(HeapWord* left, HeapWord* right, u_char offset);
 
-  void set_offset_array(size_t index, u_char offset) {
-    check_index(index, "index out of range");
-    check_offset(offset, "offset too large");
+  void set_offset_array_raw(size_t index, u_char offset) {
     _offset_array[index] = offset;
   }
 
-  void set_offset_array(size_t index, HeapWord* high, HeapWord* low) {
-    check_index(index, "index out of range");
-    assert(high >= low, "addresses out of order");
-    check_offset(pointer_delta(high, low), "offset too large");
-    _offset_array[index] = (u_char) pointer_delta(high, low);
-  }
+  inline void set_offset_array(size_t index, u_char offset);
+
+  inline void set_offset_array(size_t index, HeapWord* high, HeapWord* low);
 
-  void set_offset_array(size_t left, size_t right, u_char offset) {
-    check_index(right, "right index out of range");
-    assert(left <= right, "indexes out of order");
-    size_t num_cards = right - left + 1;
-    if (UseMemSetInBOT) {
-      memset(&_offset_array[left], offset, num_cards);
-    } else {
-      size_t i = left;
-      const size_t end = i + num_cards;
-      for (; i < end; i++) {
-        _offset_array[i] = offset;
-      }
-    }
-  }
+  inline void set_offset_array(size_t left, size_t right, u_char offset);
 
-  void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
-    check_index(index, "index out of range");
-    assert(high >= low, "addresses out of order");
-    check_offset(pointer_delta(high, low), "offset too large");
-    assert(_offset_array[index] == pointer_delta(high, low), "Wrong offset");
-  }
+  inline void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const;
 
   bool is_card_boundary(HeapWord* p) const;
 
+public:
+
   // Return the number of slots needed for an offset array
   // that covers mem_region_words words.
-  // We always add an extra slot because if an object
-  // ends on a card boundary we put a 0 in the next
-  // offset array slot, so we want that slot always
-  // to be reserved.
-
-  size_t compute_size(size_t mem_region_words) {
-    size_t number_of_slots = (mem_region_words / N_words) + 1;
-    return ReservedSpace::page_align_size_up(number_of_slots);
+  static size_t compute_size(size_t mem_region_words) {
+    size_t number_of_slots = (mem_region_words / N_words);
+    return ReservedSpace::allocation_align_size_up(number_of_slots);
   }
 
-public:
   enum SomePublicConstants {
     LogN = 9,
     LogN_words = LogN - LogHeapWordSize,
@@ -222,21 +191,21 @@
   // least "init_word_size".) The contents of the initial table are
   // undefined; it is the responsibility of the constituent
   // G1BlockOffsetTable(s) to initialize cards.
-  G1BlockOffsetSharedArray(MemRegion reserved, size_t init_word_size);
-
-  // Notes a change in the committed size of the region covered by the
-  // table.  The "new_word_size" may not be larger than the size of the
-  // reserved region this table covers.
-  void resize(size_t new_word_size);
+  G1BlockOffsetSharedArray(MemRegion heap, G1RegionToSpaceMapper* storage);
 
   void set_bottom(HeapWord* new_bottom);
 
   // Return the appropriate index into "_offset_array" for "p".
   inline size_t index_for(const void* p) const;
+  inline size_t index_for_raw(const void* p) const;
 
   // Return the address indicating the start of the region corresponding to
   // "index" in "_offset_array".
   inline HeapWord* address_for_index(size_t index) const;
+  // Variant of address_for_index that does not check the index for validity.
+  inline HeapWord* address_for_index_raw(size_t index) const {
+    return _reserved.start() + (index << LogN_words);
+  }
 };
 
 // And here is the G1BlockOffsetTable subtype that uses the array.
@@ -476,6 +445,12 @@
                       blk_start, blk_end);
   }
 
+  // Variant of zero_bottom_entry that does not check for availability of the
+  // memory first.
+  void zero_bottom_entry_raw();
+  // Variant of initialize_threshold that does not check for availability of the
+  // memory first.
+  HeapWord* initialize_threshold_raw();
   // Zero out the entry for _bottom (offset will be zero).
   void zero_bottom_entry();
  public:
@@ -486,8 +461,8 @@
   HeapWord* initialize_threshold();
 
   void reset_bot() {
-    zero_bottom_entry();
-    initialize_threshold();
+    zero_bottom_entry_raw();
+    initialize_threshold_raw();
   }
 
   // Return the next threshold, the point at which the table should be
--- a/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.inline.hpp	Tue Aug 19 10:50:27 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.inline.hpp	Thu Aug 21 11:47:10 2014 +0200
@@ -47,14 +47,69 @@
   }
 }
 
+#define check_index(index, msg)                                                \
+  assert((index) < (_reserved.word_size() >> LogN_words),                      \
+         err_msg("%s - index: "SIZE_FORMAT", _vs.committed_size: "SIZE_FORMAT, \
+                 msg, (index), (_reserved.word_size() >> LogN_words)));        \
+  assert(G1CollectedHeap::heap()->is_in_exact(address_for_index_raw(index)),   \
+         err_msg("Index "SIZE_FORMAT" corresponding to "PTR_FORMAT             \
+                 " (%u) is not in committed area.",                            \
+                 (index),                                                      \
+                 p2i(address_for_index_raw(index)),                            \
+                 G1CollectedHeap::heap()->addr_to_region(address_for_index_raw(index))));
+
+u_char G1BlockOffsetSharedArray::offset_array(size_t index) const {
+  check_index(index, "index out of range");
+  return _offset_array[index];
+}
+
+void G1BlockOffsetSharedArray::set_offset_array(size_t index, u_char offset) {
+  check_index(index, "index out of range");
+  set_offset_array_raw(index, offset);
+}
+
+void G1BlockOffsetSharedArray::set_offset_array(size_t index, HeapWord* high, HeapWord* low) {
+  check_index(index, "index out of range");
+  assert(high >= low, "addresses out of order");
+  size_t offset = pointer_delta(high, low);
+  check_offset(offset, "offset too large");
+  set_offset_array(index, (u_char)offset);
+}
+
+void G1BlockOffsetSharedArray::set_offset_array(size_t left, size_t right, u_char offset) {
+  check_index(right, "right index out of range");
+  assert(left <= right, "indexes out of order");
+  size_t num_cards = right - left + 1;
+  if (UseMemSetInBOT) {
+    memset(&_offset_array[left], offset, num_cards);
+  } else {
+    size_t i = left;
+    const size_t end = i + num_cards;
+    for (; i < end; i++) {
+      _offset_array[i] = offset;
+    }
+  }
+}
+
+void G1BlockOffsetSharedArray::check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
+  check_index(index, "index out of range");
+  assert(high >= low, "addresses out of order");
+  check_offset(pointer_delta(high, low), "offset too large");
+  assert(_offset_array[index] == pointer_delta(high, low), "Wrong offset");
+}
+
+// Variant of index_for that does not check the index for validity.
+inline size_t G1BlockOffsetSharedArray::index_for_raw(const void* p) const {
+  return pointer_delta((char*)p, _reserved.start(), sizeof(char)) >> LogN;
+}
+
 inline size_t G1BlockOffsetSharedArray::index_for(const void* p) const {
   char* pc = (char*)p;
   assert(pc >= (char*)_reserved.start() &&
          pc <  (char*)_reserved.end(),
          err_msg("p (" PTR_FORMAT ") not in reserved [" PTR_FORMAT ", " PTR_FORMAT ")",
                  p2i(p), p2i(_reserved.start()), p2i(_reserved.end())));
-  size_t delta = pointer_delta(pc, _reserved.start(), sizeof(char));
-  size_t result = delta >> LogN;
+  size_t result = index_for_raw(p);
   check_index(result, "bad index from address");
   return result;
 }
@@ -62,7 +117,7 @@
 inline HeapWord*
 G1BlockOffsetSharedArray::address_for_index(size_t index) const {
   check_index(index, "index out of range");
-  HeapWord* result = _reserved.start() + (index << LogN_words);
+  HeapWord* result = address_for_index_raw(index);
   assert(result >= _reserved.start() && result < _reserved.end(),
          err_msg("bad address from index result " PTR_FORMAT
                  " _reserved.start() " PTR_FORMAT " _reserved.end() "
@@ -71,6 +126,8 @@
   return result;
 }
 
+#undef check_index
+
 inline size_t
 G1BlockOffsetArray::block_size(const HeapWord* p) const {
   return gsp()->block_size(p);
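index_for_raw() and address_for_index_raw() are inverses by construction: with LogN = 9, each BOT slot covers a 512-byte card, so index = (addr - start) >> LogN, and the slot's first address is start + (index << LogN_words) (word arithmetic, since _reserved.start() is a HeapWord*). A small standalone check, done in bytes for brevity and with a hypothetical base address:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    int main() {
      const int    LogN = 9;                    // 512-byte BOT cards
      const size_t N    = (size_t)1 << LogN;

      uintptr_t reserved_start = 0x100000;      // hypothetical heap base
      uintptr_t addr = reserved_start + 5 * N + 40;   // inside the 6th card

      size_t    index      = (addr - reserved_start) >> LogN;   // index_for_raw
      uintptr_t card_start = reserved_start + (index << LogN);  // address_for_index_raw

      assert(index == 5);
      assert(card_start <= addr && addr < card_start + N);      // round trip holds
      return 0;
    }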
--- a/src/share/vm/gc_implementation/g1/g1CardCounts.cpp	Tue Aug 19 10:50:27 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/g1CardCounts.cpp	Thu Aug 21 11:47:10 2014 +0200
@@ -33,31 +33,26 @@
 
 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
 
+void G1CardCountsMappingChangedListener::on_commit(uint start_idx, size_t num_regions) {
+  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_idx), num_regions * HeapRegion::GrainWords);
+  _counts->clear_range(mr);
+}
+
 void G1CardCounts::clear_range(size_t from_card_num, size_t to_card_num) {
   if (has_count_table()) {
-    assert(from_card_num >= 0 && from_card_num < _committed_max_card_num,
-           err_msg("from card num out of range: "SIZE_FORMAT, from_card_num));
     assert(from_card_num < to_card_num,
            err_msg("Wrong order? from: " SIZE_FORMAT ", to: "SIZE_FORMAT,
                    from_card_num, to_card_num));
-    assert(to_card_num <= _committed_max_card_num,
-           err_msg("to card num out of range: "
-                   "to: "SIZE_FORMAT ", "
-                   "max: "SIZE_FORMAT,
-                   to_card_num, _committed_max_card_num));
-
-    to_card_num = MIN2(_committed_max_card_num, to_card_num);
-
     Copy::fill_to_bytes(&_card_counts[from_card_num], (to_card_num - from_card_num));
   }
 }
 
 G1CardCounts::G1CardCounts(G1CollectedHeap *g1h):
-  _g1h(g1h), _card_counts(NULL),
-  _reserved_max_card_num(0), _committed_max_card_num(0),
-  _committed_size(0) {}
+  _listener(), _g1h(g1h), _card_counts(NULL), _reserved_max_card_num(0) {
+  _listener.set_cardcounts(this);
+}
 
-void G1CardCounts::initialize() {
+void G1CardCounts::initialize(G1RegionToSpaceMapper* mapper) {
   assert(_g1h->max_capacity() > 0, "initialization order");
   assert(_g1h->capacity() == 0, "initialization order");
 
@@ -70,70 +65,9 @@
     _ct_bs = _g1h->g1_barrier_set();
     _ct_bot = _ct_bs->byte_for_const(_g1h->reserved_region().start());
 
-    // Allocate/Reserve the counts table
-    size_t reserved_bytes = _g1h->max_capacity();
-    _reserved_max_card_num = reserved_bytes >> CardTableModRefBS::card_shift;
-
-    size_t reserved_size = _reserved_max_card_num * sizeof(jbyte);
-    ReservedSpace rs(ReservedSpace::allocation_align_size_up(reserved_size));
-    if (!rs.is_reserved()) {
-      warning("Could not reserve enough space for the card counts table");
-      guarantee(!has_reserved_count_table(), "should be NULL");
-      return;
-    }
-
-    MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
-
-    _card_counts_storage.initialize(rs, 0);
-    _card_counts = (jubyte*) _card_counts_storage.low();
-  }
-}
-
-void G1CardCounts::resize(size_t heap_capacity) {
-  // Expand the card counts table to handle a heap with the given capacity.
-
-  if (!has_reserved_count_table()) {
-    // Don't expand if we failed to reserve the card counts table.
-    return;
-  }
-
-  assert(_committed_size ==
-         ReservedSpace::allocation_align_size_up(_committed_size),
-         err_msg("Unaligned? committed_size: " SIZE_FORMAT, _committed_size));
-
-  // Verify that the committed space for the card counts matches our
-  // committed max card num. Note for some allocation alignments, the
-  // amount of space actually committed for the counts table will be able
-  // to span more cards than the number spanned by the maximum heap.
-  size_t prev_committed_size = _committed_size;
-  size_t prev_committed_card_num = committed_to_card_num(prev_committed_size);
-
-  assert(prev_committed_card_num == _committed_max_card_num,
-         err_msg("Card mismatch: "
-                 "prev: " SIZE_FORMAT ", "
-                 "committed: "SIZE_FORMAT", "
-                 "reserved: "SIZE_FORMAT,
-                 prev_committed_card_num, _committed_max_card_num, _reserved_max_card_num));
-
-  size_t new_size = (heap_capacity >> CardTableModRefBS::card_shift) * sizeof(jbyte);
-  size_t new_committed_size = ReservedSpace::allocation_align_size_up(new_size);
-  size_t new_committed_card_num = committed_to_card_num(new_committed_size);
-
-  if (_committed_max_card_num < new_committed_card_num) {
-    // we need to expand the backing store for the card counts
-    size_t expand_size = new_committed_size - prev_committed_size;
-
-    if (!_card_counts_storage.expand_by(expand_size)) {
-      warning("Card counts table backing store commit failure");
-      return;
-    }
-    assert(_card_counts_storage.committed_size() == new_committed_size,
-           "expansion commit failure");
-
-    _committed_size = new_committed_size;
-    _committed_max_card_num = new_committed_card_num;
-
-    clear_range(prev_committed_card_num, _committed_max_card_num);
+    _card_counts = (jubyte*) mapper->reserved().start();
+    _reserved_max_card_num = mapper->reserved().byte_size();
+    mapper->set_mapping_changed_listener(&_listener);
   }
 }
 
@@ -149,12 +83,13 @@
   uint count = 0;
   if (has_count_table()) {
     size_t card_num = ptr_2_card_num(card_ptr);
-    if (card_num < _committed_max_card_num) {
-      count = (uint) _card_counts[card_num];
-      if (count < G1ConcRSHotCardLimit) {
-        _card_counts[card_num] =
-          (jubyte)(MIN2((uintx)(_card_counts[card_num] + 1), G1ConcRSHotCardLimit));
-      }
+    assert(card_num < _reserved_max_card_num,
+           err_msg("Card "SIZE_FORMAT" outside of card counts table (max size "SIZE_FORMAT")",
+                   card_num, _reserved_max_card_num));
+    count = (uint) _card_counts[card_num];
+    if (count < G1ConcRSHotCardLimit) {
+      _card_counts[card_num] =
+        (jubyte)(MIN2((uintx)(_card_counts[card_num] + 1), G1ConcRSHotCardLimit));
     }
   }
   return count;
@@ -165,31 +100,23 @@
 }
 
 void G1CardCounts::clear_region(HeapRegion* hr) {
-  assert(!hr->isHumongous(), "Should have been cleared");
-  if (has_count_table()) {
-    HeapWord* bottom = hr->bottom();
+  MemRegion mr(hr->bottom(), hr->end());
+  clear_range(mr);
+}
 
-    // We use the last address in hr as hr could be the
-    // last region in the heap. In which case trying to find
-    // the card for hr->end() will be an OOB accesss to the
-    // card table.
-    HeapWord* last = hr->end() - 1;
-    assert(_g1h->g1_committed().contains(last),
-           err_msg("last not in committed: "
-                   "last: " PTR_FORMAT ", "
-                   "committed: [" PTR_FORMAT ", " PTR_FORMAT ")",
-                   last,
-                   _g1h->g1_committed().start(),
-                   _g1h->g1_committed().end()));
-
-    const jbyte* from_card_ptr = _ct_bs->byte_for_const(bottom);
-    const jbyte* last_card_ptr = _ct_bs->byte_for_const(last);
+void G1CardCounts::clear_range(MemRegion mr) {
+  if (has_count_table()) {
+    const jbyte* from_card_ptr = _ct_bs->byte_for_const(mr.start());
+    // We use the last address in the range as the range could represent the
+    // last region in the heap. In which case trying to find the card will be an
+    // OOB access to the card table.
+    const jbyte* last_card_ptr = _ct_bs->byte_for_const(mr.last());
 
 #ifdef ASSERT
     HeapWord* start_addr = _ct_bs->addr_for(from_card_ptr);
-    assert(start_addr == hr->bottom(), "alignment");
+    assert(start_addr == mr.start(), "MemRegion start must be aligned to a card.");
     HeapWord* last_addr = _ct_bs->addr_for(last_card_ptr);
-    assert((last_addr + CardTableModRefBS::card_size_in_words) == hr->end(), "alignment");
+    assert((last_addr + CardTableModRefBS::card_size_in_words) == mr.end(), "MemRegion end must be aligned to a card.");
 #endif // ASSERT
 
     // Clear the counts for the (exclusive) card range.
@@ -199,14 +126,22 @@
   }
 }
 
+class G1CardCountsClearClosure : public HeapRegionClosure {
+ private:
+  G1CardCounts* _card_counts;
+ public:
+  G1CardCountsClearClosure(G1CardCounts* card_counts) :
+    HeapRegionClosure(), _card_counts(card_counts) { }
+
+
+  virtual bool doHeapRegion(HeapRegion* r) {
+    _card_counts->clear_region(r);
+    return false;
+  }
+};
+
 void G1CardCounts::clear_all() {
   assert(SafepointSynchronize::is_at_safepoint(), "don't call this otherwise");
-  clear_range((size_t)0, _committed_max_card_num);
+  G1CardCountsClearClosure cl(this);
+  _g1h->heap_region_iterate(&cl);
 }
-
-G1CardCounts::~G1CardCounts() {
-  if (has_reserved_count_table()) {
-    _card_counts_storage.release();
-  }
-}
-
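add_card_count() in the hunk above is a saturating counter: it returns the pre-increment value and stops incrementing at G1ConcRSHotCardLimit, so once a card reaches the limit it is reported hot on every later refinement. A sketch of the same logic over a plain byte table, with an assumed limit of 4:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    static const unsigned HotCardLimit = 4;   // stand-in for G1ConcRSHotCardLimit

    std::vector<unsigned char> counts(1024, 0);

    // Returns the count before the increment; saturates at HotCardLimit,
    // mirroring G1CardCounts::add_card_count() in the hunk above.
    unsigned add_card_count(size_t card_num) {
      unsigned count = counts[card_num];
      if (count < HotCardLimit) {
        counts[card_num] = (unsigned char)(count + 1);
      }
      return count;
    }

    int main() {
      for (int i = 0; i < 6; i++) {
        unsigned before = add_card_count(42);
        printf("refinement %d: previous count %u, hot: %s\n",
               i, before, before >= HotCardLimit ? "yes" : "no");
      }
      return 0;  // card 42 reports hot from the fifth refinement on
    }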
--- a/src/share/vm/gc_implementation/g1/g1CardCounts.hpp	Tue Aug 19 10:50:27 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/g1CardCounts.hpp	Thu Aug 21 11:47:10 2014 +0200
@@ -25,14 +25,26 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1CARDCOUNTS_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1CARDCOUNTS_HPP
 
+#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
 #include "memory/allocation.hpp"
 #include "runtime/virtualspace.hpp"
 #include "utilities/globalDefinitions.hpp"
 
 class CardTableModRefBS;
+class G1CardCounts;
 class G1CollectedHeap;
+class G1RegionToSpaceMapper;
 class HeapRegion;
 
+class G1CardCountsMappingChangedListener : public G1MappingChangedListener {
+ private:
+  G1CardCounts* _counts;
+ public:
+  void set_cardcounts(G1CardCounts* counts) { _counts = counts; }
+
+  virtual void on_commit(uint start_idx, size_t num_regions);
+};
+
 // Table to track the number of times a card has been refined. Once
 // a card has been refined a certain number of times, it is
 // considered 'hot' and its refinement is delayed by inserting the
@@ -41,6 +53,8 @@
 // is 'drained' during the next evacuation pause.
 
 class G1CardCounts: public CHeapObj<mtGC> {
+  G1CardCountsMappingChangedListener _listener;
+
   G1CollectedHeap* _g1h;
 
   // The table of counts
@@ -49,27 +63,18 @@
   // Max capacity of the reserved space for the counts table
   size_t _reserved_max_card_num;
 
-  // Max capacity of the committed space for the counts table
-  size_t _committed_max_card_num;
-
-  // Size of committed space for the counts table
-  size_t _committed_size;
-
   // CardTable bottom.
   const jbyte* _ct_bot;
 
   // Barrier set
   CardTableModRefBS* _ct_bs;
 
-  // The virtual memory backing the counts table
-  VirtualSpace _card_counts_storage;
-
   // Returns true if the card counts table has been reserved.
   bool has_reserved_count_table() { return _card_counts != NULL; }
 
   // Returns true if the card counts table has been reserved and committed.
   bool has_count_table() {
-    return has_reserved_count_table() && _committed_max_card_num > 0;
+    return has_reserved_count_table();
   }
 
   size_t ptr_2_card_num(const jbyte* card_ptr) {
@@ -79,37 +84,24 @@
                    "_ct_bot: " PTR_FORMAT,
                    p2i(card_ptr), p2i(_ct_bot)));
     size_t card_num = pointer_delta(card_ptr, _ct_bot, sizeof(jbyte));
-    assert(card_num >= 0 && card_num < _committed_max_card_num,
+    assert(card_num >= 0 && card_num < _reserved_max_card_num,
            err_msg("card pointer out of range: " PTR_FORMAT, p2i(card_ptr)));
     return card_num;
   }
 
   jbyte* card_num_2_ptr(size_t card_num) {
-    assert(card_num >= 0 && card_num < _committed_max_card_num,
+    assert(card_num >= 0 && card_num < _reserved_max_card_num,
            err_msg("card num out of range: "SIZE_FORMAT, card_num));
     return (jbyte*) (_ct_bot + card_num);
   }
 
-  // Helper routine.
-  // Returns the number of cards that can be counted by the given committed
-  // table size, with a maximum of the number of cards spanned by the max
-  // capacity of the heap.
-  size_t committed_to_card_num(size_t committed_size) {
-    return MIN2(_reserved_max_card_num, committed_size / sizeof(jbyte));
-  }
-
   // Clear the counts table for the given (exclusive) index range.
   void clear_range(size_t from_card_num, size_t to_card_num);
 
  public:
   G1CardCounts(G1CollectedHeap* g1h);
-  ~G1CardCounts();
 
-  void initialize();
-
-  // Resize the committed space for the card counts table in
-  // response to a resize of the committed space for the heap.
-  void resize(size_t heap_capacity);
+  void initialize(G1RegionToSpaceMapper* mapper);
 
   // Increments the refinement count for the given card.
   // Returns the pre-increment count value.
@@ -122,8 +114,10 @@
   // Clears the card counts for the cards spanned by the region
   void clear_region(HeapRegion* hr);
 
+  // Clears the card counts for the cards spanned by the MemRegion
+  void clear_range(MemRegion mr);
+
   // Clear the entire card counts table during GC.
-  // Updates the policy stats with the duration.
   void clear_all();
 };
 
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Aug 19 10:50:27 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Aug 21 11:47:10 2014 +0200
@@ -43,6 +43,7 @@
 #include "gc_implementation/g1/g1MarkSweep.hpp"
 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
 #include "gc_implementation/g1/g1ParScanThreadState.inline.hpp"
+#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
 #include "gc_implementation/g1/g1RemSet.inline.hpp"
 #include "gc_implementation/g1/g1StringDedup.hpp"
 #include "gc_implementation/g1/g1YCTypes.hpp"
@@ -377,6 +378,14 @@
   gclog_or_tty->cr();
 }
 
+void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
+  OtherRegionsTable::invalidate(start_idx, num_regions);
+}
+
+void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions) {
+  reset_from_card_cache(start_idx, num_regions);
+}
+
 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr)
 {
   // Claim the right to put the region on the dirty cards region list
@@ -756,13 +765,14 @@
     // to know in which list they are on so that we can remove them. We only
     // need to do this if we need to allocate more than one region to satisfy the
     // current humongous allocation request. If we are only allocating one region
-    // we use the one-region region allocation code (see above), or end up here.
+    // we use the one-region region allocation code (see above), that already
+    // potentially waits for regions from the secondary free list.
     wait_while_free_regions_coming();
     append_secondary_free_list_if_not_empty_with_lock();
 
     // Policy: Try only empty regions (i.e. already committed first). Maybe we
     // are lucky enough to find some.
-    first = _hrs.find_contiguous(obj_regions, true);
+    first = _hrs.find_contiguous_only_empty(obj_regions);
     if (first != G1_NO_HRS_INDEX) {
       _hrs.allocate_free_regions_starting_at(first, obj_regions);
     }
@@ -772,7 +782,7 @@
     // Policy: We could not find enough regions for the humongous object in the
     // free list. Look through the heap to find a mix of free and uncommitted regions.
     // If so, try expansion.
-    first = _hrs.find_contiguous(obj_regions, false);
+    first = _hrs.find_contiguous_empty_or_unavailable(obj_regions);
     if (first != G1_NO_HRS_INDEX) {
       // We found something. Make sure these regions are committed, i.e. expand
       // the heap. Alternatively we could do a defragmentation GC.
@@ -1950,8 +1960,6 @@
   _reserved.set_start((HeapWord*)heap_rs.base());
   _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
 
-  _expansion_regions = (uint) (max_byte_size / HeapRegion::GrainBytes);
-
   // Create the gen rem set (and barrier set) for the entire reserved region.
   _rem_set = collector_policy()->create_rem_set(_reserved, 2);
   set_barrier_set(rem_set()->bs());
@@ -1966,14 +1974,64 @@
   // Carve out the G1 part of the heap.
 
   ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
-  _hrs.initialize(g1_rs);
-
-  assert(_hrs.max_length() == _expansion_regions,
-         err_msg("max length: %u expansion regions: %u",
-                 _hrs.max_length(), _expansion_regions));
-
-  // Do later initialization work for concurrent refinement.
-  _cg1r->init();
+  G1RegionToSpaceMapper* heap_storage =
+    G1RegionToSpaceMapper::create_mapper(g1_rs,
+                                         UseLargePages ? os::large_page_size() : os::vm_page_size(),
+                                         HeapRegion::GrainBytes,
+                                         1,
+                                         mtJavaHeap);
+  heap_storage->set_mapping_changed_listener(&_listener);
+
+  // Reserve space for the block offset table (BOT). Like the other auxiliary
+  // data structures below, its storage is committed alongside the heap regions it covers.
+  ReservedSpace bot_rs(G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize));
+  G1RegionToSpaceMapper* bot_storage =
+    G1RegionToSpaceMapper::create_mapper(bot_rs,
+                                         os::vm_page_size(),
+                                         HeapRegion::GrainBytes,
+                                         G1BlockOffsetSharedArray::N_bytes,
+                                         mtGC);
+
+  ReservedSpace cardtable_rs(G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize));
+  G1RegionToSpaceMapper* cardtable_storage =
+    G1RegionToSpaceMapper::create_mapper(cardtable_rs,
+                                         os::vm_page_size(),
+                                         HeapRegion::GrainBytes,
+                                         G1BlockOffsetSharedArray::N_bytes,
+                                         mtGC);
+
+  // Reserve space for the card counts table.
+  ReservedSpace card_counts_rs(G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize));
+  G1RegionToSpaceMapper* card_counts_storage =
+    G1RegionToSpaceMapper::create_mapper(card_counts_rs,
+                                         os::vm_page_size(),
+                                         HeapRegion::GrainBytes,
+                                         G1BlockOffsetSharedArray::N_bytes,
+                                         mtGC);
+
+  // Reserve space for prev and next bitmap.
+  size_t bitmap_size = CMBitMap::compute_size(g1_rs.size());
+
+  ReservedSpace prev_bitmap_rs(ReservedSpace::allocation_align_size_up(bitmap_size));
+  G1RegionToSpaceMapper* prev_bitmap_storage =
+    G1RegionToSpaceMapper::create_mapper(prev_bitmap_rs,
+                                         os::vm_page_size(),
+                                         HeapRegion::GrainBytes,
+                                         CMBitMap::mark_distance(),
+                                         mtGC);
+
+  ReservedSpace next_bitmap_rs(ReservedSpace::allocation_align_size_up(bitmap_size));
+  G1RegionToSpaceMapper* next_bitmap_storage =
+    G1RegionToSpaceMapper::create_mapper(next_bitmap_rs,
+                                         os::vm_page_size(),
+                                         HeapRegion::GrainBytes,
+                                         CMBitMap::mark_distance(),
+                                         mtGC);
+
+  _hrs.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
+  g1_barrier_set()->initialize(cardtable_storage);
+  // Do later initialization work for concurrent refinement.
+  _cg1r->init(card_counts_storage);
 
   // 6843694 - ensure that the maximum region index can fit
   // in the remembered set structures.
@@ -1987,8 +2045,7 @@
 
   FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
 
-  _bot_shared = new G1BlockOffsetSharedArray(_reserved,
-                                             heap_word_size(init_byte_size));
+  _bot_shared = new G1BlockOffsetSharedArray(_reserved, bot_storage);
 
   _g1h = this;
 
@@ -1997,7 +2054,7 @@
 
   // Create the ConcurrentMark data structure and thread.
   // (Must do this late, so that "max_regions" is defined.)
-  _cm = new ConcurrentMark(this, heap_rs);
+  _cm = new ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
   if (_cm == NULL || !_cm->completed_initialization()) {
     vm_shutdown_during_initialization("Could not create/initialize ConcurrentMark");
     return JNI_ENOMEM;
@@ -2054,8 +2111,8 @@
 
   // Here we allocate the dummy HeapRegion that is required by the
   // G1AllocRegion class.
-
   HeapRegion* dummy_region = _hrs.get_dummy_region();
+
   // We'll re-use the same region whether the alloc region will
   // require BOT updates or not and, if it doesn't, then a non-young
   // region will complain that it cannot support allocations without
@@ -2495,8 +2552,8 @@
 }
 
 bool G1CollectedHeap::is_in(const void* p) const {
-  if (_hrs.committed().contains(p)) {
-    // Given that we know that p is in the committed space,
+  if (_hrs.reserved().contains(p)) {
+    // Given that we know that p is in the reserved space,
     // heap_region_containing_raw() should successfully
     // return the containing region.
     HeapRegion* hr = heap_region_containing_raw(p);
@@ -2506,6 +2563,18 @@
   }
 }
 
+#ifdef ASSERT
+bool G1CollectedHeap::is_in_exact(const void* p) const {
+  bool contains = reserved_region().contains(p);
+  bool available = _hrs.is_available(addr_to_region((HeapWord*)p));
+  return contains && available;
+}
+#endif
+
 // Iteration functions.
 
 // Applies an ExtendedOopClosure onto all references of objects within a HeapRegion.
@@ -3383,8 +3452,8 @@
   st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
             capacity()/K, used_unlocked()/K);
   st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
-            _hrs.committed().start(),
-            _hrs.committed().end(),
+            _hrs.reserved().start(),
+            _hrs.reserved().start() + _hrs.length() * HeapRegion::GrainWords,
             _hrs.reserved().end());
   st->cr();
   st->print("  region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
@@ -4135,10 +4204,6 @@
       // RETIRE events are generated before the end GC event.
       _hr_printer.end_gc(false /* full */, (size_t) total_collections());
 
-      if (mark_in_progress()) {
-        concurrent_mark()->update_heap_boundaries(_hrs.committed());
-      }
-
 #ifdef TRACESPINNING
       ParallelTaskTerminator::print_termination_counts();
 #endif
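
A note on the mapper creation above: each auxiliary structure sizes its backing store as a fixed fraction of the heap, expressed via the translation factor (heap bytes covered per byte of the structure). A sketch of the arithmetic under assumed values (1 MB regions, 512-byte cards, one mark bit per 8-byte heap word; none of these numbers are taken from this changeset):

  #include <cstddef>
  #include <cstdio>

  // Backing-store bytes one heap region needs, given how many heap bytes each
  // byte of the auxiliary structure covers.
  static size_t backing_bytes_per_region(size_t region_bytes, size_t heap_bytes_per_byte) {
    return region_bytes / heap_bytes_per_byte;
  }

  int main() {
    const size_t region_bytes = 1024 * 1024;  // assumed 1 MB region size
    printf("card table:  %zu bytes/region\n", backing_bytes_per_region(region_bytes, 512)); // 2048
    printf("mark bitmap: %zu bytes/region\n", backing_bytes_per_region(region_bytes, 64));  // 16384
    return 0;
  }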
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Tue Aug 19 10:50:27 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Aug 21 11:47:10 2014 +0200
@@ -206,6 +206,13 @@
 
 class RefineCardTableEntryClosure;
 
+class G1RegionMappingChangedListener : public G1MappingChangedListener {
+ private:
+  void reset_from_card_cache(uint start_idx, size_t num_regions);
+ public:
+  virtual void on_commit(uint start_idx, size_t num_regions);
+};
+
 class G1CollectedHeap : public SharedHeap {
   friend class VM_CollectForMetadataAllocation;
   friend class VM_G1CollectForAllocation;
@@ -280,6 +287,9 @@
   // after heap shrinking (free_list_only == true).
   void rebuild_region_sets(bool free_list_only);
 
+  // Callback for region mapping changed events.
+  G1RegionMappingChangedListener _listener;
+
   // The sequence of all heap regions in the heap.
   HeapRegionSeq _hrs;
 
@@ -851,11 +861,6 @@
                         CodeBlobClosure* scan_strong_code,
                         uint worker_i);
 
-  // Notifies all the necessary spaces that the committed space has
-  // been updated (either expanded or shrunk). It should be called
-  // after _g1_storage is updated.
-  void update_committed_space(HeapWord* old_end, HeapWord* new_end);
-
   // The concurrent marker (and the thread it runs in.)
   ConcurrentMark* _cm;
   ConcurrentMarkThread* _cmThread;
@@ -1295,6 +1300,11 @@
 
   // Returns "TRUE" iff "p" points into the committed areas of the heap.
   virtual bool is_in(const void* p) const;
+#ifdef ASSERT
+  // Returns whether p is in one of the available areas of the heap. Slow but
+  // exact version.
+  bool is_in_exact(const void* p) const;
+#endif
 
   // Return "TRUE" iff the given object address is within the collection
   // set. Slow implementation.
@@ -1364,16 +1374,10 @@
     return _hrs.reserved();
   }
 
-  // Returns a MemRegion that corresponds to the space that has been
-  // committed in the heap
-  MemRegion g1_committed() {
-    return _hrs.committed();
-  }
-
   virtual bool is_in_closed_subset(const void* p) const;
 
-  G1SATBCardTableModRefBS* g1_barrier_set() {
-    return (G1SATBCardTableModRefBS*) barrier_set();
+  G1SATBCardTableLoggingModRefBS* g1_barrier_set() {
+    return (G1SATBCardTableLoggingModRefBS*) barrier_set();
   }
 
   // This resets the card table to all zeros.  It is used after
--- a/src/share/vm/gc_implementation/g1/g1HotCardCache.cpp	Tue Aug 19 10:50:27 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/g1HotCardCache.cpp	Thu Aug 21 11:47:10 2014 +0200
@@ -33,7 +33,7 @@
 G1HotCardCache::G1HotCardCache(G1CollectedHeap *g1h):
   _g1h(g1h), _hot_cache(NULL), _use_cache(false), _card_counts(g1h) {}
 
-void G1HotCardCache::initialize() {
+void G1HotCardCache::initialize(G1RegionToSpaceMapper* card_counts_storage) {
   if (default_use_cache()) {
     _use_cache = true;
 
@@ -49,7 +49,7 @@
     _hot_cache_par_chunk_size = MAX2(1, _hot_cache_size / (int)n_workers);
     _hot_cache_par_claimed_idx = 0;
 
-    _card_counts.initialize();
+    _card_counts.initialize(card_counts_storage);
   }
 }
 
@@ -135,11 +135,8 @@
   // above, are discarded prior to re-enabling the cache near the end of the GC.
 }
 
-void G1HotCardCache::resize_card_counts(size_t heap_capacity) {
-  _card_counts.resize(heap_capacity);
-}
-
 void G1HotCardCache::reset_card_counts(HeapRegion* hr) {
+  assert(!hr->isHumongous(), "Should have been cleared");
   _card_counts.clear_region(hr);
 }
 
--- a/src/share/vm/gc_implementation/g1/g1HotCardCache.hpp	Tue Aug 19 10:50:27 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/g1HotCardCache.hpp	Thu Aug 21 11:47:10 2014 +0200
@@ -78,7 +78,7 @@
   G1HotCardCache(G1CollectedHeap* g1h);
   ~G1HotCardCache();
 
-  void initialize();
+  void initialize(G1RegionToSpaceMapper* card_counts_storage);
 
   bool use_cache() { return _use_cache; }
 
@@ -115,9 +115,6 @@
 
   bool hot_cache_is_empty() { return _n_hot == 0; }
 
-  // Resizes the card counts table to match the given capacity
-  void resize_card_counts(size_t heap_capacity);
-
   // Zeros the values in the card counts table for entire committed heap
   void reset_card_counts();
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.cpp	Thu Aug 21 11:47:10 2014 +0200
@@ -0,0 +1,171 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/g1PageBasedVirtualSpace.hpp"
+#include "oops/markOop.hpp"
+#include "oops/oop.inline.hpp"
+#include "services/memTracker.hpp"
+#ifdef TARGET_OS_FAMILY_linux
+# include "os_linux.inline.hpp"
+#endif
+#ifdef TARGET_OS_FAMILY_solaris
+# include "os_solaris.inline.hpp"
+#endif
+#ifdef TARGET_OS_FAMILY_windows
+# include "os_windows.inline.hpp"
+#endif
+#ifdef TARGET_OS_FAMILY_aix
+# include "os_aix.inline.hpp"
+#endif
+#ifdef TARGET_OS_FAMILY_bsd
+# include "os_bsd.inline.hpp"
+#endif
+#include "utilities/bitMap.inline.hpp"
+
+G1PageBasedVirtualSpace::G1PageBasedVirtualSpace() : _low_boundary(NULL),
+  _high_boundary(NULL), _committed(), _page_size(0), _special(false), _executable(false) {
+}
+
+bool G1PageBasedVirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t page_size) {
+  if (!rs.is_reserved()) {
+    return false;  // Allocation failed.
+  }
+  assert(_low_boundary == NULL, "VirtualSpace already initialized");
+  assert(page_size > 0, "Granularity must be non-zero.");
+
+  _low_boundary  = rs.base();
+  _high_boundary = _low_boundary + rs.size();
+
+  _special = rs.special();
+  _executable = rs.executable();
+
+  _page_size = page_size;
+
+  assert(_committed.size() == 0, "virtual space initialized more than once");
+  uintx size_in_bits = rs.size() / page_size;
+  _committed.resize(size_in_bits, /* in_resource_area */ false);
+
+  if (_special) {
+    _committed.set_range(0, size_in_bits);
+  }
+
+  return true;
+}
+
+
+G1PageBasedVirtualSpace::~G1PageBasedVirtualSpace() {
+  release();
+}
+
+void G1PageBasedVirtualSpace::release() {
+  // This does not release the underlying memory, because this class never
+  // reserved it; the caller must release it via rs.release().
+  _low_boundary           = NULL;
+  _high_boundary          = NULL;
+  _special                = false;
+  _executable             = false;
+  _page_size              = 0;
+  _committed.resize(0, false);
+}
+
+size_t G1PageBasedVirtualSpace::committed_size() const {
+  return _committed.count_one_bits() * _page_size;
+}
+
+size_t G1PageBasedVirtualSpace::reserved_size() const {
+  return pointer_delta(_high_boundary, _low_boundary, sizeof(char));
+}
+
+size_t G1PageBasedVirtualSpace::uncommitted_size()  const {
+  return reserved_size() - committed_size();
+}
+
+uintptr_t G1PageBasedVirtualSpace::addr_to_page_index(char* addr) const {
+  return (addr - _low_boundary) / _page_size;
+}
+
+bool G1PageBasedVirtualSpace::is_area_committed(uintptr_t start, size_t size_in_pages) const {
+  uintptr_t end = start + size_in_pages;
+  return _committed.get_next_zero_offset(start, end) >= end;
+}
+
+bool G1PageBasedVirtualSpace::is_area_uncommitted(uintptr_t start, size_t size_in_pages) const {
+  uintptr_t end = start + size_in_pages;
+  return _committed.get_next_one_offset(start, end) >= end;
+}
+
+char* G1PageBasedVirtualSpace::page_start(uintptr_t index) {
+  return _low_boundary + index * _page_size;
+}
+
+size_t G1PageBasedVirtualSpace::byte_size_for_pages(size_t num) {
+  return num * _page_size;
+}
+
+MemRegion G1PageBasedVirtualSpace::commit(uintptr_t start, size_t size_in_pages) {
+  // We need to make sure to commit all pages covered by the given area.
+  guarantee(is_area_uncommitted(start, size_in_pages), "Specified area is not uncommitted");
+
+  if (!_special) {
+    os::commit_memory_or_exit(page_start(start), byte_size_for_pages(size_in_pages), _executable,
+                              err_msg("Failed to commit pages from "SIZE_FORMAT" of length "SIZE_FORMAT, start, size_in_pages));
+  }
+  _committed.set_range(start, start + size_in_pages);
+
+  MemRegion result((HeapWord*)page_start(start), byte_size_for_pages(size_in_pages) / HeapWordSize);
+  return result;
+}
+
+MemRegion G1PageBasedVirtualSpace::uncommit(uintptr_t start, size_t size_in_pages) {
+  guarantee(is_area_committed(start, size_in_pages), "checking");
+
+  if (!_special) {
+    os::uncommit_memory(page_start(start), byte_size_for_pages(size_in_pages));
+  }
+
+  _committed.clear_range(start, start + size_in_pages);
+
+  MemRegion result((HeapWord*)page_start(start), byte_size_for_pages(size_in_pages) / HeapWordSize);
+  return result;
+}
+
+bool G1PageBasedVirtualSpace::contains(const void* p) const {
+  return _low_boundary <= (const char*) p && (const char*) p < _high_boundary;
+}
+
+#ifndef PRODUCT
+void G1PageBasedVirtualSpace::print_on(outputStream* out) {
+  out->print   ("Virtual space:");
+  if (special()) out->print(" (pinned in memory)");
+  out->cr();
+  out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
+  out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
+  out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(_low_boundary), p2i(_high_boundary));
+}
+
+void G1PageBasedVirtualSpace::print() {
+  print_on(tty);
+}
+#endif
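
The commit and uncommit guarantees above boil down to range checks on the page bitmap. A self-contained analogue of that bookkeeping (toy C++, not HotSpot code), making the invariants explicit:

  #include <cassert>
  #include <cstddef>
  #include <vector>

  // Toy model of the page commit bitmap: one bit per OS page.
  class PageBitmap {
    std::vector<bool> committed_;
   public:
    explicit PageBitmap(size_t pages) : committed_(pages, false) {}
    bool area_committed(size_t start, size_t n) const {
      for (size_t i = start; i < start + n; i++) { if (!committed_[i]) return false; }
      return true;
    }
    bool area_uncommitted(size_t start, size_t n) const {
      for (size_t i = start; i < start + n; i++) { if (committed_[i]) return false; }
      return true;
    }
    void commit(size_t start, size_t n) {
      assert(area_uncommitted(start, n));  // mirrors the guarantee in commit()
      for (size_t i = start; i < start + n; i++) committed_[i] = true;
    }
    void uncommit(size_t start, size_t n) {
      assert(area_committed(start, n));    // mirrors the guarantee in uncommit()
      for (size_t i = start; i < start + n; i++) committed_[i] = false;
    }
  };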
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.hpp	Thu Aug 21 11:47:10 2014 +0200
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1PAGEBASEDVIRTUALSPACE_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1PAGEBASEDVIRTUALSPACE_HPP
+
+#include "memory/allocation.hpp"
+#include "memory/memRegion.hpp"
+#include "runtime/virtualspace.hpp"
+#include "utilities/bitMap.hpp"
+
+// Virtual space management helper for a virtual space with an OS page allocation
+// granularity.
+// (De-)Allocation requests are always OS page aligned by passing a page index
+// and multiples of pages.
+// The implementation gives an error when trying to commit or uncommit pages that
+// have already been committed or uncommitted.
+class G1PageBasedVirtualSpace VALUE_OBJ_CLASS_SPEC {
+  friend class VMStructs;
+ private:
+  // Reserved area addresses.
+  char* _low_boundary;
+  char* _high_boundary;
+
+  // The commit/uncommit granularity in bytes.
+  size_t _page_size;
+
+  // Bitmap used for verification of commit/uncommit operations.
+  BitMap _committed;
+
+  // Indicates that the entire space has been committed and pinned in memory;
+  // in that case os::commit_memory() and os::uncommit_memory() have no effect.
+  bool _special;
+
+  // Indicates whether the committed space should be executable.
+  bool _executable;
+
+  // Returns the index of the page which contains the given address.
+  uintptr_t  addr_to_page_index(char* addr) const;
+  // Returns the address of the given page index.
+  char*  page_start(uintptr_t index);
+  // Returns the byte size of the given number of pages.
+  size_t byte_size_for_pages(size_t num);
+
+  // Returns true if the entire area is backed by committed memory.
+  bool is_area_committed(uintptr_t start, size_t size_in_pages) const;
+  // Returns true if the entire area is not backed by committed memory.
+  bool is_area_uncommitted(uintptr_t start, size_t size_in_pages) const;
+
+ public:
+
+  // Commit the given area of pages starting at start being size_in_pages large.
+  MemRegion commit(uintptr_t start, size_t size_in_pages);
+
+  // Uncommit the given area of pages starting at start being size_in_pages large.
+  MemRegion uncommit(uintptr_t start, size_t size_in_pages);
+
+  bool special() const { return _special; }
+
+  // Initialization
+  G1PageBasedVirtualSpace();
+  bool initialize_with_granularity(ReservedSpace rs, size_t page_size);
+
+  // Destruction
+  ~G1PageBasedVirtualSpace();
+
+  // Amount of reserved memory.
+  size_t reserved_size() const;
+  // Memory used in this virtual space.
+  size_t committed_size() const;
+  // Memory left to use/expand in this virtual space.
+  size_t uncommitted_size() const;
+
+  bool contains(const void* p) const;
+
+  MemRegion reserved() {
+    MemRegion x((HeapWord*)_low_boundary, reserved_size() / HeapWordSize);
+    return x;
+  }
+
+  void release();
+
+  void check_for_contiguity() PRODUCT_RETURN;
+
+  // Debugging
+  void print_on(outputStream* out) PRODUCT_RETURN;
+  void print();
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1PAGEBASEDVIRTUALSPACE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.cpp	Thu Aug 21 11:47:10 2014 +0200
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/g1BiasedArray.hpp"
+#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
+#include "runtime/virtualspace.hpp"
+#include "services/memTracker.hpp"
+#include "utilities/bitMap.inline.hpp"
+
+G1RegionToSpaceMapper::G1RegionToSpaceMapper(ReservedSpace rs,
+                                             size_t commit_granularity,
+                                             size_t region_granularity,
+                                             MemoryType type) :
+  _storage(),
+  _commit_granularity(commit_granularity),
+  _region_granularity(region_granularity),
+  _listener(NULL),
+  _commit_map() {
+  guarantee(is_power_of_2(commit_granularity), "must be");
+  guarantee(is_power_of_2(region_granularity), "must be");
+  _storage.initialize_with_granularity(rs, commit_granularity);
+
+  MemTracker::record_virtual_memory_type((address)rs.base(), type);
+}
+
+// G1RegionToSpaceMapper implementation where the region granularity is larger than
+// or the same as the commit granularity.
+// Basically, the space corresponding to one region spans several OS pages.
+class G1RegionsLargerThanCommitSizeMapper : public G1RegionToSpaceMapper {
+ private:
+  size_t _pages_per_region;
+
+ public:
+  G1RegionsLargerThanCommitSizeMapper(ReservedSpace rs,
+                                      size_t os_commit_granularity,
+                                      size_t alloc_granularity,
+                                      size_t commit_factor,
+                                      MemoryType type) :
+     G1RegionToSpaceMapper(rs, os_commit_granularity, alloc_granularity, type),
+    _pages_per_region(alloc_granularity / (os_commit_granularity * commit_factor)) {
+
+    guarantee(alloc_granularity >= os_commit_granularity, "allocation granularity smaller than commit granularity");
+    _commit_map.resize(rs.size() * commit_factor / alloc_granularity, /* in_resource_area */ false);
+  }
+
+  virtual void commit_regions(uintptr_t start_idx, size_t num_regions) {
+    _storage.commit(start_idx * _pages_per_region, num_regions * _pages_per_region);
+    _commit_map.set_range(start_idx, start_idx + num_regions);
+    fire_on_commit(start_idx, num_regions);
+  }
+
+  virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions) {
+    _storage.uncommit(start_idx * _pages_per_region, num_regions * _pages_per_region);
+    _commit_map.clear_range(start_idx, start_idx + num_regions);
+  }
+};
+
+// G1RegionToSpaceMapper implementation where the region granularity is smaller
+// than the commit granularity.
+// Basically, the contents of one OS page span several regions.
+class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
+ private:
+  class CommitRefcountArray : public G1BiasedMappedArray<uint> {
+   protected:
+     virtual uint default_value() const { return 0; }
+  };
+
+  size_t _regions_per_page;
+
+  CommitRefcountArray _refcounts;
+
+  uintptr_t region_idx_to_page_idx(uint region) const {
+    return region / _regions_per_page;
+  }
+
+ public:
+  G1RegionsSmallerThanCommitSizeMapper(ReservedSpace rs,
+                                       size_t os_commit_granularity,
+                                       size_t alloc_granularity,
+                                       size_t commit_factor,
+                                       MemoryType type) :
+     G1RegionToSpaceMapper(rs, os_commit_granularity, alloc_granularity, type),
+    _regions_per_page((os_commit_granularity * commit_factor) / alloc_granularity), _refcounts() {
+
+    guarantee((os_commit_granularity * commit_factor) >= alloc_granularity, "allocation granularity smaller than commit granularity");
+    _refcounts.initialize((HeapWord*)rs.base(), (HeapWord*)(rs.base() + rs.size()), os_commit_granularity);
+    _commit_map.resize(rs.size() * commit_factor / alloc_granularity, /* in_resource_area */ false);
+  }
+
+  virtual void commit_regions(uintptr_t start_idx, size_t num_regions) {
+    for (uintptr_t i = start_idx; i < start_idx + num_regions; i++) {
+      assert(!_commit_map.at(i), err_msg("Trying to commit storage at region "INTPTR_FORMAT" that is already committed", i));
+      uintptr_t idx = region_idx_to_page_idx(i);
+      uint old_refcount = _refcounts.get_by_index(idx);
+      if (old_refcount == 0) {
+        _storage.commit(idx, 1);
+      }
+      _refcounts.set_by_index(idx, old_refcount + 1);
+      _commit_map.set_bit(i);
+      fire_on_commit(i, 1);
+    }
+  }
+
+  virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions) {
+    for (uintptr_t i = start_idx; i < start_idx + num_regions; i++) {
+      assert(_commit_map.at(i), err_msg("Trying to uncommit storage at region "INTPTR_FORMAT" that is not committed", i));
+      uintptr_t idx = region_idx_to_page_idx(i);
+      uint old_refcount = _refcounts.get_by_index(idx);
+      assert(old_refcount > 0, "must be");
+      if (old_refcount == 1) {
+        _storage.uncommit(idx, 1);
+      }
+      _refcounts.set_by_index(idx, old_refcount - 1);
+      _commit_map.clear_bit(i);
+    }
+  }
+};
+
+void G1RegionToSpaceMapper::fire_on_commit(uint start_idx, size_t num_regions) {
+  if (_listener != NULL) {
+    _listener->on_commit(start_idx, num_regions);
+  }
+}
+
+G1RegionToSpaceMapper* G1RegionToSpaceMapper::create_mapper(ReservedSpace rs,
+                                                            size_t os_commit_granularity,
+                                                            size_t region_granularity,
+                                                            size_t commit_factor,
+                                                            MemoryType type) {
+
+  if (region_granularity >= (os_commit_granularity * commit_factor)) {
+    return new G1RegionsLargerThanCommitSizeMapper(rs, os_commit_granularity, region_granularity, commit_factor, type);
+  } else {
+    return new G1RegionsSmallerThanCommitSizeMapper(rs, os_commit_granularity, region_granularity, commit_factor, type);
+  }
+}
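
The refcounting in G1RegionsSmallerThanCommitSizeMapper implements one rule: a shared OS page is committed when its first region arrives and uncommitted when its last region leaves. A stripped-down model of just that rule (illustrative standalone C++, not the HotSpot class):

  #include <cassert>
  #include <cstddef>
  #include <map>

  // Maps a page index to how many committed regions currently map onto it.
  std::map<size_t, unsigned> page_refcount;

  void commit_region_on_page(size_t page) {
    if (page_refcount[page]++ == 0) {
      // First region on this page: commit the underlying page here.
    }
  }

  void uncommit_region_on_page(size_t page) {
    assert(page_refcount[page] > 0);
    if (--page_refcount[page] == 0) {
      // Last region left this page: uncommit the underlying page here.
    }
  }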
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.hpp	Thu Aug 21 11:47:10 2014 +0200
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1REGIONTOSPACEMAPPER_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1REGIONTOSPACEMAPPER_HPP
+
+#include "gc_implementation/g1/g1PageBasedVirtualSpace.hpp"
+#include "utilities/debug.hpp"
+
+class G1MappingChangedListener VALUE_OBJ_CLASS_SPEC {
+ public:
+  // Fired after commit of the memory, i.e. the memory this listener is registered
+  // for can be accessed.
+  virtual void on_commit(uint start_idx, size_t num_regions) = 0;
+};
+
+// Maps region based commit/uncommit requests to the underlying page sized virtual
+// space.
+class G1RegionToSpaceMapper : public CHeapObj<mtGC> {
+ private:
+  G1MappingChangedListener* _listener;
+ protected:
+  // Backing storage.
+  G1PageBasedVirtualSpace _storage;
+  size_t _commit_granularity;
+  size_t _region_granularity;
+  // Mapping management
+  BitMap _commit_map;
+
+  G1RegionToSpaceMapper(ReservedSpace rs, size_t commit_granularity, size_t region_granularity, MemoryType type);
+
+  void fire_on_commit(uint start_idx, size_t num_regions);
+ public:
+  MemRegion reserved() { return _storage.reserved(); }
+
+  void set_mapping_changed_listener(G1MappingChangedListener* listener) { _listener = listener; }
+
+  virtual ~G1RegionToSpaceMapper() {
+    _commit_map.resize(0, /* in_resource_area */ false);
+  }
+
+  bool is_committed(uintptr_t idx) const {
+    return _commit_map.at(idx);
+  }
+
+  virtual void commit_regions(uintptr_t start_idx, size_t num_regions = 1) = 0;
+  virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions = 1) = 0;
+
+  // Creates an appropriate G1RegionToSpaceMapper for the given parameters.
+  // The byte_translation_factor defines how many bytes in a region correspond to
+  // a single byte in the data structure this mapper is for.
+  // E.g. for the card table, this is the number of heap bytes covered by a
+  // single card table entry.
+  static G1RegionToSpaceMapper* create_mapper(ReservedSpace rs,
+                                              size_t os_commit_granularity,
+                                              size_t region_granularity,
+                                              size_t byte_translation_factor,
+                                              MemoryType type);
+};
+
+#endif /* SHARE_VM_GC_IMPLEMENTATION_G1_G1REGIONTOSPACEMAPPER_HPP */
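
For orientation, the intended call pattern for this interface, mirroring the wiring in g1CollectedHeap.cpp above (the listener class name below is hypothetical):

  // Hypothetical listener that resets some side structure whenever regions are
  // committed; on_commit() is the only callback in the interface above.
  class SideTableResetListener : public G1MappingChangedListener {
   public:
    virtual void on_commit(uint start_idx, size_t num_regions) {
      // Clear side-table state for regions [start_idx, start_idx + num_regions).
    }
  };

  // Sketch of use:
  //   G1RegionToSpaceMapper* mapper = G1RegionToSpaceMapper::create_mapper(
  //       rs, os::vm_page_size(), HeapRegion::GrainBytes, 1, mtGC);
  //   SideTableResetListener listener;
  //   mapper->set_mapping_changed_listener(&listener);
  //   mapper->commit_regions(0, 4);  // fires listener.on_commit(0, 4)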
--- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Tue Aug 19 10:50:27 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Thu Aug 21 11:47:10 2014 +0200
@@ -554,6 +554,12 @@
 
 bool G1RemSet::refine_card(jbyte* card_ptr, uint worker_i,
                            bool check_for_refs_into_cset) {
+  assert(_g1->is_in_exact(_ct_bs->addr_for(card_ptr)),
+         err_msg("Card at "PTR_FORMAT" index "SIZE_FORMAT" representing heap at "PTR_FORMAT" (%u) must be in committed heap",
+                 p2i(card_ptr),
+                 _ct_bs->index_for(_ct_bs->addr_for(card_ptr)),
+                 _ct_bs->addr_for(card_ptr),
+                 _g1->addr_to_region(_ct_bs->addr_for(card_ptr))));
 
   // If the card is no longer dirty, nothing to do.
   if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
--- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp	Tue Aug 19 10:50:27 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp	Thu Aug 21 11:47:10 2014 +0200
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc_implementation/g1/heapRegion.hpp"
 #include "gc_implementation/g1/satbQueue.hpp"
@@ -37,7 +38,6 @@
   _kind = G1SATBCT;
 }
 
-
 void G1SATBCardTableModRefBS::enqueue(oop pre_val) {
   // Nulls should have been already filtered.
   assert(pre_val->is_oop(true), "Error");
@@ -124,13 +124,52 @@
 }
 #endif
 
+void G1SATBCardTableLoggingModRefBSChangedListener::on_commit(uint start_idx, size_t num_regions) {
+  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_idx), num_regions * HeapRegion::GrainWords);
+  _card_table->clear(mr);
+}
+
 G1SATBCardTableLoggingModRefBS::
 G1SATBCardTableLoggingModRefBS(MemRegion whole_heap,
                                int max_covered_regions) :
   G1SATBCardTableModRefBS(whole_heap, max_covered_regions),
-  _dcqs(JavaThread::dirty_card_queue_set())
+  _dcqs(JavaThread::dirty_card_queue_set()),
+  _listener()
 {
   _kind = G1SATBCTLogging;
+  _listener.set_card_table(this);
+}
+
+void G1SATBCardTableLoggingModRefBS::initialize(G1RegionToSpaceMapper* mapper) {
+  mapper->set_mapping_changed_listener(&_listener);
+
+  _byte_map_size = mapper->reserved().byte_size();
+
+  _guard_index = cards_required(_whole_heap.word_size()) - 1;
+  _last_valid_index = _guard_index - 1;
+
+  HeapWord* low_bound  = _whole_heap.start();
+  HeapWord* high_bound = _whole_heap.end();
+
+  _cur_covered_regions = 1;
+  _covered[0] = _whole_heap;
+
+  _byte_map = (jbyte*) mapper->reserved().start();
+  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
+  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
+  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
+
+  if (TraceCardTableModRefBS) {
+    gclog_or_tty->print_cr("G1SATBCardTableModRefBS::G1SATBCardTableModRefBS: ");
+    gclog_or_tty->print_cr("  "
+                  "  &_byte_map[0]: " INTPTR_FORMAT
+                  "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
+                  p2i(&_byte_map[0]),
+                  p2i(&_byte_map[_last_valid_index]));
+    gclog_or_tty->print_cr("  "
+                  "  byte_map_base: " INTPTR_FORMAT,
+                  p2i(byte_map_base));
+  }
 }
 
 void
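
The byte_map_base computation in initialize() above is the usual card table bias: offset the map by the heap base so that locating a card is a single shift and add, with no per-access subtraction. A worked sketch under the standard assumption of 512-byte cards (card_shift == 9):

  #include <cstdint>

  // For a heap starting at low_bound, bias the card byte map so that
  // byte_map_base + (addr >> card_shift) lands on addr's card entry.
  inline uint8_t* card_ptr_for(uint8_t* byte_map, uintptr_t low_bound, uintptr_t addr) {
    const unsigned card_shift = 9;  // assumption: 2^9 = 512-byte cards
    uint8_t* byte_map_base = byte_map - (low_bound >> card_shift);
    return byte_map_base + (addr >> card_shift);
  }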
--- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp	Tue Aug 19 10:50:27 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp	Thu Aug 21 11:47:10 2014 +0200
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1SATBCARDTABLEMODREFBS_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1SATBCARDTABLEMODREFBS_HPP
 
+#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
 #include "memory/cardTableModRefBS.hpp"
 #include "memory/memRegion.hpp"
 #include "oops/oop.inline.hpp"
@@ -33,6 +34,7 @@
 #if INCLUDE_ALL_GCS
 
 class DirtyCardQueueSet;
+class G1SATBCardTableLoggingModRefBS;
 
 // This barrier is specialized to use a logging barrier to support
 // snapshot-at-the-beginning marking.
@@ -126,18 +128,40 @@
     jbyte val = _byte_map[card_index];
     return (val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val();
   }
+};
 
+class G1SATBCardTableLoggingModRefBSChangedListener : public G1MappingChangedListener {
+ private:
+  G1SATBCardTableLoggingModRefBS* _card_table;
+ public:
+  G1SATBCardTableLoggingModRefBSChangedListener() : _card_table(NULL) { }
+
+  void set_card_table(G1SATBCardTableLoggingModRefBS* card_table) { _card_table = card_table; }
+
+  virtual void on_commit(uint start_idx, size_t num_regions);
 };
 
 // Adds card-table logging to the post-barrier.
 // Usual invariant: all dirty cards are logged in the DirtyCardQueueSet.
 class G1SATBCardTableLoggingModRefBS: public G1SATBCardTableModRefBS {
+  friend class G1SATBCardTableLoggingModRefBSChangedListener;
  private:
+  G1SATBCardTableLoggingModRefBSChangedListener _listener;
   DirtyCardQueueSet& _dcqs;
  public:
+  static size_t compute_size(size_t mem_region_size_in_words) {
+    size_t number_of_slots = (mem_region_size_in_words / card_size_in_words);
+    return ReservedSpace::allocation_align_size_up(number_of_slots);
+  }
+
   G1SATBCardTableLoggingModRefBS(MemRegion whole_heap,
                                  int max_covered_regions);
 
+  virtual void initialize() { }
+  virtual void initialize(G1RegionToSpaceMapper* mapper);
+
+  virtual void resize_covered_region(MemRegion new_region) { ShouldNotReachHere(); }
+
   bool is_a(BarrierSet::Name bsn) {
     return bsn == BarrierSet::G1SATBCTLogging ||
       G1SATBCardTableModRefBS::is_a(bsn);
@@ -154,8 +178,6 @@
 
   void write_region_work(MemRegion mr)    { invalidate(mr); }
   void write_ref_array_work(MemRegion mr) { invalidate(mr); }
-
-
 };
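
A worked instance of compute_size() above, under assumed sizes (64-bit HeapWords, card_size_in_words == 64, i.e. 512-byte cards; the heap size is illustrative):

  // mem_region_size_in_words for a 1 GiB heap: 1 GiB / 8 = 134217728 words.
  // number_of_slots = 134217728 / 64 = 2097152 card bytes, i.e. a 2 MiB table,
  // which ReservedSpace::allocation_align_size_up() then rounds up to the
  // platform allocation granularity.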
 
 
--- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Tue Aug 19 10:50:27 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Thu Aug 21 11:47:10 2014 +0200
@@ -372,17 +372,17 @@
                                                        _max_regions,
                                                        &_static_mem_size);
 
-  for (uint i = 0; i < n_par_rs; i++) {
-    for (uint j = 0; j < _max_regions; j++) {
-      set(i, j, InvalidCard);
-    }
-  }
+  invalidate(0, _max_regions);
 }
 
-void FromCardCache::shrink(uint new_num_regions) {
+void FromCardCache::invalidate(uint start_idx, size_t new_num_regions) {
+  guarantee((size_t)start_idx + new_num_regions <= max_uintx,
+            err_msg("Trying to invalidate beyond maximum region, from %u size "SIZE_FORMAT,
+                    start_idx, new_num_regions));
   for (uint i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
-    assert(new_num_regions <= _max_regions, "Must be within max.");
-    for (uint j = new_num_regions; j < _max_regions; j++) {
+    uint end_idx = (start_idx + (uint)new_num_regions);
+    assert(end_idx <= _max_regions, "Must be within max.");
+    for (uint j = start_idx; j < end_idx; j++) {
       set(i, j, InvalidCard);
     }
   }
@@ -406,12 +406,12 @@
   }
 }
 
-void OtherRegionsTable::init_from_card_cache(uint max_regions) {
+void OtherRegionsTable::initialize(uint max_regions) {
   FromCardCache::initialize(HeapRegionRemSet::num_par_rem_sets(), max_regions);
 }
 
-void OtherRegionsTable::shrink_from_card_cache(uint new_num_regions) {
-  FromCardCache::shrink(new_num_regions);
+void OtherRegionsTable::invalidate(uint start_idx, size_t num_regions) {
+  FromCardCache::invalidate(start_idx, num_regions);
 }
 
 void OtherRegionsTable::print_from_card_cache() {
@@ -839,8 +839,8 @@
 HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
                                    HeapRegion* hr)
   : _bosa(bosa),
-    _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #"UINT32_FORMAT, hr->hrs_index()), true),
-    _code_roots(), _other_regions(hr, &_m) {
+    _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrs_index()), true),
+    _code_roots(), _other_regions(hr, &_m), _iter_state(Unclaimed), _iter_claimed(0) {
   reset_for_par_iteration();
 }
 
--- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Tue Aug 19 10:50:27 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Thu Aug 21 11:47:10 2014 +0200
@@ -84,7 +84,7 @@
 
   static void initialize(uint n_par_rs, uint max_num_regions);
 
-  static void shrink(uint new_num_regions);
+  static void invalidate(uint start_idx, size_t num_regions);
 
   static void print(outputStream* out = gclog_or_tty) PRODUCT_RETURN;
 
@@ -213,11 +213,11 @@
 
   // Declare the heap size (in # of regions) to the OtherRegionsTable.
   // (Uses it to initialize from_card_cache).
-  static void init_from_card_cache(uint max_regions);
+  static void initialize(uint max_regions);
 
-  // Declares that only regions i s.t. 0 <= i < new_n_regs are in use.
-  // Make sure any entries for higher regions are invalid.
-  static void shrink_from_card_cache(uint new_num_regions);
+  // Declares that the regions i with start_idx <= i < start_idx + num_regions
+  // are no longer in use. Makes sure that any entries for these regions are
+  // invalid.
+  static void invalidate(uint start_idx, size_t num_regions);
 
   static void print_from_card_cache();
 };
@@ -404,12 +404,11 @@
   // Declare the heap size (in # of regions) to the HeapRegionRemSet(s).
   // (Uses it to initialize from_card_cache).
   static void init_heap(uint max_regions) {
-    OtherRegionsTable::init_from_card_cache(max_regions);
+    OtherRegionsTable::initialize(max_regions);
   }
 
-  // Declares that only regions i s.t. 0 <= i < new_n_regs are in use.
-  static void shrink_heap(uint new_n_regs) {
-    OtherRegionsTable::shrink_from_card_cache(new_n_regs);
+  static void invalidate(uint start_idx, uint num_regions) {
+    OtherRegionsTable::invalidate(start_idx, num_regions);
   }
 
 #ifndef PRODUCT
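
The shrink-to-invalidate change above replaces the contract "every region index at or above N is invalid" with "exactly this window of regions is invalid", which is what uncommitting arbitrary regions in the middle of the heap requires. A compact illustration of the two contracts (toy code, not the HotSpot tables):

  #include <cstddef>
  #include <vector>

  const int InvalidCard = -1;

  // Old contract: invalidate every entry at or above new_num_regions.
  void shrink(std::vector<int>& cache, size_t new_num_regions) {
    for (size_t j = new_num_regions; j < cache.size(); j++) cache[j] = InvalidCard;
  }

  // New contract: invalidate exactly [start_idx, start_idx + num_regions).
  void invalidate(std::vector<int>& cache, size_t start_idx, size_t num_regions) {
    for (size_t j = start_idx; j < start_idx + num_regions; j++) cache[j] = InvalidCard;
  }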
--- a/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Tue Aug 19 10:50:27 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Thu Aug 21 11:47:10 2014 +0200
@@ -30,19 +30,33 @@
 #include "gc_implementation/g1/concurrentG1Refine.hpp"
 #include "memory/allocation.hpp"
 
-void HeapRegionSeq::initialize(ReservedSpace reserved) {
-  _reserved = reserved;
-  _storage.initialize(reserved, 0);
-
-  _num_committed = 0;
-
+void HeapRegionSeq::initialize(G1RegionToSpaceMapper* heap_storage,
+                               G1RegionToSpaceMapper* prev_bitmap,
+                               G1RegionToSpaceMapper* next_bitmap,
+                               G1RegionToSpaceMapper* bot,
+                               G1RegionToSpaceMapper* cardtable,
+                               G1RegionToSpaceMapper* card_counts) {
   _allocated_heapregions_length = 0;
 
-  _regions.initialize((HeapWord*)_storage.low_boundary(), (HeapWord*)_storage.high_boundary(), HeapRegion::GrainBytes);
+  _heap_mapper = heap_storage;
+
+  _prev_bitmap_mapper = prev_bitmap;
+  _next_bitmap_mapper = next_bitmap;
+
+  _bot_mapper = bot;
+  _cardtable_mapper = cardtable;
+
+  _card_counts_mapper = card_counts;
+
+  MemRegion reserved = heap_storage->reserved();
+  _regions.initialize(reserved.start(), reserved.end(), HeapRegion::GrainBytes);
+
+  _available_map.resize(_regions.length(), false);
+  _available_map.clear();
 }
 
 bool HeapRegionSeq::is_available(uint region) const {
-  return region <  _num_committed;
+  return _available_map.at(region);
 }
 
 #ifdef ASSERT
@@ -58,29 +72,26 @@
   return new HeapRegion(hrs_index, G1CollectedHeap::heap()->bot_shared(), mr);
 }
 
-void HeapRegionSeq::update_committed_space(HeapWord* old_end,
-                                           HeapWord* new_end) {
-  assert(old_end != new_end, "don't call this otherwise");
-  // We may not have officially committed the area. So construct and use a separate one.
-  MemRegion new_committed(heap_bottom(), new_end);
-  // Tell the card table about the update.
-  Universe::heap()->barrier_set()->resize_covered_region(new_committed);
-  // Tell the BOT about the update.
-  G1CollectedHeap::heap()->bot_shared()->resize(new_committed.word_size());
-  // Tell the hot card cache about the update
-  G1CollectedHeap::heap()->concurrent_g1_refine()->hot_card_cache()->resize_card_counts(new_committed.byte_size());
-}
-
 void HeapRegionSeq::commit_regions(uint index, size_t num_regions) {
   guarantee(num_regions > 0, "Must commit more than zero regions");
   guarantee(_num_committed + num_regions <= max_length(), "Cannot commit more than the maximum amount of regions");
 
-  _storage.expand_by(num_regions * HeapRegion::GrainBytes);
-  update_committed_space(heap_top(), heap_top() + num_regions * HeapRegion::GrainWords);
+  _num_committed += (uint)num_regions;
+
+  _heap_mapper->commit_regions(index, num_regions);
+
+  // Also commit auxiliary data
+  _prev_bitmap_mapper->commit_regions(index, num_regions);
+  _next_bitmap_mapper->commit_regions(index, num_regions);
+
+  _bot_mapper->commit_regions(index, num_regions);
+  _cardtable_mapper->commit_regions(index, num_regions);
+
+  _card_counts_mapper->commit_regions(index, num_regions);
 }
 
 void HeapRegionSeq::uncommit_regions(uint start, size_t num_regions) {
-  guarantee(num_regions >= 1, "Need to specify at least one region to uncommit");
+  guarantee(num_regions >= 1, err_msg("Need to specify at least one region to uncommit, tried to uncommit zero regions at %u", start));
   guarantee(_num_committed >= num_regions, "pre-condition");
 
   // Print before uncommitting.
@@ -91,12 +102,19 @@
     }
   }
 
-  HeapWord* old_end = heap_top();
   _num_committed -= (uint)num_regions;
-  OrderAccess::fence();
+
+  _available_map.par_clear_range(start, start + num_regions, BitMap::unknown_range);
+  _heap_mapper->uncommit_regions(start, num_regions);
 
-  _storage.shrink_by(num_regions * HeapRegion::GrainBytes);
-  update_committed_space(old_end, heap_top());
+  // Also uncommit auxiliary data
+  _prev_bitmap_mapper->uncommit_regions(start, num_regions);
+  _next_bitmap_mapper->uncommit_regions(start, num_regions);
+
+  _bot_mapper->uncommit_regions(start, num_regions);
+  _cardtable_mapper->uncommit_regions(start, num_regions);
+
+  _card_counts_mapper->uncommit_regions(start, num_regions);
 }
 
 void HeapRegionSeq::make_regions_available(uint start, uint num_regions) {
@@ -110,9 +128,7 @@
     }
   }
 
-  _num_committed += (size_t)num_regions;
-
-  OrderAccess::fence();
+  _available_map.par_set_range(start, start + num_regions, BitMap::unknown_range);
 
   for (uint i = start; i < start + num_regions; i++) {
     assert(is_available(i), err_msg("Just made region %u available but is apparently not.", i));
@@ -129,8 +145,7 @@
 }
 
 uint HeapRegionSeq::expand_by(uint num_regions) {
-  // Only ever expand from the end of the heap.
-  return expand_at(_num_committed, num_regions);
+  return expand_at(0, num_regions);
 }
 
 uint HeapRegionSeq::expand_at(uint start, uint num_regions) {
@@ -334,7 +349,8 @@
   uint idx_last_found = 0;
   uint num_last_found = 0;
 
-  if ((num_last_found = find_empty_from_idx_reverse(cur, &idx_last_found)) > 0) {
+  while ((removed < num_regions_to_remove) &&
+      (num_last_found = find_empty_from_idx_reverse(cur, &idx_last_found)) > 0) {
     // Only allow uncommit from the end of the heap.
     if ((idx_last_found + num_last_found) != _allocated_heapregions_length) {
       return 0;
--- a/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Tue Aug 19 10:50:27 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Thu Aug 21 11:47:10 2014 +0200
@@ -26,6 +26,7 @@
 #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
 
 #include "gc_implementation/g1/g1BiasedArray.hpp"
+#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
 #include "gc_implementation/g1/heapRegionSet.hpp"
 
 class HeapRegion;
@@ -37,13 +38,17 @@
   virtual HeapRegion* default_value() const { return NULL; }
 };
 
-// This class keeps track of the region metadata (i.e., HeapRegion
-// instances). They are kept in the _regions array in address
-// order. A region's index in the array corresponds to its index in
-// the heap (i.e., 0 is the region at the bottom of the heap, 1 is
-// the one after it, etc.). Two regions that are consecutive in the
-// array should also be adjacent in the address space (i.e.,
-// region(i).end() == region(i+1).bottom().
+// This class keeps track of the actual heap memory, auxiliary data
+// and its metadata (i.e., HeapRegion instances) and the list of free regions.
+//
+// This allows maximum flexibility for deciding what to commit or uncommit given
+// a request from outside.
+//
+// HeapRegions are kept in the _regions array in address order. A region's
+// index in the array corresponds to its index in the heap (i.e., 0 is the
+// region at the bottom of the heap, 1 is the one after it, etc.). Two
+// regions that are consecutive in the array should also be adjacent in the
+// address space (i.e., region(i).end() == region(i+1).bottom()).
 //
 // We create a HeapRegion when we commit the region's address space
 // for the first time. When we uncommit the address space of a
@@ -52,24 +57,31 @@
 //
 // We keep track of three lengths:
 //
-// * _committed_length (returned by length()) is the number of currently
-//   committed regions.
-// * _allocated_length (not exposed outside this class) is the
-//   number of regions for which we have HeapRegions.
+// * _num_committed (returned by length()) is the number of currently
+//   committed regions. These may not be contiguous.
+// * _allocated_heapregions_length (not exposed outside this class) is the
+//   highest region index + 1 for which we have allocated HeapRegions.
 // * max_length() returns the maximum number of regions the heap can have.
 //
-// and maintain that: _committed_length <= _allocated_length <= max_length()
 
 class HeapRegionSeq: public CHeapObj<mtGC> {
   friend class VMStructs;
 
   G1HeapRegionTable _regions;
 
-  ReservedSpace _reserved;
-  VirtualSpace _storage;
+  G1RegionToSpaceMapper* _heap_mapper;
+  G1RegionToSpaceMapper* _prev_bitmap_mapper;
+  G1RegionToSpaceMapper* _next_bitmap_mapper;
+  G1RegionToSpaceMapper* _bot_mapper;
+  G1RegionToSpaceMapper* _cardtable_mapper;
+  G1RegionToSpaceMapper* _card_counts_mapper;
 
   FreeRegionList _free_list;
 
+  // Each bit in this bitmap indicates whether the corresponding region is
+  // available for allocation.
+  BitMap _available_map;
+
    // The number of regions committed in the heap.
   uint _num_committed;
 
@@ -77,7 +89,6 @@
   uint _allocated_heapregions_length;
 
    HeapWord* heap_bottom() const { return _regions.bottom_address_mapped(); }
-  HeapWord* heap_top() const { return heap_bottom() + _num_committed * HeapRegion::GrainWords; }
    HeapWord* heap_end() const {return _regions.end_address_mapped(); }
 
   void make_regions_available(uint index, uint num_regions = 1);
@@ -92,6 +103,11 @@
   // that they do not all start from the same region.
   uint start_region_for_worker(uint worker_i, uint num_workers, uint num_regions) const;
 
+  // Find a contiguous set of empty or uncommitted regions of length num and return
+  // the index of the first region or G1_NO_HRS_INDEX if the search was unsuccessful.
+  // If only_empty is true, only empty regions are considered.
+  // Searches from bottom to top of the heap, doing a first-fit.
+  uint find_contiguous(size_t num, bool only_empty);
   // Finds the next sequence of unavailable regions starting from start_idx. Returns the
   // length of the sequence found. If this result is zero, no such sequence could be found,
   // otherwise res_idx indicates the start index of these regions.
@@ -100,6 +116,8 @@
   // the heap. Returns the length of the sequence found. If this value is zero, no
   // sequence could be found, otherwise res_idx contains the start index of this range.
   uint find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const;
+  // Allocate a new HeapRegion for the given index.
+  HeapRegion* new_heap_region(uint hrs_index);
 #ifdef ASSERT
 public:
   bool is_free(HeapRegion* hr) const;
@@ -107,16 +125,20 @@
   // Returns whether the given region is available for allocation.
   bool is_available(uint region) const;
 
-  // Allocate a new HeapRegion for the given index.
-  HeapRegion* new_heap_region(uint hrs_index);
   public:
    // Empty constructor, we'll initialize it with the initialize() method.
-  HeapRegionSeq() : _regions(), _reserved(), _storage(), _num_committed(0),
-          _free_list("Master Free List", new MasterFreeRegionListMtSafeChecker()),
-          _allocated_heapregions_length(0)
+  HeapRegionSeq() : _regions(), _heap_mapper(NULL), _num_committed(0),
+                    _prev_bitmap_mapper(NULL), _next_bitmap_mapper(NULL), _bot_mapper(NULL),
+                    _cardtable_mapper(NULL), _card_counts_mapper(NULL),
+                    _allocated_heapregions_length(0), _available_map(),
+                    _free_list("Free list", new MasterFreeRegionListMtSafeChecker())
   { }
 
-  void initialize(ReservedSpace reserved);
+  void initialize(G1RegionToSpaceMapper* heap_storage,
+                  G1RegionToSpaceMapper* prev_bitmap,
+                  G1RegionToSpaceMapper* next_bitmap,
+                  G1RegionToSpaceMapper* bot,
+                  G1RegionToSpaceMapper* cardtable,
+                  G1RegionToSpaceMapper* card_counts);
 
   // Return the "dummy" region used for G1AllocRegion. This is currently a hardwired
   // new HeapRegion that owns HeapRegion at index 0. Since at the moment we commit
@@ -175,8 +197,6 @@
   // Return the maximum number of regions in the heap.
   uint max_length() const { return (uint)_regions.length(); }
 
-  MemRegion committed() const { return MemRegion(heap_bottom(), heap_top()); }
-
   MemRegion reserved() const { return MemRegion(heap_bottom(), heap_end()); }
 
   // Expand the sequence to reflect that the heap has grown. Either create new
@@ -190,11 +210,12 @@
   // this.
   uint expand_at(uint start, uint num_regions);
 
-  // Find a contiguous set of empty or uncommitted regions of length num and return
-  // the index of the first region or G1_NO_HRS_INDEX if the search was unsuccessful.
-  // If only_empty is true, only empty regions are considered.
-  // Searches from bottom to top of the heap, doing a first-fit.
-  uint find_contiguous(size_t num, bool only_empty);
+  // Find a contiguous set of empty regions of length num. Returns the start index of
+  // that set, or G1_NO_HRS_INDEX.
+  uint find_contiguous_only_empty(size_t num) { return find_contiguous(num, true); }
+  // Find a contiguous set of empty or unavailable regions of length num. Returns the
+  // start index of that set, or G1_NO_HRS_INDEX.
+  uint find_contiguous_empty_or_unavailable(size_t num) { return find_contiguous(num, false); }
 
   HeapRegion* next_region_in_heap(const HeapRegion* r) const;
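
Since committed regions may now form a non-contiguous set, availability becomes a per-region bit rather than a single high-water mark. A toy contrast of the old and new is_available() semantics (cf. the change in heapRegionSeq.cpp above):

  #include <vector>

  // Old model: regions [0, num_committed) form a contiguous committed prefix.
  bool is_available_old(unsigned region, unsigned num_committed) {
    return region < num_committed;
  }

  // New model: any subset of regions can be available.
  bool is_available_new(unsigned region, const std::vector<bool>& available_map) {
    return available_map[region];
  }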
 
--- a/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp	Tue Aug 19 10:50:27 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp	Thu Aug 21 11:47:10 2014 +0200
@@ -27,6 +27,7 @@
 
 #include "gc_implementation/g1/heapRegion.hpp"
 #include "gc_implementation/g1/heapRegionSeq.hpp"
+#include "gc_implementation/g1/heapRegionSet.inline.hpp"
 
 inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const {
   assert(addr < heap_end(),
@@ -35,7 +36,6 @@
         err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT, p2i(addr), p2i(heap_bottom())));
 
   HeapRegion* hr = _regions.get_by_address(addr);
-  assert(hr != NULL, "invariant");
   return hr;
 }
 
--- a/src/share/vm/gc_implementation/g1/heapRegionSet.cpp	Tue Aug 19 10:50:27 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/heapRegionSet.cpp	Thu Aug 21 11:47:10 2014 +0200
@@ -403,3 +403,41 @@
               "master humongous set MT safety protocol outside a safepoint");
   }
 }
+
+void FreeRegionList_test() {
+  FreeRegionList l("test");
+
+  const uint num_regions_in_test = 5;
+  // Create a fake heap. It does not need to be valid, as the HeapRegion constructor
+  // does not access it.
+  MemRegion heap(NULL, num_regions_in_test * HeapRegion::GrainWords);
+  // Allocate a fake BOT because the HeapRegion constructor initializes
+  // the BOT.
+  size_t bot_size = G1BlockOffsetSharedArray::compute_size(heap.word_size());
+  HeapWord* bot_data = NEW_C_HEAP_ARRAY(HeapWord, bot_size, mtGC);
+  ReservedSpace bot_rs(bot_size);
+  G1RegionToSpaceMapper* bot_storage =
+    G1RegionToSpaceMapper::create_mapper(bot_rs,
+                                         os::vm_page_size(),
+                                         HeapRegion::GrainBytes,
+                                         G1BlockOffsetSharedArray::N_bytes,
+                                         mtGC);
+  G1BlockOffsetSharedArray oa(heap, bot_storage);
+  bot_storage->commit_regions(0, num_regions_in_test);
+  HeapRegion hr0(0, &oa, heap);
+  HeapRegion hr1(1, &oa, heap);
+  HeapRegion hr2(2, &oa, heap);
+  HeapRegion hr3(3, &oa, heap);
+  HeapRegion hr4(4, &oa, heap);
+  l.add_ordered(&hr1);
+  l.add_ordered(&hr0);
+  l.add_ordered(&hr3);
+  l.add_ordered(&hr4);
+  l.add_ordered(&hr2);
+  assert(l.length() == num_regions_in_test, "wrong length");
+  l.verify_list();
+
+  bot_storage->uncommit_regions(0, num_regions_in_test);
+  delete bot_storage;
+  FREE_C_HEAP_ARRAY(HeapWord, bot_data, mtGC);
+}
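
The adds above are deliberately shuffled (1, 0, 3, 4, 2) so that add_ordered() is
exercised at the head, middle, and tail of the list; verify_list() is then expected
to confirm ascending hrs_index order as well as the length:

  // Expected list contents after the shuffled add_ordered() calls:
  //   hr0 -> hr1 -> hr2 -> hr3 -> hr4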
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Tue Aug 19 10:50:27 2014 +0200
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Thu Aug 21 11:47:10 2014 +0200
@@ -78,6 +78,7 @@
                         (HeapWord*)(heap_rs.base() + heap_rs.size()));
 
   CardTableExtension* const barrier_set = new CardTableExtension(_reserved, 3);
+  barrier_set->initialize();
   _barrier_set = barrier_set;
   oopDesc::set_bs(_barrier_set);
   if (_barrier_set == NULL) {
--- a/src/share/vm/memory/cardTableModRefBS.cpp	Tue Aug 19 10:50:27 2014 +0200
+++ b/src/share/vm/memory/cardTableModRefBS.cpp	Thu Aug 21 11:47:10 2014 +0200
@@ -44,13 +44,6 @@
 // enumerate ref fields that have been modified (since the last
 // enumeration.)
 
-size_t CardTableModRefBS::cards_required(size_t covered_words)
-{
-  // Add one for a guard card, used to detect errors.
-  const size_t words = align_size_up(covered_words, card_size_in_words);
-  return words / card_size_in_words + 1;
-}
-
 size_t CardTableModRefBS::compute_byte_map_size()
 {
   assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
@@ -64,27 +57,50 @@
                                      int max_covered_regions):
   ModRefBarrierSet(max_covered_regions),
   _whole_heap(whole_heap),
-  _guard_index(cards_required(whole_heap.word_size()) - 1),
-  _last_valid_index(_guard_index - 1),
+  _guard_index(0),
+  _guard_region(),
+  _last_valid_index(0),
   _page_size(os::vm_page_size()),
-  _byte_map_size(compute_byte_map_size())
+  _byte_map_size(0),
+  _covered(NULL),
+  _committed(NULL),
+  _cur_covered_regions(0),
+  _byte_map(NULL),
+  byte_map_base(NULL),
+  // LNC functionality
+  _lowest_non_clean(NULL),
+  _lowest_non_clean_chunk_size(NULL),
+  _lowest_non_clean_base_chunk_index(NULL),
+  _last_LNC_resizing_collection(NULL)
 {
   _kind = BarrierSet::CardTableModRef;
 
+  assert((uintptr_t(_whole_heap.start()) & (card_size - 1)) == 0, "heap must start at card boundary");
+  assert((uintptr_t(_whole_heap.end()) & (card_size - 1)) == 0, "heap must end at card boundary");
+
+  assert(card_size <= 512, "card_size must be at most 512"); // historical limit; rationale undocumented
+
+  _covered   = new MemRegion[_max_covered_regions];
+  if (_covered == NULL) {
+    vm_exit_during_initialization("Could not allocate card table covered region set.");
+  }
+}
+
+void CardTableModRefBS::initialize() {
+  _guard_index = cards_required(_whole_heap.word_size()) - 1;
+  _last_valid_index = _guard_index - 1;
+
+  _byte_map_size = compute_byte_map_size();
+
   HeapWord* low_bound  = _whole_heap.start();
   HeapWord* high_bound = _whole_heap.end();
-  assert((uintptr_t(low_bound)  & (card_size - 1))  == 0, "heap must start at card boundary");
-  assert((uintptr_t(high_bound) & (card_size - 1))  == 0, "heap must end at card boundary");
 
-  assert(card_size <= 512, "card_size must be less than 512"); // why?
-
-  _covered   = new MemRegion[max_covered_regions];
-  _committed = new MemRegion[max_covered_regions];
-  if (_covered == NULL || _committed == NULL) {
-    vm_exit_during_initialization("couldn't alloc card table covered region set.");
+  _cur_covered_regions = 0;
+  _committed = new MemRegion[_max_covered_regions];
+  if (_committed == NULL) {
+    vm_exit_during_initialization("Could not allocate card table committed region set.");
   }
 
-  _cur_covered_regions = 0;
   const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
     MAX2(_page_size, (size_t) os::vm_allocation_granularity());
   ReservedSpace heap_rs(_byte_map_size, rs_align, false);
@@ -114,20 +130,20 @@
                             !ExecMem, "card table last card");
   *guard_card = last_card;
 
-   _lowest_non_clean =
-    NEW_C_HEAP_ARRAY(CardArr, max_covered_regions, mtGC);
+  _lowest_non_clean =
+    NEW_C_HEAP_ARRAY(CardArr, _max_covered_regions, mtGC);
   _lowest_non_clean_chunk_size =
-    NEW_C_HEAP_ARRAY(size_t, max_covered_regions, mtGC);
+    NEW_C_HEAP_ARRAY(size_t, _max_covered_regions, mtGC);
   _lowest_non_clean_base_chunk_index =
-    NEW_C_HEAP_ARRAY(uintptr_t, max_covered_regions, mtGC);
+    NEW_C_HEAP_ARRAY(uintptr_t, _max_covered_regions, mtGC);
   _last_LNC_resizing_collection =
-    NEW_C_HEAP_ARRAY(int, max_covered_regions, mtGC);
+    NEW_C_HEAP_ARRAY(int, _max_covered_regions, mtGC);
   if (_lowest_non_clean == NULL
       || _lowest_non_clean_chunk_size == NULL
       || _lowest_non_clean_base_chunk_index == NULL
       || _last_LNC_resizing_collection == NULL)
     vm_exit_during_initialization("couldn't allocate an LNC array.");
-  for (int i = 0; i < max_covered_regions; i++) {
+  for (int i = 0; i < _max_covered_regions; i++) {
     _lowest_non_clean[i] = NULL;
     _lowest_non_clean_chunk_size[i] = 0;
     _last_LNC_resizing_collection[i] = -1;
@@ -650,7 +666,7 @@
                                       jbyte val, bool val_equals) {
   jbyte* start    = byte_for(mr.start());
   jbyte* end      = byte_for(mr.last());
-  bool   failures = false;
+  bool failures = false;
   for (jbyte* curr = start; curr <= end; ++curr) {
     jbyte curr_val = *curr;
     bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
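
The net effect of this hunk is two-phase setup: the constructor now only records
the heap bounds and allocates the covered-region array, while initialize() maps the
byte map, plants the guard card, and sets up the LNC arrays. A minimal usage sketch
(the caller shape is an assumption; the parallelScavengeHeap.cpp and cardTableRS.cpp
hunks in this changeset show the real call sites):

  // Hypothetical caller: construction no longer commits any backing storage.
  CardTableModRefBS* bs = new CardTableModRefBS(whole_heap, max_covered_regions);
  bs->initialize(); // reserves and commits _byte_map, writes the guard card
  oopDesc::set_bs(bs);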
--- a/src/share/vm/memory/cardTableModRefBS.hpp	Tue Aug 19 10:50:27 2014 +0200
+++ b/src/share/vm/memory/cardTableModRefBS.hpp	Thu Aug 21 11:47:10 2014 +0200
@@ -96,12 +96,12 @@
  // The declaration order of these fields is important; see the
  // constructor and initialize() before changing.
   const MemRegion _whole_heap;       // the region covered by the card table
-  const size_t    _guard_index;      // index of very last element in the card
+  size_t          _guard_index;      // index of very last element in the card
                                      // table; it is set to a guard value
                                      // (last_card) and should never be modified
-  const size_t    _last_valid_index; // index of the last valid element
+  size_t          _last_valid_index; // index of the last valid element
   const size_t    _page_size;        // page size used when mapping _byte_map
-  const size_t    _byte_map_size;    // in bytes
+  size_t          _byte_map_size;    // in bytes
   jbyte*          _byte_map;         // the card marking array
 
   int _cur_covered_regions;
@@ -123,7 +123,12 @@
  protected:
  // Initialization utilities; covered_words is the size of the covered region
  // in words.
-  inline size_t cards_required(size_t covered_words);
+  inline size_t cards_required(size_t covered_words) {
+    // Add one for a guard card, used to detect errors.
+    const size_t words = align_size_up(covered_words, card_size_in_words);
+    return words / card_size_in_words + 1;
+  }
+
   inline size_t compute_byte_map_size();
 
   // Finds and return the index of the region, if any, to which the given
@@ -137,7 +142,7 @@
   int find_covering_region_containing(HeapWord* addr);
 
   // Resize one of the regions covered by the remembered set.
-  void resize_covered_region(MemRegion new_region);
+  virtual void resize_covered_region(MemRegion new_region);
 
   // Returns the leftmost end of a committed region corresponding to a
   // covered region before covered region "ind", or else "NULL" if "ind" is
@@ -282,6 +287,8 @@
   CardTableModRefBS(MemRegion whole_heap, int max_covered_regions);
   ~CardTableModRefBS();
 
+  virtual void initialize();
+
   // *** Barrier set functions.
 
   bool has_write_ref_pre_barrier() { return false; }
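
To make the now-inlined cards_required() concrete, a worked example assuming
64-bit HeapWords and the default 512-byte card size:

  // covered_words      = 1 MiB of heap = 131072 words (8-byte HeapWords)
  // card_size_in_words = 512 / 8 = 64
  // cards_required     = align_size_up(131072, 64) / 64 + 1
  //                    = 2048 + 1 = 2049   (the +1 is the guard card)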
--- a/src/share/vm/memory/cardTableRS.cpp	Tue Aug 19 10:50:27 2014 +0200
+++ b/src/share/vm/memory/cardTableRS.cpp	Thu Aug 21 11:47:10 2014 +0200
@@ -53,6 +53,7 @@
 #else
   _ct_bs = new CardTableModRefBSForCTRS(whole_heap, max_covered_regions);
 #endif
+  _ct_bs->initialize();
   set_bs(_ct_bs);
   _last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(jbyte, GenCollectedHeap::max_gens + 1,
                          mtGC, 0, AllocFailStrategy::RETURN_NULL);
--- a/src/share/vm/prims/jni.cpp	Tue Aug 19 10:50:27 2014 +0200
+++ b/src/share/vm/prims/jni.cpp	Thu Aug 21 11:47:10 2014 +0200
@@ -5082,6 +5082,7 @@
 void TestG1BiasedArray_test();
 void TestBufferingOopClosure_test();
 void TestCodeCacheRemSet_test();
+void FreeRegionList_test();
 #endif
 
 void execute_internal_vm_tests() {
@@ -5112,6 +5113,9 @@
     run_unit_test(HeapRegionRemSet::test_prt());
     run_unit_test(TestBufferingOopClosure_test());
     run_unit_test(TestCodeCacheRemSet_test());
+    if (UseG1GC) {
+      run_unit_test(FreeRegionList_test());
+    }
 #endif
     tty->print_cr("All internal VM tests passed");
   }
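
FreeRegionList_test is guarded by UseG1GC because it touches G1-only state
(HeapRegion::GrainWords and the G1 mapper machinery) that is only set up when the
G1 collector is selected. For orientation, run_unit_test is assumed to be the
usual announce-and-invoke macro defined earlier in jni.cpp; a sketch, not a quote
from this changeset:

  #define run_unit_test(unit_test_function_call)              \
    tty->print_cr("Running test: " #unit_test_function_call); \
    unit_test_function_call;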