diff src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 845:df6caf649ff7

6700789: G1: Enable use of compressed oops with G1 heaps
Summary: Modifications to G1 so as to allow the use of compressed oops.
Reviewed-by: apetrusenko, coleenp, jmasa, kvn, never, phh, tonyp
author ysr
date Tue, 14 Jul 2009 15:40:39 -0700
parents 0316eac49d5a
children 42d84bbbecf4
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Jul 10 16:01:20 2009 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Jul 14 15:40:39 2009 -0700
@@ -1655,11 +1655,14 @@
 // Computes the sum of the storage used by the various regions.
 
 size_t G1CollectedHeap::used() const {
-  assert(Heap_lock->owner() != NULL,
-         "Should be owned on this thread's behalf.");
+  // Temporarily, until 6859911 is fixed. XXX
+  // assert(Heap_lock->owner() != NULL,
+  //        "Should be owned on this thread's behalf.");
   size_t result = _summary_bytes_used;
-  if (_cur_alloc_region != NULL)
-    result += _cur_alloc_region->used();
+  // Read only once in case it is set to NULL concurrently
+  HeapRegion* hr = _cur_alloc_region;
+  if (hr != NULL)
+    result += hr->used();
   return result;
 }
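The rewrite above is the classic read-once idiom: the old code could read _cur_alloc_region twice, pass the NULL check, and then crash when a concurrent thread cleared the field between the two reads. A minimal standalone sketch of the idiom, assuming a std::atomic field (the 2009 HotSpot code relies on a plain read; Region and the byte counts are illustrative):

    #include <atomic>
    #include <cstddef>

    struct Region {
      size_t used() const { return 4096; }      // illustrative
    };

    std::atomic<Region*> cur_alloc_region{nullptr};
    size_t summary_bytes_used = 0;

    size_t heap_used() {
      size_t result = summary_bytes_used;
      // Read the shared field exactly once; a second read after the
      // NULL check could observe NULL and crash on hr->used().
      Region* hr = cur_alloc_region.load(std::memory_order_acquire);
      if (hr != nullptr)
        result += hr->used();
      return result;
    }
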
 
@@ -2133,13 +2136,13 @@
   VerifyLivenessOopClosure(G1CollectedHeap* _g1h) {
     g1h = _g1h;
   }
-  void do_oop(narrowOop *p) {
-    guarantee(false, "NYI");
-  }
-  void do_oop(oop *p) {
-    oop obj = *p;
-    assert(obj == NULL || !g1h->is_obj_dead(obj),
-           "Dead object referenced by a not dead object");
+  void do_oop(narrowOop *p) { do_oop_work(p); }
+  void do_oop(      oop *p) { do_oop_work(p); }
+
+  template <class T> void do_oop_work(T *p) {
+    oop obj = oopDesc::load_decode_heap_oop(p);
+    guarantee(obj == NULL || !g1h->is_obj_dead(obj),
+              "Dead object referenced by a not dead object");
   }
 };
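This hunk introduces the dispatch pattern used throughout the changeset: both virtual do_oop overloads forward to one do_oop_work template, so the closure body is written once and serves both full-width and compressed fields. A minimal sketch with stand-in types (not HotSpot's declarations):

    #include <cstdint>
    #include <cstdio>

    struct oopDesc;
    typedef oopDesc* oop;          // full-width heap reference
    typedef uint32_t narrowOop;    // compressed heap reference

    class OopClosure {
    public:
      virtual void do_oop(oop* p) = 0;
      virtual void do_oop(narrowOop* p) = 0;
    };

    class PrintFieldClosure : public OopClosure {
    public:
      // One templated body shared by both pointer widths.
      template <class T> void do_oop_work(T* p) {
        printf("field at %p is %zu bytes wide\n", (void*)p, sizeof(T));
      }
      virtual void do_oop(oop* p)       { do_oop_work(p); }
      virtual void do_oop(narrowOop* p) { do_oop_work(p); }
    };
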
 
@@ -2206,8 +2209,10 @@
   // use_prev_marking == true  -> use "prev" marking information,
   // use_prev_marking == false -> use "next" marking information
   VerifyRegionClosure(bool allow_dirty, bool par, bool use_prev_marking)
-    : _allow_dirty(allow_dirty), _par(par),
+    : _allow_dirty(allow_dirty),
+      _par(par),
       _use_prev_marking(use_prev_marking) {}
+
   bool doHeapRegion(HeapRegion* r) {
     guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue,
               "Should be unclaimed at verify points.");
@@ -2231,18 +2236,16 @@
   // use_prev_marking == true  -> use "prev" marking information,
   // use_prev_marking == false -> use "next" marking information
   VerifyRootsClosure(bool use_prev_marking) :
-    _g1h(G1CollectedHeap::heap()), _failures(false),
+    _g1h(G1CollectedHeap::heap()),
+    _failures(false),
     _use_prev_marking(use_prev_marking) { }
 
   bool failures() { return _failures; }
 
-  void do_oop(narrowOop* p) {
-    guarantee(false, "NYI");
-  }
-
-  void do_oop(oop* p) {
-    oop obj = *p;
-    if (obj != NULL) {
+  template <class T> void do_oop_nv(T* p) {
+    T heap_oop = oopDesc::load_heap_oop(p);
+    if (!oopDesc::is_null(heap_oop)) {
+      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
       if (_g1h->is_obj_dead_cond(obj, _use_prev_marking)) {
         gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
                                "points to dead obj "PTR_FORMAT, p, (void*) obj);
@@ -2251,6 +2254,9 @@
       }
     }
   }
+
+  void do_oop(oop* p)       { do_oop_nv(p); }
+  void do_oop(narrowOop* p) { do_oop_nv(p); }
 };
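The load/decode split is what makes the closure safe under compressed oops: load_heap_oop reads the raw (possibly 32-bit) field, is_null tests the undecoded value, and only a non-null value is decoded to an address. Roughly, and purely as an assumed model of the scheme (base and shift are chosen at VM startup, not fixed):

    #include <cstdint>

    typedef uint32_t narrowOop;

    // Assumed encoding: a narrowOop holds the object's offset from a
    // heap base, scaled by object alignment; the value 0 encodes NULL.
    static char*     narrow_oop_base  = nullptr;  // illustrative
    static const int narrow_oop_shift = 3;        // illustrative: 8-byte alignment

    inline bool is_null(narrowOop v) { return v == 0; }

    inline void* decode_heap_oop_not_null(narrowOop v) {
      return narrow_oop_base + ((uintptr_t)v << narrow_oop_shift);
    }

    inline void* decode_heap_oop(narrowOop v) {
      return is_null(v) ? nullptr : decode_heap_oop_not_null(v);
    }
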
 
 // This is the task used for parallel heap verification.
@@ -2267,7 +2273,8 @@
   G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty,
                   bool use_prev_marking) :
     AbstractGangTask("Parallel verify task"),
-    _g1h(g1h), _allow_dirty(allow_dirty),
+    _g1h(g1h),
+    _allow_dirty(allow_dirty),
     _use_prev_marking(use_prev_marking) { }
 
   void work(int worker_i) {
@@ -2479,12 +2486,10 @@
 
 void
 G1CollectedHeap::doConcurrentMark() {
-  if (G1ConcMark) {
-    MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
-    if (!_cmThread->in_progress()) {
-      _cmThread->set_started();
-      CGC_lock->notify();
-    }
+  MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
+  if (!_cmThread->in_progress()) {
+    _cmThread->set_started();
+    CGC_lock->notify();
   }
 }
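With the G1ConcMark flag gone, an initial-mark pause always signals the concurrent mark thread through CGC_lock. The handshake is an ordinary mutex/condition wakeup; a sketch using portable C++ primitives as stand-ins for CGC_lock and the CM thread (names illustrative):

    #include <condition_variable>
    #include <mutex>

    std::mutex cgc_lock;              // stands in for CGC_lock
    std::condition_variable cgc_cv;
    bool started = false;             // _cmThread->in_progress() analogue

    void do_concurrent_mark() {       // VM thread, at a safepoint
      std::lock_guard<std::mutex> x(cgc_lock);
      if (!started) {
        started = true;
        cgc_cv.notify_one();          // wake the marking thread
      }
    }

    void marking_thread_wait() {      // concurrent mark thread
      std::unique_lock<std::mutex> x(cgc_lock);
      cgc_cv.wait(x, [] { return started; });
      // ... start concurrent marking ...
    }
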
 
@@ -2561,9 +2566,11 @@
                           "Not enough space for young surv words summary.");
   }
   memset(_surviving_young_words, 0, array_length * sizeof(size_t));
+#ifdef ASSERT
   for (size_t i = 0;  i < array_length; ++i) {
-    guarantee( _surviving_young_words[i] == 0, "invariant" );
-  }
+    assert( _surviving_young_words[i] == 0, "memset above" );
+  }
+#endif // ASSERT
 }
 
 void
@@ -2649,7 +2656,7 @@
       COMPILER2_PRESENT(DerivedPointerTable::clear());
 
       // We want to turn off ref discovery, if necessary, and turn it back on
-      // on again later if we do.
+      // again later if we do. XXX Dubious: why is discovery disabled?
       bool was_enabled = ref_processor()->discovery_enabled();
       if (was_enabled) ref_processor()->disable_discovery();
 
@@ -2662,9 +2669,6 @@
       double start_time_sec = os::elapsedTime();
       GCOverheadReporter::recordSTWStart(start_time_sec);
       size_t start_used_bytes = used();
-      if (!G1ConcMark) {
-        do_sync_mark();
-      }
 
       g1_policy()->record_collection_pause_start(start_time_sec,
                                                  start_used_bytes);
@@ -2775,6 +2779,13 @@
           g1_policy()->should_initiate_conc_mark()) {
         concurrent_mark()->checkpointRootsInitialPost();
         set_marking_started();
+        // CAUTION: after the doConcurrentMark() call below,
+        // the concurrent marking thread(s) could be running
+        // concurrently with us. Make sure that anything after
+        // this point does not assume that we are the only GC thread
+        // running. Note: of course, the actual marking work will
+        // not start until the safepoint itself is released in
+        // ConcurrentGCThread::safepoint_desynchronize().
         doConcurrentMark();
       }
 
@@ -3123,10 +3134,8 @@
   G1CollectedHeap* _g1;
 public:
   G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
-  void do_oop(narrowOop* p) {
-    guarantee(false, "NYI");
-  }
-  void do_oop(oop* p) {
+  void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
+  void do_oop(      oop* p) {
     oop obj = *p;
 #ifdef G1_DEBUG
     if (PrintGC && Verbose) {
@@ -3138,7 +3147,6 @@
     if (_g1->obj_in_cs(obj)) {
       assert( obj->is_forwarded(), "invariant" );
       *p = obj->forwardee();
-
 #ifdef G1_DEBUG
       gclog_or_tty->print_cr("     in CSet: moved "PTR_FORMAT" -> "PTR_FORMAT,
                              (void*) obj, (void*) *p);
@@ -3155,12 +3163,12 @@
   UpdateRSetImmediate(G1CollectedHeap* g1) :
     _g1(g1), _g1_rem_set(g1->g1_rem_set()) {}
 
-  void do_oop(narrowOop* p) {
-    guarantee(false, "NYI");
-  }
-  void do_oop(oop* p) {
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(      oop* p) { do_oop_work(p); }
+  template <class T> void do_oop_work(T* p) {
     assert(_from->is_in_reserved(p), "paranoia");
-    if (*p != NULL && !_from->is_survivor()) {
+    T heap_oop = oopDesc::load_heap_oop(p);
+    if (!oopDesc::is_null(heap_oop) && !_from->is_survivor()) {
       _g1_rem_set->par_write_ref(_from, p, 0);
     }
   }
@@ -3176,12 +3184,12 @@
   UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
     _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {}
 
-  void do_oop(narrowOop* p) {
-    guarantee(false, "NYI");
-  }
-  void do_oop(oop* p) {
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(      oop* p) { do_oop_work(p); }
+  template <class T> void do_oop_work(T* p) {
     assert(_from->is_in_reserved(p), "paranoia");
-    if (!_from->is_in_reserved(*p) && !_from->is_survivor()) {
+    if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) &&
+        !_from->is_survivor()) {
       size_t card_index = _ct_bs->index_for(p);
       if (_ct_bs->mark_card_deferred(card_index)) {
         _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
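UpdateRSetDeferred avoids touching the remembered set during the pause: it marks the card covering the updated field and enqueues that card for later processing. A simplified, self-contained sketch of the card arithmetic (the 512-byte card size is typical, the table size and wrap are illustrative):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    static const size_t card_shift = 9;            // 512-byte cards
    static const size_t table_size = 1u << 20;     // illustrative
    static uint8_t card_table[table_size];
    static std::vector<uint8_t*> dirty_card_queue;

    inline size_t index_for(const void* field) {
      return (reinterpret_cast<uintptr_t>(field) >> card_shift)
             & (table_size - 1);                   // illustrative wrap
    }

    // True only the first time a card is marked deferred, so each
    // card is enqueued at most once.
    inline bool mark_card_deferred(size_t idx) {
      if (card_table[idx] != 0) return false;
      card_table[idx] = 1;
      return true;
    }

    void deferred_rs_update(void* field) {
      size_t idx = index_for(field);
      if (mark_card_deferred(idx))
        dirty_card_queue.push_back(&card_table[idx]);
    }
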
@@ -3536,614 +3544,66 @@
   fill_with_object(block, free_words);
 }
 
-#define use_local_bitmaps         1
-#define verify_local_bitmaps      0
-
 #ifndef PRODUCT
-
-class GCLabBitMap;
-class GCLabBitMapClosure: public BitMapClosure {
-private:
-  ConcurrentMark* _cm;
-  GCLabBitMap*    _bitmap;
-
-public:
-  GCLabBitMapClosure(ConcurrentMark* cm,
-                     GCLabBitMap* bitmap) {
-    _cm     = cm;
-    _bitmap = bitmap;
-  }
-
-  virtual bool do_bit(size_t offset);
-};
-
-#endif // PRODUCT
-
-#define oop_buffer_length 256
-
-class GCLabBitMap: public BitMap {
-private:
-  ConcurrentMark* _cm;
-
-  int       _shifter;
-  size_t    _bitmap_word_covers_words;
-
-  // beginning of the heap
-  HeapWord* _heap_start;
-
-  // this is the actual start of the GCLab
-  HeapWord* _real_start_word;
-
-  // this is the actual end of the GCLab
-  HeapWord* _real_end_word;
-
-  // this is the first word, possibly located before the actual start
-  // of the GCLab, that corresponds to the first bit of the bitmap
-  HeapWord* _start_word;
-
-  // size of a GCLab in words
-  size_t _gclab_word_size;
-
-  static int shifter() {
-    return MinObjAlignment - 1;
-  }
-
-  // how many heap words does a single bitmap word corresponds to?
-  static size_t bitmap_word_covers_words() {
-    return BitsPerWord << shifter();
-  }
-
-  static size_t gclab_word_size() {
-    return G1ParallelGCAllocBufferSize / HeapWordSize;
-  }
-
-  static size_t bitmap_size_in_bits() {
-    size_t bits_in_bitmap = gclab_word_size() >> shifter();
-    // We are going to ensure that the beginning of a word in this
-    // bitmap also corresponds to the beginning of a word in the
-    // global marking bitmap. To handle the case where a GCLab
-    // starts from the middle of the bitmap, we need to add enough
-    // space (i.e. up to a bitmap word) to ensure that we have
-    // enough bits in the bitmap.
-    return bits_in_bitmap + BitsPerWord - 1;
-  }
-public:
-  GCLabBitMap(HeapWord* heap_start)
-    : BitMap(bitmap_size_in_bits()),
-      _cm(G1CollectedHeap::heap()->concurrent_mark()),
-      _shifter(shifter()),
-      _bitmap_word_covers_words(bitmap_word_covers_words()),
-      _heap_start(heap_start),
-      _gclab_word_size(gclab_word_size()),
-      _real_start_word(NULL),
-      _real_end_word(NULL),
-      _start_word(NULL)
-  {
-    guarantee( size_in_words() >= bitmap_size_in_words(),
-               "just making sure");
-  }
-
-  inline unsigned heapWordToOffset(HeapWord* addr) {
-    unsigned offset = (unsigned) pointer_delta(addr, _start_word) >> _shifter;
-    assert(offset < size(), "offset should be within bounds");
-    return offset;
-  }
-
-  inline HeapWord* offsetToHeapWord(size_t offset) {
-    HeapWord* addr =  _start_word + (offset << _shifter);
-    assert(_real_start_word <= addr && addr < _real_end_word, "invariant");
-    return addr;
-  }
-
-  bool fields_well_formed() {
-    bool ret1 = (_real_start_word == NULL) &&
-                (_real_end_word == NULL) &&
-                (_start_word == NULL);
-    if (ret1)
-      return true;
-
-    bool ret2 = _real_start_word >= _start_word &&
-      _start_word < _real_end_word &&
-      (_real_start_word + _gclab_word_size) == _real_end_word &&
-      (_start_word + _gclab_word_size + _bitmap_word_covers_words)
-                                                              > _real_end_word;
-    return ret2;
-  }
-
-  inline bool mark(HeapWord* addr) {
-    guarantee(use_local_bitmaps, "invariant");
-    assert(fields_well_formed(), "invariant");
-
-    if (addr >= _real_start_word && addr < _real_end_word) {
-      assert(!isMarked(addr), "should not have already been marked");
-
-      // first mark it on the bitmap
-      at_put(heapWordToOffset(addr), true);
-
-      return true;
-    } else {
-      return false;
-    }
-  }
-
-  inline bool isMarked(HeapWord* addr) {
-    guarantee(use_local_bitmaps, "invariant");
-    assert(fields_well_formed(), "invariant");
-
-    return at(heapWordToOffset(addr));
-  }
-
-  void set_buffer(HeapWord* start) {
-    guarantee(use_local_bitmaps, "invariant");
-    clear();
-
-    assert(start != NULL, "invariant");
-    _real_start_word = start;
-    _real_end_word   = start + _gclab_word_size;
-
-    size_t diff =
-      pointer_delta(start, _heap_start) % _bitmap_word_covers_words;
-    _start_word = start - diff;
-
-    assert(fields_well_formed(), "invariant");
-  }
-
-#ifndef PRODUCT
-  void verify() {
-    // verify that the marks have been propagated
-    GCLabBitMapClosure cl(_cm, this);
-    iterate(&cl);
-  }
-#endif // PRODUCT
-
-  void retire() {
-    guarantee(use_local_bitmaps, "invariant");
-    assert(fields_well_formed(), "invariant");
-
-    if (_start_word != NULL) {
-      CMBitMap*       mark_bitmap = _cm->nextMarkBitMap();
-
-      // this means that the bitmap was set up for the GCLab
-      assert(_real_start_word != NULL && _real_end_word != NULL, "invariant");
-
-      mark_bitmap->mostly_disjoint_range_union(this,
-                                0, // always start from the start of the bitmap
-                                _start_word,
-                                size_in_words());
-      _cm->grayRegionIfNecessary(MemRegion(_real_start_word, _real_end_word));
-
-#ifndef PRODUCT
-      if (use_local_bitmaps && verify_local_bitmaps)
-        verify();
-#endif // PRODUCT
-    } else {
-      assert(_real_start_word == NULL && _real_end_word == NULL, "invariant");
-    }
-  }
-
-  static size_t bitmap_size_in_words() {
-    return (bitmap_size_in_bits() + BitsPerWord - 1) / BitsPerWord;
-  }
-};
-
-#ifndef PRODUCT
-
 bool GCLabBitMapClosure::do_bit(size_t offset) {
   HeapWord* addr = _bitmap->offsetToHeapWord(offset);
   guarantee(_cm->isMarked(oop(addr)), "it should be!");
   return true;
 }
-
 #endif // PRODUCT
 
-class G1ParGCAllocBuffer: public ParGCAllocBuffer {
-private:
-  bool        _retired;
-  bool        _during_marking;
-  GCLabBitMap _bitmap;
-
-public:
-  G1ParGCAllocBuffer() :
-    ParGCAllocBuffer(G1ParallelGCAllocBufferSize / HeapWordSize),
-    _during_marking(G1CollectedHeap::heap()->mark_in_progress()),
-    _bitmap(G1CollectedHeap::heap()->reserved_region().start()),
-    _retired(false)
-  { }
-
-  inline bool mark(HeapWord* addr) {
-    guarantee(use_local_bitmaps, "invariant");
-    assert(_during_marking, "invariant");
-    return _bitmap.mark(addr);
-  }
-
-  inline void set_buf(HeapWord* buf) {
-    if (use_local_bitmaps && _during_marking)
-      _bitmap.set_buffer(buf);
-    ParGCAllocBuffer::set_buf(buf);
-    _retired = false;
-  }
-
-  inline void retire(bool end_of_gc, bool retain) {
-    if (_retired)
-      return;
-    if (use_local_bitmaps && _during_marking) {
-      _bitmap.retire();
-    }
-    ParGCAllocBuffer::retire(end_of_gc, retain);
-    _retired = true;
-  }
-};
-
-
-class G1ParScanThreadState : public StackObj {
-protected:
-  G1CollectedHeap* _g1h;
-  RefToScanQueue*  _refs;
-  DirtyCardQueue   _dcq;
-  CardTableModRefBS* _ct_bs;
-  G1RemSet* _g1_rem;
-
-  typedef GrowableArray<oop*> OverflowQueue;
-  OverflowQueue* _overflowed_refs;
-
-  G1ParGCAllocBuffer _alloc_buffers[GCAllocPurposeCount];
-  ageTable           _age_table;
-
-  size_t           _alloc_buffer_waste;
-  size_t           _undo_waste;
-
-  OopsInHeapRegionClosure*      _evac_failure_cl;
-  G1ParScanHeapEvacClosure*     _evac_cl;
-  G1ParScanPartialArrayClosure* _partial_scan_cl;
-
-  int _hash_seed;
-  int _queue_num;
-
-  int _term_attempts;
+G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num)
+  : _g1h(g1h),
+    _refs(g1h->task_queue(queue_num)),
+    _dcq(&g1h->dirty_card_queue_set()),
+    _ct_bs((CardTableModRefBS*)_g1h->barrier_set()),
+    _g1_rem(g1h->g1_rem_set()),
+    _hash_seed(17), _queue_num(queue_num),
+    _term_attempts(0),
+    _age_table(false),
 #if G1_DETAILED_STATS
-  int _pushes, _pops, _steals, _steal_attempts;
-  int _overflow_pushes;
-#endif
-
-  double _start;
-  double _start_strong_roots;
-  double _strong_roots_time;
-  double _start_term;
-  double _term_time;
-
-  // Map from young-age-index (0 == not young, 1 is youngest) to
-  // surviving words. base is what we get back from the malloc call
-  size_t* _surviving_young_words_base;
-  // this points into the array, as we use the first few entries for padding
-  size_t* _surviving_young_words;
-
-#define PADDING_ELEM_NUM (64 / sizeof(size_t))
-
-  void   add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
-
-  void   add_to_undo_waste(size_t waste)         { _undo_waste += waste; }
-
-  DirtyCardQueue& dirty_card_queue()             { return _dcq;  }
-  CardTableModRefBS* ctbs()                      { return _ct_bs; }
-
-  void immediate_rs_update(HeapRegion* from, oop* p, int tid) {
-    if (!from->is_survivor()) {
-      _g1_rem->par_write_ref(from, p, tid);
-    }
-  }
-
-  void deferred_rs_update(HeapRegion* from, oop* p, int tid) {
-    // If the new value of the field points to the same region or
-    // is the to-space, we don't need to include it in the Rset updates.
-    if (!from->is_in_reserved(*p) && !from->is_survivor()) {
-      size_t card_index = ctbs()->index_for(p);
-      // If the card hasn't been added to the buffer, do it.
-      if (ctbs()->mark_card_deferred(card_index)) {
-        dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
-      }
-    }
-  }
-
-public:
-  G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num)
-    : _g1h(g1h),
-      _refs(g1h->task_queue(queue_num)),
-      _dcq(&g1h->dirty_card_queue_set()),
-      _ct_bs((CardTableModRefBS*)_g1h->barrier_set()),
-      _g1_rem(g1h->g1_rem_set()),
-      _hash_seed(17), _queue_num(queue_num),
-      _term_attempts(0),
-      _age_table(false),
-#if G1_DETAILED_STATS
-      _pushes(0), _pops(0), _steals(0),
-      _steal_attempts(0),  _overflow_pushes(0),
+    _pushes(0), _pops(0), _steals(0),
+    _steal_attempts(0),  _overflow_pushes(0),
 #endif
-      _strong_roots_time(0), _term_time(0),
-      _alloc_buffer_waste(0), _undo_waste(0)
-  {
-    // we allocate G1YoungSurvRateNumRegions plus one entries, since
-    // we "sacrifice" entry 0 to keep track of surviving bytes for
-    // non-young regions (where the age is -1)
-    // We also add a few elements at the beginning and at the end in
-    // an attempt to eliminate cache contention
-    size_t real_length = 1 + _g1h->g1_policy()->young_cset_length();
-    size_t array_length = PADDING_ELEM_NUM +
-                          real_length +
-                          PADDING_ELEM_NUM;
-    _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length);
-    if (_surviving_young_words_base == NULL)
-      vm_exit_out_of_memory(array_length * sizeof(size_t),
-                            "Not enough space for young surv histo.");
-    _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
-    memset(_surviving_young_words, 0, real_length * sizeof(size_t));
-
-    _overflowed_refs = new OverflowQueue(10);
-
-    _start = os::elapsedTime();
-  }
-
-  ~G1ParScanThreadState() {
-    FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
-  }
-
-  RefToScanQueue*   refs()            { return _refs;             }
-  OverflowQueue*    overflowed_refs() { return _overflowed_refs;  }
-  ageTable*         age_table()       { return &_age_table;       }
-
-  G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
-    return &_alloc_buffers[purpose];
-  }
-
-  size_t alloc_buffer_waste()                    { return _alloc_buffer_waste; }
-  size_t undo_waste()                            { return _undo_waste; }
-
-  void push_on_queue(oop* ref) {
-    assert(ref != NULL, "invariant");
-    assert(has_partial_array_mask(ref) || _g1h->obj_in_cs(*ref), "invariant");
-
-    if (!refs()->push(ref)) {
-      overflowed_refs()->push(ref);
-      IF_G1_DETAILED_STATS(note_overflow_push());
-    } else {
-      IF_G1_DETAILED_STATS(note_push());
-    }
-  }
-
-  void pop_from_queue(oop*& ref) {
-    if (!refs()->pop_local(ref)) {
-      ref = NULL;
-    } else {
-      assert(ref != NULL, "invariant");
-      assert(has_partial_array_mask(ref) || _g1h->obj_in_cs(*ref),
-             "invariant");
-
-      IF_G1_DETAILED_STATS(note_pop());
-    }
-  }
-
-  void pop_from_overflow_queue(oop*& ref) {
-    ref = overflowed_refs()->pop();
-  }
-
-  int refs_to_scan()                             { return refs()->size();                 }
-  int overflowed_refs_to_scan()                  { return overflowed_refs()->length();    }
-
-  void update_rs(HeapRegion* from, oop* p, int tid) {
-    if (G1DeferredRSUpdate) {
-      deferred_rs_update(from, p, tid);
-    } else {
-      immediate_rs_update(from, p, tid);
-    }
-  }
-
-  HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
-
-    HeapWord* obj = NULL;
-    if (word_sz * 100 <
-        (size_t)(G1ParallelGCAllocBufferSize / HeapWordSize) *
-                                                  ParallelGCBufferWastePct) {
-      G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
-      add_to_alloc_buffer_waste(alloc_buf->words_remaining());
-      alloc_buf->retire(false, false);
-
-      HeapWord* buf =
-        _g1h->par_allocate_during_gc(purpose, G1ParallelGCAllocBufferSize / HeapWordSize);
-      if (buf == NULL) return NULL; // Let caller handle allocation failure.
-      // Otherwise.
-      alloc_buf->set_buf(buf);
-
-      obj = alloc_buf->allocate(word_sz);
-      assert(obj != NULL, "buffer was definitely big enough...");
-    } else {
-      obj = _g1h->par_allocate_during_gc(purpose, word_sz);
-    }
-    return obj;
-  }
-
-  HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) {
-    HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
-    if (obj != NULL) return obj;
-    return allocate_slow(purpose, word_sz);
-  }
-
-  void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
-    if (alloc_buffer(purpose)->contains(obj)) {
-      guarantee(alloc_buffer(purpose)->contains(obj + word_sz - 1),
-                "should contain whole object");
-      alloc_buffer(purpose)->undo_allocation(obj, word_sz);
-    } else {
-      CollectedHeap::fill_with_object(obj, word_sz);
-      add_to_undo_waste(word_sz);
-    }
-  }
-
-  void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
-    _evac_failure_cl = evac_failure_cl;
-  }
-  OopsInHeapRegionClosure* evac_failure_closure() {
-    return _evac_failure_cl;
-  }
-
-  void set_evac_closure(G1ParScanHeapEvacClosure* evac_cl) {
-    _evac_cl = evac_cl;
-  }
-
-  void set_partial_scan_closure(G1ParScanPartialArrayClosure* partial_scan_cl) {
-    _partial_scan_cl = partial_scan_cl;
-  }
-
-  int* hash_seed() { return &_hash_seed; }
-  int  queue_num() { return _queue_num; }
-
-  int term_attempts()   { return _term_attempts; }
-  void note_term_attempt()  { _term_attempts++; }
-
-#if G1_DETAILED_STATS
-  int pushes()          { return _pushes; }
-  int pops()            { return _pops; }
-  int steals()          { return _steals; }
-  int steal_attempts()  { return _steal_attempts; }
-  int overflow_pushes() { return _overflow_pushes; }
-
-  void note_push()          { _pushes++; }
-  void note_pop()           { _pops++; }
-  void note_steal()         { _steals++; }
-  void note_steal_attempt() { _steal_attempts++; }
-  void note_overflow_push() { _overflow_pushes++; }
-#endif
-
-  void start_strong_roots() {
-    _start_strong_roots = os::elapsedTime();
-  }
-  void end_strong_roots() {
-    _strong_roots_time += (os::elapsedTime() - _start_strong_roots);
-  }
-  double strong_roots_time() { return _strong_roots_time; }
-
-  void start_term_time() {
-    note_term_attempt();
-    _start_term = os::elapsedTime();
-  }
-  void end_term_time() {
-    _term_time += (os::elapsedTime() - _start_term);
-  }
-  double term_time() { return _term_time; }
-
-  double elapsed() {
-    return os::elapsedTime() - _start;
-  }
-
-  size_t* surviving_young_words() {
-    // We add on to hide entry 0 which accumulates surviving words for
-    // age -1 regions (i.e. non-young ones)
-    return _surviving_young_words;
-  }
-
-  void retire_alloc_buffers() {
-    for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
-      size_t waste = _alloc_buffers[ap].words_remaining();
-      add_to_alloc_buffer_waste(waste);
-      _alloc_buffers[ap].retire(true, false);
-    }
-  }
-
-private:
-  void deal_with_reference(oop* ref_to_scan) {
-    if (has_partial_array_mask(ref_to_scan)) {
-      _partial_scan_cl->do_oop_nv(ref_to_scan);
-    } else {
-      // Note: we can use "raw" versions of "region_containing" because
-      // "obj_to_scan" is definitely in the heap, and is not in a
-      // humongous region.
-      HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
-      _evac_cl->set_region(r);
-      _evac_cl->do_oop_nv(ref_to_scan);
-    }
-  }
-
-public:
-  void trim_queue() {
-    // I've replicated the loop twice, first to drain the overflow
-    // queue, second to drain the task queue. This is better than
-    // having a single loop, which checks both conditions and, inside
-    // it, either pops the overflow queue or the task queue, as each
-    // loop is tighter. Also, the decision to drain the overflow queue
-    // first is not arbitrary, as the overflow queue is not visible
-    // to the other workers, whereas the task queue is. So, we want to
-    // drain the "invisible" entries first, while allowing the other
-    // workers to potentially steal the "visible" entries.
-
-    while (refs_to_scan() > 0 || overflowed_refs_to_scan() > 0) {
-      while (overflowed_refs_to_scan() > 0) {
-        oop *ref_to_scan = NULL;
-        pop_from_overflow_queue(ref_to_scan);
-        assert(ref_to_scan != NULL, "invariant");
-        // We shouldn't have pushed it on the queue if it was not
-        // pointing into the CSet.
-        assert(ref_to_scan != NULL, "sanity");
-        assert(has_partial_array_mask(ref_to_scan) ||
-                                      _g1h->obj_in_cs(*ref_to_scan), "sanity");
-
-        deal_with_reference(ref_to_scan);
-      }
-
-      while (refs_to_scan() > 0) {
-        oop *ref_to_scan = NULL;
-        pop_from_queue(ref_to_scan);
-
-        if (ref_to_scan != NULL) {
-          // We shouldn't have pushed it on the queue if it was not
-          // pointing into the CSet.
-          assert(has_partial_array_mask(ref_to_scan) ||
-                                      _g1h->obj_in_cs(*ref_to_scan), "sanity");
-
-          deal_with_reference(ref_to_scan);
-        }
-      }
-    }
-  }
-};
+    _strong_roots_time(0), _term_time(0),
+    _alloc_buffer_waste(0), _undo_waste(0)
+{
+  // We allocate one entry more than G1YoungSurvRateNumRegions, since
+  // we "sacrifice" entry 0 to keep track of surviving bytes for
+  // non-young regions (where the age is -1).
+  // We also add a few elements at the beginning and at the end in
+  // an attempt to eliminate cache contention.
+  size_t real_length = 1 + _g1h->g1_policy()->young_cset_length();
+  size_t array_length = PADDING_ELEM_NUM +
+                        real_length +
+                        PADDING_ELEM_NUM;
+  _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length);
+  if (_surviving_young_words_base == NULL)
+    vm_exit_out_of_memory(array_length * sizeof(size_t),
+                          "Not enough space for young surv histo.");
+  _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
+  memset(_surviving_young_words, 0, real_length * sizeof(size_t));
+
+  _overflowed_refs = new OverflowQueue(10);
+
+  _start = os::elapsedTime();
+}
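The PADDING_ELEM_NUM entries on each side of the live counters (one cache line's worth of size_t, per the removed #define PADDING_ELEM_NUM (64 / sizeof(size_t))) keep a thread's surviving-young-words array from sharing cache lines with its neighbors. A standalone sketch of the same allocation shape (the 64-byte line size is an assumption):

    #include <cstddef>
    #include <cstdlib>
    #include <cstring>

    static const size_t PADDING_ELEM_NUM = 64 / sizeof(size_t);

    // Allocate real_length counters with a cache line of padding on
    // each side; the caller frees *base_out, not the returned pointer.
    size_t* alloc_padded_counters(size_t real_length, size_t** base_out) {
      size_t array_length = PADDING_ELEM_NUM + real_length + PADDING_ELEM_NUM;
      size_t* base = static_cast<size_t*>(malloc(array_length * sizeof(size_t)));
      if (base == NULL) abort();                 // out of memory
      size_t* counters = base + PADDING_ELEM_NUM;
      memset(counters, 0, real_length * sizeof(size_t));
      *base_out = base;
      return counters;
    }
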
 
 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
   _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
   _par_scan_state(par_scan_state) { }
 
-// This closure is applied to the fields of the objects that have just been copied.
-// Should probably be made inline and moved in g1OopClosures.inline.hpp.
-void G1ParScanClosure::do_oop_nv(oop* p) {
-  oop obj = *p;
-
-  if (obj != NULL) {
-    if (_g1->in_cset_fast_test(obj)) {
-      // We're not going to even bother checking whether the object is
-      // already forwarded or not, as this usually causes an immediate
-      // stall. We'll try to prefetch the object (for write, given that
-      // we might need to install the forwarding reference) and we'll
-      // get back to it when pop it from the queue
-      Prefetch::write(obj->mark_addr(), 0);
-      Prefetch::read(obj->mark_addr(), (HeapWordSize*2));
-
-      // slightly paranoid test; I'm trying to catch potential
-      // problems before we go into push_on_queue to know where the
-      // problem is coming from
-      assert(obj == *p, "the value of *p should not have changed");
-      _par_scan_state->push_on_queue(p);
-    } else {
-      _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
-    }
-  }
-}
-
-void G1ParCopyHelper::mark_forwardee(oop* p) {
+template <class T> void G1ParCopyHelper::mark_forwardee(T* p) {
   // This is called _after_ do_oop_work has been called, hence after
   // the object has been relocated to its new location and *p points
   // to its new location.
 
-  oop thisOop = *p;
-  if (thisOop != NULL) {
-    assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(thisOop)),
+  T heap_oop = oopDesc::load_heap_oop(p);
+  if (!oopDesc::is_null(heap_oop)) {
+    oop obj = oopDesc::decode_heap_oop(heap_oop);
+    assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj)),
            "shouldn't still be in the CSet if evacuation didn't fail.");
-    HeapWord* addr = (HeapWord*)thisOop;
+    HeapWord* addr = (HeapWord*)obj;
     if (_g1->is_in_g1_reserved(addr))
       _cm->grayRoot(oop(addr));
   }
@@ -4226,7 +3686,8 @@
 
     if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
       arrayOop(old)->set_length(0);
-      _par_scan_state->push_on_queue(set_partial_array_mask(old));
+      oop* old_p = set_partial_array_mask(old);
+      _par_scan_state->push_on_queue(old_p);
     } else {
       // No point in using the slower heap_region_containing() method,
       // given that we know obj is in the heap.
@@ -4240,11 +3701,11 @@
   return obj;
 }
 
-template<bool do_gen_barrier, G1Barrier barrier,
-         bool do_mark_forwardee, bool skip_cset_test>
-void G1ParCopyClosure<do_gen_barrier, barrier,
-                      do_mark_forwardee, skip_cset_test>::do_oop_work(oop* p) {
-  oop obj = *p;
+template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee, bool skip_cset_test>
+template <class T>
+void G1ParCopyClosure <do_gen_barrier, barrier, do_mark_forwardee, skip_cset_test>
+::do_oop_work(T* p) {
+  oop obj = oopDesc::load_decode_heap_oop(p);
   assert(barrier != G1BarrierRS || obj != NULL,
          "Precondition: G1BarrierRS implies obj is nonNull");
 
@@ -4261,9 +3722,10 @@
                            "into CS.", p, (void*) obj);
 #endif
     if (obj->is_forwarded()) {
-      *p = obj->forwardee();
+      oopDesc::encode_store_heap_oop(p, obj->forwardee());
     } else {
-      *p = copy_to_survivor_space(obj);
+      oop copy_oop = copy_to_survivor_space(obj);
+      oopDesc::encode_store_heap_oop(p, copy_oop);
     }
     // When scanning the RS, we only care about objs in CS.
     if (barrier == G1BarrierRS) {
@@ -4282,21 +3744,9 @@
 }
 
 template void G1ParCopyClosure<false, G1BarrierEvac, false, true>::do_oop_work(oop* p);
-
-template<class T> void G1ParScanPartialArrayClosure::process_array_chunk(
-  oop obj, int start, int end) {
-  // process our set of indices (include header in first chunk)
-  assert(start < end, "invariant");
-  T* const base      = (T*)objArrayOop(obj)->base();
-  T* const start_addr = (start == 0) ? (T*) obj : base + start;
-  T* const end_addr   = base + end;
-  MemRegion mr((HeapWord*)start_addr, (HeapWord*)end_addr);
-  _scanner.set_region(_g1->heap_region_containing(obj));
-  obj->oop_iterate(&_scanner, mr);
-}
-
-void G1ParScanPartialArrayClosure::do_oop_nv(oop* p) {
-  assert(!UseCompressedOops, "Needs to be fixed to work with compressed oops");
+template void G1ParCopyClosure<false, G1BarrierEvac, false, true>::do_oop_work(narrowOop* p);
+
+template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) {
   assert(has_partial_array_mask(p), "invariant");
   oop old = clear_partial_array_mask(p);
   assert(old->is_objArray(), "must be obj array");
@@ -4316,19 +3766,19 @@
     end = start + ParGCArrayScanChunk;
     arrayOop(old)->set_length(end);
     // Push remainder.
-    _par_scan_state->push_on_queue(set_partial_array_mask(old));
+    oop* old_p = set_partial_array_mask(old);
+    assert(arrayOop(old)->length() < obj->length(), "Empty push?");
+    _par_scan_state->push_on_queue(old_p);
   } else {
     // Restore length so that the heap remains parsable in
     // case of evacuation failure.
     arrayOop(old)->set_length(end);
   }
-
+  _scanner.set_region(_g1->heap_region_containing_raw(obj));
   // process our set of indices (include header in first chunk)
-  process_array_chunk<oop>(obj, start, end);
+  obj->oop_iterate_range(&_scanner, start, end);
 }
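The chunked array scan above repurposes the length field of the stale from-space copy as a progress cursor: each pass claims up to ParGCArrayScanChunk elements, re-pushes a masked reference if work remains, and iterates only [start, end). A language-level sketch of that control flow (ArrayTask, chunk and the queue are illustrative stand-ins):

    #include <vector>

    static const int chunk = 512;   // ParGCArrayScanChunk analogue

    struct ArrayTask {
      int* progress;                // plays the old copy's length field
      int  total;                   // true array length
    };

    void scan_array_chunk(ArrayTask t, std::vector<ArrayTask>& queue,
                          void (*scan)(int start, int end)) {
      int start = *t.progress;      // elements already handed out
      int end   = t.total;
      if (end - start > chunk) {
        end = start + chunk;
        *t.progress = end;          // record progress...
        queue.push_back(t);         // ...and push the remainder
      } else {
        *t.progress = end;          // restore the full length
      }
      scan(start, end);             // process just [start, end)
    }
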
 
-int G1ScanAndBalanceClosure::_nq = 0;
-
 class G1ParEvacuateFollowersClosure : public VoidClosure {
 protected:
   G1CollectedHeap*              _g1h;
@@ -4351,21 +3801,28 @@
   void do_void() {
     G1ParScanThreadState* pss = par_scan_state();
     while (true) {
-      oop* ref_to_scan;
       pss->trim_queue();
       IF_G1_DETAILED_STATS(pss->note_steal_attempt());
-      if (queues()->steal(pss->queue_num(),
-                          pss->hash_seed(),
-                          ref_to_scan)) {
+
+      StarTask stolen_task;
+      if (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) {
         IF_G1_DETAILED_STATS(pss->note_steal());
 
         // slightly paranoid tests; I'm trying to catch potential
         // problems before we go into push_on_queue to know where the
         // problem is coming from
-        assert(ref_to_scan != NULL, "invariant");
-        assert(has_partial_array_mask(ref_to_scan) ||
-                                   _g1h->obj_in_cs(*ref_to_scan), "invariant");
-        pss->push_on_queue(ref_to_scan);
+        assert((oop*)stolen_task != NULL, "Error");
+        if (stolen_task.is_narrow()) {
+          assert(UseCompressedOops, "Error");
+          narrowOop* p = (narrowOop*) stolen_task;
+          assert(has_partial_array_mask(p) ||
+                 _g1h->obj_in_cs(oopDesc::load_decode_heap_oop(p)), "Error");
+          pss->push_on_queue(p);
+        } else {
+          oop* p = (oop*) stolen_task;
+          assert(has_partial_array_mask(p) || _g1h->obj_in_cs(*p), "Error");
+          pss->push_on_queue(p);
+        }
         continue;
       }
       pss->start_term_time();
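StarTask is what lets one task queue hold both oop* and narrowOop* entries, with is_narrow() recovering the kind on the stealing side. The usual encoding is a low tag bit, valid because field addresses are at least 4-byte aligned; a sketch under that assumption (not necessarily HotSpot's exact representation):

    #include <cassert>
    #include <cstdint>

    struct oopDesc;
    typedef oopDesc* oop;
    typedef uint32_t narrowOop;

    class StarTaskSketch {
      uintptr_t _holder;            // pointer plus a 1-bit kind tag
    public:
      StarTaskSketch() : _holder(0) {}
      StarTaskSketch(oop* p)       : _holder(reinterpret_cast<uintptr_t>(p)) {}
      StarTaskSketch(narrowOop* p) : _holder(reinterpret_cast<uintptr_t>(p) | 1) {}

      bool is_narrow() const { return (_holder & 1) != 0; }

      operator oop*() const {
        assert(!is_narrow());
        return reinterpret_cast<oop*>(_holder);
      }
      operator narrowOop*() const {
        assert(is_narrow());
        return reinterpret_cast<narrowOop*>(_holder & ~uintptr_t(1));
      }
    };
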
@@ -4382,6 +3839,7 @@
   G1CollectedHeap*       _g1h;
   RefToScanQueueSet      *_queues;
   ParallelTaskTerminator _terminator;
+  int _n_workers;
 
   Mutex _stats_lock;
   Mutex* stats_lock() { return &_stats_lock; }
@@ -4397,7 +3855,8 @@
       _g1h(g1h),
       _queues(task_queues),
       _terminator(workers, _queues),
-      _stats_lock(Mutex::leaf, "parallel G1 stats lock", true)
+      _stats_lock(Mutex::leaf, "parallel G1 stats lock", true),
+      _n_workers(workers)
   {}
 
   RefToScanQueueSet* queues() { return _queues; }
@@ -4407,6 +3866,7 @@
   }
 
   void work(int i) {
+    if (i >= _n_workers) return;  // no work needed this round
     ResourceMark rm;
     HandleMark   hm;
 
@@ -4504,23 +3964,6 @@
 
 // *** Common G1 Evacuation Stuff
 
-class G1CountClosure: public OopsInHeapRegionClosure {
-public:
-  int n;
-  G1CountClosure() : n(0) {}
-  void do_oop(narrowOop* p) {
-    guarantee(false, "NYI");
-  }
-  void do_oop(oop* p) {
-    oop obj = *p;
-    assert(obj != NULL && G1CollectedHeap::heap()->obj_in_cs(obj),
-           "Rem set closure called on non-rem-set pointer.");
-    n++;
-  }
-};
-
-static G1CountClosure count_closure;
-
 void
 G1CollectedHeap::
 g1_process_strong_roots(bool collecting_perm_gen,
@@ -5570,8 +5013,3 @@
 void G1CollectedHeap::g1_unimplemented() {
   // Unimplemented();
 }
-
-
-// Local Variables: ***
-// c-indentation-style: gnu ***
-// End: ***