changeset 1523:8bfe9058ca46 hs19-b01

Merge
author jcoomes
date Thu, 13 May 2010 13:05:47 -0700
parents 67d74f7a15d9 (diff) ef1a1d051971 (current diff)
children 093432aa7573 cc387008223e
files src/share/vm/runtime/globals.hpp
diffstat 13 files changed, 598 insertions(+), 477 deletions(-)
--- a/src/cpu/sparc/vm/vm_version_sparc.cpp	Wed May 12 22:06:02 2010 -0700
+++ b/src/cpu/sparc/vm/vm_version_sparc.cpp	Thu May 13 13:05:47 2010 -0700
@@ -104,6 +104,12 @@
     if (FLAG_IS_DEFAULT(OptoLoopAlignment)) {
       FLAG_SET_DEFAULT(OptoLoopAlignment, 4);
     }
+    // When using CMS, we cannot use memset() in BOT updates because
+    // the sun4v/CMT version in libc_psr uses BIS which exposes
+    // "phantom zeros" to concurrent readers. See 6948537.
+    if (FLAG_IS_DEFAULT(UseMemSetInBOT) && UseConcMarkSweepGC) {
+      FLAG_SET_DEFAULT(UseMemSetInBOT, false);
+    }
   }
 
   // Use hardware population count instruction if available.
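
The guard above changes the default only when the user has not set UseMemSetInBOT explicitly on the command line. A minimal standalone sketch of that ergonomics pattern (the struct and helper below are illustrative stand-ins, not HotSpot's actual FLAG_* machinery):

#include <cstdio>

// Illustrative stand-ins for FLAG_IS_DEFAULT / FLAG_SET_DEFAULT: a
// flag carries its value plus whether the user set it explicitly;
// ergonomics may only override untouched defaults.
struct BoolFlag {
  bool value;
  bool set_by_user;
};

static void set_default(BoolFlag& f, bool v) {
  if (!f.set_by_user) {  // never clobber an explicit -XX: setting
    f.value = v;
  }
}

int main() {
  BoolFlag use_memset_in_bot = { true, false };  // ships with default true
  bool use_cms = true;                           // assume -XX:+UseConcMarkSweepGC
  if (use_cms) {
    set_default(use_memset_in_bot, false);       // ergonomics turn it off
  }
  std::printf("UseMemSetInBOT = %d\n", (int)use_memset_in_bot.value);
  return 0;
}
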
--- a/src/os_cpu/linux_zero/vm/os_linux_zero.cpp	Wed May 12 22:06:02 2010 -0700
+++ b/src/os_cpu/linux_zero/vm/os_linux_zero.cpp	Thu May 13 13:05:47 2010 -0700
@@ -288,7 +288,7 @@
       vm_exit_out_of_memory(0, "pthread_getattr_np");
     }
     else {
-      fatal1("pthread_getattr_np failed with errno = %d", res);
+      fatal(err_msg("pthread_getattr_np failed with errno = %d", res));
     }
   }
 
@@ -296,7 +296,7 @@
   size_t stack_bytes;
   res = pthread_attr_getstack(&attr, (void **) &stack_bottom, &stack_bytes);
   if (res != 0) {
-    fatal1("pthread_attr_getstack failed with errno = %d", res);
+    fatal(err_msg("pthread_attr_getstack failed with errno = %d", res));
   }
   address stack_top = stack_bottom + stack_bytes;
 
@@ -308,7 +308,7 @@
   size_t guard_bytes;
   res = pthread_attr_getguardsize(&attr, &guard_bytes);
   if (res != 0) {
-    fatal1("pthread_attr_getguardsize failed with errno = %d", res);
+    fatal(err_msg("pthread_attr_getguardsize failed with errno = %d", res));
   }
   int guard_pages = align_size_up(guard_bytes, page_bytes) / page_bytes;
   assert(guard_bytes == guard_pages * page_bytes, "unaligned guard");
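
These hunks replace the old fixed-arity fatal1() with a single fatal() taking a pre-formatted message. A self-contained sketch of the idiom, on the assumption that an err_msg-style helper simply formats varargs into a temporary buffer (an illustration, not HotSpot's actual err_msg implementation):

#include <cstdarg>
#include <cstdio>
#include <cstdlib>

// Hypothetical err_msg-style helper: formatting happens in a
// temporary buffer, so one string-taking fatal() can replace the
// old fixed-arity fatal1()/fatal2() family.
struct ErrMsg {
  char buf[256];
  ErrMsg(const char* fmt, ...) {
    va_list ap;
    va_start(ap, fmt);
    std::vsnprintf(buf, sizeof(buf), fmt, ap);
    va_end(ap);
  }
  operator const char*() const { return buf; }
};

static void fatal(const char* msg) {
  std::fprintf(stderr, "fatal error: %s\n", msg);
  std::abort();
}

int main() {
  int res = 22;  // pretend pthread_attr_getstack() returned EINVAL
  if (res != 0) {
    fatal(ErrMsg("pthread_attr_getstack failed with errno = %d", res));
  }
  return 0;
}
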
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Wed May 12 22:06:02 2010 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Thu May 13 13:05:47 2010 -0700
@@ -1926,59 +1926,6 @@
 
 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
 
-//////////////////////////////////////////////////////////////////////////////
-// We go over the list of promoted objects, removing each from the list,
-// and applying the closure (this may, in turn, add more elements to
-// the tail of the promoted list, and these newly added objects will
-// also be processed) until the list is empty.
-// To aid verification and debugging, in the non-product builds
-// we actually forward _promoHead each time we process a promoted oop.
-// Note that this is not necessary in general (i.e. when we don't need to
-// call PromotionInfo::verify()) because oop_iterate can only add to the
-// end of _promoTail, and never needs to look at _promoHead.
-
-#define PROMOTED_OOPS_ITERATE_DEFN(OopClosureType, nv_suffix)               \
-                                                                            \
-void PromotionInfo::promoted_oops_iterate##nv_suffix(OopClosureType* cl) {  \
-  NOT_PRODUCT(verify());                                                    \
-  PromotedObject *curObj, *nextObj;                                         \
-  for (curObj = _promoHead; curObj != NULL; curObj = nextObj) {             \
-    if ((nextObj = curObj->next()) == NULL) {                               \
-      /* protect ourselves against additions due to closure application     \
-         below by resetting the list.  */                                   \
-      assert(_promoTail == curObj, "Should have been the tail");            \
-      _promoHead = _promoTail = NULL;                                       \
-    }                                                                       \
-    if (curObj->hasDisplacedMark()) {                                       \
-      /* restore displaced header */                                        \
-      oop(curObj)->set_mark(nextDisplacedHeader());                         \
-    } else {                                                                \
-      /* restore prototypical header */                                     \
-      oop(curObj)->init_mark();                                             \
-    }                                                                       \
-    /* The "promoted_mark" should now not be set */                         \
-    assert(!curObj->hasPromotedMark(),                                      \
-           "Should have been cleared by restoring displaced mark-word");    \
-    NOT_PRODUCT(_promoHead = nextObj);                                      \
-    if (cl != NULL) oop(curObj)->oop_iterate(cl);                           \
-    if (nextObj == NULL) { /* start at head of list reset above */          \
-      nextObj = _promoHead;                                                 \
-    }                                                                       \
-  }                                                                         \
-  assert(noPromotions(), "post-condition violation");                       \
-  assert(_promoHead == NULL && _promoTail == NULL, "emptied promoted list");\
-  assert(_spoolHead == _spoolTail, "emptied spooling buffers");             \
-  assert(_firstIndex == _nextIndex, "empty buffer");                        \
-}
-
-// This should have been ALL_SINCE_...() just like the others,
-// but, because the body of the method above is somewhat longer,
-// the MSVC compiler cannot cope; as a workaround, we split the
-// macro into its 3 constituent parts below (see original macro
-// definition in specializedOopClosures.hpp).
-SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG(PROMOTED_OOPS_ITERATE_DEFN)
-PROMOTED_OOPS_ITERATE_DEFN(OopsInGenClosure,_v)
-
 
 void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
   // ugghh... how would one do this efficiently for a non-contiguous space?
@@ -2506,281 +2453,6 @@
   _dictionary->printDictCensus();
 }
 
-// Return the next displaced header, incrementing the pointer and
-// recycling spool area as necessary.
-markOop PromotionInfo::nextDisplacedHeader() {
-  assert(_spoolHead != NULL, "promotionInfo inconsistency");
-  assert(_spoolHead != _spoolTail || _firstIndex < _nextIndex,
-         "Empty spool space: no displaced header can be fetched");
-  assert(_spoolHead->bufferSize > _firstIndex, "Off by one error at head?");
-  markOop hdr = _spoolHead->displacedHdr[_firstIndex];
-  // Spool forward
-  if (++_firstIndex == _spoolHead->bufferSize) { // last location in this block
-    // forward to next block, recycling this block into spare spool buffer
-    SpoolBlock* tmp = _spoolHead->nextSpoolBlock;
-    assert(_spoolHead != _spoolTail, "Spooling storage mix-up");
-    _spoolHead->nextSpoolBlock = _spareSpool;
-    _spareSpool = _spoolHead;
-    _spoolHead = tmp;
-    _firstIndex = 1;
-    NOT_PRODUCT(
-      if (_spoolHead == NULL) {  // all buffers fully consumed
-        assert(_spoolTail == NULL && _nextIndex == 1,
-               "spool buffers processing inconsistency");
-      }
-    )
-  }
-  return hdr;
-}
-
-void PromotionInfo::track(PromotedObject* trackOop) {
-  track(trackOop, oop(trackOop)->klass());
-}
-
-void PromotionInfo::track(PromotedObject* trackOop, klassOop klassOfOop) {
-  // make a copy of header as it may need to be spooled
-  markOop mark = oop(trackOop)->mark();
-  trackOop->clearNext();
-  if (mark->must_be_preserved_for_cms_scavenge(klassOfOop)) {
-    // save non-prototypical header, and mark oop
-    saveDisplacedHeader(mark);
-    trackOop->setDisplacedMark();
-  } else {
-    // we'd like to assert something like the following:
-    // assert(mark == markOopDesc::prototype(), "consistency check");
-    // ... but the above won't work because the age bits have not (yet) been
-    // cleared. The remainder of the check would be identical to the
-    // condition checked in must_be_preserved() above, so we don't really
-    // have anything useful to check here!
-  }
-  if (_promoTail != NULL) {
-    assert(_promoHead != NULL, "List consistency");
-    _promoTail->setNext(trackOop);
-    _promoTail = trackOop;
-  } else {
-    assert(_promoHead == NULL, "List consistency");
-    _promoHead = _promoTail = trackOop;
-  }
-  // Mark as newly promoted, so we can skip over such objects
-  // when scanning dirty cards
-  assert(!trackOop->hasPromotedMark(), "Should not have been marked");
-  trackOop->setPromotedMark();
-}
-
-// Save the given displaced header, incrementing the pointer and
-// obtaining more spool area as necessary.
-void PromotionInfo::saveDisplacedHeader(markOop hdr) {
-  assert(_spoolHead != NULL && _spoolTail != NULL,
-         "promotionInfo inconsistency");
-  assert(_spoolTail->bufferSize > _nextIndex, "Off by one error at tail?");
-  _spoolTail->displacedHdr[_nextIndex] = hdr;
-  // Spool forward
-  if (++_nextIndex == _spoolTail->bufferSize) { // last location in this block
-    // get a new spooling block
-    assert(_spoolTail->nextSpoolBlock == NULL, "tail should terminate spool list");
-    _splice_point = _spoolTail;                   // save for splicing
-    _spoolTail->nextSpoolBlock = getSpoolBlock(); // might fail
-    _spoolTail = _spoolTail->nextSpoolBlock;      // might become NULL ...
-    // ... but will attempt filling before next promotion attempt
-    _nextIndex = 1;
-  }
-}
-
-// Ensure that spooling space exists. Return false if spooling space
-// could not be obtained.
-bool PromotionInfo::ensure_spooling_space_work() {
-  assert(!has_spooling_space(), "Only call when there is no spooling space");
-  // Try and obtain more spooling space
-  SpoolBlock* newSpool = getSpoolBlock();
-  assert(newSpool == NULL ||
-         (newSpool->bufferSize != 0 && newSpool->nextSpoolBlock == NULL),
-        "getSpoolBlock() sanity check");
-  if (newSpool == NULL) {
-    return false;
-  }
-  _nextIndex = 1;
-  if (_spoolTail == NULL) {
-    _spoolTail = newSpool;
-    if (_spoolHead == NULL) {
-      _spoolHead = newSpool;
-      _firstIndex = 1;
-    } else {
-      assert(_splice_point != NULL && _splice_point->nextSpoolBlock == NULL,
-             "Splice point invariant");
-      // Extra check that _splice_point is connected to list
-      #ifdef ASSERT
-      {
-        SpoolBlock* blk = _spoolHead;
-        for (; blk->nextSpoolBlock != NULL;
-             blk = blk->nextSpoolBlock);
-        assert(blk != NULL && blk == _splice_point,
-               "Splice point incorrect");
-      }
-      #endif // ASSERT
-      _splice_point->nextSpoolBlock = newSpool;
-    }
-  } else {
-    assert(_spoolHead != NULL, "spool list consistency");
-    _spoolTail->nextSpoolBlock = newSpool;
-    _spoolTail = newSpool;
-  }
-  return true;
-}
-
-// Get a free spool buffer from the free pool, getting a new block
-// from the heap if necessary.
-SpoolBlock* PromotionInfo::getSpoolBlock() {
-  SpoolBlock* res;
-  if ((res = _spareSpool) != NULL) {
-    _spareSpool = _spareSpool->nextSpoolBlock;
-    res->nextSpoolBlock = NULL;
-  } else {  // spare spool exhausted, get some from heap
-    res = (SpoolBlock*)(space()->allocateScratch(refillSize()));
-    if (res != NULL) {
-      res->init();
-    }
-  }
-  assert(res == NULL || res->nextSpoolBlock == NULL, "postcondition");
-  return res;
-}
-
-void PromotionInfo::startTrackingPromotions() {
-  assert(_spoolHead == _spoolTail && _firstIndex == _nextIndex,
-         "spooling inconsistency?");
-  _firstIndex = _nextIndex = 1;
-  _tracking = true;
-}
-
-#define CMSPrintPromoBlockInfo 1
-
-void PromotionInfo::stopTrackingPromotions(uint worker_id) {
-  assert(_spoolHead == _spoolTail && _firstIndex == _nextIndex,
-         "spooling inconsistency?");
-  _firstIndex = _nextIndex = 1;
-  _tracking = false;
-  if (CMSPrintPromoBlockInfo > 1) {
-    print_statistics(worker_id);
-  }
-}
-
-void PromotionInfo::print_statistics(uint worker_id) const {
-  assert(_spoolHead == _spoolTail && _firstIndex == _nextIndex,
-         "Else will undercount");
-  assert(CMSPrintPromoBlockInfo > 0, "Else unnecessary call");
-  // Count the number of blocks and slots in the free pool
-  size_t slots  = 0;
-  size_t blocks = 0;
-  for (SpoolBlock* cur_spool = _spareSpool;
-       cur_spool != NULL;
-       cur_spool = cur_spool->nextSpoolBlock) {
-    // the first entry is just a self-pointer; indices 1 through
-    // bufferSize - 1 are occupied (thus, bufferSize - 1 slots).
-    guarantee((void*)cur_spool->displacedHdr == (void*)&cur_spool->displacedHdr,
-              "first entry of displacedHdr should be self-referential");
-    slots += cur_spool->bufferSize - 1;
-    blocks++;
-  }
-  if (_spoolHead != NULL) {
-    slots += _spoolHead->bufferSize - 1;
-    blocks++;
-  }
-  gclog_or_tty->print_cr(" [worker %d] promo_blocks = %d, promo_slots = %d ",
-                         worker_id, blocks, slots);
-}
-
-// When _spoolTail is not NULL, then the slot <_spoolTail, _nextIndex>
-// points to the next slot available for filling.
-// The set of slots holding displaced headers are then all those in the
-// right-open interval denoted by:
-//
-//    [ <_spoolHead, _firstIndex>, <_spoolTail, _nextIndex> )
-//
-// When _spoolTail is NULL, then the set of slots with displaced headers
-// is all those starting at the slot <_spoolHead, _firstIndex> and
-// going up to the last slot of last block in the linked list.
-// In this latter case, _splice_point points to the tail block of
-// this linked list of blocks holding displaced headers.
-void PromotionInfo::verify() const {
-  // Verify the following:
-  // 1. the number of displaced headers matches the number of promoted
-  //    objects that have displaced headers
-  // 2. each promoted object lies in this space
-  debug_only(
-    PromotedObject* junk = NULL;
-    assert(junk->next_addr() == (void*)(oop(junk)->mark_addr()),
-           "Offset of PromotedObject::_next is expected to align with "
-           "  the OopDesc::_mark within OopDesc");
-  )
-  // FIXME: guarantee????
-  guarantee(_spoolHead == NULL || _spoolTail != NULL ||
-            _splice_point != NULL, "list consistency");
-  guarantee(_promoHead == NULL || _promoTail != NULL, "list consistency");
-  // count the number of objects with displaced headers
-  size_t numObjsWithDisplacedHdrs = 0;
-  for (PromotedObject* curObj = _promoHead; curObj != NULL; curObj = curObj->next()) {
-    guarantee(space()->is_in_reserved((HeapWord*)curObj), "Containment");
-    // the last promoted object may fail the mark() != NULL test of is_oop().
-    guarantee(curObj->next() == NULL || oop(curObj)->is_oop(), "must be an oop");
-    if (curObj->hasDisplacedMark()) {
-      numObjsWithDisplacedHdrs++;
-    }
-  }
-  // Count the number of displaced headers
-  size_t numDisplacedHdrs = 0;
-  for (SpoolBlock* curSpool = _spoolHead;
-       curSpool != _spoolTail && curSpool != NULL;
-       curSpool = curSpool->nextSpoolBlock) {
-    // the first entry is just a self-pointer; indices 1 through
-    // bufferSize - 1 are occupied (thus, bufferSize - 1 slots).
-    guarantee((void*)curSpool->displacedHdr == (void*)&curSpool->displacedHdr,
-              "first entry of displacedHdr should be self-referential");
-    numDisplacedHdrs += curSpool->bufferSize - 1;
-  }
-  guarantee((_spoolHead == _spoolTail) == (numDisplacedHdrs == 0),
-            "internal consistency");
-  guarantee(_spoolTail != NULL || _nextIndex == 1,
-            "Inconsistency between _spoolTail and _nextIndex");
-  // We overcounted (_firstIndex-1) worth of slots in block
-  // _spoolHead and we undercounted (_nextIndex-1) worth of
-  // slots in block _spoolTail. We make an appropriate
-  // adjustment by subtracting the first and adding the
-  // second:  - (_firstIndex - 1) + (_nextIndex - 1)
-  numDisplacedHdrs += (_nextIndex - _firstIndex);
-  guarantee(numDisplacedHdrs == numObjsWithDisplacedHdrs, "Displaced hdr count");
-}
-
-void PromotionInfo::print_on(outputStream* st) const {
-  SpoolBlock* curSpool = NULL;
-  size_t i = 0;
-  st->print_cr("start & end indices: [" SIZE_FORMAT ", " SIZE_FORMAT ")",
-               _firstIndex, _nextIndex);
-  for (curSpool = _spoolHead; curSpool != _spoolTail && curSpool != NULL;
-       curSpool = curSpool->nextSpoolBlock) {
-    curSpool->print_on(st);
-    st->print_cr(" active ");
-    i++;
-  }
-  for (curSpool = _spoolTail; curSpool != NULL;
-       curSpool = curSpool->nextSpoolBlock) {
-    curSpool->print_on(st);
-    st->print_cr(" inactive ");
-    i++;
-  }
-  for (curSpool = _spareSpool; curSpool != NULL;
-       curSpool = curSpool->nextSpoolBlock) {
-    curSpool->print_on(st);
-    st->print_cr(" free ");
-    i++;
-  }
-  st->print_cr(SIZE_FORMAT " header spooling blocks", i);
-}
-
-void SpoolBlock::print_on(outputStream* st) const {
-  st->print("[" PTR_FORMAT "," PTR_FORMAT "), " SIZE_FORMAT " HeapWords -> " PTR_FORMAT,
-            this, (HeapWord*)displacedHdr + bufferSize,
-            bufferSize, nextSpoolBlock);
-}
-
 ///////////////////////////////////////////////////////////////////////////
 // CFLS_LAB
 ///////////////////////////////////////////////////////////////////////////
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Wed May 12 22:06:02 2010 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Thu May 13 13:05:47 2010 -0700
@@ -33,140 +33,6 @@
 class ObjectClosureCareful;
 class Klass;
 
-class PromotedObject VALUE_OBJ_CLASS_SPEC {
- private:
-  enum {
-    promoted_mask  = right_n_bits(2),   // i.e. 0x3
-    displaced_mark = nth_bit(2),        // i.e. 0x4
-    next_mask      = ~(right_n_bits(3)) // i.e. ~(0x7)
-  };
-  intptr_t _next;
- public:
-  inline PromotedObject* next() const {
-    return (PromotedObject*)(_next & next_mask);
-  }
-  inline void setNext(PromotedObject* x) {
-    assert(((intptr_t)x & ~next_mask) == 0,
-           "Conflict in bit usage, "
-           " or insufficient alignment of objects");
-    _next |= (intptr_t)x;
-  }
-  inline void setPromotedMark() {
-    _next |= promoted_mask;
-  }
-  inline bool hasPromotedMark() const {
-    return (_next & promoted_mask) == promoted_mask;
-  }
-  inline void setDisplacedMark() {
-    _next |= displaced_mark;
-  }
-  inline bool hasDisplacedMark() const {
-    return (_next & displaced_mark) != 0;
-  }
-  inline void clearNext()        { _next = 0; }
-  debug_only(void *next_addr() { return (void *) &_next; })
-};
-
-class SpoolBlock: public FreeChunk {
-  friend class PromotionInfo;
- protected:
-  SpoolBlock*  nextSpoolBlock;
-  size_t       bufferSize;        // number of usable words in this block
-  markOop*     displacedHdr;      // the displaced headers start here
-
-  // Note about bufferSize: it denotes the number of entries available plus 1;
-  // legal indices range from 1 through bufferSize - 1.  See the verification
-  // code verify() that counts the number of displaced headers spooled.
-  size_t computeBufferSize() {
-    return (size() * sizeof(HeapWord) - sizeof(*this)) / sizeof(markOop);
-  }
-
- public:
-  void init() {
-    bufferSize = computeBufferSize();
-    displacedHdr = (markOop*)&displacedHdr;
-    nextSpoolBlock = NULL;
-  }
-
-  void print_on(outputStream* st) const;
-  void print() const { print_on(gclog_or_tty); }
-};
-
-class PromotionInfo VALUE_OBJ_CLASS_SPEC {
-  bool            _tracking;      // set if tracking
-  CompactibleFreeListSpace* _space; // the space to which this belongs
-  PromotedObject* _promoHead;     // head of list of promoted objects
-  PromotedObject* _promoTail;     // tail of list of promoted objects
-  SpoolBlock*     _spoolHead;     // first spooling block
-  SpoolBlock*     _spoolTail;     // last  non-full spooling block or null
-  SpoolBlock*     _splice_point;  // when _spoolTail is null, holds list tail
-  SpoolBlock*     _spareSpool;    // free spool buffer
-  size_t          _firstIndex;    // first active index in
-                                  // first spooling block (_spoolHead)
-  size_t          _nextIndex;     // last active index + 1 in last
-                                  // spooling block (_spoolTail)
- private:
-  // ensure that spooling space exists; return true if there is spooling space
-  bool ensure_spooling_space_work();
-
- public:
-  PromotionInfo() :
-    _tracking(0), _space(NULL),
-    _promoHead(NULL), _promoTail(NULL),
-    _spoolHead(NULL), _spoolTail(NULL),
-    _spareSpool(NULL), _firstIndex(1),
-    _nextIndex(1) {}
-
-  bool noPromotions() const {
-    assert(_promoHead != NULL || _promoTail == NULL, "list inconsistency");
-    return _promoHead == NULL;
-  }
-  void startTrackingPromotions();
-  void stopTrackingPromotions(uint worker_id = 0);
-  bool tracking() const          { return _tracking;  }
-  void track(PromotedObject* trackOop);      // keep track of a promoted oop
-  // The following variant must be used when trackOop is not fully
-  // initialized and has a NULL klass:
-  void track(PromotedObject* trackOop, klassOop klassOfOop); // keep track of a promoted oop
-  void setSpace(CompactibleFreeListSpace* sp) { _space = sp; }
-  CompactibleFreeListSpace* space() const     { return _space; }
-  markOop nextDisplacedHeader(); // get next header & forward spool pointer
-  void    saveDisplacedHeader(markOop hdr);
-                                 // save header and forward spool
-
-  inline size_t refillSize() const;
-
-  SpoolBlock* getSpoolBlock();   // return a free spooling block
-  inline bool has_spooling_space() {
-    return _spoolTail != NULL && _spoolTail->bufferSize > _nextIndex;
-  }
-  // ensure that spooling space exists
-  bool ensure_spooling_space() {
-    return has_spooling_space() || ensure_spooling_space_work();
-  }
-  #define PROMOTED_OOPS_ITERATE_DECL(OopClosureType, nv_suffix)  \
-    void promoted_oops_iterate##nv_suffix(OopClosureType* cl);
-  ALL_SINCE_SAVE_MARKS_CLOSURES(PROMOTED_OOPS_ITERATE_DECL)
-  #undef PROMOTED_OOPS_ITERATE_DECL
-  void promoted_oops_iterate(OopsInGenClosure* cl) {
-    promoted_oops_iterate_v(cl);
-  }
-  void verify()  const;
-  void reset() {
-    _promoHead = NULL;
-    _promoTail = NULL;
-    _spoolHead = NULL;
-    _spoolTail = NULL;
-    _spareSpool = NULL;
-    _firstIndex = 0;
-    _nextIndex = 0;
-
-  }
-
-  void print_on(outputStream* st) const;
-  void print_statistics(uint worker_id) const;
-};
-
 class LinearAllocBlock VALUE_OBJ_CLASS_SPEC {
  public:
   LinearAllocBlock() : _ptr(0), _word_size(0), _refillSize(0),
@@ -557,6 +423,12 @@
   // promoted since the most recent call to save_marks() on
   // this generation and has not subsequently been iterated
   // over (using oop_since_save_marks_iterate() above).
+  // This property holds only for single-threaded collections,
+  // and is typically used for Cheney scans; for MT scavenges,
+  // it holds for all objects promoted during a given scavenge,
+  // for the duration of that scavenge, and is used by
+  // card-scanning to avoid rescanning objects (being) promoted
+  // during that scavenge.
   bool obj_allocated_since_save_marks(const oop obj) const {
     assert(is_in_reserved(obj), "Wrong space?");
     return ((PromotedObject*)obj)->hasPromotedMark();
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed May 12 22:06:02 2010 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Thu May 13 13:05:47 2010 -0700
@@ -789,6 +789,14 @@
   _gc_counters = new CollectorCounters("CMS", 1);
   _completed_initialization = true;
   _inter_sweep_timer.start();  // start of time
+#ifdef SPARC
+  // Issue a stern warning, but allow use for experimentation and debugging.
+  if (VM_Version::is_sun4v() && UseMemSetInBOT) {
+    assert(!FLAG_IS_DEFAULT(UseMemSetInBOT), "Error");
+    warning("Experimental flag -XX:+UseMemSetInBOT is known to cause instability"
+            " on sun4v; please understand that you are using at your own risk!");
+  }
+#endif
 }
 
 const char* ConcurrentMarkSweepGeneration::name() const {
@@ -1356,7 +1364,7 @@
   obj->set_mark(m);
 
   // Now we can track the promoted object, if necessary.  We take care
-  // To delay the transition from uninitialized to full object
+  // to delay the transition from uninitialized to full object
   // (i.e., insertion of klass pointer) until after, so that it
   // atomically becomes a promoted object.
   if (promoInfo->tracking()) {
@@ -1416,10 +1424,9 @@
 
 bool CMSCollector::shouldConcurrentCollect() {
   if (_full_gc_requested) {
-    assert(ExplicitGCInvokesConcurrent, "Unexpected state");
     if (Verbose && PrintGCDetails) {
       gclog_or_tty->print_cr("CMSCollector: collect because of explicit "
-                             " gc request");
+                             " gc request (or gc_locker)");
     }
     return true;
   }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.cpp	Thu May 13 13:05:47 2010 -0700
@@ -0,0 +1,360 @@
+/*
+ * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+# include "incls/_precompiled.incl"
+# include "incls/_promotionInfo.cpp.incl"
+
+/////////////////////////////////////////////////////////////////////////
+//// PromotionInfo
+/////////////////////////////////////////////////////////////////////////
+
+
+//////////////////////////////////////////////////////////////////////////////
+// We go over the list of promoted objects, removing each from the list,
+// and applying the closure (this may, in turn, add more elements to
+// the tail of the promoted list, and these newly added objects will
+// also be processed) until the list is empty.
+// To aid verification and debugging, in the non-product builds
+// we actually forward _promoHead each time we process a promoted oop.
+// Note that this is not necessary in general (i.e. when we don't need to
+// call PromotionInfo::verify()) because oop_iterate can only add to the
+// end of _promoTail, and never needs to look at _promoHead.
+
+#define PROMOTED_OOPS_ITERATE_DEFN(OopClosureType, nv_suffix)               \
+                                                                            \
+void PromotionInfo::promoted_oops_iterate##nv_suffix(OopClosureType* cl) {  \
+  NOT_PRODUCT(verify());                                                    \
+  PromotedObject *curObj, *nextObj;                                         \
+  for (curObj = _promoHead; curObj != NULL; curObj = nextObj) {             \
+    if ((nextObj = curObj->next()) == NULL) {                               \
+      /* protect ourselves against additions due to closure application     \
+         below by resetting the list.  */                                   \
+      assert(_promoTail == curObj, "Should have been the tail");            \
+      _promoHead = _promoTail = NULL;                                       \
+    }                                                                       \
+    if (curObj->hasDisplacedMark()) {                                       \
+      /* restore displaced header */                                        \
+      oop(curObj)->set_mark(nextDisplacedHeader());                         \
+    } else {                                                                \
+      /* restore prototypical header */                                     \
+      oop(curObj)->init_mark();                                             \
+    }                                                                       \
+    /* The "promoted_mark" should now not be set */                         \
+    assert(!curObj->hasPromotedMark(),                                      \
+           "Should have been cleared by restoring displaced mark-word");    \
+    NOT_PRODUCT(_promoHead = nextObj);                                      \
+    if (cl != NULL) oop(curObj)->oop_iterate(cl);                           \
+    if (nextObj == NULL) { /* start at head of list reset above */          \
+      nextObj = _promoHead;                                                 \
+    }                                                                       \
+  }                                                                         \
+  assert(noPromotions(), "post-condition violation");                       \
+  assert(_promoHead == NULL && _promoTail == NULL, "emptied promoted list");\
+  assert(_spoolHead == _spoolTail, "emptied spooling buffers");             \
+  assert(_firstIndex == _nextIndex, "empty buffer");                        \
+}
+
+// This should have been ALL_SINCE_...() just like the others,
+// but, because the body of the method above is somehwat longer,
+// the MSVC compiler cannot cope; as a workaround, we split the
+// macro into its 3 constituent parts below (see original macro
+// definition in specializedOopClosures.hpp).
+SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG(PROMOTED_OOPS_ITERATE_DEFN)
+PROMOTED_OOPS_ITERATE_DEFN(OopsInGenClosure,_v)
+
+
+// Return the next displaced header, incrementing the pointer and
+// recycling spool area as necessary.
+markOop PromotionInfo::nextDisplacedHeader() {
+  assert(_spoolHead != NULL, "promotionInfo inconsistency");
+  assert(_spoolHead != _spoolTail || _firstIndex < _nextIndex,
+         "Empty spool space: no displaced header can be fetched");
+  assert(_spoolHead->bufferSize > _firstIndex, "Off by one error at head?");
+  markOop hdr = _spoolHead->displacedHdr[_firstIndex];
+  // Spool forward
+  if (++_firstIndex == _spoolHead->bufferSize) { // last location in this block
+    // forward to next block, recycling this block into spare spool buffer
+    SpoolBlock* tmp = _spoolHead->nextSpoolBlock;
+    assert(_spoolHead != _spoolTail, "Spooling storage mix-up");
+    _spoolHead->nextSpoolBlock = _spareSpool;
+    _spareSpool = _spoolHead;
+    _spoolHead = tmp;
+    _firstIndex = 1;
+    NOT_PRODUCT(
+      if (_spoolHead == NULL) {  // all buffers fully consumed
+        assert(_spoolTail == NULL && _nextIndex == 1,
+               "spool buffers processing inconsistency");
+      }
+    )
+  }
+  return hdr;
+}
+
+void PromotionInfo::track(PromotedObject* trackOop) {
+  track(trackOop, oop(trackOop)->klass());
+}
+
+void PromotionInfo::track(PromotedObject* trackOop, klassOop klassOfOop) {
+  // make a copy of header as it may need to be spooled
+  markOop mark = oop(trackOop)->mark();
+  trackOop->clearNext();
+  if (mark->must_be_preserved_for_cms_scavenge(klassOfOop)) {
+    // save non-prototypical header, and mark oop
+    saveDisplacedHeader(mark);
+    trackOop->setDisplacedMark();
+  } else {
+    // we'd like to assert something like the following:
+    // assert(mark == markOopDesc::prototype(), "consistency check");
+    // ... but the above won't work because the age bits have not (yet) been
+    // cleared. The remainder of the check would be identical to the
+    // condition checked in must_be_preserved() above, so we don't really
+    // have anything useful to check here!
+  }
+  if (_promoTail != NULL) {
+    assert(_promoHead != NULL, "List consistency");
+    _promoTail->setNext(trackOop);
+    _promoTail = trackOop;
+  } else {
+    assert(_promoHead == NULL, "List consistency");
+    _promoHead = _promoTail = trackOop;
+  }
+  // Mark as newly promoted, so we can skip over such objects
+  // when scanning dirty cards
+  assert(!trackOop->hasPromotedMark(), "Should not have been marked");
+  trackOop->setPromotedMark();
+}
+
+// Save the given displaced header, incrementing the pointer and
+// obtaining more spool area as necessary.
+void PromotionInfo::saveDisplacedHeader(markOop hdr) {
+  assert(_spoolHead != NULL && _spoolTail != NULL,
+         "promotionInfo inconsistency");
+  assert(_spoolTail->bufferSize > _nextIndex, "Off by one error at tail?");
+  _spoolTail->displacedHdr[_nextIndex] = hdr;
+  // Spool forward
+  if (++_nextIndex == _spoolTail->bufferSize) { // last location in this block
+    // get a new spooling block
+    assert(_spoolTail->nextSpoolBlock == NULL, "tail should terminate spool list");
+    _splice_point = _spoolTail;                   // save for splicing
+    _spoolTail->nextSpoolBlock = getSpoolBlock(); // might fail
+    _spoolTail = _spoolTail->nextSpoolBlock;      // might become NULL ...
+    // ... but will attempt filling before next promotion attempt
+    _nextIndex = 1;
+  }
+}
+
+// Ensure that spooling space exists. Return false if spooling space
+// could not be obtained.
+bool PromotionInfo::ensure_spooling_space_work() {
+  assert(!has_spooling_space(), "Only call when there is no spooling space");
+  // Try and obtain more spooling space
+  SpoolBlock* newSpool = getSpoolBlock();
+  assert(newSpool == NULL ||
+         (newSpool->bufferSize != 0 && newSpool->nextSpoolBlock == NULL),
+        "getSpoolBlock() sanity check");
+  if (newSpool == NULL) {
+    return false;
+  }
+  _nextIndex = 1;
+  if (_spoolTail == NULL) {
+    _spoolTail = newSpool;
+    if (_spoolHead == NULL) {
+      _spoolHead = newSpool;
+      _firstIndex = 1;
+    } else {
+      assert(_splice_point != NULL && _splice_point->nextSpoolBlock == NULL,
+             "Splice point invariant");
+      // Extra check that _splice_point is connected to list
+      #ifdef ASSERT
+      {
+        SpoolBlock* blk = _spoolHead;
+        for (; blk->nextSpoolBlock != NULL;
+             blk = blk->nextSpoolBlock);
+        assert(blk != NULL && blk == _splice_point,
+               "Splice point incorrect");
+      }
+      #endif // ASSERT
+      _splice_point->nextSpoolBlock = newSpool;
+    }
+  } else {
+    assert(_spoolHead != NULL, "spool list consistency");
+    _spoolTail->nextSpoolBlock = newSpool;
+    _spoolTail = newSpool;
+  }
+  return true;
+}
+
+// Get a free spool buffer from the free pool, getting a new block
+// from the heap if necessary.
+SpoolBlock* PromotionInfo::getSpoolBlock() {
+  SpoolBlock* res;
+  if ((res = _spareSpool) != NULL) {
+    _spareSpool = _spareSpool->nextSpoolBlock;
+    res->nextSpoolBlock = NULL;
+  } else {  // spare spool exhausted, get some from heap
+    res = (SpoolBlock*)(space()->allocateScratch(refillSize()));
+    if (res != NULL) {
+      res->init();
+    }
+  }
+  assert(res == NULL || res->nextSpoolBlock == NULL, "postcondition");
+  return res;
+}
+
+void PromotionInfo::startTrackingPromotions() {
+  assert(_spoolHead == _spoolTail && _firstIndex == _nextIndex,
+         "spooling inconsistency?");
+  _firstIndex = _nextIndex = 1;
+  _tracking = true;
+}
+
+#define CMSPrintPromoBlockInfo 1
+
+void PromotionInfo::stopTrackingPromotions(uint worker_id) {
+  assert(_spoolHead == _spoolTail && _firstIndex == _nextIndex,
+         "spooling inconsistency?");
+  _firstIndex = _nextIndex = 1;
+  _tracking = false;
+  if (CMSPrintPromoBlockInfo > 1) {
+    print_statistics(worker_id);
+  }
+}
+
+void PromotionInfo::print_statistics(uint worker_id) const {
+  assert(_spoolHead == _spoolTail && _firstIndex == _nextIndex,
+         "Else will undercount");
+  assert(CMSPrintPromoBlockInfo > 0, "Else unnecessary call");
+  // Count the number of blocks and slots in the free pool
+  size_t slots  = 0;
+  size_t blocks = 0;
+  for (SpoolBlock* cur_spool = _spareSpool;
+       cur_spool != NULL;
+       cur_spool = cur_spool->nextSpoolBlock) {
+    // the first entry is just a self-pointer; indices 1 through
+    // bufferSize - 1 are occupied (thus, bufferSize - 1 slots).
+    guarantee((void*)cur_spool->displacedHdr == (void*)&cur_spool->displacedHdr,
+              "first entry of displacedHdr should be self-referential");
+    slots += cur_spool->bufferSize - 1;
+    blocks++;
+  }
+  if (_spoolHead != NULL) {
+    slots += _spoolHead->bufferSize - 1;
+    blocks++;
+  }
+  gclog_or_tty->print_cr(" [worker %d] promo_blocks = %d, promo_slots = %d ",
+                         worker_id, blocks, slots);
+}
+
+// When _spoolTail is not NULL, then the slot <_spoolTail, _nextIndex>
+// points to the next slot available for filling.
+// The set of slots holding displaced headers are then all those in the
+// right-open interval denoted by:
+//
+//    [ <_spoolHead, _firstIndex>, <_spoolTail, _nextIndex> )
+//
+// When _spoolTail is NULL, then the set of slots with displaced headers
+// is all those starting at the slot <_spoolHead, _firstIndex> and
+// going up to the last slot of last block in the linked list.
+// In this latter case, _splice_point points to the tail block of
+// this linked list of blocks holding displaced headers.
+void PromotionInfo::verify() const {
+  // Verify the following:
+  // 1. the number of displaced headers matches the number of promoted
+  //    objects that have displaced headers
+  // 2. each promoted object lies in this space
+  debug_only(
+    PromotedObject* junk = NULL;
+    assert(junk->next_addr() == (void*)(oop(junk)->mark_addr()),
+           "Offset of PromotedObject::_next is expected to align with "
+           "  the OopDesc::_mark within OopDesc");
+  )
+  // FIXME: guarantee????
+  guarantee(_spoolHead == NULL || _spoolTail != NULL ||
+            _splice_point != NULL, "list consistency");
+  guarantee(_promoHead == NULL || _promoTail != NULL, "list consistency");
+  // count the number of objects with displaced headers
+  size_t numObjsWithDisplacedHdrs = 0;
+  for (PromotedObject* curObj = _promoHead; curObj != NULL; curObj = curObj->next()) {
+    guarantee(space()->is_in_reserved((HeapWord*)curObj), "Containment");
+    // the last promoted object may fail the mark() != NULL test of is_oop().
+    guarantee(curObj->next() == NULL || oop(curObj)->is_oop(), "must be an oop");
+    if (curObj->hasDisplacedMark()) {
+      numObjsWithDisplacedHdrs++;
+    }
+  }
+  // Count the number of displaced headers
+  size_t numDisplacedHdrs = 0;
+  for (SpoolBlock* curSpool = _spoolHead;
+       curSpool != _spoolTail && curSpool != NULL;
+       curSpool = curSpool->nextSpoolBlock) {
+    // the first entry is just a self-pointer; indices 1 through
+    // bufferSize - 1 are occupied (thus, bufferSize - 1 slots).
+    guarantee((void*)curSpool->displacedHdr == (void*)&curSpool->displacedHdr,
+              "first entry of displacedHdr should be self-referential");
+    numDisplacedHdrs += curSpool->bufferSize - 1;
+  }
+  guarantee((_spoolHead == _spoolTail) == (numDisplacedHdrs == 0),
+            "internal consistency");
+  guarantee(_spoolTail != NULL || _nextIndex == 1,
+            "Inconsistency between _spoolTail and _nextIndex");
+  // We overcounted (_firstIndex-1) worth of slots in block
+  // _spoolHead and we undercounted (_nextIndex-1) worth of
+  // slots in block _spoolTail. We make an appropriate
+  // adjustment by subtracting the first and adding the
+  // second:  - (_firstIndex - 1) + (_nextIndex - 1)
+  numDisplacedHdrs += (_nextIndex - _firstIndex);
+  guarantee(numDisplacedHdrs == numObjsWithDisplacedHdrs, "Displaced hdr count");
+}
+
+void PromotionInfo::print_on(outputStream* st) const {
+  SpoolBlock* curSpool = NULL;
+  size_t i = 0;
+  st->print_cr("start & end indices: [" SIZE_FORMAT ", " SIZE_FORMAT ")",
+               _firstIndex, _nextIndex);
+  for (curSpool = _spoolHead; curSpool != _spoolTail && curSpool != NULL;
+       curSpool = curSpool->nextSpoolBlock) {
+    curSpool->print_on(st);
+    st->print_cr(" active ");
+    i++;
+  }
+  for (curSpool = _spoolTail; curSpool != NULL;
+       curSpool = curSpool->nextSpoolBlock) {
+    curSpool->print_on(st);
+    st->print_cr(" inactive ");
+    i++;
+  }
+  for (curSpool = _spareSpool; curSpool != NULL;
+       curSpool = curSpool->nextSpoolBlock) {
+    curSpool->print_on(st);
+    st->print_cr(" free ");
+    i++;
+  }
+  st->print_cr(SIZE_FORMAT " header spooling blocks", i);
+}
+
+void SpoolBlock::print_on(outputStream* st) const {
+  st->print("[" PTR_FORMAT "," PTR_FORMAT "), " SIZE_FORMAT " HeapWords -> " PTR_FORMAT,
+            this, (HeapWord*)displacedHdr + bufferSize,
+            bufferSize, nextSpoolBlock);
+}
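
The adjustment at the end of verify() is easiest to trust with concrete numbers. The sketch below checks it with hypothetical values: three spool blocks of bufferSize 5 (usable slots 1 through 4), _firstIndex == 3 in the head block, _nextIndex == 2 in the tail block:

#include <cassert>
#include <cstddef>

int main() {
  const size_t bufferSize = 5;
  const size_t firstIndex = 3, nextIndex = 2;

  // verify() counts bufferSize - 1 slots for every block before
  // _spoolTail: here, the head block and one middle block.
  size_t counted = 2 * (bufferSize - 1);            // 8

  // Adjustment: subtract the (firstIndex - 1) head slots already
  // consumed, add the (nextIndex - 1) tail slots already filled.
  // As in the original, size_t wraparound makes 8 + (2 - 3) == 7.
  counted += nextIndex - firstIndex;

  // Direct count: head slots 3..4, middle slots 1..4, tail slot 1.
  size_t direct = (bufferSize - firstIndex) + (bufferSize - 1) + (nextIndex - 1);
  assert(counted == direct && counted == 7);
  return 0;
}
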
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.hpp	Thu May 13 13:05:47 2010 -0700
@@ -0,0 +1,161 @@
+/*
+ * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// Forward declarations
+class CompactibleFreeListSpace;
+
+class PromotedObject VALUE_OBJ_CLASS_SPEC {
+ private:
+  enum {
+    promoted_mask  = right_n_bits(2),   // i.e. 0x3
+    displaced_mark = nth_bit(2),        // i.e. 0x4
+    next_mask      = ~(right_n_bits(3)) // i.e. ~(0x7)
+  };
+  intptr_t _next;
+ public:
+  inline PromotedObject* next() const {
+    return (PromotedObject*)(_next & next_mask);
+  }
+  inline void setNext(PromotedObject* x) {
+    assert(((intptr_t)x & ~next_mask) == 0,
+           "Conflict in bit usage, "
+           " or insufficient alignment of objects");
+    _next |= (intptr_t)x;
+  }
+  inline void setPromotedMark() {
+    _next |= promoted_mask;
+  }
+  inline bool hasPromotedMark() const {
+    return (_next & promoted_mask) == promoted_mask;
+  }
+  inline void setDisplacedMark() {
+    _next |= displaced_mark;
+  }
+  inline bool hasDisplacedMark() const {
+    return (_next & displaced_mark) != 0;
+  }
+  inline void clearNext()        { _next = 0; }
+  debug_only(void *next_addr() { return (void *) &_next; })
+};
+
+class SpoolBlock: public FreeChunk {
+  friend class PromotionInfo;
+ protected:
+  SpoolBlock*  nextSpoolBlock;
+  size_t       bufferSize;        // number of usable words in this block
+  markOop*     displacedHdr;      // the displaced headers start here
+
+  // Note about bufferSize: it denotes the number of entries available plus 1;
+  // legal indices range from 1 through bufferSize - 1.  See the verification
+  // code verify() that counts the number of displaced headers spooled.
+  size_t computeBufferSize() {
+    return (size() * sizeof(HeapWord) - sizeof(*this)) / sizeof(markOop);
+  }
+
+ public:
+  void init() {
+    bufferSize = computeBufferSize();
+    displacedHdr = (markOop*)&displacedHdr;
+    nextSpoolBlock = NULL;
+  }
+
+  void print_on(outputStream* st) const;
+  void print() const { print_on(gclog_or_tty); }
+};
+
+class PromotionInfo VALUE_OBJ_CLASS_SPEC {
+  bool            _tracking;      // set if tracking
+  CompactibleFreeListSpace* _space; // the space to which this belongs
+  PromotedObject* _promoHead;     // head of list of promoted objects
+  PromotedObject* _promoTail;     // tail of list of promoted objects
+  SpoolBlock*     _spoolHead;     // first spooling block
+  SpoolBlock*     _spoolTail;     // last  non-full spooling block or null
+  SpoolBlock*     _splice_point;  // when _spoolTail is null, holds list tail
+  SpoolBlock*     _spareSpool;    // free spool buffer
+  size_t          _firstIndex;    // first active index in
+                                  // first spooling block (_spoolHead)
+  size_t          _nextIndex;     // last active index + 1 in last
+                                  // spooling block (_spoolTail)
+ private:
+  // ensure that spooling space exists; return true if there is spooling space
+  bool ensure_spooling_space_work();
+
+ public:
+  PromotionInfo() :
+    _tracking(0), _space(NULL),
+    _promoHead(NULL), _promoTail(NULL),
+    _spoolHead(NULL), _spoolTail(NULL),
+    _spareSpool(NULL), _firstIndex(1),
+    _nextIndex(1) {}
+
+  bool noPromotions() const {
+    assert(_promoHead != NULL || _promoTail == NULL, "list inconsistency");
+    return _promoHead == NULL;
+  }
+  void startTrackingPromotions();
+  void stopTrackingPromotions(uint worker_id = 0);
+  bool tracking() const          { return _tracking;  }
+  void track(PromotedObject* trackOop);      // keep track of a promoted oop
+  // The following variant must be used when trackOop is not fully
+  // initialized and has a NULL klass:
+  void track(PromotedObject* trackOop, klassOop klassOfOop); // keep track of a promoted oop
+  void setSpace(CompactibleFreeListSpace* sp) { _space = sp; }
+  CompactibleFreeListSpace* space() const     { return _space; }
+  markOop nextDisplacedHeader(); // get next header & forward spool pointer
+  void    saveDisplacedHeader(markOop hdr);
+                                 // save header and forward spool
+
+  inline size_t refillSize() const;
+
+  SpoolBlock* getSpoolBlock();   // return a free spooling block
+  inline bool has_spooling_space() {
+    return _spoolTail != NULL && _spoolTail->bufferSize > _nextIndex;
+  }
+  // ensure that spooling space exists
+  bool ensure_spooling_space() {
+    return has_spooling_space() || ensure_spooling_space_work();
+  }
+  #define PROMOTED_OOPS_ITERATE_DECL(OopClosureType, nv_suffix)  \
+    void promoted_oops_iterate##nv_suffix(OopClosureType* cl);
+  ALL_SINCE_SAVE_MARKS_CLOSURES(PROMOTED_OOPS_ITERATE_DECL)
+  #undef PROMOTED_OOPS_ITERATE_DECL
+  void promoted_oops_iterate(OopsInGenClosure* cl) {
+    promoted_oops_iterate_v(cl);
+  }
+  void verify()  const;
+  void reset() {
+    _promoHead = NULL;
+    _promoTail = NULL;
+    _spoolHead = NULL;
+    _spoolTail = NULL;
+    _spareSpool = NULL;
+    _firstIndex = 0;
+    _nextIndex = 0;
+
+  }
+
+  void print_on(outputStream* st) const;
+  void print_statistics(uint worker_id) const;
+};
+
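
PromotedObject overlays the object's mark word and relies on heap objects being at least 8-byte aligned, which frees the low three bits of the next link to carry the promoted mask (bits 0-1) and the displaced mark (bit 2). A standalone sketch of the encoding (illustrative only; the constants mirror the enum above):

#include <cassert>
#include <stdint.h>

const intptr_t promoted_mask  = 0x3;             // right_n_bits(2)
const intptr_t displaced_mark = 0x4;             // nth_bit(2)
const intptr_t next_mask      = ~(intptr_t)0x7;

int main() {
  alignas(8) static char slot[8];  // stand-in for a promoted object
  intptr_t next = 0;

  next |= (intptr_t)slot;          // setNext(): alignment keeps bits 0-2 clear
  next |= promoted_mask;           // setPromotedMark()
  next |= displaced_mark;          // setDisplacedMark()

  assert((void*)(next & next_mask) == (void*)slot);  // next() recovers the link
  assert((next & promoted_mask) == promoted_mask);   // hasPromotedMark()
  assert((next & displaced_mark) != 0);              // hasDisplacedMark()
  return 0;
}
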
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp	Wed May 12 22:06:02 2010 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp	Thu May 13 13:05:47 2010 -0700
@@ -163,6 +163,7 @@
 // GenCollectedHeap heap.
 void VM_GenCollectFullConcurrent::doit() {
   assert(Thread::current()->is_VM_thread(), "Should be VM thread");
+  assert(GCLockerInvokesConcurrent || ExplicitGCInvokesConcurrent, "Unexpected");
 
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   if (_gc_count_before == gch->total_collections()) {
@@ -190,7 +191,7 @@
     CMSCollector::disable_icms();
     // In case CMS thread was in icms_wait(), wake it up.
     CMSCollector::start_icms();
-    // Nudge the CMS thread to start a concurrent collection
+    // Nudge the CMS thread to start a concurrent collection.
     CMSCollector::request_full_gc(_full_gc_count_before);
   } else {
     FullGCCount_lock->notify_all();  // Inform the Java thread its work is done
@@ -231,7 +232,9 @@
   // e.g. at the rate of 1 full gc per ms, this could
   // overflow in about 1000 years.
   GenCollectedHeap* gch = GenCollectedHeap::heap();
-  if (gch->total_full_collections_completed() <= _full_gc_count_before) {
+  if (_gc_cause != GCCause::_gc_locker &&
+      gch->total_full_collections_completed() <= _full_gc_count_before) {
+    assert(ExplicitGCInvokesConcurrent, "Error");
     // Now, wait for witnessing concurrent gc cycle to complete,
     // but do so in native mode, because we want to lock the
     // FullGCEvent_lock, which may be needed by the VM thread
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp	Wed May 12 22:06:02 2010 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp	Thu May 13 13:05:47 2010 -0700
@@ -126,8 +126,7 @@
                               GCCause::Cause gc_cause)
     : VM_GC_Operation(gc_count_before, full_gc_count_before, true /* full */) {
     _gc_cause = gc_cause;
-    assert(FullGCCount_lock != NULL && UseConcMarkSweepGC &&
-           ExplicitGCInvokesConcurrent, "Otherwise shouldn't be here");
+    assert(FullGCCount_lock != NULL, "Error");
     assert(UseAsyncConcMarkSweepGC, "Else will hang caller");
   }
   ~VM_GenCollectFullConcurrent() {}
--- a/src/share/vm/gc_implementation/includeDB_gc_concurrentMarkSweep	Wed May 12 22:06:02 2010 -0700
+++ b/src/share/vm/gc_implementation/includeDB_gc_concurrentMarkSweep	Thu May 13 13:05:47 2010 -0700
@@ -122,6 +122,7 @@
 
 compactibleFreeListSpace.hpp            binaryTreeDictionary.hpp
 compactibleFreeListSpace.hpp            freeList.hpp
+compactibleFreeListSpace.hpp            promotionInfo.hpp
 compactibleFreeListSpace.hpp            space.hpp
 
 compactingPermGenGen.cpp                concurrentMarkSweepGeneration.inline.hpp
@@ -225,6 +226,14 @@
 
 freeList.hpp                            allocationStats.hpp
 
+promotionInfo.cpp                       compactibleFreeListSpace.hpp
+promotionInfo.cpp                       markOop.inline.hpp
+promotionInfo.cpp                       oop.inline.hpp
+promotionInfo.cpp                       promotionInfo.hpp
+
+promotionInfo.hpp                       allocation.hpp
+promotionInfo.hpp                       freeChunk.hpp
+
 vmCMSOperations.cpp			concurrentMarkSweepGeneration.inline.hpp
 vmCMSOperations.cpp			concurrentMarkSweepThread.hpp
 vmCMSOperations.cpp			dtrace.hpp
--- a/src/share/vm/memory/blockOffsetTable.hpp	Wed May 12 22:06:02 2010 -0700
+++ b/src/share/vm/memory/blockOffsetTable.hpp	Thu May 13 13:05:47 2010 -0700
@@ -140,14 +140,38 @@
            "right address out of range");
     assert(left  < right, "Heap addresses out of order");
     size_t num_cards = pointer_delta(right, left) >> LogN_words;
-    memset(&_offset_array[index_for(left)], offset, num_cards);
+
+    // Below, we may use an explicit loop instead of memset()
+    // because on certain platforms memset() can give concurrent
+    // readers "out-of-thin-air" phantom zeros; see 6948537.
+    if (UseMemSetInBOT) {
+      memset(&_offset_array[index_for(left)], offset, num_cards);
+    } else {
+      size_t i = index_for(left);
+      const size_t end = i + num_cards;
+      for (; i < end; i++) {
+        _offset_array[i] = offset;
+      }
+    }
   }
 
   void set_offset_array(size_t left, size_t right, u_char offset) {
     assert(right < _vs.committed_size(), "right address out of range");
     assert(left  <= right, "indexes out of order");
     size_t num_cards = right - left + 1;
-    memset(&_offset_array[left], offset, num_cards);
+
+    // Below, we may use an explicit loop instead of memset
+    // because on certain platforms memset() can give concurrent
+    // readers "out-of-thin-air" phantom zeros; see 6948537.
+    if (UseMemSetInBOT) {
+      memset(&_offset_array[left], offset, num_cards);
+    } else {
+      size_t i = left;
+      const size_t end = i + num_cards;
+      for (; i < end; i++) {
+        _offset_array[i] = offset;
+      }
+    }
   }
 
   void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
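
As for the hazard that motivates the flag: a block-initializing-store (BIS) memset may zero an entire cache line before writing the payload bytes, so a concurrent reader of the offset array can observe a zero that no thread ever stored. A hedged sketch of the two writer shapes, assuming a bare u_char array stands in for the BOT (this is not the actual libc_psr code):

#include <cstddef>
#include <cstring>

typedef unsigned char u_char;

// Writer shape A: block fill. On sun4v, libc_psr's BIS-based memset
// can make transient "phantom zeros" visible to a racing reader
// before the intended byte lands (see 6948537).
void fill_with_memset(u_char* arr, size_t from, size_t n, u_char offset) {
  std::memset(&arr[from], offset, n);
}

// Writer shape B: per-element stores. Each entry goes from its old
// value directly to offset, so a concurrent reader never observes
// an out-of-thin-air zero.
void fill_with_loop(u_char* arr, size_t from, size_t n, u_char offset) {
  for (size_t i = from; i < from + n; i++) {
    arr[i] = offset;
  }
}

int main() {
  u_char bot[16] = { 0 };
  fill_with_memset(bot, 0, 8, 5);
  fill_with_loop(bot, 8, 8, 5);
  return 0;
}
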
--- a/src/share/vm/memory/genCollectedHeap.cpp	Wed May 12 22:06:02 2010 -0700
+++ b/src/share/vm/memory/genCollectedHeap.cpp	Thu May 13 13:05:47 2010 -0700
@@ -410,9 +410,9 @@
 }
 
 bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
-  return (cause == GCCause::_java_lang_system_gc ||
-          cause == GCCause::_gc_locker) &&
-         UseConcMarkSweepGC && ExplicitGCInvokesConcurrent;
+  return UseConcMarkSweepGC &&
+         ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
+          (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
 }
 
 void GenCollectedHeap::do_collection(bool  full,
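
The rewritten predicate pairs each cause with its own opt-in flag, where previously both causes hung off ExplicitGCInvokesConcurrent. A hypothetical standalone restatement with a tiny harness (the parameters are stand-ins for the real flags):

#include <cstdio>

enum Cause { gc_locker, java_lang_system_gc, other_cause };

bool should_do_concurrent_full_gc(Cause cause,
                                  bool use_cms,
                                  bool gclocker_invokes_concurrent,
                                  bool explicit_gc_invokes_concurrent) {
  return use_cms &&
         ((cause == gc_locker && gclocker_invokes_concurrent) ||
          (cause == java_lang_system_gc && explicit_gc_invokes_concurrent));
}

int main() {
  // System.gc() goes concurrent only under ExplicitGCInvokesConcurrent;
  // a gc_locker-induced collection needs GCLockerInvokesConcurrent.
  std::printf("%d %d\n",
      (int)should_do_concurrent_full_gc(java_lang_system_gc, true, false, true),
      (int)should_do_concurrent_full_gc(gc_locker, true, false, true));
  return 0;
}
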
--- a/src/share/vm/runtime/globals.hpp	Wed May 12 22:06:02 2010 -0700
+++ b/src/share/vm/runtime/globals.hpp	Thu May 13 13:05:47 2010 -0700
@@ -327,6 +327,10 @@
   product(bool, UseMembar, false,                                           \
           "(Unstable) Issues membars on thread state transitions")          \
                                                                             \
+  /* Temporary: See 6948537 */                                             \
+  experimental(bool, UseMemSetInBOT, true,                                  \
+          "(Unstable) uses memset in BOT updates in GC code")               \
+                                                                            \
   diagnostic(bool, UnlockDiagnosticVMOptions, trueInDebug,                  \
           "Enable normal processing of flags relating to field diagnostics")\
                                                                             \
@@ -1299,6 +1303,10 @@
           "also unloads classes during such a concurrent gc cycle "         \
           "(effective only when UseConcMarkSweepGC)")                       \
                                                                             \
+  product(bool, GCLockerInvokesConcurrent, false,                           \
+          "The exit of a JNI CS necessitating a scavenge also"              \
+          " kicks off a bkgrd concurrent collection")                       \
+                                                                            \
   develop(bool, UseCMSAdaptiveFreeLists, true,                              \
           "Use Adaptive Free Lists in the CMS generation")                  \
                                                                             \