changeset 482:7c2386d67889

6765745: par compact - allow young gen spaces to be split
Reviewed-by: jmasa
author jcoomes
date Thu, 11 Dec 2008 12:05:14 -0800
parents 7d7a7c599c17
children 0f773163217d
files src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
diffstat 2 files changed, 518 insertions(+), 164 deletions(-)
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Thu Dec 11 12:05:08 2008 -0800
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Thu Dec 11 12:05:14 2008 -0800
@@ -88,6 +88,72 @@
 GrowableArray<size_t>   * PSParallelCompact::_last_gc_live_oops_size = NULL;
 #endif
 
+void SplitInfo::record(size_t src_region_idx, size_t partial_obj_size,
+                       HeapWord* destination)
+{
+  assert(src_region_idx != 0, "invalid src_region_idx");
+  assert(partial_obj_size != 0, "invalid partial_obj_size argument");
+  assert(destination != NULL, "invalid destination argument");
+
+  _src_region_idx = src_region_idx;
+  _partial_obj_size = partial_obj_size;
+  _destination = destination;
+
+  // These fields may not be updated below, so make sure they're clear.
+  assert(_dest_region_addr == NULL, "should have been cleared");
+  assert(_first_src_addr == NULL, "should have been cleared");
+
+  // Determine the number of destination regions for the partial object.
+  HeapWord* const last_word = destination + partial_obj_size - 1;
+  const ParallelCompactData& sd = PSParallelCompact::summary_data();
+  HeapWord* const beg_region_addr = sd.region_align_down(destination);
+  HeapWord* const end_region_addr = sd.region_align_down(last_word);
+
+  if (beg_region_addr == end_region_addr) {
+    // One destination region.
+    _destination_count = 1;
+    if (end_region_addr == destination) {
+      // The destination falls on a region boundary; thus the first word of the
+      // partial object will be the first word copied to the destination region.
+      _dest_region_addr = end_region_addr;
+      _first_src_addr = sd.region_to_addr(src_region_idx);
+    }
+  } else {
+    // Two destination regions.  When copied, the partial object will cross a
+    // destination region boundary, so a word somewhere within the partial
+    // object will be the first word copied to the second destination region.
+    _destination_count = 2;
+    _dest_region_addr = end_region_addr;
+    const size_t ofs = pointer_delta(end_region_addr, destination);
+    assert(ofs < _partial_obj_size, "sanity");
+    _first_src_addr = sd.region_to_addr(src_region_idx) + ofs;
+  }
+}
+
+void SplitInfo::clear()
+{
+  _src_region_idx = 0;
+  _partial_obj_size = 0;
+  _destination = NULL;
+  _destination_count = 0;
+  _dest_region_addr = NULL;
+  _first_src_addr = NULL;
+  assert(!is_valid(), "sanity");
+}
+
+#ifdef  ASSERT
+void SplitInfo::verify_clear()
+{
+  assert(_src_region_idx == 0, "not clear");
+  assert(_partial_obj_size == 0, "not clear");
+  assert(_destination == NULL, "not clear");
+  assert(_destination_count == 0, "not clear");
+  assert(_dest_region_addr == NULL, "not clear");
+  assert(_first_src_addr == NULL, "not clear");
+}
+#endif  // #ifdef ASSERT
+
+
 #ifndef PRODUCT
 const char* PSParallelCompact::space_names[] = {
   "perm", "old ", "eden", "from", "to  "
@@ -416,21 +482,134 @@
   }
 }
 
-bool ParallelCompactData::summarize(HeapWord* target_beg, HeapWord* target_end,
-                                    HeapWord* source_beg, HeapWord* source_end,
-                                    HeapWord** target_next,
-                                    HeapWord** source_next) {
-  // This is too strict.
-  // assert(region_offset(source_beg) == 0, "not RegionSize aligned");
+// Find the point at which a space can be split and, if necessary, record the
+// split point.
+//
+// If the current src region (which overflowed the destination space) doesn't
+// have a partial object, the split point is at the beginning of the current src
+// region (an "easy" split, no extra bookkeeping required).
+//
+// If the current src region has a partial object, the split point is in the
+// region where that partial object starts (call it the split_region).  If
+// split_region has a partial object, then the split point is just after that
+// partial object (a "hard" split where we have to record the split data and
+// zero the partial_obj_size field).  With a "hard" split, we know that the
+// partial_obj ends within split_region because the partial object that caused
+// the overflow starts in split_region.  If split_region doesn't have a partial
+// obj, then the split is at the beginning of split_region (another "easy"
+// split).
+HeapWord*
+ParallelCompactData::summarize_split_space(size_t src_region,
+                                           SplitInfo& split_info,
+                                           HeapWord* destination,
+                                           HeapWord* target_end,
+                                           HeapWord** target_next)
+{
+  assert(destination <= target_end, "sanity");
+  assert(destination + _region_data[src_region].data_size() > target_end,
+    "region should not fit into target space");
+
+  size_t split_region = src_region;
+  HeapWord* split_destination = destination;
+  size_t partial_obj_size = _region_data[src_region].partial_obj_size();
+
+  if (destination + partial_obj_size > target_end) {
+    // The split point is just after the partial object (if any) in the
+    // src_region that contains the start of the object that overflowed the
+    // destination space.
+    //
+    // Find the start of the "overflow" object and set split_region to the
+    // region containing it.
+    HeapWord* const overflow_obj = _region_data[src_region].partial_obj_addr();
+    split_region = addr_to_region_idx(overflow_obj);
+
+    // Clear the source_region field of all destination regions whose first word
+    // came from data after the split point (a nonzero source_region field
+    // implies a region must be filled).
+    //
+    // An alternative to the simple loop below:  clear during post_compact(),
+    // which uses memcpy instead of individual stores, and is easy to
+    // parallelize.  (The downside is that it clears the entire RegionData
+    // object as opposed to just one field.)
+    //
+    // post_compact() would have to clear the summary data up to the highest
+    // address that was written during the summary phase, which would be
+    //
+    //         max(top, max(new_top, clear_top))
+    //
+    // where clear_top is a new field in SpaceInfo.  Would have to set clear_top
+    // to destination + partial_obj_size, where both have the values passed to
+    // this routine.
+    const RegionData* const sr = region(split_region);
+    const size_t beg_idx =
+      addr_to_region_idx(region_align_up(sr->destination() +
+                                         sr->partial_obj_size()));
+    const size_t end_idx =
+      addr_to_region_idx(region_align_up(destination + partial_obj_size));
+
+    if (TraceParallelOldGCSummaryPhase) {
+      gclog_or_tty->print_cr("split:  clearing source_region field in ["
+                             SIZE_FORMAT ", " SIZE_FORMAT ")",
+                             beg_idx, end_idx);
+    }
+    for (size_t idx = beg_idx; idx < end_idx; ++idx) {
+      _region_data[idx].set_source_region(0);
+    }
+
+    // Set split_destination and partial_obj_size to reflect the split region.
+    split_destination = sr->destination();
+    partial_obj_size = sr->partial_obj_size();
+  }
+
+  // The split is recorded only if a partial object extends onto the region.
+  if (partial_obj_size != 0) {
+    _region_data[split_region].set_partial_obj_size(0);
+    split_info.record(split_region, partial_obj_size, split_destination);
+  }
+
+  // Set up the continuation addresses.
+  *target_next = split_destination + partial_obj_size;
+  HeapWord* const source_next = region_to_addr(split_region) + partial_obj_size;
 
   if (TraceParallelOldGCSummaryPhase) {
-    tty->print_cr("tb=" PTR_FORMAT " te=" PTR_FORMAT " "
-                  "sb=" PTR_FORMAT " se=" PTR_FORMAT " "
-                  "tn=" PTR_FORMAT " sn=" PTR_FORMAT,
-                  target_beg, target_end,
-                  source_beg, source_end,
-                  target_next != 0 ? *target_next : (HeapWord*) 0,
-                  source_next != 0 ? *source_next : (HeapWord*) 0);
+    const char* split_type = partial_obj_size == 0 ? "easy" : "hard";
+    gclog_or_tty->print_cr("%s split:  src=" PTR_FORMAT " src_c=" SIZE_FORMAT
+                           " pos=" SIZE_FORMAT,
+                           split_type, source_next, split_region,
+                           partial_obj_size);
+    gclog_or_tty->print_cr("%s split:  dst=" PTR_FORMAT " dst_c=" SIZE_FORMAT
+                           " tn=" PTR_FORMAT,
+                           split_type, split_destination,
+                           addr_to_region_idx(split_destination),
+                           *target_next);
+
+    if (partial_obj_size != 0) {
+      HeapWord* const po_beg = split_info.destination();
+      HeapWord* const po_end = po_beg + split_info.partial_obj_size();
+      gclog_or_tty->print_cr("%s split:  "
+                             "po_beg=" PTR_FORMAT " " SIZE_FORMAT " "
+                             "po_end=" PTR_FORMAT " " SIZE_FORMAT,
+                             split_type,
+                             po_beg, addr_to_region_idx(po_beg),
+                             po_end, addr_to_region_idx(po_end));
+    }
+  }
+
+  return source_next;
+}
+
+bool ParallelCompactData::summarize(SplitInfo& split_info,
+                                    HeapWord* source_beg, HeapWord* source_end,
+                                    HeapWord** source_next,
+                                    HeapWord* target_beg, HeapWord* target_end,
+                                    HeapWord** target_next)
+{
+  if (TraceParallelOldGCSummaryPhase) {
+    HeapWord* const source_next_val = source_next == NULL ? NULL : *source_next;
+    tty->print_cr("sb=" PTR_FORMAT " se=" PTR_FORMAT " sn=" PTR_FORMAT
+                  "tb=" PTR_FORMAT " te=" PTR_FORMAT " tn=" PTR_FORMAT,
+                  source_beg, source_end, source_next_val,
+                  target_beg, target_end, *target_next);
   }
 
   size_t cur_region = addr_to_region_idx(source_beg);
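
The easy/hard distinction described in the block comment before
summarize_split_space() can be condensed into a toy model; the struct and
numbers below are invented for illustration and are not HotSpot code.

    #include <cstddef>
    #include <cstdio>

    // Toy summary data: per region, the size of the partial object spilling
    // onto it and the region where that object starts.
    struct ToyRegion { size_t partial_obj_size; size_t partial_obj_region; };

    // Mirrors the split-point selection: returns the region at which copying
    // to the overflowing target stops; *hard is set when a SplitInfo record
    // is needed (the split lands just after a partial object).
    size_t find_split(const ToyRegion* rd, size_t src_region,
                      size_t destination, size_t target_end, bool* hard) {
      size_t split_region = src_region;
      size_t partial = rd[src_region].partial_obj_size;
      if (destination + partial > target_end) {
        // Even the partial object overflows: back up to the region where that
        // object starts and split just after *its* partial object, if any.
        split_region = rd[src_region].partial_obj_region;
        partial = rd[split_region].partial_obj_size;
      }
      *hard = partial != 0;
      return split_region;
    }

    int main() {
      ToyRegion rd[10] = {};
      rd[9] = { 40, 8 };  // 40 words spill onto region 9 from region 8

      bool hard;
      // 100 + 40 <= 150: the partial object fits; hard split at region 9.
      std::printf("split at %zu (%s)\n",
                  find_split(rd, 9, 100, 150, &hard), hard ? "hard" : "easy");
      // 120 + 40 > 150: back up to region 8; easy split (no partial object).
      std::printf("split at %zu (%s)\n",
                  find_split(rd, 9, 120, 150, &hard), hard ? "hard" : "easy");
      return 0;
    }
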
@@ -438,45 +617,53 @@
 
   HeapWord *dest_addr = target_beg;
   while (cur_region < end_region) {
+    // The destination must be set even if the region has no data.
+    _region_data[cur_region].set_destination(dest_addr);
+
     size_t words = _region_data[cur_region].data_size();
-
-#if     1
-    assert(pointer_delta(target_end, dest_addr) >= words,
-           "source region does not fit into target region");
-#else
-    // XXX - need some work on the corner cases here.  If the region does not
-    // fit, then must either make sure any partial_obj from the region fits, or
-    // "undo" the initial part of the partial_obj that is in the previous
-    // region.
-    if (dest_addr + words >= target_end) {
-      // Let the caller know where to continue.
-      *target_next = dest_addr;
-      *source_next = region_to_addr(cur_region);
-      return false;
-    }
-#endif  // #if 1
-
-    _region_data[cur_region].set_destination(dest_addr);
-
-    // Set the destination_count for cur_region, and if necessary, update
-    // source_region for a destination region.  The source_region field is
-    // updated if cur_region is the first (left-most) region to be copied to a
-    // destination region.
-    //
-    // The destination_count calculation is a bit subtle.  A region that has
-    // data that compacts into itself does not count itself as a destination.
-    // This maintains the invariant that a zero count means the region is
-    // available and can be claimed and then filled.
     if (words > 0) {
+      // If cur_region does not fit entirely into the target space, find a point
+      // at which the source space can be 'split' so that part is copied to the
+      // target space and the rest is copied elsewhere.
+      if (dest_addr + words > target_end) {
+        assert(source_next != NULL, "source_next is NULL when splitting");
+        *source_next = summarize_split_space(cur_region, split_info, dest_addr,
+                                             target_end, target_next);
+        return false;
+      }
+
+      // Compute the destination_count for cur_region, and if necessary, update
+      // source_region for a destination region.  The source_region field is
+      // updated if cur_region is the first (left-most) region to be copied to a
+      // destination region.
+      //
+      // The destination_count calculation is a bit subtle.  A region that has
+      // data that compacts into itself does not count itself as a destination.
+      // This maintains the invariant that a zero count means the region is
+      // available and can be claimed and then filled.
+      uint destination_count = 0;
+      if (split_info.is_split(cur_region)) {
+        // The current region has been split:  the partial object will be copied
+        // to one destination space and the remaining data will be copied to
+        // another destination space.  Adjust the initial destination_count and,
+        // if necessary, set the source_region field if the partial object will
+        // cross a destination region boundary.
+        destination_count = split_info.destination_count();
+        if (destination_count == 2) {
+          size_t dest_idx = addr_to_region_idx(split_info.dest_region_addr());
+          _region_data[dest_idx].set_source_region(cur_region);
+        }
+      }
+
       HeapWord* const last_addr = dest_addr + words - 1;
       const size_t dest_region_1 = addr_to_region_idx(dest_addr);
       const size_t dest_region_2 = addr_to_region_idx(last_addr);
-#if     0
+
       // Initially assume that the destination regions will be the same and
       // adjust the value below if necessary.  Under this assumption, if
       // cur_region == dest_region_2, then cur_region will be compacted
       // completely into itself.
-      uint destination_count = cur_region == dest_region_2 ? 0 : 1;
+      destination_count += cur_region == dest_region_2 ? 0 : 1;
       if (dest_region_1 != dest_region_2) {
         // Destination regions differ; adjust destination_count.
         destination_count += 1;
@@ -487,25 +674,6 @@
         // region.
         _region_data[dest_region_1].set_source_region(cur_region);
       }
-#else
-      // Initially assume that the destination regions will be different and
-      // adjust the value below if necessary.  Under this assumption, if
-      // cur_region == dest_region2, then cur_region will be compacted partially
-      // into dest_region_1 and partially into itself.
-      uint destination_count = cur_region == dest_region_2 ? 1 : 2;
-      if (dest_region_1 != dest_region_2) {
-        // Data from cur_region will be copied to the start of dest_region_2.
-        _region_data[dest_region_2].set_source_region(cur_region);
-      } else {
-        // Destination regions are the same; adjust destination_count.
-        destination_count -= 1;
-        if (region_offset(dest_addr) == 0) {
-          // Data from cur_region will be copied to the start of the destination
-          // region.
-          _region_data[dest_region_1].set_source_region(cur_region);
-        }
-      }
-#endif  // #if 0
 
       _region_data[cur_region].set_destination_count(destination_count);
       _region_data[cur_region].set_data_location(region_to_addr(cur_region));
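
The destination_count bookkeeping above reduces to a few lines; a standalone
sketch under the same assumed RegionSize of 512 words (illustrative names, not
HotSpot code):

    #include <cassert>
    #include <cstddef>

    static const size_t RegionSize = 512;  // words per region (assumed)

    // A region that compacts into itself does not count itself as a
    // destination, preserving the invariant that a zero count marks a region
    // as available to claim and fill.
    static unsigned destination_count(size_t cur_region, size_t dest_addr,
                                      size_t words, unsigned split_count) {
      const size_t dest_region_1 = dest_addr / RegionSize;
      const size_t dest_region_2 = (dest_addr + words - 1) / RegionSize;
      unsigned count = split_count;                    // nonzero only if split
      count += (cur_region == dest_region_2) ? 0 : 1;  // self-compaction free
      if (dest_region_1 != dest_region_2) {
        count += 1;                                    // straddles a boundary
      }
      return count;
    }

    int main() {
      // Region 4 compacts entirely into itself: count stays zero.
      assert(destination_count(4, 4 * RegionSize, RegionSize, 0) == 0);
      // Region 4 compacts into regions 2 and 3: two foreign destinations.
      assert(destination_count(4, 2 * RegionSize + 100, RegionSize, 0) == 2);
      return 0;
    }
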
@@ -749,6 +917,13 @@
   const size_t end_region =
     _summary_data.addr_to_region_idx(_summary_data.region_align_up(max_top));
   _summary_data.clear_range(beg_region, end_region);
+
+  // Clear the data used to 'split' regions.
+  SplitInfo& split_info = _space_info[id].split_info();
+  if (split_info.is_valid()) {
+    split_info.clear();
+  }
+  DEBUG_ONLY(split_info.verify_clear();)
 }
 
 void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)
@@ -807,10 +982,11 @@
 {
   TraceTime tm("post compact", print_phases(), true, gclog_or_tty);
 
-  // Clear the marking bitmap and summary data and update top() in each space.
   for (unsigned int id = perm_space_id; id < last_space_id; ++id) {
+    // Clear the marking bitmap, summary data and split info.
     clear_data_covering_space(SpaceId(id));
-    _space_info[id].space()->set_top(_space_info[id].new_top());
+    // Update top().  Must be done after clearing the bitmap and summary data.
+    _space_info[id].publish_new_top();
   }
 
   MutableSpace* const eden_space = _space_info[eden_space_id].space();
@@ -1243,10 +1419,11 @@
 {
   for (unsigned int i = 0; i < last_space_id; ++i) {
     const MutableSpace* space = _space_info[i].space();
-    bool result = _summary_data.summarize(space->bottom(), space->end(),
-                                          space->bottom(), space->top(),
-                                          _space_info[i].new_top_addr());
-    assert(result, "should never fail");
+    HeapWord** nta = _space_info[i].new_top_addr();
+    bool result = _summary_data.summarize(_space_info[i].split_info(),
+                                          space->bottom(), space->top(), NULL,
+                                          space->bottom(), space->end(), nta);
+    assert(result, "space must fit into itself");
     _space_info[i].set_dense_prefix(space->bottom());
   }
 }
@@ -1308,7 +1485,7 @@
     }
 #endif  // #ifdef _LP64
 
-    gc_heap()->fill_with_object(obj_beg, obj_len);
+    CollectedHeap::fill_with_object(obj_beg, obj_len);
     _mark_bitmap.mark_obj(obj_beg, obj_len);
     _summary_data.add_obj(obj_beg, obj_len);
     assert(start_array(id) != NULL, "sanity");
@@ -1317,6 +1494,17 @@
 }
 
 void
+PSParallelCompact::clear_source_region(HeapWord* beg_addr, HeapWord* end_addr)
+{
+  RegionData* const beg_ptr = _summary_data.addr_to_region_ptr(beg_addr);
+  HeapWord* const end_aligned_up = _summary_data.region_align_up(end_addr);
+  RegionData* const end_ptr = _summary_data.addr_to_region_ptr(end_aligned_up);
+  for (RegionData* cur = beg_ptr; cur < end_ptr; ++cur) {
+    cur->set_source_region(0);
+  }
+}
+
+void
 PSParallelCompact::summarize_space(SpaceId id, bool maximum_compaction)
 {
   assert(id < last_space_id, "id out of range");
@@ -1337,20 +1525,24 @@
     }
 #endif  // #ifndef PRODUCT
 
-    // If dead space crosses the dense prefix boundary, it is (at least
-    // partially) filled with a dummy object, marked live and added to the
-    // summary data.  This simplifies the copy/update phase and must be done
-    // before the final locations of objects are determined, to prevent leaving
-    // a fragment of dead space that is too small to fill with an object.
+    // Recompute the summary data, taking into account the dense prefix.  If
+    // every last byte will be reclaimed, the existing summary data (which
+    // compacts everything) can be left in place.
     if (!maximum_compaction && dense_prefix_end != space->bottom()) {
+      // If dead space crosses the dense prefix boundary, it is (at least
+      // partially) filled with a dummy object, marked live and added to the
+      // summary data.  This simplifies the copy/update phase and must be done
+      // before the final locations of objects are determined, to prevent leaving
+      // a fragment of dead space that is too small to fill with an object.
       fill_dense_prefix_end(id);
+
+      // Compute the destination of each Region, and thus each object.
+      _summary_data.summarize_dense_prefix(space->bottom(), dense_prefix_end);
+      _summary_data.summarize(_space_info[id].split_info(),
+                              dense_prefix_end, space->top(), NULL,
+                              dense_prefix_end, space->end(),
+                              _space_info[id].new_top_addr());
     }
-
-    // Compute the destination of each Region, and thus each object.
-    _summary_data.summarize_dense_prefix(space->bottom(), dense_prefix_end);
-    _summary_data.summarize(dense_prefix_end, space->end(),
-                            dense_prefix_end, space->top(),
-                            _space_info[id].new_top_addr());
   }
 
   if (TraceParallelOldGCSummaryPhase) {
@@ -1370,6 +1562,30 @@
   }
 }
 
+#ifndef PRODUCT
+void PSParallelCompact::summary_phase_msg(SpaceId dst_space_id,
+                                          HeapWord* dst_beg, HeapWord* dst_end,
+                                          SpaceId src_space_id,
+                                          HeapWord* src_beg, HeapWord* src_end)
+{
+  if (TraceParallelOldGCSummaryPhase) {
+    tty->print_cr("summarizing %d [%s] into %d [%s]:  "
+                  "src=" PTR_FORMAT "-" PTR_FORMAT " "
+                  SIZE_FORMAT "-" SIZE_FORMAT " "
+                  "dst=" PTR_FORMAT "-" PTR_FORMAT " "
+                  SIZE_FORMAT "-" SIZE_FORMAT,
+                  src_space_id, space_names[src_space_id],
+                  dst_space_id, space_names[dst_space_id],
+                  src_beg, src_end,
+                  _summary_data.addr_to_region_idx(src_beg),
+                  _summary_data.addr_to_region_idx(src_end),
+                  dst_beg, dst_end,
+                  _summary_data.addr_to_region_idx(dst_beg),
+                  _summary_data.addr_to_region_idx(dst_end));
+  }
+}
+#endif  // #ifndef PRODUCT
+
 void PSParallelCompact::summary_phase(ParCompactionManager* cm,
                                       bool maximum_compaction)
 {
@@ -1402,57 +1618,90 @@
 
   // The amount of live data that will end up in old space (assuming it fits).
   size_t old_space_total_live = 0;
-  unsigned int id;
-  for (id = old_space_id; id < last_space_id; ++id) {
+  assert(perm_space_id < old_space_id, "should not count perm data here");
+  for (unsigned int id = old_space_id; id < last_space_id; ++id) {
     old_space_total_live += pointer_delta(_space_info[id].new_top(),
                                           _space_info[id].space()->bottom());
   }
 
-  const MutableSpace* old_space = _space_info[old_space_id].space();
+  MutableSpace* const old_space = _space_info[old_space_id].space();
   if (old_space_total_live > old_space->capacity_in_words()) {
     // XXX - should also try to expand
     maximum_compaction = true;
-  } else if (!UseParallelOldGCDensePrefix) {
-    maximum_compaction = true;
   }
 
   // Permanent and Old generations.
   summarize_space(perm_space_id, maximum_compaction);
   summarize_space(old_space_id, maximum_compaction);
 
-  // Summarize the remaining spaces (those in the young gen) into old space.  If
-  // the live data from a space doesn't fit, the existing summarization is left
-  // intact, so the data is compacted down within the space itself.
-  HeapWord** new_top_addr = _space_info[old_space_id].new_top_addr();
-  HeapWord* const target_space_end = old_space->end();
-  for (id = eden_space_id; id < last_space_id; ++id) {
+  // Summarize the remaining spaces in the young gen.  The initial target space
+  // is the old gen.  If a space does not fit entirely into the target, then the
+  // remainder is compacted into the space itself and that space becomes the new
+  // target.
+  SpaceId dst_space_id = old_space_id;
+  HeapWord* dst_space_end = old_space->end();
+  HeapWord** new_top_addr = _space_info[dst_space_id].new_top_addr();
+  for (unsigned int id = eden_space_id; id < last_space_id; ++id) {
     const MutableSpace* space = _space_info[id].space();
     const size_t live = pointer_delta(_space_info[id].new_top(),
                                       space->bottom());
-    const size_t available = pointer_delta(target_space_end, *new_top_addr);
+    const size_t available = pointer_delta(dst_space_end, *new_top_addr);
+
+    NOT_PRODUCT(summary_phase_msg(dst_space_id, *new_top_addr, dst_space_end,
+                                  SpaceId(id), space->bottom(), space->top());)
     if (live > 0 && live <= available) {
       // All the live data will fit.
-      if (TraceParallelOldGCSummaryPhase) {
-        tty->print_cr("summarizing %d into old_space @ " PTR_FORMAT,
-                      id, *new_top_addr);
-      }
-      _summary_data.summarize(*new_top_addr, target_space_end,
-                              space->bottom(), space->top(),
-                              new_top_addr);
-
+      bool done = _summary_data.summarize(_space_info[id].split_info(),
+                                          space->bottom(), space->top(),
+                                          NULL,
+                                          *new_top_addr, dst_space_end,
+                                          new_top_addr);
+      assert(done, "space must fit into old gen");
+
+      // XXX - this is necessary because decrement_destination_counts() tests
+      // source_region() to determine if a region will be filled.  Probably
+      // better to pass src_space->new_top() into decrement_destination_counts
+      // and test that instead.
+      //
       // Clear the source_region field for each region in the space.
-      HeapWord* const new_top = _space_info[id].new_top();
-      HeapWord* const clear_end = _summary_data.region_align_up(new_top);
-      RegionData* beg_region =
-        _summary_data.addr_to_region_ptr(space->bottom());
-      RegionData* end_region = _summary_data.addr_to_region_ptr(clear_end);
-      while (beg_region < end_region) {
-        beg_region->set_source_region(0);
-        ++beg_region;
-      }
+      clear_source_region(space->bottom(), _space_info[id].new_top());
 
       // Reset the new_top value for the space.
       _space_info[id].set_new_top(space->bottom());
+    } else if (live > 0) {
+      // Attempt to fit part of the source space into the target space.
+      HeapWord* next_src_addr = NULL;
+      bool done = _summary_data.summarize(_space_info[id].split_info(),
+                                          space->bottom(), space->top(),
+                                          &next_src_addr,
+                                          *new_top_addr, dst_space_end,
+                                          new_top_addr);
+      assert(!done, "space should not fit into old gen");
+      assert(next_src_addr != NULL, "sanity");
+
+      // The source space becomes the new target, so the remainder is compacted
+      // within the space itself.
+      dst_space_id = SpaceId(id);
+      dst_space_end = space->end();
+      new_top_addr = _space_info[id].new_top_addr();
+      HeapWord* const clear_end = _space_info[id].new_top();
+      NOT_PRODUCT(summary_phase_msg(dst_space_id,
+                                    space->bottom(), dst_space_end,
+                                    SpaceId(id), next_src_addr, space->top());)
+      done = _summary_data.summarize(_space_info[id].split_info(),
+                                     next_src_addr, space->top(),
+                                     NULL,
+                                     space->bottom(), dst_space_end,
+                                     new_top_addr);
+      assert(done, "space must fit when compacted into itself");
+      assert(*new_top_addr <= space->top(), "usage should not grow");
+
+      // XXX - this should go away.  See comments above.
+      //
+      // Clear the source_region field in regions at the end of the space that
+      // will not be filled.
+      HeapWord* const clear_beg = _summary_data.region_align_up(*new_top_addr);
+      clear_source_region(clear_beg, clear_end);
     }
   }
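
The retargeting policy in the loop above can be pictured with a toy capacity
model.  The simulation below splits at arbitrary word granularity, whereas the
real code splits at object/region boundaries; all numbers and names are
invented for illustration.

    #include <cstddef>
    #include <cstdio>
    #include <initializer_list>

    struct Space { const char* name; size_t live; size_t capacity; size_t used; };

    int main() {
      Space old_gen = { "old",  0,   1000, 850 };  // old gen keeps its own data
      Space eden    = { "eden", 700, 800,  0 };
      Space from    = { "from", 500, 600,  0 };
      Space to      = { "to",   0,   600,  0 };

      Space* dst = &old_gen;                 // the initial target is the old gen
      for (Space* src : { &eden, &from, &to }) {
        const size_t available = dst->capacity - dst->used;
        if (src->live <= available) {
          dst->used += src->live;            // the whole space fits
          std::printf("%s -> %s\n", src->name, dst->name);
        } else {
          dst->used += available;            // the prefix fills the target (split)
          src->used = src->live - available; // the remainder compacts in place
          std::printf("%s: %zu words -> %s, %zu words stay\n",
                      src->name, available, dst->name, src->used);
          dst = src;                         // src becomes the target for later spaces
        }
      }
      return 0;
    }
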
 
@@ -2051,14 +2300,13 @@
   // regions in the dense prefix.  Assume that 1 gc thread
   // will work on opening the gaps and the remaining gc threads
   // will work on the dense prefix.
-  SpaceId space_id = old_space_id;
-  while (space_id != last_space_id) {
+  unsigned int space_id;
+  for (space_id = old_space_id; space_id < last_space_id; ++space_id) {
     HeapWord* const dense_prefix_end = _space_info[space_id].dense_prefix();
     const MutableSpace* const space = _space_info[space_id].space();
 
     if (dense_prefix_end == space->bottom()) {
       // There is no dense prefix for this space.
-      space_id = next_compaction_space_id(space_id);
       continue;
     }
 
@@ -2108,23 +2356,20 @@
         // region_index_end is not processed
         size_t region_index_end = MIN2(region_index_start + regions_per_thread,
                                        region_index_end_dense_prefix);
-        q->enqueue(new UpdateDensePrefixTask(
-                                 space_id,
-                                 region_index_start,
-                                 region_index_end));
+        q->enqueue(new UpdateDensePrefixTask(SpaceId(space_id),
+                                             region_index_start,
+                                             region_index_end));
         region_index_start = region_index_end;
       }
     }
     // This gets any part of the dense prefix that did not
     // fit evenly.
     if (region_index_start < region_index_end_dense_prefix) {
-      q->enqueue(new UpdateDensePrefixTask(
-                                 space_id,
-                                 region_index_start,
-                                 region_index_end_dense_prefix));
+      q->enqueue(new UpdateDensePrefixTask(SpaceId(space_id),
+                                           region_index_start,
+                                           region_index_end_dense_prefix));
     }
-    space_id = next_compaction_space_id(space_id);
-  }  // End tasks for dense prefix
+  }
 }
 
 void PSParallelCompact::enqueue_region_stealing_tasks(
@@ -2570,16 +2815,24 @@
   return m->bit_to_addr(cur_beg);
 }
 
-HeapWord*
-PSParallelCompact::first_src_addr(HeapWord* const dest_addr,
-                                 size_t src_region_idx)
+HeapWord* PSParallelCompact::first_src_addr(HeapWord* const dest_addr,
+                                            SpaceId src_space_id,
+                                            size_t src_region_idx)
 {
+  assert(summary_data().is_region_aligned(dest_addr), "not aligned");
+
+  const SplitInfo& split_info = _space_info[src_space_id].split_info();
+  if (split_info.dest_region_addr() == dest_addr) {
+    // The partial object ending at the split point contains the first word to
+    // be copied to dest_addr.
+    return split_info.first_src_addr();
+  }
+
+  const ParallelCompactData& sd = summary_data();
   ParMarkBitMap* const bitmap = mark_bitmap();
-  const ParallelCompactData& sd = summary_data();
   const size_t RegionSize = ParallelCompactData::RegionSize;
 
   assert(sd.is_region_aligned(dest_addr), "not aligned");
-
   const RegionData* const src_region_ptr = sd.region(src_region_idx);
   const size_t partial_obj_size = src_region_ptr->partial_obj_size();
   HeapWord* const src_region_destination = src_region_ptr->destination();
@@ -2740,7 +2993,7 @@
   HeapWord* src_space_top = _space_info[src_space_id].space()->top();
 
   MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words);
-  closure.set_source(first_src_addr(dest_addr, src_region_idx));
+  closure.set_source(first_src_addr(dest_addr, src_space_id, src_region_idx));
 
   // Adjust src_region_idx to prepare for decrementing destination counts (the
   // destination count is not decremented when a region is copied to itself).
@@ -3011,34 +3264,3 @@
     summary_data().calc_new_pointer(Universe::intArrayKlassObj());
 }
 
-// The initial implementation of this method created a field
-// _next_compaction_space_id in SpaceInfo and initialized
-// that field in SpaceInfo::initialize_space_info().  That
-// required that _next_compaction_space_id be declared a
-// SpaceId in SpaceInfo and that would have required that
-// either SpaceId be declared in a separate class or that
-// it be declared in SpaceInfo.  It didn't seem consistent
-// to declare it in SpaceInfo (didn't really fit logically).
-// Alternatively, defining a separate class to define SpaceId
-// seem excessive.  This implementation is simple and localizes
-// the knowledge.
-
-PSParallelCompact::SpaceId
-PSParallelCompact::next_compaction_space_id(SpaceId id) {
-  assert(id < last_space_id, "id out of range");
-  switch (id) {
-    case perm_space_id :
-      return last_space_id;
-    case old_space_id :
-      return eden_space_id;
-    case eden_space_id :
-      return from_space_id;
-    case from_space_id :
-      return to_space_id;
-    case to_space_id :
-      return last_space_id;
-    default:
-      assert(false, "Bad space id");
-      return last_space_id;
-  }
-}
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Thu Dec 11 12:05:08 2008 -0800
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Thu Dec 11 12:05:14 2008 -0800
@@ -36,6 +36,123 @@
 class MoveAndUpdateClosure;
 class RefProcTaskExecutor;
 
+// The SplitInfo class holds the information needed to 'split' a source region
+// so that the live data can be copied to two destination *spaces*.  Normally,
+// all the live data in a region is copied to a single destination space (e.g.,
+// everything live in a region in eden is copied entirely into the old gen).
+// However, when the heap is nearly full, all the live data in eden may not fit
+// into the old gen.  Copying only some of the regions from eden to old gen
+// requires finding a region that does not contain a partial object (i.e., no
+// live object crosses the region boundary) somewhere near the last object that
+// does fit into the old gen.  Since it's not always possible to find such a
+// region, splitting is necessary for predictable behavior.
+//
+// A region is always split at the end of the partial object.  This avoids
+// additional tests when calculating the new location of a pointer, which is a
+// very hot code path.  The partial object and everything to its left will be
+// copied to another space (call it dest_space_1).  The live data to the right
+// of the partial object will be copied either within the space itself, or to a
+// different destination space (distinct from dest_space_1).
+//
+// Split points are identified during the summary phase, when region
+// destinations are computed:  data about the split, including the
+// partial_obj_size, is recorded in a SplitInfo record and the
+// partial_obj_size field in the summary data is set to zero.  The zeroing is
+// possible (and necessary) since the partial object will move to a different
+// destination space than anything to its right, thus the partial object should
+// not affect the locations of any objects to its right.
+//
+// The recorded data is used during the compaction phase, but only rarely:  when
+// the partial object on the split region will be copied across a destination
+// region boundary.  This test is made once each time a region is filled, and is
+// a simple address comparison, so the overhead is negligible (see
+// PSParallelCompact::first_src_addr()).
+//
+// Notes:
+//
+// Only regions with partial objects are split; a region without a partial
+// object does not need any extra bookkeeping.
+//
+// At most one region is split per space, so the amount of data required is
+// constant.
+//
+// A region is split only when the destination space would overflow.  Once that
+// happens, the destination space is abandoned and no other data (even from
+// other source spaces) is targeted to that destination space.  Abandoning the
+// destination space may leave a somewhat large unused area at the end, if a
+// large object caused the overflow.
+//
+// Future work:
+//
+// More bookkeeping would be required to continue to use the destination space.
+// The most general solution would allow data from regions in two different
+// source spaces to be "joined" in a single destination region.  At the very
+// least, additional code would be required in next_src_region() to detect the
+// join and skip to an out-of-order source region.  If the join region was also
+// the last destination region to which a split region was copied (the most
+// likely case), then additional work would be needed to get fill_region() to
+// stop iteration and switch to a new source region at the right point.  Basic
+// idea would be to use a fake value for the top of the source space.  It is
+// doable, if a bit tricky.
+//
+// A simpler (but less general) solution would fill the remainder of the
+// destination region with a dummy object and continue filling the next
+// destination region.
+
+class SplitInfo
+{
+public:
+  // Return true if this split info is valid (i.e., if a split has been
+  // recorded).  The very first region cannot have a partial object and thus is
+  // never split, so 0 is the 'invalid' value.
+  bool is_valid() const { return _src_region_idx > 0; }
+
+  // Return true if this split holds data for the specified source region.
+  inline bool is_split(size_t source_region) const;
+
+  // The index of the split region, the size of the partial object on that
+  // region and the destination of the partial object.
+  size_t    src_region_idx() const   { return _src_region_idx; }
+  size_t    partial_obj_size() const { return _partial_obj_size; }
+  HeapWord* destination() const      { return _destination; }
+
+  // The destination count of the partial object referenced by this split
+  // (either 1 or 2).  This must be added to the destination count of the
+  // remainder of the source region.
+  unsigned int destination_count() const { return _destination_count; }
+
+  // If a word within the partial object will be written to the first word of a
+  // destination region, this is the address of the destination region;
+  // otherwise this is NULL.
+  HeapWord* dest_region_addr() const     { return _dest_region_addr; }
+
+  // If a word within the partial object will be written to the first word of a
+  // destination region, this is the address of that word within the partial
+  // object; otherwise this is NULL.
+  HeapWord* first_src_addr() const       { return _first_src_addr; }
+
+  // Record the data necessary to split the region src_region_idx.
+  void record(size_t src_region_idx, size_t partial_obj_size,
+              HeapWord* destination);
+
+  void clear();
+
+  DEBUG_ONLY(void verify_clear();)
+
+private:
+  size_t       _src_region_idx;
+  size_t       _partial_obj_size;
+  HeapWord*    _destination;
+  unsigned int _destination_count;
+  HeapWord*    _dest_region_addr;
+  HeapWord*    _first_src_addr;
+};
+
+inline bool SplitInfo::is_split(size_t region_idx) const
+{
+  return _src_region_idx == region_idx && is_valid();
+}
+
 class SpaceInfo
 {
  public:
@@ -58,18 +175,23 @@
   // is no start array.
   ObjectStartArray* start_array() const { return _start_array; }
 
+  SplitInfo& split_info() { return _split_info; }
+
   void set_space(MutableSpace* s)           { _space = s; }
   void set_new_top(HeapWord* addr)          { _new_top = addr; }
   void set_min_dense_prefix(HeapWord* addr) { _min_dense_prefix = addr; }
   void set_dense_prefix(HeapWord* addr)     { _dense_prefix = addr; }
   void set_start_array(ObjectStartArray* s) { _start_array = s; }
 
+  void publish_new_top() const              { _space->set_top(_new_top); }
+
  private:
   MutableSpace*     _space;
   HeapWord*         _new_top;
   HeapWord*         _min_dense_prefix;
   HeapWord*         _dense_prefix;
   ObjectStartArray* _start_array;
+  SplitInfo         _split_info;
 };
 
 class ParallelCompactData
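
A compressed mirror of the SplitInfo lifecycle implied by the class above, with
HeapWord* reduced to size_t so it runs standalone (illustrative only):

    #include <cassert>
    #include <cstddef>

    // Region 0 can never hold a partial object and thus is never split, so
    // index 0 doubles as the 'invalid' marker, as in SplitInfo::is_valid().
    struct MiniSplitInfo {
      size_t src_region_idx;
      size_t partial_obj_size;
      size_t destination;

      bool is_valid() const { return src_region_idx > 0; }
      bool is_split(size_t r) const { return r == src_region_idx && is_valid(); }
      void record(size_t r, size_t size, size_t dest) {
        src_region_idx = r; partial_obj_size = size; destination = dest;
      }
      void clear() { src_region_idx = partial_obj_size = destination = 0; }
    };

    int main() {
      MiniSplitInfo si;
      si.clear();                    // each GC cycle starts with a clear record
      assert(!si.is_valid());
      si.record(17, 40, 9000);       // recorded while summarizing a space
      assert(si.is_split(17) && !si.is_split(16));
      si.clear();                    // post_compact() clears it for the next GC
      assert(!si.is_valid());
      return 0;
    }
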
@@ -230,9 +352,14 @@
   // must be region-aligned; end need not be.
   void summarize_dense_prefix(HeapWord* beg, HeapWord* end);
 
-  bool summarize(HeapWord* target_beg, HeapWord* target_end,
+  HeapWord* summarize_split_space(size_t src_region, SplitInfo& split_info,
+                                  HeapWord* destination, HeapWord* target_end,
+                                  HeapWord** target_next);
+  bool summarize(SplitInfo& split_info,
                  HeapWord* source_beg, HeapWord* source_end,
-                 HeapWord** target_next, HeapWord** source_next = 0);
+                 HeapWord** source_next,
+                 HeapWord* target_beg, HeapWord* target_end,
+                 HeapWord** target_next);
 
   void clear();
   void clear_range(size_t beg_region, size_t end_region);
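
With the reordered summarize() declaration above (the source triple now comes
before the target triple), a call site looks like the fragment below, which
mirrors the code added in summary_phase(); it is shown only to make the
parameter grouping concrete.

    HeapWord* next_src_addr = NULL;
    bool done = _summary_data.summarize(_space_info[id].split_info(),
                                        space->bottom(), space->top(), // source
                                        &next_src_addr,                // set if split
                                        *new_top_addr, dst_space_end,  // target
                                        new_top_addr);
    // done == false means the source did not fit; next_src_addr is where
    // copying into the next target must resume.  Pass NULL for source_next
    // only when the caller can assert that the data must fit.
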
@@ -838,13 +965,13 @@
   // non-empty.
   static void fill_dense_prefix_end(SpaceId id);
 
+  // Clear the summary data source_region field for the specified addresses.
+  static void clear_source_region(HeapWord* beg_addr, HeapWord* end_addr);
+
   static void summarize_spaces_quick();
   static void summarize_space(SpaceId id, bool maximum_compaction);
   static void summary_phase(ParCompactionManager* cm, bool maximum_compaction);
 
-  // The space that is compacted after space_id.
-  static SpaceId next_compaction_space_id(SpaceId space_id);
-
   // Adjust addresses in roots.  Does not adjust addresses in heap.
   static void adjust_roots();
 
@@ -999,6 +1126,7 @@
   // Return the address of the word to be copied to dest_addr, which must be
   // aligned to a region boundary.
   static HeapWord* first_src_addr(HeapWord* const dest_addr,
+                                  SpaceId src_space_id,
                                   size_t src_region_idx);
 
   // Determine the next source region, set closure.source() to the start of the
@@ -1081,6 +1209,10 @@
                                        const SpaceId id,
                                        const bool maximum_compaction,
                                        HeapWord* const addr);
+  static void summary_phase_msg(SpaceId dst_space_id,
+                                HeapWord* dst_beg, HeapWord* dst_end,
+                                SpaceId src_space_id,
+                                HeapWord* src_beg, HeapWord* src_end);
 #endif  // #ifndef PRODUCT
 
 #ifdef  ASSERT