comparison src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp @ 484:ffe19141e312

Merge
author jmasa
date Fri, 12 Dec 2008 15:37:46 -0800
parents 0f773163217d
children 234c22e54b98
comparing 479:06d2c3204df4 with 484:ffe19141e312
86 GrowableArray<HeapWord*>* PSParallelCompact::_last_gc_live_oops = NULL; 86 GrowableArray<HeapWord*>* PSParallelCompact::_last_gc_live_oops = NULL;
87 GrowableArray<HeapWord*>* PSParallelCompact::_last_gc_live_oops_moved_to = NULL; 87 GrowableArray<HeapWord*>* PSParallelCompact::_last_gc_live_oops_moved_to = NULL;
88 GrowableArray<size_t> * PSParallelCompact::_last_gc_live_oops_size = NULL; 88 GrowableArray<size_t> * PSParallelCompact::_last_gc_live_oops_size = NULL;
89 #endif 89 #endif
90 90
91 void SplitInfo::record(size_t src_region_idx, size_t partial_obj_size,
92 HeapWord* destination)
93 {
94 assert(src_region_idx != 0, "invalid src_region_idx");
95 assert(partial_obj_size != 0, "invalid partial_obj_size argument");
96 assert(destination != NULL, "invalid destination argument");
97
98 _src_region_idx = src_region_idx;
99 _partial_obj_size = partial_obj_size;
100 _destination = destination;
101
102 // These fields may not be updated below, so make sure they're clear.
103 assert(_dest_region_addr == NULL, "should have been cleared");
104 assert(_first_src_addr == NULL, "should have been cleared");
105
106 // Determine the number of destination regions for the partial object.
107 HeapWord* const last_word = destination + partial_obj_size - 1;
108 const ParallelCompactData& sd = PSParallelCompact::summary_data();
109 HeapWord* const beg_region_addr = sd.region_align_down(destination);
110 HeapWord* const end_region_addr = sd.region_align_down(last_word);
111
112 if (beg_region_addr == end_region_addr) {
113 // One destination region.
114 _destination_count = 1;
115 if (end_region_addr == destination) {
116 // The destination falls on a region boundary, thus the first word of the
117 // partial object will be the first word copied to the destination region.
118 _dest_region_addr = end_region_addr;
119 _first_src_addr = sd.region_to_addr(src_region_idx);
120 }
121 } else {
122 // Two destination regions. When copied, the partial object will cross a
123 // destination region boundary, so a word somewhere within the partial
124 // object will be the first word copied to the second destination region.
125 _destination_count = 2;
126 _dest_region_addr = end_region_addr;
127 const size_t ofs = pointer_delta(end_region_addr, destination);
128 assert(ofs < _partial_obj_size, "sanity");
129 _first_src_addr = sd.region_to_addr(src_region_idx) + ofs;
130 }
131 }
132
133 void SplitInfo::clear()
134 {
135 _src_region_idx = 0;
136 _partial_obj_size = 0;
137 _destination = NULL;
138 _destination_count = 0;
139 _dest_region_addr = NULL;
140 _first_src_addr = NULL;
141 assert(!is_valid(), "sanity");
142 }
143
144 #ifdef ASSERT
145 void SplitInfo::verify_clear()
146 {
147 assert(_src_region_idx == 0, "not clear");
148 assert(_partial_obj_size == 0, "not clear");
149 assert(_destination == NULL, "not clear");
150 assert(_destination_count == 0, "not clear");
151 assert(_dest_region_addr == NULL, "not clear");
152 assert(_first_src_addr == NULL, "not clear");
153 }
154 #endif // #ifdef ASSERT
155
156
91 #ifndef PRODUCT 157 #ifndef PRODUCT
92 const char* PSParallelCompact::space_names[] = { 158 const char* PSParallelCompact::space_names[] = {
93 "perm", "old ", "eden", "from", "to " 159 "perm", "old ", "eden", "from", "to "
94 }; 160 };
95 161
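The SplitInfo::record() logic added above hinges on whether the partial object, once copied to its destination, stays inside one destination region or crosses into a second one. Below is a minimal standalone sketch of just that decision; the word-indexed addresses, RegionSizeWords and region_align_down() are simplifying assumptions standing in for the real ParallelCompactData helpers.

#include <cassert>
#include <cstddef>

static const size_t RegionSizeWords = 512;      // assumed region size in words (power of two)

static size_t region_align_down(size_t addr) {  // round down to a region boundary
  return addr & ~(RegionSizeWords - 1);
}

// Returns 1 if [destination, destination + partial_obj_size) fits in a single
// destination region, 2 if it straddles a region boundary (the two-region case
// handled by SplitInfo::record()).
static unsigned destination_count_for(size_t destination, size_t partial_obj_size) {
  assert(partial_obj_size != 0 && "a split is only recorded for a partial object");
  const size_t last_word = destination + partial_obj_size - 1;
  return region_align_down(destination) == region_align_down(last_word) ? 1 : 2;
}

In the two-region case, record() additionally computes how many words of the partial object land in front of the region boundary (pointer_delta(end_region_addr, destination)) and uses that offset into the source region as first_src_addr, the word that will be copied first into the second destination region.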
414 ++cur_region; 480 ++cur_region;
415 addr += RegionSize; 481 addr += RegionSize;
416 } 482 }
417 } 483 }
418 484
419 bool ParallelCompactData::summarize(HeapWord* target_beg, HeapWord* target_end, 485 // Find the point at which a space can be split and, if necessary, record the
486 // split point.
487 //
488 // If the current src region (which overflowed the destination space) doesn't
489 // have a partial object, the split point is at the beginning of the current src
490 // region (an "easy" split, no extra bookkeeping required).
491 //
492 // If the current src region has a partial object, the split point is in the
493 // region where that partial object starts (call it the split_region). If
494 // split_region has a partial object, then the split point is just after that
495 // partial object (a "hard" split where we have to record the split data and
496 // zero the partial_obj_size field). With a "hard" split, we know that the
497 // partial_obj ends within split_region because the partial object that caused
498 // the overflow starts in split_region. If split_region doesn't have a partial
499 // obj, then the split is at the beginning of split_region (another "easy"
500 // split).
501 HeapWord*
502 ParallelCompactData::summarize_split_space(size_t src_region,
503 SplitInfo& split_info,
504 HeapWord* destination,
505 HeapWord* target_end,
506 HeapWord** target_next)
507 {
508 assert(destination <= target_end, "sanity");
509 assert(destination + _region_data[src_region].data_size() > target_end,
510 "region should not fit into target space");
511
512 size_t split_region = src_region;
513 HeapWord* split_destination = destination;
514 size_t partial_obj_size = _region_data[src_region].partial_obj_size();
515
516 if (destination + partial_obj_size > target_end) {
517 // The split point is just after the partial object (if any) in the
518 // src_region that contains the start of the object that overflowed the
519 // destination space.
520 //
521 // Find the start of the "overflow" object and set split_region to the
522 // region containing it.
523 HeapWord* const overflow_obj = _region_data[src_region].partial_obj_addr();
524 split_region = addr_to_region_idx(overflow_obj);
525
526 // Clear the source_region field of all destination regions whose first word
527 // came from data after the split point (a non-null source_region field
528 // implies a region must be filled).
529 //
530 // An alternative to the simple loop below: clear during post_compact(),
531 // which uses memcpy instead of individual stores, and is easy to
532 // parallelize. (The downside is that it clears the entire RegionData
533 // object as opposed to just one field.)
534 //
535 // post_compact() would have to clear the summary data up to the highest
536 // address that was written during the summary phase, which would be
537 //
538 // max(top, max(new_top, clear_top))
539 //
540 // where clear_top is a new field in SpaceInfo. Would have to set clear_top
541 // to destination + partial_obj_size, where both have the values passed to
542 // this routine.
543 const RegionData* const sr = region(split_region);
544 const size_t beg_idx =
545 addr_to_region_idx(region_align_up(sr->destination() +
546 sr->partial_obj_size()));
547 const size_t end_idx =
548 addr_to_region_idx(region_align_up(destination + partial_obj_size));
549
550 if (TraceParallelOldGCSummaryPhase) {
551 gclog_or_tty->print_cr("split: clearing source_region field in ["
552 SIZE_FORMAT ", " SIZE_FORMAT ")",
553 beg_idx, end_idx);
554 }
555 for (size_t idx = beg_idx; idx < end_idx; ++idx) {
556 _region_data[idx].set_source_region(0);
557 }
558
559 // Set split_destination and partial_obj_size to reflect the split region.
560 split_destination = sr->destination();
561 partial_obj_size = sr->partial_obj_size();
562 }
563
564 // The split is recorded only if a partial object extends onto the region.
565 if (partial_obj_size != 0) {
566 _region_data[split_region].set_partial_obj_size(0);
567 split_info.record(split_region, partial_obj_size, split_destination);
568 }
569
570 // Setup the continuation addresses.
571 *target_next = split_destination + partial_obj_size;
572 HeapWord* const source_next = region_to_addr(split_region) + partial_obj_size;
573
574 if (TraceParallelOldGCSummaryPhase) {
575 const char * split_type = partial_obj_size == 0 ? "easy" : "hard";
576 gclog_or_tty->print_cr("%s split: src=" PTR_FORMAT " src_c=" SIZE_FORMAT
577 " pos=" SIZE_FORMAT,
578 split_type, source_next, split_region,
579 partial_obj_size);
580 gclog_or_tty->print_cr("%s split: dst=" PTR_FORMAT " dst_c=" SIZE_FORMAT
581 " tn=" PTR_FORMAT,
582 split_type, split_destination,
583 addr_to_region_idx(split_destination),
584 *target_next);
585
586 if (partial_obj_size != 0) {
587 HeapWord* const po_beg = split_info.destination();
588 HeapWord* const po_end = po_beg + split_info.partial_obj_size();
589 gclog_or_tty->print_cr("%s split: "
590 "po_beg=" PTR_FORMAT " " SIZE_FORMAT " "
591 "po_end=" PTR_FORMAT " " SIZE_FORMAT,
592 split_type,
593 po_beg, addr_to_region_idx(po_beg),
594 po_end, addr_to_region_idx(po_end));
595 }
596 }
597
598 return source_next;
599 }
600
601 bool ParallelCompactData::summarize(SplitInfo& split_info,
420 HeapWord* source_beg, HeapWord* source_end, 602 HeapWord* source_beg, HeapWord* source_end,
421 HeapWord** target_next, 603 HeapWord** source_next,
422 HeapWord** source_next) { 604 HeapWord* target_beg, HeapWord* target_end,
423 // This is too strict. 605 HeapWord** target_next)
424 // assert(region_offset(source_beg) == 0, "not RegionSize aligned"); 606 {
425
426 if (TraceParallelOldGCSummaryPhase) { 607 if (TraceParallelOldGCSummaryPhase) {
427 tty->print_cr("tb=" PTR_FORMAT " te=" PTR_FORMAT " " 608 HeapWord* const source_next_val = source_next == NULL ? NULL : *source_next;
428 "sb=" PTR_FORMAT " se=" PTR_FORMAT " " 609 tty->print_cr("sb=" PTR_FORMAT " se=" PTR_FORMAT " sn=" PTR_FORMAT
429 "tn=" PTR_FORMAT " sn=" PTR_FORMAT, 610 "tb=" PTR_FORMAT " te=" PTR_FORMAT " tn=" PTR_FORMAT,
430 target_beg, target_end, 611 source_beg, source_end, source_next_val,
431 source_beg, source_end, 612 target_beg, target_end, *target_next);
432 target_next != 0 ? *target_next : (HeapWord*) 0,
433 source_next != 0 ? *source_next : (HeapWord*) 0);
434 } 613 }
435 614
436 size_t cur_region = addr_to_region_idx(source_beg); 615 size_t cur_region = addr_to_region_idx(source_beg);
437 const size_t end_region = addr_to_region_idx(region_align_up(source_end)); 616 const size_t end_region = addr_to_region_idx(region_align_up(source_end));
438 617
439 HeapWord *dest_addr = target_beg; 618 HeapWord *dest_addr = target_beg;
440 while (cur_region < end_region) { 619 while (cur_region < end_region) {
620 // The destination must be set even if the region has no data.
621 _region_data[cur_region].set_destination(dest_addr);
622
441 size_t words = _region_data[cur_region].data_size(); 623 size_t words = _region_data[cur_region].data_size();
442
443 #if 1
444 assert(pointer_delta(target_end, dest_addr) >= words,
445 "source region does not fit into target region");
446 #else
447 // XXX - need some work on the corner cases here. If the region does not
448 // fit, then must either make sure any partial_obj from the region fits, or
449 // "undo" the initial part of the partial_obj that is in the previous
450 // region.
451 if (dest_addr + words >= target_end) {
452 // Let the caller know where to continue.
453 *target_next = dest_addr;
454 *source_next = region_to_addr(cur_region);
455 return false;
456 }
457 #endif // #if 1
458
459 _region_data[cur_region].set_destination(dest_addr);
460
461 // Set the destination_count for cur_region, and if necessary, update
462 // source_region for a destination region. The source_region field is
463 // updated if cur_region is the first (left-most) region to be copied to a
464 // destination region.
465 //
466 // The destination_count calculation is a bit subtle. A region that has
467 // data that compacts into itself does not count itself as a destination.
468 // This maintains the invariant that a zero count means the region is
469 // available and can be claimed and then filled.
470 if (words > 0) { 624 if (words > 0) {
625 // If cur_region does not fit entirely into the target space, find a point
626 // at which the source space can be 'split' so that part is copied to the
627 // target space and the rest is copied elsewhere.
628 if (dest_addr + words > target_end) {
629 assert(source_next != NULL, "source_next is NULL when splitting");
630 *source_next = summarize_split_space(cur_region, split_info, dest_addr,
631 target_end, target_next);
632 return false;
633 }
634
635 // Compute the destination_count for cur_region, and if necessary, update
636 // source_region for a destination region. The source_region field is
637 // updated if cur_region is the first (left-most) region to be copied to a
638 // destination region.
639 //
640 // The destination_count calculation is a bit subtle. A region that has
641 // data that compacts into itself does not count itself as a destination.
642 // This maintains the invariant that a zero count means the region is
643 // available and can be claimed and then filled.
644 uint destination_count = 0;
645 if (split_info.is_split(cur_region)) {
646 // The current region has been split: the partial object will be copied
647 // to one destination space and the remaining data will be copied to
648 // another destination space. Adjust the initial destination_count and,
649 // if necessary, set the source_region field if the partial object will
650 // cross a destination region boundary.
651 destination_count = split_info.destination_count();
652 if (destination_count == 2) {
653 size_t dest_idx = addr_to_region_idx(split_info.dest_region_addr());
654 _region_data[dest_idx].set_source_region(cur_region);
655 }
656 }
657
471 HeapWord* const last_addr = dest_addr + words - 1; 658 HeapWord* const last_addr = dest_addr + words - 1;
472 const size_t dest_region_1 = addr_to_region_idx(dest_addr); 659 const size_t dest_region_1 = addr_to_region_idx(dest_addr);
473 const size_t dest_region_2 = addr_to_region_idx(last_addr); 660 const size_t dest_region_2 = addr_to_region_idx(last_addr);
474 #if 0 661
475 // Initially assume that the destination regions will be the same and 662 // Initially assume that the destination regions will be the same and
476 // adjust the value below if necessary. Under this assumption, if 663 // adjust the value below if necessary. Under this assumption, if
477 // cur_region == dest_region_2, then cur_region will be compacted 664 // cur_region == dest_region_2, then cur_region will be compacted
478 // completely into itself. 665 // completely into itself.
479 uint destination_count = cur_region == dest_region_2 ? 0 : 1; 666 destination_count += cur_region == dest_region_2 ? 0 : 1;
480 if (dest_region_1 != dest_region_2) { 667 if (dest_region_1 != dest_region_2) {
481 // Destination regions differ; adjust destination_count. 668 // Destination regions differ; adjust destination_count.
482 destination_count += 1; 669 destination_count += 1;
483 // Data from cur_region will be copied to the start of dest_region_2. 670 // Data from cur_region will be copied to the start of dest_region_2.
484 _region_data[dest_region_2].set_source_region(cur_region); 671 _region_data[dest_region_2].set_source_region(cur_region);
485 } else if (region_offset(dest_addr) == 0) { 672 } else if (region_offset(dest_addr) == 0) {
486 // Data from cur_region will be copied to the start of the destination 673 // Data from cur_region will be copied to the start of the destination
487 // region. 674 // region.
488 _region_data[dest_region_1].set_source_region(cur_region); 675 _region_data[dest_region_1].set_source_region(cur_region);
489 } 676 }
490 #else
491 // Initially assume that the destination regions will be different and
492 // adjust the value below if necessary. Under this assumption, if
493 // cur_region == dest_region2, then cur_region will be compacted partially
494 // into dest_region_1 and partially into itself.
495 uint destination_count = cur_region == dest_region_2 ? 1 : 2;
496 if (dest_region_1 != dest_region_2) {
497 // Data from cur_region will be copied to the start of dest_region_2.
498 _region_data[dest_region_2].set_source_region(cur_region);
499 } else {
500 // Destination regions are the same; adjust destination_count.
501 destination_count -= 1;
502 if (region_offset(dest_addr) == 0) {
503 // Data from cur_region will be copied to the start of the destination
504 // region.
505 _region_data[dest_region_1].set_source_region(cur_region);
506 }
507 }
508 #endif // #if 0
509 677
510 _region_data[cur_region].set_destination_count(destination_count); 678 _region_data[cur_region].set_destination_count(destination_count);
511 _region_data[cur_region].set_data_location(region_to_addr(cur_region)); 679 _region_data[cur_region].set_data_location(region_to_addr(cur_region));
512 dest_addr += words; 680 dest_addr += words;
513 } 681 }
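The destination_count bookkeeping in the rewritten summarize() loop above is the subtle part of this change, so here is a small self-contained sketch of just that calculation. The fixed region size, word-indexed addresses and addr_to_region_idx() are stand-ins for the real ParallelCompactData helpers; the real loop also updates source_region on the destination regions and calls summarize_split_space() when the data does not fit.

#include <cstddef>

static const size_t RegionSizeWords = 512;                 // assumed region size in words

static size_t addr_to_region_idx(size_t addr) { return addr / RegionSizeWords; }

// destination_count for a source region holding 'words' live words that will
// be copied starting at dest_addr.  split_count is 0 unless the region was
// split, in which case it is SplitInfo::destination_count() (1 or 2).
static unsigned destination_count(size_t cur_region, size_t dest_addr,
                                  size_t words, unsigned split_count) {
  const size_t last_addr = dest_addr + words - 1;
  const size_t dest_region_1 = addr_to_region_idx(dest_addr);
  const size_t dest_region_2 = addr_to_region_idx(last_addr);

  // A region that compacts (partly) into itself does not count itself, which
  // preserves the invariant that a zero count means the region can be claimed
  // and filled immediately.
  unsigned count = split_count;
  count += (cur_region == dest_region_2) ? 0 : 1;
  if (dest_region_1 != dest_region_2) {
    count += 1;            // the data also starts a second destination region
  }
  return count;
}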
747 915
748 const size_t beg_region = _summary_data.addr_to_region_idx(bot); 916 const size_t beg_region = _summary_data.addr_to_region_idx(bot);
749 const size_t end_region = 917 const size_t end_region =
750 _summary_data.addr_to_region_idx(_summary_data.region_align_up(max_top)); 918 _summary_data.addr_to_region_idx(_summary_data.region_align_up(max_top));
751 _summary_data.clear_range(beg_region, end_region); 919 _summary_data.clear_range(beg_region, end_region);
920
921 // Clear the data used to 'split' regions.
922 SplitInfo& split_info = _space_info[id].split_info();
923 if (split_info.is_valid()) {
924 split_info.clear();
925 }
926 DEBUG_ONLY(split_info.verify_clear();)
752 } 927 }
753 928
754 void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values) 929 void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)
755 { 930 {
756 // Update the from & to space pointers in space_info, since they are swapped 931 // Update the from & to space pointers in space_info, since they are swapped
805 980
806 void PSParallelCompact::post_compact() 981 void PSParallelCompact::post_compact()
807 { 982 {
808 TraceTime tm("post compact", print_phases(), true, gclog_or_tty); 983 TraceTime tm("post compact", print_phases(), true, gclog_or_tty);
809 984
810 // Clear the marking bitmap and summary data and update top() in each space.
811 for (unsigned int id = perm_space_id; id < last_space_id; ++id) { 985 for (unsigned int id = perm_space_id; id < last_space_id; ++id) {
986 // Clear the marking bitmap, summary data and split info.
812 clear_data_covering_space(SpaceId(id)); 987 clear_data_covering_space(SpaceId(id));
813 _space_info[id].space()->set_top(_space_info[id].new_top()); 988 // Update top(). Must be done after clearing the bitmap and summary data.
989 _space_info[id].publish_new_top();
814 } 990 }
815 991
816 MutableSpace* const eden_space = _space_info[eden_space_id].space(); 992 MutableSpace* const eden_space = _space_info[eden_space_id].space();
817 MutableSpace* const from_space = _space_info[from_space_id].space(); 993 MutableSpace* const from_space = _space_info[from_space_id].space();
818 MutableSpace* const to_space = _space_info[to_space_id].space(); 994 MutableSpace* const to_space = _space_info[to_space_id].space();
1149 // (maximum) reclaimed_ratio() is selected. 1325 // (maximum) reclaimed_ratio() is selected.
1150 HeapWord* 1326 HeapWord*
1151 PSParallelCompact::compute_dense_prefix(const SpaceId id, 1327 PSParallelCompact::compute_dense_prefix(const SpaceId id,
1152 bool maximum_compaction) 1328 bool maximum_compaction)
1153 { 1329 {
1330 if (ParallelOldGCSplitALot) {
1331 if (_space_info[id].dense_prefix() != _space_info[id].space()->bottom()) {
1332 // The value was chosen to provoke splitting a young gen space; use it.
1333 return _space_info[id].dense_prefix();
1334 }
1335 }
1336
1154 const size_t region_size = ParallelCompactData::RegionSize; 1337 const size_t region_size = ParallelCompactData::RegionSize;
1155 const ParallelCompactData& sd = summary_data(); 1338 const ParallelCompactData& sd = summary_data();
1156 1339
1157 const MutableSpace* const space = _space_info[id].space(); 1340 const MutableSpace* const space = _space_info[id].space();
1158 HeapWord* const top = space->top(); 1341 HeapWord* const top = space->top();
1237 #endif // #if 0 1420 #endif // #if 0
1238 1421
1239 return sd.region_to_addr(best_cp); 1422 return sd.region_to_addr(best_cp);
1240 } 1423 }
1241 1424
1425 #ifndef PRODUCT
1426 void
1427 PSParallelCompact::fill_with_live_objects(SpaceId id, HeapWord* const start,
1428 size_t words)
1429 {
1430 if (TraceParallelOldGCSummaryPhase) {
1431 tty->print_cr("fill_with_live_objects [" PTR_FORMAT " " PTR_FORMAT ") "
1432 SIZE_FORMAT, start, start + words, words);
1433 }
1434
1435 ObjectStartArray* const start_array = _space_info[id].start_array();
1436 CollectedHeap::fill_with_objects(start, words);
1437 for (HeapWord* p = start; p < start + words; p += oop(p)->size()) {
1438 _mark_bitmap.mark_obj(p, words);
1439 _summary_data.add_obj(p, words);
1440 start_array->allocate_block(p);
1441 }
1442 }
1443
1444 void
1445 PSParallelCompact::summarize_new_objects(SpaceId id, HeapWord* start)
1446 {
1447 ParallelCompactData& sd = summary_data();
1448 MutableSpace* space = _space_info[id].space();
1449
1450 // Find the source and destination start addresses.
1451 HeapWord* const src_addr = sd.region_align_down(start);
1452 HeapWord* dst_addr;
1453 if (src_addr < start) {
1454 dst_addr = sd.addr_to_region_ptr(src_addr)->destination();
1455 } else if (src_addr > space->bottom()) {
1456 // The start (the original top() value) is aligned to a region boundary so
1457 // the associated region does not have a destination. Compute the
1458 // destination from the previous region.
1459 RegionData* const cp = sd.addr_to_region_ptr(src_addr) - 1;
1460 dst_addr = cp->destination() + cp->data_size();
1461 } else {
1462 // Filling the entire space.
1463 dst_addr = space->bottom();
1464 }
1465 assert(dst_addr != NULL, "sanity");
1466
1467 // Update the summary data.
1468 bool result = _summary_data.summarize(_space_info[id].split_info(),
1469 src_addr, space->top(), NULL,
1470 dst_addr, space->end(),
1471 _space_info[id].new_top_addr());
1472 assert(result, "should not fail: bad filler object size");
1473 }
1474
1475 void
1476 PSParallelCompact::provoke_split(bool & max_compaction)
1477 {
1478 const size_t region_size = ParallelCompactData::RegionSize;
1479 ParallelCompactData& sd = summary_data();
1480
1481 MutableSpace* const eden_space = _space_info[eden_space_id].space();
1482 MutableSpace* const from_space = _space_info[from_space_id].space();
1483 const size_t eden_live = pointer_delta(eden_space->top(),
1484 _space_info[eden_space_id].new_top());
1485 const size_t from_live = pointer_delta(from_space->top(),
1486 _space_info[from_space_id].new_top());
1487
1488 const size_t min_fill_size = CollectedHeap::min_fill_size();
1489 const size_t eden_free = pointer_delta(eden_space->end(), eden_space->top());
1490 const size_t eden_fillable = eden_free >= min_fill_size ? eden_free : 0;
1491 const size_t from_free = pointer_delta(from_space->end(), from_space->top());
1492 const size_t from_fillable = from_free >= min_fill_size ? from_free : 0;
1493
1494 // Choose the space to split; need at least 2 regions live (or fillable).
1495 SpaceId id;
1496 MutableSpace* space;
1497 size_t live_words;
1498 size_t fill_words;
1499 if (eden_live + eden_fillable >= region_size * 2) {
1500 id = eden_space_id;
1501 space = eden_space;
1502 live_words = eden_live;
1503 fill_words = eden_fillable;
1504 } else if (from_live + from_fillable >= region_size * 2) {
1505 id = from_space_id;
1506 space = from_space;
1507 live_words = from_live;
1508 fill_words = from_fillable;
1509 } else {
1510 return; // Give up.
1511 }
1512 assert(fill_words == 0 || fill_words >= min_fill_size, "sanity");
1513
1514 if (live_words < region_size * 2) {
1515 // Fill from top() to end() w/live objects of mixed sizes.
1516 HeapWord* const fill_start = space->top();
1517 live_words += fill_words;
1518
1519 space->set_top(fill_start + fill_words);
1520 if (ZapUnusedHeapArea) {
1521 space->set_top_for_allocations();
1522 }
1523
1524 HeapWord* cur_addr = fill_start;
1525 while (fill_words > 0) {
1526 const size_t r = (size_t)os::random() % (region_size / 2) + min_fill_size;
1527 size_t cur_size = MIN2(align_object_size_(r), fill_words);
1528 if (fill_words - cur_size < min_fill_size) {
1529 cur_size = fill_words; // Avoid leaving a fragment too small to fill.
1530 }
1531
1532 CollectedHeap::fill_with_object(cur_addr, cur_size);
1533 mark_bitmap()->mark_obj(cur_addr, cur_size);
1534 sd.add_obj(cur_addr, cur_size);
1535
1536 cur_addr += cur_size;
1537 fill_words -= cur_size;
1538 }
1539
1540 summarize_new_objects(id, fill_start);
1541 }
1542
1543 max_compaction = false;
1544
1545 // Manipulate the old gen so that it has room for about half of the live data
1546 // in the target young gen space (live_words / 2).
1547 id = old_space_id;
1548 space = _space_info[id].space();
1549 const size_t free_at_end = space->free_in_words();
1550 const size_t free_target = align_object_size(live_words / 2);
1551 const size_t dead = pointer_delta(space->top(), _space_info[id].new_top());
1552
1553 if (free_at_end >= free_target + min_fill_size) {
1554 // Fill space above top() and set the dense prefix so everything survives.
1555 HeapWord* const fill_start = space->top();
1556 const size_t fill_size = free_at_end - free_target;
1557 space->set_top(space->top() + fill_size);
1558 if (ZapUnusedHeapArea) {
1559 space->set_top_for_allocations();
1560 }
1561 fill_with_live_objects(id, fill_start, fill_size);
1562 summarize_new_objects(id, fill_start);
1563 _space_info[id].set_dense_prefix(sd.region_align_down(space->top()));
1564 } else if (dead + free_at_end > free_target) {
1565 // Find a dense prefix that makes the right amount of space available.
1566 HeapWord* cur = sd.region_align_down(space->top());
1567 HeapWord* cur_destination = sd.addr_to_region_ptr(cur)->destination();
1568 size_t dead_to_right = pointer_delta(space->end(), cur_destination);
1569 while (dead_to_right < free_target) {
1570 cur -= region_size;
1571 cur_destination = sd.addr_to_region_ptr(cur)->destination();
1572 dead_to_right = pointer_delta(space->end(), cur_destination);
1573 }
1574 _space_info[id].set_dense_prefix(cur);
1575 }
1576 }
1577 #endif // #ifndef PRODUCT
1578
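provoke_split() above fills the chosen young-gen space with dummy live objects of random sizes. The sizing loop has one constraint worth calling out: it must never leave a tail smaller than the minimum fill size, since such a fragment could not be described by a filler object. A runnable sketch of that loop follows; the region size, minimum fill size and starting amount are illustrative values, and object-size alignment is omitted.

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <cstdlib>

int main() {
  const size_t region_size   = 512;    // assumed region size in words
  const size_t min_fill_size = 2;      // assumed minimum filler object size in words
  size_t fill_words = 1200;            // example amount of free space to fill

  while (fill_words > 0) {
    const size_t r = (size_t)std::rand() % (region_size / 2) + min_fill_size;
    size_t cur_size = std::min(r, fill_words);
    if (fill_words - cur_size < min_fill_size) {
      cur_size = fill_words;           // absorb the tail rather than leave an unfillable fragment
    }
    std::printf("filler object of %zu words\n", cur_size);
    fill_words -= cur_size;
  }
  return 0;
}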
1242 void PSParallelCompact::summarize_spaces_quick() 1579 void PSParallelCompact::summarize_spaces_quick()
1243 { 1580 {
1244 for (unsigned int i = 0; i < last_space_id; ++i) { 1581 for (unsigned int i = 0; i < last_space_id; ++i) {
1245 const MutableSpace* space = _space_info[i].space(); 1582 const MutableSpace* space = _space_info[i].space();
1246 bool result = _summary_data.summarize(space->bottom(), space->end(), 1583 HeapWord** nta = _space_info[i].new_top_addr();
1247 space->bottom(), space->top(), 1584 bool result = _summary_data.summarize(_space_info[i].split_info(),
1248 _space_info[i].new_top_addr()); 1585 space->bottom(), space->top(), NULL,
1249 assert(result, "should never fail"); 1586 space->bottom(), space->end(), nta);
1587 assert(result, "space must fit into itself");
1250 _space_info[i].set_dense_prefix(space->bottom()); 1588 _space_info[i].set_dense_prefix(space->bottom());
1251 } 1589 }
1252 } 1590 }
1253 1591
1254 void PSParallelCompact::fill_dense_prefix_end(SpaceId id) 1592 void PSParallelCompact::fill_dense_prefix_end(SpaceId id)
1306 obj_beg = dense_prefix_end - 3; 1644 obj_beg = dense_prefix_end - 3;
1307 obj_len = 3; 1645 obj_len = 3;
1308 } 1646 }
1309 #endif // #ifdef _LP64 1647 #endif // #ifdef _LP64
1310 1648
1311 MemRegion region(obj_beg, obj_len); 1649 CollectedHeap::fill_with_object(obj_beg, obj_len);
1312 SharedHeap::fill_region_with_object(region);
1313 _mark_bitmap.mark_obj(obj_beg, obj_len); 1650 _mark_bitmap.mark_obj(obj_beg, obj_len);
1314 _summary_data.add_obj(obj_beg, obj_len); 1651 _summary_data.add_obj(obj_beg, obj_len);
1315 assert(start_array(id) != NULL, "sanity"); 1652 assert(start_array(id) != NULL, "sanity");
1316 start_array(id)->allocate_block(obj_beg); 1653 start_array(id)->allocate_block(obj_beg);
1317 } 1654 }
1318 } 1655 }
1319 1656
1320 void 1657 void
1658 PSParallelCompact::clear_source_region(HeapWord* beg_addr, HeapWord* end_addr)
1659 {
1660 RegionData* const beg_ptr = _summary_data.addr_to_region_ptr(beg_addr);
1661 HeapWord* const end_aligned_up = _summary_data.region_align_up(end_addr);
1662 RegionData* const end_ptr = _summary_data.addr_to_region_ptr(end_aligned_up);
1663 for (RegionData* cur = beg_ptr; cur < end_ptr; ++cur) {
1664 cur->set_source_region(0);
1665 }
1666 }
1667
1668 void
1321 PSParallelCompact::summarize_space(SpaceId id, bool maximum_compaction) 1669 PSParallelCompact::summarize_space(SpaceId id, bool maximum_compaction)
1322 { 1670 {
1323 assert(id < last_space_id, "id out of range"); 1671 assert(id < last_space_id, "id out of range");
1324 assert(_space_info[id].dense_prefix() == _space_info[id].space()->bottom(), 1672 assert(_space_info[id].dense_prefix() == _space_info[id].space()->bottom() ||
1325 "should have been set in summarize_spaces_quick()"); 1673 ParallelOldGCSplitALot && id == old_space_id,
1674 "should have been reset in summarize_spaces_quick()");
1326 1675
1327 const MutableSpace* space = _space_info[id].space(); 1676 const MutableSpace* space = _space_info[id].space();
1328 if (_space_info[id].new_top() != space->bottom()) { 1677 if (_space_info[id].new_top() != space->bottom()) {
1329 HeapWord* dense_prefix_end = compute_dense_prefix(id, maximum_compaction); 1678 HeapWord* dense_prefix_end = compute_dense_prefix(id, maximum_compaction);
1330 _space_info[id].set_dense_prefix(dense_prefix_end); 1679 _space_info[id].set_dense_prefix(dense_prefix_end);
1336 HeapWord* addr = compute_dense_prefix_via_density(id, maximum_compaction); 1685 HeapWord* addr = compute_dense_prefix_via_density(id, maximum_compaction);
1337 print_dense_prefix_stats("density", id, maximum_compaction, addr); 1686 print_dense_prefix_stats("density", id, maximum_compaction, addr);
1338 } 1687 }
1339 #endif // #ifndef PRODUCT 1688 #endif // #ifndef PRODUCT
1340 1689
1341 // If dead space crosses the dense prefix boundary, it is (at least 1690 // Recompute the summary data, taking into account the dense prefix. If
1342 // partially) filled with a dummy object, marked live and added to the 1691 // every last byte will be reclaimed, then the existing summary data which
1343 // summary data. This simplifies the copy/update phase and must be done 1692 // compacts everything can be left in place.
1344 // before the final locations of objects are determined, to prevent leaving
1345 // a fragment of dead space that is too small to fill with an object.
1346 if (!maximum_compaction && dense_prefix_end != space->bottom()) { 1693 if (!maximum_compaction && dense_prefix_end != space->bottom()) {
1694 // If dead space crosses the dense prefix boundary, it is (at least
1695 // partially) filled with a dummy object, marked live and added to the
1696 // summary data. This simplifies the copy/update phase and must be done
1697 // before the final locations of objects are determined, to prevent
1698 // leaving a fragment of dead space that is too small to fill.
1347 fill_dense_prefix_end(id); 1699 fill_dense_prefix_end(id);
1348 } 1700
1349 1701 // Compute the destination of each Region, and thus each object.
1350 // Compute the destination of each Region, and thus each object. 1702 _summary_data.summarize_dense_prefix(space->bottom(), dense_prefix_end);
1351 _summary_data.summarize_dense_prefix(space->bottom(), dense_prefix_end); 1703 _summary_data.summarize(_space_info[id].split_info(),
1352 _summary_data.summarize(dense_prefix_end, space->end(), 1704 dense_prefix_end, space->top(), NULL,
1353 dense_prefix_end, space->top(), 1705 dense_prefix_end, space->end(),
1354 _space_info[id].new_top_addr()); 1706 _space_info[id].new_top_addr());
1707 }
1355 } 1708 }
1356 1709
1357 if (TraceParallelOldGCSummaryPhase) { 1710 if (TraceParallelOldGCSummaryPhase) {
1358 const size_t region_size = ParallelCompactData::RegionSize; 1711 const size_t region_size = ParallelCompactData::RegionSize;
1359 HeapWord* const dense_prefix_end = _space_info[id].dense_prefix(); 1712 HeapWord* const dense_prefix_end = _space_info[id].dense_prefix();
1369 dp_region, dp_words / region_size, 1722 dp_region, dp_words / region_size,
1370 cr_words / region_size, new_top); 1723 cr_words / region_size, new_top);
1371 } 1724 }
1372 } 1725 }
1373 1726
1727 #ifndef PRODUCT
1728 void PSParallelCompact::summary_phase_msg(SpaceId dst_space_id,
1729 HeapWord* dst_beg, HeapWord* dst_end,
1730 SpaceId src_space_id,
1731 HeapWord* src_beg, HeapWord* src_end)
1732 {
1733 if (TraceParallelOldGCSummaryPhase) {
1734 tty->print_cr("summarizing %d [%s] into %d [%s]: "
1735 "src=" PTR_FORMAT "-" PTR_FORMAT " "
1736 SIZE_FORMAT "-" SIZE_FORMAT " "
1737 "dst=" PTR_FORMAT "-" PTR_FORMAT " "
1738 SIZE_FORMAT "-" SIZE_FORMAT,
1739 src_space_id, space_names[src_space_id],
1740 dst_space_id, space_names[dst_space_id],
1741 src_beg, src_end,
1742 _summary_data.addr_to_region_idx(src_beg),
1743 _summary_data.addr_to_region_idx(src_end),
1744 dst_beg, dst_end,
1745 _summary_data.addr_to_region_idx(dst_beg),
1746 _summary_data.addr_to_region_idx(dst_end));
1747 }
1748 }
1749 #endif // #ifndef PRODUCT
1750
1374 void PSParallelCompact::summary_phase(ParCompactionManager* cm, 1751 void PSParallelCompact::summary_phase(ParCompactionManager* cm,
1375 bool maximum_compaction) 1752 bool maximum_compaction)
1376 { 1753 {
1377 EventMark m("2 summarize"); 1754 EventMark m("2 summarize");
1378 TraceTime tm("summary phase", print_phases(), true, gclog_or_tty); 1755 TraceTime tm("summary phase", print_phases(), true, gclog_or_tty);
1401 } 1778 }
1402 } 1779 }
1403 1780
1404 // The amount of live data that will end up in old space (assuming it fits). 1781 // The amount of live data that will end up in old space (assuming it fits).
1405 size_t old_space_total_live = 0; 1782 size_t old_space_total_live = 0;
1406 unsigned int id; 1783 assert(perm_space_id < old_space_id, "should not count perm data here");
1407 for (id = old_space_id; id < last_space_id; ++id) { 1784 for (unsigned int id = old_space_id; id < last_space_id; ++id) {
1408 old_space_total_live += pointer_delta(_space_info[id].new_top(), 1785 old_space_total_live += pointer_delta(_space_info[id].new_top(),
1409 _space_info[id].space()->bottom()); 1786 _space_info[id].space()->bottom());
1410 } 1787 }
1411 1788
1412 const MutableSpace* old_space = _space_info[old_space_id].space(); 1789 MutableSpace* const old_space = _space_info[old_space_id].space();
1413 if (old_space_total_live > old_space->capacity_in_words()) { 1790 const size_t old_capacity = old_space->capacity_in_words();
1791 if (old_space_total_live > old_capacity) {
1414 // XXX - should also try to expand 1792 // XXX - should also try to expand
1415 maximum_compaction = true; 1793 maximum_compaction = true;
1416 } else if (!UseParallelOldGCDensePrefix) { 1794 }
1417 maximum_compaction = true; 1795 #ifndef PRODUCT
1418 } 1796 if (ParallelOldGCSplitALot && old_space_total_live < old_capacity) {
1797 if (total_invocations() % ParallelOldGCSplitInterval == 0) {
1798 provoke_split(maximum_compaction);
1799 }
1800 }
1801 #endif // #ifndef PRODUCT
1419 1802
1420 // Permanent and Old generations. 1803 // Permanent and Old generations.
1421 summarize_space(perm_space_id, maximum_compaction); 1804 summarize_space(perm_space_id, maximum_compaction);
1422 summarize_space(old_space_id, maximum_compaction); 1805 summarize_space(old_space_id, maximum_compaction);
1423 1806
1424 // Summarize the remaining spaces (those in the young gen) into old space. If 1807 // Summarize the remaining spaces in the young gen. The initial target space
1425 // the live data from a space doesn't fit, the existing summarization is left 1808 // is the old gen. If a space does not fit entirely into the target, then the
1426 // intact, so the data is compacted down within the space itself. 1809 // remainder is compacted into the space itself and that space becomes the new
1427 HeapWord** new_top_addr = _space_info[old_space_id].new_top_addr(); 1810 // target.
1428 HeapWord* const target_space_end = old_space->end(); 1811 SpaceId dst_space_id = old_space_id;
1429 for (id = eden_space_id; id < last_space_id; ++id) { 1812 HeapWord* dst_space_end = old_space->end();
1813 HeapWord** new_top_addr = _space_info[dst_space_id].new_top_addr();
1814 for (unsigned int id = eden_space_id; id < last_space_id; ++id) {
1430 const MutableSpace* space = _space_info[id].space(); 1815 const MutableSpace* space = _space_info[id].space();
1431 const size_t live = pointer_delta(_space_info[id].new_top(), 1816 const size_t live = pointer_delta(_space_info[id].new_top(),
1432 space->bottom()); 1817 space->bottom());
1433 const size_t available = pointer_delta(target_space_end, *new_top_addr); 1818 const size_t available = pointer_delta(dst_space_end, *new_top_addr);
1819
1820 NOT_PRODUCT(summary_phase_msg(dst_space_id, *new_top_addr, dst_space_end,
1821 SpaceId(id), space->bottom(), space->top());)
1434 if (live > 0 && live <= available) { 1822 if (live > 0 && live <= available) {
1435 // All the live data will fit. 1823 // All the live data will fit.
1436 if (TraceParallelOldGCSummaryPhase) { 1824 bool done = _summary_data.summarize(_space_info[id].split_info(),
1437 tty->print_cr("summarizing %d into old_space @ " PTR_FORMAT, 1825 space->bottom(), space->top(),
1438 id, *new_top_addr); 1826 NULL,
1439 } 1827 *new_top_addr, dst_space_end,
1440 _summary_data.summarize(*new_top_addr, target_space_end, 1828 new_top_addr);
1441 space->bottom(), space->top(), 1829 assert(done, "space must fit into old gen");
1442 new_top_addr); 1830
1443 1831 // XXX - this is necessary because decrement_destination_counts() tests
1832 // source_region() to determine if a region will be filled. Probably
1833 // better to pass src_space->new_top() into decrement_destination_counts
1834 // and test that instead.
1835 //
1444 // Clear the source_region field for each region in the space. 1836 // Clear the source_region field for each region in the space.
1445 HeapWord* const new_top = _space_info[id].new_top(); 1837 clear_source_region(space->bottom(), _space_info[id].new_top());
1446 HeapWord* const clear_end = _summary_data.region_align_up(new_top);
1447 RegionData* beg_region =
1448 _summary_data.addr_to_region_ptr(space->bottom());
1449 RegionData* end_region = _summary_data.addr_to_region_ptr(clear_end);
1450 while (beg_region < end_region) {
1451 beg_region->set_source_region(0);
1452 ++beg_region;
1453 }
1454 1838
1455 // Reset the new_top value for the space. 1839 // Reset the new_top value for the space.
1456 _space_info[id].set_new_top(space->bottom()); 1840 _space_info[id].set_new_top(space->bottom());
1841 } else if (live > 0) {
1842 // Attempt to fit part of the source space into the target space.
1843 HeapWord* next_src_addr = NULL;
1844 bool done = _summary_data.summarize(_space_info[id].split_info(),
1845 space->bottom(), space->top(),
1846 &next_src_addr,
1847 *new_top_addr, dst_space_end,
1848 new_top_addr);
1849 assert(!done, "space should not fit into old gen");
1850 assert(next_src_addr != NULL, "sanity");
1851
1852 // The source space becomes the new target, so the remainder is compacted
1853 // within the space itself.
1854 dst_space_id = SpaceId(id);
1855 dst_space_end = space->end();
1856 new_top_addr = _space_info[id].new_top_addr();
1857 HeapWord* const clear_end = _space_info[id].new_top();
1858 NOT_PRODUCT(summary_phase_msg(dst_space_id,
1859 space->bottom(), dst_space_end,
1860 SpaceId(id), next_src_addr, space->top());)
1861 done = _summary_data.summarize(_space_info[id].split_info(),
1862 next_src_addr, space->top(),
1863 NULL,
1864 space->bottom(), dst_space_end,
1865 new_top_addr);
1866 assert(done, "space must fit when compacted into itself");
1867 assert(*new_top_addr <= space->top(), "usage should not grow");
1868
1869 // XXX - this should go away. See comments above.
1870 //
1871 // Clear the source_region field in regions at the end of the space that
1872 // will not be filled.
1873 HeapWord* const clear_beg = _summary_data.region_align_up(*new_top_addr);
1874 clear_source_region(clear_beg, clear_end);
1457 } 1875 }
1458 } 1876 }
1459 1877
1460 if (TraceParallelOldGCSummaryPhase) { 1878 if (TraceParallelOldGCSummaryPhase) {
1461 tty->print_cr("summary_phase: after final summarization"); 1879 tty->print_cr("summary_phase: after final summarization");
1805 young_gen->capacity_in_bytes() / K, new_young_size / K); 2223 young_gen->capacity_in_bytes() / K, new_young_size / K);
1806 } 2224 }
1807 2225
1808 // Fill the unused part of the old gen. 2226 // Fill the unused part of the old gen.
1809 MutableSpace* const old_space = old_gen->object_space(); 2227 MutableSpace* const old_space = old_gen->object_space();
1810 MemRegion old_gen_unused(old_space->top(), old_space->end()); 2228 HeapWord* const unused_start = old_space->top();
1811 if (!old_gen_unused.is_empty()) { 2229 size_t const unused_words = pointer_delta(old_space->end(), unused_start);
1812 SharedHeap::fill_region_with_object(old_gen_unused); 2230
2231 if (unused_words > 0) {
2232 if (unused_words < CollectedHeap::min_fill_size()) {
2233 return false; // If the old gen cannot be filled, must give up.
2234 }
2235 CollectedHeap::fill_with_objects(unused_start, unused_words);
1813 } 2236 }
1814 2237
1815 // Take the live data from eden and set both top and end in the old gen to 2238 // Take the live data from eden and set both top and end in the old gen to
1816 // eden top. (Need to set end because reset_after_change() mangles the region 2239 // eden top. (Need to set end because reset_after_change() mangles the region
1817 // from end to virtual_space->high() in debug builds). 2240 // from end to virtual_space->high() in debug builds).
1823 old_space->set_end(new_top); 2246 old_space->set_end(new_top);
1824 old_gen->reset_after_change(); 2247 old_gen->reset_after_change();
1825 2248
1826 // Update the object start array for the filler object and the data from eden. 2249 // Update the object start array for the filler object and the data from eden.
1827 ObjectStartArray* const start_array = old_gen->start_array(); 2250 ObjectStartArray* const start_array = old_gen->start_array();
1828 HeapWord* const start = old_gen_unused.start(); 2251 for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
1829 for (HeapWord* addr = start; addr < new_top; addr += oop(addr)->size()) { 2252 start_array->allocate_block(p);
1830 start_array->allocate_block(addr);
1831 } 2253 }
1832 2254
1833 // Could update the promoted average here, but it is not typically updated at 2255 // Could update the promoted average here, but it is not typically updated at
1834 // full GCs and the value to use is unclear. Something like 2256 // full GCs and the value to use is unclear. Something like
1835 // 2257 //
2046 2468
2047 // Iterate over all the spaces adding tasks for updating 2469 // Iterate over all the spaces adding tasks for updating
2048 // regions in the dense prefix. Assume that 1 gc thread 2470 // regions in the dense prefix. Assume that 1 gc thread
2049 // will work on opening the gaps and the remaining gc threads 2471 // will work on opening the gaps and the remaining gc threads
2050 // will work on the dense prefix. 2472 // will work on the dense prefix.
2051 SpaceId space_id = old_space_id; 2473 unsigned int space_id;
2052 while (space_id != last_space_id) { 2474 for (space_id = old_space_id; space_id < last_space_id; ++ space_id) {
2053 HeapWord* const dense_prefix_end = _space_info[space_id].dense_prefix(); 2475 HeapWord* const dense_prefix_end = _space_info[space_id].dense_prefix();
2054 const MutableSpace* const space = _space_info[space_id].space(); 2476 const MutableSpace* const space = _space_info[space_id].space();
2055 2477
2056 if (dense_prefix_end == space->bottom()) { 2478 if (dense_prefix_end == space->bottom()) {
2057 // There is no dense prefix for this space. 2479 // There is no dense prefix for this space.
2058 space_id = next_compaction_space_id(space_id);
2059 continue; 2480 continue;
2060 } 2481 }
2061 2482
2062 // The dense prefix is before this region. 2483 // The dense prefix is before this region.
2063 size_t region_index_end_dense_prefix = 2484 size_t region_index_end_dense_prefix =
2103 break; 2524 break;
2104 } 2525 }
2105 // region_index_end is not processed 2526 // region_index_end is not processed
2106 size_t region_index_end = MIN2(region_index_start + regions_per_thread, 2527 size_t region_index_end = MIN2(region_index_start + regions_per_thread,
2107 region_index_end_dense_prefix); 2528 region_index_end_dense_prefix);
2108 q->enqueue(new UpdateDensePrefixTask( 2529 q->enqueue(new UpdateDensePrefixTask(SpaceId(space_id),
2109 space_id, 2530 region_index_start,
2110 region_index_start, 2531 region_index_end));
2111 region_index_end));
2112 region_index_start = region_index_end; 2532 region_index_start = region_index_end;
2113 } 2533 }
2114 } 2534 }
2115 // This gets any part of the dense prefix that did not 2535 // This gets any part of the dense prefix that did not
2116 // fit evenly. 2536 // fit evenly.
2117 if (region_index_start < region_index_end_dense_prefix) { 2537 if (region_index_start < region_index_end_dense_prefix) {
2118 q->enqueue(new UpdateDensePrefixTask( 2538 q->enqueue(new UpdateDensePrefixTask(SpaceId(space_id),
2119 space_id, 2539 region_index_start,
2120 region_index_start, 2540 region_index_end_dense_prefix));
2121 region_index_end_dense_prefix)); 2541 }
2122 } 2542 }
2123 space_id = next_compaction_space_id(space_id);
2124 } // End tasks for dense prefix
2125 } 2543 }
2126 2544
2127 void PSParallelCompact::enqueue_region_stealing_tasks( 2545 void PSParallelCompact::enqueue_region_stealing_tasks(
2128 GCTaskQueue* q, 2546 GCTaskQueue* q,
2129 ParallelTaskTerminator* terminator_ptr, 2547 ParallelTaskTerminator* terminator_ptr,
2565 cur_beg = m->find_obj_beg(cur_beg, search_end); 2983 cur_beg = m->find_obj_beg(cur_beg, search_end);
2566 assert(cur_beg < m->addr_to_bit(end), "not enough live words to skip"); 2984 assert(cur_beg < m->addr_to_bit(end), "not enough live words to skip");
2567 return m->bit_to_addr(cur_beg); 2985 return m->bit_to_addr(cur_beg);
2568 } 2986 }
2569 2987
2570 HeapWord* 2988 HeapWord* PSParallelCompact::first_src_addr(HeapWord* const dest_addr,
2571 PSParallelCompact::first_src_addr(HeapWord* const dest_addr, 2989 SpaceId src_space_id,
2572 size_t src_region_idx) 2990 size_t src_region_idx)
2573 { 2991 {
2992 assert(summary_data().is_region_aligned(dest_addr), "not aligned");
2993
2994 const SplitInfo& split_info = _space_info[src_space_id].split_info();
2995 if (split_info.dest_region_addr() == dest_addr) {
2996 // The partial object ending at the split point contains the first word to
2997 // be copied to dest_addr.
2998 return split_info.first_src_addr();
2999 }
3000
3001 const ParallelCompactData& sd = summary_data();
2574 ParMarkBitMap* const bitmap = mark_bitmap(); 3002 ParMarkBitMap* const bitmap = mark_bitmap();
2575 const ParallelCompactData& sd = summary_data();
2576 const size_t RegionSize = ParallelCompactData::RegionSize; 3003 const size_t RegionSize = ParallelCompactData::RegionSize;
2577 3004
2578 assert(sd.is_region_aligned(dest_addr), "not aligned"); 3005 assert(sd.is_region_aligned(dest_addr), "not aligned");
2579
2580 const RegionData* const src_region_ptr = sd.region(src_region_idx); 3006 const RegionData* const src_region_ptr = sd.region(src_region_idx);
2581 const size_t partial_obj_size = src_region_ptr->partial_obj_size(); 3007 const size_t partial_obj_size = src_region_ptr->partial_obj_size();
2582 HeapWord* const src_region_destination = src_region_ptr->destination(); 3008 HeapWord* const src_region_destination = src_region_ptr->destination();
2583 3009
2584 assert(dest_addr >= src_region_destination, "wrong src region"); 3010 assert(dest_addr >= src_region_destination, "wrong src region");
2735 size_t src_region_idx = region_ptr->source_region(); 3161 size_t src_region_idx = region_ptr->source_region();
2736 SpaceId src_space_id = space_id(sd.region_to_addr(src_region_idx)); 3162 SpaceId src_space_id = space_id(sd.region_to_addr(src_region_idx));
2737 HeapWord* src_space_top = _space_info[src_space_id].space()->top(); 3163 HeapWord* src_space_top = _space_info[src_space_id].space()->top();
2738 3164
2739 MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words); 3165 MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words);
2740 closure.set_source(first_src_addr(dest_addr, src_region_idx)); 3166 closure.set_source(first_src_addr(dest_addr, src_space_id, src_region_idx));
2741 3167
2742 // Adjust src_region_idx to prepare for decrementing destination counts (the 3168 // Adjust src_region_idx to prepare for decrementing destination counts (the
2743 // destination count is not decremented when a region is copied to itself). 3169 // destination count is not decremented when a region is copied to itself).
2744 if (src_region_idx == region_idx) { 3170 if (src_region_idx == region_idx) {
2745 src_region_idx += 1; 3171 src_region_idx += 1;
3006 void PSParallelCompact::compact_prologue() { 3432 void PSParallelCompact::compact_prologue() {
3007 _updated_int_array_klass_obj = (klassOop) 3433 _updated_int_array_klass_obj = (klassOop)
3008 summary_data().calc_new_pointer(Universe::intArrayKlassObj()); 3434 summary_data().calc_new_pointer(Universe::intArrayKlassObj());
3009 } 3435 }
3010 3436
3011 // The initial implementation of this method created a field
3012 // _next_compaction_space_id in SpaceInfo and initialized
3013 // that field in SpaceInfo::initialize_space_info(). That
3014 // required that _next_compaction_space_id be declared a
3015 // SpaceId in SpaceInfo and that would have required that
3016 // either SpaceId be declared in a separate class or that
3017 // it be declared in SpaceInfo. It didn't seem consistent
3018 // to declare it in SpaceInfo (didn't really fit logically).
3019 // Alternatively, defining a separate class to define SpaceId
3020 // seem excessive. This implementation is simple and localizes
3021 // the knowledge.
3022
3023 PSParallelCompact::SpaceId
3024 PSParallelCompact::next_compaction_space_id(SpaceId id) {
3025 assert(id < last_space_id, "id out of range");
3026 switch (id) {
3027 case perm_space_id :
3028 return last_space_id;
3029 case old_space_id :
3030 return eden_space_id;
3031 case eden_space_id :
3032 return from_space_id;
3033 case from_space_id :
3034 return to_space_id;
3035 case to_space_id :
3036 return last_space_id;
3037 default:
3038 assert(false, "Bad space id");
3039 return last_space_id;
3040 }
3041 }
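With next_compaction_space_id() deleted above, the dense-prefix task code walks the spaces with a plain loop instead of chasing the old perm/old/eden/from/to switch. A trivial sketch of that iteration follows; the enum ordering below is an assumption mirroring PSParallelCompact::SpaceId, not a copy of the header.

#include <cstdio>

enum SpaceId { perm_space_id, old_space_id, eden_space_id,
               from_space_id, to_space_id, last_space_id };

int main() {
  // Visit old, eden, from and to in order; perm is handled separately.
  for (unsigned int id = old_space_id; id < last_space_id; ++id) {
    std::printf("compaction space %u\n", id);
  }
  return 0;
}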