comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 3778:5f6f2615433a

7049999: G1: Make the G1PrintHeapRegions output consistent and complete
Summary: Extend and make more consistent the output from the G1PrintHeapRegions flag.
Reviewed-by: johnc, jmasa
author tonyp
date Fri, 24 Jun 2011 12:38:49 -0400
parents e8b0b0392037
children 14a2fd14c0db
3777:e8b0b0392037 3778:5f6f2615433a
582 // the desired size. So, we cannot assume that the allocation 582 // the desired size. So, we cannot assume that the allocation
583 // will succeed. 583 // will succeed.
584 res = _free_list.remove_head_or_null(); 584 res = _free_list.remove_head_or_null();
585 } 585 }
586 } 586 }
587 if (res != NULL) {
588 if (G1PrintHeapRegions) {
589 gclog_or_tty->print_cr("new alloc region "HR_FORMAT,
590 HR_FORMAT_PARAMS(res));
591 }
592 }
593 return res; 587 return res;
594 } 588 }
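This first hunk sets the pattern for the whole changeset: the old code (left column) printed each newly allocated region directly with gclog_or_tty under the G1PrintHeapRegions flag, and the new code drops that block because the event is now emitted through a shared _hr_printer object. Below is a minimal, self-contained sketch of that style of centralized event printer; the type names and output format are simplified assumptions modeled on the calls visible in this diff, not the actual G1HRPrinter interface.

    // Sketch of an event-style region printer (assumed interface).
    #include <cstdio>

    struct Region {                     // hypothetical stand-in for HeapRegion
      const void* bottom;
      const void* end;
    };

    class RegionEventPrinter {
      bool _active;                     // mirrors G1HRPrinter::is_active()
    public:
      RegionEventPrinter() : _active(false) { }
      void set_active(bool a) { _active = a; }
      bool is_active() const { return _active; }

      // Every call site shares one format, so the log stays consistent.
      void alloc(const Region* r, const char* type) {
        if (_active) std::printf("ALLOC(%s) [%p, %p)\n", type, r->bottom, r->end);
      }
      void retire(const Region* r) {
        if (_active) std::printf("RETIRE [%p, %p)\n", r->bottom, r->end);
      }
    };

    int main() {
      RegionEventPrinter printer;
      printer.set_active(true);         // analogous to -XX:+G1PrintHeapRegions
      char block[64];
      Region r = { block, block + sizeof(block) };
      printer.alloc(&r, "Eden");
      printer.retire(&r);
      return 0;
    }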
595 589
596 HeapRegion* G1CollectedHeap::new_gc_alloc_region(int purpose, 590 HeapRegion* G1CollectedHeap::new_gc_alloc_region(int purpose,
597 size_t word_size) { 591 size_t word_size) {
598 HeapRegion* alloc_region = NULL; 592 HeapRegion* alloc_region = NULL;
599 if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) { 593 if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) {
600 alloc_region = new_region(word_size, true /* do_expand */); 594 alloc_region = new_region(word_size, true /* do_expand */);
601 if (purpose == GCAllocForSurvived && alloc_region != NULL) { 595 if (alloc_region != NULL) {
602 alloc_region->set_survivor(); 596 if (purpose == GCAllocForSurvived) {
603 } 597 _hr_printer.alloc(alloc_region, G1HRPrinter::Survivor);
604 ++_gc_alloc_region_counts[purpose]; 598 alloc_region->set_survivor();
599 } else {
600 _hr_printer.alloc(alloc_region, G1HRPrinter::Old);
601 }
602 ++_gc_alloc_region_counts[purpose];
603 }
605 } else { 604 } else {
606 g1_policy()->note_alloc_region_limit_reached(purpose); 605 g1_policy()->note_alloc_region_limit_reached(purpose);
607 } 606 }
608 return alloc_region; 607 return alloc_region;
609 } 608 }
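The reshuffle in new_gc_alloc_region() hoists the alloc_region != NULL test into its own branch so the printer event and the survivor tagging live together: a survivor allocation emits ALLOC(Survivor) and calls set_survivor(), anything else emits ALLOC(Old), and the per-purpose count bumps either way. A simplified sketch of that control flow, with the HotSpot types reduced to stand-ins:

    #include <cstdio>

    enum Purpose { ForSurvived = 0, ForTenured = 1 };
    struct Region { bool survivor; };

    // Simplified shape of the new logic: one NULL check, then the
    // purpose decides both the event type and the region tagging.
    void on_new_gc_alloc_region(Region* r, Purpose purpose, int* counts) {
      if (r != NULL) {
        if (purpose == ForSurvived) {
          std::printf("ALLOC(Survivor)\n");  // _hr_printer.alloc(..., Survivor)
          r->survivor = true;                // alloc_region->set_survivor()
        } else {
          std::printf("ALLOC(Old)\n");       // _hr_printer.alloc(..., Old)
        }
        ++counts[purpose];                   // ++_gc_alloc_region_counts[purpose]
      }
    }

    int main() {
      Region r = { false };
      int counts[2] = { 0, 0 };
      on_new_gc_alloc_region(&r, ForSurvived, counts);
      return 0;
    }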
731 // Now that the BOT and the object header have been initialized, 730 // Now that the BOT and the object header have been initialized,
732 // we can update top of the "starts humongous" region. 731 // we can update top of the "starts humongous" region.
733 assert(first_hr->bottom() < new_top && new_top <= first_hr->end(), 732 assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
734 "new_top should be in this region"); 733 "new_top should be in this region");
735 first_hr->set_top(new_top); 734 first_hr->set_top(new_top);
735 if (_hr_printer.is_active()) {
736 HeapWord* bottom = first_hr->bottom();
737 HeapWord* end = first_hr->orig_end();
738 if ((first + 1) == last) {
739 // the series has a single humongous region
740 _hr_printer.alloc(G1HRPrinter::SingleHumongous, first_hr, new_top);
741 } else {
742 // the series has more than one humongous region
743 _hr_printer.alloc(G1HRPrinter::StartsHumongous, first_hr, end);
744 }
745 }
736 746
737 // Now, we will update the top fields of the "continues humongous" 747 // Now, we will update the top fields of the "continues humongous"
738 // regions. The reason we need to do this is that, otherwise, 748 // regions. The reason we need to do this is that, otherwise,
739 // these regions would look empty and this will confuse parts of 749 // these regions would look empty and this will confuse parts of
740 // G1. For example, the code that looks for a consecutive number 750 // G1. For example, the code that looks for a consecutive number
751 if ((i + 1) == last) { 761 if ((i + 1) == last) {
752 // last continues humongous region 762 // last continues humongous region
753 assert(hr->bottom() < new_top && new_top <= hr->end(), 763 assert(hr->bottom() < new_top && new_top <= hr->end(),
754 "new_top should fall on this region"); 764 "new_top should fall on this region");
755 hr->set_top(new_top); 765 hr->set_top(new_top);
766 _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, new_top);
756 } else { 767 } else {
757 // not last one 768 // not last one
758 assert(new_top > hr->end(), "new_top should be above this region"); 769 assert(new_top > hr->end(), "new_top should be above this region");
759 hr->set_top(hr->end()); 770 hr->set_top(hr->end());
771 _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, hr->end());
760 } 772 }
761 } 773 }
762 // If we have continues humongous regions (hr != NULL), then the 774 // If we have continues humongous regions (hr != NULL), then the
763 // end of the last one should match new_end and its top should 775 // end of the last one should match new_end and its top should
764 // match new_top. 776 // match new_top.
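The two hunks above split a humongous series into three event kinds: SingleHumongous when the object fits in one region (alloc reported up to new_top), otherwise StartsHumongous for the first region (reported up to its original end) and ContinuesHumongous for every follower, whose top is its end except for the last one, which receives new_top. A self-contained sketch of that classification over a toy series; the region size, indices, and addresses are made-up values for illustration:

    #include <cstdio>

    int main() {
      const unsigned region_words = 512;         // toy stand-in for GrainWords
      const unsigned first = 0, last = 3;        // region indices [first, last)
      const unsigned new_top = 2 * region_words + 100;  // top of the series

      for (unsigned i = first; i < last; ++i) {
        unsigned bottom = i * region_words;
        unsigned end = bottom + region_words;
        if (first + 1 == last) {
          // one region holds the whole object
          std::printf("ALLOC(SingleHumongous) top=%u\n", new_top);
        } else if (i == first) {
          std::printf("ALLOC(StartsHumongous) [%u, %u)\n", bottom, end);
        } else if (i + 1 == last) {
          // last "continues humongous" region: new_top lands inside it
          std::printf("ALLOC(ContinuesHumongous) top=%u\n", new_top);
        } else {
          // fully covered follower: top == end
          std::printf("ALLOC(ContinuesHumongous) top=%u\n", end);
        }
      }
      return 0;
    }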
1152 _g1->heap_region_par_iterate_chunked(&rebuild_rs, i, 1164 _g1->heap_region_par_iterate_chunked(&rebuild_rs, i,
1153 HeapRegion::RebuildRSClaimValue); 1165 HeapRegion::RebuildRSClaimValue);
1154 } 1166 }
1155 }; 1167 };
1156 1168
1169 class PostCompactionPrinterClosure: public HeapRegionClosure {
1170 private:
1171 G1HRPrinter* _hr_printer;
1172 public:
1173 bool doHeapRegion(HeapRegion* hr) {
1174 assert(!hr->is_young(), "not expecting to find young regions");
1175 // We only generate output for non-empty regions.
1176 if (!hr->is_empty()) {
1177 if (!hr->isHumongous()) {
1178 _hr_printer->post_compaction(hr, G1HRPrinter::Old);
1179 } else if (hr->startsHumongous()) {
1180 if (hr->capacity() == (size_t) HeapRegion::GrainBytes) {
1181 // single humongous region
1182 _hr_printer->post_compaction(hr, G1HRPrinter::SingleHumongous);
1183 } else {
1184 _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
1185 }
1186 } else {
1187 assert(hr->continuesHumongous(), "only way to get here");
1188 _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
1189 }
1190 }
1191 return false;
1192 }
1193
1194 PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
1195 : _hr_printer(hr_printer) { }
1196 };
1197
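PostCompactionPrinterClosure follows HotSpot's standard closure idiom: heap_region_iterate() applies doHeapRegion() to every region and aborts the walk as soon as a closure returns true, which is why this printer closure always returns false. A minimal sketch of the idiom outside HotSpot, with the names simplified:

    #include <cstdio>

    struct Region { bool empty; };

    // Visitor interface in the style of HeapRegionClosure.
    class RegionClosure {
    public:
      virtual ~RegionClosure() { }
      // Return true to abort the iteration early.
      virtual bool do_region(Region* r) = 0;
    };

    // Analogue of heap_region_iterate(): applies the closure to each region.
    void iterate(Region* regions, int n, RegionClosure* cl) {
      for (int i = 0; i < n; i++) {
        if (cl->do_region(&regions[i])) return;
      }
    }

    class PrintNonEmptyClosure : public RegionClosure {
    public:
      virtual bool do_region(Region* r) {
        // Only non-empty regions generate output, as in the diff.
        if (!r->empty) std::printf("POST-COMPACTION non-empty region\n");
        return false;  // never abort, like PostCompactionPrinterClosure
      }
    };

    int main() {
      Region heap[3] = { {false}, {true}, {false} };
      PrintNonEmptyClosure cl;
      iterate(heap, 3, &cl);
      return 0;
    }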
1157 bool G1CollectedHeap::do_collection(bool explicit_gc, 1198 bool G1CollectedHeap::do_collection(bool explicit_gc,
1158 bool clear_all_soft_refs, 1199 bool clear_all_soft_refs,
1159 size_t word_size) { 1200 size_t word_size) {
1160 assert_at_safepoint(true /* should_be_vm_thread */); 1201 assert_at_safepoint(true /* should_be_vm_thread */);
1161 1202
1233 release_mutator_alloc_region(); 1274 release_mutator_alloc_region();
1234 abandon_gc_alloc_regions(); 1275 abandon_gc_alloc_regions();
1235 g1_rem_set()->cleanupHRRS(); 1276 g1_rem_set()->cleanupHRRS();
1236 tear_down_region_lists(); 1277 tear_down_region_lists();
1237 1278
1279 // We should call this after we retire any currently active alloc
1280 // regions so that all the ALLOC / RETIRE events are generated
1281 // before the start GC event.
1282 _hr_printer.start_gc(true /* full */, (size_t) total_collections());
1283
1238 // We may have added regions to the current incremental collection 1284 // We may have added regions to the current incremental collection
1239 // set between the last GC or pause and now. We need to clear the 1285 // set between the last GC or pause and now. We need to clear the
1240 // incremental collection set and then start rebuilding it afresh 1286 // incremental collection set and then start rebuilding it afresh
1241 // after this full GC. 1287 // after this full GC.
1242 abandon_collection_set(g1_policy()->inc_cset_head()); 1288 abandon_collection_set(g1_policy()->inc_cset_head());
1296 PostMCRemSetClearClosure rs_clear(mr_bs()); 1342 PostMCRemSetClearClosure rs_clear(mr_bs());
1297 heap_region_iterate(&rs_clear); 1343 heap_region_iterate(&rs_clear);
1298 1344
1299 // Resize the heap if necessary. 1345 // Resize the heap if necessary.
1300 resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size); 1346 resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
1347
1348 if (_hr_printer.is_active()) {
1349 // We should do this after we potentially resize the heap so
1350 // that all the COMMIT / UNCOMMIT events are generated before
1351 // the end GC event.
1352
1353 PostCompactionPrinterClosure cl(hr_printer());
1354 heap_region_iterate(&cl);
1355
1356 _hr_printer.end_gc(true /* full */, (size_t) total_collections());
1357 }
1301 1358
1302 if (_cg1r->use_cache()) { 1359 if (_cg1r->use_cache()) {
1303 _cg1r->clear_and_record_card_counts(); 1360 _cg1r->clear_and_record_card_counts();
1304 _cg1r->clear_hot_cache(); 1361 _cg1r->clear_hot_cache();
1305 } 1362 }
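Both new comments in this hunk make the same ordering argument: every ALLOC / RETIRE event belonging to the previous mutator phase must be emitted before the start GC event, and every COMMIT / UNCOMMIT produced by the post-compaction resize must be emitted before the end GC event, so that a log consumer can bracket region events by collection. A sketch of the intended full-GC sequence; the event names and helpers are assumptions based on the calls in this changeset:

    #include <cstdio>

    // Stand-ins for the real retire and resize work.
    static void retire_active_regions() { std::printf("RETIRE ...\n"); }
    static void compact_and_resize()    { std::printf("COMMIT ...\nUNCOMMIT ...\n"); }

    int main() {
      unsigned gc_num = 42;                  // stand-in for total_collections()
      retire_active_regions();               // before the start event
      std::printf("START-GC(full) #%u\n", gc_num);
      compact_and_resize();                  // COMMIT/UNCOMMIT inside the GC
      std::printf("POST-COMPACTION ...\n");  // PostCompactionPrinterClosure pass
      std::printf("END-GC(full) #%u\n", gc_num);
      return 0;
    }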
1652 _g1_storage.shrink_by(diff_bytes); 1709 _g1_storage.shrink_by(diff_bytes);
1653 // Then propagate this update to the necessary data structures. 1710 // Then propagate this update to the necessary data structures.
1654 update_committed_space(new_end, mr.end()); 1711 update_committed_space(new_end, mr.end());
1655 } 1712 }
1656 _free_list.add_as_tail(&expansion_list); 1713 _free_list.add_as_tail(&expansion_list);
1714
1715 if (_hr_printer.is_active()) {
1716 HeapWord* curr = mr.start();
1717 while (curr < mr.end()) {
1718 HeapWord* curr_end = curr + HeapRegion::GrainWords;
1719 _hr_printer.commit(curr, curr_end);
1720 curr = curr_end;
1721 }
1722 assert(curr == mr.end(), "post-condition");
1723 }
1657 } else { 1724 } else {
1658 // The expansion of the virtual storage space was unsuccessful. 1725 // The expansion of the virtual storage space was unsuccessful.
1659 // Let's see if it was because we ran out of swap. 1726 // Let's see if it was because we ran out of swap.
1660 if (G1ExitOnExpansionFailure && 1727 if (G1ExitOnExpansionFailure &&
1661 _g1_storage.uncommitted_size() >= aligned_expand_bytes) { 1728 _g1_storage.uncommitted_size() >= aligned_expand_bytes) {
1682 size_t num_regions_deleted = 0; 1749 size_t num_regions_deleted = 0;
1683 MemRegion mr = _hrs.shrink_by(aligned_shrink_bytes, &num_regions_deleted); 1750 MemRegion mr = _hrs.shrink_by(aligned_shrink_bytes, &num_regions_deleted);
1684 HeapWord* old_end = (HeapWord*) _g1_storage.high(); 1751 HeapWord* old_end = (HeapWord*) _g1_storage.high();
1685 assert(mr.end() == old_end, "post-condition"); 1752 assert(mr.end() == old_end, "post-condition");
1686 if (mr.byte_size() > 0) { 1753 if (mr.byte_size() > 0) {
1754 if (_hr_printer.is_active()) {
1755 HeapWord* curr = mr.end();
1756 while (curr > mr.start()) {
1757 HeapWord* curr_end = curr;
1758 curr -= HeapRegion::GrainWords;
1759 _hr_printer.uncommit(curr, curr_end);
1760 }
1761 assert(curr == mr.start(), "post-condition");
1762 }
1763
1687 _g1_storage.shrink_by(mr.byte_size()); 1764 _g1_storage.shrink_by(mr.byte_size());
1688 HeapWord* new_end = (HeapWord*) _g1_storage.high(); 1765 HeapWord* new_end = (HeapWord*) _g1_storage.high();
1689 assert(mr.start() == new_end, "post-condition"); 1766 assert(mr.start() == new_end, "post-condition");
1690 1767
1691 _expansion_regions += num_regions_deleted; 1768 _expansion_regions += num_regions_deleted;
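Both printer loops above walk a freshly committed or uncommitted MemRegion one region-granule at a time, so each granule gets its own COMMIT or UNCOMMIT event: expansion walks bottom-up and shrinking walks top-down, matching the order in which the memory changes state, and an assert checks that the walk covers the region exactly. A self-contained sketch with byte offsets standing in for HeapWord* and a made-up granule size:

    #include <cstdio>
    #include <cassert>

    int main() {
      const unsigned grain = 1024;           // stand-in for HeapRegion::GrainWords
      const unsigned start = 0, end = 4 * grain;  // the (un)committed MemRegion

      // Expansion: one COMMIT event per granule, walking upward.
      unsigned covered = 0;
      for (unsigned curr = start; curr < end; curr += grain) {
        std::printf("COMMIT   [%u, %u)\n", curr, curr + grain);
        covered += grain;
      }
      assert(covered == end - start);        // mirrors the post-condition assert

      // Shrinking: one UNCOMMIT event per granule, walking downward.
      for (unsigned curr = end; curr > start; ) {
        unsigned curr_end = curr;
        curr -= grain;
        std::printf("UNCOMMIT [%u, %u)\n", curr, curr_end);
      }
      return 0;
    }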
1798 1875
1799 // Necessary to satisfy locking discipline assertions. 1876 // Necessary to satisfy locking discipline assertions.
1800 1877
1801 MutexLocker x(Heap_lock); 1878 MutexLocker x(Heap_lock);
1802 1879
1880 // We have to initialize the printer before committing the heap, as
1881 // it will be used then.
1882 _hr_printer.set_active(G1PrintHeapRegions);
1883
1803 // While there are no constraints in the GC code that HeapWordSize 1884 // While there are no constraints in the GC code that HeapWordSize
1804 // be any particular value, there are multiple other areas in the 1885 // be any particular value, there are multiple other areas in the
1805 // system which believe this to be true (e.g. oop->object_size in some 1886 // system which believe this to be true (e.g. oop->object_size in some
1806 // cases incorrectly returns the size in wordSize units rather than 1887 // cases incorrectly returns the size in wordSize units rather than
1807 // HeapWordSize). 1888 // HeapWordSize).
3344 3425
3345 // Forget the current alloc region (we might even choose it to be part 3426 // Forget the current alloc region (we might even choose it to be part
3346 // of the collection set!). 3427 // of the collection set!).
3347 release_mutator_alloc_region(); 3428 release_mutator_alloc_region();
3348 3429
3430 // We should call this after we retire the mutator alloc
3431 // region(s) so that all the ALLOC / RETIRE events are generated
3432 // before the start GC event.
3433 _hr_printer.start_gc(false /* full */, (size_t) total_collections());
3434
3349 // The elapsed time induced by the start time below deliberately elides 3435 // The elapsed time induced by the start time below deliberately elides
3350 // the possible verification above. 3436 // the possible verification above.
3351 double start_time_sec = os::elapsedTime(); 3437 double start_time_sec = os::elapsedTime();
3352 size_t start_used_bytes = used(); 3438 size_t start_used_bytes = used();
3353 3439
3394 _young_list->print(); 3480 _young_list->print();
3395 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); 3481 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
3396 #endif // YOUNG_LIST_VERBOSE 3482 #endif // YOUNG_LIST_VERBOSE
3397 3483
3398 g1_policy()->choose_collection_set(target_pause_time_ms); 3484 g1_policy()->choose_collection_set(target_pause_time_ms);
3485
3486 if (_hr_printer.is_active()) {
3487 HeapRegion* hr = g1_policy()->collection_set();
3488 while (hr != NULL) {
3489 G1HRPrinter::RegionType type;
3490 if (!hr->is_young()) {
3491 type = G1HRPrinter::Old;
3492 } else if (hr->is_survivor()) {
3493 type = G1HRPrinter::Survivor;
3494 } else {
3495 type = G1HRPrinter::Eden;
3496 }
3497 _hr_printer.cset(hr);
3498 hr = hr->next_in_collection_set();
3499 }
3500 }
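The new block walks the chosen collection set through next_in_collection_set() and classifies each region as Old, Survivor, or Eden; note that in this hunk the computed type is not yet passed to cset(). A sketch of that walk over a toy singly-linked collection set, with the types simplified to stand-ins:

    #include <cstdio>

    struct Region {
      bool young;
      bool survivor;
      Region* next_in_cset;        // stand-in for next_in_collection_set()
    };

    static const char* classify(const Region* r) {
      if (!r->young)   return "Old";       // non-young regions in a mixed cset
      if (r->survivor) return "Survivor";
      return "Eden";
    }

    int main() {
      Region eden  = { true,  false, NULL };
      Region surv  = { true,  true,  &eden };
      Region old_r = { false, false, &surv };
      for (Region* hr = &old_r; hr != NULL; hr = hr->next_in_cset) {
        std::printf("CSET(%s)\n", classify(hr));
      }
      return 0;
    }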
3399 3501
3400 // We have chosen the complete collection set. If marking is 3502 // We have chosen the complete collection set. If marking is
3401 // active then, we clear the region fields of any of the 3503 // active then, we clear the region fields of any of the
3402 // concurrent marking tasks whose region fields point into 3504 // concurrent marking tasks whose region fields point into
3403 // the collection set as these values will become stale. This 3505 // the collection set as these values will become stale. This
3515 assert(capacity() == _g1_storage.committed_size(), "committed size mismatch"); 3617 assert(capacity() == _g1_storage.committed_size(), "committed size mismatch");
3516 assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch"); 3618 assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch");
3517 } 3619 }
3518 } 3620 }
3519 } 3621 }
3622
3623 // We should do this after we potentially expand the heap so
3624 // that all the COMMIT events are generated before the end GC
3625 // event, and after we retire the GC alloc regions so that all
3626 // RETIRE events are generated before the end GC event.
3627 _hr_printer.end_gc(false /* full */, (size_t) total_collections());
3628
3520 // We have to do this after we decide whether to expand the heap or not. 3629 // We have to do this after we decide whether to expand the heap or not.
3521 g1_policy()->print_heap_transition(); 3630 g1_policy()->print_heap_transition();
3522 3631
3523 if (mark_in_progress()) { 3632 if (mark_in_progress()) {
3524 concurrent_mark()->update_g1_committed(); 3633 concurrent_mark()->update_g1_committed();
3754 // we will get a new GC alloc region 3863 // we will get a new GC alloc region
3755 alloc_region = new_gc_alloc_region(ap, HeapRegion::GrainWords); 3864 alloc_region = new_gc_alloc_region(ap, HeapRegion::GrainWords);
3756 } else { 3865 } else {
3757 // the region was retained from the last collection 3866 // the region was retained from the last collection
3758 ++_gc_alloc_region_counts[ap]; 3867 ++_gc_alloc_region_counts[ap];
3759 if (G1PrintHeapRegions) { 3868
3760 gclog_or_tty->print_cr("new alloc region "HR_FORMAT, 3869 _hr_printer.reuse(alloc_region);
3761 HR_FORMAT_PARAMS(alloc_region));
3762 }
3763 } 3870 }
3764 3871
3765 if (alloc_region != NULL) { 3872 if (alloc_region != NULL) {
3766 assert(_gc_alloc_regions[ap] == NULL, "pre-condition"); 3873 assert(_gc_alloc_regions[ap] == NULL, "pre-condition");
3767 set_gc_alloc_region(ap, alloc_region); 3874 set_gc_alloc_region(ap, alloc_region);
4130 preserve_mark_if_necessary(old, m); 4237 preserve_mark_if_necessary(old, m);
4131 4238
4132 HeapRegion* r = heap_region_containing(old); 4239 HeapRegion* r = heap_region_containing(old);
4133 if (!r->evacuation_failed()) { 4240 if (!r->evacuation_failed()) {
4134 r->set_evacuation_failed(true); 4241 r->set_evacuation_failed(true);
4135 if (G1PrintHeapRegions) { 4242 _hr_printer.evac_failure(r);
4136 gclog_or_tty->print("overflow in heap region "PTR_FORMAT" "
4137 "["PTR_FORMAT","PTR_FORMAT")\n",
4138 r, r->bottom(), r->end());
4139 }
4140 } 4243 }
4141 4244
4142 push_on_evac_failure_scan_stack(old); 4245 push_on_evac_failure_scan_stack(old);
4143 4246
4144 if (!_drain_in_progress) { 4247 if (!_drain_in_progress) {
4195 // object. 4298 // object.
4196 if (par) par_allocate_remaining_space(alloc_region); 4299 if (par) par_allocate_remaining_space(alloc_region);
4197 // Now we can do the post-GC stuff on the region. 4300 // Now we can do the post-GC stuff on the region.
4198 alloc_region->note_end_of_copying(); 4301 alloc_region->note_end_of_copying();
4199 g1_policy()->record_after_bytes(alloc_region->used()); 4302 g1_policy()->record_after_bytes(alloc_region->used());
4303 _hr_printer.retire(alloc_region);
4200 } 4304 }
4201 4305
4202 HeapWord* 4306 HeapWord*
4203 G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose, 4307 G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose,
4204 HeapRegion* alloc_region, 4308 HeapRegion* alloc_region,
5464 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size, 5568 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
5465 bool force) { 5569 bool force) {
5466 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); 5570 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
5467 assert(!force || g1_policy()->can_expand_young_list(), 5571 assert(!force || g1_policy()->can_expand_young_list(),
5468 "if force is true we should be able to expand the young list"); 5572 "if force is true we should be able to expand the young list");
5469 if (force || !g1_policy()->is_young_list_full()) { 5573 bool young_list_full = g1_policy()->is_young_list_full();
5574 if (force || !young_list_full) {
5470 HeapRegion* new_alloc_region = new_region(word_size, 5575 HeapRegion* new_alloc_region = new_region(word_size,
5471 false /* do_expand */); 5576 false /* do_expand */);
5472 if (new_alloc_region != NULL) { 5577 if (new_alloc_region != NULL) {
5473 g1_policy()->update_region_num(true /* next_is_young */); 5578 g1_policy()->update_region_num(true /* next_is_young */);
5474 set_region_short_lived_locked(new_alloc_region); 5579 set_region_short_lived_locked(new_alloc_region);
5580 _hr_printer.alloc(new_alloc_region, G1HRPrinter::Eden, young_list_full);
5475 g1mm()->update_eden_counters(); 5581 g1mm()->update_eden_counters();
5476 return new_alloc_region; 5582 return new_alloc_region;
5477 } 5583 }
5478 } 5584 }
5479 return NULL; 5585 return NULL;
5484 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); 5590 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
5485 assert(alloc_region->is_young(), "all mutator alloc regions should be young"); 5591 assert(alloc_region->is_young(), "all mutator alloc regions should be young");
5486 5592
5487 g1_policy()->add_region_to_incremental_cset_lhs(alloc_region); 5593 g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);
5488 _summary_bytes_used += allocated_bytes; 5594 _summary_bytes_used += allocated_bytes;
5595 _hr_printer.retire(alloc_region);
5489 } 5596 }
5490 5597
5491 HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size, 5598 HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
5492 bool force) { 5599 bool force) {
5493 return _g1h->new_mutator_alloc_region(word_size, force); 5600 return _g1h->new_mutator_alloc_region(word_size, force);
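Taken together, the last hunks give the mutator allocation region a complete printed lifecycle: an ALLOC(Eden) event when new_mutator_alloc_region() hands a region out, tagged with whether the young list was already full (which only happens on a forced expansion), and a RETIRE event when retire_mutator_alloc_region() folds the region's allocated bytes into _summary_bytes_used. A compressed sketch of that lifecycle; all names here are stand-ins:

    #include <cstdio>

    struct Region { unsigned used_bytes; };

    static unsigned summary_bytes_used = 0;

    // ALLOC event at hand-out, as in new_mutator_alloc_region().
    Region* allocate_region(bool young_list_full) {
      static Region r = { 0 };
      std::printf("ALLOC(Eden) young_list_full=%d\n", young_list_full ? 1 : 0);
      return &r;
    }

    // RETIRE event at hand-back, as in retire_mutator_alloc_region().
    void retire_region(Region* r, unsigned allocated_bytes) {
      summary_bytes_used += allocated_bytes;   // _summary_bytes_used += ...
      std::printf("RETIRE used=%u total=%u\n", allocated_bytes, summary_bytes_used);
    }

    int main() {
      Region* r = allocate_region(false);
      r->used_bytes = 4096;
      retire_region(r, r->used_bytes);
      return 0;
    }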