comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 2188:c33825b68624

6923430: G1: assert(res != 0,"This should have worked.")
7007446: G1: expand the heap with a single step, not one region at a time
Summary: Changed G1CollectedHeap::expand() to expand the committed space by calling VirtualSpace::expand_by() once rather than once for every region in the expansion amount. This allows the success or failure of the expansion to be determined before creating any heap regions. Introduced a develop flag, G1ExitOnExpansionFailure (false by default), that, when true, will exit the VM if the expansion of the committed space fails. Finally, G1CollectedHeap::expand() now returns a status to its caller so that the caller knows whether to attempt the allocation.
Reviewed-by: brutisso, tonyp
author johnc
date Wed, 02 Feb 2011 10:41:20 -0800
parents 97ba643ea3ed
children 4e0069ff33df
comparing 2187:986b2844f7a2 with 2188:c33825b68624
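
The summary above describes the new expand() contract: the committed space is grown with a single VirtualSpace::expand_by() call, the per-region bookkeeping is only created once that commit has succeeded, and the boolean result tells the caller whether it is worth retrying the allocation. The following is a minimal, self-contained C++ sketch of that pattern; MockStorage, MockHeap and RegionBytes are hypothetical stand-ins for illustration, not the real HotSpot classes, so this shows the calling convention rather than the actual implementation in the hunks below.

    // Illustrative sketch only -- MockStorage/MockHeap are stand-ins, not HotSpot types.
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    static const size_t RegionBytes = 1024 * 1024;    // stand-in for HeapRegion::GrainBytes

    struct MockStorage {                              // stand-in for the committed backing store
      size_t committed = 0;
      size_t reserved  = 0;
      // Pretend to commit 'bytes' more of the reservation; fail if it does not fit.
      bool expand_by(size_t bytes) {
        if (committed + bytes > reserved) return false;
        committed += bytes;
        return true;
      }
    };

    struct MockHeap {
      MockStorage storage;
      std::vector<size_t> free_regions;               // indices of free regions

      // Commit the whole (region-aligned) expansion in one step and only create
      // the per-region bookkeeping after the commit is known to have succeeded.
      bool expand(size_t expand_bytes) {
        size_t aligned = ((expand_bytes + RegionBytes - 1) / RegionBytes) * RegionBytes;
        size_t old_committed = storage.committed;
        if (!storage.expand_by(aligned)) {
          return false;                               // nothing was created; caller sees the failure
        }
        for (size_t off = old_committed; off < storage.committed; off += RegionBytes) {
          free_regions.push_back(off / RegionBytes);  // one new free region per grain
        }
        return true;
      }

      // Caller-side pattern: only retry the allocation when the expansion worked.
      bool allocate_region(size_t* out_index) {
        if (free_regions.empty() && !expand(RegionBytes)) {
          return false;                               // expansion failed; give up cleanly
        }
        *out_index = free_regions.back();
        free_regions.pop_back();
        return true;
      }
    };

    int main() {
      MockHeap heap;
      heap.storage.reserved = 4 * RegionBytes;        // small reservation so expansion eventually fails
      size_t idx;
      while (heap.allocate_region(&idx)) {
        std::printf("allocated region %zu\n", idx);
      }
      std::printf("expansion failed once the reservation was exhausted\n");
      return 0;
    }

The essential point, mirrored in the hunks below, is that a failed expansion leaves no half-created regions behind, and each caller can fall back sensibly (return NULL, exit during initialization, or just verify its bookkeeping) instead of assuming the free list grew.
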
544 "res == NULL, trying the secondary_free_list"); 544 "res == NULL, trying the secondary_free_list");
545 } 545 }
546 res = new_region_try_secondary_free_list(word_size); 546 res = new_region_try_secondary_free_list(word_size);
547 } 547 }
548 if (res == NULL && do_expand) { 548 if (res == NULL && do_expand) {
549 expand(word_size * HeapWordSize); 549 if (expand(word_size * HeapWordSize)) {
550 res = _free_list.remove_head_or_null(); 550 // The expansion succeeded and so we should have at least one
551 // region on the free list.
552 res = _free_list.remove_head();
553 }
551 } 554 }
552 if (res != NULL) { 555 if (res != NULL) {
553 if (G1PrintHeapRegions) { 556 if (G1PrintHeapRegions) {
554 gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT","PTR_FORMAT"], " 557 gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT","PTR_FORMAT"], "
555 "top "PTR_FORMAT, res->hrs_index(), 558 "top "PTR_FORMAT, res->hrs_index(),
@@ -629,13 +632,26 @@
   size_t fs = _hrs->free_suffix();
   int first = humongous_obj_allocate_find_first(num_regions, word_size);
   if (first == -1) {
     // The only thing we can do now is attempt expansion.
     if (fs + x_size >= num_regions) {
-      expand((num_regions - fs) * HeapRegion::GrainBytes);
-      first = humongous_obj_allocate_find_first(num_regions, word_size);
-      assert(first != -1, "this should have worked");
+      // If the number of regions we're trying to allocate for this
+      // object is at most the number of regions in the free suffix,
+      // then the call to humongous_obj_allocate_find_first() above
+      // should have succeeded and we wouldn't be here.
+      //
+      // We should only be trying to expand when the free suffix is
+      // not sufficient for the object _and_ we have some expansion
+      // room available.
+      assert(num_regions > fs, "earlier allocation should have succeeded");
+
+      if (expand((num_regions - fs) * HeapRegion::GrainBytes)) {
+        first = humongous_obj_allocate_find_first(num_regions, word_size);
+        // If the expansion was successful then the allocation
+        // should have been successful.
+        assert(first != -1, "this should have worked");
+      }
     }
   }

   if (first != -1) {
     // Index of last region in the series + 1.
@@ -1645,20 +1661,21 @@
                            free_percentage);
   }
   if (capacity_after_gc < minimum_desired_capacity) {
     // Don't expand unless it's significant
     size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
-    expand(expand_bytes);
+    if (expand(expand_bytes)) {
       if (PrintGC && Verbose) {
         gclog_or_tty->print_cr("  "
                                "  expanding:"
                                "  max_heap_size: %6.1fK"
                                "  minimum_desired_capacity: %6.1fK"
                                "  expand_bytes: %6.1fK",
                                (double) max_heap_size / (double) K,
                                (double) minimum_desired_capacity / (double) K,
                                (double) expand_bytes / (double) K);
       }
+    }

     // No expansion, now see if we want to shrink
   } else if (capacity_after_gc > maximum_desired_capacity) {
     // Capacity too large, compute shrinking size
@@ -1755,70 +1772,88 @@
 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
   assert_at_safepoint(true /* should_be_vm_thread */);

   verify_region_sets_optional();

-  size_t expand_bytes = word_size * HeapWordSize;
-  if (expand_bytes < MinHeapDeltaBytes) {
-    expand_bytes = MinHeapDeltaBytes;
-  }
-  expand(expand_bytes);
-
-  verify_region_sets_optional();
-
-  return attempt_allocation_at_safepoint(word_size,
-                                    false /* expect_null_cur_alloc_region */);
+  size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
+  if (expand(expand_bytes)) {
+    verify_region_sets_optional();
+    return attempt_allocation_at_safepoint(word_size,
+                                      false /* expect_null_cur_alloc_region */);
+  }
+  return NULL;
 }

-// FIXME: both this and shrink could probably be more efficient by
-// doing one "VirtualSpace::expand_by" call rather than several.
-void G1CollectedHeap::expand(size_t expand_bytes) {
+bool G1CollectedHeap::expand(size_t expand_bytes) {
   size_t old_mem_size = _g1_storage.committed_size();
-  // We expand by a minimum of 1K.
-  expand_bytes = MAX2(expand_bytes, (size_t)K);
-  size_t aligned_expand_bytes =
-    ReservedSpace::page_align_size_up(expand_bytes);
+  size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
   aligned_expand_bytes = align_size_up(aligned_expand_bytes,
                                        HeapRegion::GrainBytes);
-  expand_bytes = aligned_expand_bytes;
-  while (expand_bytes > 0) {
-    HeapWord* base = (HeapWord*)_g1_storage.high();
-    // Commit more storage.
-    bool successful = _g1_storage.expand_by(HeapRegion::GrainBytes);
-    if (!successful) {
-      expand_bytes = 0;
-    } else {
-      expand_bytes -= HeapRegion::GrainBytes;
-      // Expand the committed region.
-      HeapWord* high = (HeapWord*) _g1_storage.high();
-      _g1_committed.set_end(high);
+
+  if (Verbose && PrintGC) {
+    gclog_or_tty->print("Expanding garbage-first heap from %ldK by %ldK",
+                        old_mem_size/K, aligned_expand_bytes/K);
+  }
+
+  HeapWord* old_end = (HeapWord*)_g1_storage.high();
+  bool successful = _g1_storage.expand_by(aligned_expand_bytes);
+  if (successful) {
+    HeapWord* new_end = (HeapWord*)_g1_storage.high();
+
+    // Expand the committed region.
+    _g1_committed.set_end(new_end);
+
+    // Tell the cardtable about the expansion.
+    Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
+
+    // And the offset table as well.
+    _bot_shared->resize(_g1_committed.word_size());
+
+    expand_bytes = aligned_expand_bytes;
+    HeapWord* base = old_end;
+
+    // Create the heap regions for [old_end, new_end)
+    while (expand_bytes > 0) {
+      HeapWord* high = base + HeapRegion::GrainWords;
+
       // Create a new HeapRegion.
       MemRegion mr(base, high);
       bool is_zeroed = !_g1_max_committed.contains(base);
       HeapRegion* hr = new HeapRegion(_bot_shared, mr, is_zeroed);

-      // Now update max_committed if necessary.
-      _g1_max_committed.set_end(MAX2(_g1_max_committed.end(), high));
-
       // Add it to the HeapRegionSeq.
       _hrs->insert(hr);
       _free_list.add_as_tail(hr);
+
       // And we used up an expansion region to create it.
       _expansion_regions--;
-      // Tell the cardtable about it.
-      Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
-      // And the offset table as well.
-      _bot_shared->resize(_g1_committed.word_size());
+
+      expand_bytes -= HeapRegion::GrainBytes;
+      base += HeapRegion::GrainWords;
+    }
+    assert(base == new_end, "sanity");
+
+    // Now update max_committed if necessary.
+    _g1_max_committed.set_end(MAX2(_g1_max_committed.end(), new_end));
+
+  } else {
+    // The expansion of the virtual storage space was unsuccessful.
+    // Let's see if it was because we ran out of swap.
+    if (G1ExitOnExpansionFailure &&
+        _g1_storage.uncommitted_size() >= aligned_expand_bytes) {
+      // We had head room...
+      vm_exit_out_of_memory(aligned_expand_bytes, "G1 heap expansion");
     }
   }

   if (Verbose && PrintGC) {
     size_t new_mem_size = _g1_storage.committed_size();
-    gclog_or_tty->print_cr("Expanding garbage-first heap from %ldK by %ldK to %ldK",
-                           old_mem_size/K, aligned_expand_bytes/K,
+    gclog_or_tty->print_cr("...%s, expanded to %ldK",
+                           (successful ? "Successful" : "Failed"),
                            new_mem_size/K);
   }
+  return successful;
 }

 void G1CollectedHeap::shrink_helper(size_t shrink_bytes)
 {
   size_t old_mem_size = _g1_storage.committed_size();
@@ -2086,11 +2121,14 @@

   // Initialize the from_card cache structure of HeapRegionRemSet.
   HeapRegionRemSet::init_heap(max_regions());

   // Now expand into the initial heap size.
-  expand(init_byte_size);
+  if (!expand(init_byte_size)) {
+    vm_exit_during_initialization("Failed to allocate initial heap.");
+    return JNI_ENOMEM;
+  }

   // Perform any initialization actions delegated to the policy.
   g1_policy()->init();

   g1_policy()->note_start_of_mark_thread();
@@ -2742,11 +2780,11 @@
   // FIXME
   return HeapRegion::GrainBytes/HeapWordSize;
 }

 size_t G1CollectedHeap::max_capacity() const {
-  return g1_reserved_obj_bytes();
+  return _g1_reserved.byte_size();
 }

 jlong G1CollectedHeap::millis_since_last_gc() {
   // assert(false, "NYI");
   return 0;
@@ -3536,11 +3574,16 @@

   {
     size_t expand_bytes = g1_policy()->expansion_amount();
     if (expand_bytes > 0) {
       size_t bytes_before = capacity();
-      expand(expand_bytes);
+      if (!expand(expand_bytes)) {
+        // We failed to expand the heap so let's verify that
+        // committed/uncommitted amount match the backing store
+        assert(capacity() == _g1_storage.committed_size(), "committed size mismatch");
+        assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch");
+      }
     }
   }

   if (mark_in_progress()) {
     concurrent_mark()->update_g1_committed();
@@ -3760,11 +3803,11 @@
     }
   }

   if (alloc_region == NULL) {
     // we will get a new GC alloc region
-    alloc_region = new_gc_alloc_region(ap, 0);
+    alloc_region = new_gc_alloc_region(ap, HeapRegion::GrainWords);
   } else {
     // the region was retained from the last collection
     ++_gc_alloc_region_counts[ap];
     if (G1PrintHeapRegions) {
       gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], "
@@ -5309,11 +5352,11 @@
   return _hrs->length();
 }

 size_t G1CollectedHeap::max_regions() {
   return
-    (size_t)align_size_up(g1_reserved_obj_bytes(), HeapRegion::GrainBytes) /
+    (size_t)align_size_up(max_capacity(), HeapRegion::GrainBytes) /
     HeapRegion::GrainBytes;
 }

 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
   assert(heap_lock_held_for_gc(),