comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 20336:6701abbc4441

8054818: Refactor HeapRegionSeq to manage heap region and auxiliary data
Summary: Let HeapRegionSeq manage the heap region and auxiliary data to decrease the amount of responsibilities of G1CollectedHeap, and encapsulate this work from other code.
Reviewed-by: jwilhelm, jmasa, mgerdin, brutisso
author tschatzl
date Tue, 19 Aug 2014 10:50:27 +0200
parents eec72fa4b108
children 1f1d373cd044
left column: 20335:eec72fa4b108 (parent)    right column: 20336:6701abbc4441 (this changeset)
46 #include "gc_implementation/g1/g1RemSet.inline.hpp" 46 #include "gc_implementation/g1/g1RemSet.inline.hpp"
47 #include "gc_implementation/g1/g1StringDedup.hpp" 47 #include "gc_implementation/g1/g1StringDedup.hpp"
48 #include "gc_implementation/g1/g1YCTypes.hpp" 48 #include "gc_implementation/g1/g1YCTypes.hpp"
49 #include "gc_implementation/g1/heapRegion.inline.hpp" 49 #include "gc_implementation/g1/heapRegion.inline.hpp"
50 #include "gc_implementation/g1/heapRegionRemSet.hpp" 50 #include "gc_implementation/g1/heapRegionRemSet.hpp"
51 #include "gc_implementation/g1/heapRegionSeq.inline.hpp" 51 #include "gc_implementation/g1/heapRegionSet.inline.hpp"
52 #include "gc_implementation/g1/vm_operations_g1.hpp" 52 #include "gc_implementation/g1/vm_operations_g1.hpp"
53 #include "gc_implementation/shared/gcHeapSummary.hpp" 53 #include "gc_implementation/shared/gcHeapSummary.hpp"
54 #include "gc_implementation/shared/gcTimer.hpp" 54 #include "gc_implementation/shared/gcTimer.hpp"
55 #include "gc_implementation/shared/gcTrace.hpp" 55 #include "gc_implementation/shared/gcTrace.hpp"
56 #include "gc_implementation/shared/gcTraceTime.hpp" 56 #include "gc_implementation/shared/gcTraceTime.hpp"
517 // It looks as if there are free regions available on the 517 // It looks as if there are free regions available on the
518 // secondary_free_list. Let's move them to the free_list and try 518 // secondary_free_list. Let's move them to the free_list and try
519 // again to allocate from it. 519 // again to allocate from it.
520 append_secondary_free_list(); 520 append_secondary_free_list();
521 521
522 assert(!_free_list.is_empty(), "if the secondary_free_list was not " 522 assert(_hrs.num_free_regions() > 0, "if the secondary_free_list was not "
523 "empty we should have moved at least one entry to the free_list"); 523 "empty we should have moved at least one entry to the free_list");
524 HeapRegion* res = _free_list.remove_region(is_old); 524 HeapRegion* res = _hrs.allocate_free_region(is_old);
525 if (G1ConcRegionFreeingVerbose) { 525 if (G1ConcRegionFreeingVerbose) {
526 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " 526 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
527 "allocated "HR_FORMAT" from secondary_free_list", 527 "allocated "HR_FORMAT" from secondary_free_list",
528 HR_FORMAT_PARAMS(res)); 528 HR_FORMAT_PARAMS(res));
529 } 529 }
560 return res; 560 return res;
561 } 561 }
562 } 562 }
563 } 563 }
564 564
565 res = _free_list.remove_region(is_old); 565 res = _hrs.allocate_free_region(is_old);
566 566
567 if (res == NULL) { 567 if (res == NULL) {
568 if (G1ConcRegionFreeingVerbose) { 568 if (G1ConcRegionFreeingVerbose) {
569 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " 569 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
570 "res == NULL, trying the secondary_free_list"); 570 "res == NULL, trying the secondary_free_list");
585 word_size * HeapWordSize); 585 word_size * HeapWordSize);
586 if (expand(word_size * HeapWordSize)) { 586 if (expand(word_size * HeapWordSize)) {
587 // Given that expand() succeeded in expanding the heap, and we 587 // Given that expand() succeeded in expanding the heap, and we
588 // always expand the heap by an amount aligned to the heap 588 // always expand the heap by an amount aligned to the heap
589 // region size, the free list should in theory not be empty. 589 // region size, the free list should in theory not be empty.
590 // In either case remove_region() will check for NULL. 590 // In either case allocate_free_region() will check for NULL.
591 res = _free_list.remove_region(is_old); 591 res = _hrs.allocate_free_region(is_old);
592 } else { 592 } else {
593 _expand_heap_after_alloc_failure = false; 593 _expand_heap_after_alloc_failure = false;
594 } 594 }
595 } 595 }
596 return res; 596 return res;
597 }
598
599 uint G1CollectedHeap::humongous_obj_allocate_find_first(uint num_regions,
600 size_t word_size) {
601 assert(isHumongous(word_size), "word_size should be humongous");
602 assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
603
604 uint first = G1_NULL_HRS_INDEX;
605 if (num_regions == 1) {
606 // Only one region to allocate, no need to go through the slower
607 // path. The caller will attempt the expansion if this fails, so
608 // let's not try to expand here too.
609 HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
610 if (hr != NULL) {
611 first = hr->hrs_index();
612 } else {
613 first = G1_NULL_HRS_INDEX;
614 }
615 } else {
616 // We can't allocate humongous regions while cleanupComplete() is
617 // running, since some of the regions we find to be empty might not
618 // yet be added to the free list and it is not straightforward to
619 // know which list they are on so that we can remove them. Note
620 // that we only need to do this if we need to allocate more than
621 // one region to satisfy the current humongous allocation
622 // request. If we are only allocating one region we use the common
623 // region allocation code (see above).
624 wait_while_free_regions_coming();
625 append_secondary_free_list_if_not_empty_with_lock();
626
627 if (free_regions() >= num_regions) {
628 first = _hrs.find_contiguous(num_regions);
629 if (first != G1_NULL_HRS_INDEX) {
630 for (uint i = first; i < first + num_regions; ++i) {
631 HeapRegion* hr = region_at(i);
632 assert(hr->is_empty(), "sanity");
633 assert(is_on_master_free_list(hr), "sanity");
634 hr->set_pending_removal(true);
635 }
636 _free_list.remove_all_pending(num_regions);
637 }
638 }
639 }
640 return first;
641 } 597 }
642 598
643 HeapWord* 599 HeapWord*
644 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first, 600 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
645 uint num_regions, 601 uint num_regions,
646 size_t word_size) { 602 size_t word_size) {
647 assert(first != G1_NULL_HRS_INDEX, "pre-condition"); 603 assert(first != G1_NO_HRS_INDEX, "pre-condition");
648 assert(isHumongous(word_size), "word_size should be humongous"); 604 assert(isHumongous(word_size), "word_size should be humongous");
649 assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition"); 605 assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
650 606
651 // Index of last region in the series + 1. 607 // Index of last region in the series + 1.
652 uint last = first + num_regions; 608 uint last = first + num_regions;
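Note (not part of the changeset): this hunk, together with the previous one, routes region allocation through HeapRegionSeq — `_free_list.remove_region(is_old)` becomes `_hrs.allocate_free_region(is_old)`, and the separate `humongous_obj_allocate_find_first` helper disappears. The fallback chain in `new_region` itself is unchanged: take a region from the free list, drain the secondary free list if needed, and expand the heap as a last resort. Below is a minimal standalone sketch of that chain; `RegionManager`, `Region` and the method bodies are simplified stand-ins, not the HotSpot API.

```cpp
// Hypothetical, simplified stand-in for HeapRegionSeq's free-region handling.
#include <cstddef>
#include <deque>

struct Region { bool is_old = false; };

class RegionManager {
  std::deque<Region*> _free;        // committed, free regions
  std::deque<Region*> _secondary;   // regions freed concurrently, not yet published
  size_t _uncommitted = 4;          // regions we could still commit (expand into)
public:
  void add_free(Region* r)      { _free.push_back(r); }
  void add_secondary(Region* r) { _secondary.push_back(r); }

  size_t num_free_regions() const { return _free.size(); }

  // Move everything from the secondary list onto the master free list.
  void append_secondary_free_list() {
    while (!_secondary.empty()) { _free.push_back(_secondary.front()); _secondary.pop_front(); }
  }

  // Take one region from the free list, or NULL if it is empty.
  Region* allocate_free_region(bool is_old) {
    if (_free.empty()) return nullptr;
    Region* r = _free.front(); _free.pop_front();
    r->is_old = is_old;
    return r;
  }

  // Commit one more region if reserve space is left (leaks in this sketch).
  bool expand_by_one() {
    if (_uncommitted == 0) return false;
    --_uncommitted;
    _free.push_back(new Region());
    return true;
  }
};

// Fallback chain analogous to G1CollectedHeap::new_region:
// free list -> secondary free list -> heap expansion.
Region* new_region(RegionManager& hrs, bool is_old, bool do_expand) {
  Region* res = hrs.allocate_free_region(is_old);
  if (res == nullptr) {
    hrs.append_secondary_free_list();
    res = hrs.allocate_free_region(is_old);
  }
  if (res == nullptr && do_expand && hrs.expand_by_one()) {
    res = hrs.allocate_free_region(is_old);
  }
  return res;
}
```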
780 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) { 736 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
781 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); 737 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
782 738
783 verify_region_sets_optional(); 739 verify_region_sets_optional();
784 740
785 size_t word_size_rounded = round_to(word_size, HeapRegion::GrainWords); 741 uint first = G1_NO_HRS_INDEX;
786 uint num_regions = (uint) (word_size_rounded / HeapRegion::GrainWords); 742 uint obj_regions = (uint)(align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords);
787 uint x_num = expansion_regions(); 743
788 uint fs = _hrs.free_suffix(); 744 if (obj_regions == 1) {
789 uint first = humongous_obj_allocate_find_first(num_regions, word_size); 745 // Only one region to allocate, try to use a fast path by directly allocating
790 if (first == G1_NULL_HRS_INDEX) { 746 // from the free lists. Do not try to expand here, we will potentially do that
791 // The only thing we can do now is attempt expansion. 747 // later.
792 if (fs + x_num >= num_regions) { 748 HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
793 // If the number of regions we're trying to allocate for this 749 if (hr != NULL) {
794 // object is at most the number of regions in the free suffix, 750 first = hr->hrs_index();
795 // then the call to humongous_obj_allocate_find_first() above 751 }
796 // should have succeeded and we wouldn't be here. 752 } else {
797 // 753 // We can't allocate humongous regions spanning more than one region while
798 // We should only be trying to expand when the free suffix is 754 // cleanupComplete() is running, since some of the regions we find to be
799 // not sufficient for the object _and_ we have some expansion 755 // empty might not yet be added to the free list. It is not straightforward
800 // room available. 756 // to know in which list they are on so that we can remove them. We only
801 assert(num_regions > fs, "earlier allocation should have succeeded"); 757 // need to do this if we need to allocate more than one region to satisfy the
802 758 // current humongous allocation request. If we are only allocating one region
759 // we use the one-region region allocation code (see above), or end up here.
760 wait_while_free_regions_coming();
761 append_secondary_free_list_if_not_empty_with_lock();
762
763 // Policy: Try only empty regions (i.e. already committed first). Maybe we
764 // are lucky enough to find some.
765 first = _hrs.find_contiguous(obj_regions, true);
766 if (first != G1_NO_HRS_INDEX) {
767 _hrs.allocate_free_regions_starting_at(first, obj_regions);
768 }
769 }
770
771 if (first == G1_NO_HRS_INDEX) {
772 // Policy: We could not find enough regions for the humongous object in the
773 // free list. Look through the heap to find a mix of free and uncommitted regions.
774 // If so, try expansion.
775 first = _hrs.find_contiguous(obj_regions, false);
776 if (first != G1_NO_HRS_INDEX) {
777 // We found something. Make sure these regions are committed, i.e. expand
778 // the heap. Alternatively we could do a defragmentation GC.
803 ergo_verbose1(ErgoHeapSizing, 779 ergo_verbose1(ErgoHeapSizing,
804 "attempt heap expansion", 780 "attempt heap expansion",
805 ergo_format_reason("humongous allocation request failed") 781 ergo_format_reason("humongous allocation request failed")
806 ergo_format_byte("allocation request"), 782 ergo_format_byte("allocation request"),
807 word_size * HeapWordSize); 783 word_size * HeapWordSize);
808 if (expand((num_regions - fs) * HeapRegion::GrainBytes)) { 784
809 // Even though the heap was expanded, it might not have 785 _hrs.expand_at(first, obj_regions);
810 // reached the desired size. So, we cannot assume that the 786 g1_policy()->record_new_heap_size(num_regions());
811 // allocation will succeed. 787
812 first = humongous_obj_allocate_find_first(num_regions, word_size); 788 #ifdef ASSERT
789 for (uint i = first; i < first + obj_regions; ++i) {
790 HeapRegion* hr = region_at(i);
791 assert(hr->is_empty(), "sanity");
792 assert(is_on_master_free_list(hr), "sanity");
813 } 793 }
794 #endif
795 _hrs.allocate_free_regions_starting_at(first, obj_regions);
796 } else {
797 // Policy: Potentially trigger a defragmentation GC.
814 } 798 }
815 } 799 }
816 800
817 HeapWord* result = NULL; 801 HeapWord* result = NULL;
818 if (first != G1_NULL_HRS_INDEX) { 802 if (first != G1_NO_HRS_INDEX) {
819 result = 803 result = humongous_obj_allocate_initialize_regions(first, obj_regions, word_size);
820 humongous_obj_allocate_initialize_regions(first, num_regions, word_size);
821 assert(result != NULL, "it should always return a valid result"); 804 assert(result != NULL, "it should always return a valid result");
822 805
823 // A successful humongous object allocation changes the used space 806 // A successful humongous object allocation changes the used space
824 // information of the old generation so we need to recalculate the 807 // information of the old generation so we need to recalculate the
825 // sizes and update the jstat counters here. 808 // sizes and update the jstat counters here.
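Note (not part of the changeset): the rewritten `humongous_obj_allocate` applies a two-step policy — first ask for a contiguous run of already-committed free regions (`find_contiguous(obj_regions, true)`), and only if that fails look for a run that may include uncommitted regions (`find_contiguous(obj_regions, false)`), committing it via `expand_at`. The sketch below mirrors that policy over a simplified region-state array; the enum, helper functions and constants are illustrative assumptions, not the HotSpot implementation.

```cpp
#include <cstdint>
#include <vector>

enum class RegionState : uint8_t { Uncommitted, Free, Used };

constexpr uint32_t NO_INDEX = UINT32_MAX;

// Find the first run of 'num' regions that are free (and, if committed_only,
// already committed). Mirrors HeapRegionSeq::find_contiguous in spirit.
uint32_t find_contiguous(const std::vector<RegionState>& regions,
                         uint32_t num, bool committed_only) {
  uint32_t run_start = 0, run_len = 0;
  for (uint32_t i = 0; i < regions.size(); ++i) {
    bool usable = regions[i] == RegionState::Free ||
                  (!committed_only && regions[i] == RegionState::Uncommitted);
    if (!usable) { run_len = 0; continue; }
    if (run_len == 0) run_start = i;
    if (++run_len == num) return run_start;
  }
  return NO_INDEX;
}

// Commit any uncommitted regions in [first, first + num).
void expand_at(std::vector<RegionState>& regions, uint32_t first, uint32_t num) {
  for (uint32_t i = first; i < first + num; ++i)
    if (regions[i] == RegionState::Uncommitted) regions[i] = RegionState::Free;
}

// Two-step humongous allocation policy: prefer committed free regions,
// fall back to expanding over a mixed free/uncommitted run.
uint32_t allocate_humongous(std::vector<RegionState>& regions, uint32_t num) {
  uint32_t first = find_contiguous(regions, num, true /* committed only */);
  if (first == NO_INDEX) {
    first = find_contiguous(regions, num, false /* allow uncommitted */);
    if (first != NO_INDEX) expand_at(regions, first, num);
  }
  if (first != NO_INDEX)
    for (uint32_t i = first; i < first + num; ++i) regions[i] = RegionState::Used;
  return first;
}
```

Trying committed regions first avoids paying for commits when the existing free list already contains a suitable run; only when that fails does the allocator expand the heap (or, as the new comment notes, a defragmentation GC could be triggered instead).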
1378 { 1361 {
1379 HandleMark hm; // Discard invalid handles created during gc 1362 HandleMark hm; // Discard invalid handles created during gc
1380 G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs); 1363 G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
1381 } 1364 }
1382 1365
1383 assert(free_regions() == 0, "we should not have added any free regions"); 1366 assert(num_free_regions() == 0, "we should not have added any free regions");
1384 rebuild_region_sets(false /* free_list_only */); 1367 rebuild_region_sets(false /* free_list_only */);
1385 1368
1386 // Enqueue any discovered reference objects that have 1369 // Enqueue any discovered reference objects that have
1387 // not been removed from the discovered lists. 1370 // not been removed from the discovered lists.
1388 ref_processor_stw()->enqueue_discovered_references(); 1371 ref_processor_stw()->enqueue_discovered_references();
1743 false /* expect_null_mutator_alloc_region */); 1726 false /* expect_null_mutator_alloc_region */);
1744 } 1727 }
1745 return NULL; 1728 return NULL;
1746 } 1729 }
1747 1730
1748 void G1CollectedHeap::update_committed_space(HeapWord* old_end,
1749 HeapWord* new_end) {
1750 assert(old_end != new_end, "don't call this otherwise");
1751 assert((HeapWord*) _g1_storage.high() == new_end, "invariant");
1752
1753 // Update the committed mem region.
1754 _g1_committed.set_end(new_end);
1755 // Tell the card table about the update.
1756 Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
1757 // Tell the BOT about the update.
1758 _bot_shared->resize(_g1_committed.word_size());
1759 // Tell the hot card cache about the update
1760 _cg1r->hot_card_cache()->resize_card_counts(capacity());
1761 }
1762
1763 bool G1CollectedHeap::expand(size_t expand_bytes) { 1731 bool G1CollectedHeap::expand(size_t expand_bytes) {
1764 size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes); 1732 size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
1765 aligned_expand_bytes = align_size_up(aligned_expand_bytes, 1733 aligned_expand_bytes = align_size_up(aligned_expand_bytes,
1766 HeapRegion::GrainBytes); 1734 HeapRegion::GrainBytes);
1767 ergo_verbose2(ErgoHeapSizing, 1735 ergo_verbose2(ErgoHeapSizing,
1768 "expand the heap", 1736 "expand the heap",
1769 ergo_format_byte("requested expansion amount") 1737 ergo_format_byte("requested expansion amount")
1770 ergo_format_byte("attempted expansion amount"), 1738 ergo_format_byte("attempted expansion amount"),
1771 expand_bytes, aligned_expand_bytes); 1739 expand_bytes, aligned_expand_bytes);
1772 1740
1773 if (_g1_storage.uncommitted_size() == 0) { 1741 if (is_maximal_no_gc()) {
1774 ergo_verbose0(ErgoHeapSizing, 1742 ergo_verbose0(ErgoHeapSizing,
1775 "did not expand the heap", 1743 "did not expand the heap",
1776 ergo_format_reason("heap already fully expanded")); 1744 ergo_format_reason("heap already fully expanded"));
1777 return false; 1745 return false;
1778 } 1746 }
1779 1747
1780 // First commit the memory. 1748 uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
1781 HeapWord* old_end = (HeapWord*) _g1_storage.high(); 1749 assert(regions_to_expand > 0, "Must expand by at least one region");
1782 bool successful = _g1_storage.expand_by(aligned_expand_bytes); 1750
1783 if (successful) { 1751 uint expanded_by = _hrs.expand_by(regions_to_expand);
1784 // Then propagate this update to the necessary data structures. 1752
1785 HeapWord* new_end = (HeapWord*) _g1_storage.high(); 1753 if (expanded_by > 0) {
1786 update_committed_space(old_end, new_end); 1754 size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
1787
1788 FreeRegionList expansion_list("Local Expansion List");
1789 MemRegion mr = _hrs.expand_by(old_end, new_end, &expansion_list);
1790 assert(mr.start() == old_end, "post-condition");
1791 // mr might be a smaller region than what was requested if
1792 // expand_by() was unable to allocate the HeapRegion instances
1793 assert(mr.end() <= new_end, "post-condition");
1794
1795 size_t actual_expand_bytes = mr.byte_size();
1796 assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition"); 1755 assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
1797 assert(actual_expand_bytes == expansion_list.total_capacity_bytes(), 1756 g1_policy()->record_new_heap_size(num_regions());
1798 "post-condition");
1799 if (actual_expand_bytes < aligned_expand_bytes) {
1800 // We could not expand _hrs to the desired size. In this case we
1801 // need to shrink the committed space accordingly.
1802 assert(mr.end() < new_end, "invariant");
1803
1804 size_t diff_bytes = aligned_expand_bytes - actual_expand_bytes;
1805 // First uncommit the memory.
1806 _g1_storage.shrink_by(diff_bytes);
1807 // Then propagate this update to the necessary data structures.
1808 update_committed_space(new_end, mr.end());
1809 }
1810 _free_list.add_as_tail(&expansion_list);
1811
1812 if (_hr_printer.is_active()) {
1813 HeapWord* curr = mr.start();
1814 while (curr < mr.end()) {
1815 HeapWord* curr_end = curr + HeapRegion::GrainWords;
1816 _hr_printer.commit(curr, curr_end);
1817 curr = curr_end;
1818 }
1819 assert(curr == mr.end(), "post-condition");
1820 }
1821 g1_policy()->record_new_heap_size(n_regions());
1822 } else { 1757 } else {
1823 ergo_verbose0(ErgoHeapSizing, 1758 ergo_verbose0(ErgoHeapSizing,
1824 "did not expand the heap", 1759 "did not expand the heap",
1825 ergo_format_reason("heap expansion operation failed")); 1760 ergo_format_reason("heap expansion operation failed"));
1826 // The expansion of the virtual storage space was unsuccessful. 1761 // The expansion of the virtual storage space was unsuccessful.
1827 // Let's see if it was because we ran out of swap. 1762 // Let's see if it was because we ran out of swap.
1828 if (G1ExitOnExpansionFailure && 1763 if (G1ExitOnExpansionFailure &&
1829 _g1_storage.uncommitted_size() >= aligned_expand_bytes) { 1764 _hrs.available() >= regions_to_expand) {
1830 // We had head room... 1765 // We had head room...
1831 vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion"); 1766 vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
1832 } 1767 }
1833 } 1768 }
1834 return successful; 1769 return regions_to_expand > 0;
1835 } 1770 }
1836 1771
1837 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) { 1772 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
1838 size_t aligned_shrink_bytes = 1773 size_t aligned_shrink_bytes =
1839 ReservedSpace::page_align_size_down(shrink_bytes); 1774 ReservedSpace::page_align_size_down(shrink_bytes);
1840 aligned_shrink_bytes = align_size_down(aligned_shrink_bytes, 1775 aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
1841 HeapRegion::GrainBytes); 1776 HeapRegion::GrainBytes);
1842 uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes); 1777 uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
1843 1778
1844 uint num_regions_removed = _hrs.shrink_by(num_regions_to_remove); 1779 uint num_regions_removed = _hrs.shrink_by(num_regions_to_remove);
1845 HeapWord* old_end = (HeapWord*) _g1_storage.high();
1846 size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes; 1780 size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
1847 1781
1848 ergo_verbose3(ErgoHeapSizing, 1782 ergo_verbose3(ErgoHeapSizing,
1849 "shrink the heap", 1783 "shrink the heap",
1850 ergo_format_byte("requested shrinking amount") 1784 ergo_format_byte("requested shrinking amount")
1851 ergo_format_byte("aligned shrinking amount") 1785 ergo_format_byte("aligned shrinking amount")
1852 ergo_format_byte("attempted shrinking amount"), 1786 ergo_format_byte("attempted shrinking amount"),
1853 shrink_bytes, aligned_shrink_bytes, shrunk_bytes); 1787 shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
1854 if (num_regions_removed > 0) { 1788 if (num_regions_removed > 0) {
1855 _g1_storage.shrink_by(shrunk_bytes); 1789 g1_policy()->record_new_heap_size(num_regions());
1856 HeapWord* new_end = (HeapWord*) _g1_storage.high();
1857
1858 if (_hr_printer.is_active()) {
1859 HeapWord* curr = old_end;
1860 while (curr > new_end) {
1861 HeapWord* curr_end = curr;
1862 curr -= HeapRegion::GrainWords;
1863 _hr_printer.uncommit(curr, curr_end);
1864 }
1865 }
1866
1867 _expansion_regions += num_regions_removed;
1868 update_committed_space(old_end, new_end);
1869 HeapRegionRemSet::shrink_heap(n_regions());
1870 g1_policy()->record_new_heap_size(n_regions());
1871 } else { 1790 } else {
1872 ergo_verbose0(ErgoHeapSizing, 1791 ergo_verbose0(ErgoHeapSizing,
1873 "did not shrink the heap", 1792 "did not shrink the heap",
1874 ergo_format_reason("heap shrinking operation failed")); 1793 ergo_format_reason("heap shrinking operation failed"));
1875 } 1794 }
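Note (not part of the changeset): with HeapRegionSeq owning the backing storage, `expand` and `shrink_helper` no longer manipulate `_g1_storage` in bytes; they convert the request into a whole-region count and call `_hrs.expand_by` / `_hrs.shrink_by`. A small sketch of that bytes-to-regions conversion follows; the page and region sizes are made-up placeholders for the `ReservedSpace` page size and `HeapRegion::GrainBytes`, and the exact rounding in the real code may differ slightly.

```cpp
#include <cstddef>

// Illustrative constants; the real values come from ReservedSpace page
// alignment and HeapRegion::GrainBytes.
constexpr size_t kPageSize   = 4 * 1024;
constexpr size_t kRegionSize = 1 * 1024 * 1024;

constexpr size_t align_up(size_t v, size_t a)   { return (v + a - 1) / a * a; }
constexpr size_t align_down(size_t v, size_t a) { return v / a * a; }

// Expansion rounds the request up to whole regions, so any positive request
// commits at least one region.
constexpr size_t regions_to_expand(size_t expand_bytes) {
  return align_up(align_up(expand_bytes, kPageSize), kRegionSize) / kRegionSize;
}

// Shrinking rounds down, so a request smaller than one region removes nothing.
constexpr size_t regions_to_shrink(size_t shrink_bytes) {
  return align_down(align_down(shrink_bytes, kPageSize), kRegionSize) / kRegionSize;
}

static_assert(regions_to_expand(1) == 1, "any expansion commits a whole region");
static_assert(regions_to_shrink(kRegionSize - 1) == 0, "partial regions are not uncommitted");
```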
1916 _mark_in_progress(false), 1835 _mark_in_progress(false),
1917 _cg1r(NULL), _summary_bytes_used(0), 1836 _cg1r(NULL), _summary_bytes_used(0),
1918 _g1mm(NULL), 1837 _g1mm(NULL),
1919 _refine_cte_cl(NULL), 1838 _refine_cte_cl(NULL),
1920 _full_collection(false), 1839 _full_collection(false),
1921 _free_list("Master Free List", new MasterFreeRegionListMtSafeChecker()),
1922 _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()), 1840 _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1923 _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()), 1841 _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1924 _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()), 1842 _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1925 _humongous_is_live(), 1843 _humongous_is_live(),
1926 _has_humongous_reclaim_candidates(false), 1844 _has_humongous_reclaim_candidates(false),
2045 // Also create a G1 rem set. 1963 // Also create a G1 rem set.
2046 _g1_rem_set = new G1RemSet(this, g1_barrier_set()); 1964 _g1_rem_set = new G1RemSet(this, g1_barrier_set());
2047 1965
2048 // Carve out the G1 part of the heap. 1966 // Carve out the G1 part of the heap.
2049 1967
2050 ReservedSpace g1_rs = heap_rs.first_part(max_byte_size); 1968 ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
2051 _g1_reserved = MemRegion((HeapWord*)g1_rs.base(), 1969 _hrs.initialize(g1_rs);
2052 g1_rs.size()/HeapWordSize); 1970
2053
2054 _g1_storage.initialize(g1_rs, 0);
2055 _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
2056 _hrs.initialize((HeapWord*) _g1_reserved.start(),
2057 (HeapWord*) _g1_reserved.end());
2058 assert(_hrs.max_length() == _expansion_regions, 1971 assert(_hrs.max_length() == _expansion_regions,
2059 err_msg("max length: %u expansion regions: %u", 1972 err_msg("max length: %u expansion regions: %u",
2060 _hrs.max_length(), _expansion_regions)); 1973 _hrs.max_length(), _expansion_regions));
2061 1974
2062 // Do later initialization work for concurrent refinement. 1975 // Do later initialization work for concurrent refinement.
2077 _bot_shared = new G1BlockOffsetSharedArray(_reserved, 1990 _bot_shared = new G1BlockOffsetSharedArray(_reserved,
2078 heap_word_size(init_byte_size)); 1991 heap_word_size(init_byte_size));
2079 1992
2080 _g1h = this; 1993 _g1h = this;
2081 1994
2082 _in_cset_fast_test.initialize(_g1_reserved.start(), _g1_reserved.end(), HeapRegion::GrainBytes); 1995 _in_cset_fast_test.initialize(_hrs.reserved().start(), _hrs.reserved().end(), HeapRegion::GrainBytes);
2083 _humongous_is_live.initialize(_g1_reserved.start(), _g1_reserved.end(), HeapRegion::GrainBytes); 1996 _humongous_is_live.initialize(_hrs.reserved().start(), _hrs.reserved().end(), HeapRegion::GrainBytes);
2084 1997
2085 // Create the ConcurrentMark data structure and thread. 1998 // Create the ConcurrentMark data structure and thread.
2086 // (Must do this late, so that "max_regions" is defined.) 1999 // (Must do this late, so that "max_regions" is defined.)
2087 _cm = new ConcurrentMark(this, heap_rs); 2000 _cm = new ConcurrentMark(this, heap_rs);
2088 if (_cm == NULL || !_cm->completed_initialization()) { 2001 if (_cm == NULL || !_cm->completed_initialization()) {
2137 2050
2138 // In case we're keeping closure specialization stats, initialize those 2051 // In case we're keeping closure specialization stats, initialize those
2139 // counts and that mechanism. 2052 // counts and that mechanism.
2140 SpecializationStats::clear(); 2053 SpecializationStats::clear();
2141 2054
2142 // Here we allocate the dummy full region that is required by the 2055 // Here we allocate the dummy HeapRegion that is required by the
2143 // G1AllocRegion class. If we don't pass an address in the reserved 2056 // G1AllocRegion class.
2144 // space here, lots of asserts fire. 2057
2145 2058 HeapRegion* dummy_region = _hrs.get_dummy_region();
2146 HeapRegion* dummy_region = new_heap_region(0 /* index of bottom region */,
2147 _g1_reserved.start());
2148 // We'll re-use the same region whether the alloc region will 2059 // We'll re-use the same region whether the alloc region will
2149 // require BOT updates or not and, if it doesn't, then a non-young 2060 // require BOT updates or not and, if it doesn't, then a non-young
2150 // region will complain that it cannot support allocations without 2061 // region will complain that it cannot support allocations without
2151 // BOT updates. So we'll tag the dummy region as young to avoid that. 2062 // BOT updates. So we'll tag the dummy region as young to avoid that.
2152 dummy_region->set_young(); 2063 dummy_region->set_young();
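Note (not part of the changeset): the dummy region now obtained from `_hrs.get_dummy_region()` acts as a sentinel for `G1AllocRegion` — the allocator can always hold a non-NULL "current region", so its fast path needs no NULL check, and the dummy has no usable space so every allocation attempt on it falls through to the slow path. This reading of the dummy region's purpose is an inference from the surrounding comments; the sketch below shows the sentinel pattern with invented names and none of the real locking or BOT concerns.

```cpp
#include <cstddef>

// Trivially simplified allocation region: bump-pointer allocation in [top, end).
struct AllocRegion {
  char* top;
  char* end;
  void* allocate(size_t bytes) {
    if (bytes > static_cast<size_t>(end - top)) return nullptr;  // region exhausted
    void* result = top;
    top += bytes;
    return result;
  }
};

// Zero-capacity sentinel: every allocate() on it fails immediately.
static char dummy_storage[1];
static AllocRegion dummy_region = { dummy_storage, dummy_storage };

class Allocator {
  AllocRegion* _current = &dummy_region;  // never NULL, so the fast path needs no NULL check
public:
  void* attempt_allocation(size_t bytes) {
    void* result = _current->allocate(bytes);  // fast path
    if (result != nullptr) return result;
    // Slow path: in the real code this would take a fresh region from the
    // free list, install it as _current and retry; here we just give up.
    return nullptr;
  }
};
```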
2258 // is alive closure 2169 // is alive closure
2259 // (for efficiency/performance) 2170 // (for efficiency/performance)
2260 } 2171 }
2261 2172
2262 size_t G1CollectedHeap::capacity() const { 2173 size_t G1CollectedHeap::capacity() const {
2263 return _g1_committed.byte_size(); 2174 return _hrs.length() * HeapRegion::GrainBytes;
2264 } 2175 }
2265 2176
2266 void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) { 2177 void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
2267 assert(!hr->continuesHumongous(), "pre-condition"); 2178 assert(!hr->continuesHumongous(), "pre-condition");
2268 hr->reset_gc_time_stamp(); 2179 hr->reset_gc_time_stamp();
2367 g1_policy()->phase_times()->record_evac_fail_recalc_used_time((os::elapsedTime() - recalculate_used_start) * 1000.0); 2278 g1_policy()->phase_times()->record_evac_fail_recalc_used_time((os::elapsedTime() - recalculate_used_start) * 1000.0);
2368 return blk.result(); 2279 return blk.result();
2369 } 2280 }
2370 2281
2371 size_t G1CollectedHeap::unsafe_max_alloc() { 2282 size_t G1CollectedHeap::unsafe_max_alloc() {
2372 if (free_regions() > 0) return HeapRegion::GrainBytes; 2283 if (num_free_regions() > 0) return HeapRegion::GrainBytes;
2373 // otherwise, is there space in the current allocation region? 2284 // otherwise, is there space in the current allocation region?
2374 2285
2375 // We need to store the current allocation region in a local variable 2286 // We need to store the current allocation region in a local variable
2376 // here. The problem is that this method doesn't take any locks and 2287 // here. The problem is that this method doesn't take any locks and
2377 // there may be other threads which overwrite the current allocation 2288 // there may be other threads which overwrite the current allocation
2582 } 2493 }
2583 } while (retry_gc); 2494 } while (retry_gc);
2584 } 2495 }
2585 2496
2586 bool G1CollectedHeap::is_in(const void* p) const { 2497 bool G1CollectedHeap::is_in(const void* p) const {
2587 if (_g1_committed.contains(p)) { 2498 if (_hrs.committed().contains(p)) {
2588 // Given that we know that p is in the committed space, 2499 // Given that we know that p is in the committed space,
2589 // heap_region_containing_raw() should successfully 2500 // heap_region_containing_raw() should successfully
2590 // return the containing region. 2501 // return the containing region.
2591 HeapRegion* hr = heap_region_containing_raw(p); 2502 HeapRegion* hr = heap_region_containing_raw(p);
2592 return hr->is_in(p); 2503 return hr->is_in(p);
2657 } 2568 }
2658 2569
2659 void 2570 void
2660 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl, 2571 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
2661 uint worker_id, 2572 uint worker_id,
2662 uint no_of_par_workers, 2573 uint num_workers,
2663 jint claim_value) { 2574 jint claim_value) const {
2664 const uint regions = n_regions(); 2575 _hrs.par_iterate(cl, worker_id, num_workers, claim_value);
2665 const uint max_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
2666 no_of_par_workers :
2667 1);
2668 assert(UseDynamicNumberOfGCThreads ||
2669 no_of_par_workers == workers()->total_workers(),
2670 "Non dynamic should use fixed number of workers");
2671 // try to spread out the starting points of the workers
2672 const HeapRegion* start_hr =
2673 start_region_for_worker(worker_id, no_of_par_workers);
2674 const uint start_index = start_hr->hrs_index();
2675
2676 // each worker will actually look at all regions
2677 for (uint count = 0; count < regions; ++count) {
2678 const uint index = (start_index + count) % regions;
2679 assert(0 <= index && index < regions, "sanity");
2680 HeapRegion* r = region_at(index);
2681 // we'll ignore "continues humongous" regions (we'll process them
2682 // when we come across their corresponding "start humongous"
2683 // region) and regions already claimed
2684 if (r->claim_value() == claim_value || r->continuesHumongous()) {
2685 continue;
2686 }
2687 // OK, try to claim it
2688 if (r->claimHeapRegion(claim_value)) {
2689 // success!
2690 assert(!r->continuesHumongous(), "sanity");
2691 if (r->startsHumongous()) {
2692 // If the region is "starts humongous" we'll iterate over its
2693 // "continues humongous" first; in fact we'll do them
2694 // first. The order is important. In on case, calling the
2695 // closure on the "starts humongous" region might de-allocate
2696 // and clear all its "continues humongous" regions and, as a
2697 // result, we might end up processing them twice. So, we'll do
2698 // them first (notice: most closures will ignore them anyway) and
2699 // then we'll do the "starts humongous" region.
2700 for (uint ch_index = index + 1; ch_index < regions; ++ch_index) {
2701 HeapRegion* chr = region_at(ch_index);
2702
2703 // if the region has already been claimed or it's not
2704 // "continues humongous" we're done
2705 if (chr->claim_value() == claim_value ||
2706 !chr->continuesHumongous()) {
2707 break;
2708 }
2709
2710 // No one should have claimed it directly. We can given
2711 // that we claimed its "starts humongous" region.
2712 assert(chr->claim_value() != claim_value, "sanity");
2713 assert(chr->humongous_start_region() == r, "sanity");
2714
2715 if (chr->claimHeapRegion(claim_value)) {
2716 // we should always be able to claim it; no one else should
2717 // be trying to claim this region
2718
2719 bool res2 = cl->doHeapRegion(chr);
2720 assert(!res2, "Should not abort");
2721
2722 // Right now, this holds (i.e., no closure that actually
2723 // does something with "continues humongous" regions
2724 // clears them). We might have to weaken it in the future,
2725 // but let's leave these two asserts here for extra safety.
2726 assert(chr->continuesHumongous(), "should still be the case");
2727 assert(chr->humongous_start_region() == r, "sanity");
2728 } else {
2729 guarantee(false, "we should not reach here");
2730 }
2731 }
2732 }
2733
2734 assert(!r->continuesHumongous(), "sanity");
2735 bool res = cl->doHeapRegion(r);
2736 assert(!res, "Should not abort");
2737 }
2738 }
2739 } 2576 }
2740 2577
2741 class ResetClaimValuesClosure: public HeapRegionClosure { 2578 class ResetClaimValuesClosure: public HeapRegionClosure {
2742 public: 2579 public:
2743 bool doHeapRegion(HeapRegion* r) { 2580 bool doHeapRegion(HeapRegion* r) {
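Note (not part of the changeset): the large removed block above is the old hand-rolled parallel region iteration — each worker starts at a staggered index (see the removed `start_region_for_worker` further down), walks every region, and claims each one with a CAS before applying the closure, with extra handling for humongous region tails. The new code delegates all of this to `_hrs.par_iterate`. Below is a condensed, standalone sketch of the claim-based scheme (humongous handling omitted), using `std::atomic`/`std::thread` instead of HotSpot's primitives.

```cpp
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

struct Region {
  std::atomic<int> claim{0};   // 0 = unclaimed; otherwise the claim value
  bool try_claim(int value) {
    int expected = 0;
    return claim.compare_exchange_strong(expected, value);
  }
};

// Every worker visits every region, starting at a staggered index so workers
// begin in different parts of the heap; the CAS ensures each region is
// processed exactly once per claim value.
template <typename Closure>
void par_iterate(std::vector<Region>& regions, unsigned worker_id,
                 unsigned num_workers, int claim_value, Closure cl) {
  const size_t n = regions.size();
  const size_t start = n * worker_id / num_workers;
  for (size_t count = 0; count < n; ++count) {
    size_t index = (start + count) % n;
    if (regions[index].try_claim(claim_value)) {
      cl(index);
    }
  }
}

int main() {
  std::vector<Region> heap(32);
  std::atomic<int> processed{0};
  std::vector<std::thread> workers;
  const unsigned num_workers = 4;
  for (unsigned w = 0; w < num_workers; ++w) {
    workers.emplace_back([&, w] {
      par_iterate(heap, w, num_workers, /* claim value */ 1,
                  [&](size_t) { processed.fetch_add(1); });
    });
  }
  for (auto& t : workers) t.join();
  std::printf("regions processed: %d (expected 32)\n", processed.load());
}
```

The staggered start indices spread the workers across the heap so they rarely race on the same regions, while the CAS keeps the scheme correct even when they do.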
2911 OrderAccess::storestore(); 2748 OrderAccess::storestore();
2912 _worker_cset_start_region_time_stamp[worker_i] = gc_time_stamp; 2749 _worker_cset_start_region_time_stamp[worker_i] = gc_time_stamp;
2913 return result; 2750 return result;
2914 } 2751 }
2915 2752
2916 HeapRegion* G1CollectedHeap::start_region_for_worker(uint worker_i,
2917 uint no_of_par_workers) {
2918 uint worker_num =
2919 G1CollectedHeap::use_parallel_gc_threads() ? no_of_par_workers : 1U;
2920 assert(UseDynamicNumberOfGCThreads ||
2921 no_of_par_workers == workers()->total_workers(),
2922 "Non dynamic should use fixed number of workers");
2923 const uint start_index = n_regions() * worker_i / worker_num;
2924 return region_at(start_index);
2925 }
2926
2927 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) { 2753 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
2928 HeapRegion* r = g1_policy()->collection_set(); 2754 HeapRegion* r = g1_policy()->collection_set();
2929 while (r != NULL) { 2755 while (r != NULL) {
2930 HeapRegion* next = r->next_in_collection_set(); 2756 HeapRegion* next = r->next_in_collection_set();
2931 if (cl->doHeapRegion(r)) { 2757 if (cl->doHeapRegion(r)) {
2964 cur = next; 2790 cur = next;
2965 } 2791 }
2966 } 2792 }
2967 2793
2968 HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const { 2794 HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
2969 // We're not using an iterator given that it will wrap around when 2795 HeapRegion* result = _hrs.next_region_in_heap(from);
2970 // it reaches the last region and this is not what we want here. 2796 while (result != NULL && result->isHumongous()) {
2971 for (uint index = from->hrs_index() + 1; index < n_regions(); index++) { 2797 result = _hrs.next_region_in_heap(result);
2972 HeapRegion* hr = region_at(index); 2798 }
2973 if (!hr->isHumongous()) { 2799 return result;
2974 return hr;
2975 }
2976 }
2977 return NULL;
2978 } 2800 }
2979 2801
2980 Space* G1CollectedHeap::space_containing(const void* addr) const { 2802 Space* G1CollectedHeap::space_containing(const void* addr) const {
2981 return heap_region_containing(addr); 2803 return heap_region_containing(addr);
2982 } 2804 }
3030 return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab); 2852 return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
3031 } 2853 }
3032 } 2854 }
3033 2855
3034 size_t G1CollectedHeap::max_capacity() const { 2856 size_t G1CollectedHeap::max_capacity() const {
3035 return _g1_reserved.byte_size(); 2857 return _hrs.reserved().byte_size();
3036 } 2858 }
3037 2859
3038 jlong G1CollectedHeap::millis_since_last_gc() { 2860 jlong G1CollectedHeap::millis_since_last_gc() {
3039 // assert(false, "NYI"); 2861 // assert(false, "NYI");
3040 return 0; 2862 return 0;
3559 void G1CollectedHeap::print_on(outputStream* st) const { 3381 void G1CollectedHeap::print_on(outputStream* st) const {
3560 st->print(" %-20s", "garbage-first heap"); 3382 st->print(" %-20s", "garbage-first heap");
3561 st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K", 3383 st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
3562 capacity()/K, used_unlocked()/K); 3384 capacity()/K, used_unlocked()/K);
3563 st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")", 3385 st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
3564 _g1_storage.low_boundary(), 3386 _hrs.committed().start(),
3565 _g1_storage.high(), 3387 _hrs.committed().end(),
3566 _g1_storage.high_boundary()); 3388 _hrs.reserved().end());
3567 st->cr(); 3389 st->cr();
3568 st->print(" region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K); 3390 st->print(" region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
3569 uint young_regions = _young_list->length(); 3391 uint young_regions = _young_list->length();
3570 st->print("%u young (" SIZE_FORMAT "K), ", young_regions, 3392 st->print("%u young (" SIZE_FORMAT "K), ", young_regions,
3571 (size_t) young_regions * HeapRegion::GrainBytes / K); 3393 (size_t) young_regions * HeapRegion::GrainBytes / K);
4251 if (expand_bytes > 0) { 4073 if (expand_bytes > 0) {
4252 size_t bytes_before = capacity(); 4074 size_t bytes_before = capacity();
4253 // No need for an ergo verbose message here, 4075 // No need for an ergo verbose message here,
4254 // expansion_amount() does this when it returns a value > 0. 4076 // expansion_amount() does this when it returns a value > 0.
4255 if (!expand(expand_bytes)) { 4077 if (!expand(expand_bytes)) {
4256 // We failed to expand the heap so let's verify that 4078 // We failed to expand the heap. Cannot do anything about it.
4257 // committed/uncommitted amount match the backing store
4258 assert(capacity() == _g1_storage.committed_size(), "committed size mismatch");
4259 assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch");
4260 } 4079 }
4261 } 4080 }
4262 } 4081 }
4263 4082
4264 // We redo the verification but now wrt to the new CSet which 4083 // We redo the verification but now wrt to the new CSet which
4315 // event, and after we retire the GC alloc regions so that all 4134 // event, and after we retire the GC alloc regions so that all
4316 // RETIRE events are generated before the end GC event. 4135 // RETIRE events are generated before the end GC event.
4317 _hr_printer.end_gc(false /* full */, (size_t) total_collections()); 4136 _hr_printer.end_gc(false /* full */, (size_t) total_collections());
4318 4137
4319 if (mark_in_progress()) { 4138 if (mark_in_progress()) {
4320 concurrent_mark()->update_g1_committed(); 4139 concurrent_mark()->update_heap_boundaries(_hrs.committed());
4321 } 4140 }
4322 4141
4323 #ifdef TRACESPINNING 4142 #ifdef TRACESPINNING
4324 ParallelTaskTerminator::print_termination_counts(); 4143 ParallelTaskTerminator::print_termination_counts();
4325 #endif 4144 #endif
6152 FreeRegionList* free_list, 5971 FreeRegionList* free_list,
6153 bool par, 5972 bool par,
6154 bool locked) { 5973 bool locked) {
6155 assert(!hr->isHumongous(), "this is only for non-humongous regions"); 5974 assert(!hr->isHumongous(), "this is only for non-humongous regions");
6156 assert(!hr->is_empty(), "the region should not be empty"); 5975 assert(!hr->is_empty(), "the region should not be empty");
5976 assert(_hrs.is_available(hr->hrs_index()), "region should be committed");
6157 assert(free_list != NULL, "pre-condition"); 5977 assert(free_list != NULL, "pre-condition");
6158 5978
6159 if (G1VerifyBitmaps) { 5979 if (G1VerifyBitmaps) {
6160 MemRegion mr(hr->bottom(), hr->end()); 5980 MemRegion mr(hr->bottom(), hr->end());
6161 concurrent_mark()->clearRangePrevBitmap(mr); 5981 concurrent_mark()->clearRangePrevBitmap(mr);
6206 6026
6207 void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) { 6027 void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {
6208 assert(list != NULL, "list can't be null"); 6028 assert(list != NULL, "list can't be null");
6209 if (!list->is_empty()) { 6029 if (!list->is_empty()) {
6210 MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag); 6030 MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
6211 _free_list.add_ordered(list); 6031 _hrs.insert_list_into_free_list(list);
6212 } 6032 }
6213 } 6033 }
6214 6034
6215 void G1CollectedHeap::decrement_summary_bytes(size_t bytes) { 6035 void G1CollectedHeap::decrement_summary_bytes(size_t bytes) {
6216 assert(_summary_bytes_used >= bytes, 6036 assert(_summary_bytes_used >= bytes,
6814 // Note that emptying the _young_list is postponed and instead done as 6634 // Note that emptying the _young_list is postponed and instead done as
6815 // the first step when rebuilding the regions sets again. The reason for 6635 // the first step when rebuilding the regions sets again. The reason for
6816 // this is that during a full GC string deduplication needs to know if 6636 // this is that during a full GC string deduplication needs to know if
6817 // a collected region was young or old when the full GC was initiated. 6637 // a collected region was young or old when the full GC was initiated.
6818 } 6638 }
6819 _free_list.remove_all(); 6639 _hrs.remove_all_free_regions();
6820 } 6640 }
6821 6641
6822 class RebuildRegionSetsClosure : public HeapRegionClosure { 6642 class RebuildRegionSetsClosure : public HeapRegionClosure {
6823 private: 6643 private:
6824 bool _free_list_only; 6644 bool _free_list_only;
6825 HeapRegionSet* _old_set; 6645 HeapRegionSet* _old_set;
6826 FreeRegionList* _free_list; 6646 HeapRegionSeq* _hrs;
6827 size_t _total_used; 6647 size_t _total_used;
6828 6648
6829 public: 6649 public:
6830 RebuildRegionSetsClosure(bool free_list_only, 6650 RebuildRegionSetsClosure(bool free_list_only,
6831 HeapRegionSet* old_set, FreeRegionList* free_list) : 6651 HeapRegionSet* old_set, HeapRegionSeq* hrs) :
6832 _free_list_only(free_list_only), 6652 _free_list_only(free_list_only),
6833 _old_set(old_set), _free_list(free_list), _total_used(0) { 6653 _old_set(old_set), _hrs(hrs), _total_used(0) {
6834 assert(_free_list->is_empty(), "pre-condition"); 6654 assert(_hrs->num_free_regions() == 0, "pre-condition");
6835 if (!free_list_only) { 6655 if (!free_list_only) {
6836 assert(_old_set->is_empty(), "pre-condition"); 6656 assert(_old_set->is_empty(), "pre-condition");
6837 } 6657 }
6838 } 6658 }
6839 6659
6842 return false; 6662 return false;
6843 } 6663 }
6844 6664
6845 if (r->is_empty()) { 6665 if (r->is_empty()) {
6846 // Add free regions to the free list 6666 // Add free regions to the free list
6847 _free_list->add_as_tail(r); 6667 _hrs->insert_into_free_list(r);
6848 } else if (!_free_list_only) { 6668 } else if (!_free_list_only) {
6849 assert(!r->is_young(), "we should not come across young regions"); 6669 assert(!r->is_young(), "we should not come across young regions");
6850 6670
6851 if (r->isHumongous()) { 6671 if (r->isHumongous()) {
6852 // We ignore humongous regions, we left the humongous set unchanged 6672 // We ignore humongous regions, we left the humongous set unchanged
6870 6690
6871 if (!free_list_only) { 6691 if (!free_list_only) {
6872 _young_list->empty_list(); 6692 _young_list->empty_list();
6873 } 6693 }
6874 6694
6875 RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_free_list); 6695 RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrs);
6876 heap_region_iterate(&cl); 6696 heap_region_iterate(&cl);
6877 6697
6878 if (!free_list_only) { 6698 if (!free_list_only) {
6879 _summary_bytes_used = cl.total_used(); 6699 _summary_bytes_used = cl.total_used();
6880 } 6700 }
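Note (not part of the changeset): `RebuildRegionSetsClosure` now returns empty regions to the HeapRegionSeq (`insert_into_free_list`) rather than to a separate master free list; non-empty, non-humongous regions still go to the old set, and used bytes are re-accumulated from scratch. A hedged standalone sketch of that classification pass, with a simplified region model and invented container names:

```cpp
#include <cstddef>
#include <vector>

struct Region {
  size_t used_bytes = 0;
  bool   humongous  = false;
  bool is_empty() const { return used_bytes == 0; }
};

struct RebuildResult {
  std::vector<Region*> free_list;  // stand-in for HeapRegionSeq's free list
  std::vector<Region*> old_set;
  size_t total_used = 0;
};

// One pass over all regions, mirroring RebuildRegionSetsClosure: empty regions
// are returned to the free list; humongous regions are left in their set;
// everything else is treated as old. Used space is recomputed from scratch.
RebuildResult rebuild_region_sets(std::vector<Region>& heap, bool free_list_only) {
  RebuildResult result;
  for (Region& r : heap) {
    if (r.is_empty()) {
      result.free_list.push_back(&r);
    } else if (!free_list_only) {
      if (!r.humongous) {
        result.old_set.push_back(&r);
      }
      result.total_used += r.used_bytes;
    }
  }
  return result;
}
```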
7060 6880
7061 class VerifyRegionListsClosure : public HeapRegionClosure { 6881 class VerifyRegionListsClosure : public HeapRegionClosure {
7062 private: 6882 private:
7063 HeapRegionSet* _old_set; 6883 HeapRegionSet* _old_set;
7064 HeapRegionSet* _humongous_set; 6884 HeapRegionSet* _humongous_set;
7065 FreeRegionList* _free_list; 6885 HeapRegionSeq* _hrs;
7066 6886
7067 public: 6887 public:
7068 HeapRegionSetCount _old_count; 6888 HeapRegionSetCount _old_count;
7069 HeapRegionSetCount _humongous_count; 6889 HeapRegionSetCount _humongous_count;
7070 HeapRegionSetCount _free_count; 6890 HeapRegionSetCount _free_count;
7071 6891
7072 VerifyRegionListsClosure(HeapRegionSet* old_set, 6892 VerifyRegionListsClosure(HeapRegionSet* old_set,
7073 HeapRegionSet* humongous_set, 6893 HeapRegionSet* humongous_set,
7074 FreeRegionList* free_list) : 6894 HeapRegionSeq* hrs) :
7075 _old_set(old_set), _humongous_set(humongous_set), _free_list(free_list), 6895 _old_set(old_set), _humongous_set(humongous_set), _hrs(hrs),
7076 _old_count(), _humongous_count(), _free_count(){ } 6896 _old_count(), _humongous_count(), _free_count(){ }
7077 6897
7078 bool doHeapRegion(HeapRegion* hr) { 6898 bool doHeapRegion(HeapRegion* hr) {
7079 if (hr->continuesHumongous()) { 6899 if (hr->continuesHumongous()) {
7080 return false; 6900 return false;
7084 // TODO 6904 // TODO
7085 } else if (hr->startsHumongous()) { 6905 } else if (hr->startsHumongous()) {
7086 assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u is starts humongous but not in humongous set.", hr->hrs_index())); 6906 assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u is starts humongous but not in humongous set.", hr->hrs_index()));
7087 _humongous_count.increment(1u, hr->capacity()); 6907 _humongous_count.increment(1u, hr->capacity());
7088 } else if (hr->is_empty()) { 6908 } else if (hr->is_empty()) {
7089 assert(hr->containing_set() == _free_list, err_msg("Heap region %u is empty but not on the free list.", hr->hrs_index())); 6909 assert(_hrs->is_free(hr), err_msg("Heap region %u is empty but not on the free list.", hr->hrs_index()));
7090 _free_count.increment(1u, hr->capacity()); 6910 _free_count.increment(1u, hr->capacity());
7091 } else { 6911 } else {
7092 assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->hrs_index())); 6912 assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->hrs_index()));
7093 _old_count.increment(1u, hr->capacity()); 6913 _old_count.increment(1u, hr->capacity());
7094 } 6914 }
7095 return false; 6915 return false;
7096 } 6916 }
7097 6917
7098 void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, FreeRegionList* free_list) { 6918 void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionSeq* free_list) {
7099 guarantee(old_set->length() == _old_count.length(), err_msg("Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count.length())); 6919 guarantee(old_set->length() == _old_count.length(), err_msg("Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count.length()));
7100 guarantee(old_set->total_capacity_bytes() == _old_count.capacity(), err_msg("Old set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT, 6920 guarantee(old_set->total_capacity_bytes() == _old_count.capacity(), err_msg("Old set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
7101 old_set->total_capacity_bytes(), _old_count.capacity())); 6921 old_set->total_capacity_bytes(), _old_count.capacity()));
7102 6922
7103 guarantee(humongous_set->length() == _humongous_count.length(), err_msg("Hum set count mismatch. Expected %u, actual %u.", humongous_set->length(), _humongous_count.length())); 6923 guarantee(humongous_set->length() == _humongous_count.length(), err_msg("Hum set count mismatch. Expected %u, actual %u.", humongous_set->length(), _humongous_count.length()));
7104 guarantee(humongous_set->total_capacity_bytes() == _humongous_count.capacity(), err_msg("Hum set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT, 6924 guarantee(humongous_set->total_capacity_bytes() == _humongous_count.capacity(), err_msg("Hum set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
7105 humongous_set->total_capacity_bytes(), _humongous_count.capacity())); 6925 humongous_set->total_capacity_bytes(), _humongous_count.capacity()));
7106 6926
7107 guarantee(free_list->length() == _free_count.length(), err_msg("Free list count mismatch. Expected %u, actual %u.", free_list->length(), _free_count.length())); 6927 guarantee(free_list->num_free_regions() == _free_count.length(), err_msg("Free list count mismatch. Expected %u, actual %u.", free_list->num_free_regions(), _free_count.length()));
7108 guarantee(free_list->total_capacity_bytes() == _free_count.capacity(), err_msg("Free list capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT, 6928 guarantee(free_list->total_capacity_bytes() == _free_count.capacity(), err_msg("Free list capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
7109 free_list->total_capacity_bytes(), _free_count.capacity())); 6929 free_list->total_capacity_bytes(), _free_count.capacity()));
7110 } 6930 }
7111 }; 6931 };
7112 6932
7113 HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
7114 HeapWord* bottom) {
7115 HeapWord* end = bottom + HeapRegion::GrainWords;
7116 MemRegion mr(bottom, end);
7117 assert(_g1_reserved.contains(mr), "invariant");
7118 // This might return NULL if the allocation fails
7119 return new HeapRegion(hrs_index, _bot_shared, mr);
7120 }
7121
7122 void G1CollectedHeap::verify_region_sets() { 6933 void G1CollectedHeap::verify_region_sets() {
7123 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); 6934 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
7124 6935
7125 // First, check the explicit lists. 6936 // First, check the explicit lists.
7126 _free_list.verify_list(); 6937 _hrs.verify();
7127 { 6938 {
7128 // Given that a concurrent operation might be adding regions to 6939 // Given that a concurrent operation might be adding regions to
7129 // the secondary free list we have to take the lock before 6940 // the secondary free list we have to take the lock before
7130 // verifying it. 6941 // verifying it.
7131 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); 6942 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
7152 append_secondary_free_list_if_not_empty_with_lock(); 6963 append_secondary_free_list_if_not_empty_with_lock();
7153 6964
7154 // Finally, make sure that the region accounting in the lists is 6965 // Finally, make sure that the region accounting in the lists is
7155 // consistent with what we see in the heap. 6966 // consistent with what we see in the heap.
7156 6967
7157 VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_free_list); 6968 VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_hrs);
7158 heap_region_iterate(&cl); 6969 heap_region_iterate(&cl);
7159 cl.verify_counts(&_old_set, &_humongous_set, &_free_list); 6970 cl.verify_counts(&_old_set, &_humongous_set, &_hrs);
7160 } 6971 }
7161 6972
7162 // Optimized nmethod scanning 6973 // Optimized nmethod scanning
7163 6974
7164 class RegisterNMethodOopClosure: public OopClosure { 6975 class RegisterNMethodOopClosure: public OopClosure {