comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 3766:c3f1170908be

7045330: G1: Simplify/fix the HeapRegionSeq class
7042285: G1: native memory leak during humongous object allocation
6804436: G1: heap region indices should be size_t
Summary: A series of fixes and improvements to the HeapRegionSeq class:
a) replace the _regions growable array with a standard C array,
b) avoid de-allocating / re-allocating HeapRegion instances when the heap shrinks / grows (fix for 7042285),
c) introduce a fast method to map an address to a HeapRegion via a "biased" array pointer,
d) embed the _hrs object in G1CollectedHeap instead of pointing to it via an indirection,
e) assume that all the regions added to the HeapRegionSeq instance are contiguous,
f) replace ints with size_t's for indexes (and extend that to HeapRegion as part of 6804436),
g) remove unnecessary / unused methods,
h) rename a couple of fields (_alloc_search_start and _seq_bottom),
i) fix iterate_from() so it does not always start from index 0 irrespective of the region passed to it,
j) add a verification method to check the HeapRegionSeq assumptions,
k) always call the wrappers for _hrs.iterate(), _hrs.length(), and _hrs.at() from G1CollectedHeap rather than those methods directly, and
l) unify the code that expands the sequence (by either re-using or creating a new HeapRegion) and make it robust wrt a HeapRegion allocation failing.
Reviewed-by: stefank, johnc, brutisso
author tonyp
date Fri, 10 Jun 2011 13:16:40 -0400
parents 053d84a76d3d
children 6747fd0512e0
left column: 3765:ae5b2f1dcf12 (before)    right column: 3766:c3f1170908be (after)
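The headline structural change is item (c) in the summary: mapping an address to its HeapRegion through a "biased" array pointer, so the lookup is one shift and one load with no per-call subtraction or search. The snippet below is a minimal standalone sketch of that idea, not the HotSpot code; the Region/RegionTable names, the 1 MB region size, and the example heap bottom are assumptions for illustration. Note that later in this diff is_in() relies on exactly this NULL-on-miss behavior instead of first checking _g1_committed.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

struct Region { size_t index; };

class RegionTable {
  static const int    LogRegionBytes = 20;   // assumed 1 MB regions
  std::vector<Region> _regions;              // one slot per region
  uintptr_t           _bottom;               // bottom of the reserved heap
  Region*             _biased;               // table base pre-offset by (_bottom >> LogRegionBytes)

public:
  RegionTable(uintptr_t bottom, size_t num_regions)
    : _regions(num_regions), _bottom(bottom) {
    assert(num_regions > 0 && "need at least one region");
    assert((bottom & (((uintptr_t)1 << LogRegionBytes) - 1)) == 0 && "bottom must be region-aligned");
    for (size_t i = 0; i < num_regions; ++i) { _regions[i].index = i; }
    // The bias: shift the base pointer back by the region index of 'bottom', so
    // that (addr >> LogRegionBytes) indexes the table directly. This relies on
    // flat pointer arithmetic, which is exactly the trick the real code plays.
    _biased = &_regions[0] - (bottom >> LogRegionBytes);
  }

  // O(1) address-to-region mapping: one shift, one (biased) array access.
  // Returns NULL for addresses outside the table.
  Region* addr_to_region(uintptr_t addr) {
    size_t idx = (addr - _bottom) >> LogRegionBytes;
    if (addr < _bottom || idx >= _regions.size()) { return NULL; }
    return &_biased[addr >> LogRegionBytes];
  }
};

int main() {
  const uintptr_t heap_bottom = (uintptr_t)0x40000000;          // assumed heap start
  RegionTable table(heap_bottom, 1024);                         // 1024 regions of 1 MB
  Region* r = table.addr_to_region(heap_bottom + 5 * (1u << 20) + 123);
  return (r != NULL && r->index == 5) ? 0 : 1;                  // lands in region 5
}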
576 } 576 }
577 res = new_region_try_secondary_free_list(); 577 res = new_region_try_secondary_free_list();
578 } 578 }
579 if (res == NULL && do_expand) { 579 if (res == NULL && do_expand) {
580 if (expand(word_size * HeapWordSize)) { 580 if (expand(word_size * HeapWordSize)) {
581 // The expansion succeeded and so we should have at least one 581 // Even though the heap was expanded, it might not have reached
582 // region on the free list. 582 // the desired size. So, we cannot assume that the allocation
583 res = _free_list.remove_head(); 583 // will succeed.
584 res = _free_list.remove_head_or_null();
584 } 585 }
585 } 586 }
586 if (res != NULL) { 587 if (res != NULL) {
587 if (G1PrintHeapRegions) { 588 if (G1PrintHeapRegions) {
588 gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT","PTR_FORMAT"], " 589 gclog_or_tty->print_cr("new alloc region "HR_FORMAT,
589 "top "PTR_FORMAT, res->hrs_index(), 590 HR_FORMAT_PARAMS(res));
590 res->bottom(), res->end(), res->top());
591 } 591 }
592 } 592 }
593 return res; 593 return res;
594 } 594 }
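The change above swaps remove_head(), which asserts the free list is non-empty, for remove_head_or_null(): a successful expand() no longer guarantees a free region, because the expansion may be partial. Below is a minimal sketch of the two contracts with illustrative Node/FreeList types, not the VM's classes.

#include <cassert>
#include <cstddef>

struct Node { Node* next; };

class FreeList {
  Node* _head;
public:
  FreeList() : _head(NULL) { }
  void add(Node* n) { n->next = _head; _head = n; }

  // Old contract: the caller must already know the list is non-empty.
  Node* remove_head() {
    assert(_head != NULL && "pre-condition");
    Node* res = _head;
    _head = res->next;
    return res;
  }

  // New contract: a partial expansion may leave the list empty, so report
  // that with NULL instead of tripping the assert.
  Node* remove_head_or_null() {
    return (_head == NULL) ? NULL : remove_head();
  }
};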
595 595
606 g1_policy()->note_alloc_region_limit_reached(purpose); 606 g1_policy()->note_alloc_region_limit_reached(purpose);
607 } 607 }
608 return alloc_region; 608 return alloc_region;
609 } 609 }
610 610
611 int G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions, 611 size_t G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
612 size_t word_size) { 612 size_t word_size) {
613 assert(isHumongous(word_size), "word_size should be humongous"); 613 assert(isHumongous(word_size), "word_size should be humongous");
614 assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition"); 614 assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
615 615
616 int first = -1; 616 size_t first = G1_NULL_HRS_INDEX;
617 if (num_regions == 1) { 617 if (num_regions == 1) {
618 // Only one region to allocate, no need to go through the slower 618 // Only one region to allocate, no need to go through the slower
619 // path. The caller will attempt the expansion if this fails, so 619 // path. The caller will attempt the expansion if this fails, so
620 // let's not try to expand here too. 620 // let's not try to expand here too.
621 HeapRegion* hr = new_region(word_size, false /* do_expand */); 621 HeapRegion* hr = new_region(word_size, false /* do_expand */);
622 if (hr != NULL) { 622 if (hr != NULL) {
623 first = hr->hrs_index(); 623 first = hr->hrs_index();
624 } else { 624 } else {
625 first = -1; 625 first = G1_NULL_HRS_INDEX;
626 } 626 }
627 } else { 627 } else {
628 // We can't allocate humongous regions while cleanupComplete() is 628 // We can't allocate humongous regions while cleanupComplete() is
629 // running, since some of the regions we find to be empty might not 629 // running, since some of the regions we find to be empty might not
630 // yet be added to the free list and it is not straightforward to 630 // yet be added to the free list and it is not straightforward to
635 // region allocation code (see above). 635 // region allocation code (see above).
636 wait_while_free_regions_coming(); 636 wait_while_free_regions_coming();
637 append_secondary_free_list_if_not_empty_with_lock(); 637 append_secondary_free_list_if_not_empty_with_lock();
638 638
639 if (free_regions() >= num_regions) { 639 if (free_regions() >= num_regions) {
640 first = _hrs->find_contiguous(num_regions); 640 first = _hrs.find_contiguous(num_regions);
641 if (first != -1) { 641 if (first != G1_NULL_HRS_INDEX) {
642 for (int i = first; i < first + (int) num_regions; ++i) { 642 for (size_t i = first; i < first + num_regions; ++i) {
643 HeapRegion* hr = _hrs->at(i); 643 HeapRegion* hr = region_at(i);
644 assert(hr->is_empty(), "sanity"); 644 assert(hr->is_empty(), "sanity");
645 assert(is_on_master_free_list(hr), "sanity"); 645 assert(is_on_master_free_list(hr), "sanity");
646 hr->set_pending_removal(true); 646 hr->set_pending_removal(true);
647 } 647 }
648 _free_list.remove_all_pending(num_regions); 648 _free_list.remove_all_pending(num_regions);
651 } 651 }
652 return first; 652 return first;
653 } 653 }
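humongous_obj_allocate_find_first() now returns a size_t and reports "no region" with the named sentinel G1_NULL_HRS_INDEX instead of -1, since -1 loses its conventional meaning once indices are unsigned (fix 6804436). A standalone sketch of the convention, with an illustrative sentinel name and a simplified contiguous-run search standing in for find_contiguous():

#include <cstddef>
#include <cstdio>

static const size_t NULL_REGION_INDEX = (size_t)-1;   // illustrative sentinel, not the real constant

// Find 'num' consecutive free regions in free[0..length); return the index of
// the first one, or NULL_REGION_INDEX when no such run exists.
size_t find_contiguous(const bool* free, size_t length, size_t num) {
  size_t run_start = 0;
  size_t run_len = 0;
  for (size_t i = 0; i < length; ++i) {
    if (free[i]) {
      if (run_len == 0) { run_start = i; }
      if (++run_len == num) { return run_start; }
    } else {
      run_len = 0;                                     // the run is broken, start over
    }
  }
  return NULL_REGION_INDEX;
}

int main() {
  bool free[] = { false, true, true, false, true, true, true, false };
  size_t first = find_contiguous(free, 8, 3);
  if (first == NULL_REGION_INDEX) {
    printf("no run of 3 free regions\n");
  } else {
    printf("first run of 3 free regions starts at %u\n", (unsigned)first);   // prints 4
  }
  return 0;
}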
654 654
655 HeapWord* 655 HeapWord*
656 G1CollectedHeap::humongous_obj_allocate_initialize_regions(int first, 656 G1CollectedHeap::humongous_obj_allocate_initialize_regions(size_t first,
657 size_t num_regions, 657 size_t num_regions,
658 size_t word_size) { 658 size_t word_size) {
659 assert(first != -1, "pre-condition"); 659 assert(first != G1_NULL_HRS_INDEX, "pre-condition");
660 assert(isHumongous(word_size), "word_size should be humongous"); 660 assert(isHumongous(word_size), "word_size should be humongous");
661 assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition"); 661 assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
662 662
663 // Index of last region in the series + 1. 663 // Index of last region in the series + 1.
664 int last = first + (int) num_regions; 664 size_t last = first + num_regions;
665 665
666 // We need to initialize the region(s) we just discovered. This is 666 // We need to initialize the region(s) we just discovered. This is
667 // a bit tricky given that it can happen concurrently with 667 // a bit tricky given that it can happen concurrently with
668 // refinement threads refining cards on these regions and 668 // refinement threads refining cards on these regions and
669 // potentially wanting to refine the BOT as they are scanning 669 // potentially wanting to refine the BOT as they are scanning
674 // The word size sum of all the regions we will allocate. 674 // The word size sum of all the regions we will allocate.
675 size_t word_size_sum = num_regions * HeapRegion::GrainWords; 675 size_t word_size_sum = num_regions * HeapRegion::GrainWords;
676 assert(word_size <= word_size_sum, "sanity"); 676 assert(word_size <= word_size_sum, "sanity");
677 677
678 // This will be the "starts humongous" region. 678 // This will be the "starts humongous" region.
679 HeapRegion* first_hr = _hrs->at(first); 679 HeapRegion* first_hr = region_at(first);
680 // The header of the new object will be placed at the bottom of 680 // The header of the new object will be placed at the bottom of
681 // the first region. 681 // the first region.
682 HeapWord* new_obj = first_hr->bottom(); 682 HeapWord* new_obj = first_hr->bottom();
683 // This will be the new end of the first region in the series that 683 // This will be the new end of the first region in the series that
684 // should also match the end of the last region in the series. 684 // should also match the end of the last region in the series.
709 first_hr->set_startsHumongous(new_top, new_end); 709 first_hr->set_startsHumongous(new_top, new_end);
710 710
711 // Then, if there are any, we will set up the "continues 711 // Then, if there are any, we will set up the "continues
712 // humongous" regions. 712 // humongous" regions.
713 HeapRegion* hr = NULL; 713 HeapRegion* hr = NULL;
714 for (int i = first + 1; i < last; ++i) { 714 for (size_t i = first + 1; i < last; ++i) {
715 hr = _hrs->at(i); 715 hr = region_at(i);
716 hr->set_continuesHumongous(first_hr); 716 hr->set_continuesHumongous(first_hr);
717 } 717 }
718 // If we have "continues humongous" regions (hr != NULL), then the 718 // If we have "continues humongous" regions (hr != NULL), then the
719 // end of the last one should match new_end. 719 // end of the last one should match new_end.
720 assert(hr == NULL || hr->end() == new_end, "sanity"); 720 assert(hr == NULL || hr->end() == new_end, "sanity");
744 // fields here. The way we set top for all regions (i.e., top == 744 // fields here. The way we set top for all regions (i.e., top ==
745 // end for all regions but the last one, top == new_top for the 745 // end for all regions but the last one, top == new_top for the
746 // last one) is actually used when we will free up the humongous 746 // last one) is actually used when we will free up the humongous
747 // region in free_humongous_region(). 747 // region in free_humongous_region().
748 hr = NULL; 748 hr = NULL;
749 for (int i = first + 1; i < last; ++i) { 749 for (size_t i = first + 1; i < last; ++i) {
750 hr = _hrs->at(i); 750 hr = region_at(i);
751 if ((i + 1) == last) { 751 if ((i + 1) == last) {
752 // last continues humongous region 752 // last continues humongous region
753 assert(hr->bottom() < new_top && new_top <= hr->end(), 753 assert(hr->bottom() < new_top && new_top <= hr->end(),
754 "new_top should fall on this region"); 754 "new_top should fall on this region");
755 hr->set_top(new_top); 755 hr->set_top(new_top);
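The initialize-regions code above lays one humongous object across a series of regions: the first is tagged "starts humongous", the rest "continues humongous", top == end for every region but the last, and the last gets top == new_top. A worked sketch of that arithmetic, with an assumed region size and an assumed first index (the real values come from HeapRegion::GrainWords and find_first):

#include <cassert>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t GrainWords = 512 * 1024;           // assumed region size in words
  const size_t word_size  = 3 * GrainWords + 10;  // a humongous allocation request

  // Same rounding as round_to(word_size, GrainWords) / GrainWords.
  size_t num_regions = (word_size + GrainWords - 1) / GrainWords;
  size_t first = 100;                             // index found by find_first (assumed)
  size_t last  = first + num_regions;             // exclusive, as in the code above

  // Every region but the last is completely covered by the object;
  // the last one is used only up to new_top.
  size_t words_in_last = word_size - (num_regions - 1) * GrainWords;
  assert(0 < words_in_last && words_in_last <= GrainWords);

  printf("series [%u, %u): 1 starts-humongous + %u continues-humongous, "
         "last region used up to %u words\n",
         (unsigned)first, (unsigned)last, (unsigned)(num_regions - 1),
         (unsigned)words_in_last);
  return 0;
}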
781 verify_region_sets_optional(); 781 verify_region_sets_optional();
782 782
783 size_t num_regions = 783 size_t num_regions =
784 round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords; 784 round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
785 size_t x_size = expansion_regions(); 785 size_t x_size = expansion_regions();
786 size_t fs = _hrs->free_suffix(); 786 size_t fs = _hrs.free_suffix();
787 int first = humongous_obj_allocate_find_first(num_regions, word_size); 787 size_t first = humongous_obj_allocate_find_first(num_regions, word_size);
788 if (first == -1) { 788 if (first == G1_NULL_HRS_INDEX) {
789 // The only thing we can do now is attempt expansion. 789 // The only thing we can do now is attempt expansion.
790 if (fs + x_size >= num_regions) { 790 if (fs + x_size >= num_regions) {
791 // If the number of regions we're trying to allocate for this 791 // If the number of regions we're trying to allocate for this
792 // object is at most the number of regions in the free suffix, 792 // object is at most the number of regions in the free suffix,
793 // then the call to humongous_obj_allocate_find_first() above 793 // then the call to humongous_obj_allocate_find_first() above
797 // not sufficient for the object _and_ we have some expansion 797 // not sufficient for the object _and_ we have some expansion
798 // room available. 798 // room available.
799 assert(num_regions > fs, "earlier allocation should have succeeded"); 799 assert(num_regions > fs, "earlier allocation should have succeeded");
800 800
801 if (expand((num_regions - fs) * HeapRegion::GrainBytes)) { 801 if (expand((num_regions - fs) * HeapRegion::GrainBytes)) {
802 // Even though the heap was expanded, it might not have
803 // reached the desired size. So, we cannot assume that the
804 // allocation will succeed.
802 first = humongous_obj_allocate_find_first(num_regions, word_size); 805 first = humongous_obj_allocate_find_first(num_regions, word_size);
803 // If the expansion was successful then the allocation
804 // should have been successful.
805 assert(first != -1, "this should have worked");
806 } 806 }
807 } 807 }
808 } 808 }
809 809
810 HeapWord* result = NULL; 810 HeapWord* result = NULL;
811 if (first != -1) { 811 if (first != G1_NULL_HRS_INDEX) {
812 result = 812 result =
813 humongous_obj_allocate_initialize_regions(first, num_regions, word_size); 813 humongous_obj_allocate_initialize_regions(first, num_regions, word_size);
814 assert(result != NULL, "it should always return a valid result"); 814 assert(result != NULL, "it should always return a valid result");
815 } 815 }
816 816
1364 } 1364 }
1365 1365
1366 // Update the number of full collections that have been completed. 1366 // Update the number of full collections that have been completed.
1367 increment_full_collections_completed(false /* concurrent */); 1367 increment_full_collections_completed(false /* concurrent */);
1368 1368
1369 _hrs.verify_optional();
1369 verify_region_sets_optional(); 1370 verify_region_sets_optional();
1370 1371
1371 if (PrintHeapAtGC) { 1372 if (PrintHeapAtGC) {
1372 Universe::print_heap_after_gc(); 1373 Universe::print_heap_after_gc();
1373 } 1374 }
1587 1588
1588 verify_region_sets_optional(); 1589 verify_region_sets_optional();
1589 1590
1590 size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes); 1591 size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
1591 if (expand(expand_bytes)) { 1592 if (expand(expand_bytes)) {
1593 _hrs.verify_optional();
1592 verify_region_sets_optional(); 1594 verify_region_sets_optional();
1593 return attempt_allocation_at_safepoint(word_size, 1595 return attempt_allocation_at_safepoint(word_size,
1594 false /* expect_null_mutator_alloc_region */); 1596 false /* expect_null_mutator_alloc_region */);
1595 } 1597 }
1596 return NULL; 1598 return NULL;
1599 }
1600
1601 void G1CollectedHeap::update_committed_space(HeapWord* old_end,
1602 HeapWord* new_end) {
1603 assert(old_end != new_end, "don't call this otherwise");
1604 assert((HeapWord*) _g1_storage.high() == new_end, "invariant");
1605
1606 // Update the committed mem region.
1607 _g1_committed.set_end(new_end);
1608 // Tell the card table about the update.
1609 Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
1610 // Tell the BOT about the update.
1611 _bot_shared->resize(_g1_committed.word_size());
1597 } 1612 }
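update_committed_space() is new in this change: one place that pushes a new committed end to the card table and the block offset table, shared by the grow path in expand(), the partial-expansion rollback, and shrink_helper() below. A compact sketch of the refactoring pattern; the types and resize calls are illustrative, not the VM's.

#include <cassert>
#include <cstddef>

class CommittedSpace {
  char* _committed_end;

  void resize_card_table(char* new_end)   { /* re-cover [bottom, new_end) */ }
  void resize_offset_table(char* new_end) { /* resize the BOT to match    */ }

  // One helper instead of duplicated bookkeeping at every call site.
  void update_committed_space(char* old_end, char* new_end) {
    assert(old_end != new_end && "don't call this otherwise");
    _committed_end = new_end;        // update the committed region
    resize_card_table(new_end);      // tell the card table about the update
    resize_offset_table(new_end);    // tell the block offset table as well
  }

public:
  CommittedSpace(char* end) : _committed_end(end) { }
  void grow_to(char* new_end)   { update_committed_space(_committed_end, new_end); }
  void shrink_to(char* new_end) { update_committed_space(_committed_end, new_end); }
};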
1598 1613
1599 bool G1CollectedHeap::expand(size_t expand_bytes) { 1614 bool G1CollectedHeap::expand(size_t expand_bytes) {
1600 size_t old_mem_size = _g1_storage.committed_size(); 1615 size_t old_mem_size = _g1_storage.committed_size();
1601 size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes); 1616 size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
1605 if (Verbose && PrintGC) { 1620 if (Verbose && PrintGC) {
1606 gclog_or_tty->print("Expanding garbage-first heap from %ldK by %ldK", 1621 gclog_or_tty->print("Expanding garbage-first heap from %ldK by %ldK",
1607 old_mem_size/K, aligned_expand_bytes/K); 1622 old_mem_size/K, aligned_expand_bytes/K);
1608 } 1623 }
1609 1624
1610 HeapWord* old_end = (HeapWord*)_g1_storage.high(); 1625 // First commit the memory.
1626 HeapWord* old_end = (HeapWord*) _g1_storage.high();
1611 bool successful = _g1_storage.expand_by(aligned_expand_bytes); 1627 bool successful = _g1_storage.expand_by(aligned_expand_bytes);
1612 if (successful) { 1628 if (successful) {
1613 HeapWord* new_end = (HeapWord*)_g1_storage.high(); 1629 // Then propagate this update to the necessary data structures.
1614 1630 HeapWord* new_end = (HeapWord*) _g1_storage.high();
1615 // Expand the committed region. 1631 update_committed_space(old_end, new_end);
1616 _g1_committed.set_end(new_end); 1632
1617 1633 FreeRegionList expansion_list("Local Expansion List");
1618 // Tell the cardtable about the expansion. 1634 MemRegion mr = _hrs.expand_by(old_end, new_end, &expansion_list);
1619 Universe::heap()->barrier_set()->resize_covered_region(_g1_committed); 1635 assert(mr.start() == old_end, "post-condition");
1620 1636 // mr might be a smaller region than what was requested if
1621 // And the offset table as well. 1637 // expand_by() was unable to allocate the HeapRegion instances
1622 _bot_shared->resize(_g1_committed.word_size()); 1638 assert(mr.end() <= new_end, "post-condition");
1623 1639
1624 expand_bytes = aligned_expand_bytes; 1640 size_t actual_expand_bytes = mr.byte_size();
1625 HeapWord* base = old_end; 1641 assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
1626 1642 assert(actual_expand_bytes == expansion_list.total_capacity_bytes(),
1627 // Create the heap regions for [old_end, new_end) 1643 "post-condition");
1628 while (expand_bytes > 0) { 1644 if (actual_expand_bytes < aligned_expand_bytes) {
1629 HeapWord* high = base + HeapRegion::GrainWords; 1645 // We could not expand _hrs to the desired size. In this case we
1630 1646 // need to shrink the committed space accordingly.
1631 // Create a new HeapRegion. 1647 assert(mr.end() < new_end, "invariant");
1632 MemRegion mr(base, high); 1648
1633 bool is_zeroed = !_g1_max_committed.contains(base); 1649 size_t diff_bytes = aligned_expand_bytes - actual_expand_bytes;
1634 HeapRegion* hr = new HeapRegion(_bot_shared, mr, is_zeroed); 1650 // First uncommit the memory.
1635 1651 _g1_storage.shrink_by(diff_bytes);
1636 // Add it to the HeapRegionSeq. 1652 // Then propagate this update to the necessary data structures.
1637 _hrs->insert(hr); 1653 update_committed_space(new_end, mr.end());
1638 _free_list.add_as_tail(hr); 1654 }
1639 1655 _free_list.add_as_tail(&expansion_list);
1640 // And we used up an expansion region to create it.
1641 _expansion_regions--;
1642
1643 expand_bytes -= HeapRegion::GrainBytes;
1644 base += HeapRegion::GrainWords;
1645 }
1646 assert(base == new_end, "sanity");
1647
1648 // Now update max_committed if necessary.
1649 _g1_max_committed.set_end(MAX2(_g1_max_committed.end(), new_end));
1650
1651 } else { 1656 } else {
1652 // The expansion of the virtual storage space was unsuccessful. 1657 // The expansion of the virtual storage space was unsuccessful.
1653 // Let's see if it was because we ran out of swap. 1658 // Let's see if it was because we ran out of swap.
1654 if (G1ExitOnExpansionFailure && 1659 if (G1ExitOnExpansionFailure &&
1655 _g1_storage.uncommitted_size() >= aligned_expand_bytes) { 1660 _g1_storage.uncommitted_size() >= aligned_expand_bytes) {
1665 new_mem_size/K); 1670 new_mem_size/K);
1666 } 1671 }
1667 return successful; 1672 return successful;
1668 } 1673 }
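The rewritten expand() replaces the old per-region "new HeapRegion" loop with a call into the sequence (_hrs.expand_by), which per the summary re-uses existing HeapRegion instances where possible, and then rolls back the committed space if fewer regions could be provided than were committed. A standalone sketch of that commit / populate / roll-back-the-excess pattern; commit, uncommit, and populate_regions are assumed stand-ins for _g1_storage.expand_by(), shrink_by(), and _hrs.expand_by().

#include <cassert>
#include <cstddef>
#include <cstdio>

bool   commit(size_t bytes)            { return true; }                    // pretend commit succeeds
void   uncommit(size_t bytes)          { }                                 // give memory back
size_t populate_regions(size_t wanted) { return wanted > 2 ? 2 : wanted; } // pretend only 2 regions fit

bool expand(size_t expand_regions, size_t region_bytes) {
  if (!commit(expand_regions * region_bytes)) {
    return false;                                  // could not even commit the memory
  }
  size_t actual = populate_regions(expand_regions);
  assert(actual <= expand_regions);
  if (actual < expand_regions) {
    // We could not grow the region sequence to the desired size, so give the
    // excess committed memory back rather than leaving it unusable.
    uncommit((expand_regions - actual) * region_bytes);
  }
  printf("expanded by %u of %u requested regions\n",
         (unsigned)actual, (unsigned)expand_regions);
  // Even a partial expansion counts as success; callers re-check the free
  // list (cf. remove_head_or_null() earlier in this diff) instead of assuming
  // the pending allocation will now succeed.
  return true;
}

int main() { return expand(4, 1024 * 1024) ? 0 : 1; }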
1669 1674
1670 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) 1675 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
1671 {
1672 size_t old_mem_size = _g1_storage.committed_size(); 1676 size_t old_mem_size = _g1_storage.committed_size();
1673 size_t aligned_shrink_bytes = 1677 size_t aligned_shrink_bytes =
1674 ReservedSpace::page_align_size_down(shrink_bytes); 1678 ReservedSpace::page_align_size_down(shrink_bytes);
1675 aligned_shrink_bytes = align_size_down(aligned_shrink_bytes, 1679 aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
1676 HeapRegion::GrainBytes); 1680 HeapRegion::GrainBytes);
1677 size_t num_regions_deleted = 0; 1681 size_t num_regions_deleted = 0;
1678 MemRegion mr = _hrs->shrink_by(aligned_shrink_bytes, num_regions_deleted); 1682 MemRegion mr = _hrs.shrink_by(aligned_shrink_bytes, &num_regions_deleted);
1679 1683 HeapWord* old_end = (HeapWord*) _g1_storage.high();
1680 assert(mr.end() == (HeapWord*)_g1_storage.high(), "Bad shrink!"); 1684 assert(mr.end() == old_end, "post-condition");
1681 if (mr.byte_size() > 0) 1685 if (mr.byte_size() > 0) {
1682 _g1_storage.shrink_by(mr.byte_size()); 1686 _g1_storage.shrink_by(mr.byte_size());
1683 assert(mr.start() == (HeapWord*)_g1_storage.high(), "Bad shrink!"); 1687 HeapWord* new_end = (HeapWord*) _g1_storage.high();
1684 1688 assert(mr.start() == new_end, "post-condition");
1685 _g1_committed.set_end(mr.start()); 1689
1686 _expansion_regions += num_regions_deleted; 1690 _expansion_regions += num_regions_deleted;
1687 1691 update_committed_space(old_end, new_end);
1688 // Tell the cardtable about it. 1692 HeapRegionRemSet::shrink_heap(n_regions());
1689 Universe::heap()->barrier_set()->resize_covered_region(_g1_committed); 1693
1690 1694 if (Verbose && PrintGC) {
1691 // And the offset table as well. 1695 size_t new_mem_size = _g1_storage.committed_size();
1692 _bot_shared->resize(_g1_committed.word_size()); 1696 gclog_or_tty->print_cr("Shrinking garbage-first heap from %ldK by %ldK to %ldK",
1693 1697 old_mem_size/K, aligned_shrink_bytes/K,
1694 HeapRegionRemSet::shrink_heap(n_regions()); 1698 new_mem_size/K);
1695 1699 }
1696 if (Verbose && PrintGC) {
1697 size_t new_mem_size = _g1_storage.committed_size();
1698 gclog_or_tty->print_cr("Shrinking garbage-first heap from %ldK by %ldK to %ldK",
1699 old_mem_size/K, aligned_shrink_bytes/K,
1700 new_mem_size/K);
1701 } 1700 }
1702 } 1701 }
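shrink_helper() first page-aligns the request down and then region-aligns it down, so it never tries to uncommit a partial region. A small worked example of that double alignment, with assumed page and region sizes (the real values come from the OS page size and HeapRegion::GrainBytes):

#include <cstddef>
#include <cstdio>

size_t align_down(size_t value, size_t alignment) {
  return value - (value % alignment);
}

int main() {
  const size_t page_bytes   = 4 * 1024;            // assumed page size
  const size_t region_bytes = 4 * 1024 * 1024;     // assumed region size
  size_t shrink_bytes = 10 * 1024 * 1024 + 123;    // a raw shrink request

  size_t aligned = align_down(shrink_bytes, page_bytes);     // 10485760 (10 MB)
  aligned        = align_down(aligned, region_bytes);        // 8388608  (2 whole regions)

  printf("shrink request %u -> %u bytes (%u whole regions)\n",
         (unsigned)shrink_bytes, (unsigned)aligned,
         (unsigned)(aligned / region_bytes));
  return 0;
}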
1703 1702
1704 void G1CollectedHeap::shrink(size_t shrink_bytes) { 1703 void G1CollectedHeap::shrink(size_t shrink_bytes) {
1705 verify_region_sets_optional(); 1704 verify_region_sets_optional();
1710 // remove only the ones that we need to remove. 1709 // remove only the ones that we need to remove.
1711 tear_down_region_lists(); // We will rebuild them in a moment. 1710 tear_down_region_lists(); // We will rebuild them in a moment.
1712 shrink_helper(shrink_bytes); 1711 shrink_helper(shrink_bytes);
1713 rebuild_region_lists(); 1712 rebuild_region_lists();
1714 1713
1714 _hrs.verify_optional();
1715 verify_region_sets_optional(); 1715 verify_region_sets_optional();
1716 } 1716 }
1717 1717
1718 // Public methods. 1718 // Public methods.
1719 1719
1888 1888
1889 _perm_gen = pgs->init(perm_gen_rs, pgs->init_size(), rem_set()); 1889 _perm_gen = pgs->init(perm_gen_rs, pgs->init_size(), rem_set());
1890 1890
1891 _g1_storage.initialize(g1_rs, 0); 1891 _g1_storage.initialize(g1_rs, 0);
1892 _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0); 1892 _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
1893 _g1_max_committed = _g1_committed; 1893 _hrs.initialize((HeapWord*) _g1_reserved.start(),
1894 _hrs = new HeapRegionSeq(_expansion_regions); 1894 (HeapWord*) _g1_reserved.end(),
1895 guarantee(_hrs != NULL, "Couldn't allocate HeapRegionSeq"); 1895 _expansion_regions);
1896 1896
1897 // 6843694 - ensure that the maximum region index can fit 1897 // 6843694 - ensure that the maximum region index can fit
1898 // in the remembered set structures. 1898 // in the remembered set structures.
1899 const size_t max_region_idx = ((size_t)1 << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1; 1899 const size_t max_region_idx = ((size_t)1 << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
1900 guarantee((max_regions() - 1) <= max_region_idx, "too many regions"); 1900 guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
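The 6843694 guard kept above still applies after the move to size_t indices: the remembered set structures store region indices in RegionIdx_t, so max_regions() - 1 must fit in that type's signed range. A worked check under the assumption that RegionIdx_t is a 16-bit type, with example heap and region sizes:

#include <cassert>
#include <cstddef>
#include <cstdint>

int main() {
  typedef int16_t RegionIdx_t;                     // assumed width, for illustration
  const size_t BitsPerByte = 8;
  const size_t max_region_idx =
      ((size_t)1 << (sizeof(RegionIdx_t) * BitsPerByte - 1)) - 1;     // 32767

  const uint64_t max_capacity = (uint64_t)64 * 1024 * 1024 * 1024;    // assumed 64 GB heap
  const uint64_t grain_bytes  = 4 * 1024 * 1024;                      // assumed 4 MB regions
  const uint64_t max_regions  = max_capacity / grain_bytes;           // 16384

  assert((max_regions - 1) <= max_region_idx && "too many regions");  // the guard holds
  return 0;
}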
1989 _cg1r->init(); 1989 _cg1r->init();
1990 1990
1991 // Here we allocate the dummy full region that is required by the 1991 // Here we allocate the dummy full region that is required by the
1992 // G1AllocRegion class. If we don't pass an address in the reserved 1992 // G1AllocRegion class. If we don't pass an address in the reserved
1993 // space here, lots of asserts fire. 1993 // space here, lots of asserts fire.
1994 MemRegion mr(_g1_reserved.start(), HeapRegion::GrainWords); 1994
1995 HeapRegion* dummy_region = new HeapRegion(_bot_shared, mr, true); 1995 HeapRegion* dummy_region = new_heap_region(0 /* index of bottom region */,
1996 _g1_reserved.start());
1996 // We'll re-use the same region whether the alloc region will 1997 // We'll re-use the same region whether the alloc region will
1997 // require BOT updates or not and, if it doesn't, then a non-young 1998 // require BOT updates or not and, if it doesn't, then a non-young
1998 // region will complain that it cannot support allocations without 1999 // region will complain that it cannot support allocations without
1999 // BOT updates. So we'll tag the dummy region as young to avoid that. 2000 // BOT updates. So we'll tag the dummy region as young to avoid that.
2000 dummy_region->set_young(); 2001 dummy_region->set_young();
2098 size_t result() { return _used; } 2099 size_t result() { return _used; }
2099 }; 2100 };
2100 2101
2101 size_t G1CollectedHeap::recalculate_used() const { 2102 size_t G1CollectedHeap::recalculate_used() const {
2102 SumUsedClosure blk; 2103 SumUsedClosure blk;
2103 _hrs->iterate(&blk); 2104 heap_region_iterate(&blk);
2104 return blk.result(); 2105 return blk.result();
2105 } 2106 }
2106 2107
2107 #ifndef PRODUCT 2108 #ifndef PRODUCT
2108 class SumUsedRegionsClosure: public HeapRegionClosure { 2109 class SumUsedRegionsClosure: public HeapRegionClosure {
2118 size_t result() { return _num; } 2119 size_t result() { return _num; }
2119 }; 2120 };
2120 2121
2121 size_t G1CollectedHeap::recalculate_used_regions() const { 2122 size_t G1CollectedHeap::recalculate_used_regions() const {
2122 SumUsedRegionsClosure blk; 2123 SumUsedRegionsClosure blk;
2123 _hrs->iterate(&blk); 2124 heap_region_iterate(&blk);
2124 return blk.result(); 2125 return blk.result();
2125 } 2126 }
2126 #endif // PRODUCT 2127 #endif // PRODUCT
2127 2128
2128 size_t G1CollectedHeap::unsafe_max_alloc() { 2129 size_t G1CollectedHeap::unsafe_max_alloc() {
2283 } 2284 }
2284 } 2285 }
2285 } 2286 }
2286 2287
2287 bool G1CollectedHeap::is_in(const void* p) const { 2288 bool G1CollectedHeap::is_in(const void* p) const {
2288 if (_g1_committed.contains(p)) { 2289 HeapRegion* hr = _hrs.addr_to_region((HeapWord*) p);
2289 HeapRegion* hr = _hrs->addr_to_region(p); 2290 if (hr != NULL) {
2290 return hr->is_in(p); 2291 return hr->is_in(p);
2291 } else { 2292 } else {
2292 return _perm_gen->as_gen()->is_in(p); 2293 return _perm_gen->as_gen()->is_in(p);
2293 } 2294 }
2294 } 2295 }
2312 } 2313 }
2313 }; 2314 };
2314 2315
2315 void G1CollectedHeap::oop_iterate(OopClosure* cl, bool do_perm) { 2316 void G1CollectedHeap::oop_iterate(OopClosure* cl, bool do_perm) {
2316 IterateOopClosureRegionClosure blk(_g1_committed, cl); 2317 IterateOopClosureRegionClosure blk(_g1_committed, cl);
2317 _hrs->iterate(&blk); 2318 heap_region_iterate(&blk);
2318 if (do_perm) { 2319 if (do_perm) {
2319 perm_gen()->oop_iterate(cl); 2320 perm_gen()->oop_iterate(cl);
2320 } 2321 }
2321 } 2322 }
2322 2323
2323 void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm) { 2324 void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm) {
2324 IterateOopClosureRegionClosure blk(mr, cl); 2325 IterateOopClosureRegionClosure blk(mr, cl);
2325 _hrs->iterate(&blk); 2326 heap_region_iterate(&blk);
2326 if (do_perm) { 2327 if (do_perm) {
2327 perm_gen()->oop_iterate(cl); 2328 perm_gen()->oop_iterate(cl);
2328 } 2329 }
2329 } 2330 }
2330 2331
2342 } 2343 }
2343 }; 2344 };
2344 2345
2345 void G1CollectedHeap::object_iterate(ObjectClosure* cl, bool do_perm) { 2346 void G1CollectedHeap::object_iterate(ObjectClosure* cl, bool do_perm) {
2346 IterateObjectClosureRegionClosure blk(cl); 2347 IterateObjectClosureRegionClosure blk(cl);
2347 _hrs->iterate(&blk); 2348 heap_region_iterate(&blk);
2348 if (do_perm) { 2349 if (do_perm) {
2349 perm_gen()->object_iterate(cl); 2350 perm_gen()->object_iterate(cl);
2350 } 2351 }
2351 } 2352 }
2352 2353
2367 } 2368 }
2368 }; 2369 };
2369 2370
2370 void G1CollectedHeap::space_iterate(SpaceClosure* cl) { 2371 void G1CollectedHeap::space_iterate(SpaceClosure* cl) {
2371 SpaceClosureRegionClosure blk(cl); 2372 SpaceClosureRegionClosure blk(cl);
2372 _hrs->iterate(&blk); 2373 heap_region_iterate(&blk);
2373 } 2374 }
2374 2375
2375 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) { 2376 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
2376 _hrs->iterate(cl); 2377 _hrs.iterate(cl);
2377 } 2378 }
2378 2379
2379 void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r, 2380 void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r,
2380 HeapRegionClosure* cl) { 2381 HeapRegionClosure* cl) const {
2381 _hrs->iterate_from(r, cl); 2382 _hrs.iterate_from(r, cl);
2382 } 2383 }
2383
2384 void
2385 G1CollectedHeap::heap_region_iterate_from(int idx, HeapRegionClosure* cl) {
2386 _hrs->iterate_from(idx, cl);
2387 }
2388
2389 HeapRegion* G1CollectedHeap::region_at(size_t idx) { return _hrs->at(idx); }
2390 2384
2391 void 2385 void
2392 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl, 2386 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
2393 int worker, 2387 int worker,
2394 jint claim_value) { 2388 jint claim_value) {
2566 cur = next; 2560 cur = next;
2567 } 2561 }
2568 } 2562 }
2569 2563
2570 CompactibleSpace* G1CollectedHeap::first_compactible_space() { 2564 CompactibleSpace* G1CollectedHeap::first_compactible_space() {
2571 return _hrs->length() > 0 ? _hrs->at(0) : NULL; 2565 return n_regions() > 0 ? region_at(0) : NULL;
2572 } 2566 }
2573 2567
2574 2568
2575 Space* G1CollectedHeap::space_containing(const void* addr) const { 2569 Space* G1CollectedHeap::space_containing(const void* addr) const {
2576 Space* res = heap_region_containing(addr); 2570 Space* res = heap_region_containing(addr);
2879 2873
2880 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue), 2874 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
2881 "sanity check"); 2875 "sanity check");
2882 } else { 2876 } else {
2883 VerifyRegionClosure blk(allow_dirty, false, use_prev_marking); 2877 VerifyRegionClosure blk(allow_dirty, false, use_prev_marking);
2884 _hrs->iterate(&blk); 2878 heap_region_iterate(&blk);
2885 if (blk.failures()) { 2879 if (blk.failures()) {
2886 failures = true; 2880 failures = true;
2887 } 2881 }
2888 } 2882 }
2889 if (!silent) gclog_or_tty->print("RemSet "); 2883 if (!silent) gclog_or_tty->print("RemSet ");
2948 } 2942 }
2949 } 2943 }
2950 2944
2951 void G1CollectedHeap::print_on_extended(outputStream* st) const { 2945 void G1CollectedHeap::print_on_extended(outputStream* st) const {
2952 PrintRegionClosure blk(st); 2946 PrintRegionClosure blk(st);
2953 _hrs->iterate(&blk); 2947 heap_region_iterate(&blk);
2954 } 2948 }
2955 2949
2956 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const { 2950 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
2957 if (G1CollectedHeap::use_parallel_gc_threads()) { 2951 if (G1CollectedHeap::use_parallel_gc_threads()) {
2958 workers()->print_worker_threads_on(st); 2952 workers()->print_worker_threads_on(st);
2985 if (G1SummarizeConcMark) { 2979 if (G1SummarizeConcMark) {
2986 concurrent_mark()->print_summary_info(); 2980 concurrent_mark()->print_summary_info();
2987 } 2981 }
2988 g1_policy()->print_yg_surv_rate_info(); 2982 g1_policy()->print_yg_surv_rate_info();
2989 SpecializationStats::print(); 2983 SpecializationStats::print();
2990 }
2991
2992 int G1CollectedHeap::addr_to_arena_id(void* addr) const {
2993 HeapRegion* hr = heap_region_containing(addr);
2994 if (hr == NULL) {
2995 return 0;
2996 } else {
2997 return 1;
2998 }
2999 } 2984 }
3000 2985
3001 G1CollectedHeap* G1CollectedHeap::heap() { 2986 G1CollectedHeap* G1CollectedHeap::heap() {
3002 assert(_sh->kind() == CollectedHeap::G1CollectedHeap, 2987 assert(_sh->kind() == CollectedHeap::G1CollectedHeap,
3003 "not a garbage-first heap"); 2988 "not a garbage-first heap");
3475 print_tracing_info(); 3460 print_tracing_info();
3476 vm_exit(-1); 3461 vm_exit(-1);
3477 } 3462 }
3478 } 3463 }
3479 3464
3465 _hrs.verify_optional();
3480 verify_region_sets_optional(); 3466 verify_region_sets_optional();
3481 3467
3482 TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats()); 3468 TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
3483 TASKQUEUE_STATS_ONLY(reset_taskqueue_stats()); 3469 TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
3484 3470
3607 #ifdef G1_DEBUG 3593 #ifdef G1_DEBUG
3608 class FindGCAllocRegion: public HeapRegionClosure { 3594 class FindGCAllocRegion: public HeapRegionClosure {
3609 public: 3595 public:
3610 bool doHeapRegion(HeapRegion* r) { 3596 bool doHeapRegion(HeapRegion* r) {
3611 if (r->is_gc_alloc_region()) { 3597 if (r->is_gc_alloc_region()) {
3612 gclog_or_tty->print_cr("Region %d ["PTR_FORMAT"...] is still a gc_alloc_region.", 3598 gclog_or_tty->print_cr("Region "HR_FORMAT" is still a GC alloc region",
3613 r->hrs_index(), r->bottom()); 3599 HR_FORMAT_PARAMS(r));
3614 } 3600 }
3615 return false; 3601 return false;
3616 } 3602 }
3617 }; 3603 };
3618 #endif // G1_DEBUG 3604 #endif // G1_DEBUG
3693 alloc_region = new_gc_alloc_region(ap, HeapRegion::GrainWords); 3679 alloc_region = new_gc_alloc_region(ap, HeapRegion::GrainWords);
3694 } else { 3680 } else {
3695 // the region was retained from the last collection 3681 // the region was retained from the last collection
3696 ++_gc_alloc_region_counts[ap]; 3682 ++_gc_alloc_region_counts[ap];
3697 if (G1PrintHeapRegions) { 3683 if (G1PrintHeapRegions) {
3698 gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], " 3684 gclog_or_tty->print_cr("new alloc region "HR_FORMAT,
3699 "top "PTR_FORMAT, 3685 HR_FORMAT_PARAMS(alloc_region));
3700 alloc_region->hrs_index(), alloc_region->bottom(), alloc_region->end(), alloc_region->top());
3701 } 3686 }
3702 } 3687 }
3703 3688
3704 if (alloc_region != NULL) { 3689 if (alloc_region != NULL) {
3705 assert(_gc_alloc_regions[ap] == NULL, "pre-condition"); 3690 assert(_gc_alloc_regions[ap] == NULL, "pre-condition");
4906 size_t hr_pre_used = 0; 4891 size_t hr_pre_used = 0;
4907 _humongous_set.remove_with_proxy(hr, humongous_proxy_set); 4892 _humongous_set.remove_with_proxy(hr, humongous_proxy_set);
4908 hr->set_notHumongous(); 4893 hr->set_notHumongous();
4909 free_region(hr, &hr_pre_used, free_list, par); 4894 free_region(hr, &hr_pre_used, free_list, par);
4910 4895
4911 int i = hr->hrs_index() + 1; 4896 size_t i = hr->hrs_index() + 1;
4912 size_t num = 1; 4897 size_t num = 1;
4913 while ((size_t) i < n_regions()) { 4898 while (i < n_regions()) {
4914 HeapRegion* curr_hr = _hrs->at(i); 4899 HeapRegion* curr_hr = region_at(i);
4915 if (!curr_hr->continuesHumongous()) { 4900 if (!curr_hr->continuesHumongous()) {
4916 break; 4901 break;
4917 } 4902 }
4918 curr_hr->set_notHumongous(); 4903 curr_hr->set_notHumongous();
4919 free_region(curr_hr, &hr_pre_used, free_list, par); 4904 free_region(curr_hr, &hr_pre_used, free_list, par);
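free_humongous_region() now walks the series with a size_t index, starting just past the "starts humongous" region and stopping at the first region that is not tagged "continues humongous". A simplified standalone version of that walk over illustrative region tags:

#include <cstddef>

enum RegionKind { Free, Normal, StartsHumongous, ContinuesHumongous };

// Length of the humongous series that starts at 'first': the starts-humongous
// region itself plus every immediately following continues-humongous region.
size_t humongous_series_length(const RegionKind* kinds, size_t n_regions, size_t first) {
  size_t num = 1;                                   // the starts-humongous region
  for (size_t i = first + 1; i < n_regions; ++i) {
    if (kinds[i] != ContinuesHumongous) {
      break;                                        // the series has ended
    }
    ++num;                                          // free_region() would be called here
  }
  return num;
}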
5269 gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : " 5254 gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
5270 "done waiting for free regions"); 5255 "done waiting for free regions");
5271 } 5256 }
5272 } 5257 }
5273 5258
5274 size_t G1CollectedHeap::n_regions() {
5275 return _hrs->length();
5276 }
5277
5278 size_t G1CollectedHeap::max_regions() {
5279 return
5280 (size_t)align_size_up(max_capacity(), HeapRegion::GrainBytes) /
5281 HeapRegion::GrainBytes;
5282 }
5283
5284 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) { 5259 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
5285 assert(heap_lock_held_for_gc(), 5260 assert(heap_lock_held_for_gc(),
5286 "the heap lock should already be held by or for this thread"); 5261 "the heap lock should already be held by or for this thread");
5287 _young_list->push_region(hr); 5262 _young_list->push_region(hr);
5288 g1_policy()->set_region_short_lived(hr); 5263 g1_policy()->set_region_short_lived(hr);
5475 } 5450 }
5476 return false; 5451 return false;
5477 } 5452 }
5478 }; 5453 };
5479 5454
5455 HeapRegion* G1CollectedHeap::new_heap_region(size_t hrs_index,
5456 HeapWord* bottom) {
5457 HeapWord* end = bottom + HeapRegion::GrainWords;
5458 MemRegion mr(bottom, end);
5459 assert(_g1_reserved.contains(mr), "invariant");
5460 // This might return NULL if the allocation fails
5461 return new HeapRegion(hrs_index, _bot_shared, mr, true /* is_zeroed */);
5462 }
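new_heap_region() is the factory used whenever a fresh HeapRegion instance is needed (the dummy region earlier in this diff uses it too), and its "might return NULL" comment is what the unified expansion code from summary item (l) has to be robust against. A hedged sketch of the pattern with standalone types; std::nothrow stands in for the VM's C-heap allocation, which reports failure with NULL rather than an exception.

#include <cstddef>
#include <new>

struct Region {
  size_t index;
  char*  bottom;
  size_t grain_words;
  Region(size_t i, char* b, size_t w) : index(i), bottom(b), grain_words(w) { }
};

// Factory for one more region covering [bottom, bottom + grain_words).
// May return NULL; the caller must cope with the failure.
Region* new_region(size_t index, char* bottom, size_t grain_words) {
  return new (std::nothrow) Region(index, bottom, grain_words);
}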
5463
5480 void G1CollectedHeap::verify_region_sets() { 5464 void G1CollectedHeap::verify_region_sets() {
5481 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); 5465 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
5482 5466
5483 // First, check the explicit lists. 5467 // First, check the explicit lists.
5484 _free_list.verify(); 5468 _free_list.verify();