comparison: src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 20377:a8ea2f110d87

8054819: Rename HeapRegionSeq to HeapRegionManager
Reviewed-by: jwilhelm, jmasa
author tschatzl
date Tue, 26 Aug 2014 09:36:53 +0200
parents 4d3a43351904
children 227a9e5e4b4a
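The change is purely mechanical, a name-for-name substitution with no behavioral difference; every hunk below is an instance of the same mapping:

    HeapRegionSeq                -> HeapRegionManager
    _hrs                         -> _hrm
    hrs_index()                  -> hrm_index()
    G1_NO_HRS_INDEX              -> G1_NO_HRM_INDEX
    print_hrs_post_compaction()  -> print_hrm_post_compaction()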
@@ -526,13 +526,13 @@
       // It looks as if there are free regions available on the
       // secondary_free_list. Let's move them to the free_list and try
       // again to allocate from it.
       append_secondary_free_list();
 
-      assert(_hrs.num_free_regions() > 0, "if the secondary_free_list was not "
+      assert(_hrm.num_free_regions() > 0, "if the secondary_free_list was not "
              "empty we should have moved at least one entry to the free_list");
-      HeapRegion* res = _hrs.allocate_free_region(is_old);
+      HeapRegion* res = _hrm.allocate_free_region(is_old);
       if (G1ConcRegionFreeingVerbose) {
         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
                                "allocated "HR_FORMAT" from secondary_free_list",
                                HR_FORMAT_PARAMS(res));
       }
@@ -569,11 +569,11 @@
         return res;
       }
     }
   }
 
-  res = _hrs.allocate_free_region(is_old);
+  res = _hrm.allocate_free_region(is_old);
 
   if (res == NULL) {
     if (G1ConcRegionFreeingVerbose) {
       gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
                              "res == NULL, trying the secondary_free_list");
@@ -595,11 +595,11 @@
     if (expand(word_size * HeapWordSize)) {
       // Given that expand() succeeded in expanding the heap, and we
       // always expand the heap by an amount aligned to the heap
       // region size, the free list should in theory not be empty.
       // In either case allocate_free_region() will check for NULL.
-      res = _hrs.allocate_free_region(is_old);
+      res = _hrm.allocate_free_region(is_old);
     } else {
       _expand_heap_after_alloc_failure = false;
     }
   }
   return res;
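For orientation, the code around this hunk (new_region()) follows an allocate-then-expand-then-retry shape: take a region from the free list, and if that fails while expansion is still permitted, grow the heap by at least the request and retry once. A standalone toy model of that control flow; all names and types here are placeholders, not the VM's:

    #include <cstddef>
    #include <cstdio>

    struct Region {};
    static Region g_region;
    static bool   g_have_free = false;

    static Region* allocate_free_region() { return g_have_free ? &g_region : nullptr; }
    static bool    expand(size_t /*bytes*/) { g_have_free = true; return true; }  // pretend commit succeeds

    // Mirrors the allocate-then-expand-then-retry shape of new_region().
    static Region* new_region_model(size_t byte_size, bool& expand_after_failure) {
      Region* res = allocate_free_region();
      if (res == nullptr && expand_after_failure) {
        if (expand(byte_size)) {
          res = allocate_free_region();   // free list should now be non-empty
        } else {
          expand_after_failure = false;   // stop retrying expansion after a failure
        }
      }
      return res;
    }

    int main() {
      bool retry_expand = true;
      printf("allocated: %s\n", new_region_model(1024, retry_expand) ? "yes" : "no");
      return 0;
    }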
@@ -607,11 +607,11 @@
 
 HeapWord*
 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
                                                            uint num_regions,
                                                            size_t word_size) {
-  assert(first != G1_NO_HRS_INDEX, "pre-condition");
+  assert(first != G1_NO_HRM_INDEX, "pre-condition");
   assert(isHumongous(word_size), "word_size should be humongous");
   assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
 
   // Index of last region in the series + 1.
   uint last = first + num_regions;
@@ -745,20 +745,20 @@
 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
 
   verify_region_sets_optional();
 
-  uint first = G1_NO_HRS_INDEX;
+  uint first = G1_NO_HRM_INDEX;
   uint obj_regions = (uint)(align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords);
 
   if (obj_regions == 1) {
     // Only one region to allocate, try to use a fast path by directly allocating
     // from the free lists. Do not try to expand here, we will potentially do that
     // later.
     HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
     if (hr != NULL) {
-      first = hr->hrs_index();
+      first = hr->hrm_index();
     }
   } else {
     // We can't allocate humongous regions spanning more than one region while
     // cleanupComplete() is running, since some of the regions we find to be
     // empty might not yet be added to the free list. It is not straightforward
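The obj_regions computation above rounds the requested word size up to whole regions. The same arithmetic, worked through with an assumed region size (the real GrainWords depends on the chosen region size):

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t GrainWords = 1u << 17;    // assumed: 1 MB regions, 8-byte words
      size_t word_size = GrainWords + 1;     // one word more than a single region
      // Same rounding as align_size_up_(word_size, GrainWords) / GrainWords:
      unsigned obj_regions =
          (unsigned)((word_size + GrainWords - 1) / GrainWords);
      printf("regions needed: %u\n", obj_regions);   // prints 2
      return 0;
    }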
@@ -770,48 +770,48 @@
     wait_while_free_regions_coming();
     append_secondary_free_list_if_not_empty_with_lock();
 
     // Policy: Try only empty regions (i.e. already committed first). Maybe we
     // are lucky enough to find some.
-    first = _hrs.find_contiguous_only_empty(obj_regions);
-    if (first != G1_NO_HRS_INDEX) {
-      _hrs.allocate_free_regions_starting_at(first, obj_regions);
+    first = _hrm.find_contiguous_only_empty(obj_regions);
+    if (first != G1_NO_HRM_INDEX) {
+      _hrm.allocate_free_regions_starting_at(first, obj_regions);
     }
   }
 
-  if (first == G1_NO_HRS_INDEX) {
+  if (first == G1_NO_HRM_INDEX) {
     // Policy: We could not find enough regions for the humongous object in the
     // free list. Look through the heap to find a mix of free and uncommitted regions.
     // If so, try expansion.
-    first = _hrs.find_contiguous_empty_or_unavailable(obj_regions);
-    if (first != G1_NO_HRS_INDEX) {
+    first = _hrm.find_contiguous_empty_or_unavailable(obj_regions);
+    if (first != G1_NO_HRM_INDEX) {
       // We found something. Make sure these regions are committed, i.e. expand
       // the heap. Alternatively we could do a defragmentation GC.
       ergo_verbose1(ErgoHeapSizing,
                     "attempt heap expansion",
                     ergo_format_reason("humongous allocation request failed")
                     ergo_format_byte("allocation request"),
                     word_size * HeapWordSize);
 
-      _hrs.expand_at(first, obj_regions);
+      _hrm.expand_at(first, obj_regions);
       g1_policy()->record_new_heap_size(num_regions());
 
 #ifdef ASSERT
       for (uint i = first; i < first + obj_regions; ++i) {
         HeapRegion* hr = region_at(i);
         assert(hr->is_empty(), "sanity");
         assert(is_on_master_free_list(hr), "sanity");
       }
 #endif
-      _hrs.allocate_free_regions_starting_at(first, obj_regions);
+      _hrm.allocate_free_regions_starting_at(first, obj_regions);
     } else {
       // Policy: Potentially trigger a defragmentation GC.
     }
   }
 
   HeapWord* result = NULL;
-  if (first != G1_NO_HRS_INDEX) {
+  if (first != G1_NO_HRM_INDEX) {
     result = humongous_obj_allocate_initialize_regions(first, obj_regions, word_size);
     assert(result != NULL, "it should always return a valid result");
 
     // A successful humongous object allocation changes the used space
     // information of the old generation so we need to recalculate the
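The policy in this hunk is a two-step search with a GC fallback: first look for a contiguous run of committed-and-empty regions, then for a run that may also include uncommitted ones (which expand_at() then commits), and otherwise report failure so the caller can consider a defragmentation GC. A toy model of the search order; the heap map and helper are invented for illustration:

    #include <cstdio>

    const unsigned NO_INDEX = ~0u;   // stands in for G1_NO_HRM_INDEX

    // 'e' = committed & empty, 'u' = uncommitted, 'x' = in use (toy heap map)
    static const char heap_map[] = "xxeuex";

    static unsigned find_run(unsigned n, bool allow_uncommitted) {
      unsigned run = 0;
      for (unsigned i = 0; heap_map[i] != '\0'; i++) {
        bool ok = heap_map[i] == 'e' || (allow_uncommitted && heap_map[i] == 'u');
        run = ok ? run + 1 : 0;
        if (run == n) return i - n + 1;
      }
      return NO_INDEX;
    }

    int main() {
      // Policy order: only-empty first, then empty-or-unavailable.
      unsigned first = find_run(3, false);
      if (first == NO_INDEX) first = find_run(3, true);   // would call expand_at()
      printf("start region: %u\n", first);                // 2 with this map
      return 0;
    }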
@@ -1242,11 +1242,11 @@
 
   PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
     : _hr_printer(hr_printer) { }
 };
 
-void G1CollectedHeap::print_hrs_post_compaction() {
+void G1CollectedHeap::print_hrm_post_compaction() {
   PostCompactionPrinterClosure cl(hr_printer());
   heap_region_iterate(&cl);
 }
 
 bool G1CollectedHeap::do_collection(bool explicit_gc,
@@ -1411,11 +1411,11 @@
       if (_hr_printer.is_active()) {
         // We should do this after we potentially resize the heap so
         // that all the COMMIT / UNCOMMIT events are generated before
         // the end GC event.
 
-        print_hrs_post_compaction();
+        print_hrm_post_compaction();
         _hr_printer.end_gc(true /* full */, (size_t) total_collections());
       }
 
       G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
       if (hot_card_cache->use_cache()) {
1484 "young list should be empty at this point"); 1484 "young list should be empty at this point");
1485 1485
1486 // Update the number of full collections that have been completed. 1486 // Update the number of full collections that have been completed.
1487 increment_old_marking_cycles_completed(false /* concurrent */); 1487 increment_old_marking_cycles_completed(false /* concurrent */);
1488 1488
1489 _hrs.verify_optional(); 1489 _hrm.verify_optional();
1490 verify_region_sets_optional(); 1490 verify_region_sets_optional();
1491 1491
1492 verify_after_gc(); 1492 verify_after_gc();
1493 1493
1494 // Clear the previous marking bitmap, if needed for bitmap verification. 1494 // Clear the previous marking bitmap, if needed for bitmap verification.
1728 "attempt heap expansion", 1728 "attempt heap expansion",
1729 ergo_format_reason("allocation request failed") 1729 ergo_format_reason("allocation request failed")
1730 ergo_format_byte("allocation request"), 1730 ergo_format_byte("allocation request"),
1731 word_size * HeapWordSize); 1731 word_size * HeapWordSize);
1732 if (expand(expand_bytes)) { 1732 if (expand(expand_bytes)) {
1733 _hrs.verify_optional(); 1733 _hrm.verify_optional();
1734 verify_region_sets_optional(); 1734 verify_region_sets_optional();
1735 return attempt_allocation_at_safepoint(word_size, 1735 return attempt_allocation_at_safepoint(word_size,
1736 false /* expect_null_mutator_alloc_region */); 1736 false /* expect_null_mutator_alloc_region */);
1737 } 1737 }
1738 return NULL; 1738 return NULL;
@@ -1756,11 +1756,11 @@
   }
 
   uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
   assert(regions_to_expand > 0, "Must expand by at least one region");
 
-  uint expanded_by = _hrs.expand_by(regions_to_expand);
+  uint expanded_by = _hrm.expand_by(regions_to_expand);
 
   if (expanded_by > 0) {
     size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
     assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
     g1_policy()->record_new_heap_size(num_regions());
1769 "did not expand the heap", 1769 "did not expand the heap",
1770 ergo_format_reason("heap expansion operation failed")); 1770 ergo_format_reason("heap expansion operation failed"));
1771 // The expansion of the virtual storage space was unsuccessful. 1771 // The expansion of the virtual storage space was unsuccessful.
1772 // Let's see if it was because we ran out of swap. 1772 // Let's see if it was because we ran out of swap.
1773 if (G1ExitOnExpansionFailure && 1773 if (G1ExitOnExpansionFailure &&
1774 _hrs.available() >= regions_to_expand) { 1774 _hrm.available() >= regions_to_expand) {
1775 // We had head room... 1775 // We had head room...
1776 vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion"); 1776 vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
1777 } 1777 }
1778 } 1778 }
1779 return regions_to_expand > 0; 1779 return regions_to_expand > 0;
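expand() translates a byte amount into whole regions, and _hrm.expand_by() may commit fewer regions than requested, so the actually expanded size is recomputed from its return value. A worked example of that arithmetic with an assumed region size:

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t GrainBytes = 1024 * 1024;           // assumed 1 MB regions
      size_t aligned_expand_bytes = 5 * GrainBytes;    // request: 5 regions
      unsigned regions_to_expand = (unsigned)(aligned_expand_bytes / GrainBytes);
      unsigned expanded_by = 3;                        // suppose only 3 could be committed
      size_t actual_expand_bytes = expanded_by * GrainBytes;
      // actual_expand_bytes <= aligned_expand_bytes, as the post-condition asserts.
      printf("asked %u regions, got %u (%zu bytes)\n",
             regions_to_expand, expanded_by, actual_expand_bytes);
      return 0;
    }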
@@ -1784,11 +1784,11 @@
     ReservedSpace::page_align_size_down(shrink_bytes);
   aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
                                          HeapRegion::GrainBytes);
   uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
 
-  uint num_regions_removed = _hrs.shrink_by(num_regions_to_remove);
+  uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);
   size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
 
   ergo_verbose3(ErgoHeapSizing,
                 "shrink the heap",
                 ergo_format_byte("requested shrinking amount")
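Shrinking mirrors this: the request is aligned down to region granularity before being turned into a region count (GrainBytes is a power of two, so align-down is a mask), and _hrm.shrink_by() reports how many regions were actually uncommitted. A small illustration with an assumed region size:

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t GrainBytes = 1024 * 1024;              // assumed 1 MB regions
      size_t shrink_bytes = 3 * GrainBytes + 4096;        // not region-aligned
      size_t aligned = shrink_bytes & ~(GrainBytes - 1);  // align_size_down equivalent
      printf("aligned shrink: %zu bytes (%zu regions)\n",
             aligned, aligned / GrainBytes);              // 3 regions
      return 0;
    }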
@@ -1817,11 +1817,11 @@
   // remove only the ones that we need to remove.
   tear_down_region_sets(true /* free_list_only */);
   shrink_helper(shrink_bytes);
   rebuild_region_sets(true /* free_list_only */);
 
-  _hrs.verify_optional();
+  _hrm.verify_optional();
   verify_region_sets_optional();
 }
 
 // Public methods.
 
@@ -2026,11 +2026,11 @@
                        os::vm_page_size(),
                        HeapRegion::GrainBytes,
                        CMBitMap::mark_distance(),
                        mtGC);
 
-  _hrs.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
+  _hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
   g1_barrier_set()->initialize(cardtable_storage);
   // Do later initialization work for concurrent refinement.
   _cg1r->init(card_counts_storage);
 
   // 6843694 - ensure that the maximum region index can fit
@@ -2047,12 +2047,12 @@
 
   _bot_shared = new G1BlockOffsetSharedArray(_reserved, bot_storage);
 
   _g1h = this;
 
-  _in_cset_fast_test.initialize(_hrs.reserved().start(), _hrs.reserved().end(), HeapRegion::GrainBytes);
-  _humongous_is_live.initialize(_hrs.reserved().start(), _hrs.reserved().end(), HeapRegion::GrainBytes);
+  _in_cset_fast_test.initialize(_hrm.reserved().start(), _hrm.reserved().end(), HeapRegion::GrainBytes);
+  _humongous_is_live.initialize(_hrm.reserved().start(), _hrm.reserved().end(), HeapRegion::GrainBytes);
 
   // Create the ConcurrentMark data structure and thread.
   // (Must do this late, so that "max_regions" is defined.)
   _cm = new ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
   if (_cm == NULL || !_cm->completed_initialization()) {
@@ -2109,11 +2109,11 @@
   // counts and that mechanism.
   SpecializationStats::clear();
 
   // Here we allocate the dummy HeapRegion that is required by the
   // G1AllocRegion class.
-  HeapRegion* dummy_region = _hrs.get_dummy_region();
+  HeapRegion* dummy_region = _hrm.get_dummy_region();
 
   // We'll re-use the same region whether the alloc region will
   // require BOT updates or not and, if it doesn't, then a non-young
   // region will complain that it cannot support allocations without
   // BOT updates. So we'll tag the dummy region as young to avoid that.
@@ -2226,18 +2226,18 @@
   // is alive closure
   // (for efficiency/performance)
 }
 
 size_t G1CollectedHeap::capacity() const {
-  return _hrs.length() * HeapRegion::GrainBytes;
+  return _hrm.length() * HeapRegion::GrainBytes;
 }
 
 void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
   assert(!hr->continuesHumongous(), "pre-condition");
   hr->reset_gc_time_stamp();
   if (hr->startsHumongous()) {
-    uint first_index = hr->hrs_index() + 1;
+    uint first_index = hr->hrm_index() + 1;
     uint last_index = hr->last_hc_index();
     for (uint i = first_index; i < last_index; i += 1) {
       HeapRegion* chr = region_at(i);
       assert(chr->continuesHumongous(), "sanity");
       chr->reset_gc_time_stamp();
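A humongous object occupies one starts-humongous region followed by its continues-humongous regions, so series-wide operations like this one walk the index range [hrm_index() + 1, last_hc_index()). A toy walk over such a series; the layout is invented for illustration:

    #include <cstdio>

    struct Region { bool continues_humongous; };

    int main() {
      // Toy layout: region 3 starts the object, regions 4..5 continue it.
      Region regions[] = {{false}, {false}, {false}, {false}, {true}, {true}, {false}};
      unsigned start = 3;
      unsigned first_index = start + 1;   // hr->hrm_index() + 1
      unsigned last_index  = 6;           // hr->last_hc_index(): one past the series
      for (unsigned i = first_index; i < last_index; i++) {
        // every region in between must be a continuation, as the assert checks
        printf("region %u continues: %d\n", i, regions[i].continues_humongous);
      }
      return 0;
    }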
@@ -2531,11 +2531,11 @@
     }
   } while (retry_gc);
 }
 
 bool G1CollectedHeap::is_in(const void* p) const {
-  if (_hrs.reserved().contains(p)) {
+  if (_hrm.reserved().contains(p)) {
     // Given that we know that p is in the reserved space,
     // heap_region_containing_raw() should successfully
     // return the containing region.
     HeapRegion* hr = heap_region_containing_raw(p);
     return hr->is_in(p);
@@ -2545,11 +2545,11 @@
 }
 
 #ifdef ASSERT
 bool G1CollectedHeap::is_in_exact(const void* p) const {
   bool contains = reserved_region().contains(p);
-  bool available = _hrs.is_available(addr_to_region((HeapWord*)p));
+  bool available = _hrm.is_available(addr_to_region((HeapWord*)p));
   if (contains && available) {
     return true;
   } else {
     return false;
   }
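is_in_exact() also requires the backing region to be committed ("available"). The region lookup itself is plain address arithmetic over the reserved range; a sketch under the assumption that addr_to_region() divides the offset from reserved().start() by the region size:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uintptr_t reserved_start = 0x10000000;   // assumed reservation base
      const uintptr_t GrainBytes = 1024 * 1024;      // assumed 1 MB regions
      uintptr_t p = reserved_start + 5 * GrainBytes + 123;
      unsigned region_index = (unsigned)((p - reserved_start) / GrainBytes);
      printf("address maps to region %u\n", region_index);   // 5
      return 0;
    }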
@@ -2612,19 +2612,19 @@
   SpaceClosureRegionClosure blk(cl);
   heap_region_iterate(&blk);
 }
 
 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
-  _hrs.iterate(cl);
+  _hrm.iterate(cl);
 }
 
 void
 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
                                                  uint worker_id,
                                                  uint num_workers,
                                                  jint claim_value) const {
-  _hrs.par_iterate(cl, worker_id, num_workers, claim_value);
+  _hrm.par_iterate(cl, worker_id, num_workers, claim_value);
 }
 
 class ResetClaimValuesClosure: public HeapRegionClosure {
 public:
   bool doHeapRegion(HeapRegion* r) {
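par_iterate() distributes regions across GC workers, and the claim_value ensures each region is processed exactly once: a worker owns a region only if it successfully installs the claim value. A simplified standalone model of claim-based region ownership (the general technique, not the VM's exact code):

    #include <atomic>
    #include <cstdio>

    const int UNCLAIMED = 0;

    struct Region { std::atomic<int> claim{UNCLAIMED}; };

    // Returns true if this worker won the race for the region.
    static bool claim_region(Region& r, int claim_value) {
      int expected = UNCLAIMED;
      return r.claim.compare_exchange_strong(expected, claim_value);
    }

    int main() {
      Region r;
      printf("first claim: %d\n", claim_region(r, 1));    // 1 (won)
      printf("second claim: %d\n", claim_region(r, 1));   // 0 (already claimed)
      return 0;
    }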
@@ -2840,13 +2840,13 @@
     cur = next;
   }
 }
 
 HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
-  HeapRegion* result = _hrs.next_region_in_heap(from);
+  HeapRegion* result = _hrm.next_region_in_heap(from);
   while (result != NULL && result->isHumongous()) {
-    result = _hrs.next_region_in_heap(result);
+    result = _hrm.next_region_in_heap(result);
   }
   return result;
 }
 
 Space* G1CollectedHeap::space_containing(const void* addr) const {
@@ -2902,11 +2902,11 @@
     return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
   }
 }
 
 size_t G1CollectedHeap::max_capacity() const {
-  return _hrs.reserved().byte_size();
+  return _hrm.reserved().byte_size();
 }
 
 jlong G1CollectedHeap::millis_since_last_gc() {
   // assert(false, "NYI");
   return 0;
@@ -3431,13 +3431,13 @@
 void G1CollectedHeap::print_on(outputStream* st) const {
   st->print(" %-20s", "garbage-first heap");
   st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
             capacity()/K, used_unlocked()/K);
   st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
-            _hrs.reserved().start(),
-            _hrs.reserved().start() + _hrs.length() + HeapRegion::GrainWords,
-            _hrs.reserved().end());
+            _hrm.reserved().start(),
+            _hrm.reserved().start() + _hrm.length() + HeapRegion::GrainWords,
+            _hrm.reserved().end());
   st->cr();
   st->print(" region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
   uint young_regions = _young_list->length();
   st->print("%u young (" SIZE_FORMAT "K), ", young_regions,
             (size_t) young_regions * HeapRegion::GrainBytes / K);
@@ -3676,11 +3676,11 @@
     if (!r->startsHumongous()) {
       return false;
     }
     G1CollectedHeap* g1h = G1CollectedHeap::heap();
 
-    uint region_idx = r->hrs_index();
+    uint region_idx = r->hrm_index();
     bool is_candidate = !g1h->humongous_region_is_always_live(region_idx);
     // Is_candidate already filters out humongous regions with some remembered set.
     // This will not lead to a humongous object that we mistakenly keep alive because
     // during young collection the remembered sets will only be added to.
     if (is_candidate) {
@@ -4198,11 +4198,11 @@
   // It is not yet safe to tell the concurrent mark to
   // start as we have some optional output below. We don't want the
   // output from the concurrent mark thread interfering with this
   // logging output either.
 
-  _hrs.verify_optional();
+  _hrm.verify_optional();
   verify_region_sets_optional();
 
   TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
   TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
 
@@ -6017,11 +6017,11 @@
                                   FreeRegionList* free_list,
                                   bool par,
                                   bool locked) {
   assert(!hr->isHumongous(), "this is only for non-humongous regions");
   assert(!hr->is_empty(), "the region should not be empty");
-  assert(_hrs.is_available(hr->hrs_index()), "region should be committed");
+  assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
   assert(free_list != NULL, "pre-condition");
 
   if (G1VerifyBitmaps) {
     MemRegion mr(hr->bottom(), hr->end());
     concurrent_mark()->clearRangePrevBitmap(mr);
@@ -6048,11 +6048,11 @@
   // otherwise the information will be gone.
   uint last_index = hr->last_hc_index();
   hr->set_notHumongous();
   free_region(hr, free_list, par);
 
-  uint i = hr->hrs_index() + 1;
+  uint i = hr->hrm_index() + 1;
   while (i < last_index) {
     HeapRegion* curr_hr = region_at(i);
     assert(curr_hr->continuesHumongous(), "invariant");
     curr_hr->set_notHumongous();
     free_region(curr_hr, free_list, par);
@@ -6072,11 +6072,11 @@
 
 void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {
   assert(list != NULL, "list can't be null");
   if (!list->is_empty()) {
     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
-    _hrs.insert_list_into_free_list(list);
+    _hrm.insert_list_into_free_list(list);
   }
 }
 
 void G1CollectedHeap::decrement_summary_bytes(size_t bytes) {
   assert(_summary_bytes_used >= bytes,
@@ -6441,11 +6441,11 @@
     // - they would also pose considerable effort for cleaning up the remembered
     //   sets.
     // While this cleanup is not strictly necessary to be done (or done instantly),
     // given that their occurrence is very low, this saves us this additional
     // complexity.
-    uint region_idx = r->hrs_index();
+    uint region_idx = r->hrm_index();
     if (g1h->humongous_is_live(region_idx) ||
         g1h->humongous_region_is_always_live(region_idx)) {
 
       if (G1TraceReclaimDeadHumongousObjectsAtYoungGC) {
         gclog_or_tty->print_cr("Live humongous %d region %d with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d live-other %d obj array %d",
@@ -6680,26 +6680,26 @@
     // Note that emptying the _young_list is postponed and instead done as
     // the first step when rebuilding the regions sets again. The reason for
     // this is that during a full GC string deduplication needs to know if
     // a collected region was young or old when the full GC was initiated.
   }
-  _hrs.remove_all_free_regions();
+  _hrm.remove_all_free_regions();
 }
 
 class RebuildRegionSetsClosure : public HeapRegionClosure {
 private:
   bool _free_list_only;
   HeapRegionSet* _old_set;
-  HeapRegionSeq* _hrs;
+  HeapRegionManager* _hrm;
   size_t _total_used;
 
 public:
   RebuildRegionSetsClosure(bool free_list_only,
-                           HeapRegionSet* old_set, HeapRegionSeq* hrs) :
+                           HeapRegionSet* old_set, HeapRegionManager* hrm) :
     _free_list_only(free_list_only),
-    _old_set(old_set), _hrs(hrs), _total_used(0) {
-    assert(_hrs->num_free_regions() == 0, "pre-condition");
+    _old_set(old_set), _hrm(hrm), _total_used(0) {
+    assert(_hrm->num_free_regions() == 0, "pre-condition");
     if (!free_list_only) {
       assert(_old_set->is_empty(), "pre-condition");
     }
   }
 
@@ -6708,11 +6708,11 @@
       return false;
     }
 
     if (r->is_empty()) {
       // Add free regions to the free list
-      _hrs->insert_into_free_list(r);
+      _hrm->insert_into_free_list(r);
     } else if (!_free_list_only) {
       assert(!r->is_young(), "we should not come across young regions");
 
       if (r->isHumongous()) {
         // We ignore humongous regions, we left the humongous set unchanged
@@ -6736,11 +6736,11 @@
 
   if (!free_list_only) {
     _young_list->empty_list();
   }
 
-  RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrs);
+  RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm);
   heap_region_iterate(&cl);
 
   if (!free_list_only) {
     _summary_bytes_used = cl.total_used();
   }
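rebuild_region_sets() re-derives the free list, the old set, and the used-bytes total in one pass over the heap using the HeapRegionClosure visitor idiom seen above. A stripped-down model of that partitioning pass, with a toy Region type:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    struct Region { bool empty; bool humongous; size_t used; };

    int main() {
      std::vector<Region> heap = {
        {true, false, 0}, {false, false, 512}, {false, true, 4096}
      };
      std::vector<Region*> free_list, old_set;
      size_t total_used = 0;
      for (Region& r : heap) {
        if (r.empty) {
          free_list.push_back(&r);   // empty regions go back on the free list
        } else if (!r.humongous) {
          old_set.push_back(&r);     // non-young, non-humongous -> old set
          total_used += r.used;
        }
        // humongous regions are skipped: that set is left unchanged, as above
      }
      printf("free=%zu old=%zu used=%zu\n", free_list.size(), old_set.size(), total_used);
      return 0;
    }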
@@ -6926,44 +6926,44 @@
 
 class VerifyRegionListsClosure : public HeapRegionClosure {
 private:
   HeapRegionSet* _old_set;
   HeapRegionSet* _humongous_set;
-  HeapRegionSeq* _hrs;
+  HeapRegionManager* _hrm;
 
 public:
   HeapRegionSetCount _old_count;
   HeapRegionSetCount _humongous_count;
   HeapRegionSetCount _free_count;
 
   VerifyRegionListsClosure(HeapRegionSet* old_set,
                            HeapRegionSet* humongous_set,
-                           HeapRegionSeq* hrs) :
-    _old_set(old_set), _humongous_set(humongous_set), _hrs(hrs),
+                           HeapRegionManager* hrm) :
+    _old_set(old_set), _humongous_set(humongous_set), _hrm(hrm),
     _old_count(), _humongous_count(), _free_count(){ }
 
   bool doHeapRegion(HeapRegion* hr) {
     if (hr->continuesHumongous()) {
       return false;
     }
 
     if (hr->is_young()) {
       // TODO
     } else if (hr->startsHumongous()) {
-      assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u is starts humongous but not in humongous set.", hr->hrs_index()));
+      assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u is starts humongous but not in humongous set.", hr->hrm_index()));
       _humongous_count.increment(1u, hr->capacity());
     } else if (hr->is_empty()) {
-      assert(_hrs->is_free(hr), err_msg("Heap region %u is empty but not on the free list.", hr->hrs_index()));
+      assert(_hrm->is_free(hr), err_msg("Heap region %u is empty but not on the free list.", hr->hrm_index()));
       _free_count.increment(1u, hr->capacity());
     } else {
-      assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->hrs_index()));
+      assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->hrm_index()));
       _old_count.increment(1u, hr->capacity());
     }
     return false;
   }
 
-  void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionSeq* free_list) {
+  void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) {
     guarantee(old_set->length() == _old_count.length(), err_msg("Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count.length()));
     guarantee(old_set->total_capacity_bytes() == _old_count.capacity(), err_msg("Old set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
         old_set->total_capacity_bytes(), _old_count.capacity()));
 
     guarantee(humongous_set->length() == _humongous_count.length(), err_msg("Hum set count mismatch. Expected %u, actual %u.", humongous_set->length(), _humongous_count.length()));
@@ -6978,11 +6978,11 @@
 
 void G1CollectedHeap::verify_region_sets() {
   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
 
   // First, check the explicit lists.
-  _hrs.verify();
+  _hrm.verify();
   {
     // Given that a concurrent operation might be adding regions to
     // the secondary free list we have to take the lock before
     // verifying it.
     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
@@ -7009,13 +7009,13 @@
   append_secondary_free_list_if_not_empty_with_lock();
 
   // Finally, make sure that the region accounting in the lists is
   // consistent with what we see in the heap.
 
-  VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_hrs);
+  VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_hrm);
   heap_region_iterate(&cl);
-  cl.verify_counts(&_old_set, &_humongous_set, &_hrs);
+  cl.verify_counts(&_old_set, &_humongous_set, &_hrm);
 }
 
 // Optimized nmethod scanning
 
 class RegisterNMethodOopClosure: public OopClosure {