Mercurial > hg > graal-jvmci-8
comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 20337:1f1d373cd044
8038423: G1: Decommit memory within heap
Summary: Allow G1 to decommit memory of arbitrary regions within the heap and their associated auxiliary data structures card table, BOT, hot card cache, and mark bitmaps.
Reviewed-by: mgerdin, brutisso, jwilhelm
author: tschatzl
date: Thu, 21 Aug 2014 11:47:10 +0200
parents | 6701abbc4441 |
children | 439f0d76cff3 4bfc44ba0d19 |
Comparison of revisions 20336:6701abbc4441 (before) and 20337:1f1d373cd044 (after).
Legend: equal | deleted | inserted | replaced
41 #include "gc_implementation/g1/g1GCPhaseTimes.hpp" | 41 #include "gc_implementation/g1/g1GCPhaseTimes.hpp" |
42 #include "gc_implementation/g1/g1Log.hpp" | 42 #include "gc_implementation/g1/g1Log.hpp" |
43 #include "gc_implementation/g1/g1MarkSweep.hpp" | 43 #include "gc_implementation/g1/g1MarkSweep.hpp" |
44 #include "gc_implementation/g1/g1OopClosures.inline.hpp" | 44 #include "gc_implementation/g1/g1OopClosures.inline.hpp" |
45 #include "gc_implementation/g1/g1ParScanThreadState.inline.hpp" | 45 #include "gc_implementation/g1/g1ParScanThreadState.inline.hpp" |
46 #include "gc_implementation/g1/g1RegionToSpaceMapper.hpp" | |
46 #include "gc_implementation/g1/g1RemSet.inline.hpp" | 47 #include "gc_implementation/g1/g1RemSet.inline.hpp" |
47 #include "gc_implementation/g1/g1StringDedup.hpp" | 48 #include "gc_implementation/g1/g1StringDedup.hpp" |
48 #include "gc_implementation/g1/g1YCTypes.hpp" | 49 #include "gc_implementation/g1/g1YCTypes.hpp" |
49 #include "gc_implementation/g1/heapRegion.inline.hpp" | 50 #include "gc_implementation/g1/heapRegion.inline.hpp" |
50 #include "gc_implementation/g1/heapRegionRemSet.hpp" | 51 #include "gc_implementation/g1/heapRegionRemSet.hpp" |
373 curr = curr->get_next_young_region(); | 374 curr = curr->get_next_young_region(); |
374 } | 375 } |
375 } | 376 } |
376 | 377 |
377 gclog_or_tty->cr(); | 378 gclog_or_tty->cr(); |
379 } | |
380 | |
381 void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) { | |
382 OtherRegionsTable::invalidate(start_idx, num_regions); | |
383 } | |
384 | |
385 void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions) { | |
386 reset_from_card_cache(start_idx, num_regions); | |
378 } | 387 } |
379 | 388 |
380 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr) | 389 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr) |
381 { | 390 { |
382 // Claim the right to put the region on the dirty cards region list | 391 // Claim the right to put the region on the dirty cards region list |
754 // cleanupComplete() is running, since some of the regions we find to be | 763 // cleanupComplete() is running, since some of the regions we find to be |
755 // empty might not yet be added to the free list. It is not straightforward | 764 // empty might not yet be added to the free list. It is not straightforward |
756 // to know in which list they are on so that we can remove them. We only | 765 // to know in which list they are on so that we can remove them. We only |
757 // need to do this if we need to allocate more than one region to satisfy the | 766 // need to do this if we need to allocate more than one region to satisfy the |
758 // current humongous allocation request. If we are only allocating one region | 767 // current humongous allocation request. If we are only allocating one region |
759 // we use the one-region region allocation code (see above), or end up here. | 768 // we use the one-region region allocation code (see above), that already |
769 // potentially waits for regions from the secondary free list. | |
760 wait_while_free_regions_coming(); | 770 wait_while_free_regions_coming(); |
761 append_secondary_free_list_if_not_empty_with_lock(); | 771 append_secondary_free_list_if_not_empty_with_lock(); |
762 | 772 |
763 // Policy: Try only empty regions (i.e. already committed first). Maybe we | 773 // Policy: Try only empty regions (i.e. already committed first). Maybe we |
764 // are lucky enough to find some. | 774 // are lucky enough to find some. |
765 first = _hrs.find_contiguous(obj_regions, true); | 775 first = _hrs.find_contiguous_only_empty(obj_regions); |
766 if (first != G1_NO_HRS_INDEX) { | 776 if (first != G1_NO_HRS_INDEX) { |
767 _hrs.allocate_free_regions_starting_at(first, obj_regions); | 777 _hrs.allocate_free_regions_starting_at(first, obj_regions); |
768 } | 778 } |
769 } | 779 } |
770 | 780 |
771 if (first == G1_NO_HRS_INDEX) { | 781 if (first == G1_NO_HRS_INDEX) { |
772 // Policy: We could not find enough regions for the humongous object in the | 782 // Policy: We could not find enough regions for the humongous object in the |
773 // free list. Look through the heap to find a mix of free and uncommitted regions. | 783 // free list. Look through the heap to find a mix of free and uncommitted regions. |
774 // If so, try expansion. | 784 // If so, try expansion. |
775 first = _hrs.find_contiguous(obj_regions, false); | 785 first = _hrs.find_contiguous_empty_or_unavailable(obj_regions); |
776 if (first != G1_NO_HRS_INDEX) { | 786 if (first != G1_NO_HRS_INDEX) { |
777 // We found something. Make sure these regions are committed, i.e. expand | 787 // We found something. Make sure these regions are committed, i.e. expand |
778 // the heap. Alternatively we could do a defragmentation GC. | 788 // the heap. Alternatively we could do a defragmentation GC. |
779 ergo_verbose1(ErgoHeapSizing, | 789 ergo_verbose1(ErgoHeapSizing, |
780 "attempt heap expansion", | 790 "attempt heap expansion", |
1948 // happen in asserts: DLD.) | 1958 // happen in asserts: DLD.) |
1949 _reserved.set_word_size(0); | 1959 _reserved.set_word_size(0); |
1950 _reserved.set_start((HeapWord*)heap_rs.base()); | 1960 _reserved.set_start((HeapWord*)heap_rs.base()); |
1951 _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size())); | 1961 _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size())); |
1952 | 1962 |
1953 _expansion_regions = (uint) (max_byte_size / HeapRegion::GrainBytes); | |
1954 | |
1955 // Create the gen rem set (and barrier set) for the entire reserved region. | 1963 // Create the gen rem set (and barrier set) for the entire reserved region. |
1956 _rem_set = collector_policy()->create_rem_set(_reserved, 2); | 1964 _rem_set = collector_policy()->create_rem_set(_reserved, 2); |
1957 set_barrier_set(rem_set()->bs()); | 1965 set_barrier_set(rem_set()->bs()); |
1958 if (!barrier_set()->is_a(BarrierSet::G1SATBCTLogging)) { | 1966 if (!barrier_set()->is_a(BarrierSet::G1SATBCTLogging)) { |
1959 vm_exit_during_initialization("G1 requires a G1SATBLoggingCardTableModRefBS"); | 1967 vm_exit_during_initialization("G1 requires a G1SATBLoggingCardTableModRefBS"); |
1964 _g1_rem_set = new G1RemSet(this, g1_barrier_set()); | 1972 _g1_rem_set = new G1RemSet(this, g1_barrier_set()); |
1965 | 1973 |
1966 // Carve out the G1 part of the heap. | 1974 // Carve out the G1 part of the heap. |
1967 | 1975 |
1968 ReservedSpace g1_rs = heap_rs.first_part(max_byte_size); | 1976 ReservedSpace g1_rs = heap_rs.first_part(max_byte_size); |
1969 _hrs.initialize(g1_rs); | 1977 G1RegionToSpaceMapper* heap_storage = |
1970 | 1978 G1RegionToSpaceMapper::create_mapper(g1_rs, |
1971 assert(_hrs.max_length() == _expansion_regions, | 1979 UseLargePages ? os::large_page_size() : os::vm_page_size(), |
1972 err_msg("max length: %u expansion regions: %u", | 1980 HeapRegion::GrainBytes, |
1973 _hrs.max_length(), _expansion_regions)); | 1981 1, |
1974 | 1982 mtJavaHeap); |
1975 // Do later initialization work for concurrent refinement. | 1983 heap_storage->set_mapping_changed_listener(&_listener); |
1976 _cg1r->init(); | 1984 |
1985 // Reserve space for the block offset table. We do not support automatic uncommit | |
1986 // for the card table at this time. BOT only. | |
1987 ReservedSpace bot_rs(G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize)); | |
1988 G1RegionToSpaceMapper* bot_storage = | |
1989 G1RegionToSpaceMapper::create_mapper(bot_rs, | |
1990 os::vm_page_size(), | |
1991 HeapRegion::GrainBytes, | |
1992 G1BlockOffsetSharedArray::N_bytes, | |
1993 mtGC); | |
1994 | |
1995 ReservedSpace cardtable_rs(G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize)); | |
1996 G1RegionToSpaceMapper* cardtable_storage = | |
1997 G1RegionToSpaceMapper::create_mapper(cardtable_rs, | |
1998 os::vm_page_size(), | |
1999 HeapRegion::GrainBytes, | |
2000 G1BlockOffsetSharedArray::N_bytes, | |
2001 mtGC); | |
2002 | |
2003 // Reserve space for the card counts table. | |
2004 ReservedSpace card_counts_rs(G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize)); | |
2005 G1RegionToSpaceMapper* card_counts_storage = | |
2006 G1RegionToSpaceMapper::create_mapper(card_counts_rs, | |
2007 os::vm_page_size(), | |
2008 HeapRegion::GrainBytes, | |
2009 G1BlockOffsetSharedArray::N_bytes, | |
2010 mtGC); | |
2011 | |
2012 // Reserve space for prev and next bitmap. | |
2013 size_t bitmap_size = CMBitMap::compute_size(g1_rs.size()); | |
2014 | |
2015 ReservedSpace prev_bitmap_rs(ReservedSpace::allocation_align_size_up(bitmap_size)); | |
2016 G1RegionToSpaceMapper* prev_bitmap_storage = | |
2017 G1RegionToSpaceMapper::create_mapper(prev_bitmap_rs, | |
2018 os::vm_page_size(), | |
2019 HeapRegion::GrainBytes, | |
2020 CMBitMap::mark_distance(), | |
2021 mtGC); | |
2022 | |
2023 ReservedSpace next_bitmap_rs(ReservedSpace::allocation_align_size_up(bitmap_size)); | |
2024 G1RegionToSpaceMapper* next_bitmap_storage = | |
2025 G1RegionToSpaceMapper::create_mapper(next_bitmap_rs, | |
2026 os::vm_page_size(), | |
2027 HeapRegion::GrainBytes, | |
2028 CMBitMap::mark_distance(), | |
2029 mtGC); | |
2030 | |
2031 _hrs.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage); | |
2032 g1_barrier_set()->initialize(cardtable_storage); | |
2033 // Do later initialization work for concurrent refinement. | |
2034 _cg1r->init(card_counts_storage); | |
1977 | 2035 |
1978 // 6843694 - ensure that the maximum region index can fit | 2036 // 6843694 - ensure that the maximum region index can fit |
1979 // in the remembered set structures. | 2037 // in the remembered set structures. |
1980 const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1; | 2038 const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1; |
1981 guarantee((max_regions() - 1) <= max_region_idx, "too many regions"); | 2039 guarantee((max_regions() - 1) <= max_region_idx, "too many regions"); |
1985 guarantee(HeapRegion::CardsPerRegion < max_cards_per_region, | 2043 guarantee(HeapRegion::CardsPerRegion < max_cards_per_region, |
1986 "too many cards per region"); | 2044 "too many cards per region"); |
1987 | 2045 |
1988 FreeRegionList::set_unrealistically_long_length(max_regions() + 1); | 2046 FreeRegionList::set_unrealistically_long_length(max_regions() + 1); |
1989 | 2047 |
1990 _bot_shared = new G1BlockOffsetSharedArray(_reserved, | 2048 _bot_shared = new G1BlockOffsetSharedArray(_reserved, bot_storage); |
1991 heap_word_size(init_byte_size)); | |
1992 | 2049 |
1993 _g1h = this; | 2050 _g1h = this; |
1994 | 2051 |
1995 _in_cset_fast_test.initialize(_hrs.reserved().start(), _hrs.reserved().end(), HeapRegion::GrainBytes); | 2052 _in_cset_fast_test.initialize(_hrs.reserved().start(), _hrs.reserved().end(), HeapRegion::GrainBytes); |
1996 _humongous_is_live.initialize(_hrs.reserved().start(), _hrs.reserved().end(), HeapRegion::GrainBytes); | 2053 _humongous_is_live.initialize(_hrs.reserved().start(), _hrs.reserved().end(), HeapRegion::GrainBytes); |
1997 | 2054 |
1998 // Create the ConcurrentMark data structure and thread. | 2055 // Create the ConcurrentMark data structure and thread. |
1999 // (Must do this late, so that "max_regions" is defined.) | 2056 // (Must do this late, so that "max_regions" is defined.) |
2000 _cm = new ConcurrentMark(this, heap_rs); | 2057 _cm = new ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage); |
2001 if (_cm == NULL || !_cm->completed_initialization()) { | 2058 if (_cm == NULL || !_cm->completed_initialization()) { |
2002 vm_shutdown_during_initialization("Could not create/initialize ConcurrentMark"); | 2059 vm_shutdown_during_initialization("Could not create/initialize ConcurrentMark"); |
2003 return JNI_ENOMEM; | 2060 return JNI_ENOMEM; |
2004 } | 2061 } |
2005 _cmThread = _cm->cmThread(); | 2062 _cmThread = _cm->cmThread(); |
2052 // counts and that mechanism. | 2109 // counts and that mechanism. |
2053 SpecializationStats::clear(); | 2110 SpecializationStats::clear(); |
2054 | 2111 |
2055 // Here we allocate the dummy HeapRegion that is required by the | 2112 // Here we allocate the dummy HeapRegion that is required by the |
2056 // G1AllocRegion class. | 2113 // G1AllocRegion class. |
2057 | |
2058 HeapRegion* dummy_region = _hrs.get_dummy_region(); | 2114 HeapRegion* dummy_region = _hrs.get_dummy_region(); |
2115 | |
2059 // We'll re-use the same region whether the alloc region will | 2116 // We'll re-use the same region whether the alloc region will |
2060 // require BOT updates or not and, if it doesn't, then a non-young | 2117 // require BOT updates or not and, if it doesn't, then a non-young |
2061 // region will complain that it cannot support allocations without | 2118 // region will complain that it cannot support allocations without |
2062 // BOT updates. So we'll tag the dummy region as young to avoid that. | 2119 // BOT updates. So we'll tag the dummy region as young to avoid that. |
2063 dummy_region->set_young(); | 2120 dummy_region->set_young(); |
2493 } | 2550 } |
2494 } while (retry_gc); | 2551 } while (retry_gc); |
2495 } | 2552 } |
2496 | 2553 |
2497 bool G1CollectedHeap::is_in(const void* p) const { | 2554 bool G1CollectedHeap::is_in(const void* p) const { |
2498 if (_hrs.committed().contains(p)) { | 2555 if (_hrs.reserved().contains(p)) { |
2499 // Given that we know that p is in the committed space, | 2556 // Given that we know that p is in the reserved space, |
2500 // heap_region_containing_raw() should successfully | 2557 // heap_region_containing_raw() should successfully |
2501 // return the containing region. | 2558 // return the containing region. |
2502 HeapRegion* hr = heap_region_containing_raw(p); | 2559 HeapRegion* hr = heap_region_containing_raw(p); |
2503 return hr->is_in(p); | 2560 return hr->is_in(p); |
2504 } else { | 2561 } else { |
2505 return false; | 2562 return false; |
2506 } | 2563 } |
2507 } | 2564 } |
2565 | |
2566 #ifdef ASSERT | |
2567 bool G1CollectedHeap::is_in_exact(const void* p) const { | |
2568 bool contains = reserved_region().contains(p); | |
2569 bool available = _hrs.is_available(addr_to_region((HeapWord*)p)); | |
2570 if (contains && available) { | |
2571 return true; | |
2572 } else { | |
2573 return false; | |
2574 } | |
2575 } | |
2576 #endif | |
2508 | 2577 |
2509 // Iteration functions. | 2578 // Iteration functions. |
2510 | 2579 |
2511 // Applies an ExtendedOopClosure onto all references of objects within a HeapRegion. | 2580 // Applies an ExtendedOopClosure onto all references of objects within a HeapRegion. |
2512 | 2581 |
3381 void G1CollectedHeap::print_on(outputStream* st) const { | 3450 void G1CollectedHeap::print_on(outputStream* st) const { |
3382 st->print(" %-20s", "garbage-first heap"); | 3451 st->print(" %-20s", "garbage-first heap"); |
3383 st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K", | 3452 st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K", |
3384 capacity()/K, used_unlocked()/K); | 3453 capacity()/K, used_unlocked()/K); |
3385 st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")", | 3454 st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")", |
3386 _hrs.committed().start(), | 3455 _hrs.reserved().start(), |
3387 _hrs.committed().end(), | 3456 _hrs.reserved().start() + _hrs.length() + HeapRegion::GrainWords, |
3388 _hrs.reserved().end()); | 3457 _hrs.reserved().end()); |
3389 st->cr(); | 3458 st->cr(); |
3390 st->print(" region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K); | 3459 st->print(" region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K); |
3391 uint young_regions = _young_list->length(); | 3460 uint young_regions = _young_list->length(); |
3392 st->print("%u young (" SIZE_FORMAT "K), ", young_regions, | 3461 st->print("%u young (" SIZE_FORMAT "K), ", young_regions, |
4132 // We should do this after we potentially expand the heap so | 4201 // We should do this after we potentially expand the heap so |
4133 // that all the COMMIT events are generated before the end GC | 4202 // that all the COMMIT events are generated before the end GC |
4134 // event, and after we retire the GC alloc regions so that all | 4203 // event, and after we retire the GC alloc regions so that all |
4135 // RETIRE events are generated before the end GC event. | 4204 // RETIRE events are generated before the end GC event. |
4136 _hr_printer.end_gc(false /* full */, (size_t) total_collections()); | 4205 _hr_printer.end_gc(false /* full */, (size_t) total_collections()); |
4137 | |
4138 if (mark_in_progress()) { | |
4139 concurrent_mark()->update_heap_boundaries(_hrs.committed()); | |
4140 } | |
4141 | 4206 |
4142 #ifdef TRACESPINNING | 4207 #ifdef TRACESPINNING |
4143 ParallelTaskTerminator::print_termination_counts(); | 4208 ParallelTaskTerminator::print_termination_counts(); |
4144 #endif | 4209 #endif |
4145 | 4210 |