Mercurial > hg > truffle
comparison src/share/vm/gc_implementation/g1/heapRegionSeq.hpp @ 3766:c3f1170908be
7045330: G1: Simplify/fix the HeapRegionSeq class
7042285: G1: native memory leak during humongous object allocation
6804436: G1: heap region indices should be size_t
Summary: A series of fixes and improvements to the HeapRegionSeq class: a) replace the _regions growable array with a standard C array, b) avoid de-allocating / re-allocating HeapRegion instances when the heap shrinks / grows (fix for 7042285), c) introduce fast method to map address to HeapRegion via a "biased" array pointer, d) embed the _hrs object in G1CollectedHeap, instead of pointing to it via an indirection, e) assume that all the regions added to the HeapRegionSeq instance are contiguous, f) replace int's with size_t's for indexes (and expand that to HeapRegion as part of 6804436), g) remove unnecessary / unused methods, h) rename a couple of fields (_alloc_search_start and _seq_bottom), i) fix iterate_from() not to always start from index 0 irrespective of the region passed to it, j) add a verification method to check the HeapRegionSeq assumptions, k) always call the wrappers for _hrs.iterate(), _hrs_length(), and _hrs.at() from G1CollectedHeap, not those methods directly, and l) unify the code that expands the sequence (by either re-using or creating a new HeapRegion) and make it robust with respect to a HeapRegion allocation failing.
Reviewed-by: stefank, johnc, brutisso
author | tonyp |
---|---|
date | Fri, 10 Jun 2011 13:16:40 -0400 |
parents | 1216415d8e35 |
children | 4f93f0d00802 |
comparison
equal
deleted
inserted
replaced
3765:ae5b2f1dcf12 | 3766:c3f1170908be |
---|---|
23 */ | 23 */ |
24 | 24 |
25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP | 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP |
26 #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP | 26 #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP |
27 | 27 |
28 #include "gc_implementation/g1/heapRegion.hpp" | |
29 #include "utilities/growableArray.hpp" | |
30 | |
31 class HeapRegion; | 28 class HeapRegion; |
32 class HeapRegionClosure; | 29 class HeapRegionClosure; |
30 class FreeRegionList; | |
31 | |
32 #define G1_NULL_HRS_INDEX ((size_t) -1) | |
33 | |
34 // This class keeps track of the region metadata (i.e., HeapRegion | |
35 // instances). They are kept in the _regions array in address | |
36 // order. A region's index in the array corresponds to its index in | |
37 // the heap (i.e., 0 is the region at the bottom of the heap, 1 is | |
38 // the one after it, etc.). Two regions that are consecutive in the | |
39 // array should also be adjacent in the address space (i.e., | |
40 // region(i).end() == region(i+1).bottom()). | |
41 // | |
42 // We create a HeapRegion when we commit the region's address space | |
43 // for the first time. When we uncommit the address space of a | |
44 // region we retain the HeapRegion to be able to re-use it in the | |
45 // future (in case we recommit it). | |
46 // | |
47 // We keep track of three lengths: | |
48 // | |
49 // * _length (returned by length()) is the number of currently | |
50 // committed regions. | |
51 // * _allocated_length (not exposed outside this class) is the | |
52 // number of regions for which we have HeapRegions. | |
53 // * _max_length (returned by max_length()) is the maximum number of | |
54 // regions the heap can have. | |
55 // | |
56 // and maintain that: _length <= _allocated_length <= _max_length | |
33 | 57 |
34 class HeapRegionSeq: public CHeapObj { | 58 class HeapRegionSeq: public CHeapObj { |
35 | 59 |
36 // _regions is kept sorted by start address order, and no two regions are | 60 // The array that holds the HeapRegions. |
37 // overlapping. | 61 HeapRegion** _regions; |
38 GrowableArray<HeapRegion*> _regions; | |
39 | 62 |
40 // The index in "_regions" at which to start the next allocation search. | 63 // Version of _regions biased to address 0 |
41 // (For efficiency only; private to obj_allocate after initialization.) | 64 HeapRegion** _regions_biased; |
42 int _alloc_search_start; | |
43 | 65 |
44 // Finds a contiguous set of empty regions of length num, starting | 66 // The number of regions committed in the heap. |
45 // from a given index. | 67 size_t _length; |
46 int find_contiguous_from(int from, size_t num); | |
47 | 68 |
48 // Currently, we're choosing collection sets in a round-robin fashion, | 69 // The address of the first reserved word in the heap. |
49 // starting here. | 70 HeapWord* _heap_bottom; |
50 int _next_rr_candidate; | |
51 | 71 |
52 // The bottom address of the bottom-most region, or else NULL if there | 72 // The address of the last reserved word in the heap - 1. |
53 // are no regions in the sequence. | 73 HeapWord* _heap_end; |
54 char* _seq_bottom; | 74 |
75 // The log of the region byte size. | |
76 size_t _region_shift; | |
77 | |
78 // A hint for which index to start searching from for humongous | |
79 // allocations. | |
80 size_t _next_search_index; | |
81 | |
82 // The number of regions for which we have allocated HeapRegions. | |
83 size_t _allocated_length; | |
84 | |
85 // The maximum number of regions in the heap. | |
86 size_t _max_length; | |
87 | |
88 // Find a contiguous set of empty regions of length num, starting | |
89 // from the given index. | |
90 size_t find_contiguous_from(size_t from, size_t num); | |
91 | |
92 // Map a heap address to a biased region index. Assume that the | |
93 // address is valid. | |
94 inline size_t addr_to_index_biased(HeapWord* addr) const; | |
95 | |
96 void increment_length(size_t* length) { | |
97 assert(*length < _max_length, "pre-condition"); | |
98 *length += 1; | |
99 } | |
100 | |
101 void decrement_length(size_t* length) { | |
102 assert(*length > 0, "pre-condition"); | |
103 *length -= 1; | |
104 } | |
55 | 105 |
56 public: | 106 public: |
57 // Initializes "this" to the empty sequence of regions. | 107 // Empty constructor, we'll initialize it with the initialize() method. |
58 HeapRegionSeq(const size_t max_size); | 108 HeapRegionSeq() { } |
59 | 109 |
60 // Adds "hr" to "this" sequence. Requires "hr" not to overlap with | 110 void initialize(HeapWord* bottom, HeapWord* end, size_t max_length); |
61 // any region already in "this". (Will perform better if regions are | |
62 // inserted in ascending address order.) | |
63 void insert(HeapRegion* hr); | |
64 | 111 |
65 // Given a HeapRegion*, returns its index within _regions, | 112 // Return the HeapRegion at the given index. Assume that the index |
66 // or returns -1 if not found. | 113 // is valid. |
67 int find(HeapRegion* hr); | 114 inline HeapRegion* at(size_t index) const; |
68 | 115 |
69 // Requires the index to be valid, and return the region at the index. | 116 // If addr is within the committed space return its corresponding |
70 HeapRegion* at(size_t i) { return _regions.at((int)i); } | 117 // HeapRegion, otherwise return NULL. |
118 inline HeapRegion* addr_to_region(HeapWord* addr) const; | |
71 | 119 |
72 // Return the number of regions in the sequence. | 120 // Return the HeapRegion that corresponds to the given |
73 size_t length(); | 121 // address. Assume the address is valid. |
122 inline HeapRegion* addr_to_region_unsafe(HeapWord* addr) const; | |
74 | 123 |
75 // Returns the number of contiguous regions at the end of the sequence | 124 // Return the number of regions that have been committed in the heap. |
125 size_t length() const { return _length; } | |
126 | |
127 // Return the maximum number of regions in the heap. | |
128 size_t max_length() const { return _max_length; } | |
129 | |
130 // Expand the sequence to reflect that the heap has grown from | |
131 // old_end to new_end. Either create new HeapRegions, or re-use | |
132 // existing ones, and return them in the given list. Returns the | |
133 // memory region that covers the newly-created regions. If a | |
134 // HeapRegion allocation fails, the result memory region might be | |
135 // smaller than the desired one. | |
136 MemRegion expand_by(HeapWord* old_end, HeapWord* new_end, | |
137 FreeRegionList* list); | |
138 | |
139 // Return the number of contiguous regions at the end of the sequence | |
76 // that are available for allocation. | 140 // that are available for allocation. |
77 size_t free_suffix(); | 141 size_t free_suffix(); |
78 | 142 |
79 // Find a contiguous set of empty regions of length num and return | 143 // Find a contiguous set of empty regions of length num and return |
80 // the index of the first region or -1 if the search was unsuccessful. | 144 // the index of the first region or G1_NULL_HRS_INDEX if the |
81 int find_contiguous(size_t num); | 145 // search was unsuccessful. |
146 size_t find_contiguous(size_t num); | |
82 | 147 |
83 // Apply the "doHeapRegion" method of "blk" to all regions in "this", | 148 // Apply blk->doHeapRegion() on all committed regions in address order, |
84 // in address order, terminating the iteration early | 149 // terminating the iteration early if doHeapRegion() returns true. |
85 // if the "doHeapRegion" method returns "true". | 150 void iterate(HeapRegionClosure* blk) const; |
86 void iterate(HeapRegionClosure* blk); | |
87 | 151 |
88 // Apply the "doHeapRegion" method of "blk" to all regions in "this", | 152 // As above, but start the iteration from hr and loop around. If hr |
89 // starting at "r" (or first region, if "r" is NULL), in a circular | 153 // is NULL, we start from the first region in the heap. |
90 // manner, terminating the iteration early if the "doHeapRegion" method | 154 void iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const; |
91 // returns "true". | |
92 void iterate_from(HeapRegion* r, HeapRegionClosure* blk); | |
93 | 155 |
94 // As above, but start from a given index in the sequence | 156 // Tag as uncommitted as many regions that are completely free as |
95 // instead of a given heap region. | 157 // possible, up to shrink_bytes, from the suffix of the committed |
96 void iterate_from(int idx, HeapRegionClosure* blk); | 158 // sequence. Return a MemRegion that corresponds to the address |
159 // range of the uncommitted regions. Assume shrink_bytes is page and | |
160 // heap region aligned. | |
161 MemRegion shrink_by(size_t shrink_bytes, size_t* num_regions_deleted); | |
97 | 162 |
98 // Requires "shrink_bytes" to be a multiple of the page size and heap | 163 // Do some sanity checking. |
99 // region granularity. Deletes as many "rightmost" completely free heap | 164 void verify_optional() PRODUCT_RETURN; |
100 // regions from the sequence as comprise shrink_bytes bytes. Returns the | |
101 // MemRegion indicating the region those regions comprised, and sets | |
102 // "num_regions_deleted" to the number of regions deleted. | |
103 MemRegion shrink_by(size_t shrink_bytes, size_t& num_regions_deleted); | |
104 | |
105 // If "addr" falls within a region in the sequence, return that region, | |
106 // or else NULL. | |
107 inline HeapRegion* addr_to_region(const void* addr); | |
108 | |
109 void print(); | |
110 | |
111 // Prints out runs of empty regions. | |
112 void print_empty_runs(); | |
113 | |
114 }; | 165 }; |
115 | 166 |
116 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP | 167 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP |