comparison src/share/vm/gc_implementation/g1/heapRegionSeq.cpp @ 3766:c3f1170908be

7045330: G1: Simplify/fix the HeapRegionSeq class
7042285: G1: native memory leak during humongous object allocation
6804436: G1: heap region indices should be size_t
Summary: A series of fixes and improvements to the HeapRegionSeq class:
a) replace the _regions growable array with a standard C array,
b) avoid de-allocating / re-allocating HeapRegion instances when the heap shrinks / grows (fix for 7042285),
c) introduce a fast method to map an address to a HeapRegion via a "biased" array pointer (illustrated in the sketch after this header block),
d) embed the _hrs object in G1CollectedHeap instead of pointing to it via an indirection,
e) assume that all the regions added to the HeapRegionSeq instance are contiguous,
f) replace int's with size_t's for indices (and extend that to HeapRegion as part of 6804436),
g) remove unnecessary / unused methods,
h) rename a couple of fields (_alloc_search_start and _seq_bottom),
i) fix iterate_from() so that it does not always start from index 0 irrespective of the region passed to it,
j) add a verification method to check the HeapRegionSeq assumptions,
k) always call the wrappers for _hrs.iterate(), _hrs.length(), and _hrs.at() from G1CollectedHeap, not those methods directly, and
l) unify the code that expands the sequence (by either re-using or creating a new HeapRegion) and make it robust wrt a HeapRegion allocation failing.
Reviewed-by: stefank, johnc, brutisso
author tonyp
date Fri, 10 Jun 2011 13:16:40 -0400
parents 1216415d8e35
children 720b6a76dd9d
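
The centerpiece of this change is item (c): a constant-time address-to-region lookup through a "biased" array pointer, set up in initialize() below (`_regions_biased = _regions - ((size_t) bottom >> _region_shift)`) and checked by verify_optional(). The following is a minimal standalone sketch written for this review, not HotSpot code: the class and names (BiasedRegionMap, kRegionShift, Region) are invented, but the pointer arithmetic mirrors the patch, so a lookup is one shift plus one load, with no subtraction of the heap bottom on the hot path.

// Standalone illustration of the "biased array pointer" trick from item (c).
// Hypothetical names; only the arithmetic mirrors the patch.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>

struct Region { int id; };

class BiasedRegionMap {
  static const size_t kRegionShift = 20;  // 1 MB regions, like LogOfHRGrainBytes
  Region** _regions;                      // _regions[0] maps the heap bottom
  Region** _regions_biased;               // pre-biased alias of _regions
  uintptr_t _heap_bottom;

public:
  BiasedRegionMap(Region** regions, uintptr_t heap_bottom)
    : _regions(regions), _heap_bottom(heap_bottom) {
    // Subtract the index of the heap bottom once, up front. After this,
    // (addr >> kRegionShift) indexes _regions_biased directly. (Forming
    // this out-of-range pointer is technically UB in ISO C++, but it is
    // exactly what the HotSpot code does.)
    _regions_biased = _regions - (heap_bottom >> kRegionShift);
    assert(&_regions[0] == &_regions_biased[_heap_bottom >> kRegionShift] &&
           "bottom must map to index 0");
  }

  // O(1) lookup: one shift and one load, no bounds check (the "unsafe"
  // variant in the patch; the checked variant also asserts the address
  // lies within the heap).
  Region* addr_to_region_unsafe(uintptr_t addr) const {
    return _regions_biased[addr >> kRegionShift];
  }
};

int main() {
  const uintptr_t bottom = 0x40000000;  // pretend heap bottom, 1 MB aligned
  Region r0 = {0}, r1 = {1};
  Region* table[2] = {&r0, &r1};
  BiasedRegionMap map(table, bottom);
  // An address 1.5 MB into the heap falls in the second 1 MB region.
  std::printf("region id: %d\n",
              map.addr_to_region_unsafe(bottom + (3u << 19))->id);
  return 0;
}

A plain table would need `(addr - bottom) >> shift` per lookup; pre-biasing the base pointer folds the subtraction into initialization, which matters on hot paths that map addresses to regions constantly.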
--- a/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp  (3765:ae5b2f1dcf12)
+++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp  (3766:c3f1170908be)
@@ -21,316 +21,277 @@
  * questions.
  *
  */
 
 #include "precompiled.hpp"
+#include "gc_implementation/g1/heapRegion.hpp"
+#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/g1/heapRegionSets.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
-#include "gc_implementation/g1/heapRegionSeq.hpp"
 #include "memory/allocation.hpp"
 
-// Local to this file.
+// Private
 
-static int orderRegions(HeapRegion** hr1p, HeapRegion** hr2p) {
-  if ((*hr1p)->end() <= (*hr2p)->bottom()) return -1;
-  else if ((*hr2p)->end() <= (*hr1p)->bottom()) return 1;
-  else if (*hr1p == *hr2p) return 0;
-  else {
-    assert(false, "We should never compare distinct overlapping regions.");
-  }
-  return 0;
-}
-
-HeapRegionSeq::HeapRegionSeq(const size_t max_size) :
-  _alloc_search_start(0),
-  // The line below is the worst bit of C++ hackery I've ever written
-  // (Detlefs, 11/23). You should think of it as equivalent to
-  // "_regions(100, true)": initialize the growable array and inform it
-  // that it should allocate its elem array(s) on the C heap.
-  //
-  // The first argument, however, is actually a comma expression
-  // (set_allocation_type(this, C_HEAP), 100). The purpose of the
-  // set_allocation_type() call is to replace the default allocation
-  // type for embedded objects STACK_OR_EMBEDDED with C_HEAP. It will
-  // allow to pass the assert in GenericGrowableArray() which checks
-  // that a growable array object must be on C heap if elements are.
-  //
-  // Note: containing object is allocated on C heap since it is CHeapObj.
-  //
-  _regions((ResourceObj::set_allocation_type((address)&_regions,
-                                             ResourceObj::C_HEAP),
-            (int)max_size),
-           true),
-  _next_rr_candidate(0),
-  _seq_bottom(NULL)
-{}
-
-// Private methods.
-
-void HeapRegionSeq::print_empty_runs() {
-  int empty_run = 0;
-  int n_empty = 0;
-  int empty_run_start;
-  for (int i = 0; i < _regions.length(); i++) {
-    HeapRegion* r = _regions.at(i);
-    if (r->continuesHumongous()) continue;
-    if (r->is_empty()) {
-      assert(!r->isHumongous(), "H regions should not be empty.");
-      if (empty_run == 0) empty_run_start = i;
-      empty_run++;
-      n_empty++;
-    } else {
-      if (empty_run > 0) {
-        gclog_or_tty->print(" %d:%d", empty_run_start, empty_run);
-        empty_run = 0;
-      }
-    }
-  }
-  if (empty_run > 0) {
-    gclog_or_tty->print(" %d:%d", empty_run_start, empty_run);
-  }
-  gclog_or_tty->print_cr(" [tot = %d]", n_empty);
-}
-
-int HeapRegionSeq::find(HeapRegion* hr) {
-  // FIXME: optimized for adjacent regions of fixed size.
-  int ind = hr->hrs_index();
-  if (ind != -1) {
-    assert(_regions.at(ind) == hr, "Mismatch");
-  }
-  return ind;
-}
-
-
-// Public methods.
-
-void HeapRegionSeq::insert(HeapRegion* hr) {
-  assert(!_regions.is_full(), "Too many elements in HeapRegionSeq");
-  if (_regions.length() == 0
-      || _regions.top()->end() <= hr->bottom()) {
-    hr->set_hrs_index(_regions.length());
-    _regions.append(hr);
-  } else {
-    _regions.append(hr);
-    _regions.sort(orderRegions);
-    for (int i = 0; i < _regions.length(); i++) {
-      _regions.at(i)->set_hrs_index(i);
-    }
-  }
-  char* bot = (char*)_regions.at(0)->bottom();
-  if (_seq_bottom == NULL || bot < _seq_bottom) _seq_bottom = bot;
-}
-
-size_t HeapRegionSeq::length() {
-  return _regions.length();
-}
-
-size_t HeapRegionSeq::free_suffix() {
-  size_t res = 0;
-  int first = _regions.length() - 1;
-  int cur = first;
-  while (cur >= 0 &&
-         (_regions.at(cur)->is_empty()
-          && (first == cur
-              || (_regions.at(cur+1)->bottom() ==
-                  _regions.at(cur)->end())))) {
-    res++;
-    cur--;
-  }
-  return res;
-}
-
-int HeapRegionSeq::find_contiguous_from(int from, size_t num) {
-  assert(num > 1, "pre-condition");
-  assert(0 <= from && from <= _regions.length(),
-         err_msg("from: %d should be valid and <= than %d",
-                 from, _regions.length()));
-
-  int curr = from;
-  int first = -1;
+size_t HeapRegionSeq::find_contiguous_from(size_t from, size_t num) {
+  size_t len = length();
+  assert(num > 1, "use this only for sequences of length 2 or greater");
+  assert(from <= len,
+         err_msg("from: "SIZE_FORMAT" should be valid and <= than "SIZE_FORMAT,
                 from, len));
+
+  size_t curr = from;
+  size_t first = G1_NULL_HRS_INDEX;
   size_t num_so_far = 0;
-  while (curr < _regions.length() && num_so_far < num) {
-    HeapRegion* curr_hr = _regions.at(curr);
-    if (curr_hr->is_empty()) {
-      if (first == -1) {
+  while (curr < len && num_so_far < num) {
+    if (at(curr)->is_empty()) {
+      if (first == G1_NULL_HRS_INDEX) {
         first = curr;
         num_so_far = 1;
       } else {
         num_so_far += 1;
       }
     } else {
-      first = -1;
+      first = G1_NULL_HRS_INDEX;
       num_so_far = 0;
     }
     curr += 1;
   }
-
   assert(num_so_far <= num, "post-condition");
   if (num_so_far == num) {
     // we found enough space for the humongous object
-    assert(from <= first && first < _regions.length(), "post-condition");
-    assert(first < curr && (curr - first) == (int) num, "post-condition");
-    for (int i = first; i < first + (int) num; ++i) {
-      assert(_regions.at(i)->is_empty(), "post-condition");
+    assert(from <= first && first < len, "post-condition");
+    assert(first < curr && (curr - first) == num, "post-condition");
+    for (size_t i = first; i < first + num; ++i) {
+      assert(at(i)->is_empty(), "post-condition");
    }
     return first;
   } else {
     // we failed to find enough space for the humongous object
-    return -1;
+    return G1_NULL_HRS_INDEX;
   }
 }
 
-int HeapRegionSeq::find_contiguous(size_t num) {
-  assert(num > 1, "otherwise we should not be calling this");
-  assert(0 <= _alloc_search_start && _alloc_search_start <= _regions.length(),
-         err_msg("_alloc_search_start: %d should be valid and <= than %d",
-                 _alloc_search_start, _regions.length()));
-
-  int start = _alloc_search_start;
-  int res = find_contiguous_from(start, num);
-  if (res == -1 && start != 0) {
-    // Try starting from the beginning. If _alloc_search_start was 0,
+// Public
+
+void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end,
+                               size_t max_length) {
+  assert((size_t) bottom % HeapRegion::GrainBytes == 0,
+         "bottom should be heap region aligned");
+  assert((size_t) end % HeapRegion::GrainBytes == 0,
+         "end should be heap region aligned");
+
+  _length = 0;
+  _heap_bottom = bottom;
+  _heap_end = end;
+  _region_shift = HeapRegion::LogOfHRGrainBytes;
+  _next_search_index = 0;
+  _allocated_length = 0;
+  _max_length = max_length;
+
+  _regions = NEW_C_HEAP_ARRAY(HeapRegion*, max_length);
+  memset(_regions, 0, max_length * sizeof(HeapRegion*));
+  _regions_biased = _regions - ((size_t) bottom >> _region_shift);
+
+  assert(&_regions[0] == &_regions_biased[addr_to_index_biased(bottom)],
+         "bottom should be included in the region with index 0");
+}
+
+MemRegion HeapRegionSeq::expand_by(HeapWord* old_end,
+                                   HeapWord* new_end,
+                                   FreeRegionList* list) {
+  assert(old_end < new_end, "don't call it otherwise");
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+
+  HeapWord* next_bottom = old_end;
+  assert(_heap_bottom <= next_bottom, "invariant");
+  while (next_bottom < new_end) {
+    assert(next_bottom < _heap_end, "invariant");
+    size_t index = length();
+
+    assert(index < _max_length, "otherwise we cannot expand further");
+    if (index == 0) {
+      // We have not allocated any regions so far
+      assert(next_bottom == _heap_bottom, "invariant");
+    } else {
+      // next_bottom should match the end of the last/previous region
+      assert(next_bottom == at(index - 1)->end(), "invariant");
+    }
+
+    if (index == _allocated_length) {
+      // We have to allocate a new HeapRegion.
+      HeapRegion* new_hr = g1h->new_heap_region(index, next_bottom);
+      if (new_hr == NULL) {
+        // allocation failed, we bail out and return what we have done so far
+        return MemRegion(old_end, next_bottom);
+      }
+      assert(_regions[index] == NULL, "invariant");
+      _regions[index] = new_hr;
+      increment_length(&_allocated_length);
+    }
+    // Have to increment the length first, otherwise we will get an
+    // assert failure at(index) below.
+    increment_length(&_length);
+    HeapRegion* hr = at(index);
+    list->add_as_tail(hr);
+
+    next_bottom = hr->end();
+  }
+  assert(next_bottom == new_end, "post-condition");
+  return MemRegion(old_end, next_bottom);
+}
+
+size_t HeapRegionSeq::free_suffix() {
+  size_t res = 0;
+  size_t index = length();
+  while (index > 0) {
+    index -= 1;
+    if (!at(index)->is_empty()) {
+      break;
+    }
+    res += 1;
+  }
+  return res;
+}
+
+size_t HeapRegionSeq::find_contiguous(size_t num) {
+  assert(num > 1, "use this only for sequences of length 2 or greater");
+  assert(_next_search_index <= length(),
+         err_msg("_next_search_index: "SIZE_FORMAT" "
+                 "should be valid and <= than "SIZE_FORMAT,
+                 _next_search_index, length()));
+
+  size_t start = _next_search_index;
+  size_t res = find_contiguous_from(start, num);
+  if (res == G1_NULL_HRS_INDEX && start > 0) {
+    // Try starting from the beginning. If _next_search_index was 0,
     // no point in doing this again.
     res = find_contiguous_from(0, num);
   }
-  if (res != -1) {
-    assert(0 <= res && res < _regions.length(),
-           err_msg("res: %d should be valid", res));
-    _alloc_search_start = res + (int) num;
-    assert(0 < _alloc_search_start && _alloc_search_start <= _regions.length(),
-           err_msg("_alloc_search_start: %d should be valid",
-                   _alloc_search_start));
+  if (res != G1_NULL_HRS_INDEX) {
+    assert(res < length(),
+           err_msg("res: "SIZE_FORMAT" should be valid", res));
+    _next_search_index = res + num;
+    assert(_next_search_index <= length(),
+           err_msg("_next_search_index: "SIZE_FORMAT" "
+                   "should be valid and <= than "SIZE_FORMAT,
+                   _next_search_index, length()));
   }
   return res;
 }
 
-void HeapRegionSeq::iterate(HeapRegionClosure* blk) {
-  iterate_from((HeapRegion*)NULL, blk);
+void HeapRegionSeq::iterate(HeapRegionClosure* blk) const {
+  iterate_from((HeapRegion*) NULL, blk);
 }
 
-// The first argument r is the heap region at which iteration begins.
-// This operation runs fastest when r is NULL, or the heap region for
-// which a HeapRegionClosure most recently returned true, or the
-// heap region immediately to its right in the sequence. In all
-// other cases a linear search is required to find the index of r.
+void HeapRegionSeq::iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const {
+  size_t hr_index = 0;
+  if (hr != NULL) {
+    hr_index = (size_t) hr->hrs_index();
+  }
 
-void HeapRegionSeq::iterate_from(HeapRegion* r, HeapRegionClosure* blk) {
-
-  // :::: FIXME ::::
-  // Static cache value is bad, especially when we start doing parallel
-  // remembered set update. For now just don't cache anything (the
-  // code in the def'd out blocks).
-
-#if 0
-  static int cached_j = 0;
-#endif
-  int len = _regions.length();
-  int j = 0;
-  // Find the index of r.
-  if (r != NULL) {
-#if 0
-    assert(cached_j >= 0, "Invariant.");
-    if ((cached_j < len) && (r == _regions.at(cached_j))) {
-      j = cached_j;
-    } else if ((cached_j + 1 < len) && (r == _regions.at(cached_j + 1))) {
-      j = cached_j + 1;
-    } else {
-      j = find(r);
-#endif
-      if (j < 0) {
-        j = 0;
-      }
-#if 0
-    }
-#endif
-  }
-  int i;
-  for (i = j; i < len; i += 1) {
-    int res = blk->doHeapRegion(_regions.at(i));
+  size_t len = length();
+  for (size_t i = hr_index; i < len; i += 1) {
+    bool res = blk->doHeapRegion(at(i));
     if (res) {
-#if 0
-      cached_j = i;
-#endif
      blk->incomplete();
       return;
     }
   }
-  for (i = 0; i < j; i += 1) {
-    int res = blk->doHeapRegion(_regions.at(i));
+  for (size_t i = 0; i < hr_index; i += 1) {
+    bool res = blk->doHeapRegion(at(i));
     if (res) {
-#if 0
-      cached_j = i;
-#endif
      blk->incomplete();
       return;
     }
   }
 }
 
-void HeapRegionSeq::iterate_from(int idx, HeapRegionClosure* blk) {
-  int len = _regions.length();
-  int i;
-  for (i = idx; i < len; i++) {
-    if (blk->doHeapRegion(_regions.at(i))) {
-      blk->incomplete();
-      return;
-    }
-  }
-  for (i = 0; i < idx; i++) {
-    if (blk->doHeapRegion(_regions.at(i))) {
-      blk->incomplete();
-      return;
-    }
-  }
-}
-
 MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes,
-                                   size_t& num_regions_deleted) {
+                                   size_t* num_regions_deleted) {
   // Reset this in case it's currently pointing into the regions that
   // we just removed.
-  _alloc_search_start = 0;
+  _next_search_index = 0;
 
   assert(shrink_bytes % os::vm_page_size() == 0, "unaligned");
   assert(shrink_bytes % HeapRegion::GrainBytes == 0, "unaligned");
-
-  if (_regions.length() == 0) {
-    num_regions_deleted = 0;
-    return MemRegion();
-  }
-  int j = _regions.length() - 1;
-  HeapWord* end = _regions.at(j)->end();
+  assert(length() > 0, "the region sequence should not be empty");
+  assert(length() <= _allocated_length, "invariant");
+  assert(_allocated_length > 0, "we should have at least one region committed");
+
+  // around the loop, i will be the next region to be removed
+  size_t i = length() - 1;
+  assert(i > 0, "we should never remove all regions");
+  // [last_start, end) is the MemRegion that covers the regions we will remove.
+  HeapWord* end = at(i)->end();
   HeapWord* last_start = end;
-  while (j >= 0 && shrink_bytes > 0) {
-    HeapRegion* cur = _regions.at(j);
-    // We have to leave humongous regions where they are,
-    // and work around them.
-    if (cur->isHumongous()) {
-      return MemRegion(last_start, end);
-    }
-    assert(cur == _regions.top(), "Should be top");
+  *num_regions_deleted = 0;
+  while (shrink_bytes > 0) {
+    HeapRegion* cur = at(i);
+    // We should leave the humongous regions where they are.
+    if (cur->isHumongous()) break;
+    // We should stop shrinking if we come across a non-empty region.
     if (!cur->is_empty()) break;
+
+    i -= 1;
+    *num_regions_deleted += 1;
     shrink_bytes -= cur->capacity();
-    num_regions_deleted++;
-    _regions.pop();
     last_start = cur->bottom();
-    // We need to delete these somehow, but can't currently do so here: if
-    // we do, the ZF thread may still access the deleted region. We'll
-    // leave this here as a reminder that we have to do something about
-    // this.
-    // delete cur;
-    j--;
+    decrement_length(&_length);
+    // We will reclaim the HeapRegion. _allocated_length should be
+    // covering this index. So, even though we removed the region from
+    // the active set by decreasing _length, we still have it
+    // available in the future if we need to re-use it.
+    assert(i > 0, "we should never remove all regions");
+    assert(length() > 0, "we should never remove all regions");
   }
   return MemRegion(last_start, end);
 }
 
-class PrintHeapRegionClosure : public HeapRegionClosure {
-public:
-  bool doHeapRegion(HeapRegion* r) {
-    gclog_or_tty->print(PTR_FORMAT ":", r);
-    r->print();
-    return false;
-  }
-};
-
-void HeapRegionSeq::print() {
-  PrintHeapRegionClosure cl;
-  iterate(&cl);
-}
+#ifndef PRODUCT
+void HeapRegionSeq::verify_optional() {
+  guarantee(_length <= _allocated_length,
+            err_msg("invariant: _length: "SIZE_FORMAT" "
+                    "_allocated_length: "SIZE_FORMAT,
+                    _length, _allocated_length));
+  guarantee(_allocated_length <= _max_length,
+            err_msg("invariant: _allocated_length: "SIZE_FORMAT" "
+                    "_max_length: "SIZE_FORMAT,
+                    _allocated_length, _max_length));
+  guarantee(_next_search_index <= _length,
+            err_msg("invariant: _next_search_index: "SIZE_FORMAT" "
+                    "_length: "SIZE_FORMAT,
+                    _next_search_index, _length));
+
+  HeapWord* prev_end = _heap_bottom;
+  for (size_t i = 0; i < _allocated_length; i += 1) {
+    HeapRegion* hr = _regions[i];
+    guarantee(hr != NULL, err_msg("invariant: i: "SIZE_FORMAT, i));
+    guarantee(hr->bottom() == prev_end,
+              err_msg("invariant i: "SIZE_FORMAT" "HR_FORMAT" "
+                      "prev_end: "PTR_FORMAT,
+                      i, HR_FORMAT_PARAMS(hr), prev_end));
+    guarantee(hr->hrs_index() == i,
+              err_msg("invariant: i: "SIZE_FORMAT" hrs_index(): "SIZE_FORMAT,
+                      i, hr->hrs_index()));
+    if (i < _length) {
+      // Asserts will fire if i is >= _length
+      HeapWord* addr = hr->bottom();
+      guarantee(addr_to_region(addr) == hr, "sanity");
+      guarantee(addr_to_region_unsafe(addr) == hr, "sanity");
+    } else {
+      guarantee(hr->is_empty(), "sanity");
+      guarantee(!hr->isHumongous(), "sanity");
+      // using assert instead of guarantee here since containing_set()
+      // is only available in non-product builds.
+      assert(hr->containing_set() == NULL, "sanity");
+    }
+    if (hr->startsHumongous()) {
+      prev_end = hr->orig_end();
+    } else {
+      prev_end = hr->end();
+    }
+  }
+  for (size_t i = _allocated_length; i < _max_length; i += 1) {
+    guarantee(_regions[i] == NULL, err_msg("invariant i: "SIZE_FORMAT, i));
+  }
+}
+#endif // PRODUCT
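
The helpers used throughout the new code (at(), increment_length(), decrement_length(), addr_to_index_biased()) live in heapRegionSeq.hpp / heapRegionSeq.inline.hpp, which this comparison does not show. What the .cpp does make visible is the two-watermark scheme behind fixes (b) and (l): _length counts the regions in active use, _allocated_length counts the HeapRegion objects ever created; shrink_by() only lowers _length, so a later expand_by() re-uses the objects between the two watermarks instead of allocating fresh ones, which is what plugs the 7042285 leak. The following is a minimal, self-contained sketch of that scheme with invented names (MiniRegionSeq, expand_by_one), not HotSpot code:

// Standalone illustration of the two-watermark re-use scheme.
#include <cassert>
#include <cstdio>
#include <vector>

struct MiniRegion { size_t index; };

class MiniRegionSeq {
  std::vector<MiniRegion*> _regions;  // slot i stays non-NULL once allocated
  size_t _length;                     // active regions: [0, _length)
  size_t _allocated_length;           // ever-allocated objects: [0, _allocated_length)

public:
  explicit MiniRegionSeq(size_t max_length)
    : _regions(max_length, NULL), _length(0), _allocated_length(0) {}

  // Grow the active sequence by one region, re-using a previously
  // allocated object when one exists past the active watermark.
  MiniRegion* expand_by_one() {
    size_t index = _length;
    assert(index < _regions.size() && "cannot expand further");
    if (index == _allocated_length) {
      _regions[index] = new MiniRegion();  // first time only: really allocate
      _regions[index]->index = index;
      _allocated_length += 1;
    }
    _length += 1;                          // re-use path: just bump _length
    return _regions[index];
  }

  // Shrink by one region: only the active watermark moves; the object
  // stays in the table for future re-use (no delete, hence no churn or
  // leak across repeated grow/shrink cycles).
  void shrink_by_one() {
    assert(_length > 0 && "nothing to shrink");
    _length -= 1;
  }

  size_t length() const { return _length; }
  size_t allocated_length() const { return _allocated_length; }
};

int main() {
  MiniRegionSeq seq(8);
  seq.expand_by_one();
  seq.expand_by_one();
  seq.shrink_by_one();
  MiniRegion* r = seq.expand_by_one();   // re-uses the object at index 1
  std::printf("length: %zu, allocated: %zu, reused index: %zu\n",
              seq.length(), seq.allocated_length(), r->index);
  return 0;
}

Running this prints "length: 2, allocated: 2, reused index: 1": the second expansion after the shrink bumps _length without touching the allocator, mirroring the `index == _allocated_length` branch in expand_by() above.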