comparison: src/share/vm/gc_implementation/g1/heapRegionSeq.cpp @ 20336:6701abbc4441

8054818: Refactor HeapRegionSeq to manage heap region and auxiliary data
Summary: Let HeapRegionSeq manage the heap region and auxiliary data to decrease the amount of responsibilities of G1CollectedHeap, and encapsulate this work from other code.
Reviewed-by: jwilhelm, jmasa, mgerdin, brutisso

author    tschatzl
date      Tue, 19 Aug 2014 10:50:27 +0200
parents   eec72fa4b108
children  1f1d373cd044
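
The summary above is the whole story of the patch: commit and uncommit of the region backing store, and the resizing of the auxiliary structures that must track it (card table, block offset table, hot card cache), move from G1CollectedHeap into HeapRegionSeq. A minimal standalone model of that split (invented names, not HotSpot code):

#include <cstddef>

// Stands in for the card table, BOT and hot card cache of the real code.
struct AuxiliaryData {
  void resize(std::size_t committed_bytes) { /* track the committed heap */ }
};

// After the patch, the sequence owns its backing storage, so the one place
// that grows or shrinks it also keeps the auxiliary data in sync; the
// collector merely asks for more regions.
class RegionSequenceModel {
  AuxiliaryData _aux;
  std::size_t   _committed_bytes;
public:
  RegionSequenceModel() : _committed_bytes(0) {}
  void commit(std::size_t bytes) {
    _committed_bytes += bytes;       // grow the backing storage
    _aux.resize(_committed_bytes);   // update dependent structures here,
  }                                  // not in G1CollectedHeap
};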
--- 20335:eec72fa4b108
+++ 20336:6701abbc4441
  */
 
 #include "precompiled.hpp"
 #include "gc_implementation/g1/heapRegion.hpp"
 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
-#include "gc_implementation/g1/heapRegionSet.hpp"
+#include "gc_implementation/g1/heapRegionSet.inline.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+#include "gc_implementation/g1/concurrentG1Refine.hpp"
 #include "memory/allocation.hpp"
 
-// Private
-
-uint HeapRegionSeq::find_contiguous_from(uint from, uint num) {
-  uint len = length();
-  assert(num > 1, "use this only for sequences of length 2 or greater");
-  assert(from <= len,
-         err_msg("from: %u should be valid and <= than %u", from, len));
-
-  uint curr = from;
-  uint first = G1_NULL_HRS_INDEX;
-  uint num_so_far = 0;
-  while (curr < len && num_so_far < num) {
-    if (at(curr)->is_empty()) {
-      if (first == G1_NULL_HRS_INDEX) {
-        first = curr;
-        num_so_far = 1;
-      } else {
-        num_so_far += 1;
-      }
+void HeapRegionSeq::initialize(ReservedSpace reserved) {
+  _reserved = reserved;
+  _storage.initialize(reserved, 0);
+
+  _num_committed = 0;
+
+  _allocated_heapregions_length = 0;
+
+  _regions.initialize((HeapWord*)_storage.low_boundary(), (HeapWord*)_storage.high_boundary(), HeapRegion::GrainBytes);
+}
+
+bool HeapRegionSeq::is_available(uint region) const {
+  return region < _num_committed;
+}
+
+#ifdef ASSERT
+bool HeapRegionSeq::is_free(HeapRegion* hr) const {
+  return _free_list.contains(hr);
+}
+#endif
+
+HeapRegion* HeapRegionSeq::new_heap_region(uint hrs_index) {
+  HeapWord* bottom = G1CollectedHeap::heap()->bottom_addr_for_region(hrs_index);
+  MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
+  assert(reserved().contains(mr), "invariant");
+  return new HeapRegion(hrs_index, G1CollectedHeap::heap()->bot_shared(), mr);
+}
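
At this point in the series the committed regions always form a dense prefix of the index space, which is why is_available() above reduces to a bound check against _num_committed. A tiny standalone illustration (model code with assumed values, not part of the patch):

#include <cassert>

struct AvailabilityModel {
  unsigned num_committed;                     // mirrors _num_committed
  bool is_available(unsigned region) const {  // mirrors is_available()
    return region < num_committed;
  }
};

int main() {
  AvailabilityModel seq = { 3 };   // regions 0..2 committed, the rest not
  assert(seq.is_available(0) && seq.is_available(2));
  assert(!seq.is_available(3));
  return 0;
}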
+
+void HeapRegionSeq::update_committed_space(HeapWord* old_end,
+                                           HeapWord* new_end) {
+  assert(old_end != new_end, "don't call this otherwise");
+  // We may not have officially committed the area. So construct and use a separate one.
+  MemRegion new_committed(heap_bottom(), new_end);
+  // Tell the card table about the update.
+  Universe::heap()->barrier_set()->resize_covered_region(new_committed);
+  // Tell the BOT about the update.
+  G1CollectedHeap::heap()->bot_shared()->resize(new_committed.word_size());
+  // Tell the hot card cache about the update
+  G1CollectedHeap::heap()->concurrent_g1_refine()->hot_card_cache()->resize_card_counts(new_committed.byte_size());
+}
+
+void HeapRegionSeq::commit_regions(uint index, size_t num_regions) {
+  guarantee(num_regions > 0, "Must commit more than zero regions");
+  guarantee(_num_committed + num_regions <= max_length(), "Cannot commit more than the maximum amount of regions");
+
+  _storage.expand_by(num_regions * HeapRegion::GrainBytes);
+  update_committed_space(heap_top(), heap_top() + num_regions * HeapRegion::GrainWords);
+}
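
Note the unit change in commit_regions(): the storage grows by GrainBytes per region while heap_top(), a HeapWord*, advances by GrainWords per region. A worked example under assumed sizes (1 MB regions on a 64-bit build; neither number comes from the patch):

#include <cstddef>
#include <cstdio>

int main() {
  const std::size_t M            = 1024 * 1024;
  const std::size_t HeapWordSize = 8;      // assumption: 64-bit build
  const std::size_t GrainBytes   = 1 * M;  // assumption: 1 MB regions
  const std::size_t GrainWords   = GrainBytes / HeapWordSize;  // 131072
  const std::size_t num_regions  = 2;
  // Committing two regions grows the storage by 2097152 bytes while the
  // committed end moves by 262144 words.
  std::printf("%lu bytes, %lu words\n",
              (unsigned long)(num_regions * GrainBytes),
              (unsigned long)(num_regions * GrainWords));
  return 0;
}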
+
+void HeapRegionSeq::uncommit_regions(uint start, size_t num_regions) {
+  guarantee(num_regions >= 1, "Need to specify at least one region to uncommit");
+  guarantee(_num_committed >= num_regions, "pre-condition");
+
+  // Print before uncommitting.
+  if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
+    for (uint i = start; i < start + num_regions; i++) {
+      HeapRegion* hr = at(i);
+      G1CollectedHeap::heap()->hr_printer()->uncommit(hr->bottom(), hr->end());
+    }
+  }
+
+  HeapWord* old_end = heap_top();
+  _num_committed -= (uint)num_regions;
+  OrderAccess::fence();
+
+  _storage.shrink_by(num_regions * HeapRegion::GrainBytes);
+  update_committed_space(old_end, heap_top());
+}
+
+void HeapRegionSeq::make_regions_available(uint start, uint num_regions) {
+  guarantee(num_regions > 0, "No point in calling this for zero regions");
+  commit_regions(start, num_regions);
+  for (uint i = start; i < start + num_regions; i++) {
+    if (_regions.get_by_index(i) == NULL) {
+      HeapRegion* new_hr = new_heap_region(i);
+      _regions.set_by_index(i, new_hr);
+      _allocated_heapregions_length = MAX2(_allocated_heapregions_length, i + 1);
+    }
+  }
+
+  _num_committed += (size_t)num_regions;
+
+  OrderAccess::fence();
+
+  for (uint i = start; i < start + num_regions; i++) {
+    assert(is_available(i), err_msg("Just made region %u available but is apparently not.", i));
+    HeapRegion* hr = at(i);
+    if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
+      G1CollectedHeap::heap()->hr_printer()->commit(hr->bottom(), hr->end());
+    }
+    HeapWord* bottom = G1CollectedHeap::heap()->bottom_addr_for_region(i);
+    MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
+
+    hr->initialize(mr);
+    insert_into_free_list(at(i));
+  }
+}
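
Note that make_regions_available() allocates each HeapRegion object at most once (the get_by_index(i) == NULL check) and uncommit_regions() never clears the slot: only the backing memory is uncommitted, and a previously created object is re-initialized and reused when its index is committed again. _allocated_heapregions_length is therefore a high-water mark of created objects, not the committed count.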
+
+uint HeapRegionSeq::expand_by(uint num_regions) {
+  // Only ever expand from the end of the heap.
+  return expand_at(_num_committed, num_regions);
+}
+
+uint HeapRegionSeq::expand_at(uint start, uint num_regions) {
+  if (num_regions == 0) {
+    return 0;
+  }
+
+  uint cur = start;
+  uint idx_last_found = 0;
+  uint num_last_found = 0;
+
+  uint expanded = 0;
+
+  while (expanded < num_regions &&
+         (num_last_found = find_unavailable_from_idx(cur, &idx_last_found)) > 0) {
+    uint to_expand = MIN2(num_regions - expanded, num_last_found);
+    make_regions_available(idx_last_found, to_expand);
+    expanded += to_expand;
+    cur = idx_last_found + num_last_found + 1;
+  }
+
+  verify_optional();
+  return expanded;
+}
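
Worked example for expand_at() (assumed layout, not from the patch): with seven regions whose availability is C C U U C U U (C = committed, U = uncommitted), expand_at(0, 3) first finds the unavailable run of length 2 starting at index 2 and commits it, then resumes at index 5 (the +1 in the cursor update is harmless, since the region directly after a run is available by definition), finds the run of length 2 there, commits one of its two regions, and returns 3. A runnable model appears after find_unavailable_from_idx() below.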
+
+uint HeapRegionSeq::find_contiguous(size_t num, bool empty_only) {
+  uint found = 0;
+  size_t length_found = 0;
+  uint cur = 0;
+
+  while (length_found < num && cur < max_length()) {
+    HeapRegion* hr = _regions.get_by_index(cur);
+    if ((!empty_only && !is_available(cur)) || (is_available(cur) && hr != NULL && hr->is_empty())) {
+      // This region is a potential candidate for allocation into.
+      length_found++;
     } else {
-      first = G1_NULL_HRS_INDEX;
-      num_so_far = 0;
-    }
-    curr += 1;
-  }
-  assert(num_so_far <= num, "post-condition");
-  if (num_so_far == num) {
-    // we found enough space for the humongous object
-    assert(from <= first && first < len, "post-condition");
-    assert(first < curr && (curr - first) == num, "post-condition");
-    for (uint i = first; i < first + num; ++i) {
-      assert(at(i)->is_empty(), "post-condition");
-    }
-    return first;
+      // This region is not a candidate. The next region is the next possible one.
+      found = cur + 1;
+      length_found = 0;
+    }
+    cur++;
+  }
+
+  if (length_found == num) {
+    for (uint i = found; i < (found + num); i++) {
+      HeapRegion* hr = _regions.get_by_index(i);
+      // sanity check
+      guarantee((!empty_only && !is_available(i)) || (is_available(i) && hr != NULL && hr->is_empty()),
+                err_msg("Found region sequence starting at " UINT32_FORMAT ", length " SIZE_FORMAT
+                        " that is not empty at " UINT32_FORMAT ". Hr is " PTR_FORMAT, found, num, i, p2i(hr)));
+    }
+    return found;
   } else {
-    // we failed to find enough space for the humongous object
-    return G1_NULL_HRS_INDEX;
-  }
-}
-
-// Public
-
-void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end) {
-  assert((uintptr_t) bottom % HeapRegion::GrainBytes == 0,
-         "bottom should be heap region aligned");
-  assert((uintptr_t) end % HeapRegion::GrainBytes == 0,
-         "end should be heap region aligned");
-
-  _next_search_index = 0;
-  _allocated_length = 0;
-
-  _regions.initialize(bottom, end, HeapRegion::GrainBytes);
-}
-
-MemRegion HeapRegionSeq::expand_by(HeapWord* old_end,
-                                   HeapWord* new_end,
-                                   FreeRegionList* list) {
-  assert(old_end < new_end, "don't call it otherwise");
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-
-  HeapWord* next_bottom = old_end;
-  assert(heap_bottom() <= next_bottom, "invariant");
-  while (next_bottom < new_end) {
-    assert(next_bottom < heap_end(), "invariant");
-    uint index = length();
-
-    assert(index < max_length(), "otherwise we cannot expand further");
-    if (index == 0) {
-      // We have not allocated any regions so far
-      assert(next_bottom == heap_bottom(), "invariant");
-    } else {
-      // next_bottom should match the end of the last/previous region
-      assert(next_bottom == at(index - 1)->end(), "invariant");
-    }
-
-    if (index == _allocated_length) {
-      // We have to allocate a new HeapRegion.
-      HeapRegion* new_hr = g1h->new_heap_region(index, next_bottom);
-      if (new_hr == NULL) {
-        // allocation failed, we bail out and return what we have done so far
-        return MemRegion(old_end, next_bottom);
-      }
-      assert(_regions.get_by_index(index) == NULL, "invariant");
-      _regions.set_by_index(index, new_hr);
-      increment_allocated_length();
-    }
-    // Have to increment the length first, otherwise we will get an
-    // assert failure at(index) below.
-    increment_length();
-    HeapRegion* hr = at(index);
-    list->add_as_tail(hr);
-
-    next_bottom = hr->end();
-  }
-  assert(next_bottom == new_end, "post-condition");
-  return MemRegion(old_end, next_bottom);
-}
-
-uint HeapRegionSeq::free_suffix() {
-  uint res = 0;
-  uint index = length();
-  while (index > 0) {
-    index -= 1;
-    if (!at(index)->is_empty()) {
-      break;
-    }
-    res += 1;
-  }
-  return res;
-}
-
-uint HeapRegionSeq::find_contiguous(uint num) {
-  assert(num > 1, "use this only for sequences of length 2 or greater");
-  assert(_next_search_index <= length(),
-         err_msg("_next_search_index: %u should be valid and <= than %u",
-                 _next_search_index, length()));
-
-  uint start = _next_search_index;
-  uint res = find_contiguous_from(start, num);
-  if (res == G1_NULL_HRS_INDEX && start > 0) {
-    // Try starting from the beginning. If _next_search_index was 0,
-    // no point in doing this again.
-    res = find_contiguous_from(0, num);
-  }
-  if (res != G1_NULL_HRS_INDEX) {
-    assert(res < length(), err_msg("res: %u should be valid", res));
-    _next_search_index = res + num;
-    assert(_next_search_index <= length(),
-           err_msg("_next_search_index: %u should be valid and <= than %u",
-                   _next_search_index, length()));
-  }
-  return res;
-}
-
+    return G1_NO_HRS_INDEX;
+  }
+}
+
+HeapRegion* HeapRegionSeq::next_region_in_heap(const HeapRegion* r) const {
+  guarantee(r != NULL, "Start region must be a valid region");
+  guarantee(is_available(r->hrs_index()), err_msg("Trying to iterate starting from region %u which is not in the heap", r->hrs_index()));
+  for (uint i = r->hrs_index() + 1; i < _allocated_heapregions_length; i++) {
+    HeapRegion* hr = _regions.get_by_index(i);
+    if (is_available(i)) {
+      return hr;
+    }
+  }
+  return NULL;
+}
+
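A hedged usage sketch for the new next_region_in_heap() helper; the loop is illustrative and not part of the patch, and `seq` and `start` are assumed to be a HeapRegionSeq* and a committed HeapRegion*:

// Visit every committed region above 'start', skipping uncommitted gaps.
for (HeapRegion* cur = seq->next_region_in_heap(start);
     cur != NULL;
     cur = seq->next_region_in_heap(cur)) {
  // ... process cur ...
}
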
 void HeapRegionSeq::iterate(HeapRegionClosure* blk) const {
-  iterate_from((HeapRegion*) NULL, blk);
-}
-
-void HeapRegionSeq::iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const {
-  uint hr_index = 0;
-  if (hr != NULL) {
-    hr_index = hr->hrs_index();
-  }
-
-  uint len = length();
-  for (uint i = hr_index; i < len; i += 1) {
+  uint len = max_length();
+
+  for (uint i = 0; i < len; i++) {
+    if (!is_available(i)) {
+      continue;
+    }
+    guarantee(at(i) != NULL, err_msg("Tried to access region %u that has a NULL HeapRegion*", i));
     bool res = blk->doHeapRegion(at(i));
     if (res) {
       blk->incomplete();
       return;
     }
   }
-  for (uint i = 0; i < hr_index; i += 1) {
-    bool res = blk->doHeapRegion(at(i));
+}
+
+uint HeapRegionSeq::find_unavailable_from_idx(uint start_idx, uint* res_idx) const {
+  guarantee(res_idx != NULL, "checking");
+  guarantee(start_idx <= (max_length() + 1), "checking");
+
+  uint num_regions = 0;
+
+  uint cur = start_idx;
+  while (cur < max_length() && is_available(cur)) {
+    cur++;
+  }
+  if (cur == max_length()) {
+    return num_regions;
+  }
+  *res_idx = cur;
+  while (cur < max_length() && !is_available(cur)) {
+    cur++;
+  }
+  num_regions = cur - *res_idx;
+#ifdef ASSERT
+  for (uint i = *res_idx; i < (*res_idx + num_regions); i++) {
+    assert(!is_available(i), "just checking");
+  }
+  assert(cur == max_length() || num_regions == 0 || is_available(cur),
+         err_msg("The region at the current position %u must be available or at the end of the heap.", cur));
+#endif
+  return num_regions;
+}
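
A compact standalone model of how expand_at() and find_unavailable_from_idx() cooperate (model code over a plain bool vector, not HotSpot code); it replays the worked example given after expand_at() above:

#include <cassert>
#include <vector>

// true = committed/available, false = uncommitted.
typedef std::vector<bool> Avail;

// Model of find_unavailable_from_idx(): length of the first unavailable
// run at or after 'start'; its start index is written to *res_idx.
static unsigned find_unavailable(const Avail& a, unsigned start, unsigned* res_idx) {
  unsigned cur = start;
  while (cur < a.size() && a[cur]) cur++;
  if (cur == a.size()) return 0;
  *res_idx = cur;
  while (cur < a.size() && !a[cur]) cur++;
  return cur - *res_idx;
}

// Model of expand_at(): commit up to 'num' regions, lowest index first.
static unsigned expand_at(Avail& a, unsigned start, unsigned num) {
  unsigned expanded = 0, cur = start, idx = 0, found;
  while (expanded < num && (found = find_unavailable(a, cur, &idx)) > 0) {
    unsigned to_expand = (num - expanded < found) ? num - expanded : found;
    for (unsigned i = idx; i < idx + to_expand; i++) a[i] = true;
    expanded += to_expand;
    cur = idx + found + 1;  // the region right after a run is available anyway
  }
  return expanded;
}

int main() {
  // Layout C C U U C U U: committed at 0, 1, 4; gaps at 2-3 and 5-6.
  bool init[] = { true, true, false, false, true, false, false };
  Avail a(init, init + 7);
  assert(expand_at(a, 0, 3) == 3);   // commits 2, 3 and 5
  assert(a[2] && a[3] && a[5] && !a[6]);
  return 0;
}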
+
+uint HeapRegionSeq::start_region_for_worker(uint worker_i, uint num_workers, uint num_regions) const {
+  return num_regions * worker_i / num_workers;
+}
+
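start_region_for_worker() spreads the workers evenly: with num_regions == 10 and num_workers == 4 the starting indices are 10*0/4 = 0, 10*1/4 = 2, 10*2/4 = 5 and 10*3/4 = 7. Since every worker then walks all _allocated_heapregions_length slots modulo the length and must claim each region, the split is only a load-balancing hint, not a correctness requirement.
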
+void HeapRegionSeq::par_iterate(HeapRegionClosure* blk, uint worker_id, uint num_workers, jint claim_value) const {
+  const uint start_index = start_region_for_worker(worker_id, num_workers, _allocated_heapregions_length);
+
+  // Every worker will actually look at all regions, skipping over regions that
+  // are currently not committed.
+  // This also (potentially) iterates over regions newly allocated during GC. This
+  // is no problem except for some extra work.
+  for (uint count = 0; count < _allocated_heapregions_length; count++) {
+    const uint index = (start_index + count) % _allocated_heapregions_length;
+    assert(0 <= index && index < _allocated_heapregions_length, "sanity");
+    // Skip over unavailable regions
+    if (!is_available(index)) {
+      continue;
+    }
+    HeapRegion* r = _regions.get_by_index(index);
+    // We'll ignore "continues humongous" regions (we'll process them
+    // when we come across their corresponding "start humongous"
+    // region) and regions already claimed.
+    if (r->claim_value() == claim_value || r->continuesHumongous()) {
+      continue;
+    }
+    // OK, try to claim it
+    if (!r->claimHeapRegion(claim_value)) {
+      continue;
+    }
+    // Success!
+    if (r->startsHumongous()) {
+      // If the region is "starts humongous" we'll iterate over its
+      // "continues humongous" first; in fact we'll do them
+      // first. The order is important. In one case, calling the
+      // closure on the "starts humongous" region might de-allocate
+      // and clear all its "continues humongous" regions and, as a
+      // result, we might end up processing them twice. So, we'll do
+      // them first (note: most closures will ignore them anyway) and
+      // then we'll do the "starts humongous" region.
+      for (uint ch_index = index + 1; ch_index < index + r->region_num(); ch_index++) {
+        HeapRegion* chr = _regions.get_by_index(ch_index);
+
+        assert(chr->continuesHumongous(), "Must be humongous region");
+        assert(chr->humongous_start_region() == r,
+               err_msg("Must work on humongous continuation of the original start region "
+                       PTR_FORMAT ", but is " PTR_FORMAT, p2i(r), p2i(chr)));
+        assert(chr->claim_value() != claim_value,
+               "Must not have been claimed yet because claiming of humongous continuation first claims the start region");
+
+        bool claim_result = chr->claimHeapRegion(claim_value);
+        // We should always be able to claim it; no one else should
+        // be trying to claim this region.
+        guarantee(claim_result, "We should always be able to claim the continuesHumongous part of the humongous object");
+
+        bool res2 = blk->doHeapRegion(chr);
+        if (res2) {
+          return;
+        }
+
+        // Right now, this holds (i.e., no closure that actually
+        // does something with "continues humongous" regions
+        // clears them). We might have to weaken it in the future,
+        // but let's leave these two asserts here for extra safety.
+        assert(chr->continuesHumongous(), "should still be the case");
+        assert(chr->humongous_start_region() == r, "sanity");
+      }
+    }
+
+    bool res = blk->doHeapRegion(r);
     if (res) {
-      blk->incomplete();
       return;
     }
   }
 }
 
 uint HeapRegionSeq::shrink_by(uint num_regions_to_remove) {
-  // Reset this in case it's currently pointing into the regions that
-  // we just removed.
-  _next_search_index = 0;
-
   assert(length() > 0, "the region sequence should not be empty");
-  assert(length() <= _allocated_length, "invariant");
-  assert(_allocated_length > 0, "we should have at least one region committed");
+  assert(length() <= _allocated_heapregions_length, "invariant");
+  assert(_allocated_heapregions_length > 0, "we should have at least one region committed");
   assert(num_regions_to_remove < length(), "We should never remove all regions");
 
-  uint i = 0;
-  for (; i < num_regions_to_remove; i++) {
-    HeapRegion* cur = at(length() - 1);
-
-    if (!cur->is_empty()) {
-      // We have to give up if the region can not be moved
-      break;
-    }
-    assert(!cur->isHumongous(), "Humongous regions should not be empty");
-
-    decrement_length();
-  }
-  return i;
-}
-
-#ifndef PRODUCT
-void HeapRegionSeq::verify_optional() {
-  guarantee(length() <= _allocated_length,
+  if (num_regions_to_remove == 0) {
+    return 0;
+  }
+
+  uint removed = 0;
+  uint cur = _allocated_heapregions_length - 1;
+  uint idx_last_found = 0;
+  uint num_last_found = 0;
+
+  if ((num_last_found = find_empty_from_idx_reverse(cur, &idx_last_found)) > 0) {
+    // Only allow uncommit from the end of the heap.
+    if ((idx_last_found + num_last_found) != _allocated_heapregions_length) {
+      return 0;
+    }
+    uint to_remove = MIN2(num_regions_to_remove - removed, num_last_found);
+
+    uncommit_regions(idx_last_found + num_last_found - to_remove, to_remove);
+
+    cur -= num_last_found;
+    removed += to_remove;
+  }
+
+  verify_optional();
+
+  return removed;
+}
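
Worked example for the new shrink_by() (assumed state): with _allocated_heapregions_length == 10 and exactly regions 7-9 empty, shrink_by(5) finds the empty run [7, 10) at the end of the heap, uncommits those three regions and returns 3. If instead only regions 4 and 5 were empty, the run would not reach the end, the end-of-heap check would fail and shrink_by() would return 0 without uncommitting anything.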
+
+uint HeapRegionSeq::find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const {
+  guarantee(start_idx < _allocated_heapregions_length, "checking");
+  guarantee(res_idx != NULL, "checking");
+
+  uint num_regions_found = 0;
+
+  jlong cur = start_idx;
+  while (cur != -1 && !(is_available(cur) && at(cur)->is_empty())) {
+    cur--;
+  }
+  if (cur == -1) {
+    return num_regions_found;
+  }
+  jlong old_cur = cur;
+  // cur indexes the first empty region
+  while (cur != -1 && is_available(cur) && at(cur)->is_empty()) {
+    cur--;
+  }
+  *res_idx = cur + 1;
+  num_regions_found = old_cur - cur;
+
+#ifdef ASSERT
+  for (uint i = *res_idx; i < (*res_idx + num_regions_found); i++) {
+    assert(at(i)->is_empty(), "just checking");
+  }
+#endif
+  return num_regions_found;
+}
+
+void HeapRegionSeq::verify() {
+  guarantee(length() <= _allocated_heapregions_length,
             err_msg("invariant: _length: %u _allocated_length: %u",
-                    length(), _allocated_length));
-  guarantee(_allocated_length <= max_length(),
+                    length(), _allocated_heapregions_length));
+  guarantee(_allocated_heapregions_length <= max_length(),
             err_msg("invariant: _allocated_length: %u _max_length: %u",
-                    _allocated_length, max_length()));
-  guarantee(_next_search_index <= length(),
-            err_msg("invariant: _next_search_index: %u _length: %u",
-                    _next_search_index, length()));
-
+                    _allocated_heapregions_length, max_length()));
+
+  bool prev_committed = true;
+  uint num_committed = 0;
   HeapWord* prev_end = heap_bottom();
-  for (uint i = 0; i < _allocated_length; i += 1) {
+  for (uint i = 0; i < _allocated_heapregions_length; i++) {
+    if (!is_available(i)) {
+      prev_committed = false;
+      continue;
+    }
+    num_committed++;
     HeapRegion* hr = _regions.get_by_index(i);
     guarantee(hr != NULL, err_msg("invariant: i: %u", i));
-    guarantee(hr->bottom() == prev_end,
+    guarantee(!prev_committed || hr->bottom() == prev_end,
               err_msg("invariant i: %u "HR_FORMAT" prev_end: "PTR_FORMAT,
                       i, HR_FORMAT_PARAMS(hr), p2i(prev_end)));
     guarantee(hr->hrs_index() == i,
               err_msg("invariant: i: %u hrs_index(): %u", i, hr->hrs_index()));
-    if (i < length()) {
-      // Asserts will fire if i is >= _length
-      HeapWord* addr = hr->bottom();
-      guarantee(addr_to_region(addr) == hr, "sanity");
-    } else {
-      guarantee(hr->is_empty(), "sanity");
-      guarantee(!hr->isHumongous(), "sanity");
-      // using assert instead of guarantee here since containing_set()
-      // is only available in non-product builds.
-      assert(hr->containing_set() == NULL, "sanity");
-    }
+    // Asserts will fire if i is >= _length
+    HeapWord* addr = hr->bottom();
+    guarantee(addr_to_region(addr) == hr, "sanity");
+    // We cannot check whether the region is part of a particular set: at the time
+    // this method may be called, we have only completed allocation of the regions,
+    // but not put into a region set.
+    prev_committed = true;
     if (hr->startsHumongous()) {
       prev_end = hr->orig_end();
     } else {
       prev_end = hr->end();
     }
   }
-  for (uint i = _allocated_length; i < max_length(); i += 1) {
+  for (uint i = _allocated_heapregions_length; i < max_length(); i++) {
     guarantee(_regions.get_by_index(i) == NULL, err_msg("invariant i: %u", i));
   }
+
+  guarantee(num_committed == _num_committed, err_msg("Found %u committed regions, but should be %u", num_committed, _num_committed));
+  _free_list.verify();
+}
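
The rewritten verify() checks bottom()/prev_end contiguity only within a committed run (the prev_committed flag), since an uncommitted gap legitimately breaks the address chain. With the prefix-style is_available() introduced by this patch such gaps cannot occur yet, so this is presumably groundwork for allowing committed regions at arbitrary indices later in the series.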
+
+#ifndef PRODUCT
+void HeapRegionSeq::verify_optional() {
+  verify();
 }
 #endif // PRODUCT