Mercurial > hg > truffle
annotate src/share/vm/gc_implementation/g1/heapRegion.cpp @ 1886:72a161e62cc4
6991377: G1: race between concurrent refinement and humongous object allocation
Summary: There is a race between the concurrent refinement threads and the humongous object allocation that can cause the concurrent refinement threads to corrupt the part of the BOT that it is being initialized by the humongous object allocation operation. The solution is to do the humongous object allocation in careful steps to ensure that the concurrent refinement threads always have a consistent view over the BOT, region contents, and top. The fix includes some very minor tidying up in sparsePRT.
Reviewed-by: jcoomes, johnc, ysr
author | tonyp |
---|---|
date | Sat, 16 Oct 2010 17:12:19 -0400 |
parents | bb847e31b836 |
children | 2d26b0046e0d f95d63e2154a |
rev | line source |
---|---|
342 | 1 /* |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1550
diff
changeset
|
2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. |
342 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1550
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1550
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1550
diff
changeset
|
21 * questions. |
342 | 22 * |
23 */ | |
24 | |
25 #include "incls/_precompiled.incl" | |
26 #include "incls/_heapRegion.cpp.incl" | |
27 | |
942
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
// Region-size globals. All five are zero until setup_heap_region_size()
// computes them exactly once (each assignment there is protected by a
// guarantee() that the value is still zero).
int HeapRegion::LogOfHRGrainBytes = 0;
int HeapRegion::LogOfHRGrainWords = 0;
int HeapRegion::GrainBytes        = 0;
int HeapRegion::GrainWords        = 0;
int HeapRegion::CardsPerRegion    = 0;
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
33 |
// Dirty-card-to-oop closure for a single heap region. The filter kind
// (fk) selects how oops found while scanning cards are filtered in
// walk_mem_region_with_cl(). Note that the boundary passed to the base
// ContiguousSpaceDCTOC is NULL.
HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
                                 HeapRegion* hr, OopClosure* cl,
                                 CardTableModRefBS::PrecisionStyle precision,
                                 FilterKind fk) :
  ContiguousSpaceDCTOC(hr, cl, precision, NULL),
  _hr(hr), _fk(fk), _g1(g1)
{}
41 | |
// Closure that filters out references pointing outside the region
// [r->bottom(), r->end()); the wrapped closure oc only sees the rest.
// Note: the bounds are captured at construction time.
FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
                                                   OopClosure* oc) :
  _r_bottom(r->bottom()), _r_end(r->end()),
  _oc(oc), _out_of_region(0)
{}
47 | |
// Heap-verification closure: applied to each reference field of a live
// object (set via set_containing_obj()), it checks that the referent is
// inside the heap and not dead, and — outside of full collections —
// that any required remembered-set entry (or a covering dirty card)
// exists for a cross-region reference. Failures are logged to
// gclog_or_tty and counted rather than aborting immediately.
class VerifyLiveClosure: public OopClosure {
private:
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _bs;        // card table, for checking card state
  oop _containing_obj;           // object whose fields are being scanned
  bool _failures;                // any failure seen so far
  int _n_failures;               // one per failing containing object
  bool _use_prev_marking;        // which marking bitmap to consult
public:
  // use_prev_marking == true -> use "prev" marking information,
  // use_prev_marking == false -> use "next" marking information
  VerifyLiveClosure(G1CollectedHeap* g1h, bool use_prev_marking) :
    _g1h(g1h), _bs(NULL), _containing_obj(NULL),
    _failures(false), _n_failures(0), _use_prev_marking(use_prev_marking)
  {
    BarrierSet* bs = _g1h->barrier_set();
    if (bs->is_a(BarrierSet::CardTableModRef))
      _bs = (CardTableModRefBS*)bs;
  }

  void set_containing_obj(oop obj) {
    _containing_obj = obj;
  }

  bool failures() { return _failures; }
  int n_failures() { return _n_failures; }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  // In PRODUCT builds only the class name is available cheaply;
  // otherwise dump the whole object.
  void print_object(outputStream* out, oop obj) {
#ifdef PRODUCT
    klassOop k = obj->klass();
    const char* class_name = instanceKlass::cast(k)->external_name();
    out->print_cr("class name %s", class_name);
#else // PRODUCT
    obj->print_on(out);
#endif // PRODUCT
  }

  // Templated on the field type so both narrowOop* and oop* work.
  template <class T> void do_oop_work(T* p) {
    assert(_containing_obj != NULL, "Precondition");
    assert(!_g1h->is_obj_dead_cond(_containing_obj, _use_prev_marking),
           "Precondition");
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      bool failed = false;
      // Check 1: the referent must be in the heap and alive.
      if (!_g1h->is_in_closed_subset(obj) ||
          _g1h->is_obj_dead_cond(obj, _use_prev_marking)) {
        if (!_failures) {
          gclog_or_tty->print_cr("");
          gclog_or_tty->print_cr("----------");
        }
        if (!_g1h->is_in_closed_subset(obj)) {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                                 " of live obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 p, (void*) _containing_obj,
                                 from->bottom(), from->end());
          print_object(gclog_or_tty, _containing_obj);
          gclog_or_tty->print_cr("points to obj "PTR_FORMAT" not in the heap",
                                 (void*) obj);
        } else {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          HeapRegion* to   = _g1h->heap_region_containing((HeapWord*)obj);
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                                 " of live obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 p, (void*) _containing_obj,
                                 from->bottom(), from->end());
          print_object(gclog_or_tty, _containing_obj);
          gclog_or_tty->print_cr("points to dead obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 (void*) obj, to->bottom(), to->end());
          print_object(gclog_or_tty, obj);
        }
        gclog_or_tty->print_cr("----------");
        _failures = true;
        failed = true;
        _n_failures++;
      }

      // Check 2 (skipped during full GC): a cross-region reference to a
      // non-humongous region must be covered by a rem-set entry, unless
      // the source region is young or a dirty card excuses the missing
      // entry (log buffers may not have been flushed).
      if (!_g1h->full_collection()) {
        HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
        HeapRegion* to   = _g1h->heap_region_containing(obj);
        if (from != NULL && to != NULL &&
            from != to &&
            !to->isHumongous()) {
          jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
          jbyte cv_field = *_bs->byte_for_const(p);
          const jbyte dirty = CardTableModRefBS::dirty_card_val();

          // For objArrays only the card of the field matters; for other
          // objects a dirty card at the object head also suffices.
          bool is_bad = !(from->is_young()
                          || to->rem_set()->contains_reference(p)
                          || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed
                          (_containing_obj->is_objArray() ?
                              cv_field == dirty
                              : cv_obj == dirty || cv_field == dirty));
          if (is_bad) {
            if (!_failures) {
              gclog_or_tty->print_cr("");
              gclog_or_tty->print_cr("----------");
            }
            gclog_or_tty->print_cr("Missing rem set entry:");
            gclog_or_tty->print_cr("Field "PTR_FORMAT
                                   " of obj "PTR_FORMAT
                                   ", in region %d ["PTR_FORMAT
                                   ", "PTR_FORMAT"),",
                                   p, (void*) _containing_obj,
                                   from->hrs_index(),
                                   from->bottom(),
                                   from->end());
            _containing_obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("points to obj "PTR_FORMAT
                                   " in region %d ["PTR_FORMAT
                                   ", "PTR_FORMAT").",
                                   (void*) obj, to->hrs_index(),
                                   to->bottom(), to->end());
            obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
                                   cv_obj, cv_field);
            gclog_or_tty->print_cr("----------");
            _failures = true;
            // Count at most one failure per containing object.
            if (!failed) _n_failures++;
          }
        }
      }
    }
  }
};
180 | |
181 template<class ClosureType> | |
182 HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h, | |
183 HeapRegion* hr, | |
184 HeapWord* cur, HeapWord* top) { | |
185 oop cur_oop = oop(cur); | |
186 int oop_size = cur_oop->size(); | |
187 HeapWord* next_obj = cur + oop_size; | |
188 while (next_obj < top) { | |
189 // Keep filtering the remembered set. | |
190 if (!g1h->is_obj_dead(cur_oop, hr)) { | |
191 // Bottom lies entirely below top, so we can call the | |
192 // non-memRegion version of oop_iterate below. | |
193 cur_oop->oop_iterate(cl); | |
194 } | |
195 cur = next_obj; | |
196 cur_oop = oop(cur); | |
197 oop_size = cur_oop->size(); | |
198 next_obj = cur + oop_size; | |
199 } | |
200 return cur; | |
201 } | |
202 | |
// Applies the oop closure to all live objects in [bottom, top),
// clipping the first and last object to 'mr'. The filter kind chosen
// at construction wraps 'cl' appropriately; the interior loop is
// instantiated per filter type so the closure calls can be statically
// bound (see walk_mem_region_loop above).
void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr,
                                              HeapWord* bottom,
                                              HeapWord* top,
                                              OopClosure* cl) {
  G1CollectedHeap* g1h = _g1;

  int oop_size;

  // cl2 is the (possibly filtered) closure used for the boundary
  // objects, which need the memRegion-bounded iterate.
  OopClosure* cl2 = cl;
  FilterIntoCSClosure intoCSFilt(this, g1h, cl);
  FilterOutOfRegionClosure outOfRegionFilt(_hr, cl);
  switch (_fk) {
  case IntoCSFilterKind:      cl2 = &intoCSFilt;      break;
  case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break;
  }

  // Start filtering what we add to the remembered set. If the object is
  // not considered dead, either because it is marked (in the mark bitmap)
  // or it was allocated after marking finished, then we add it. Otherwise
  // we can safely ignore the object.
  if (!g1h->is_obj_dead(oop(bottom), _hr)) {
    oop_size = oop(bottom)->oop_iterate(cl2, mr);
  } else {
    oop_size = oop(bottom)->size();
  }

  bottom += oop_size;

  if (bottom < top) {
    // We replicate the loop below for several kinds of possible filters.
    switch (_fk) {
    case NoFilterKind:
      bottom = walk_mem_region_loop(cl, g1h, _hr, bottom, top);
      break;
    case IntoCSFilterKind: {
      FilterIntoCSClosure filt(this, g1h, cl);
      bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
      break;
    }
    case OutOfRegionFilterKind: {
      FilterOutOfRegionClosure filt(_hr, cl);
      bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
      break;
    }
    default:
      ShouldNotReachHere();
    }

    // Last object. Need to do dead-obj filtering here too.
    if (!g1h->is_obj_dead(oop(bottom), _hr)) {
      oop(bottom)->oop_iterate(cl2, mr);
    }
  }
}
257 | |
942
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
// Minimum region size; we won't go lower than that.
// We might want to decrease this in the future, to deal with small
// heaps a bit more efficiently.
#define MIN_REGION_SIZE     (      1024 * 1024 )

// Maximum region size; we don't go higher than that. There's a good
// reason for having an upper bound. We don't want regions to get too
// large, otherwise cleanup's effectiveness would decrease as there
// will be fewer opportunities to find totally empty regions after
// marking.
#define MAX_REGION_SIZE     ( 32 * 1024 * 1024 )

// The automatic region size calculation will try to have around this
// many regions in the heap (based on the min heap size).
#define TARGET_REGION_NUMBER          2048

// Computes the G1 region size (either from the G1HeapRegionSize flag
// or automatically from min_heap_size), rounds it down to a power of
// two within [MIN_REGION_SIZE, MAX_REGION_SIZE], and initializes the
// five region-size globals. Must be called exactly once.
void HeapRegion::setup_heap_region_size(uintx min_heap_size) {
  // region_size in bytes
  uintx region_size = G1HeapRegionSize;
  if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
    // We base the automatic calculation on the min heap size. This
    // can be problematic if the spread between min and max is quite
    // wide, imagine -Xms128m -Xmx32g. But, if we decided it based on
    // the max size, the region size might be way too large for the
    // min size. Either way, some users might have to set the region
    // size manually for some -Xms / -Xmx combos.

    region_size = MAX2(min_heap_size / TARGET_REGION_NUMBER,
                       (uintx) MIN_REGION_SIZE);
  }

  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = ((uintx)1 << region_size_log);

  // Now make sure that we don't go over or under our limits.
  if (region_size < MIN_REGION_SIZE) {
    region_size = MIN_REGION_SIZE;
  } else if (region_size > MAX_REGION_SIZE) {
    region_size = MAX_REGION_SIZE;
  }

  // And recalculate the log (clamping above may have changed the size).
  region_size_log = log2_long((jlong) region_size);

  // Now, set up the globals.
  guarantee(LogOfHRGrainBytes == 0, "we should only set it once");
  LogOfHRGrainBytes = region_size_log;

  guarantee(LogOfHRGrainWords == 0, "we should only set it once");
  LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize;

  guarantee(GrainBytes == 0, "we should only set it once");
  // The cast to int is safe, given that we've bounded region_size by
  // MIN_REGION_SIZE and MAX_REGION_SIZE.
  GrainBytes = (int) region_size;

  guarantee(GrainWords == 0, "we should only set it once");
  GrainWords = GrainBytes >> LogHeapWordSize;
  guarantee(1 << LogOfHRGrainWords == GrainWords, "sanity");

  guarantee(CardsPerRegion == 0, "we should only set it once");
  CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
}
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
324 |
void HeapRegion::reset_after_compaction() {
  G1OffsetTableContigSpace::reset_after_compaction();
  // After a compaction the mark bitmap is invalid, so we must
  // treat all objects as being inside the unmarked area.
  zero_marked_bytes();
  init_top_at_mark_start();
}
332 | |
// Factory for this region's dirty-card-to-oop closure; the caller owns
// the returned (heap-allocated) closure.
DirtyCardToOopClosure*
HeapRegion::new_dcto_closure(OopClosure* cl,
                             CardTableModRefBS::PrecisionStyle precision,
                             HeapRegionDCTOC::FilterKind fk) {
  return new HeapRegionDCTOC(G1CollectedHeap::heap(),
                             this, cl, precision, fk);
}
340 | |
341 void HeapRegion::hr_clear(bool par, bool clear_space) { | |
355 | 342 _humongous_type = NotHumongous; |
342 | 343 _humongous_start_region = NULL; |
344 _in_collection_set = false; | |
345 _is_gc_alloc_region = false; | |
346 | |
347 // Age stuff (if parallel, this will be done separately, since it needs | |
348 // to be sequential). | |
349 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
350 | |
351 set_young_index_in_cset(-1); | |
352 uninstall_surv_rate_group(); | |
353 set_young_type(NotYoung); | |
354 | |
355 // In case it had been the start of a humongous sequence, reset its end. | |
356 set_end(_orig_end); | |
357 | |
358 if (!par) { | |
359 // If this is parallel, this will be done later. | |
360 HeapRegionRemSet* hrrs = rem_set(); | |
361 if (hrrs != NULL) hrrs->clear(); | |
355 | 362 _claimed = InitialClaimValue; |
342 | 363 } |
364 zero_marked_bytes(); | |
365 set_sort_index(-1); | |
366 | |
367 _offsets.resize(HeapRegion::GrainWords); | |
368 init_top_at_mark_start(); | |
356 | 369 if (clear_space) clear(SpaceDecorator::Mangle); |
342 | 370 } |
371 | |
// <PREDICTION>
// GC efficiency = reclaimable garbage bytes per predicted millisecond
// of (non-young) collection time; used to rank candidate regions.
void HeapRegion::calc_gc_efficiency() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  _gc_efficiency = (double) garbage_bytes() /
                            g1h->predict_region_elapsed_time_ms(this, false);
}
// </PREDICTION>
379 | |
1886
72a161e62cc4
6991377: G1: race between concurrent refinement and humongous object allocation
tonyp
parents:
1718
diff
changeset
|
// Marks this (empty, normal-sized) region as the first region of a
// humongous object and extends its end to new_end. Per the fix for
// 6991377, the humongous allocation is performed in careful steps so
// that concurrent refinement threads always observe a consistent view
// of the BOT, region contents, and top; do not reorder the statements
// below without revisiting that race.
void HeapRegion::set_startsHumongous(HeapWord* new_end) {
  assert(end() == _orig_end,
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");

  _humongous_type = StartsHumongous;
  _humongous_start_region = this;

  set_end(new_end);
  _offsets.set_for_starts_humongous(new_end);
}
72a161e62cc4
6991377: G1: race between concurrent refinement and humongous object allocation
tonyp
parents:
1718
diff
changeset
|
391 |
72a161e62cc4
6991377: G1: race between concurrent refinement and humongous object allocation
tonyp
parents:
1718
diff
changeset
|
// Marks this (empty, normal-sized) region as a continuation region of
// the humongous object that begins in 'start'. 'start' must already
// have been set up via set_startsHumongous().
void HeapRegion::set_continuesHumongous(HeapRegion* start) {
  assert(end() == _orig_end,
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(start->startsHumongous(), "pre-condition");

  _humongous_type = ContinuesHumongous;
  _humongous_start_region = start;
}
401 | |
// Best-effort, lock-free claim of this region with the given claim
// value. Returns true only if this thread's cmpxchg installed the
// value. A single attempt is made: if the region already carries
// claimValue, or another thread races us, false is returned.
bool HeapRegion::claimHeapRegion(jint claimValue) {
  jint current = _claimed;
  if (current != claimValue) {
    jint res = Atomic::cmpxchg(claimValue, &_claimed, current);
    if (res == current) {
      return true;
    }
  }
  return false;
}
412 | |
// Binary search for the first block start at or after 'addr', using
// only the "careful" (concurrent-safe) block_start query. Returns a
// value in (addr, end()] that is a block boundary.
HeapWord* HeapRegion::next_block_start_careful(HeapWord* addr) {
  HeapWord* low = addr;
  HeapWord* high = end();
  while (low < high) {
    size_t diff = pointer_delta(high, low);
    // Must add one below to bias toward the high amount. Otherwise, if
    // "high" were at the desired value, and "low" were one less, we
    // would not converge on "high". This is not symmetric, because
    // we set "high" to a block start, which might be the right one,
    // which we don't do for "low".
    HeapWord* middle = low + (diff+1)/2;
    if (middle == high) return high;
    HeapWord* mid_bs = block_start_careful(middle);
    if (mid_bs < addr) {
      low = middle;
    } else {
      high = mid_bs;
    }
  }
  assert(low == high && low >= addr, "Didn't work.");
  return low;
}
435 | |
// Links 'r' as this region's successor on the unclean list; 'r' must
// itself already be flagged as being on the unclean list (or be NULL).
void HeapRegion::set_next_on_unclean_list(HeapRegion* r) {
  assert(r == NULL || r->is_on_unclean_list(), "Malformed unclean list.");
  _next_in_special_set = r;
}
440 | |
// Flags whether this region is currently a member of the unclean list.
void HeapRegion::set_on_unclean_list(bool b) {
  _is_on_unclean_list = b;
}
444 | |
// Initializes the underlying contiguous space over 'mr' (never asking
// the base class to clear — hr_clear handles that) and then resets all
// HeapRegion state non-parallelly.
void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  G1OffsetTableContigSpace::initialize(mr, false, mangle_space);
  hr_clear(false/*par*/, clear_space);
}
449 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away | |
450 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list | |
451 #endif // _MSC_VER | |
452 | |
453 | |
// Constructs a heap region over 'mr'. is_zeroed indicates the memory is
// already zero-filled, in which case initialize() is told not to clear
// it again. The remembered set is allocated here and shares the BOT
// storage passed in.
HeapRegion::
HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
           MemRegion mr, bool is_zeroed)
  : G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed),
    _next_fk(HeapRegionDCTOC::NoFilterKind),
    _hrs_index(-1),
    _humongous_type(NotHumongous), _humongous_start_region(NULL),
    _in_collection_set(false), _is_gc_alloc_region(false),
    _is_on_free_list(false), _is_on_unclean_list(false),
    _next_in_special_set(NULL), _orig_end(NULL),
    _claimed(InitialClaimValue), _evacuation_failed(false),
    _prev_marked_bytes(0), _next_marked_bytes(0), _sort_index(-1),
    _young_type(NotYoung), _next_young_region(NULL),
    _next_dirty_cards_region(NULL),
    _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
    _rem_set(NULL), _zfs(NotZeroFilled),
    _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
    _predicted_bytes_to_copy(0)
{
  // Remember the region's natural end so humongous resizing can be undone.
  _orig_end = mr.end();
  // Note that initialize() will set the start of the unmarked area of the
  // region.
  this->initialize(mr, !is_zeroed, SpaceDecorator::Mangle);
  set_top(bottom());
  set_saved_mark();

  _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);

  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
  // In case the region is allocated during a pause, note the top.
  // We haven't done any counting on a brand new region.
  _top_at_conc_mark_count = bottom();
}
487 | |
488 class NextCompactionHeapRegionClosure: public HeapRegionClosure { | |
489 const HeapRegion* _target; | |
490 bool _target_seen; | |
491 HeapRegion* _last; | |
492 CompactibleSpace* _res; | |
493 public: | |
494 NextCompactionHeapRegionClosure(const HeapRegion* target) : | |
495 _target(target), _target_seen(false), _res(NULL) {} | |
496 bool doHeapRegion(HeapRegion* cur) { | |
497 if (_target_seen) { | |
498 if (!cur->isHumongous()) { | |
499 _res = cur; | |
500 return true; | |
501 } | |
502 } else if (cur == _target) { | |
503 _target_seen = true; | |
504 } | |
505 return false; | |
506 } | |
507 CompactibleSpace* result() { return _res; } | |
508 }; | |
509 | |
// Returns the next region (in heap iteration order, after this one) that can
// be used as a compaction target, skipping humongous regions; NULL if none.
CompactibleSpace* HeapRegion::next_compaction_space() const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  // cast away const-ness
  HeapRegion* r = (HeapRegion*) this;
  NextCompactionHeapRegionClosure blk(r);
  // Iterate starting from this region; the closure stops at the first
  // non-humongous region after it.
  g1h->heap_region_iterate_from(r, &blk);
  return blk.result();
}
518 | |
// Record the current allocation high-water mark; simply delegates to the
// time-stamped set_saved_mark() defined on G1OffsetTableContigSpace.
void HeapRegion::save_marks() {
  set_saved_mark();
}
522 | |
523 void HeapRegion::oops_in_mr_iterate(MemRegion mr, OopClosure* cl) { | |
524 HeapWord* p = mr.start(); | |
525 HeapWord* e = mr.end(); | |
526 oop obj; | |
527 while (p < e) { | |
528 obj = oop(p); | |
529 p += obj->oop_iterate(cl); | |
530 } | |
531 assert(p == e, "bad memregion: doesn't end on obj boundary"); | |
532 } | |
533 | |
// For each (closure-type, nv-suffix) pair in the specialized-closures list,
// instantiate the trivial HeapRegion override that just delegates
// since-save-marks iteration to ContiguousSpace.
#define HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
void HeapRegion::oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
  ContiguousSpace::oop_since_save_marks_iterate##nv_suffix(cl); \
}
SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN)
539 | |
540 | |
// Apply the closure to all oops in objects allocated before the saved mark,
// i.e. in [bottom(), saved_mark_word()).
void HeapRegion::oop_before_save_marks_iterate(OopClosure* cl) {
  oops_in_mr_iterate(MemRegion(bottom(), saved_mark_word()), cl);
}
544 | |
#ifdef DEBUG
// Debug-only wrapper around G1OffsetTableContigSpace::allocate() that checks
// the zero-fill invariant: when allocations are required to be zero-filled,
// we may only allocate in regions whose zero-fill state is Allocated.
//
// Fix: the original computed `jint state = zero_fill_state();` and never
// used it — dead local removed.
HeapWord* HeapRegion::allocate(size_t size) {
  assert(!G1CollectedHeap::heap()->allocs_are_zero_filled() ||
         zero_fill_is_allocated(),
         "When ZF is on, only alloc in ZF'd regions");
  return G1OffsetTableContigSpace::allocate(size);
}
#endif
554 | |
// Set the region's zero-fill state.  Caller must either hold ZF_mon or be
// inside a full (stop-world) GC.
void HeapRegion::set_zero_fill_state_work(ZeroFillState zfs) {
  assert(ZF_mon->owned_by_self() ||
         Universe::heap()->is_gc_active(),
         "Must hold the lock or be a full GC to modify.");
#ifdef ASSERT
  // The `if` guard is the exact negation of the assert condition: we only
  // pay for the ResourceMark / region print when the assert is going to
  // fire, so its message can include the region's printed state.
  if (top() != bottom() && zfs != Allocated) {
    ResourceMark rm;
    stringStream region_str;
    print_on(&region_str);
    assert(top() == bottom() || zfs == Allocated,
           err_msg("Region must be empty, or we must be setting it to allocated. "
                   "_zfs=%d, zfs=%d, region: %s", _zfs, zfs, region_str.as_string()));
  }
#endif
  _zfs = zfs;
}
571 | |
// Mark the region as zero-filled and, if we hold ZF_mon, wake any threads
// waiting (in ensure_zero_filled_locked) for this region's fill to finish.
void HeapRegion::set_zero_fill_complete() {
  set_zero_fill_state_work(ZeroFilled);
  if (ZF_mon->owned_by_self()) {
    ZF_mon->notify_all();
  }
}
578 | |
579 | |
// Public entry point: acquire ZF_mon (without a safepoint check) and make
// sure this region is zero-filled before returning.
void HeapRegion::ensure_zero_filled() {
  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  ensure_zero_filled_locked();
}
584 | |
// Guarantee the region is zero-filled on return.  Caller holds ZF_mon.
// If at a safepoint we may "steal" an in-progress fill; otherwise we wait
// for the concurrent ZF thread.
void HeapRegion::ensure_zero_filled_locked() {
  assert(ZF_mon->owned_by_self(), "Precondition");
  bool should_ignore_zf = SafepointSynchronize::is_at_safepoint();
  assert(should_ignore_zf || Heap_lock->is_locked(),
         "Either we're in a GC or we're allocating a region.");
  switch (zero_fill_state()) {
  case HeapRegion::NotZeroFilled:
    // Claim the fill ourselves; drop ZF_mon around the (long) fill so
    // other threads aren't blocked, then reacquire it.
    set_zero_fill_in_progress(Thread::current());
    {
      ZF_mon->unlock();
      Copy::fill_to_words(bottom(), capacity()/HeapWordSize);
      ZF_mon->lock_without_safepoint_check();
    }
    // A trap.
    guarantee(zero_fill_state() == HeapRegion::ZeroFilling
              && zero_filler() == Thread::current(),
              "AHA! Tell Dave D if you see this...");
    set_zero_fill_complete();
    // gclog_or_tty->print_cr("Did sync ZF.");
    ConcurrentZFThread::note_sync_zfs();
    break;
  case HeapRegion::ZeroFilling:
    if (should_ignore_zf) {
      // We can "break" the lock and take over the work.
      Copy::fill_to_words(bottom(), capacity()/HeapWordSize);
      set_zero_fill_complete();
      ConcurrentZFThread::note_sync_zfs();
      break;
    } else {
      ConcurrentZFThread::wait_for_ZF_completed(this);
    }
    // NOTE: deliberate fall-through from the else-branch above into the
    // ZeroFilled case — after waiting, the fill is complete.
  case HeapRegion::ZeroFilled:
    // Nothing to do.
    break;
  case HeapRegion::Allocated:
    guarantee(false, "Should not call on allocated regions.");
  }
  assert(zero_fill_state() == HeapRegion::ZeroFilled, "Post");
}
624 | |
// Apply cl to every live object intersecting mr, being careful about
// unparseable points (objects whose klass word has not been set yet by a
// concurrent allocator).  Returns the address of the first unparseable
// point encountered (so the caller can retry), or NULL if the whole range
// was processed.
HeapWord*
HeapRegion::object_iterate_mem_careful(MemRegion mr,
                                                 ObjectClosure* cl) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  // We used to use "block_start_careful" here.  But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(mr.start());
  mr = mr.intersection(used_region());
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  // block_start must return a block at or before mr.start() that either
  // is unparseable or extends onto mr.start().
  assert(cur <= mr.start()
         && (oop(cur)->klass_or_null() == NULL ||
             cur + oop(cur)->size() > mr.start()),
         "postcondition of block_start");
  oop obj;
  while (cur < mr.end()) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    } else if (!g1h->is_obj_dead(obj)) {
      cl->do_object(obj);
    }
    if (cl->abort()) return cur;
    // The check above must occur before the operation below, since an
    // abort might invalidate the "size" operation.
    cur += obj->size();
  }
  return NULL;
}
656 | |
// Apply cl to the oops of live objects on the card covered by mr, being
// careful about unparseable points (concurrently-allocated objects whose
// klass word is not yet set).  Returns the first unparseable address found
// (caller will re-enqueue the card), or NULL when the card was fully
// processed or deliberately skipped (empty intersection / young region).
HeapWord*
HeapRegion::
oops_on_card_seq_iterate_careful(MemRegion mr,
                                 FilterOutOfRegionClosure* cl,
                                 bool filter_young) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we're within a stop-world GC, then we might look at a card in a
  // GC alloc region that extends onto a GC LAB, which may not be
  // parseable.  Stop such at the "saved_mark" of the region.
  if (G1CollectedHeap::heap()->is_gc_active()) {
    mr = mr.intersection(used_region_at_save_marks());
  } else {
    mr = mr.intersection(used_region());
  }
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  // The intersection of the incoming mr (for the card) and the
  // allocated part of the region is non-empty. This implies that
  // we have actually allocated into this region. The code in
  // G1CollectedHeap.cpp that allocates a new region sets the
  // is_young tag on the region before allocating. Thus we
  // safely know if this region is young.
  if (is_young() && filter_young) {
    return NULL;
  }

  assert(!is_young(), "check value of filter_young");

  // We used to use "block_start_careful" here.  But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(mr.start());
  assert(cur <= mr.start(), "Postcondition");

  // Walk forward from the block start to the object that overlaps
  // mr.start(), stopping early at any unparseable point.
  while (cur <= mr.start()) {
    if (oop(cur)->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }
    // Otherwise...
    int sz = oop(cur)->size();
    if (cur + sz > mr.start()) break;
    // Otherwise, go on.
    cur = cur + sz;
  }
  oop obj;
  obj = oop(cur);
  // If we finish this loop...
  assert(cur <= mr.start()
         && obj->klass_or_null() != NULL
         && cur + obj->size() > mr.start(),
         "Loop postcondition");
  // First (boundary-straddling) object: iterate only the part inside mr.
  if (!g1h->is_obj_dead(obj)) {
    obj->oop_iterate(cl, mr);
  }

  HeapWord* next;
  while (cur < mr.end()) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    };
    // Otherwise:
    next = (cur + obj->size());
    if (!g1h->is_obj_dead(obj)) {
      if (next < mr.end()) {
        obj->oop_iterate(cl);
      } else {
        // this obj spans the boundary.  If it's an array, stop at the
        // boundary.
        if (obj->is_objArray()) {
          obj->oop_iterate(cl, mr);
        } else {
          obj->oop_iterate(cl);
        }
      }
    }
    cur = next;
  }
  return NULL;
}
740 | |
// Convenience: print this region's one-line summary to the GC log stream.
void HeapRegion::print() const { print_on(gclog_or_tty); }
// Print a one-line, column-aligned summary of the region's flags
// (humongous start/continues, collection-set, GC-alloc, young/survivor,
// free), its GC time stamp, the prev/next top-at-mark-start pointers, and
// finally the underlying space's bounds.
// NOTE(review): the blank-padding literals below may have had runs of
// spaces collapsed by the extraction of this listing — confirm the
// original column widths against the repository before relying on exact
// output alignment.
void HeapRegion::print_on(outputStream* st) const {
  if (isHumongous()) {
    if (startsHumongous())
      st->print(" HS");
    else
      st->print(" HC");
  } else {
    st->print(" ");
  }
  if (in_collection_set())
    st->print(" CS");
  else if (is_gc_alloc_region())
    st->print(" A ");
  else
    st->print(" ");
  if (is_young())
    st->print(is_survivor() ? " SU" : " Y ");
  else
    st->print(" ");
  if (is_empty())
    st->print(" F");
  else
    st->print(" ");
  st->print(" %5d", _gc_time_stamp);
  st->print(" PTAMS "PTR_FORMAT" NTAMS "PTR_FORMAT,
            prev_top_at_mark_start(), next_top_at_mark_start());
  G1OffsetTableContigSpace::print_on(st);
}
770 | |
// Verify the region using the "previous" marking information, discarding
// the failure flag (this overload is used where a failure is fatal inside
// the three-argument verify itself).
void HeapRegion::verify(bool allow_dirty) const {
  bool dummy = false;
  verify(allow_dirty, /* use_prev_marking */ true, /* failures */ &dummy);
}
775 | |
// Sampling intervals for the verification loop below: 0 => verify every
// object; BLOCK_SAMPLE_INTERVAL => check the BOT only every N objects.
#define OBJ_SAMPLE_INTERVAL 0
#define BLOCK_SAMPLE_INTERVAL 100

// This really ought to be commoned up into OffsetTableContigSpace somehow.
// We would need a mechanism to make that code skip dead objects.

// Walk every object in [bottom(), top()) and check:
//  - object size is humongous iff the region is humongous;
//  - the block-offset table maps interior addresses back to object starts
//    (sampled every BLOCK_SAMPLE_INTERVAL objects, plus the tail gap);
//  - sampled live objects are valid oops with valid perm-gen klasses, and
//    their outgoing references pass VerifyLiveClosure;
//  - a humongous region contains at most one object;
//  - the walk ends exactly at top().
// Sets *failures on any problem; returns early on fatal inconsistencies.
void HeapRegion::verify(bool allow_dirty,
                        bool use_prev_marking,
                        bool* failures) const {
  G1CollectedHeap* g1 = G1CollectedHeap::heap();
  *failures = false;
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  int objs = 0;
  int blocks = 0;
  VerifyLiveClosure vl_cl(g1, use_prev_marking);
  bool is_humongous = isHumongous();
  size_t object_num = 0;
  while (p < top()) {
    size_t size = oop(p)->size();
    // Humongous-ness of each object must agree with the region's.
    if (is_humongous != g1->isHumongous(size)) {
      gclog_or_tty->print_cr("obj "PTR_FORMAT" is of %shumongous size ("
                             SIZE_FORMAT" words) in a %shumongous region",
                             p, g1->isHumongous(size) ? "" : "non-",
                             size, is_humongous ? "" : "non-");
      *failures = true;
    }
    object_num += 1;
    if (blocks == BLOCK_SAMPLE_INTERVAL) {
      // BOT check: an address in the middle of this object must map back
      // to the object's start.
      HeapWord* res = block_start_const(p + (size/2));
      if (p != res) {
        gclog_or_tty->print_cr("offset computation 1 for "PTR_FORMAT" and "
                               SIZE_FORMAT" returned "PTR_FORMAT,
                               p, size, res);
        *failures = true;
        return;
      }
      blocks = 0;
    } else {
      blocks++;
    }
    if (objs == OBJ_SAMPLE_INTERVAL) {
      oop obj = oop(p);
      if (!g1->is_obj_dead_cond(obj, this, use_prev_marking)) {
        if (obj->is_oop()) {
          klassOop klass = obj->klass();
          if (!klass->is_perm()) {
            gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
                                   "not in perm", klass, obj);
            *failures = true;
            return;
          } else if (!klass->is_klass()) {
            gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
                                   "not a klass", klass, obj);
            *failures = true;
            return;
          } else {
            // Verify the object's outgoing references.
            vl_cl.set_containing_obj(obj);
            obj->oop_iterate(&vl_cl);
            if (vl_cl.failures()) {
              *failures = true;
            }
            if (G1MaxVerifyFailures >= 0 &&
                vl_cl.n_failures() >= G1MaxVerifyFailures) {
              return;
            }
          }
        } else {
          // NOTE(review): message text has a typo ("no an oop"); left
          // unchanged here since altering it changes runtime output.
          gclog_or_tty->print_cr(PTR_FORMAT" no an oop", obj);
          *failures = true;
          return;
        }
      }
      objs = 0;
    } else {
      objs++;
    }
    prev_p = p;
    p += size;
  }
  // BOT check for the unallocated tail [top(), end()): any address there
  // must map back to top().
  HeapWord* rend = end();
  HeapWord* rtop = top();
  if (rtop < rend) {
    HeapWord* res = block_start_const(rtop + (rend - rtop) / 2);
    if (res != rtop) {
      gclog_or_tty->print_cr("offset computation 2 for "PTR_FORMAT" and "
                             PTR_FORMAT" returned "PTR_FORMAT,
                             rtop, rend, res);
      *failures = true;
      return;
    }
  }

  if (is_humongous && object_num > 1) {
    gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous "
                           "but has "SIZE_FORMAT", objects",
                           bottom(), end(), object_num);
    *failures = true;
  }

  if (p != top()) {
    gclog_or_tty->print_cr("end of last object "PTR_FORMAT" "
                           "does not match top "PTR_FORMAT, p, top());
    *failures = true;
    return;
  }
}
883 | |
884 // G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go | |
885 // away eventually. | |
886 | |
// Initialize the space over mr and reset the block-offset table's bottom
// entry and refinement threshold; optionally clear (and mangle) the space.
void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  // false ==> we'll do the clearing if there's clearing to be done.
  ContiguousSpace::initialize(mr, false, mangle_space);
  _offsets.zero_bottom_entry();
  _offsets.initialize_threshold();
  if (clear_space) clear(mangle_space);
}
894 | |
// Clear the space and reset the BOT state to match the now-empty space.
void G1OffsetTableContigSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  _offsets.zero_bottom_entry();
  _offsets.initialize_threshold();
}
900 | |
// Move the space's bottom, keeping the block-offset table in sync.
void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}
905 | |
// Move the space's end, resizing the block-offset table to the new extent.
void G1OffsetTableContigSpace::set_end(HeapWord* new_end) {
  Space::set_end(new_end);
  _offsets.resize(new_end - bottom());
}
910 | |
// Print the space's short description plus its [bottom, top, BOT threshold,
// end) addresses.
void G1OffsetTableContigSpace::print() const {
  print_short();
  gclog_or_tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
                INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                bottom(), top(), _offsets.threshold(), end());
}
917 | |
// Reset the BOT's update threshold; returns the new threshold address.
HeapWord* G1OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}
921 | |
// Record in the BOT a newly allocated block [start, end) that crossed the
// current threshold; returns the updated threshold.
HeapWord* G1OffsetTableContigSpace::cross_threshold(HeapWord* start,
                                                    HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}
927 | |
928 HeapWord* G1OffsetTableContigSpace::saved_mark_word() const { | |
929 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
930 assert( _gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant" ); | |
931 if (_gc_time_stamp < g1h->get_gc_time_stamp()) | |
932 return top(); | |
933 else | |
934 return ContiguousSpace::saved_mark_word(); | |
935 } | |
936 | |
// Record top() as the saved mark and stamp the space with the current GC
// time stamp.  The write ordering (saved mark, storestore barrier, stamp)
// is load-bearing for concurrent readers — do not reorder.
void G1OffsetTableContigSpace::set_saved_mark() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();

  if (_gc_time_stamp < curr_gc_time_stamp) {
    // The order of these is important, as another thread might be
    // about to start scanning this region. If it does so after
    // set_saved_mark and before _gc_time_stamp = ..., then the latter
    // will be false, and it will pick up top() as the high water mark
    // of region. If it does so after _gc_time_stamp = ..., then it
    // will pick up the right saved_mark_word() as the high water mark
    // of the region. Either way, the behaviour will be correct.
    ContiguousSpace::set_saved_mark();
    OrderAccess::storestore();
    _gc_time_stamp = curr_gc_time_stamp;
    // The following fence is to force a flush of the writes above, but
    // is strictly not needed because when an allocating worker thread
    // calls set_saved_mark() it does so under the ParGCRareEvent_lock;
    // when the lock is released, the write will be flushed.
    // OrderAccess::fence();
  }
}
959 | |
// Construct the space over mr: wire the shared BOT array to this space,
// create the per-space parallel-allocation lock, zero the GC time stamp,
// and initialize (clearing only if the memory was not already zeroed).
G1OffsetTableContigSpace::
G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr, bool is_zeroed) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
  _gc_time_stamp(0)
{
  _offsets.set_space(this);
  initialize(mr, !is_zeroed, SpaceDecorator::Mangle);
}
970 | |
971 size_t RegionList::length() { | |
972 size_t len = 0; | |
973 HeapRegion* cur = hd(); | |
974 DEBUG_ONLY(HeapRegion* last = NULL); | |
975 while (cur != NULL) { | |
976 len++; | |
977 DEBUG_ONLY(last = cur); | |
978 cur = get_next(cur); | |
979 } | |
980 assert(last == tl(), "Invariant"); | |
981 return len; | |
982 } | |
983 | |
// Push r onto the front of the list; if the list was empty, r also becomes
// the tail.
void RegionList::insert_before_head(HeapRegion* r) {
  assert(well_formed(), "Inv");
  set_next(r, hd());
  _hd = r;
  _sz++;
  if (tl() == NULL) _tl = r;
  assert(well_formed(), "Inv");
}
992 | |
// Splice all of new_list onto the front of this list in O(1): link
// new_list's tail to our head, adopt its head and size, and take its tail
// if we were previously empty.  new_list itself is not modified.
void RegionList::prepend_list(RegionList* new_list) {
  assert(well_formed(), "Precondition");
  assert(new_list->well_formed(), "Precondition");
  HeapRegion* new_tl = new_list->tl();
  if (new_tl != NULL) {
    set_next(new_tl, hd());
    _hd = new_list->hd();
    _sz += new_list->sz();
    if (tl() == NULL) _tl = new_list->tl();
  } else {
    // An empty source list must be consistently empty.
    assert(new_list->hd() == NULL && new_list->sz() == 0, "Inv");
  }
  assert(well_formed(), "Inv");
}
1007 | |
1008 void RegionList::delete_after(HeapRegion* r) { | |
1009 assert(well_formed(), "Precondition"); | |
1010 HeapRegion* next = get_next(r); | |
1011 assert(r != NULL, "Precondition"); | |
1012 HeapRegion* next_tl = get_next(next); | |
1013 set_next(r, next_tl); | |
1014 dec_sz(); | |
1015 if (next == tl()) { | |
1016 assert(next_tl == NULL, "Inv"); | |
1017 _tl = r; | |
1018 } | |
1019 assert(well_formed(), "Inv"); | |
1020 } | |
1021 | |
1022 HeapRegion* RegionList::pop() { | |
1023 assert(well_formed(), "Inv"); | |
1024 HeapRegion* res = hd(); | |
1025 if (res != NULL) { | |
1026 _hd = get_next(res); | |
1027 _sz--; | |
1028 set_next(res, NULL); | |
1029 if (sz() == 0) _tl = NULL; | |
1030 } | |
1031 assert(well_formed(), "Inv"); | |
1032 return res; | |
1033 } |