Mercurial > hg > graal-jvmci-8
annotate src/share/vm/gc_implementation/g1/heapRegion.cpp @ 1666:5cbac8938c4c
6956639: G1: assert(cached_ptr != card_ptr) failed: shouldn't be, concurrentG1Refine.cpp:307
Summary: During concurrent refinment, filter cards in young regions after it has been determined that the region has been allocated from and the young type of the region has been set.
Reviewed-by: iveresov, tonyp, jcoomes
author | johnc |
---|---|
date | Mon, 19 Jul 2010 11:06:34 -0700 |
parents | c18cbe5936b8 |
children | 2d160770d2e5 |
rev | line source |
---|---|
342 | 1 /* |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1550
diff
changeset
|
2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. |
342 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1550
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1550
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1550
diff
changeset
|
21 * questions. |
342 | 22 * |
23 */ | |
24 | |
25 #include "incls/_precompiled.incl" | |
26 #include "incls/_heapRegion.cpp.incl" | |
27 | |
942
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
28 int HeapRegion::LogOfHRGrainBytes = 0; |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
29 int HeapRegion::LogOfHRGrainWords = 0; |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
30 int HeapRegion::GrainBytes = 0; |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
31 int HeapRegion::GrainWords = 0; |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
32 int HeapRegion::CardsPerRegion = 0; |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
33 |
342 | 34 HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1, |
35 HeapRegion* hr, OopClosure* cl, | |
36 CardTableModRefBS::PrecisionStyle precision, | |
37 FilterKind fk) : | |
38 ContiguousSpaceDCTOC(hr, cl, precision, NULL), | |
39 _hr(hr), _fk(fk), _g1(g1) | |
40 {} | |
41 | |
42 FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r, | |
43 OopClosure* oc) : | |
44 _r_bottom(r->bottom()), _r_end(r->end()), | |
45 _oc(oc), _out_of_region(0) | |
46 {} | |
47 | |
48 class VerifyLiveClosure: public OopClosure { | |
811 | 49 private: |
342 | 50 G1CollectedHeap* _g1h; |
51 CardTableModRefBS* _bs; | |
52 oop _containing_obj; | |
53 bool _failures; | |
54 int _n_failures; | |
811 | 55 bool _use_prev_marking; |
342 | 56 public: |
811 | 57 // use_prev_marking == true -> use "prev" marking information, |
58 // use_prev_marking == false -> use "next" marking information | |
59 VerifyLiveClosure(G1CollectedHeap* g1h, bool use_prev_marking) : | |
342 | 60 _g1h(g1h), _bs(NULL), _containing_obj(NULL), |
811 | 61 _failures(false), _n_failures(0), _use_prev_marking(use_prev_marking) |
342 | 62 { |
63 BarrierSet* bs = _g1h->barrier_set(); | |
64 if (bs->is_a(BarrierSet::CardTableModRef)) | |
65 _bs = (CardTableModRefBS*)bs; | |
66 } | |
67 | |
68 void set_containing_obj(oop obj) { | |
69 _containing_obj = obj; | |
70 } | |
71 | |
72 bool failures() { return _failures; } | |
73 int n_failures() { return _n_failures; } | |
74 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
75 virtual void do_oop(narrowOop* p) { do_oop_work(p); } |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
76 virtual void do_oop( oop* p) { do_oop_work(p); } |
342 | 77 |
1388 | 78 void print_object(outputStream* out, oop obj) { |
79 #ifdef PRODUCT | |
80 klassOop k = obj->klass(); | |
81 const char* class_name = instanceKlass::cast(k)->external_name(); | |
82 out->print_cr("class name %s", class_name); | |
83 #else // PRODUCT | |
84 obj->print_on(out); | |
85 #endif // PRODUCT | |
86 } | |
87 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
88 template <class T> void do_oop_work(T* p) { |
342 | 89 assert(_containing_obj != NULL, "Precondition"); |
811 | 90 assert(!_g1h->is_obj_dead_cond(_containing_obj, _use_prev_marking), |
91 "Precondition"); | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
92 T heap_oop = oopDesc::load_heap_oop(p); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
93 if (!oopDesc::is_null(heap_oop)) { |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
94 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); |
342 | 95 bool failed = false; |
811 | 96 if (!_g1h->is_in_closed_subset(obj) || |
97 _g1h->is_obj_dead_cond(obj, _use_prev_marking)) { | |
342 | 98 if (!_failures) { |
99 gclog_or_tty->print_cr(""); | |
100 gclog_or_tty->print_cr("----------"); | |
101 } | |
102 if (!_g1h->is_in_closed_subset(obj)) { | |
1388 | 103 HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p); |
342 | 104 gclog_or_tty->print_cr("Field "PTR_FORMAT |
1388 | 105 " of live obj "PTR_FORMAT" in region " |
106 "["PTR_FORMAT", "PTR_FORMAT")", | |
107 p, (void*) _containing_obj, | |
108 from->bottom(), from->end()); | |
109 print_object(gclog_or_tty, _containing_obj); | |
110 gclog_or_tty->print_cr("points to obj "PTR_FORMAT" not in the heap", | |
111 (void*) obj); | |
112 } else { | |
113 HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p); | |
114 HeapRegion* to = _g1h->heap_region_containing((HeapWord*)obj); | |
115 gclog_or_tty->print_cr("Field "PTR_FORMAT | |
116 " of live obj "PTR_FORMAT" in region " | |
117 "["PTR_FORMAT", "PTR_FORMAT")", | |
118 p, (void*) _containing_obj, | |
119 from->bottom(), from->end()); | |
120 print_object(gclog_or_tty, _containing_obj); | |
121 gclog_or_tty->print_cr("points to dead obj "PTR_FORMAT" in region " | |
122 "["PTR_FORMAT", "PTR_FORMAT")", | |
123 (void*) obj, to->bottom(), to->end()); | |
124 print_object(gclog_or_tty, obj); | |
342 | 125 } |
126 gclog_or_tty->print_cr("----------"); | |
127 _failures = true; | |
128 failed = true; | |
129 _n_failures++; | |
130 } | |
131 | |
132 if (!_g1h->full_collection()) { | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
133 HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p); |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
838
diff
changeset
|
134 HeapRegion* to = _g1h->heap_region_containing(obj); |
342 | 135 if (from != NULL && to != NULL && |
136 from != to && | |
137 !to->isHumongous()) { | |
138 jbyte cv_obj = *_bs->byte_for_const(_containing_obj); | |
139 jbyte cv_field = *_bs->byte_for_const(p); | |
140 const jbyte dirty = CardTableModRefBS::dirty_card_val(); | |
141 | |
142 bool is_bad = !(from->is_young() | |
143 || to->rem_set()->contains_reference(p) | |
144 || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed | |
145 (_containing_obj->is_objArray() ? | |
146 cv_field == dirty | |
147 : cv_obj == dirty || cv_field == dirty)); | |
148 if (is_bad) { | |
149 if (!_failures) { | |
150 gclog_or_tty->print_cr(""); | |
151 gclog_or_tty->print_cr("----------"); | |
152 } | |
153 gclog_or_tty->print_cr("Missing rem set entry:"); | |
154 gclog_or_tty->print_cr("Field "PTR_FORMAT | |
155 " of obj "PTR_FORMAT | |
156 ", in region %d ["PTR_FORMAT | |
157 ", "PTR_FORMAT"),", | |
158 p, (void*) _containing_obj, | |
159 from->hrs_index(), | |
160 from->bottom(), | |
161 from->end()); | |
162 _containing_obj->print_on(gclog_or_tty); | |
163 gclog_or_tty->print_cr("points to obj "PTR_FORMAT | |
164 " in region %d ["PTR_FORMAT | |
165 ", "PTR_FORMAT").", | |
166 (void*) obj, to->hrs_index(), | |
167 to->bottom(), to->end()); | |
168 obj->print_on(gclog_or_tty); | |
169 gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.", | |
170 cv_obj, cv_field); | |
171 gclog_or_tty->print_cr("----------"); | |
172 _failures = true; | |
173 if (!failed) _n_failures++; | |
174 } | |
175 } | |
176 } | |
177 } | |
178 } | |
179 }; | |
180 | |
181 template<class ClosureType> | |
182 HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h, | |
183 HeapRegion* hr, | |
184 HeapWord* cur, HeapWord* top) { | |
185 oop cur_oop = oop(cur); | |
186 int oop_size = cur_oop->size(); | |
187 HeapWord* next_obj = cur + oop_size; | |
188 while (next_obj < top) { | |
189 // Keep filtering the remembered set. | |
190 if (!g1h->is_obj_dead(cur_oop, hr)) { | |
191 // Bottom lies entirely below top, so we can call the | |
192 // non-memRegion version of oop_iterate below. | |
193 cur_oop->oop_iterate(cl); | |
194 } | |
195 cur = next_obj; | |
196 cur_oop = oop(cur); | |
197 oop_size = cur_oop->size(); | |
198 next_obj = cur + oop_size; | |
199 } | |
200 return cur; | |
201 } | |
202 | |
203 void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr, | |
204 HeapWord* bottom, | |
205 HeapWord* top, | |
206 OopClosure* cl) { | |
207 G1CollectedHeap* g1h = _g1; | |
208 | |
209 int oop_size; | |
210 | |
211 OopClosure* cl2 = cl; | |
212 FilterIntoCSClosure intoCSFilt(this, g1h, cl); | |
213 FilterOutOfRegionClosure outOfRegionFilt(_hr, cl); | |
214 switch (_fk) { | |
215 case IntoCSFilterKind: cl2 = &intoCSFilt; break; | |
216 case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break; | |
217 } | |
218 | |
219 // Start filtering what we add to the remembered set. If the object is | |
220 // not considered dead, either because it is marked (in the mark bitmap) | |
221 // or it was allocated after marking finished, then we add it. Otherwise | |
222 // we can safely ignore the object. | |
223 if (!g1h->is_obj_dead(oop(bottom), _hr)) { | |
224 oop_size = oop(bottom)->oop_iterate(cl2, mr); | |
225 } else { | |
226 oop_size = oop(bottom)->size(); | |
227 } | |
228 | |
229 bottom += oop_size; | |
230 | |
231 if (bottom < top) { | |
232 // We replicate the loop below for several kinds of possible filters. | |
233 switch (_fk) { | |
234 case NoFilterKind: | |
235 bottom = walk_mem_region_loop(cl, g1h, _hr, bottom, top); | |
236 break; | |
237 case IntoCSFilterKind: { | |
238 FilterIntoCSClosure filt(this, g1h, cl); | |
239 bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top); | |
240 break; | |
241 } | |
242 case OutOfRegionFilterKind: { | |
243 FilterOutOfRegionClosure filt(_hr, cl); | |
244 bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top); | |
245 break; | |
246 } | |
247 default: | |
248 ShouldNotReachHere(); | |
249 } | |
250 | |
251 // Last object. Need to do dead-obj filtering here too. | |
252 if (!g1h->is_obj_dead(oop(bottom), _hr)) { | |
253 oop(bottom)->oop_iterate(cl2, mr); | |
254 } | |
255 } | |
256 } | |
257 | |
942
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
258 // Minimum region size; we won't go lower than that. |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
259 // We might want to decrease this in the future, to deal with small |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
260 // heaps a bit more efficiently. |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
261 #define MIN_REGION_SIZE ( 1024 * 1024 ) |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
262 |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
263 // Maximum region size; we don't go higher than that. There's a good |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
264 // reason for having an upper bound. We don't want regions to get too |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
265 // large, otherwise cleanup's effectiveness would decrease as there |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
266 // will be fewer opportunities to find totally empty regions after |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
267 // marking. |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
268 #define MAX_REGION_SIZE ( 32 * 1024 * 1024 ) |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
269 |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
270 // The automatic region size calculation will try to have around this |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
271 // many regions in the heap (based on the min heap size). |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
272 #define TARGET_REGION_NUMBER 2048 |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
273 |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
274 void HeapRegion::setup_heap_region_size(uintx min_heap_size) { |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
275 // region_size in bytes |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
276 uintx region_size = G1HeapRegionSize; |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
277 if (FLAG_IS_DEFAULT(G1HeapRegionSize)) { |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
278 // We base the automatic calculation on the min heap size. This |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
279 // can be problematic if the spread between min and max is quite |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
280 // wide, imagine -Xms128m -Xmx32g. But, if we decided it based on |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
281 // the max size, the region size might be way too large for the |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
282 // min size. Either way, some users might have to set the region |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
283 // size manually for some -Xms / -Xmx combos. |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
284 |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
285 region_size = MAX2(min_heap_size / TARGET_REGION_NUMBER, |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
286 (uintx) MIN_REGION_SIZE); |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
287 } |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
288 |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
289 int region_size_log = log2_long((jlong) region_size); |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
290 // Recalculate the region size to make sure it's a power of |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
291 // 2. This means that region_size is the largest power of 2 that's |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
292 // <= what we've calculated so far. |
1485
fb57d4cf76c2
6931180: Migration to recent versions of MS Platform SDK
prr
parents:
1394
diff
changeset
|
293 region_size = ((uintx)1 << region_size_log); |
942
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
294 |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
295 // Now make sure that we don't go over or under our limits. |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
296 if (region_size < MIN_REGION_SIZE) { |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
297 region_size = MIN_REGION_SIZE; |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
298 } else if (region_size > MAX_REGION_SIZE) { |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
299 region_size = MAX_REGION_SIZE; |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
300 } |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
301 |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
302 // And recalculate the log. |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
303 region_size_log = log2_long((jlong) region_size); |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
304 |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
305 // Now, set up the globals. |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
306 guarantee(LogOfHRGrainBytes == 0, "we should only set it once"); |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
307 LogOfHRGrainBytes = region_size_log; |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
308 |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
309 guarantee(LogOfHRGrainWords == 0, "we should only set it once"); |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
310 LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize; |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
311 |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
312 guarantee(GrainBytes == 0, "we should only set it once"); |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
313 // The cast to int is safe, given that we've bounded region_size by |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
314 // MIN_REGION_SIZE and MAX_REGION_SIZE. |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
315 GrainBytes = (int) region_size; |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
316 |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
317 guarantee(GrainWords == 0, "we should only set it once"); |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
318 GrainWords = GrainBytes >> LogHeapWordSize; |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
319 guarantee(1 << LogOfHRGrainWords == GrainWords, "sanity"); |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
320 |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
321 guarantee(CardsPerRegion == 0, "we should only set it once"); |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
322 CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift; |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
323 } |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
866
diff
changeset
|
324 |
342 | 325 void HeapRegion::reset_after_compaction() { |
326 G1OffsetTableContigSpace::reset_after_compaction(); | |
327 // After a compaction the mark bitmap is invalid, so we must | |
328 // treat all objects as being inside the unmarked area. | |
329 zero_marked_bytes(); | |
330 init_top_at_mark_start(); | |
331 } | |
332 | |
333 DirtyCardToOopClosure* | |
334 HeapRegion::new_dcto_closure(OopClosure* cl, | |
335 CardTableModRefBS::PrecisionStyle precision, | |
336 HeapRegionDCTOC::FilterKind fk) { | |
337 return new HeapRegionDCTOC(G1CollectedHeap::heap(), | |
338 this, cl, precision, fk); | |
339 } | |
340 | |
341 void HeapRegion::hr_clear(bool par, bool clear_space) { | |
355 | 342 _humongous_type = NotHumongous; |
342 | 343 _humongous_start_region = NULL; |
344 _in_collection_set = false; | |
345 _is_gc_alloc_region = false; | |
346 | |
347 // Age stuff (if parallel, this will be done separately, since it needs | |
348 // to be sequential). | |
349 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
350 | |
351 set_young_index_in_cset(-1); | |
352 uninstall_surv_rate_group(); | |
353 set_young_type(NotYoung); | |
354 | |
355 // In case it had been the start of a humongous sequence, reset its end. | |
356 set_end(_orig_end); | |
357 | |
358 if (!par) { | |
359 // If this is parallel, this will be done later. | |
360 HeapRegionRemSet* hrrs = rem_set(); | |
361 if (hrrs != NULL) hrrs->clear(); | |
355 | 362 _claimed = InitialClaimValue; |
342 | 363 } |
364 zero_marked_bytes(); | |
365 set_sort_index(-1); | |
366 | |
367 _offsets.resize(HeapRegion::GrainWords); | |
368 init_top_at_mark_start(); | |
356 | 369 if (clear_space) clear(SpaceDecorator::Mangle); |
342 | 370 } |
371 | |
372 // <PREDICTION> | |
373 void HeapRegion::calc_gc_efficiency() { | |
374 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
375 _gc_efficiency = (double) garbage_bytes() / | |
376 g1h->predict_region_elapsed_time_ms(this, false); | |
377 } | |
378 // </PREDICTION> | |
379 | |
380 void HeapRegion::set_startsHumongous() { | |
355 | 381 _humongous_type = StartsHumongous; |
342 | 382 _humongous_start_region = this; |
383 assert(end() == _orig_end, "Should be normal before alloc."); | |
384 } | |
385 | |
386 bool HeapRegion::claimHeapRegion(jint claimValue) { | |
387 jint current = _claimed; | |
388 if (current != claimValue) { | |
389 jint res = Atomic::cmpxchg(claimValue, &_claimed, current); | |
390 if (res == current) { | |
391 return true; | |
392 } | |
393 } | |
394 return false; | |
395 } | |
396 | |
397 HeapWord* HeapRegion::next_block_start_careful(HeapWord* addr) { | |
398 HeapWord* low = addr; | |
399 HeapWord* high = end(); | |
400 while (low < high) { | |
401 size_t diff = pointer_delta(high, low); | |
402 // Must add one below to bias toward the high amount. Otherwise, if | |
403 // "high" were at the desired value, and "low" were one less, we | |
404 // would not converge on "high". This is not symmetric, because | |
405 // we set "high" to a block start, which might be the right one, | |
406 // which we don't do for "low". | |
407 HeapWord* middle = low + (diff+1)/2; | |
408 if (middle == high) return high; | |
409 HeapWord* mid_bs = block_start_careful(middle); | |
410 if (mid_bs < addr) { | |
411 low = middle; | |
412 } else { | |
413 high = mid_bs; | |
414 } | |
415 } | |
416 assert(low == high && low >= addr, "Didn't work."); | |
417 return low; | |
418 } | |
419 | |
420 void HeapRegion::set_next_on_unclean_list(HeapRegion* r) { | |
421 assert(r == NULL || r->is_on_unclean_list(), "Malformed unclean list."); | |
422 _next_in_special_set = r; | |
423 } | |
424 | |
425 void HeapRegion::set_on_unclean_list(bool b) { | |
426 _is_on_unclean_list = b; | |
427 } | |
428 | |
356 | 429 void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) { |
430 G1OffsetTableContigSpace::initialize(mr, false, mangle_space); | |
342 | 431 hr_clear(false/*par*/, clear_space); |
432 } | |
433 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away | |
434 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list | |
435 #endif // _MSC_VER | |
436 | |
437 | |
438 HeapRegion:: | |
439 HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray, | |
440 MemRegion mr, bool is_zeroed) | |
441 : G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed), | |
442 _next_fk(HeapRegionDCTOC::NoFilterKind), | |
443 _hrs_index(-1), | |
355 | 444 _humongous_type(NotHumongous), _humongous_start_region(NULL), |
342 | 445 _in_collection_set(false), _is_gc_alloc_region(false), |
446 _is_on_free_list(false), _is_on_unclean_list(false), | |
447 _next_in_special_set(NULL), _orig_end(NULL), | |
355 | 448 _claimed(InitialClaimValue), _evacuation_failed(false), |
342 | 449 _prev_marked_bytes(0), _next_marked_bytes(0), _sort_index(-1), |
450 _young_type(NotYoung), _next_young_region(NULL), | |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
751
diff
changeset
|
451 _next_dirty_cards_region(NULL), |
342 | 452 _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1), |
1394
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1388
diff
changeset
|
453 _rem_set(NULL), _zfs(NotZeroFilled), |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1388
diff
changeset
|
454 _recorded_rs_length(0), _predicted_elapsed_time_ms(0), |
1316cec51b4d
6819061: G1: eliminate serial Other times that are proportional to the collection set length
johnc
parents:
1388
diff
changeset
|
455 _predicted_bytes_to_copy(0) |
342 | 456 { |
457 _orig_end = mr.end(); | |
458 // Note that initialize() will set the start of the unmarked area of the | |
459 // region. | |
356 | 460 this->initialize(mr, !is_zeroed, SpaceDecorator::Mangle); |
461 set_top(bottom()); | |
462 set_saved_mark(); | |
342 | 463 |
464 _rem_set = new HeapRegionRemSet(sharedOffsetArray, this); | |
465 | |
466 assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant."); | |
467 // In case the region is allocated during a pause, note the top. | |
468 // We haven't done any counting on a brand new region. | |
469 _top_at_conc_mark_count = bottom(); | |
470 } | |
471 | |
472 class NextCompactionHeapRegionClosure: public HeapRegionClosure { | |
473 const HeapRegion* _target; | |
474 bool _target_seen; | |
475 HeapRegion* _last; | |
476 CompactibleSpace* _res; | |
477 public: | |
478 NextCompactionHeapRegionClosure(const HeapRegion* target) : | |
479 _target(target), _target_seen(false), _res(NULL) {} | |
480 bool doHeapRegion(HeapRegion* cur) { | |
481 if (_target_seen) { | |
482 if (!cur->isHumongous()) { | |
483 _res = cur; | |
484 return true; | |
485 } | |
486 } else if (cur == _target) { | |
487 _target_seen = true; | |
488 } | |
489 return false; | |
490 } | |
491 CompactibleSpace* result() { return _res; } | |
492 }; | |
493 | |
494 CompactibleSpace* HeapRegion::next_compaction_space() const { | |
495 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
496 // cast away const-ness | |
497 HeapRegion* r = (HeapRegion*) this; | |
498 NextCompactionHeapRegionClosure blk(r); | |
499 g1h->heap_region_iterate_from(r, &blk); | |
500 return blk.result(); | |
501 } | |
502 | |
503 void HeapRegion::set_continuesHumongous(HeapRegion* start) { | |
504 // The order is important here. | |
505 start->add_continuingHumongousRegion(this); | |
355 | 506 _humongous_type = ContinuesHumongous; |
342 | 507 _humongous_start_region = start; |
508 } | |
509 | |
510 void HeapRegion::add_continuingHumongousRegion(HeapRegion* cont) { | |
511 // Must join the blocks of the current H region seq with the block of the | |
512 // added region. | |
513 offsets()->join_blocks(bottom(), cont->bottom()); | |
514 arrayOop obj = (arrayOop)(bottom()); | |
515 obj->set_length((int) (obj->length() + cont->capacity()/jintSize)); | |
516 set_end(cont->end()); | |
517 set_top(cont->end()); | |
518 } | |
519 | |
520 void HeapRegion::save_marks() { | |
521 set_saved_mark(); | |
522 } | |
523 | |
524 void HeapRegion::oops_in_mr_iterate(MemRegion mr, OopClosure* cl) { | |
525 HeapWord* p = mr.start(); | |
526 HeapWord* e = mr.end(); | |
527 oop obj; | |
528 while (p < e) { | |
529 obj = oop(p); | |
530 p += obj->oop_iterate(cl); | |
531 } | |
532 assert(p == e, "bad memregion: doesn't end on obj boundary"); | |
533 } | |
534 | |
535 #define HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \ | |
536 void HeapRegion::oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \ | |
537 ContiguousSpace::oop_since_save_marks_iterate##nv_suffix(cl); \ | |
538 } | |
539 SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN) | |
540 | |
541 | |
542 void HeapRegion::oop_before_save_marks_iterate(OopClosure* cl) { | |
543 oops_in_mr_iterate(MemRegion(bottom(), saved_mark_word()), cl); | |
544 } | |
545 | |
#ifdef DEBUG
// Debug-only wrapper: checks that when the heap expects zero-filled
// allocation, we only allocate in zero-filled ("Allocated") regions.
// (The previous version also read zero_fill_state() into an unused local;
// that dead statement has been removed.)
HeapWord* HeapRegion::allocate(size_t size) {
  assert(!G1CollectedHeap::heap()->allocs_are_zero_filled() ||
         zero_fill_is_allocated(),
         "When ZF is on, only alloc in ZF'd regions");
  return G1OffsetTableContigSpace::allocate(size);
}
#endif
555 | |
// Set the zero-fill state to "zfs". Caller must either hold ZF_mon or
// be running inside a full (stop-world) GC.
void HeapRegion::set_zero_fill_state_work(ZeroFillState zfs) {
  assert(ZF_mon->owned_by_self() ||
         Universe::heap()->is_gc_active(),
         "Must hold the lock or be a full GC to modify.");
#ifdef ASSERT
  if (top() != bottom() && zfs != Allocated) {
    // We only get here when the assert below is about to fail; build the
    // region description eagerly so it can appear in the message.
    ResourceMark rm;
    stringStream region_str;
    print_on(&region_str);
    assert(top() == bottom() || zfs == Allocated,
           err_msg("Region must be empty, or we must be setting it to allocated. "
                   "_zfs=%d, zfs=%d, region: %s", _zfs, zfs, region_str.as_string()));
  }
#endif
  _zfs = zfs;
}
572 | |
573 void HeapRegion::set_zero_fill_complete() { | |
574 set_zero_fill_state_work(ZeroFilled); | |
575 if (ZF_mon->owned_by_self()) { | |
576 ZF_mon->notify_all(); | |
577 } | |
578 } | |
579 | |
580 | |
// Acquire the zero-fill monitor (no safepoint check) and delegate to
// the _locked variant, which runs the zero-fill state machine.
void HeapRegion::ensure_zero_filled() {
  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  ensure_zero_filled_locked();
}
585 | |
// Drive this region to the ZeroFilled state. Caller must hold ZF_mon;
// we must either be at a safepoint (GC) or hold the Heap_lock
// (allocating a new region).
void HeapRegion::ensure_zero_filled_locked() {
  assert(ZF_mon->owned_by_self(), "Precondition");
  bool should_ignore_zf = SafepointSynchronize::is_at_safepoint();
  assert(should_ignore_zf || Heap_lock->is_locked(),
         "Either we're in a GC or we're allocating a region.");
  switch (zero_fill_state()) {
  case HeapRegion::NotZeroFilled:
    // Claim the region for ourselves, then drop the monitor while doing
    // the (potentially long) fill so other threads are not blocked.
    set_zero_fill_in_progress(Thread::current());
    {
      ZF_mon->unlock();
      Copy::fill_to_words(bottom(), capacity()/HeapWordSize);
      ZF_mon->lock_without_safepoint_check();
    }
    // A trap.
    guarantee(zero_fill_state() == HeapRegion::ZeroFilling
              && zero_filler() == Thread::current(),
              "AHA! Tell Dave D if you see this...");
    set_zero_fill_complete();
    // gclog_or_tty->print_cr("Did sync ZF.");
    ConcurrentZFThread::note_sync_zfs();
    break;
  case HeapRegion::ZeroFilling:
    if (should_ignore_zf) {
      // We can "break" the lock and take over the work.
      Copy::fill_to_words(bottom(), capacity()/HeapWordSize);
      set_zero_fill_complete();
      ConcurrentZFThread::note_sync_zfs();
      break;
    } else {
      ConcurrentZFThread::wait_for_ZF_completed(this);
    }
    // NOTE: intentional fall-through -- after waiting, the region is
    // ZeroFilled and there is nothing more to do.
  case HeapRegion::ZeroFilled:
    // Nothing to do.
    break;
  case HeapRegion::Allocated:
    guarantee(false, "Should not call on allocated regions.");
  }
  assert(zero_fill_state() == HeapRegion::ZeroFilled, "Post");
}
625 | |
// Apply "cl" to each live object intersecting "mr", stopping early if an
// unparseable point (an object whose klass word is not yet set, i.e. a
// concurrently-allocated object) is reached, or if the closure aborts.
// Returns NULL on normal completion, otherwise the address at which
// iteration stopped.
HeapWord*
HeapRegion::object_iterate_mem_careful(MemRegion mr,
                                       ObjectClosure* cl) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  // We used to use "block_start_careful" here. But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(mr.start());
  mr = mr.intersection(used_region());
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  assert(cur <= mr.start()
         && (oop(cur)->klass_or_null() == NULL ||
             cur + oop(cur)->size() > mr.start()),
         "postcondition of block_start");
  oop obj;
  while (cur < mr.end()) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    } else if (!g1h->is_obj_dead(obj)) {
      cl->do_object(obj);
    }
    if (cl->abort()) return cur;
    // The check above must occur before the operation below, since an
    // abort might invalidate the "size" operation.
    cur += obj->size();
  }
  return NULL;
}
657 | |
// Iterate, via "cl", over the oops of the objects intersecting "mr"
// (which corresponds to a single card). Returns NULL on success, or the
// address of an unparseable object if one is encountered (the caller
// then defers processing of the card). If "filter_young" is set and the
// region is young, returns NULL immediately: young regions need no
// concurrent refinement.
HeapWord*
HeapRegion::
oops_on_card_seq_iterate_careful(MemRegion mr,
                                 FilterOutOfRegionClosure* cl,
                                 bool filter_young) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we're within a stop-world GC, then we might look at a card in a
  // GC alloc region that extends onto a GC LAB, which may not be
  // parseable. Stop such at the "saved_mark" of the region.
  if (G1CollectedHeap::heap()->is_gc_active()) {
    mr = mr.intersection(used_region_at_save_marks());
  } else {
    mr = mr.intersection(used_region());
  }
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  // The intersection of the incoming mr (for the card) and the
  // allocated part of the region is non-empty. This implies that
  // we have actually allocated into this region. The code in
  // G1CollectedHeap.cpp that allocates a new region sets the
  // is_young tag on the region before allocating. Thus we
  // safely know if this region is young.
  if (is_young() && filter_young) {
    return NULL;
  }

  // We used to use "block_start_careful" here. But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(mr.start());
  assert(cur <= mr.start(), "Postcondition");

  // Walk forward from the BOT-reported block start to the object that
  // actually covers mr.start().
  while (cur <= mr.start()) {
    if (oop(cur)->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }
    // Otherwise...
    int sz = oop(cur)->size();
    if (cur + sz > mr.start()) break;
    // Otherwise, go on.
    cur = cur + sz;
  }
  oop obj;
  obj = oop(cur);
  // If we finish this loop...
  assert(cur <= mr.start()
         && obj->klass_or_null() != NULL
         && cur + obj->size() > mr.start(),
         "Loop postcondition");
  // Process the object straddling mr.start(), clipped to mr.
  if (!g1h->is_obj_dead(obj)) {
    obj->oop_iterate(cl, mr);
  }

  HeapWord* next;
  while (cur < mr.end()) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    };
    // Otherwise:
    next = (cur + obj->size());
    if (!g1h->is_obj_dead(obj)) {
      if (next < mr.end()) {
        obj->oop_iterate(cl);
      } else {
        // this obj spans the boundary. If it's an array, stop at the
        // boundary.
        if (obj->is_objArray()) {
          obj->oop_iterate(cl, mr);
        } else {
          obj->oop_iterate(cl);
        }
      }
    }
    cur = next;
  }
  return NULL;
}
739 | |
void HeapRegion::print() const { print_on(gclog_or_tty); }
// Print a one-line summary of the region: humongous kind (HS = starts,
// HC = continues), collection-set / GC-alloc-region flags, young or
// survivor, free flag, GC time stamp, PTAMS/NTAMS pointers, then the
// underlying space's bounds.
// NOTE(review): the padding inside the short literal strings below may
// have been collapsed by the annotate rendering -- confirm the exact
// widths against the original file before relying on column alignment.
void HeapRegion::print_on(outputStream* st) const {
  if (isHumongous()) {
    if (startsHumongous())
      st->print(" HS");
    else
      st->print(" HC");
  } else {
    st->print(" ");
  }
  if (in_collection_set())
    st->print(" CS");
  else if (is_gc_alloc_region())
    st->print(" A ");
  else
    st->print(" ");
  if (is_young())
    st->print(is_survivor() ? " SU" : " Y ");
  else
    st->print(" ");
  if (is_empty())
    st->print(" F");
  else
    st->print(" ");
  st->print(" %5d", _gc_time_stamp);
  st->print(" PTAMS "PTR_FORMAT" NTAMS "PTR_FORMAT,
            prev_top_at_mark_start(), next_top_at_mark_start());
  G1OffsetTableContigSpace::print_on(st);
}
769 | |
811 | 770 void HeapRegion::verify(bool allow_dirty) const { |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
771 bool dummy = false; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
772 verify(allow_dirty, /* use_prev_marking */ true, /* failures */ &dummy); |
811 | 773 } |
774 | |
342 | 775 #define OBJ_SAMPLE_INTERVAL 0 |
776 #define BLOCK_SAMPLE_INTERVAL 100 | |
777 | |
778 // This really ought to be commoned up into OffsetTableContigSpace somehow. | |
779 // We would need a mechanism to make that code skip dead objects. | |
780 | |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
781 void HeapRegion::verify(bool allow_dirty, |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
782 bool use_prev_marking, |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
783 bool* failures) const { |
342 | 784 G1CollectedHeap* g1 = G1CollectedHeap::heap(); |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
785 *failures = false; |
342 | 786 HeapWord* p = bottom(); |
787 HeapWord* prev_p = NULL; | |
788 int objs = 0; | |
789 int blocks = 0; | |
811 | 790 VerifyLiveClosure vl_cl(g1, use_prev_marking); |
342 | 791 while (p < top()) { |
792 size_t size = oop(p)->size(); | |
793 if (blocks == BLOCK_SAMPLE_INTERVAL) { | |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
794 HeapWord* res = block_start_const(p + (size/2)); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
795 if (p != res) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
796 gclog_or_tty->print_cr("offset computation 1 for "PTR_FORMAT" and " |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
797 SIZE_FORMAT" returned "PTR_FORMAT, |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
798 p, size, res); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
799 *failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
800 return; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
801 } |
342 | 802 blocks = 0; |
803 } else { | |
804 blocks++; | |
805 } | |
806 if (objs == OBJ_SAMPLE_INTERVAL) { | |
807 oop obj = oop(p); | |
811 | 808 if (!g1->is_obj_dead_cond(obj, this, use_prev_marking)) { |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
809 if (obj->is_oop()) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
810 klassOop klass = obj->klass(); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
811 if (!klass->is_perm()) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
812 gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" " |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
813 "not in perm", klass, obj); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
814 *failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
815 return; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
816 } else if (!klass->is_klass()) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
817 gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" " |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
818 "not a klass", klass, obj); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
819 *failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
820 return; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
821 } else { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
822 vl_cl.set_containing_obj(obj); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
823 obj->oop_iterate(&vl_cl); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
824 if (vl_cl.failures()) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
825 *failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
826 } |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
827 if (G1MaxVerifyFailures >= 0 && |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
828 vl_cl.n_failures() >= G1MaxVerifyFailures) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
829 return; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
830 } |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
831 } |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
832 } else { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
833 gclog_or_tty->print_cr(PTR_FORMAT" no an oop", obj); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
834 *failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
835 return; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
836 } |
342 | 837 } |
838 objs = 0; | |
839 } else { | |
840 objs++; | |
841 } | |
842 prev_p = p; | |
843 p += size; | |
844 } | |
845 HeapWord* rend = end(); | |
846 HeapWord* rtop = top(); | |
847 if (rtop < rend) { | |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
848 HeapWord* res = block_start_const(rtop + (rend - rtop) / 2); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
849 if (res != rtop) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
850 gclog_or_tty->print_cr("offset computation 2 for "PTR_FORMAT" and " |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
851 PTR_FORMAT" returned "PTR_FORMAT, |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
852 rtop, rend, res); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
853 *failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
854 return; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
855 } |
342 | 856 } |
1020
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
857 |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
858 if (p != top()) { |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
859 gclog_or_tty->print_cr("end of last object "PTR_FORMAT" " |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
860 "does not match top "PTR_FORMAT, p, top()); |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
861 *failures = true; |
ff2402f6a50b
6882730: G1: parallel heap verification messes up region dump
tonyp
parents:
942
diff
changeset
|
862 return; |
342 | 863 } |
864 } | |
865 | |
866 // G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go | |
867 // away eventually. | |
868 | |
356 | 869 void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) { |
342 | 870 // false ==> we'll do the clearing if there's clearing to be done. |
356 | 871 ContiguousSpace::initialize(mr, false, mangle_space); |
342 | 872 _offsets.zero_bottom_entry(); |
873 _offsets.initialize_threshold(); | |
356 | 874 if (clear_space) clear(mangle_space); |
342 | 875 } |
876 | |
// Clear the space (optionally mangling), then reset the block offset
// table's bottom entry and refinement threshold.
void G1OffsetTableContigSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  _offsets.zero_bottom_entry();
  _offsets.initialize_threshold();
}
882 | |
// Keep the offset table's notion of bottom in sync with the space's.
void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}
887 | |
// Resize the offset table to cover the new [bottom, new_end) extent.
void G1OffsetTableContigSpace::set_end(HeapWord* new_end) {
  Space::set_end(new_end);
  _offsets.resize(new_end - bottom());
}
892 | |
// Print the space as: name [bottom, top, BOT-threshold, end).
void G1OffsetTableContigSpace::print() const {
  print_short();
  gclog_or_tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
                INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                bottom(), top(), _offsets.threshold(), end());
}
899 | |
// Delegate BOT threshold initialization to the offset table.
HeapWord* G1OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}
903 | |
// Record the newly allocated block [start, end) in the offset table and
// return the next threshold at which the caller must call again.
HeapWord* G1OffsetTableContigSpace::cross_threshold(HeapWord* start,
                                                    HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}
909 | |
910 HeapWord* G1OffsetTableContigSpace::saved_mark_word() const { | |
911 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
912 assert( _gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant" ); | |
913 if (_gc_time_stamp < g1h->get_gc_time_stamp()) | |
914 return top(); | |
915 else | |
916 return ContiguousSpace::saved_mark_word(); | |
917 } | |
918 | |
// Record top() as the saved mark and advance our GC time stamp, at most
// once per GC. The write ordering below is load-bearing -- do not
// reorder these statements.
void G1OffsetTableContigSpace::set_saved_mark() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();

  if (_gc_time_stamp < curr_gc_time_stamp) {
    // The order of these is important, as another thread might be
    // about to start scanning this region. If it does so after
    // set_saved_mark and before _gc_time_stamp = ..., then the latter
    // will be false, and it will pick up top() as the high water mark
    // of region. If it does so after _gc_time_stamp = ..., then it
    // will pick up the right saved_mark_word() as the high water mark
    // of the region. Either way, the behaviour will be correct.
    ContiguousSpace::set_saved_mark();
    OrderAccess::storestore();  // order the mark write before the stamp write
    _gc_time_stamp = curr_gc_time_stamp;
    // The following fence is to force a flush of the writes above, but
    // is strictly not needed because when an allocating worker thread
    // calls set_saved_mark() it does so under the ParGCRareEvent_lock;
    // when the lock is released, the write will be flushed.
    // OrderAccess::fence();
  }
}
941 | |
// Construct the space over "mr", wiring this space into the shared
// block offset array. If "is_zeroed" the underlying memory is already
// zero-filled and initialize() can skip the clear.
G1OffsetTableContigSpace::
G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr, bool is_zeroed) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
  _gc_time_stamp(0)
{
  _offsets.set_space(this);
  initialize(mr, !is_zeroed, SpaceDecorator::Mangle);
}
952 | |
953 size_t RegionList::length() { | |
954 size_t len = 0; | |
955 HeapRegion* cur = hd(); | |
956 DEBUG_ONLY(HeapRegion* last = NULL); | |
957 while (cur != NULL) { | |
958 len++; | |
959 DEBUG_ONLY(last = cur); | |
960 cur = get_next(cur); | |
961 } | |
962 assert(last == tl(), "Invariant"); | |
963 return len; | |
964 } | |
965 | |
// Push "r" onto the front of the list; if the list was empty, "r"
// becomes the tail as well.
void RegionList::insert_before_head(HeapRegion* r) {
  assert(well_formed(), "Inv");
  set_next(r, hd());
  _hd = r;
  _sz++;
  if (tl() == NULL) _tl = r;  // list was empty
  assert(well_formed(), "Inv");
}
974 | |
// Splice the whole of "new_list" in front of this list. "new_list" is
// not modified (its hd/tl/sz are left as-is); an empty "new_list" is a
// no-op.
void RegionList::prepend_list(RegionList* new_list) {
  assert(well_formed(), "Precondition");
  assert(new_list->well_formed(), "Precondition");
  HeapRegion* new_tl = new_list->tl();
  if (new_tl != NULL) {
    // Link new_list's tail to our old head, then adopt its head/size.
    set_next(new_tl, hd());
    _hd = new_list->hd();
    _sz += new_list->sz();
    if (tl() == NULL) _tl = new_list->tl();  // we were empty
  } else {
    // An empty list must have no head and zero size.
    assert(new_list->hd() == NULL && new_list->sz() == 0, "Inv");
  }
  assert(well_formed(), "Inv");
}
989 | |
990 void RegionList::delete_after(HeapRegion* r) { | |
991 assert(well_formed(), "Precondition"); | |
992 HeapRegion* next = get_next(r); | |
993 assert(r != NULL, "Precondition"); | |
994 HeapRegion* next_tl = get_next(next); | |
995 set_next(r, next_tl); | |
996 dec_sz(); | |
997 if (next == tl()) { | |
998 assert(next_tl == NULL, "Inv"); | |
999 _tl = r; | |
1000 } | |
1001 assert(well_formed(), "Inv"); | |
1002 } | |
1003 | |
1004 HeapRegion* RegionList::pop() { | |
1005 assert(well_formed(), "Inv"); | |
1006 HeapRegion* res = hd(); | |
1007 if (res != NULL) { | |
1008 _hd = get_next(res); | |
1009 _sz--; | |
1010 set_next(res, NULL); | |
1011 if (sz() == 0) _tl = NULL; | |
1012 } | |
1013 assert(well_formed(), "Inv"); | |
1014 return res; | |
1015 } |