Mercurial > hg > truffle
annotate src/share/vm/gc_implementation/g1/heapRegion.cpp @ 838:0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
Summary: Changing the behavior of -XX:+PrintHeapAtGC for G1 from printing lengthy, per-region information to instead printing a concise summary.
Reviewed-by: ysr, apetrusenko, jcoomes
author | tonyp |
---|---|
date | Tue, 07 Jul 2009 14:23:00 -0400 |
parents | 830ca2573896 |
children | bd02caa94611 df6caf649ff7 |
rev | line source |
---|---|
342 | 1 /* |
470 | 2 * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. |
342 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, | |
20 * CA 95054 USA or visit www.sun.com if you need additional information or | |
21 * have any questions. | |
22 * | |
23 */ | |
24 | |
25 #include "incls/_precompiled.incl" | |
26 #include "incls/_heapRegion.cpp.incl" | |
27 | |
28 HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1, | |
29 HeapRegion* hr, OopClosure* cl, | |
30 CardTableModRefBS::PrecisionStyle precision, | |
31 FilterKind fk) : | |
32 ContiguousSpaceDCTOC(hr, cl, precision, NULL), | |
33 _hr(hr), _fk(fk), _g1(g1) | |
34 {} | |
35 | |
36 FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r, | |
37 OopClosure* oc) : | |
38 _r_bottom(r->bottom()), _r_end(r->end()), | |
39 _oc(oc), _out_of_region(0) | |
40 {} | |
41 | |
42 class VerifyLiveClosure: public OopClosure { | |
811 | 43 private: |
342 | 44 G1CollectedHeap* _g1h; |
45 CardTableModRefBS* _bs; | |
46 oop _containing_obj; | |
47 bool _failures; | |
48 int _n_failures; | |
811 | 49 bool _use_prev_marking; |
342 | 50 public: |
811 | 51 // use_prev_marking == true -> use "prev" marking information, |
52 // use_prev_marking == false -> use "next" marking information | |
53 VerifyLiveClosure(G1CollectedHeap* g1h, bool use_prev_marking) : | |
342 | 54 _g1h(g1h), _bs(NULL), _containing_obj(NULL), |
811 | 55 _failures(false), _n_failures(0), _use_prev_marking(use_prev_marking) |
342 | 56 { |
57 BarrierSet* bs = _g1h->barrier_set(); | |
58 if (bs->is_a(BarrierSet::CardTableModRef)) | |
59 _bs = (CardTableModRefBS*)bs; | |
60 } | |
61 | |
62 void set_containing_obj(oop obj) { | |
63 _containing_obj = obj; | |
64 } | |
65 | |
66 bool failures() { return _failures; } | |
67 int n_failures() { return _n_failures; } | |
68 | |
69 virtual void do_oop(narrowOop* p) { | |
70 guarantee(false, "NYI"); | |
71 } | |
72 | |
73 void do_oop(oop* p) { | |
74 assert(_containing_obj != NULL, "Precondition"); | |
811 | 75 assert(!_g1h->is_obj_dead_cond(_containing_obj, _use_prev_marking), |
76 "Precondition"); | |
342 | 77 oop obj = *p; |
78 if (obj != NULL) { | |
79 bool failed = false; | |
811 | 80 if (!_g1h->is_in_closed_subset(obj) || |
81 _g1h->is_obj_dead_cond(obj, _use_prev_marking)) { | |
342 | 82 if (!_failures) { |
83 gclog_or_tty->print_cr(""); | |
84 gclog_or_tty->print_cr("----------"); | |
85 } | |
86 if (!_g1h->is_in_closed_subset(obj)) { | |
87 gclog_or_tty->print_cr("Field "PTR_FORMAT | |
88 " of live obj "PTR_FORMAT | |
89 " points to obj "PTR_FORMAT | |
90 " not in the heap.", | |
91 p, (void*) _containing_obj, (void*) obj); | |
92 } else { | |
93 gclog_or_tty->print_cr("Field "PTR_FORMAT | |
94 " of live obj "PTR_FORMAT | |
95 " points to dead obj "PTR_FORMAT".", | |
96 p, (void*) _containing_obj, (void*) obj); | |
97 } | |
98 gclog_or_tty->print_cr("Live obj:"); | |
99 _containing_obj->print_on(gclog_or_tty); | |
100 gclog_or_tty->print_cr("Bad referent:"); | |
101 obj->print_on(gclog_or_tty); | |
102 gclog_or_tty->print_cr("----------"); | |
103 _failures = true; | |
104 failed = true; | |
105 _n_failures++; | |
106 } | |
107 | |
108 if (!_g1h->full_collection()) { | |
109 HeapRegion* from = _g1h->heap_region_containing(p); | |
110 HeapRegion* to = _g1h->heap_region_containing(*p); | |
111 if (from != NULL && to != NULL && | |
112 from != to && | |
113 !to->isHumongous()) { | |
114 jbyte cv_obj = *_bs->byte_for_const(_containing_obj); | |
115 jbyte cv_field = *_bs->byte_for_const(p); | |
116 const jbyte dirty = CardTableModRefBS::dirty_card_val(); | |
117 | |
118 bool is_bad = !(from->is_young() | |
119 || to->rem_set()->contains_reference(p) | |
120 || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed | |
121 (_containing_obj->is_objArray() ? | |
122 cv_field == dirty | |
123 : cv_obj == dirty || cv_field == dirty)); | |
124 if (is_bad) { | |
125 if (!_failures) { | |
126 gclog_or_tty->print_cr(""); | |
127 gclog_or_tty->print_cr("----------"); | |
128 } | |
129 gclog_or_tty->print_cr("Missing rem set entry:"); | |
130 gclog_or_tty->print_cr("Field "PTR_FORMAT | |
131 " of obj "PTR_FORMAT | |
132 ", in region %d ["PTR_FORMAT | |
133 ", "PTR_FORMAT"),", | |
134 p, (void*) _containing_obj, | |
135 from->hrs_index(), | |
136 from->bottom(), | |
137 from->end()); | |
138 _containing_obj->print_on(gclog_or_tty); | |
139 gclog_or_tty->print_cr("points to obj "PTR_FORMAT | |
140 " in region %d ["PTR_FORMAT | |
141 ", "PTR_FORMAT").", | |
142 (void*) obj, to->hrs_index(), | |
143 to->bottom(), to->end()); | |
144 obj->print_on(gclog_or_tty); | |
145 gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.", | |
146 cv_obj, cv_field); | |
147 gclog_or_tty->print_cr("----------"); | |
148 _failures = true; | |
149 if (!failed) _n_failures++; | |
150 } | |
151 } | |
152 } | |
153 } | |
154 } | |
155 }; | |
156 | |
157 template<class ClosureType> | |
158 HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h, | |
159 HeapRegion* hr, | |
160 HeapWord* cur, HeapWord* top) { | |
161 oop cur_oop = oop(cur); | |
162 int oop_size = cur_oop->size(); | |
163 HeapWord* next_obj = cur + oop_size; | |
164 while (next_obj < top) { | |
165 // Keep filtering the remembered set. | |
166 if (!g1h->is_obj_dead(cur_oop, hr)) { | |
167 // Bottom lies entirely below top, so we can call the | |
168 // non-memRegion version of oop_iterate below. | |
169 cur_oop->oop_iterate(cl); | |
170 } | |
171 cur = next_obj; | |
172 cur_oop = oop(cur); | |
173 oop_size = cur_oop->size(); | |
174 next_obj = cur + oop_size; | |
175 } | |
176 return cur; | |
177 } | |
178 | |
179 void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr, | |
180 HeapWord* bottom, | |
181 HeapWord* top, | |
182 OopClosure* cl) { | |
183 G1CollectedHeap* g1h = _g1; | |
184 | |
185 int oop_size; | |
186 | |
187 OopClosure* cl2 = cl; | |
188 FilterIntoCSClosure intoCSFilt(this, g1h, cl); | |
189 FilterOutOfRegionClosure outOfRegionFilt(_hr, cl); | |
190 switch (_fk) { | |
191 case IntoCSFilterKind: cl2 = &intoCSFilt; break; | |
192 case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break; | |
193 } | |
194 | |
195 // Start filtering what we add to the remembered set. If the object is | |
196 // not considered dead, either because it is marked (in the mark bitmap) | |
197 // or it was allocated after marking finished, then we add it. Otherwise | |
198 // we can safely ignore the object. | |
199 if (!g1h->is_obj_dead(oop(bottom), _hr)) { | |
200 oop_size = oop(bottom)->oop_iterate(cl2, mr); | |
201 } else { | |
202 oop_size = oop(bottom)->size(); | |
203 } | |
204 | |
205 bottom += oop_size; | |
206 | |
207 if (bottom < top) { | |
208 // We replicate the loop below for several kinds of possible filters. | |
209 switch (_fk) { | |
210 case NoFilterKind: | |
211 bottom = walk_mem_region_loop(cl, g1h, _hr, bottom, top); | |
212 break; | |
213 case IntoCSFilterKind: { | |
214 FilterIntoCSClosure filt(this, g1h, cl); | |
215 bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top); | |
216 break; | |
217 } | |
218 case OutOfRegionFilterKind: { | |
219 FilterOutOfRegionClosure filt(_hr, cl); | |
220 bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top); | |
221 break; | |
222 } | |
223 default: | |
224 ShouldNotReachHere(); | |
225 } | |
226 | |
227 // Last object. Need to do dead-obj filtering here too. | |
228 if (!g1h->is_obj_dead(oop(bottom), _hr)) { | |
229 oop(bottom)->oop_iterate(cl2, mr); | |
230 } | |
231 } | |
232 } | |
233 | |
234 void HeapRegion::reset_after_compaction() { | |
235 G1OffsetTableContigSpace::reset_after_compaction(); | |
236 // After a compaction the mark bitmap is invalid, so we must | |
237 // treat all objects as being inside the unmarked area. | |
238 zero_marked_bytes(); | |
239 init_top_at_mark_start(); | |
240 } | |
241 | |
242 DirtyCardToOopClosure* | |
243 HeapRegion::new_dcto_closure(OopClosure* cl, | |
244 CardTableModRefBS::PrecisionStyle precision, | |
245 HeapRegionDCTOC::FilterKind fk) { | |
246 return new HeapRegionDCTOC(G1CollectedHeap::heap(), | |
247 this, cl, precision, fk); | |
248 } | |
249 | |
250 void HeapRegion::hr_clear(bool par, bool clear_space) { | |
355 | 251 _humongous_type = NotHumongous; |
342 | 252 _humongous_start_region = NULL; |
253 _in_collection_set = false; | |
254 _is_gc_alloc_region = false; | |
255 | |
256 // Age stuff (if parallel, this will be done separately, since it needs | |
257 // to be sequential). | |
258 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
259 | |
260 set_young_index_in_cset(-1); | |
261 uninstall_surv_rate_group(); | |
262 set_young_type(NotYoung); | |
263 | |
264 // In case it had been the start of a humongous sequence, reset its end. | |
265 set_end(_orig_end); | |
266 | |
267 if (!par) { | |
268 // If this is parallel, this will be done later. | |
269 HeapRegionRemSet* hrrs = rem_set(); | |
270 if (hrrs != NULL) hrrs->clear(); | |
355 | 271 _claimed = InitialClaimValue; |
342 | 272 } |
273 zero_marked_bytes(); | |
274 set_sort_index(-1); | |
275 | |
276 _offsets.resize(HeapRegion::GrainWords); | |
277 init_top_at_mark_start(); | |
356 | 278 if (clear_space) clear(SpaceDecorator::Mangle); |
342 | 279 } |
280 | |
281 // <PREDICTION> | |
282 void HeapRegion::calc_gc_efficiency() { | |
283 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
284 _gc_efficiency = (double) garbage_bytes() / | |
285 g1h->predict_region_elapsed_time_ms(this, false); | |
286 } | |
287 // </PREDICTION> | |
288 | |
289 void HeapRegion::set_startsHumongous() { | |
355 | 290 _humongous_type = StartsHumongous; |
342 | 291 _humongous_start_region = this; |
292 assert(end() == _orig_end, "Should be normal before alloc."); | |
293 } | |
294 | |
295 bool HeapRegion::claimHeapRegion(jint claimValue) { | |
296 jint current = _claimed; | |
297 if (current != claimValue) { | |
298 jint res = Atomic::cmpxchg(claimValue, &_claimed, current); | |
299 if (res == current) { | |
300 return true; | |
301 } | |
302 } | |
303 return false; | |
304 } | |
305 | |
306 HeapWord* HeapRegion::next_block_start_careful(HeapWord* addr) { | |
307 HeapWord* low = addr; | |
308 HeapWord* high = end(); | |
309 while (low < high) { | |
310 size_t diff = pointer_delta(high, low); | |
311 // Must add one below to bias toward the high amount. Otherwise, if | |
312 // "high" were at the desired value, and "low" were one less, we | |
313 // would not converge on "high". This is not symmetric, because | |
314 // we set "high" to a block start, which might be the right one, | |
315 // which we don't do for "low". | |
316 HeapWord* middle = low + (diff+1)/2; | |
317 if (middle == high) return high; | |
318 HeapWord* mid_bs = block_start_careful(middle); | |
319 if (mid_bs < addr) { | |
320 low = middle; | |
321 } else { | |
322 high = mid_bs; | |
323 } | |
324 } | |
325 assert(low == high && low >= addr, "Didn't work."); | |
326 return low; | |
327 } | |
328 | |
329 void HeapRegion::set_next_on_unclean_list(HeapRegion* r) { | |
330 assert(r == NULL || r->is_on_unclean_list(), "Malformed unclean list."); | |
331 _next_in_special_set = r; | |
332 } | |
333 | |
334 void HeapRegion::set_on_unclean_list(bool b) { | |
335 _is_on_unclean_list = b; | |
336 } | |
337 | |
356 | 338 void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) { |
339 G1OffsetTableContigSpace::initialize(mr, false, mangle_space); | |
342 | 340 hr_clear(false/*par*/, clear_space); |
341 } | |
342 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away | |
343 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list | |
344 #endif // _MSC_VER | |
345 | |
346 | |
347 HeapRegion:: | |
348 HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray, | |
349 MemRegion mr, bool is_zeroed) | |
350 : G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed), | |
351 _next_fk(HeapRegionDCTOC::NoFilterKind), | |
352 _hrs_index(-1), | |
355 | 353 _humongous_type(NotHumongous), _humongous_start_region(NULL), |
342 | 354 _in_collection_set(false), _is_gc_alloc_region(false), |
355 _is_on_free_list(false), _is_on_unclean_list(false), | |
356 _next_in_special_set(NULL), _orig_end(NULL), | |
355 | 357 _claimed(InitialClaimValue), _evacuation_failed(false), |
342 | 358 _prev_marked_bytes(0), _next_marked_bytes(0), _sort_index(-1), |
359 _young_type(NotYoung), _next_young_region(NULL), | |
796
29e7d79232b9
6819065: G1: eliminate high serial card table clearing time
apetrusenko
parents:
751
diff
changeset
|
360 _next_dirty_cards_region(NULL), |
342 | 361 _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1), |
362 _rem_set(NULL), _zfs(NotZeroFilled) | |
363 { | |
364 _orig_end = mr.end(); | |
365 // Note that initialize() will set the start of the unmarked area of the | |
366 // region. | |
356 | 367 this->initialize(mr, !is_zeroed, SpaceDecorator::Mangle); |
368 set_top(bottom()); | |
369 set_saved_mark(); | |
342 | 370 |
371 _rem_set = new HeapRegionRemSet(sharedOffsetArray, this); | |
372 | |
373 assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant."); | |
374 // In case the region is allocated during a pause, note the top. | |
375 // We haven't done any counting on a brand new region. | |
376 _top_at_conc_mark_count = bottom(); | |
377 } | |
378 | |
379 class NextCompactionHeapRegionClosure: public HeapRegionClosure { | |
380 const HeapRegion* _target; | |
381 bool _target_seen; | |
382 HeapRegion* _last; | |
383 CompactibleSpace* _res; | |
384 public: | |
385 NextCompactionHeapRegionClosure(const HeapRegion* target) : | |
386 _target(target), _target_seen(false), _res(NULL) {} | |
387 bool doHeapRegion(HeapRegion* cur) { | |
388 if (_target_seen) { | |
389 if (!cur->isHumongous()) { | |
390 _res = cur; | |
391 return true; | |
392 } | |
393 } else if (cur == _target) { | |
394 _target_seen = true; | |
395 } | |
396 return false; | |
397 } | |
398 CompactibleSpace* result() { return _res; } | |
399 }; | |
400 | |
401 CompactibleSpace* HeapRegion::next_compaction_space() const { | |
402 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
403 // cast away const-ness | |
404 HeapRegion* r = (HeapRegion*) this; | |
405 NextCompactionHeapRegionClosure blk(r); | |
406 g1h->heap_region_iterate_from(r, &blk); | |
407 return blk.result(); | |
408 } | |
409 | |
410 void HeapRegion::set_continuesHumongous(HeapRegion* start) { | |
411 // The order is important here. | |
412 start->add_continuingHumongousRegion(this); | |
355 | 413 _humongous_type = ContinuesHumongous; |
342 | 414 _humongous_start_region = start; |
415 } | |
416 | |
417 void HeapRegion::add_continuingHumongousRegion(HeapRegion* cont) { | |
418 // Must join the blocks of the current H region seq with the block of the | |
419 // added region. | |
420 offsets()->join_blocks(bottom(), cont->bottom()); | |
421 arrayOop obj = (arrayOop)(bottom()); | |
422 obj->set_length((int) (obj->length() + cont->capacity()/jintSize)); | |
423 set_end(cont->end()); | |
424 set_top(cont->end()); | |
425 } | |
426 | |
427 void HeapRegion::save_marks() { | |
428 set_saved_mark(); | |
429 } | |
430 | |
431 void HeapRegion::oops_in_mr_iterate(MemRegion mr, OopClosure* cl) { | |
432 HeapWord* p = mr.start(); | |
433 HeapWord* e = mr.end(); | |
434 oop obj; | |
435 while (p < e) { | |
436 obj = oop(p); | |
437 p += obj->oop_iterate(cl); | |
438 } | |
439 assert(p == e, "bad memregion: doesn't end on obj boundary"); | |
440 } | |
441 | |
442 #define HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \ | |
443 void HeapRegion::oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \ | |
444 ContiguousSpace::oop_since_save_marks_iterate##nv_suffix(cl); \ | |
445 } | |
446 SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN) | |
447 | |
448 | |
449 void HeapRegion::oop_before_save_marks_iterate(OopClosure* cl) { | |
450 oops_in_mr_iterate(MemRegion(bottom(), saved_mark_word()), cl); | |
451 } | |
452 | |
#ifdef DEBUG
// Debug-only allocation wrapper: when the heap expects zero-filled
// regions, assert that we only allocate in regions whose zero-fill
// state is "allocated".
HeapWord* HeapRegion::allocate(size_t size) {
  assert(!G1CollectedHeap::heap()->allocs_are_zero_filled() ||
         zero_fill_is_allocated(),
         "When ZF is on, only alloc in ZF'd regions");
  return G1OffsetTableContigSpace::allocate(size);
}
#endif
462 | |
463 void HeapRegion::set_zero_fill_state_work(ZeroFillState zfs) { | |
464 assert(top() == bottom() || zfs == Allocated, | |
465 "Region must be empty, or we must be setting it to allocated."); | |
466 assert(ZF_mon->owned_by_self() || | |
467 Universe::heap()->is_gc_active(), | |
468 "Must hold the lock or be a full GC to modify."); | |
469 _zfs = zfs; | |
470 } | |
471 | |
472 void HeapRegion::set_zero_fill_complete() { | |
473 set_zero_fill_state_work(ZeroFilled); | |
474 if (ZF_mon->owned_by_self()) { | |
475 ZF_mon->notify_all(); | |
476 } | |
477 } | |
478 | |
479 | |
480 void HeapRegion::ensure_zero_filled() { | |
481 MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); | |
482 ensure_zero_filled_locked(); | |
483 } | |
484 | |
485 void HeapRegion::ensure_zero_filled_locked() { | |
486 assert(ZF_mon->owned_by_self(), "Precondition"); | |
487 bool should_ignore_zf = SafepointSynchronize::is_at_safepoint(); | |
488 assert(should_ignore_zf || Heap_lock->is_locked(), | |
489 "Either we're in a GC or we're allocating a region."); | |
490 switch (zero_fill_state()) { | |
491 case HeapRegion::NotZeroFilled: | |
492 set_zero_fill_in_progress(Thread::current()); | |
493 { | |
494 ZF_mon->unlock(); | |
495 Copy::fill_to_words(bottom(), capacity()/HeapWordSize); | |
496 ZF_mon->lock_without_safepoint_check(); | |
497 } | |
498 // A trap. | |
499 guarantee(zero_fill_state() == HeapRegion::ZeroFilling | |
500 && zero_filler() == Thread::current(), | |
501 "AHA! Tell Dave D if you see this..."); | |
502 set_zero_fill_complete(); | |
503 // gclog_or_tty->print_cr("Did sync ZF."); | |
504 ConcurrentZFThread::note_sync_zfs(); | |
505 break; | |
506 case HeapRegion::ZeroFilling: | |
507 if (should_ignore_zf) { | |
508 // We can "break" the lock and take over the work. | |
509 Copy::fill_to_words(bottom(), capacity()/HeapWordSize); | |
510 set_zero_fill_complete(); | |
511 ConcurrentZFThread::note_sync_zfs(); | |
512 break; | |
513 } else { | |
514 ConcurrentZFThread::wait_for_ZF_completed(this); | |
515 } | |
516 case HeapRegion::ZeroFilled: | |
517 // Nothing to do. | |
518 break; | |
519 case HeapRegion::Allocated: | |
520 guarantee(false, "Should not call on allocated regions."); | |
521 } | |
522 assert(zero_fill_state() == HeapRegion::ZeroFilled, "Post"); | |
523 } | |
524 | |
525 HeapWord* | |
526 HeapRegion::object_iterate_mem_careful(MemRegion mr, | |
527 ObjectClosure* cl) { | |
528 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
529 // We used to use "block_start_careful" here. But we're actually happy | |
530 // to update the BOT while we do this... | |
531 HeapWord* cur = block_start(mr.start()); | |
532 mr = mr.intersection(used_region()); | |
533 if (mr.is_empty()) return NULL; | |
534 // Otherwise, find the obj that extends onto mr.start(). | |
535 | |
536 assert(cur <= mr.start() | |
537 && (oop(cur)->klass() == NULL || | |
538 cur + oop(cur)->size() > mr.start()), | |
539 "postcondition of block_start"); | |
540 oop obj; | |
541 while (cur < mr.end()) { | |
542 obj = oop(cur); | |
543 if (obj->klass() == NULL) { | |
544 // Ran into an unparseable point. | |
545 return cur; | |
546 } else if (!g1h->is_obj_dead(obj)) { | |
547 cl->do_object(obj); | |
548 } | |
549 if (cl->abort()) return cur; | |
550 // The check above must occur before the operation below, since an | |
551 // abort might invalidate the "size" operation. | |
552 cur += obj->size(); | |
553 } | |
554 return NULL; | |
555 } | |
556 | |
557 HeapWord* | |
558 HeapRegion:: | |
559 oops_on_card_seq_iterate_careful(MemRegion mr, | |
560 FilterOutOfRegionClosure* cl) { | |
561 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
562 | |
563 // If we're within a stop-world GC, then we might look at a card in a | |
564 // GC alloc region that extends onto a GC LAB, which may not be | |
565 // parseable. Stop such at the "saved_mark" of the region. | |
566 if (G1CollectedHeap::heap()->is_gc_active()) { | |
567 mr = mr.intersection(used_region_at_save_marks()); | |
568 } else { | |
569 mr = mr.intersection(used_region()); | |
570 } | |
571 if (mr.is_empty()) return NULL; | |
572 // Otherwise, find the obj that extends onto mr.start(). | |
573 | |
574 // We used to use "block_start_careful" here. But we're actually happy | |
575 // to update the BOT while we do this... | |
576 HeapWord* cur = block_start(mr.start()); | |
577 assert(cur <= mr.start(), "Postcondition"); | |
578 | |
579 while (cur <= mr.start()) { | |
580 if (oop(cur)->klass() == NULL) { | |
581 // Ran into an unparseable point. | |
582 return cur; | |
583 } | |
584 // Otherwise... | |
585 int sz = oop(cur)->size(); | |
586 if (cur + sz > mr.start()) break; | |
587 // Otherwise, go on. | |
588 cur = cur + sz; | |
589 } | |
590 oop obj; | |
591 obj = oop(cur); | |
592 // If we finish this loop... | |
593 assert(cur <= mr.start() | |
594 && obj->klass() != NULL | |
595 && cur + obj->size() > mr.start(), | |
596 "Loop postcondition"); | |
597 if (!g1h->is_obj_dead(obj)) { | |
598 obj->oop_iterate(cl, mr); | |
599 } | |
600 | |
601 HeapWord* next; | |
602 while (cur < mr.end()) { | |
603 obj = oop(cur); | |
604 if (obj->klass() == NULL) { | |
605 // Ran into an unparseable point. | |
606 return cur; | |
607 }; | |
608 // Otherwise: | |
609 next = (cur + obj->size()); | |
610 if (!g1h->is_obj_dead(obj)) { | |
611 if (next < mr.end()) { | |
612 obj->oop_iterate(cl); | |
613 } else { | |
614 // this obj spans the boundary. If it's an array, stop at the | |
615 // boundary. | |
616 if (obj->is_objArray()) { | |
617 obj->oop_iterate(cl, mr); | |
618 } else { | |
619 obj->oop_iterate(cl); | |
620 } | |
621 } | |
622 } | |
623 cur = next; | |
624 } | |
625 return NULL; | |
626 } | |
627 | |
628 void HeapRegion::print() const { print_on(gclog_or_tty); } | |
629 void HeapRegion::print_on(outputStream* st) const { | |
630 if (isHumongous()) { | |
631 if (startsHumongous()) | |
632 st->print(" HS"); | |
633 else | |
634 st->print(" HC"); | |
635 } else { | |
636 st->print(" "); | |
637 } | |
638 if (in_collection_set()) | |
639 st->print(" CS"); | |
640 else if (is_gc_alloc_region()) | |
641 st->print(" A "); | |
642 else | |
643 st->print(" "); | |
644 if (is_young()) | |
645 st->print(is_scan_only() ? " SO" : (is_survivor() ? " SU" : " Y ")); | |
646 else | |
647 st->print(" "); | |
648 if (is_empty()) | |
649 st->print(" F"); | |
650 else | |
651 st->print(" "); | |
652 st->print(" %d", _gc_time_stamp); | |
653 G1OffsetTableContigSpace::print_on(st); | |
654 } | |
655 | |
811 | 656 void HeapRegion::verify(bool allow_dirty) const { |
657 verify(allow_dirty, /* use_prev_marking */ true); | |
658 } | |
659 | |
342 | 660 #define OBJ_SAMPLE_INTERVAL 0 |
661 #define BLOCK_SAMPLE_INTERVAL 100 | |
662 | |
663 // This really ought to be commoned up into OffsetTableContigSpace somehow. | |
664 // We would need a mechanism to make that code skip dead objects. | |
665 | |
811 | 666 void HeapRegion::verify(bool allow_dirty, bool use_prev_marking) const { |
342 | 667 G1CollectedHeap* g1 = G1CollectedHeap::heap(); |
668 HeapWord* p = bottom(); | |
669 HeapWord* prev_p = NULL; | |
670 int objs = 0; | |
671 int blocks = 0; | |
811 | 672 VerifyLiveClosure vl_cl(g1, use_prev_marking); |
342 | 673 while (p < top()) { |
674 size_t size = oop(p)->size(); | |
675 if (blocks == BLOCK_SAMPLE_INTERVAL) { | |
676 guarantee(p == block_start_const(p + (size/2)), | |
677 "check offset computation"); | |
678 blocks = 0; | |
679 } else { | |
680 blocks++; | |
681 } | |
682 if (objs == OBJ_SAMPLE_INTERVAL) { | |
683 oop obj = oop(p); | |
811 | 684 if (!g1->is_obj_dead_cond(obj, this, use_prev_marking)) { |
342 | 685 obj->verify(); |
686 vl_cl.set_containing_obj(obj); | |
687 obj->oop_iterate(&vl_cl); | |
688 if (G1MaxVerifyFailures >= 0 | |
689 && vl_cl.n_failures() >= G1MaxVerifyFailures) break; | |
690 } | |
691 objs = 0; | |
692 } else { | |
693 objs++; | |
694 } | |
695 prev_p = p; | |
696 p += size; | |
697 } | |
698 HeapWord* rend = end(); | |
699 HeapWord* rtop = top(); | |
700 if (rtop < rend) { | |
701 guarantee(block_start_const(rtop + (rend - rtop) / 2) == rtop, | |
702 "check offset computation"); | |
703 } | |
704 if (vl_cl.failures()) { | |
705 gclog_or_tty->print_cr("Heap:"); | |
838
0316eac49d5a
6855834: G1: minimize the output when -XX:+PrintHeapAtGC is set
tonyp
parents:
811
diff
changeset
|
706 G1CollectedHeap::heap()->print_on(gclog_or_tty, true /* extended */); |
342 | 707 gclog_or_tty->print_cr(""); |
708 } | |
751 | 709 if (VerifyDuringGC && |
342 | 710 G1VerifyConcMarkPrintReachable && |
711 vl_cl.failures()) { | |
712 g1->concurrent_mark()->print_prev_bitmap_reachable(); | |
713 } | |
745
2b6c55e36143
6829013: G1: set the default value of G1VerifyConcMarkPrintRechable to false
tonyp
parents:
677
diff
changeset
|
714 guarantee(!vl_cl.failures(), "region verification failed"); |
342 | 715 guarantee(p == top(), "end of last object must match end of space"); |
716 } | |
717 | |
718 // G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go | |
719 // away eventually. | |
720 | |
356 | 721 void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) { |
342 | 722 // false ==> we'll do the clearing if there's clearing to be done. |
356 | 723 ContiguousSpace::initialize(mr, false, mangle_space); |
342 | 724 _offsets.zero_bottom_entry(); |
725 _offsets.initialize_threshold(); | |
356 | 726 if (clear_space) clear(mangle_space); |
342 | 727 } |
728 | |
356 | 729 void G1OffsetTableContigSpace::clear(bool mangle_space) { |
730 ContiguousSpace::clear(mangle_space); | |
342 | 731 _offsets.zero_bottom_entry(); |
732 _offsets.initialize_threshold(); | |
733 } | |
734 | |
735 void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) { | |
736 Space::set_bottom(new_bottom); | |
737 _offsets.set_bottom(new_bottom); | |
738 } | |
739 | |
740 void G1OffsetTableContigSpace::set_end(HeapWord* new_end) { | |
741 Space::set_end(new_end); | |
742 _offsets.resize(new_end - bottom()); | |
743 } | |
744 | |
745 void G1OffsetTableContigSpace::print() const { | |
746 print_short(); | |
747 gclog_or_tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " | |
748 INTPTR_FORMAT ", " INTPTR_FORMAT ")", | |
749 bottom(), top(), _offsets.threshold(), end()); | |
750 } | |
751 | |
752 HeapWord* G1OffsetTableContigSpace::initialize_threshold() { | |
753 return _offsets.initialize_threshold(); | |
754 } | |
755 | |
756 HeapWord* G1OffsetTableContigSpace::cross_threshold(HeapWord* start, | |
757 HeapWord* end) { | |
758 _offsets.alloc_block(start, end); | |
759 return _offsets.threshold(); | |
760 } | |
761 | |
762 HeapWord* G1OffsetTableContigSpace::saved_mark_word() const { | |
763 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
764 assert( _gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant" ); | |
765 if (_gc_time_stamp < g1h->get_gc_time_stamp()) | |
766 return top(); | |
767 else | |
768 return ContiguousSpace::saved_mark_word(); | |
769 } | |
770 | |
771 void G1OffsetTableContigSpace::set_saved_mark() { | |
772 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
773 unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp(); | |
774 | |
775 if (_gc_time_stamp < curr_gc_time_stamp) { | |
776 // The order of these is important, as another thread might be | |
777 // about to start scanning this region. If it does so after | |
778 // set_saved_mark and before _gc_time_stamp = ..., then the latter | |
779 // will be false, and it will pick up top() as the high water mark | |
780 // of region. If it does so after _gc_time_stamp = ..., then it | |
781 // will pick up the right saved_mark_word() as the high water mark | |
782 // of the region. Either way, the behaviour will be correct. | |
783 ContiguousSpace::set_saved_mark(); | |
353
9bb2c10ac07b
6723570: G1: assertion failure: p == current_top or oop(p)->is_oop(),"p is not a block start" (revisited!)
iveresov
parents:
342
diff
changeset
|
784 _gc_time_stamp = curr_gc_time_stamp; |
9bb2c10ac07b
6723570: G1: assertion failure: p == current_top or oop(p)->is_oop(),"p is not a block start" (revisited!)
iveresov
parents:
342
diff
changeset
|
785 OrderAccess::fence(); |
342 | 786 } |
787 } | |
788 | |
789 G1OffsetTableContigSpace:: | |
790 G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray, | |
791 MemRegion mr, bool is_zeroed) : | |
792 _offsets(sharedOffsetArray, mr), | |
793 _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true), | |
794 _gc_time_stamp(0) | |
795 { | |
796 _offsets.set_space(this); | |
356 | 797 initialize(mr, !is_zeroed, SpaceDecorator::Mangle); |
342 | 798 } |
799 | |
800 size_t RegionList::length() { | |
801 size_t len = 0; | |
802 HeapRegion* cur = hd(); | |
803 DEBUG_ONLY(HeapRegion* last = NULL); | |
804 while (cur != NULL) { | |
805 len++; | |
806 DEBUG_ONLY(last = cur); | |
807 cur = get_next(cur); | |
808 } | |
809 assert(last == tl(), "Invariant"); | |
810 return len; | |
811 } | |
812 | |
813 void RegionList::insert_before_head(HeapRegion* r) { | |
814 assert(well_formed(), "Inv"); | |
815 set_next(r, hd()); | |
816 _hd = r; | |
817 _sz++; | |
818 if (tl() == NULL) _tl = r; | |
819 assert(well_formed(), "Inv"); | |
820 } | |
821 | |
822 void RegionList::prepend_list(RegionList* new_list) { | |
823 assert(well_formed(), "Precondition"); | |
824 assert(new_list->well_formed(), "Precondition"); | |
825 HeapRegion* new_tl = new_list->tl(); | |
826 if (new_tl != NULL) { | |
827 set_next(new_tl, hd()); | |
828 _hd = new_list->hd(); | |
829 _sz += new_list->sz(); | |
830 if (tl() == NULL) _tl = new_list->tl(); | |
831 } else { | |
832 assert(new_list->hd() == NULL && new_list->sz() == 0, "Inv"); | |
833 } | |
834 assert(well_formed(), "Inv"); | |
835 } | |
836 | |
837 void RegionList::delete_after(HeapRegion* r) { | |
838 assert(well_formed(), "Precondition"); | |
839 HeapRegion* next = get_next(r); | |
840 assert(r != NULL, "Precondition"); | |
841 HeapRegion* next_tl = get_next(next); | |
842 set_next(r, next_tl); | |
843 dec_sz(); | |
844 if (next == tl()) { | |
845 assert(next_tl == NULL, "Inv"); | |
846 _tl = r; | |
847 } | |
848 assert(well_formed(), "Inv"); | |
849 } | |
850 | |
851 HeapRegion* RegionList::pop() { | |
852 assert(well_formed(), "Inv"); | |
853 HeapRegion* res = hd(); | |
854 if (res != NULL) { | |
855 _hd = get_next(res); | |
856 _sz--; | |
857 set_next(res, NULL); | |
858 if (sz() == 0) _tl = NULL; | |
859 } | |
860 assert(well_formed(), "Inv"); | |
861 return res; | |
862 } |