Mercurial > hg > truffle
annotate src/share/vm/gc_implementation/g1/heapRegion.cpp @ 453:c96030fff130
6684579: SoftReference processing can be made more efficient
Summary: For current soft-ref clearing policies, we can decide at marking time if a soft-reference will definitely not be cleared, postponing the decision of whether it will definitely be cleared to the final reference processing phase. This can be especially beneficial in the case of concurrent collectors where the marking is usually concurrent but reference processing is usually not.
Reviewed-by: jmasa
author | ysr |
---|---|
date | Thu, 20 Nov 2008 16:56:09 -0800 |
parents | 1ee8caae33af |
children | ad8c8ca4ab0f |
rev | line source |
---|---|
342 | 1 /* |
2 * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. | |
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, | |
20 * CA 95054 USA or visit www.sun.com if you need additional information or | |
21 * have any questions. | |
22 * | |
23 */ | |
24 | |
25 #include "incls/_precompiled.incl" | |
26 #include "incls/_heapRegion.cpp.incl" | |
27 | |
// Constructs a dirty-card-to-oop closure for a single heap region.
// "fk" selects which filtering wrapper (if any) is applied around
// "cl" when cards in this region are walked (see walk_mem_region_with_cl).
HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
                                 HeapRegion* hr, OopClosure* cl,
                                 CardTableModRefBS::PrecisionStyle precision,
                                 FilterKind fk) :
  ContiguousSpaceDCTOC(hr, cl, precision, NULL),
  _hr(hr), _fk(fk), _g1(g1)
{}
35 | |
// Captures region r's [bottom, end) boundaries so the closure can later
// filter out references that stay inside the region; the out-of-region
// counter starts at zero.
FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
                                                   OopClosure* oc) :
  _r_bottom(r->bottom()), _r_end(r->end()),
  _oc(oc), _out_of_region(0)
{}
41 | |
// Verification closure: applied to each reference field of a live object,
// it checks (1) that the referent is in the heap and live, and (2) outside
// of full collections, that a cross-region reference is covered either by
// the target region's remembered set, by the source being young, or by a
// dirty card.  Failures are logged and counted rather than aborting
// immediately, so several problems can be reported in one pass.
class VerifyLiveClosure: public OopClosure {
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _bs;     // card table; used to excuse dirty cards
  oop _containing_obj;        // the object whose fields are being scanned
  bool _failures;             // any failure seen so far?
  int _n_failures;            // number of failing fields seen
public:
  VerifyLiveClosure(G1CollectedHeap* g1h) :
    _g1h(g1h), _bs(NULL), _containing_obj(NULL),
    _failures(false), _n_failures(0)
  {
    // _bs stays NULL unless the heap's barrier set is card-table based.
    BarrierSet* bs = _g1h->barrier_set();
    if (bs->is_a(BarrierSet::CardTableModRef))
      _bs = (CardTableModRefBS*)bs;
  }

  // Must be called before iterating the fields of each new object.
  void set_containing_obj(oop obj) {
    _containing_obj = obj;
  }

  bool failures() { return _failures; }
  int n_failures() { return _n_failures; }

  // Compressed oops are not supported by this verifier.
  virtual void do_oop(narrowOop* p) {
    guarantee(false, "NYI");
  }

  void do_oop(oop* p) {
    assert(_containing_obj != NULL, "Precondition");
    assert(!_g1h->is_obj_dead(_containing_obj), "Precondition");
    oop obj = *p;
    if (obj != NULL) {
      bool failed = false;
      // Check 1: the referent must be inside the heap and alive.
      if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead(obj)) {
        if (!_failures) {
          gclog_or_tty->print_cr("");
          gclog_or_tty->print_cr("----------");
        }
        if (!_g1h->is_in_closed_subset(obj)) {
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                                 " of live obj "PTR_FORMAT
                                 " points to obj "PTR_FORMAT
                                 " not in the heap.",
                                 p, (void*) _containing_obj, (void*) obj);
        } else {
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                                 " of live obj "PTR_FORMAT
                                 " points to dead obj "PTR_FORMAT".",
                                 p, (void*) _containing_obj, (void*) obj);
        }
        gclog_or_tty->print_cr("Live obj:");
        _containing_obj->print_on(gclog_or_tty);
        gclog_or_tty->print_cr("Bad referent:");
        obj->print_on(gclog_or_tty);
        gclog_or_tty->print_cr("----------");
        _failures = true;
        failed = true;          // avoids double-counting in check 2 below
        _n_failures++;
      }

      // Check 2: remembered-set coverage for inter-region references,
      // skipping popular and humongous target regions.
      if (!_g1h->full_collection()) {
        HeapRegion* from = _g1h->heap_region_containing(p);
        HeapRegion* to = _g1h->heap_region_containing(*p);
        if (from != NULL && to != NULL &&
            from != to &&
            !to->popular() &&
            !to->isHumongous()) {
          jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
          jbyte cv_field = *_bs->byte_for_const(p);
          const jbyte dirty = CardTableModRefBS::dirty_card_val();

          // A reference is bad unless the source is young, the rem set
          // already contains it, or (when log buffers were not flushed
          // before verifying) the relevant card is still dirty.  Note the
          // deliberate &&-over-|| precedence in the third disjunct.
          bool is_bad = !(from->is_young()
                          || to->rem_set()->contains_reference(p)
                          || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed
                              (_containing_obj->is_objArray() ?
                                  cv_field == dirty
                               : cv_obj == dirty || cv_field == dirty));
          if (is_bad) {
            if (!_failures) {
              gclog_or_tty->print_cr("");
              gclog_or_tty->print_cr("----------");
            }
            gclog_or_tty->print_cr("Missing rem set entry:");
            gclog_or_tty->print_cr("Field "PTR_FORMAT
                                   " of obj "PTR_FORMAT
                                   ", in region %d ["PTR_FORMAT
                                   ", "PTR_FORMAT"),",
                                   p, (void*) _containing_obj,
                                   from->hrs_index(),
                                   from->bottom(),
                                   from->end());
            _containing_obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("points to obj "PTR_FORMAT
                                   " in region %d ["PTR_FORMAT
                                   ", "PTR_FORMAT").",
                                   (void*) obj, to->hrs_index(),
                                   to->bottom(), to->end());
            obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
                                   cv_obj, cv_field);
            gclog_or_tty->print_cr("----------");
            _failures = true;
            // Count each field at most once across both checks.
            if (!failed) _n_failures++;
          }
        }
      }
    }
  }
};
151 | |
// Applies "cl" to the reference fields of every live object that lies
// entirely below "top", starting at "cur" (which must be an object
// start).  Returns the address of the first object that extends up to
// or beyond "top", so the caller can handle that last object specially.
template<class ClosureType>
HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h,
                               HeapRegion* hr,
                               HeapWord* cur, HeapWord* top) {
  oop cur_oop = oop(cur);
  int oop_size = cur_oop->size();
  HeapWord* next_obj = cur + oop_size;
  while (next_obj < top) {
    // Keep filtering the remembered set.
    if (!g1h->is_obj_dead(cur_oop, hr)) {
      // Bottom lies entirely below top, so we can call the
      // non-memRegion version of oop_iterate below.
#ifndef PRODUCT
      if (G1VerifyMarkingInEvac) {
        VerifyLiveClosure vl_cl(g1h);
        cur_oop->oop_iterate(&vl_cl);
      }
#endif
      cur_oop->oop_iterate(cl);
    }
    cur = next_obj;
    cur_oop = oop(cur);
    oop_size = cur_oop->size();
    next_obj = cur + oop_size;
  }
  return cur;
}
179 | |
// Walks all objects intersecting "mr" (the first of which starts at
// "bottom"), applying "cl" — wrapped in the filter selected by _fk —
// to each live object's reference fields.  The first and last objects
// may extend beyond "mr" and are iterated with the memRegion-bounded
// variant of oop_iterate; interior objects use the unbounded loop above.
void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr,
                                              HeapWord* bottom,
                                              HeapWord* top,
                                              OopClosure* cl) {
  G1CollectedHeap* g1h = _g1;

  int oop_size;

  // Select the filtering wrapper dictated by the filter kind.
  OopClosure* cl2 = cl;
  FilterIntoCSClosure intoCSFilt(this, g1h, cl);
  FilterOutOfRegionClosure outOfRegionFilt(_hr, cl);
  switch (_fk) {
  case IntoCSFilterKind:      cl2 = &intoCSFilt;      break;
  case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break;
  }

  // Start filtering what we add to the remembered set. If the object is
  // not considered dead, either because it is marked (in the mark bitmap)
  // or it was allocated after marking finished, then we add it. Otherwise
  // we can safely ignore the object.
  if (!g1h->is_obj_dead(oop(bottom), _hr)) {
#ifndef PRODUCT
    if (G1VerifyMarkingInEvac) {
      VerifyLiveClosure vl_cl(g1h);
      oop(bottom)->oop_iterate(&vl_cl, mr);
    }
#endif
    oop_size = oop(bottom)->oop_iterate(cl2, mr);
  } else {
    oop_size = oop(bottom)->size();
  }

  bottom += oop_size;

  if (bottom < top) {
    // We replicate the loop below for several kinds of possible filters.
    // (The loop is templated so the filter call can be inlined.)
    switch (_fk) {
    case NoFilterKind:
      bottom = walk_mem_region_loop(cl, g1h, _hr, bottom, top);
      break;
    case IntoCSFilterKind: {
      FilterIntoCSClosure filt(this, g1h, cl);
      bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
      break;
    }
    case OutOfRegionFilterKind: {
      FilterOutOfRegionClosure filt(_hr, cl);
      bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
      break;
    }
    default:
      ShouldNotReachHere();
    }

    // Last object. Need to do dead-obj filtering here too.
    if (!g1h->is_obj_dead(oop(bottom), _hr)) {
#ifndef PRODUCT
      if (G1VerifyMarkingInEvac) {
        VerifyLiveClosure vl_cl(g1h);
        oop(bottom)->oop_iterate(&vl_cl, mr);
      }
#endif
      oop(bottom)->oop_iterate(cl2, mr);
    }
  }
}
246 | |
// Post-compaction reset: discard this region's (now stale) marking
// information in addition to the space's own reset.
void HeapRegion::reset_after_compaction() {
  G1OffsetTableContigSpace::reset_after_compaction();
  // After a compaction the mark bitmap is invalid, so we must
  // treat all objects as being inside the unmarked area.
  zero_marked_bytes();
  init_top_at_mark_start();
}
254 | |
// Factory for the G1-specific dirty-card-to-oop closure over this region.
DirtyCardToOopClosure*
HeapRegion::new_dcto_closure(OopClosure* cl,
                             CardTableModRefBS::PrecisionStyle precision,
                             HeapRegionDCTOC::FilterKind fk) {
  return new HeapRegionDCTOC(G1CollectedHeap::heap(),
                             this, cl, precision, fk);
}
262 | |
// Resets this region to a pristine, unallocated state.  If "par" is
// true the caller is clearing from multiple threads, and the rem set
// and claim value are reset separately (sequentially) later.  If
// "clear_space" is true the underlying space contents are cleared too.
void HeapRegion::hr_clear(bool par, bool clear_space) {
  _humongous_type = NotHumongous;
  _humongous_start_region = NULL;
  _in_collection_set = false;
  _is_gc_alloc_region = false;

  // Age stuff (if parallel, this will be done separately, since it needs
  // to be sequential).
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  set_young_index_in_cset(-1);
  uninstall_surv_rate_group();
  set_young_type(NotYoung);

  // In case it had been the start of a humongous sequence, reset its end.
  set_end(_orig_end);

  if (!par) {
    // If this is parallel, this will be done later.
    HeapRegionRemSet* hrrs = rem_set();
    if (hrrs != NULL) hrrs->clear();
    _claimed = InitialClaimValue;
  }
  zero_marked_bytes();
  set_sort_index(-1);
  // Regions at or above the popular-object boundary lose any
  // "popular" designation.
  if ((uintptr_t)bottom() >= (uintptr_t)g1h->popular_object_boundary())
    set_popular(false);

  _offsets.resize(HeapRegion::GrainWords);
  init_top_at_mark_start();
  if (clear_space) clear(SpaceDecorator::Mangle);
}
295 | |
// <PREDICTION>
// GC efficiency: reclaimable garbage bytes per predicted millisecond
// of (non-young) collection time for this region; used to rank regions
// when choosing collection-set candidates.
void HeapRegion::calc_gc_efficiency() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  _gc_efficiency = (double) garbage_bytes() /
                   g1h->predict_region_elapsed_time_ms(this, false);
}
// </PREDICTION>
303 | |
// Marks this region as the first region of a humongous object; the
// humongous start region of such a sequence is the region itself.
void HeapRegion::set_startsHumongous() {
  _humongous_type = StartsHumongous;
  _humongous_start_region = this;
  assert(end() == _orig_end, "Should be normal before alloc.");
}
309 | |
310 bool HeapRegion::claimHeapRegion(jint claimValue) { | |
311 jint current = _claimed; | |
312 if (current != claimValue) { | |
313 jint res = Atomic::cmpxchg(claimValue, &_claimed, current); | |
314 if (res == current) { | |
315 return true; | |
316 } | |
317 } | |
318 return false; | |
319 } | |
320 | |
// Binary search for the first block start at or above "addr", using
// block_start_careful (which does not update the block offset table).
// Returns end() when the answer would lie at or beyond the region end.
HeapWord* HeapRegion::next_block_start_careful(HeapWord* addr) {
  HeapWord* low = addr;
  HeapWord* high = end();
  while (low < high) {
    size_t diff = pointer_delta(high, low);
    // Must add one below to bias toward the high amount. Otherwise, if
    // "high" were at the desired value, and "low" were one less, we
    // would not converge on "high". This is not symmetric, because
    // we set "high" to a block start, which might be the right one,
    // which we don't do for "low".
    HeapWord* middle = low + (diff+1)/2;
    if (middle == high) return high;
    HeapWord* mid_bs = block_start_careful(middle);
    if (mid_bs < addr) {
      low = middle;
    } else {
      high = mid_bs;
    }
  }
  assert(low == high && low >= addr, "Didn't work.");
  return low;
}
343 | |
// Links "r" after this region on the unclean list (NULL terminates the
// list); "r", if non-NULL, must itself already be flagged as unclean.
void HeapRegion::set_next_on_unclean_list(HeapRegion* r) {
  assert(r == NULL || r->is_on_unclean_list(), "Malformed unclean list.");
  _next_in_special_set = r;
}
348 | |
// Records whether this region is currently on the unclean list.
void HeapRegion::set_on_unclean_list(bool b) {
  _is_on_unclean_list = b;
}
352 | |
// (Re)initializes this region over "mr".  The space itself is told not
// to clear (false); clearing, when requested, is done via hr_clear.
void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  G1OffsetTableContigSpace::initialize(mr, false, mangle_space);
  hr_clear(false/*par*/, clear_space);
}
357 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away | |
358 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list | |
359 #endif // _MSC_VER | |
360 | |
361 | |
// Constructs a region covering "mr".  "is_zeroed" indicates that the
// underlying memory is already zero-filled, so initialize() need not
// clear it again.
HeapRegion::
HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
           MemRegion mr, bool is_zeroed)
  : G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed),
    _next_fk(HeapRegionDCTOC::NoFilterKind),
    _hrs_index(-1),
    _humongous_type(NotHumongous), _humongous_start_region(NULL),
    _in_collection_set(false), _is_gc_alloc_region(false),
    _is_on_free_list(false), _is_on_unclean_list(false),
    _next_in_special_set(NULL), _orig_end(NULL),
    _claimed(InitialClaimValue), _evacuation_failed(false),
    _prev_marked_bytes(0), _next_marked_bytes(0), _sort_index(-1),
    _popularity(NotPopular),
    _young_type(NotYoung), _next_young_region(NULL),
    _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
    _rem_set(NULL), _zfs(NotZeroFilled)
{
  // Remember the natural end so humongous expansion can be undone.
  _orig_end = mr.end();
  // Note that initialize() will set the start of the unmarked area of the
  // region.
  this->initialize(mr, !is_zeroed, SpaceDecorator::Mangle);
  set_top(bottom());
  set_saved_mark();

  _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);

  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
  // In case the region is allocated during a pause, note the top.
  // We haven't done any counting on a brand new region.
  _top_at_conc_mark_count = bottom();
}
393 | |
394 class NextCompactionHeapRegionClosure: public HeapRegionClosure { | |
395 const HeapRegion* _target; | |
396 bool _target_seen; | |
397 HeapRegion* _last; | |
398 CompactibleSpace* _res; | |
399 public: | |
400 NextCompactionHeapRegionClosure(const HeapRegion* target) : | |
401 _target(target), _target_seen(false), _res(NULL) {} | |
402 bool doHeapRegion(HeapRegion* cur) { | |
403 if (_target_seen) { | |
404 if (!cur->isHumongous()) { | |
405 _res = cur; | |
406 return true; | |
407 } | |
408 } else if (cur == _target) { | |
409 _target_seen = true; | |
410 } | |
411 return false; | |
412 } | |
413 CompactibleSpace* result() { return _res; } | |
414 }; | |
415 | |
416 CompactibleSpace* HeapRegion::next_compaction_space() const { | |
417 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
418 // cast away const-ness | |
419 HeapRegion* r = (HeapRegion*) this; | |
420 NextCompactionHeapRegionClosure blk(r); | |
421 g1h->heap_region_iterate_from(r, &blk); | |
422 return blk.result(); | |
423 } | |
424 | |
// Marks this region as a continuation of the humongous object that
// begins in "start", after merging this region into that object.
void HeapRegion::set_continuesHumongous(HeapRegion* start) {
  // The order is important here.
  start->add_continuingHumongousRegion(this);
  _humongous_type = ContinuesHumongous;
  _humongous_start_region = start;
}
431 | |
// Extends the humongous object starting in this region into "cont":
// joins the block-offset info of the two regions, grows the length of
// the (array) object at bottom() to cover the added capacity, and moves
// this region's end and top out to cont's end.
void HeapRegion::add_continuingHumongousRegion(HeapRegion* cont) {
  // Must join the blocks of the current H region seq with the block of the
  // added region.
  offsets()->join_blocks(bottom(), cont->bottom());
  arrayOop obj = (arrayOop)(bottom());
  obj->set_length((int) (obj->length() + cont->capacity()/jintSize));
  set_end(cont->end());
  set_top(cont->end());
}
441 | |
// Records the current top as the saved mark (see set_saved_mark below).
void HeapRegion::save_marks() {
  set_saved_mark();
}
445 | |
446 void HeapRegion::oops_in_mr_iterate(MemRegion mr, OopClosure* cl) { | |
447 HeapWord* p = mr.start(); | |
448 HeapWord* e = mr.end(); | |
449 oop obj; | |
450 while (p < e) { | |
451 obj = oop(p); | |
452 p += obj->oop_iterate(cl); | |
453 } | |
454 assert(p == e, "bad memregion: doesn't end on obj boundary"); | |
455 } | |
456 | |
// Generates the specialized "since save marks" iterators for each
// closure type; each simply delegates to ContiguousSpace.
#define HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
void HeapRegion::oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
  ContiguousSpace::oop_since_save_marks_iterate##nv_suffix(cl); \
}
SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN)
462 | |
463 | |
// Applies "cl" to references in all objects allocated before the saved mark.
void HeapRegion::oop_before_save_marks_iterate(OopClosure* cl) {
  oops_in_mr_iterate(MemRegion(bottom(), saved_mark_word()), cl);
}
467 | |
#ifdef DEBUG
// Debug-build allocation wrapper: checks that, when the collector
// expects allocation regions to be pre-zero-filled, this region has
// reached the "zero fill allocated" state before memory is handed out.
// Fix: removed the unused local "jint state = zero_fill_state();"
// (dead call, unused-variable warning).
HeapWord* HeapRegion::allocate(size_t size) {
  assert(!G1CollectedHeap::heap()->allocs_are_zero_filled() ||
         zero_fill_is_allocated(),
         "When ZF is on, only alloc in ZF'd regions");
  return G1OffsetTableContigSpace::allocate(size);
}
#endif
477 | |
// Installs a new zero-fill state.  Legal only on an empty region (or
// when transitioning to Allocated), and only while holding ZF_mon or
// during an active GC.
void HeapRegion::set_zero_fill_state_work(ZeroFillState zfs) {
  assert(top() == bottom() || zfs == Allocated,
         "Region must be empty, or we must be setting it to allocated.");
  assert(ZF_mon->owned_by_self() ||
         Universe::heap()->is_gc_active(),
         "Must hold the lock or be a full GC to modify.");
  _zfs = zfs;
}
486 | |
// Marks zero-filling of this region as done and, if we hold ZF_mon,
// wakes any threads waiting for the fill to complete.
void HeapRegion::set_zero_fill_complete() {
  set_zero_fill_state_work(ZeroFilled);
  if (ZF_mon->owned_by_self()) {
    ZF_mon->notify_all();
  }
}
493 | |
494 | |
// Lock-acquiring wrapper around ensure_zero_filled_locked().
void HeapRegion::ensure_zero_filled() {
  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  ensure_zero_filled_locked();
}
499 | |
// Ensures this region is zero-filled, with ZF_mon held on entry and
// exit.  The lock may be released temporarily while the (long) fill
// runs; at a safepoint we may also take over a fill that the
// concurrent ZF thread had in progress.
void HeapRegion::ensure_zero_filled_locked() {
  assert(ZF_mon->owned_by_self(), "Precondition");
  bool should_ignore_zf = SafepointSynchronize::is_at_safepoint();
  assert(should_ignore_zf || Heap_lock->is_locked(),
         "Either we're in a GC or we're allocating a region.");
  switch (zero_fill_state()) {
  case HeapRegion::NotZeroFilled:
    set_zero_fill_in_progress(Thread::current());
    {
      // Drop the lock while doing the fill itself.
      ZF_mon->unlock();
      Copy::fill_to_words(bottom(), capacity()/HeapWordSize);
      ZF_mon->lock_without_safepoint_check();
    }
    // A trap.
    guarantee(zero_fill_state() == HeapRegion::ZeroFilling
              && zero_filler() == Thread::current(),
              "AHA! Tell Dave D if you see this...");
    set_zero_fill_complete();
    // gclog_or_tty->print_cr("Did sync ZF.");
    ConcurrentZFThread::note_sync_zfs();
    break;
  case HeapRegion::ZeroFilling:
    if (should_ignore_zf) {
      // We can "break" the lock and take over the work.
      Copy::fill_to_words(bottom(), capacity()/HeapWordSize);
      set_zero_fill_complete();
      ConcurrentZFThread::note_sync_zfs();
      break;
    } else {
      ConcurrentZFThread::wait_for_ZF_completed(this);
    }
    // Intentional fall-through: after the wait above completes, the
    // region is zero filled, so there is nothing further to do.
  case HeapRegion::ZeroFilled:
    // Nothing to do.
    break;
  case HeapRegion::Allocated:
    guarantee(false, "Should not call on allocated regions.");
  }
  assert(zero_fill_state() == HeapRegion::ZeroFilled, "Post");
}
539 | |
// Applies "cl" to every live object intersecting "mr", stopping early
// at an unparseable point (an object whose klass field is still NULL,
// i.e. one mid-allocation) or when the closure requests an abort.  In
// either case the address where iteration stopped is returned;
// otherwise NULL is returned after processing all of "mr".
HeapWord*
HeapRegion::object_iterate_mem_careful(MemRegion mr,
                                       ObjectClosure* cl) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  // We used to use "block_start_careful" here. But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(mr.start());
  mr = mr.intersection(used_region());
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  assert(cur <= mr.start()
         && (oop(cur)->klass() == NULL ||
             cur + oop(cur)->size() > mr.start()),
         "postcondition of block_start");
  oop obj;
  while (cur < mr.end()) {
    obj = oop(cur);
    if (obj->klass() == NULL) {
      // Ran into an unparseable point.
      return cur;
    } else if (!g1h->is_obj_dead(obj)) {
      cl->do_object(obj);
    }
    if (cl->abort()) return cur;
    // The check above must occur before the operation below, since an
    // abort might invalidate the "size" operation.
    cur += obj->size();
  }
  return NULL;
}
571 | |
// Applies "cl" to references in live objects intersecting "mr"
// (typically one card's span), being careful about unparseable points.
// During a stop-world GC, scanning is capped at the region's saved
// mark so we never look into an in-progress GC LAB.  Returns the
// address of an unparseable point if one is hit, otherwise NULL.
HeapWord*
HeapRegion::
oops_on_card_seq_iterate_careful(MemRegion mr,
                                 FilterOutOfRegionClosure* cl) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we're within a stop-world GC, then we might look at a card in a
  // GC alloc region that extends onto a GC LAB, which may not be
  // parseable. Stop such at the "saved_mark" of the region.
  if (G1CollectedHeap::heap()->is_gc_active()) {
    mr = mr.intersection(used_region_at_save_marks());
  } else {
    mr = mr.intersection(used_region());
  }
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  // We used to use "block_start_careful" here. But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(mr.start());
  assert(cur <= mr.start(), "Postcondition");

  // Step forward until we reach the object that extends onto mr.start().
  while (cur <= mr.start()) {
    if (oop(cur)->klass() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }
    // Otherwise...
    int sz = oop(cur)->size();
    if (cur + sz > mr.start()) break;
    // Otherwise, go on.
    cur = cur + sz;
  }
  oop obj;
  obj = oop(cur);
  // If we finish this loop...
  assert(cur <= mr.start()
         && obj->klass() != NULL
         && cur + obj->size() > mr.start(),
         "Loop postcondition");
  // First (straddling) object: iterate only the part inside mr.
  if (!g1h->is_obj_dead(obj)) {
    obj->oop_iterate(cl, mr);
  }

  // Remaining objects that start inside mr.
  HeapWord* next;
  while (cur < mr.end()) {
    obj = oop(cur);
    if (obj->klass() == NULL) {
      // Ran into an unparseable point.
      return cur;
    };
    // Otherwise:
    next = (cur + obj->size());
    if (!g1h->is_obj_dead(obj)) {
      if (next < mr.end()) {
        obj->oop_iterate(cl);
      } else {
        // this obj spans the boundary. If it's an array, stop at the
        // boundary.
        if (obj->is_objArray()) {
          obj->oop_iterate(cl, mr);
        } else {
          obj->oop_iterate(cl);
        }
      }
    }
    cur = next;
  }
  return NULL;
}
642 | |
void HeapRegion::print() const { print_on(gclog_or_tty); }
// One-line region summary: humongous kind (HS/HC), collection-set (CS)
// or GC-alloc (A) status, young kind (SO/SU/Y), emptiness (F), the GC
// time stamp, then the space's bounds.
// NOTE(review): the padding strings below appear whitespace-collapsed
// in this listing; the original column widths may have been wider —
// verify against the repository before relying on alignment.
void HeapRegion::print_on(outputStream* st) const {
  if (isHumongous()) {
    if (startsHumongous())
      st->print(" HS");
    else
      st->print(" HC");
  } else {
    st->print(" ");
  }
  if (in_collection_set())
    st->print(" CS");
  else if (is_gc_alloc_region())
    st->print(" A ");
  else
    st->print(" ");
  if (is_young())
    st->print(is_scan_only() ? " SO" : (is_survivor() ? " SU" : " Y "));
  else
    st->print(" ");
  if (is_empty())
    st->print(" F");
  else
    st->print(" ");
  st->print(" %d", _gc_time_stamp);
  G1OffsetTableContigSpace::print_on(st);
}
670 | |
// Sampling intervals for verification: 0 ==> verify every object.
#define OBJ_SAMPLE_INTERVAL 0
#define BLOCK_SAMPLE_INTERVAL 100

// This really ought to be commoned up into OffsetTableContigSpace somehow.
// We would need a mechanism to make that code skip dead objects.

// Walks every object in [bottom, top): cross-checks the block offset
// table every BLOCK_SAMPLE_INTERVAL objects, and verifies sampled live
// objects plus their reference fields (via VerifyLiveClosure) every
// OBJ_SAMPLE_INTERVAL objects.  Guarantees fire on any failure.
void HeapRegion::verify(bool allow_dirty) const {
  G1CollectedHeap* g1 = G1CollectedHeap::heap();
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  int objs = 0;
  int blocks = 0;
  VerifyLiveClosure vl_cl(g1);
  while (p < top()) {
    size_t size = oop(p)->size();
    if (blocks == BLOCK_SAMPLE_INTERVAL) {
      // The BOT must map the middle of this object back to its start.
      guarantee(p == block_start_const(p + (size/2)),
                "check offset computation");
      blocks = 0;
    } else {
      blocks++;
    }
    if (objs == OBJ_SAMPLE_INTERVAL) {
      oop obj = oop(p);
      if (!g1->is_obj_dead(obj, this)) {
        obj->verify();
        vl_cl.set_containing_obj(obj);
        obj->oop_iterate(&vl_cl);
        // Bail out once enough failures have been gathered.
        if (G1MaxVerifyFailures >= 0
            && vl_cl.n_failures() >= G1MaxVerifyFailures) break;
      }
      objs = 0;
    } else {
      objs++;
    }
    prev_p = p;
    p += size;
  }
  // The unallocated tail [top, end) must map back to top in the BOT.
  HeapWord* rend = end();
  HeapWord* rtop = top();
  if (rtop < rend) {
    guarantee(block_start_const(rtop + (rend - rtop) / 2) == rtop,
              "check offset computation");
  }
  if (vl_cl.failures()) {
    gclog_or_tty->print_cr("Heap:");
    G1CollectedHeap::heap()->print();
    gclog_or_tty->print_cr("");
  }
  if (G1VerifyConcMark &&
      G1VerifyConcMarkPrintReachable &&
      vl_cl.failures()) {
    g1->concurrent_mark()->print_prev_bitmap_reachable();
  }
  guarantee(!vl_cl.failures(), "should not have had any failures");
  guarantee(p == top(), "end of last object must match end of space");
}
728 | |
729 // G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go | |
730 // away eventually. | |
731 | |
// Initializes the space over "mr" and resets the block offset table to
// match; optionally clears (and mangles) the space contents.
void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  // false ==> we'll do the clearing if there's clearing to be done.
  ContiguousSpace::initialize(mr, false, mangle_space);
  _offsets.zero_bottom_entry();
  _offsets.initialize_threshold();
  if (clear_space) clear(mangle_space);
}
739 | |
// Clears the space and resets the block offset table to agree with it.
void G1OffsetTableContigSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  _offsets.zero_bottom_entry();
  _offsets.initialize_threshold();
}
745 | |
// Keeps the block offset table's bottom in sync with the space's.
void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}
750 | |
// Resizes the block offset table to cover the space's new extent.
void G1OffsetTableContigSpace::set_end(HeapWord* new_end) {
  Space::set_end(new_end);
  _offsets.resize(new_end - bottom());
}
755 | |
// Prints the space's short name followed by its
// [bottom, top, BOT threshold, end) addresses.
void G1OffsetTableContigSpace::print() const {
  print_short();
  gclog_or_tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
                INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                bottom(), top(), _offsets.threshold(), end());
}
762 | |
// Delegates to the block offset table: returns the first address above
// which allocations must update the BOT.
HeapWord* G1OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}
766 | |
// Records block [start, end) in the BOT and returns the new threshold.
HeapWord* G1OffsetTableContigSpace::cross_threshold(HeapWord* start,
                                                    HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}
772 | |
773 HeapWord* G1OffsetTableContigSpace::saved_mark_word() const { | |
774 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
775 assert( _gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant" ); | |
776 if (_gc_time_stamp < g1h->get_gc_time_stamp()) | |
777 return top(); | |
778 else | |
779 return ContiguousSpace::saved_mark_word(); | |
780 } | |
781 | |
// Saves the current top as this space's mark and stamps the space with
// the heap's current GC time stamp (once per GC).  See saved_mark_word()
// for how the pair is interpreted by concurrent readers.
void G1OffsetTableContigSpace::set_saved_mark() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();

  if (_gc_time_stamp < curr_gc_time_stamp) {
    // The order of these is important, as another thread might be
    // about to start scanning this region. If it does so after
    // set_saved_mark and before _gc_time_stamp = ..., then the latter
    // will be false, and it will pick up top() as the high water mark
    // of region. If it does so after _gc_time_stamp = ..., then it
    // will pick up the right saved_mark_word() as the high water mark
    // of the region. Either way, the behaviour will be correct.
    ContiguousSpace::set_saved_mark();
    _gc_time_stamp = curr_gc_time_stamp;
    // Publish the mark/stamp pair to concurrently scanning threads.
    OrderAccess::fence();
  }
}
799 | |
// Sets up the space over "mr" with its slice of the shared block
// offset array; the space starts with a zero GC time stamp.
// "is_zeroed" indicates the memory is already zeroed, so initialize()
// need not clear it.
G1OffsetTableContigSpace::
G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr, bool is_zeroed) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
  _gc_time_stamp(0)
{
  _offsets.set_space(this);
  initialize(mr, !is_zeroed, SpaceDecorator::Mangle);
}
810 | |
811 size_t RegionList::length() { | |
812 size_t len = 0; | |
813 HeapRegion* cur = hd(); | |
814 DEBUG_ONLY(HeapRegion* last = NULL); | |
815 while (cur != NULL) { | |
816 len++; | |
817 DEBUG_ONLY(last = cur); | |
818 cur = get_next(cur); | |
819 } | |
820 assert(last == tl(), "Invariant"); | |
821 return len; | |
822 } | |
823 | |
// Pushes "r" onto the front of the list, fixing the tail pointer if
// the list was previously empty.
void RegionList::insert_before_head(HeapRegion* r) {
  assert(well_formed(), "Inv");
  set_next(r, hd());
  _hd = r;
  _sz++;
  if (tl() == NULL) _tl = r;
  assert(well_formed(), "Inv");
}
832 | |
// Splices the whole of "new_list" in front of this list; an empty
// new_list leaves this list unchanged.  (new_list itself is not
// emptied here.)
void RegionList::prepend_list(RegionList* new_list) {
  assert(well_formed(), "Precondition");
  assert(new_list->well_formed(), "Precondition");
  HeapRegion* new_tl = new_list->tl();
  if (new_tl != NULL) {
    set_next(new_tl, hd());
    _hd = new_list->hd();
    _sz += new_list->sz();
    // If this list was empty, the incoming tail becomes our tail.
    if (tl() == NULL) _tl = new_list->tl();
  } else {
    assert(new_list->hd() == NULL && new_list->sz() == 0, "Inv");
  }
  assert(well_formed(), "Inv");
}
847 | |
848 void RegionList::delete_after(HeapRegion* r) { | |
849 assert(well_formed(), "Precondition"); | |
850 HeapRegion* next = get_next(r); | |
851 assert(r != NULL, "Precondition"); | |
852 HeapRegion* next_tl = get_next(next); | |
853 set_next(r, next_tl); | |
854 dec_sz(); | |
855 if (next == tl()) { | |
856 assert(next_tl == NULL, "Inv"); | |
857 _tl = r; | |
858 } | |
859 assert(well_formed(), "Inv"); | |
860 } | |
861 | |
862 HeapRegion* RegionList::pop() { | |
863 assert(well_formed(), "Inv"); | |
864 HeapRegion* res = hd(); | |
865 if (res != NULL) { | |
866 _hd = get_next(res); | |
867 _sz--; | |
868 set_next(res, NULL); | |
869 if (sz() == 0) _tl = NULL; | |
870 } | |
871 assert(well_formed(), "Inv"); | |
872 return res; | |
873 } |