Mercurial > hg > graal-jvmci-8
comparison src/share/vm/gc_implementation/g1/heapRegion.cpp @ 12355:cefad50507d8
Merge with hs25-b53
author | Gilles Duboscq <duboscq@ssw.jku.at> |
---|---|
date | Fri, 11 Oct 2013 10:38:03 +0200 |
parents | 3cce976666d9 190899198332 |
children | 096c224171c4 |
comparison
equal
deleted
inserted
replaced
12058:ccb4f2af2319 | 12355:cefad50507d8 |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. | 2 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. |
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | 4 * |
5 * This code is free software; you can redistribute it and/or modify it | 5 * This code is free software; you can redistribute it and/or modify it |
6 * under the terms of the GNU General Public License version 2 only, as | 6 * under the terms of the GNU General Public License version 2 only, as |
7 * published by the Free Software Foundation. | 7 * published by the Free Software Foundation. |
21 * questions. | 21 * questions. |
22 * | 22 * |
23 */ | 23 */ |
24 | 24 |
25 #include "precompiled.hpp" | 25 #include "precompiled.hpp" |
26 #include "code/nmethod.hpp" | |
26 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp" | 27 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp" |
27 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" | 28 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" |
28 #include "gc_implementation/g1/g1OopClosures.inline.hpp" | 29 #include "gc_implementation/g1/g1OopClosures.inline.hpp" |
29 #include "gc_implementation/g1/heapRegion.inline.hpp" | 30 #include "gc_implementation/g1/heapRegion.inline.hpp" |
30 #include "gc_implementation/g1/heapRegionRemSet.hpp" | 31 #include "gc_implementation/g1/heapRegionRemSet.hpp" |
47 _hr(hr), _fk(fk), _g1(g1) { } | 48 _hr(hr), _fk(fk), _g1(g1) { } |
48 | 49 |
49 FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r, | 50 FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r, |
50 OopClosure* oc) : | 51 OopClosure* oc) : |
51 _r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { } | 52 _r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { } |
53 | |
54 template<class ClosureType> | |
55 HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h, | |
56 HeapRegion* hr, | |
57 HeapWord* cur, HeapWord* top) { | |
58 oop cur_oop = oop(cur); | |
59 int oop_size = cur_oop->size(); | |
60 HeapWord* next_obj = cur + oop_size; | |
61 while (next_obj < top) { | |
62 // Keep filtering the remembered set. | |
63 if (!g1h->is_obj_dead(cur_oop, hr)) { | |
64 // Bottom lies entirely below top, so we can call the | |
65 // non-memRegion version of oop_iterate below. | |
66 cur_oop->oop_iterate(cl); | |
67 } | |
68 cur = next_obj; | |
69 cur_oop = oop(cur); | |
70 oop_size = cur_oop->size(); | |
71 next_obj = cur + oop_size; | |
72 } | |
73 return cur; | |
74 } | |
75 | |
// Applies closure "cl" (wrapped by the filter selected via _fk) to the
// live objects intersecting "mr". The first object may start before
// mr.start() and the last may extend past mr.end(), so those two use
// the MemRegion-bounded oop_iterate; interior objects use the plain
// version via walk_mem_region_loop.
void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr,
                                              HeapWord* bottom,
                                              HeapWord* top,
                                              ExtendedOopClosure* cl) {
  G1CollectedHeap* g1h = _g1;
  int oop_size;
  ExtendedOopClosure* cl2 = NULL;

  // Both filter closures are constructed up front; only the one picked
  // by the switch below is used, but both must stay live on the stack
  // for the duration of this walk.
  FilterIntoCSClosure intoCSFilt(this, g1h, cl);
  FilterOutOfRegionClosure outOfRegionFilt(_hr, cl);

  switch (_fk) {
  case NoFilterKind: cl2 = cl; break;
  case IntoCSFilterKind: cl2 = &intoCSFilt; break;
  case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break;
  default: ShouldNotReachHere();
  }

  // Start filtering what we add to the remembered set. If the object is
  // not considered dead, either because it is marked (in the mark bitmap)
  // or it was allocated after marking finished, then we add it. Otherwise
  // we can safely ignore the object.
  if (!g1h->is_obj_dead(oop(bottom), _hr)) {
    oop_size = oop(bottom)->oop_iterate(cl2, mr);
  } else {
    oop_size = oop(bottom)->size();
  }

  bottom += oop_size;

  if (bottom < top) {
    // We replicate the loop below for several kinds of possible filters.
    // (The loop is templated so each filter kind gets a statically
    // dispatched instantiation instead of a virtual call per object.)
    switch (_fk) {
    case NoFilterKind:
      bottom = walk_mem_region_loop(cl, g1h, _hr, bottom, top);
      break;

    case IntoCSFilterKind: {
      FilterIntoCSClosure filt(this, g1h, cl);
      bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
      break;
    }

    case OutOfRegionFilterKind: {
      FilterOutOfRegionClosure filt(_hr, cl);
      bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
      break;
    }

    default:
      ShouldNotReachHere();
    }

    // Last object. Need to do dead-obj filtering here too.
    if (!g1h->is_obj_dead(oop(bottom), _hr)) {
      oop(bottom)->oop_iterate(cl2, mr);
    }
  }
}
135 | |
// Minimum region size; we won't go lower than that.
// We might want to decrease this in the future, to deal with small
// heaps a bit more efficiently.
#define MIN_REGION_SIZE ( 1024 * 1024 )

// Maximum region size; we don't go higher than that. There's a good
// reason for having an upper bound. We don't want regions to get too
// large, otherwise cleanup's effectiveness would decrease as there
// will be fewer opportunities to find totally empty regions after
// marking.
#define MAX_REGION_SIZE ( 32 * 1024 * 1024 )

// The automatic region size calculation will try to have around this
// many regions in the heap (based on the min heap size).
#define TARGET_REGION_NUMBER 2048

// Upper bound on the size ergonomics will pick for a heap region.
size_t HeapRegion::max_region_size() {
  return (size_t)MAX_REGION_SIZE;
}
155 | |
// One-time computation of the global region-size constants
// (GrainBytes, GrainWords, CardsPerRegion and their logs) from either
// the user-specified G1HeapRegionSize flag or an ergonomic value based
// on the average of the initial and max heap sizes. The guarantees
// below enforce that this runs exactly once.
void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
  uintx region_size = G1HeapRegionSize;
  if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
    // Flag not set by the user: derive the size ergonomically, aiming
    // for roughly TARGET_REGION_NUMBER regions.
    size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
    region_size = MAX2(average_heap_size / TARGET_REGION_NUMBER,
                       (uintx) MIN_REGION_SIZE);
  }

  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = ((uintx)1 << region_size_log);

  // Now make sure that we don't go over or under our limits.
  if (region_size < MIN_REGION_SIZE) {
    region_size = MIN_REGION_SIZE;
  } else if (region_size > MAX_REGION_SIZE) {
    region_size = MAX_REGION_SIZE;
  }

  if (region_size != G1HeapRegionSize) {
    // Update the flag to make sure that PrintFlagsFinal logs the correct value
    FLAG_SET_ERGO(uintx, G1HeapRegionSize, region_size);
  }

  // And recalculate the log.
  region_size_log = log2_long((jlong) region_size);

  // Now, set up the globals.
  guarantee(LogOfHRGrainBytes == 0, "we should only set it once");
  LogOfHRGrainBytes = region_size_log;

  guarantee(LogOfHRGrainWords == 0, "we should only set it once");
  LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize;

  guarantee(GrainBytes == 0, "we should only set it once");
  // The cast to int is safe, given that we've bounded region_size by
  // MIN_REGION_SIZE and MAX_REGION_SIZE.
  GrainBytes = (size_t)region_size;

  guarantee(GrainWords == 0, "we should only set it once");
  GrainWords = GrainBytes >> LogHeapWordSize;
  guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity");

  guarantee(CardsPerRegion == 0, "we should only set it once");
  CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
}
204 | |
// Resets per-region marking state after a full-GC compaction, on top
// of the base space reset.
void HeapRegion::reset_after_compaction() {
  G1OffsetTableContigSpace::reset_after_compaction();
  // After a compaction the mark bitmap is invalid, so we must
  // treat all objects as being inside the unmarked area.
  zero_marked_bytes();
  init_top_at_mark_start();
}
212 | |
// Resets this (non-humongous) region to a pristine, unused state.
// "par" indicates a parallel caller, in which case the remembered set
// and claim value are cleared later by that parallel machinery;
// "clear_space" additionally clears (and mangles) the underlying space.
void HeapRegion::hr_clear(bool par, bool clear_space) {
  assert(_humongous_type == NotHumongous,
         "we should have already filtered out humongous regions");
  assert(_humongous_start_region == NULL,
         "we should have already filtered out humongous regions");
  assert(_end == _orig_end,
         "we should have already filtered out humongous regions");

  _in_collection_set = false;

  set_young_index_in_cset(-1);
  uninstall_surv_rate_group();
  set_young_type(NotYoung);
  reset_pre_dummy_top();

  if (!par) {
    // If this is parallel, this will be done later.
    HeapRegionRemSet* hrrs = rem_set();
    hrrs->clear();
    _claimed = InitialClaimValue;
  }
  zero_marked_bytes();

  _offsets.resize(HeapRegion::GrainWords);
  init_top_at_mark_start();
  if (clear_space) clear(SpaceDecorator::Mangle);
}
240 | |
241 void HeapRegion::par_clear() { | |
242 assert(used() == 0, "the region should have been already cleared"); | |
243 assert(capacity() == HeapRegion::GrainBytes, "should be back to normal"); | |
244 HeapRegionRemSet* hrrs = rem_set(); | |
245 hrrs->clear(); | |
246 CardTableModRefBS* ct_bs = | |
247 (CardTableModRefBS*)G1CollectedHeap::heap()->barrier_set(); | |
248 ct_bs->clear(MemRegion(bottom(), end())); | |
249 } | |
250 | |
// Recomputes _gc_efficiency: reclaimable bytes per predicted
// millisecond of evacuation time. Used to rank candidate regions.
void HeapRegion::calc_gc_efficiency() {
  // GC efficiency is the ratio of how much space would be
  // reclaimed over how long we predict it would take to reclaim it.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  // Retrieve a prediction of the elapsed time for this region for
  // a mixed gc because the region will only be evacuated during a
  // mixed gc.
  double region_elapsed_time_ms =
    g1p->predict_region_elapsed_time_ms(this, false /* for_young_gc */);
  _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
}
264 | |
// Converts this empty region into the first ("starts humongous")
// region of a humongous object. new_top is the object's end; new_end
// is the end of the last region in the humongous series (may be past
// this region's original end).
void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
  assert(!isHumongous(), "sanity / pre-condition");
  assert(end() == _orig_end,
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(bottom() <= new_top && new_top <= new_end, "pre-condition");

  _humongous_type = StartsHumongous;
  _humongous_start_region = this;

  set_end(new_end);
  _offsets.set_for_starts_humongous(new_top);
}
278 | |
// Marks this empty region as a "continues humongous" tail region of
// the humongous object that starts in first_hr.
void HeapRegion::set_continuesHumongous(HeapRegion* first_hr) {
  assert(!isHumongous(), "sanity / pre-condition");
  assert(end() == _orig_end,
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(first_hr->startsHumongous(), "pre-condition");

  _humongous_type = ContinuesHumongous;
  _humongous_start_region = first_hr;
}
289 | |
// Reverts a humongous region back to a normal one, restoring the
// original end for a "starts humongous" region (whose end/top may have
// been extended into follow-on regions).
void HeapRegion::set_notHumongous() {
  assert(isHumongous(), "pre-condition");

  if (startsHumongous()) {
    assert(top() <= end(), "pre-condition");
    // Restoring _orig_end may pull end() back below top() when the
    // humongous object spilled into later regions; clamp top to end.
    set_end(_orig_end);
    if (top() > end()) {
      // at least one "continues humongous" region after it
      set_top(end());
    }
  } else {
    // continues humongous
    assert(end() == _orig_end, "sanity");
  }

  assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
  _humongous_type = NotHumongous;
  _humongous_start_region = NULL;
}
309 | |
// Attempts to claim this region with claimValue via a single CAS.
// Returns true only if this thread installed the value; deliberately
// makes no retry, so a racing claimer (or an already-set value) yields
// false.
bool HeapRegion::claimHeapRegion(jint claimValue) {
  jint current = _claimed;
  if (current != claimValue) {
    jint res = Atomic::cmpxchg(claimValue, &_claimed, current);
    if (res == current) {
      return true;
    }
  }
  return false;
}
320 | |
// Finds, by binary search over block starts, the first block boundary
// at or after "addr" (up to end()). Uses block_start_careful, so it
// does not update the block-offset table.
HeapWord* HeapRegion::next_block_start_careful(HeapWord* addr) {
  HeapWord* low = addr;
  HeapWord* high = end();
  while (low < high) {
    size_t diff = pointer_delta(high, low);
    // Must add one below to bias toward the high amount. Otherwise, if
    // "high" were at the desired value, and "low" were one less, we
    // would not converge on "high". This is not symmetric, because
    // we set "high" to a block start, which might be the right one,
    // which we don't do for "low".
    HeapWord* middle = low + (diff+1)/2;
    if (middle == high) return high;
    HeapWord* mid_bs = block_start_careful(middle);
    if (mid_bs < addr) {
      low = middle;
    } else {
      high = mid_bs;
    }
  }
  assert(low == high && low >= addr, "Didn't work.");
  return low;
}
343 | |
#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER


// Constructs a region covering "mr" with index "hrs_index" in the
// region sequence, initialized to the empty, non-humongous,
// not-in-collection-set state.
HeapRegion::HeapRegion(uint hrs_index,
                       G1BlockOffsetSharedArray* sharedOffsetArray,
                       MemRegion mr) :
    G1OffsetTableContigSpace(sharedOffsetArray, mr),
    _hrs_index(hrs_index),
    _humongous_type(NotHumongous), _humongous_start_region(NULL),
    _in_collection_set(false),
    _next_in_special_set(NULL), _orig_end(NULL),
    _claimed(InitialClaimValue), _evacuation_failed(false),
    _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
    _young_type(NotYoung), _next_young_region(NULL),
    _next_dirty_cards_region(NULL), _next(NULL), _pending_removal(false),
#ifdef ASSERT
    _containing_set(NULL),
#endif // ASSERT
     _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
    _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
    _predicted_bytes_to_copy(0)
{
  _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
  // Remember the un-extended end so humongous conversion can be undone.
  _orig_end = mr.end();
  // Note that initialize() will set the start of the unmarked area of the
  // region.
  hr_clear(false /*par*/, false /*clear_space*/);
  set_top(bottom());
  set_saved_mark();

  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
}
378 | |
379 CompactibleSpace* HeapRegion::next_compaction_space() const { | |
380 // We're not using an iterator given that it will wrap around when | |
381 // it reaches the last region and this is not what we want here. | |
382 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
383 uint index = hrs_index() + 1; | |
384 while (index < g1h->n_regions()) { | |
385 HeapRegion* hr = g1h->region_at(index); | |
386 if (!hr->isHumongous()) { | |
387 return hr; | |
388 } | |
389 index += 1; | |
390 } | |
391 return NULL; | |
392 } | |
393 | |
// Records the current top as the saved mark (the "allocated so far"
// boundary used by since-save-marks iteration).
void HeapRegion::save_marks() {
  set_saved_mark();
}
397 | |
398 void HeapRegion::oops_in_mr_iterate(MemRegion mr, ExtendedOopClosure* cl) { | |
399 HeapWord* p = mr.start(); | |
400 HeapWord* e = mr.end(); | |
401 oop obj; | |
402 while (p < e) { | |
403 obj = oop(p); | |
404 p += obj->oop_iterate(cl); | |
405 } | |
406 assert(p == e, "bad memregion: doesn't end on obj boundary"); | |
407 } | |
408 | |
// Generates the oop_since_save_marks_iterate family of methods (one
// per specialized closure type), each simply delegating to the
// ContiguousSpace implementation.
#define HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
void HeapRegion::oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
ContiguousSpace::oop_since_save_marks_iterate##nv_suffix(cl); \
}
SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN)
414 | |
415 | |
// Applies "cl" to all oops in objects allocated before the saved mark
// (i.e. in [bottom, saved_mark_word)).
void HeapRegion::oop_before_save_marks_iterate(ExtendedOopClosure* cl) {
  oops_in_mr_iterate(MemRegion(bottom(), saved_mark_word()), cl);
}
419 | |
// Prepares this region's TAMS/marked-bytes state before removing
// self-forwarding pointers after an evacuation failure. The NTAMS
// treatment depends on which (if any) marking phase is in progress.
void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
                                                    bool during_conc_mark) {
  // We always recreate the prev marking info and we'll explicitly
  // mark all objects we find to be self-forwarded on the prev
  // bitmap. So all objects need to be below PTAMS.
  _prev_top_at_mark_start = top();
  _prev_marked_bytes = 0;

  if (during_initial_mark) {
    // During initial-mark, we'll also explicitly mark all objects
    // we find to be self-forwarded on the next bitmap. So all
    // objects need to be below NTAMS.
    _next_top_at_mark_start = top();
    _next_marked_bytes = 0;
  } else if (during_conc_mark) {
    // During concurrent mark, all objects in the CSet (including
    // the ones we find to be self-forwarded) are implicitly live.
    // So all objects need to be above NTAMS.
    _next_top_at_mark_start = bottom();
    _next_marked_bytes = 0;
  }
}
442 | |
443 void HeapRegion::note_self_forwarding_removal_end(bool during_initial_mark, | |
444 bool during_conc_mark, | |
445 size_t marked_bytes) { | |
446 assert(0 <= marked_bytes && marked_bytes <= used(), | |
447 err_msg("marked: "SIZE_FORMAT" used: "SIZE_FORMAT, | |
448 marked_bytes, used())); | |
449 _prev_marked_bytes = marked_bytes; | |
450 } | |
451 | |
// Applies "cl" to each live object intersecting "mr", stopping early
// at an unparseable object (klass not yet installed) or when the
// closure aborts; in either case the address of that object is
// returned. Returns NULL on a complete, uninterrupted walk.
HeapWord*
HeapRegion::object_iterate_mem_careful(MemRegion mr,
                                       ObjectClosure* cl) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  // We used to use "block_start_careful" here. But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(mr.start());
  mr = mr.intersection(used_region());
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  assert(cur <= mr.start()
         && (oop(cur)->klass_or_null() == NULL ||
             cur + oop(cur)->size() > mr.start()),
         "postcondition of block_start");
  oop obj;
  while (cur < mr.end()) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    } else if (!g1h->is_obj_dead(obj)) {
      cl->do_object(obj);
    }
    if (cl->abort()) return cur;
    // The check above must occur before the operation below, since an
    // abort might invalidate the "size" operation.
    cur += obj->size();
  }
  return NULL;
}
483 | |
// Applies "cl" to the oops of the live objects on the card covered by
// "mr", being careful about concurrently-allocated (unparseable)
// objects. Returns NULL on success, or the address of an unparseable
// object at which the scan had to stop (so the caller can retry the
// card later). May clean the card (card_ptr) once it is known not to
// belong to a young region.
HeapWord*
HeapRegion::
oops_on_card_seq_iterate_careful(MemRegion mr,
                                 FilterOutOfRegionClosure* cl,
                                 bool filter_young,
                                 jbyte* card_ptr) {
  // Currently, we should only have to clean the card if filter_young
  // is true and vice versa.
  if (filter_young) {
    assert(card_ptr != NULL, "pre-condition");
  } else {
    assert(card_ptr == NULL, "pre-condition");
  }
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we're within a stop-world GC, then we might look at a card in a
  // GC alloc region that extends onto a GC LAB, which may not be
  // parseable. Stop such at the "saved_mark" of the region.
  if (g1h->is_gc_active()) {
    mr = mr.intersection(used_region_at_save_marks());
  } else {
    mr = mr.intersection(used_region());
  }
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  // The intersection of the incoming mr (for the card) and the
  // allocated part of the region is non-empty. This implies that
  // we have actually allocated into this region. The code in
  // G1CollectedHeap.cpp that allocates a new region sets the
  // is_young tag on the region before allocating. Thus we
  // safely know if this region is young.
  if (is_young() && filter_young) {
    return NULL;
  }

  assert(!is_young(), "check value of filter_young");

  // We can only clean the card here, after we make the decision that
  // the card is not young. And we only clean the card if we have been
  // asked to (i.e., card_ptr != NULL).
  if (card_ptr != NULL) {
    *card_ptr = CardTableModRefBS::clean_card_val();
    // We must complete this write before we do any of the reads below.
    OrderAccess::storeload();
  }

  // Cache the boundaries of the memory region in some const locals
  HeapWord* const start = mr.start();
  HeapWord* const end = mr.end();

  // We used to use "block_start_careful" here. But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(start);
  assert(cur <= start, "Postcondition");

  oop obj;

  // Advance from the block start to the object that actually covers
  // "start"; bail out if we hit an unparseable object on the way.
  HeapWord* next = cur;
  while (next <= start) {
    cur = next;
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }
    // Otherwise...
    next = (cur + obj->size());
  }

  // If we finish the above loop...We have a parseable object that
  // begins on or before the start of the memory region, and ends
  // inside or spans the entire region.

  assert(obj == oop(cur), "sanity");
  assert(cur <= start &&
         obj->klass_or_null() != NULL &&
         (cur + obj->size()) > start,
         "Loop postcondition");

  // The first object may start before mr, so it uses the mr-bounded
  // oop_iterate overload.
  if (!g1h->is_obj_dead(obj)) {
    obj->oop_iterate(cl, mr);
  }

  while (cur < end) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    };

    // Otherwise:
    next = (cur + obj->size());

    if (!g1h->is_obj_dead(obj)) {
      if (next < end || !obj->is_objArray()) {
        // This object either does not span the MemRegion
        // boundary, or if it does it's not an array.
        // Apply closure to whole object.
        obj->oop_iterate(cl);
      } else {
        // This obj is an array that spans the boundary.
        // Stop at the boundary.
        obj->oop_iterate(cl, mr);
      }
    }
    cur = next;
  }
  return NULL;
}
594 | |
595 // Code roots support | |
596 | |
597 void HeapRegion::add_strong_code_root(nmethod* nm) { | |
598 HeapRegionRemSet* hrrs = rem_set(); | |
599 hrrs->add_strong_code_root(nm); | |
600 } | |
601 | |
602 void HeapRegion::remove_strong_code_root(nmethod* nm) { | |
603 HeapRegionRemSet* hrrs = rem_set(); | |
604 hrrs->remove_strong_code_root(nm); | |
605 } | |
606 | |
607 void HeapRegion::migrate_strong_code_roots() { | |
608 assert(in_collection_set(), "only collection set regions"); | |
609 assert(!isHumongous(), "not humongous regions"); | |
610 | |
611 HeapRegionRemSet* hrrs = rem_set(); | |
612 hrrs->migrate_strong_code_roots(); | |
613 } | |
614 | |
615 void HeapRegion::strong_code_roots_do(CodeBlobClosure* blk) const { | |
616 HeapRegionRemSet* hrrs = rem_set(); | |
617 hrrs->strong_code_roots_do(blk); | |
618 } | |
619 | |
620 class VerifyStrongCodeRootOopClosure: public OopClosure { | |
621 const HeapRegion* _hr; | |
622 nmethod* _nm; | |
623 bool _failures; | |
624 bool _has_oops_in_region; | |
625 | |
626 template <class T> void do_oop_work(T* p) { | |
627 T heap_oop = oopDesc::load_heap_oop(p); | |
628 if (!oopDesc::is_null(heap_oop)) { | |
629 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); | |
630 | |
631 // Note: not all the oops embedded in the nmethod are in the | |
632 // current region. We only look at those which are. | |
633 if (_hr->is_in(obj)) { | |
634 // Object is in the region. Check that its less than top | |
635 if (_hr->top() <= (HeapWord*)obj) { | |
636 // Object is above top | |
637 gclog_or_tty->print_cr("Object "PTR_FORMAT" in region " | |
638 "["PTR_FORMAT", "PTR_FORMAT") is above " | |
639 "top "PTR_FORMAT, | |
640 (void *)obj, _hr->bottom(), _hr->end(), _hr->top()); | |
641 _failures = true; | |
642 return; | |
643 } | |
644 // Nmethod has at least one oop in the current region | |
645 _has_oops_in_region = true; | |
646 } | |
647 } | |
648 } | |
649 | |
650 public: | |
651 VerifyStrongCodeRootOopClosure(const HeapRegion* hr, nmethod* nm): | |
652 _hr(hr), _failures(false), _has_oops_in_region(false) {} | |
653 | |
654 void do_oop(narrowOop* p) { do_oop_work(p); } | |
655 void do_oop(oop* p) { do_oop_work(p); } | |
656 | |
657 bool failures() { return _failures; } | |
658 bool has_oops_in_region() { return _has_oops_in_region; } | |
659 }; | |
660 | |
// Verification closure applied to each code blob on a region's strong
// code-root list: checks the nmethod is alive and actually has at
// least one oop pointing into the region.
class VerifyStrongCodeRootCodeBlobClosure: public CodeBlobClosure {
  const HeapRegion* _hr;
  bool _failures;
public:
  VerifyStrongCodeRootCodeBlobClosure(const HeapRegion* hr) :
    _hr(hr), _failures(false) {}

  void do_code_blob(CodeBlob* cb) {
    nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null();
    if (nm != NULL) {
      // Verify that the nmethod is live
      if (!nm->is_alive()) {
        gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has dead nmethod "
                               PTR_FORMAT" in its strong code roots",
                               _hr->bottom(), _hr->end(), nm);
        _failures = true;
      } else {
        VerifyStrongCodeRootOopClosure oop_cl(_hr, nm);
        nm->oops_do(&oop_cl);
        if (!oop_cl.has_oops_in_region()) {
          gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has nmethod "
                                 PTR_FORMAT" in its strong code roots "
                                 "with no pointers into region",
                                 _hr->bottom(), _hr->end(), nm);
          _failures = true;
        } else if (oop_cl.failures()) {
          gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has other "
                                 "failures for nmethod "PTR_FORMAT,
                                 _hr->bottom(), _hr->end(), nm);
          _failures = true;
        }
      }
    }
  }

  bool failures() { return _failures; }
};
698 | |
// Verifies this region's strong code-root list: empty and humongous
// regions must have empty lists, and every listed nmethod must be
// alive with at least one oop below top in this region. Sets
// *failures on any violation; never clears it.
void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const {
  if (!G1VerifyHeapRegionCodeRoots) {
    // We're not verifying code roots.
    return;
  }
  if (vo == VerifyOption_G1UseMarkWord) {
    // Marking verification during a full GC is performed after class
    // unloading, code cache unloading, etc so the strong code roots
    // attached to each heap region are in an inconsistent state. They won't
    // be consistent until the strong code roots are rebuilt after the
    // actual GC. Skip verifying the strong code roots in this particular
    // time.
    assert(VerifyDuringGC, "only way to get here");
    return;
  }

  HeapRegionRemSet* hrrs = rem_set();
  int strong_code_roots_length = hrrs->strong_code_roots_list_length();

  // if this region is empty then there should be no entries
  // on its strong code root list
  if (is_empty()) {
    if (strong_code_roots_length > 0) {
      gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is empty "
                             "but has "INT32_FORMAT" code root entries",
                             bottom(), end(), strong_code_roots_length);
      *failures = true;
    }
    return;
  }

  // An H-region should have an empty strong code root list
  if (isHumongous()) {
    if (strong_code_roots_length > 0) {
      gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous "
                             "but has "INT32_FORMAT" code root entries",
                             bottom(), end(), strong_code_roots_length);
      *failures = true;
    }
    return;
  }

  VerifyStrongCodeRootCodeBlobClosure cb_cl(this);
  strong_code_roots_do(&cb_cl);

  if (cb_cl.failures()) {
    *failures = true;
  }
}
748 | |
// Prints a one-line summary of this region to the GC log.
void HeapRegion::print() const { print_on(gclog_or_tty); }
// Prints the region's flags (humongous start/continues, in-CSet,
// young/survivor, free), GC time stamp and TAMS values, then the
// underlying space's details.
void HeapRegion::print_on(outputStream* st) const {
  if (isHumongous()) {
    if (startsHumongous())
      st->print(" HS");
    else
      st->print(" HC");
  } else {
    st->print("   ");
  }
  if (in_collection_set())
    st->print(" CS");
  else
    st->print("   ");
  if (is_young())
    st->print(is_survivor() ? " SU" : " Y ");
  else
    st->print("   ");
  if (is_empty())
    st->print(" F");
  else
    st->print("  ");
  st->print(" TS %5d", _gc_time_stamp);
  st->print(" PTAMS "PTR_FORMAT" NTAMS "PTR_FORMAT,
            prev_top_at_mark_start(), next_top_at_mark_start());
  G1OffsetTableContigSpace::print_on(st);
}
52 | 776 |
53 class VerifyLiveClosure: public OopClosure { | 777 class VerifyLiveClosure: public OopClosure { |
54 private: | 778 private: |
55 G1CollectedHeap* _g1h; | 779 G1CollectedHeap* _g1h; |
56 CardTableModRefBS* _bs; | 780 CardTableModRefBS* _bs; |
186 } | 910 } |
187 } | 911 } |
188 } | 912 } |
189 }; | 913 }; |
190 | 914 |
// Walks objects in [cur, top), applying "cl" to each live object that
// lies entirely below "top"; returns the address of the first object
// reaching or crossing "top" (handled separately by the caller).
template<class ClosureType>
HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h,
                               HeapRegion* hr,
                               HeapWord* cur, HeapWord* top) {
  oop cur_oop = oop(cur);
  int oop_size = cur_oop->size();
  HeapWord* next_obj = cur + oop_size;
  while (next_obj < top) {
    // Keep filtering the remembered set.
    if (!g1h->is_obj_dead(cur_oop, hr)) {
      // Bottom lies entirely below top, so we can call the
      // non-memRegion version of oop_iterate below.
      cur_oop->oop_iterate(cl);
    }
    cur = next_obj;
    cur_oop = oop(cur);
    oop_size = cur_oop->size();
    next_obj = cur + oop_size;
  }
  return cur;
}
212 | |
// Applies "cl" (wrapped by the filter selected via _fk) to the live
// objects intersecting "mr"; first and last objects may extend beyond
// mr and use the MemRegion-bounded oop_iterate.
void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr,
                                              HeapWord* bottom,
                                              HeapWord* top,
                                              ExtendedOopClosure* cl) {
  G1CollectedHeap* g1h = _g1;
  int oop_size;
  ExtendedOopClosure* cl2 = NULL;

  // Both filters are built up front; only the one picked below is used,
  // but both must stay live on the stack for the whole walk.
  FilterIntoCSClosure intoCSFilt(this, g1h, cl);
  FilterOutOfRegionClosure outOfRegionFilt(_hr, cl);

  switch (_fk) {
  case NoFilterKind: cl2 = cl; break;
  case IntoCSFilterKind: cl2 = &intoCSFilt; break;
  case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break;
  default: ShouldNotReachHere();
  }

  // Start filtering what we add to the remembered set. If the object is
  // not considered dead, either because it is marked (in the mark bitmap)
  // or it was allocated after marking finished, then we add it. Otherwise
  // we can safely ignore the object.
  if (!g1h->is_obj_dead(oop(bottom), _hr)) {
    oop_size = oop(bottom)->oop_iterate(cl2, mr);
  } else {
    oop_size = oop(bottom)->size();
  }

  bottom += oop_size;

  if (bottom < top) {
    // We replicate the loop below for several kinds of possible filters.
    switch (_fk) {
    case NoFilterKind:
      bottom = walk_mem_region_loop(cl, g1h, _hr, bottom, top);
      break;

    case IntoCSFilterKind: {
      FilterIntoCSClosure filt(this, g1h, cl);
      bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
      break;
    }

    case OutOfRegionFilterKind: {
      FilterOutOfRegionClosure filt(_hr, cl);
      bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
      break;
    }

    default:
      ShouldNotReachHere();
    }

    // Last object. Need to do dead-obj filtering here too.
    if (!g1h->is_obj_dead(oop(bottom), _hr)) {
      oop(bottom)->oop_iterate(cl2, mr);
    }
  }
}
272 | |
// Minimum region size (bytes); we won't go lower than that.
// We might want to decrease this in the future, to deal with small
// heaps a bit more efficiently.
#define MIN_REGION_SIZE ( 1024 * 1024 )

// Maximum region size (bytes); we don't go higher than that. There's a
// good reason for having an upper bound. We don't want regions to get
// too large, otherwise cleanup's effectiveness would decrease as there
// will be fewer opportunities to find totally empty regions after
// marking.
#define MAX_REGION_SIZE ( 32 * 1024 * 1024 )

// The automatic region size calculation will try to have around this
// many regions in the heap (based on the min heap size).
#define TARGET_REGION_NUMBER 2048
288 | |
// Choose the G1 heap region size — either the user-supplied
// G1HeapRegionSize or an ergonomic value derived from the minimum heap
// size — round it to a power of two, clamp it to [MIN_REGION_SIZE,
// MAX_REGION_SIZE], and initialize the static region-geometry globals
// (LogOfHRGrainBytes, GrainBytes, GrainWords, CardsPerRegion).
// Must be called exactly once; the guarantees below enforce that.
void HeapRegion::setup_heap_region_size(uintx min_heap_size) {
  // region_size in bytes
  uintx region_size = G1HeapRegionSize;
  if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
    // We base the automatic calculation on the min heap size. This
    // can be problematic if the spread between min and max is quite
    // wide, imagine -Xms128m -Xmx32g. But, if we decided it based on
    // the max size, the region size might be way too large for the
    // min size. Either way, some users might have to set the region
    // size manually for some -Xms / -Xmx combos.

    region_size = MAX2(min_heap_size / TARGET_REGION_NUMBER,
                       (uintx) MIN_REGION_SIZE);
  }

  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = ((uintx)1 << region_size_log);

  // Now make sure that we don't go over or under our limits.
  if (region_size < MIN_REGION_SIZE) {
    region_size = MIN_REGION_SIZE;
  } else if (region_size > MAX_REGION_SIZE) {
    region_size = MAX_REGION_SIZE;
  }

  if (region_size != G1HeapRegionSize) {
    // Update the flag to make sure that PrintFlagsFinal logs the correct value
    FLAG_SET_ERGO(uintx, G1HeapRegionSize, region_size);
  }

  // And recalculate the log, since the clamping above may have changed
  // region_size.
  region_size_log = log2_long((jlong) region_size);

  // Now, set up the globals.
  guarantee(LogOfHRGrainBytes == 0, "we should only set it once");
  LogOfHRGrainBytes = region_size_log;

  guarantee(LogOfHRGrainWords == 0, "we should only set it once");
  LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize;

  guarantee(GrainBytes == 0, "we should only set it once");
  // The cast to int is safe, given that we've bounded region_size by
  // MIN_REGION_SIZE and MAX_REGION_SIZE.
  GrainBytes = (size_t)region_size;

  guarantee(GrainWords == 0, "we should only set it once");
  GrainWords = GrainBytes >> LogHeapWordSize;
  guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity");

  guarantee(CardsPerRegion == 0, "we should only set it once");
  CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
}
344 | |
// Reset this region's marking state after a full-GC compaction.
void HeapRegion::reset_after_compaction() {
  G1OffsetTableContigSpace::reset_after_compaction();
  // After a compaction the mark bitmap is invalid, so we must
  // treat all objects as being inside the unmarked area.
  zero_marked_bytes();
  init_top_at_mark_start();
}
352 | |
// Reset this (non-humongous) region to its pristine state: clears
// collection-set / young membership, marking data and (optionally) the
// space itself. When par is true, the remembered set and claim value are
// left for the parallel code to clear later.
void HeapRegion::hr_clear(bool par, bool clear_space) {
  assert(_humongous_type == NotHumongous,
         "we should have already filtered out humongous regions");
  assert(_humongous_start_region == NULL,
         "we should have already filtered out humongous regions");
  assert(_end == _orig_end,
         "we should have already filtered out humongous regions");

  _in_collection_set = false;

  set_young_index_in_cset(-1);
  uninstall_surv_rate_group();
  set_young_type(NotYoung);
  reset_pre_dummy_top();

  if (!par) {
    // If this is parallel, this will be done later.
    HeapRegionRemSet* hrrs = rem_set();
    if (hrrs != NULL) hrrs->clear();
    _claimed = InitialClaimValue;
  }
  zero_marked_bytes();

  _offsets.resize(HeapRegion::GrainWords);
  init_top_at_mark_start();
  if (clear_space) clear(SpaceDecorator::Mangle);
}
380 | |
// Parallel-phase clearing for a region already emptied by hr_clear(par =
// true): clears the remembered set and the card-table entries covering
// the region.
void HeapRegion::par_clear() {
  assert(used() == 0, "the region should have been already cleared");
  assert(capacity() == HeapRegion::GrainBytes, "should be back to normal");
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->clear();
  CardTableModRefBS* ct_bs =
                   (CardTableModRefBS*)G1CollectedHeap::heap()->barrier_set();
  ct_bs->clear(MemRegion(bottom(), end()));
}
390 | |
391 void HeapRegion::calc_gc_efficiency() { | |
392 // GC efficiency is the ratio of how much space would be | |
393 // reclaimed over how long we predict it would take to reclaim it. | |
394 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
395 G1CollectorPolicy* g1p = g1h->g1_policy(); | |
396 | |
397 // Retrieve a prediction of the elapsed time for this region for | |
398 // a mixed gc because the region will only be evacuated during a | |
399 // mixed gc. | |
400 double region_elapsed_time_ms = | |
401 g1p->predict_region_elapsed_time_ms(this, false /* for_young_gc */); | |
402 _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms; | |
403 } | |
404 | |
// Turn this empty region into the first ("starts humongous") region of a
// humongous object. new_top is the object's end; new_end is the end of
// the last region the object occupies (>= this region's original end).
void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
  assert(!isHumongous(), "sanity / pre-condition");
  assert(end() == _orig_end,
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(bottom() <= new_top && new_top <= new_end, "pre-condition");

  _humongous_type = StartsHumongous;
  _humongous_start_region = this;

  // Extend this region's end to cover the whole humongous object; the
  // offset table is told so block_start queries work across it.
  set_end(new_end);
  _offsets.set_for_starts_humongous(new_top);
}
418 | |
// Turn this empty region into a "continues humongous" region, i.e. one of
// the tail regions of the humongous object starting in first_hr.
void HeapRegion::set_continuesHumongous(HeapRegion* first_hr) {
  assert(!isHumongous(), "sanity / pre-condition");
  assert(end() == _orig_end,
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(first_hr->startsHumongous(), "pre-condition");

  _humongous_type = ContinuesHumongous;
  _humongous_start_region = first_hr;
}
429 | |
// Revert a humongous region back to a normal region, restoring the
// original end for a "starts humongous" region.
void HeapRegion::set_notHumongous() {
  assert(isHumongous(), "pre-condition");

  if (startsHumongous()) {
    assert(top() <= end(), "pre-condition");
    // Restore the end that set_startsHumongous() extended.
    set_end(_orig_end);
    if (top() > end()) {
      // at least one "continues humongous" region after it: the object
      // spilled past this region, so clamp top back to the restored end
      set_top(end());
    }
  } else {
    // continues humongous
    assert(end() == _orig_end, "sanity");
  }

  assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
  _humongous_type = NotHumongous;
  _humongous_start_region = NULL;
}
449 | |
450 bool HeapRegion::claimHeapRegion(jint claimValue) { | |
451 jint current = _claimed; | |
452 if (current != claimValue) { | |
453 jint res = Atomic::cmpxchg(claimValue, &_claimed, current); | |
454 if (res == current) { | |
455 return true; | |
456 } | |
457 } | |
458 return false; | |
459 } | |
460 | |
// Binary-search [addr, end()) for the start of the first block that
// begins at or after addr, using block_start_careful() (which does not
// update the BOT).
HeapWord* HeapRegion::next_block_start_careful(HeapWord* addr) {
  HeapWord* low = addr;
  HeapWord* high = end();
  while (low < high) {
    size_t diff = pointer_delta(high, low);
    // Must add one below to bias toward the high amount. Otherwise, if
    // "high" were at the desired value, and "low" were one less, we
    // would not converge on "high". This is not symmetric, because
    // we set "high" to a block start, which might be the right one,
    // which we don't do for "low".
    HeapWord* middle = low + (diff+1)/2;
    if (middle == high) return high;
    HeapWord* mid_bs = block_start_careful(middle);
    if (mid_bs < addr) {
      // Block containing middle starts before addr: answer lies above.
      low = middle;
    } else {
      // mid_bs is itself a block start >= addr: narrow from above.
      high = mid_bs;
    }
  }
  assert(low == high && low >= addr, "Didn't work.");
  return low;
}
483 | |
484 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away | |
485 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list | |
486 #endif // _MSC_VER | |
487 | |
488 | |
// Construct the region covering mr. Most fields start in the "cleared"
// state via hr_clear(); the remembered set is allocated last.
HeapRegion::HeapRegion(uint hrs_index,
                       G1BlockOffsetSharedArray* sharedOffsetArray,
                       MemRegion mr) :
    G1OffsetTableContigSpace(sharedOffsetArray, mr),
    _hrs_index(hrs_index),
    _humongous_type(NotHumongous), _humongous_start_region(NULL),
    _in_collection_set(false),
    _next_in_special_set(NULL), _orig_end(NULL),
    _claimed(InitialClaimValue), _evacuation_failed(false),
    _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
    _young_type(NotYoung), _next_young_region(NULL),
    _next_dirty_cards_region(NULL), _next(NULL), _pending_removal(false),
#ifdef ASSERT
    _containing_set(NULL),
#endif // ASSERT
    _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
    _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
    _predicted_bytes_to_copy(0)
{
  // Remember the natural end so humongous handling can restore it later.
  _orig_end = mr.end();
  // Note that initialize() will set the start of the unmarked area of the
  // region.
  hr_clear(false /*par*/, false /*clear_space*/);
  set_top(bottom());
  set_saved_mark();

  _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);

  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
}
519 | |
520 CompactibleSpace* HeapRegion::next_compaction_space() const { | |
521 // We're not using an iterator given that it will wrap around when | |
522 // it reaches the last region and this is not what we want here. | |
523 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
524 uint index = hrs_index() + 1; | |
525 while (index < g1h->n_regions()) { | |
526 HeapRegion* hr = g1h->region_at(index); | |
527 if (!hr->isHumongous()) { | |
528 return hr; | |
529 } | |
530 index += 1; | |
531 } | |
532 return NULL; | |
533 } | |
534 | |
// Record the current allocation point as the saved mark (see
// oop_before_save_marks_iterate / used_region_at_save_marks).
void HeapRegion::save_marks() {
  set_saved_mark();
}
538 | |
539 void HeapRegion::oops_in_mr_iterate(MemRegion mr, ExtendedOopClosure* cl) { | |
540 HeapWord* p = mr.start(); | |
541 HeapWord* e = mr.end(); | |
542 oop obj; | |
543 while (p < e) { | |
544 obj = oop(p); | |
545 p += obj->oop_iterate(cl); | |
546 } | |
547 assert(p == e, "bad memregion: doesn't end on obj boundary"); | |
548 } | |
549 | |
// Generate the family of oop_since_save_marks_iterate##nv_suffix methods,
// each of which simply forwards to the ContiguousSpace implementation.
#define HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
void HeapRegion::oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
  ContiguousSpace::oop_since_save_marks_iterate##nv_suffix(cl); \
}
SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN)
555 | |
556 | |
// Apply cl to all oops in objects allocated before the last save_marks()
// call, i.e. in [bottom(), saved_mark_word()).
void HeapRegion::oop_before_save_marks_iterate(ExtendedOopClosure* cl) {
  oops_in_mr_iterate(MemRegion(bottom(), saved_mark_word()), cl);
}
560 | |
// Prepare this region's TAMS/marked-bytes state before self-forwarding
// pointers (from a failed evacuation) are removed.
void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
                                                    bool during_conc_mark) {
  // We always recreate the prev marking info and we'll explicitly
  // mark all objects we find to be self-forwarded on the prev
  // bitmap. So all objects need to be below PTAMS.
  _prev_top_at_mark_start = top();
  _prev_marked_bytes = 0;

  if (during_initial_mark) {
    // During initial-mark, we'll also explicitly mark all objects
    // we find to be self-forwarded on the next bitmap. So all
    // objects need to be below NTAMS.
    _next_top_at_mark_start = top();
    _next_marked_bytes = 0;
  } else if (during_conc_mark) {
    // During concurrent mark, all objects in the CSet (including
    // the ones we find to be self-forwarded) are implicitly live.
    // So all objects need to be above NTAMS.
    _next_top_at_mark_start = bottom();
    _next_marked_bytes = 0;
  }
}
583 | |
584 void HeapRegion::note_self_forwarding_removal_end(bool during_initial_mark, | |
585 bool during_conc_mark, | |
586 size_t marked_bytes) { | |
587 assert(0 <= marked_bytes && marked_bytes <= used(), | |
588 err_msg("marked: "SIZE_FORMAT" used: "SIZE_FORMAT, | |
589 marked_bytes, used())); | |
590 _prev_marked_bytes = marked_bytes; | |
591 } | |
592 | |
// Apply cl to all live objects intersecting mr, stopping (and returning
// the address) at the first unparseable object; returns NULL when the
// whole intersection was walked or mr lies outside the used region.
HeapWord*
HeapRegion::object_iterate_mem_careful(MemRegion mr,
                                                 ObjectClosure* cl) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  // We used to use "block_start_careful" here. But we're actually happy
  // to update the BOT while we do this...
  // NOTE(review): block_start() is queried with the pre-intersection
  // mr.start(); presumably callers guarantee mr starts within this
  // region — confirm against call sites.
  HeapWord* cur = block_start(mr.start());
  mr = mr.intersection(used_region());
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  assert(cur <= mr.start()
         && (oop(cur)->klass_or_null() == NULL ||
             cur + oop(cur)->size() > mr.start()),
         "postcondition of block_start");
  oop obj;
  while (cur < mr.end()) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    } else if (!g1h->is_obj_dead(obj)) {
      cl->do_object(obj);
    }
    if (cl->abort()) return cur;
    // The check above must occur before the operation below, since an
    // abort might invalidate the "size" operation.
    cur += obj->size();
  }
  return NULL;
}
624 | |
// Walk the objects intersecting the card region mr, applying cl to the
// live ones. Returns NULL on a complete walk, or the address of the
// first unparseable object (its card must then be re-dirtied by the
// caller). When filter_young is true, young regions are skipped and the
// card (card_ptr) is cleaned before reading any object.
HeapWord*
HeapRegion::
oops_on_card_seq_iterate_careful(MemRegion mr,
                                 FilterOutOfRegionClosure* cl,
                                 bool filter_young,
                                 jbyte* card_ptr) {
  // Currently, we should only have to clean the card if filter_young
  // is true and vice versa.
  if (filter_young) {
    assert(card_ptr != NULL, "pre-condition");
  } else {
    assert(card_ptr == NULL, "pre-condition");
  }
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we're within a stop-world GC, then we might look at a card in a
  // GC alloc region that extends onto a GC LAB, which may not be
  // parseable. Stop such at the "saved_mark" of the region.
  if (g1h->is_gc_active()) {
    mr = mr.intersection(used_region_at_save_marks());
  } else {
    mr = mr.intersection(used_region());
  }
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  // The intersection of the incoming mr (for the card) and the
  // allocated part of the region is non-empty. This implies that
  // we have actually allocated into this region. The code in
  // G1CollectedHeap.cpp that allocates a new region sets the
  // is_young tag on the region before allocating. Thus we
  // safely know if this region is young.
  if (is_young() && filter_young) {
    return NULL;
  }

  assert(!is_young(), "check value of filter_young");

  // We can only clean the card here, after we make the decision that
  // the card is not young. And we only clean the card if we have been
  // asked to (i.e., card_ptr != NULL).
  if (card_ptr != NULL) {
    *card_ptr = CardTableModRefBS::clean_card_val();
    // We must complete this write before we do any of the reads below.
    OrderAccess::storeload();
  }

  // Cache the boundaries of the memory region in some const locals
  HeapWord* const start = mr.start();
  HeapWord* const end = mr.end();

  // We used to use "block_start_careful" here. But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(start);
  assert(cur <= start, "Postcondition");

  oop obj;

  // Advance object-by-object until we find the object that covers
  // (starts at or before) start.
  HeapWord* next = cur;
  while (next <= start) {
    cur = next;
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }
    // Otherwise...
    next = (cur + obj->size());
  }

  // If we finish the above loop...We have a parseable object that
  // begins on or before the start of the memory region, and ends
  // inside or spans the entire region.

  assert(obj == oop(cur), "sanity");
  assert(cur <= start &&
         obj->klass_or_null() != NULL &&
         (cur + obj->size()) > start,
         "Loop postcondition");

  // First (possibly straddling) object: bound the iteration by mr.
  if (!g1h->is_obj_dead(obj)) {
    obj->oop_iterate(cl, mr);
  }

  // Remaining objects that start inside [start, end).
  while (cur < end) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    };

    // Otherwise:
    next = (cur + obj->size());

    if (!g1h->is_obj_dead(obj)) {
      if (next < end || !obj->is_objArray()) {
        // This object either does not span the MemRegion
        // boundary, or if it does it's not an array.
        // Apply closure to whole object.
        obj->oop_iterate(cl);
      } else {
        // This obj is an array that spans the boundary.
        // Stop at the boundary.
        obj->oop_iterate(cl, mr);
      }
    }
    cur = next;
  }
  return NULL;
}
735 | |
// Print a one-line summary of this region to the GC log stream.
void HeapRegion::print() const { print_on(gclog_or_tty); }
// Print region attribute tags (humongous/CS/young/free), the GC time
// stamp and the TAMS values, then the underlying space's own printout.
void HeapRegion::print_on(outputStream* st) const {
  if (isHumongous()) {
    if (startsHumongous())
      st->print(" HS");
    else
      st->print(" HC");
  } else {
    st->print(" ");
  }
  if (in_collection_set())
    st->print(" CS");
  else
    st->print(" ");
  if (is_young())
    st->print(is_survivor() ? " SU" : " Y ");
  else
    st->print(" ");
  if (is_empty())
    st->print(" F");
  else
    st->print(" ");
  st->print(" TS %5d", _gc_time_stamp);
  st->print(" PTAMS "PTR_FORMAT" NTAMS "PTR_FORMAT,
            prev_top_at_mark_start(), next_top_at_mark_start());
  G1OffsetTableContigSpace::print_on(st);
}
763 | |
// Convenience overload: verify against the "prev" marking information,
// discarding the failure flag.
void HeapRegion::verify() const {
  bool dummy = false;
  verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
}
768 | |
769 // This really ought to be commoned up into OffsetTableContigSpace somehow. | 915 // This really ought to be commoned up into OffsetTableContigSpace somehow. |
770 // We would need a mechanism to make that code skip dead objects. | 916 // We would need a mechanism to make that code skip dead objects. |
771 | 917 |
772 void HeapRegion::verify(VerifyOption vo, | 918 void HeapRegion::verify(VerifyOption vo, |
773 bool* failures) const { | 919 bool* failures) const { |
803 if (!g1->is_obj_dead_cond(obj, this, vo)) { | 949 if (!g1->is_obj_dead_cond(obj, this, vo)) { |
804 if (obj->is_oop()) { | 950 if (obj->is_oop()) { |
805 Klass* klass = obj->klass(); | 951 Klass* klass = obj->klass(); |
806 if (!klass->is_metaspace_object()) { | 952 if (!klass->is_metaspace_object()) { |
807 gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" " | 953 gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" " |
808 "not metadata", klass, obj); | 954 "not metadata", klass, (void *)obj); |
809 *failures = true; | 955 *failures = true; |
810 return; | 956 return; |
811 } else if (!klass->is_klass()) { | 957 } else if (!klass->is_klass()) { |
812 gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" " | 958 gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" " |
813 "not a klass", klass, obj); | 959 "not a klass", klass, (void *)obj); |
814 *failures = true; | 960 *failures = true; |
815 return; | 961 return; |
816 } else { | 962 } else { |
817 vl_cl.set_containing_obj(obj); | 963 vl_cl.set_containing_obj(obj); |
818 obj->oop_iterate_no_header(&vl_cl); | 964 obj->oop_iterate_no_header(&vl_cl); |
823 vl_cl.n_failures() >= G1MaxVerifyFailures) { | 969 vl_cl.n_failures() >= G1MaxVerifyFailures) { |
824 return; | 970 return; |
825 } | 971 } |
826 } | 972 } |
827 } else { | 973 } else { |
828 gclog_or_tty->print_cr(PTR_FORMAT" no an oop", obj); | 974 gclog_or_tty->print_cr(PTR_FORMAT" no an oop", (void *)obj); |
829 *failures = true; | 975 *failures = true; |
830 return; | 976 return; |
831 } | 977 } |
832 } | 978 } |
833 prev_p = p; | 979 prev_p = p; |
902 "but has "SIZE_FORMAT", objects", | 1048 "but has "SIZE_FORMAT", objects", |
903 bottom(), end(), object_num); | 1049 bottom(), end(), object_num); |
904 *failures = true; | 1050 *failures = true; |
905 return; | 1051 return; |
906 } | 1052 } |
1053 | |
1054 verify_strong_code_roots(vo, failures); | |
1055 } | |
1056 | |
1057 void HeapRegion::verify() const { | |
1058 bool dummy = false; | |
1059 verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy); | |
907 } | 1060 } |
908 | 1061 |
909 // G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go | 1062 // G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go |
910 // away eventually. | 1063 // away eventually. |
911 | 1064 |