comparison src/share/vm/gc_implementation/g1/heapRegion.cpp @ 12080:5888334c9c24
7145569: G1: optimize nmethods scanning
Summary: Add to each region's RSet a list of the nmethods that contain references into that region. Skip scanning the code cache during root scanning and instead scan these nmethod lists during RSet scanning.
Reviewed-by: tschatzl, brutisso, mgerdin, twisti, kvn
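
For orientation before the diff: the patch's core idea is that each region's remembered set now also tracks the nmethods that contain references into that region, so remembered-set scanning can visit exactly those nmethods instead of walking the whole code cache during root scanning. Below is a minimal standalone sketch of that bookkeeping with simplified, hypothetical names; the real list lives in HeapRegionRemSet (see add_strong_code_root further down in the diff).

    // Sketch only -- not the HotSpot implementation.
    #include <vector>

    struct Nmethod;                       // stand-in for HotSpot's nmethod

    struct RegionRemSetSketch {
      std::vector<Nmethod*> _code_roots;  // nmethods with oops into this region

      void add_strong_code_root(Nmethod* nm) {
        // Recorded when an nmethod is found to reference this region.
        _code_roots.push_back(nm);
      }

      template <typename Closure>
      void strong_code_roots_do(Closure& cl) {
        // During RSet scanning only these nmethods are visited, instead
        // of scanning the entire code cache as part of root scanning.
        for (size_t i = 0; i < _code_roots.size(); i++) cl(_code_roots[i]);
      }
    };
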
author    johnc
date      Thu, 15 Aug 2013 10:52:18 +0200
parents   dae8324fc7d1
children  84683e78e713
12033:bd902affe102 (previous revision) | 12080:5888334c9c24 (this changeset)
21 * questions. | 21 * questions. |
22 * | 22 * |
23 */ | 23 */ |
24 | 24 |
25 #include "precompiled.hpp" | 25 #include "precompiled.hpp" |
26 #include "code/nmethod.hpp" | |
26 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp" | 27 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp" |
27 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" | 28 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" |
28 #include "gc_implementation/g1/g1OopClosures.inline.hpp" | 29 #include "gc_implementation/g1/g1OopClosures.inline.hpp" |
29 #include "gc_implementation/g1/heapRegion.inline.hpp" | 30 #include "gc_implementation/g1/heapRegion.inline.hpp" |
30 #include "gc_implementation/g1/heapRegionRemSet.hpp" | 31 #include "gc_implementation/g1/heapRegionRemSet.hpp" |
47 _hr(hr), _fk(fk), _g1(g1) { } | 48 _hr(hr), _fk(fk), _g1(g1) { } |
48 | 49 |
49 FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r, | 50 FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r, |
50 OopClosure* oc) : | 51 OopClosure* oc) : |
51 _r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { } | 52 _r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { } |
53 | |
54 template<class ClosureType> | |
55 HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h, | |
56 HeapRegion* hr, | |
57 HeapWord* cur, HeapWord* top) { | |
58 oop cur_oop = oop(cur); | |
59 int oop_size = cur_oop->size(); | |
60 HeapWord* next_obj = cur + oop_size; | |
61 while (next_obj < top) { | |
62 // Keep filtering the remembered set. | |
63 if (!g1h->is_obj_dead(cur_oop, hr)) { | |
64 // Bottom lies entirely below top, so we can call the | |
65 // non-memRegion version of oop_iterate below. | |
66 cur_oop->oop_iterate(cl); | |
67 } | |
68 cur = next_obj; | |
69 cur_oop = oop(cur); | |
70 oop_size = cur_oop->size(); | |
71 next_obj = cur + oop_size; | |
72 } | |
73 return cur; | |
74 } | |
75 | |
76 void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr, | |
77 HeapWord* bottom, | |
78 HeapWord* top, | |
79 ExtendedOopClosure* cl) { | |
80 G1CollectedHeap* g1h = _g1; | |
81 int oop_size; | |
82 ExtendedOopClosure* cl2 = NULL; | |
83 | |
84 FilterIntoCSClosure intoCSFilt(this, g1h, cl); | |
85 FilterOutOfRegionClosure outOfRegionFilt(_hr, cl); | |
86 | |
87 switch (_fk) { | |
88 case NoFilterKind: cl2 = cl; break; | |
89 case IntoCSFilterKind: cl2 = &intoCSFilt; break; | |
90 case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break; | |
91 default: ShouldNotReachHere(); | |
92 } | |
93 | |
94 // Start filtering what we add to the remembered set. If the object is | |
95 // not considered dead, either because it is marked (in the mark bitmap) | |
96 // or it was allocated after marking finished, then we add it. Otherwise | |
97 // we can safely ignore the object. | |
98 if (!g1h->is_obj_dead(oop(bottom), _hr)) { | |
99 oop_size = oop(bottom)->oop_iterate(cl2, mr); | |
100 } else { | |
101 oop_size = oop(bottom)->size(); | |
102 } | |
103 | |
104 bottom += oop_size; | |
105 | |
106 if (bottom < top) { | |
107 // We replicate the loop below for several kinds of possible filters. | |
108 switch (_fk) { | |
109 case NoFilterKind: | |
110 bottom = walk_mem_region_loop(cl, g1h, _hr, bottom, top); | |
111 break; | |
112 | |
113 case IntoCSFilterKind: { | |
114 FilterIntoCSClosure filt(this, g1h, cl); | |
115 bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top); | |
116 break; | |
117 } | |
118 | |
119 case OutOfRegionFilterKind: { | |
120 FilterOutOfRegionClosure filt(_hr, cl); | |
121 bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top); | |
122 break; | |
123 } | |
124 | |
125 default: | |
126 ShouldNotReachHere(); | |
127 } | |
128 | |
129 // Last object. Need to do dead-obj filtering here too. | |
130 if (!g1h->is_obj_dead(oop(bottom), _hr)) { | |
131 oop(bottom)->oop_iterate(cl2, mr); | |
132 } | |
133 } | |
134 } | |
135 | |
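
A note on the structure just above: the switch in walk_mem_region_with_cl repeats the call to walk_mem_region_loop once per filter kind so that each template instantiation is specialized for a concrete closure type, enabling direct, inlinable per-oop calls rather than fully virtual dispatch. The same technique in miniature, with hypothetical names:

    // Template-dispatch sketch: instantiating the walker per concrete
    // closure type turns cl->apply(...) into a direct, inlinable call.
    template <class Closure>
    void walk(Closure* cl, int* objs, int n) {
      for (int i = 0; i < n; i++) cl->apply(objs[i]);  // statically bound
    }

    struct CountingClosure {
      int count;
      CountingClosure() : count(0) {}
      void apply(int) { ++count; }  // no vtable lookup in walk<CountingClosure>
    };
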
136 // Minimum region size; we won't go lower than that. | |
137 // We might want to decrease this in the future, to deal with small | |
138 // heaps a bit more efficiently. | |
139 #define MIN_REGION_SIZE ( 1024 * 1024 ) | |
140 | |
141 // Maximum region size; we don't go higher than that. There's a good | |
142 // reason for having an upper bound. We don't want regions to get too | |
143 // large, otherwise cleanup's effectiveness would decrease as there | |
144 // will be fewer opportunities to find totally empty regions after | |
145 // marking. | |
146 #define MAX_REGION_SIZE ( 32 * 1024 * 1024 ) | |
147 | |
148 // The automatic region size calculation will try to have around this | |
149 // many regions in the heap (based on the min heap size). | |
150 #define TARGET_REGION_NUMBER 2048 | |
151 | |
152 void HeapRegion::setup_heap_region_size(uintx min_heap_size) { | |
153 // region_size in bytes | |
154 uintx region_size = G1HeapRegionSize; | |
155 if (FLAG_IS_DEFAULT(G1HeapRegionSize)) { | |
156 // We base the automatic calculation on the min heap size. This | |
157 // can be problematic if the spread between min and max is quite | |
158 // wide, imagine -Xms128m -Xmx32g. But, if we decided it based on | |
159 // the max size, the region size might be way too large for the | |
160 // min size. Either way, some users might have to set the region | |
161 // size manually for some -Xms / -Xmx combos. | |
162 | |
163 region_size = MAX2(min_heap_size / TARGET_REGION_NUMBER, | |
164 (uintx) MIN_REGION_SIZE); | |
165 } | |
166 | |
167 int region_size_log = log2_long((jlong) region_size); | |
168 // Recalculate the region size to make sure it's a power of | |
169 // 2. This means that region_size is the largest power of 2 that's | |
170 // <= what we've calculated so far. | |
171 region_size = ((uintx)1 << region_size_log); | |
172 | |
173 // Now make sure that we don't go over or under our limits. | |
174 if (region_size < MIN_REGION_SIZE) { | |
175 region_size = MIN_REGION_SIZE; | |
176 } else if (region_size > MAX_REGION_SIZE) { | |
177 region_size = MAX_REGION_SIZE; | |
178 } | |
179 | |
180 if (region_size != G1HeapRegionSize) { | |
181 // Update the flag to make sure that PrintFlagsFinal logs the correct value | |
182 FLAG_SET_ERGO(uintx, G1HeapRegionSize, region_size); | |
183 } | |
184 | |
185 // And recalculate the log. | |
186 region_size_log = log2_long((jlong) region_size); | |
187 | |
188 // Now, set up the globals. | |
189 guarantee(LogOfHRGrainBytes == 0, "we should only set it once"); | |
190 LogOfHRGrainBytes = region_size_log; | |
191 | |
192 guarantee(LogOfHRGrainWords == 0, "we should only set it once"); | |
193 LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize; | |
194 | |
195 guarantee(GrainBytes == 0, "we should only set it once"); | |
196 // The cast to int is safe, given that we've bounded region_size by | |
197 // MIN_REGION_SIZE and MAX_REGION_SIZE. | |
198 GrainBytes = (size_t)region_size; | |
199 | |
200 guarantee(GrainWords == 0, "we should only set it once"); | |
201 GrainWords = GrainBytes >> LogHeapWordSize; | |
202 guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity"); | |
203 | |
204 guarantee(CardsPerRegion == 0, "we should only set it once"); | |
205 CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift; | |
206 } | |
207 | |
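
To see the sizing policy above in action, here is a small self-contained model of the same calculation (a sketch mirroring the function above, not HotSpot code) with a few worked inputs:

    #include <cstdint>
    #include <cstdio>

    // Mirror of setup_heap_region_size: aim for ~2048 regions based on
    // the minimum heap size, round down to a power of two, clamp to [1M, 32M].
    static uint64_t region_size_for(uint64_t min_heap_bytes) {
      const uint64_t kMin = 1024 * 1024, kMax = 32 * 1024 * 1024;
      uint64_t sz = min_heap_bytes / 2048;
      if (sz < kMin) sz = kMin;
      uint64_t p = 1;
      while (p * 2 <= sz) p *= 2;        // largest power of two <= sz
      return (p > kMax) ? kMax : p;
    }

    int main() {
      // -Xms128m: 64K per target region, clamped up to the 1M minimum.
      printf("%llu\n", (unsigned long long)region_size_for(128ull << 20)); // 1048576
      // -Xms8g: exactly 4M per region (8G / 2048, already a power of two).
      printf("%llu\n", (unsigned long long)region_size_for(8ull << 30));   // 4194304
      // -Xms100g: 50M per target region, rounded down and capped at 32M.
      printf("%llu\n", (unsigned long long)region_size_for(100ull << 30)); // 33554432
      return 0;
    }
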
208 void HeapRegion::reset_after_compaction() { | |
209 G1OffsetTableContigSpace::reset_after_compaction(); | |
210 // After a compaction the mark bitmap is invalid, so we must | |
211 // treat all objects as being inside the unmarked area. | |
212 zero_marked_bytes(); | |
213 init_top_at_mark_start(); | |
214 } | |
215 | |
216 void HeapRegion::hr_clear(bool par, bool clear_space) { | |
217 assert(_humongous_type == NotHumongous, | |
218 "we should have already filtered out humongous regions"); | |
219 assert(_humongous_start_region == NULL, | |
220 "we should have already filtered out humongous regions"); | |
221 assert(_end == _orig_end, | |
222 "we should have already filtered out humongous regions"); | |
223 | |
224 _in_collection_set = false; | |
225 | |
226 set_young_index_in_cset(-1); | |
227 uninstall_surv_rate_group(); | |
228 set_young_type(NotYoung); | |
229 reset_pre_dummy_top(); | |
230 | |
231 if (!par) { | |
232 // If this is parallel, this will be done later. | |
233 HeapRegionRemSet* hrrs = rem_set(); | |
234 hrrs->clear(); | |
235 _claimed = InitialClaimValue; | |
236 } | |
237 zero_marked_bytes(); | |
238 | |
239 _offsets.resize(HeapRegion::GrainWords); | |
240 init_top_at_mark_start(); | |
241 if (clear_space) clear(SpaceDecorator::Mangle); | |
242 } | |
243 | |
244 void HeapRegion::par_clear() { | |
245 assert(used() == 0, "the region should have been already cleared"); | |
246 assert(capacity() == HeapRegion::GrainBytes, "should be back to normal"); | |
247 HeapRegionRemSet* hrrs = rem_set(); | |
248 hrrs->clear(); | |
249 CardTableModRefBS* ct_bs = | |
250 (CardTableModRefBS*)G1CollectedHeap::heap()->barrier_set(); | |
251 ct_bs->clear(MemRegion(bottom(), end())); | |
252 } | |
253 | |
254 void HeapRegion::calc_gc_efficiency() { | |
255 // GC efficiency is the ratio of how much space would be | |
256 // reclaimed over how long we predict it would take to reclaim it. | |
257 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
258 G1CollectorPolicy* g1p = g1h->g1_policy(); | |
259 | |
260 // Retrieve a prediction of the elapsed time for this region for | |
261 // a mixed gc because the region will only be evacuated during a | |
262 // mixed gc. | |
263 double region_elapsed_time_ms = | |
264 g1p->predict_region_elapsed_time_ms(this, false /* for_young_gc */); | |
265 _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms; | |
266 } | |
267 | |
268 void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) { | |
269 assert(!isHumongous(), "sanity / pre-condition"); | |
270 assert(end() == _orig_end, | |
271 "Should be normal before the humongous object allocation"); | |
272 assert(top() == bottom(), "should be empty"); | |
273 assert(bottom() <= new_top && new_top <= new_end, "pre-condition"); | |
274 | |
275 _humongous_type = StartsHumongous; | |
276 _humongous_start_region = this; | |
277 | |
278 set_end(new_end); | |
279 _offsets.set_for_starts_humongous(new_top); | |
280 } | |
281 | |
282 void HeapRegion::set_continuesHumongous(HeapRegion* first_hr) { | |
283 assert(!isHumongous(), "sanity / pre-condition"); | |
284 assert(end() == _orig_end, | |
285 "Should be normal before the humongous object allocation"); | |
286 assert(top() == bottom(), "should be empty"); | |
287 assert(first_hr->startsHumongous(), "pre-condition"); | |
288 | |
289 _humongous_type = ContinuesHumongous; | |
290 _humongous_start_region = first_hr; | |
291 } | |
292 | |
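
The two setters above establish the humongous layout: an object larger than one region occupies a single "starts humongous" region followed by zero or more "continues humongous" regions, each holding a back-pointer to the first. A simplified, hypothetical sketch of that shape:

    // Layout sketch only; field and type names are illustrative.
    struct HRSketch {
      enum Kind { Normal, StartsH, ContinuesH };
      Kind kind;
      HRSketch* humongous_start;
      HRSketch() : kind(Normal), humongous_start(0) {}
    };

    inline void make_humongous(HRSketch* regions, int n) {
      regions[0].kind = HRSketch::StartsH;
      regions[0].humongous_start = &regions[0];
      for (int i = 1; i < n; i++) {
        regions[i].kind = HRSketch::ContinuesH;
        regions[i].humongous_start = &regions[0];  // back-pointer to the start
      }
    }
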
293 void HeapRegion::set_notHumongous() { | |
294 assert(isHumongous(), "pre-condition"); | |
295 | |
296 if (startsHumongous()) { | |
297 assert(top() <= end(), "pre-condition"); | |
298 set_end(_orig_end); | |
299 if (top() > end()) { | |
300 // at least one "continues humongous" region after it | |
301 set_top(end()); | |
302 } | |
303 } else { | |
304 // continues humongous | |
305 assert(end() == _orig_end, "sanity"); | |
306 } | |
307 | |
308 assert(capacity() == HeapRegion::GrainBytes, "pre-condition"); | |
309 _humongous_type = NotHumongous; | |
310 _humongous_start_region = NULL; | |
311 } | |
312 | |
313 bool HeapRegion::claimHeapRegion(jint claimValue) { | |
314 jint current = _claimed; | |
315 if (current != claimValue) { | |
316 jint res = Atomic::cmpxchg(claimValue, &_claimed, current); | |
317 if (res == current) { | |
318 return true; | |
319 } | |
320 } | |
321 return false; | |
322 } | |
323 | |
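
claimHeapRegion above lets parallel GC workers race for a region with a single compare-and-swap: only the worker whose CAS installs the claim value wins, and a region already carrying that value is not claimed again. The same idiom in a standalone sketch, with std::atomic standing in for HotSpot's Atomic::cmpxchg:

    #include <atomic>

    struct RegionClaimSketch {
      std::atomic<int> _claimed{0};      // 0 plays the role of InitialClaimValue

      bool try_claim(int claim_value) {
        int current = _claimed.load();
        if (current != claim_value) {
          // True only if no other worker changed _claimed in between.
          return _claimed.compare_exchange_strong(current, claim_value);
        }
        return false;                    // already claimed with this value
      }
    };
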
324 HeapWord* HeapRegion::next_block_start_careful(HeapWord* addr) { | |
325 HeapWord* low = addr; | |
326 HeapWord* high = end(); | |
327 while (low < high) { | |
328 size_t diff = pointer_delta(high, low); | |
329 // Must add one below to bias toward the high amount. Otherwise, if | |
330 // "high" were at the desired value, and "low" were one less, we | |
331 // would not converge on "high". This is not symmetric, because | |
332 // we set "high" to a block start, which might be the right one, | |
333 // which we don't do for "low". | |
334 HeapWord* middle = low + (diff+1)/2; | |
335 if (middle == high) return high; | |
336 HeapWord* mid_bs = block_start_careful(middle); | |
337 if (mid_bs < addr) { | |
338 low = middle; | |
339 } else { | |
340 high = mid_bs; | |
341 } | |
342 } | |
343 assert(low == high && low >= addr, "Didn't work."); | |
344 return low; | |
345 } | |
346 | |
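
The "+1" bias in the midpoint above is what guarantees progress: the branch that keeps searching upward assigns low = middle, so the midpoint must round up or the loop could spin once low and high become adjacent. The same rule in a tiny standalone search (a sketch, not the HotSpot routine):

    // Largest index in [lo, hi] with vals[idx] <= key, given vals[lo] <= key.
    // Because the loop keeps 'mid' on the low side (lo = mid), the midpoint
    // rounds UP, mirroring (diff + 1) / 2 above; with plain diff / 2 the
    // loop would spin once hi == lo + 1.
    static int last_at_or_below(const int* vals, int lo, int hi, int key) {
      while (lo < hi) {
        int mid = lo + (hi - lo + 1) / 2;  // round up: mid > lo always
        if (vals[mid] <= key) lo = mid;
        else                  hi = mid - 1;
      }
      return lo;
    }
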
347 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away | |
348 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list | |
349 #endif // _MSC_VER | |
350 | |
351 | |
352 HeapRegion::HeapRegion(uint hrs_index, | |
353 G1BlockOffsetSharedArray* sharedOffsetArray, | |
354 MemRegion mr) : | |
355 G1OffsetTableContigSpace(sharedOffsetArray, mr), | |
356 _hrs_index(hrs_index), | |
357 _humongous_type(NotHumongous), _humongous_start_region(NULL), | |
358 _in_collection_set(false), | |
359 _next_in_special_set(NULL), _orig_end(NULL), | |
360 _claimed(InitialClaimValue), _evacuation_failed(false), | |
361 _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0), | |
362 _young_type(NotYoung), _next_young_region(NULL), | |
363 _next_dirty_cards_region(NULL), _next(NULL), _pending_removal(false), | |
364 #ifdef ASSERT | |
365 _containing_set(NULL), | |
366 #endif // ASSERT | |
367 _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1), | |
368 _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0), | |
369 _predicted_bytes_to_copy(0) | |
370 { | |
371 _rem_set = new HeapRegionRemSet(sharedOffsetArray, this); | |
372 _orig_end = mr.end(); | |
373 // Note that initialize() will set the start of the unmarked area of the | |
374 // region. | |
375 hr_clear(false /*par*/, false /*clear_space*/); | |
376 set_top(bottom()); | |
377 set_saved_mark(); | |
378 | |
379 assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant."); | |
380 } | |
381 | |
382 CompactibleSpace* HeapRegion::next_compaction_space() const { | |
383 // We're not using an iterator given that it will wrap around when | |
384 // it reaches the last region and this is not what we want here. | |
385 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
386 uint index = hrs_index() + 1; | |
387 while (index < g1h->n_regions()) { | |
388 HeapRegion* hr = g1h->region_at(index); | |
389 if (!hr->isHumongous()) { | |
390 return hr; | |
391 } | |
392 index += 1; | |
393 } | |
394 return NULL; | |
395 } | |
396 | |
397 void HeapRegion::save_marks() { | |
398 set_saved_mark(); | |
399 } | |
400 | |
401 void HeapRegion::oops_in_mr_iterate(MemRegion mr, ExtendedOopClosure* cl) { | |
402 HeapWord* p = mr.start(); | |
403 HeapWord* e = mr.end(); | |
404 oop obj; | |
405 while (p < e) { | |
406 obj = oop(p); | |
407 p += obj->oop_iterate(cl); | |
408 } | |
409 assert(p == e, "bad memregion: doesn't end on obj boundary"); | |
410 } | |
411 | |
412 #define HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \ | |
413 void HeapRegion::oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \ | |
414 ContiguousSpace::oop_since_save_marks_iterate##nv_suffix(cl); \ | |
415 } | |
416 SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN) | |
417 | |
418 | |
419 void HeapRegion::oop_before_save_marks_iterate(ExtendedOopClosure* cl) { | |
420 oops_in_mr_iterate(MemRegion(bottom(), saved_mark_word()), cl); | |
421 } | |
422 | |
423 void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark, | |
424 bool during_conc_mark) { | |
425 // We always recreate the prev marking info and we'll explicitly | |
426 // mark all objects we find to be self-forwarded on the prev | |
427 // bitmap. So all objects need to be below PTAMS. | |
428 _prev_top_at_mark_start = top(); | |
429 _prev_marked_bytes = 0; | |
430 | |
431 if (during_initial_mark) { | |
432 // During initial-mark, we'll also explicitly mark all objects | |
433 // we find to be self-forwarded on the next bitmap. So all | |
434 // objects need to be below NTAMS. | |
435 _next_top_at_mark_start = top(); | |
436 _next_marked_bytes = 0; | |
437 } else if (during_conc_mark) { | |
438 // During concurrent mark, all objects in the CSet (including | |
439 // the ones we find to be self-forwarded) are implicitly live. | |
440 // So all objects need to be above NTAMS. | |
441 _next_top_at_mark_start = bottom(); | |
442 _next_marked_bytes = 0; | |
443 } | |
444 } | |
445 | |
446 void HeapRegion::note_self_forwarding_removal_end(bool during_initial_mark, | |
447 bool during_conc_mark, | |
448 size_t marked_bytes) { | |
449 assert(0 <= marked_bytes && marked_bytes <= used(), | |
450 err_msg("marked: "SIZE_FORMAT" used: "SIZE_FORMAT, | |
451 marked_bytes, used())); | |
452 _prev_marked_bytes = marked_bytes; | |
453 } | |
454 | |
455 HeapWord* | |
456 HeapRegion::object_iterate_mem_careful(MemRegion mr, | |
457 ObjectClosure* cl) { | |
458 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
459 // We used to use "block_start_careful" here. But we're actually happy | |
460 // to update the BOT while we do this... | |
461 HeapWord* cur = block_start(mr.start()); | |
462 mr = mr.intersection(used_region()); | |
463 if (mr.is_empty()) return NULL; | |
464 // Otherwise, find the obj that extends onto mr.start(). | |
465 | |
466 assert(cur <= mr.start() | |
467 && (oop(cur)->klass_or_null() == NULL || | |
468 cur + oop(cur)->size() > mr.start()), | |
469 "postcondition of block_start"); | |
470 oop obj; | |
471 while (cur < mr.end()) { | |
472 obj = oop(cur); | |
473 if (obj->klass_or_null() == NULL) { | |
474 // Ran into an unparseable point. | |
475 return cur; | |
476 } else if (!g1h->is_obj_dead(obj)) { | |
477 cl->do_object(obj); | |
478 } | |
479 if (cl->abort()) return cur; | |
480 // The check above must occur before the operation below, since an | |
481 // abort might invalidate the "size" operation. | |
482 cur += obj->size(); | |
483 } | |
484 return NULL; | |
485 } | |
486 | |
487 HeapWord* | |
488 HeapRegion:: | |
489 oops_on_card_seq_iterate_careful(MemRegion mr, | |
490 FilterOutOfRegionClosure* cl, | |
491 bool filter_young, | |
492 jbyte* card_ptr) { | |
493 // Currently, we should only have to clean the card if filter_young | |
494 // is true and vice versa. | |
495 if (filter_young) { | |
496 assert(card_ptr != NULL, "pre-condition"); | |
497 } else { | |
498 assert(card_ptr == NULL, "pre-condition"); | |
499 } | |
500 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
501 | |
502 // If we're within a stop-world GC, then we might look at a card in a | |
503 // GC alloc region that extends onto a GC LAB, which may not be | |
504 // parseable. Stop such at the "saved_mark" of the region. | |
505 if (g1h->is_gc_active()) { | |
506 mr = mr.intersection(used_region_at_save_marks()); | |
507 } else { | |
508 mr = mr.intersection(used_region()); | |
509 } | |
510 if (mr.is_empty()) return NULL; | |
511 // Otherwise, find the obj that extends onto mr.start(). | |
512 | |
513 // The intersection of the incoming mr (for the card) and the | |
514 // allocated part of the region is non-empty. This implies that | |
515 // we have actually allocated into this region. The code in | |
516 // G1CollectedHeap.cpp that allocates a new region sets the | |
517 // is_young tag on the region before allocating. Thus we | |
518 // safely know if this region is young. | |
519 if (is_young() && filter_young) { | |
520 return NULL; | |
521 } | |
522 | |
523 assert(!is_young(), "check value of filter_young"); | |
524 | |
525 // We can only clean the card here, after we make the decision that | |
526 // the card is not young. And we only clean the card if we have been | |
527 // asked to (i.e., card_ptr != NULL). | |
528 if (card_ptr != NULL) { | |
529 *card_ptr = CardTableModRefBS::clean_card_val(); | |
530 // We must complete this write before we do any of the reads below. | |
531 OrderAccess::storeload(); | |
532 } | |
533 | |
534 // Cache the boundaries of the memory region in some const locals | |
535 HeapWord* const start = mr.start(); | |
536 HeapWord* const end = mr.end(); | |
537 | |
538 // We used to use "block_start_careful" here. But we're actually happy | |
539 // to update the BOT while we do this... | |
540 HeapWord* cur = block_start(start); | |
541 assert(cur <= start, "Postcondition"); | |
542 | |
543 oop obj; | |
544 | |
545 HeapWord* next = cur; | |
546 while (next <= start) { | |
547 cur = next; | |
548 obj = oop(cur); | |
549 if (obj->klass_or_null() == NULL) { | |
550 // Ran into an unparseable point. | |
551 return cur; | |
552 } | |
553 // Otherwise... | |
554 next = (cur + obj->size()); | |
555 } | |
556 | |
557 // If we finish the above loop, we have a parseable object that | |
558 // begins on or before the start of the memory region, and ends | |
559 // inside or spans the entire region. | |
560 | |
561 assert(obj == oop(cur), "sanity"); | |
562 assert(cur <= start && | |
563 obj->klass_or_null() != NULL && | |
564 (cur + obj->size()) > start, | |
565 "Loop postcondition"); | |
566 | |
567 if (!g1h->is_obj_dead(obj)) { | |
568 obj->oop_iterate(cl, mr); | |
569 } | |
570 | |
571 while (cur < end) { | |
572 obj = oop(cur); | |
573 if (obj->klass_or_null() == NULL) { | |
574 // Ran into an unparseable point. | |
575 return cur; | |
576 } | |
577 | |
578 // Otherwise: | |
579 next = (cur + obj->size()); | |
580 | |
581 if (!g1h->is_obj_dead(obj)) { | |
582 if (next < end || !obj->is_objArray()) { | |
583 // This object either does not span the MemRegion | |
584 // boundary, or if it does it's not an array. | |
585 // Apply closure to whole object. | |
586 obj->oop_iterate(cl); | |
587 } else { | |
588 // This obj is an array that spans the boundary. | |
589 // Stop at the boundary. | |
590 obj->oop_iterate(cl, mr); | |
591 } | |
592 } | |
593 cur = next; | |
594 } | |
595 return NULL; | |
596 } | |
597 | |
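
The card-cleaning step inside the function above depends on memory ordering: the card byte is set to clean before any object on the card is re-read, so a mutator store that re-dirties the card concurrently is never lost; at worst the card is processed again. A minimal sketch of that store-then-fence-then-load pattern (illustrative values; HotSpot uses OrderAccess::storeload()):

    #include <atomic>
    #include <cstdint>

    enum CardVal { kDirty = 0, kClean = 1 };   // illustrative values only

    // Refinement side: clean the card, fence, then scan the card's words.
    inline void scan_card(std::atomic<uint8_t>& card) {
      card.store(kClean, std::memory_order_relaxed);
      std::atomic_thread_fence(std::memory_order_seq_cst); // ~ storeload()
      // ... now safe to read the objects covered by this card; any write
      // after our clean re-dirties the card for a later pass.
    }
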
598 // Code roots support | |
599 | |
600 void HeapRegion::add_strong_code_root(nmethod* nm) { | |
601 HeapRegionRemSet* hrrs = rem_set(); | |
602 hrrs->add_strong_code_root(nm); | |
603 } | |
604 | |
605 void HeapRegion::remove_strong_code_root(nmethod* nm) { | |
606 HeapRegionRemSet* hrrs = rem_set(); | |
607 hrrs->remove_strong_code_root(nm); | |
608 } | |
609 | |
610 void HeapRegion::migrate_strong_code_roots() { | |
611 assert(in_collection_set(), "only collection set regions"); | |
612 assert(!isHumongous(), "not humongous regions"); | |
613 | |
614 HeapRegionRemSet* hrrs = rem_set(); | |
615 hrrs->migrate_strong_code_roots(); | |
616 } | |
617 | |
618 void HeapRegion::strong_code_roots_do(CodeBlobClosure* blk) const { | |
619 HeapRegionRemSet* hrrs = rem_set(); | |
620 hrrs->strong_code_roots_do(blk); | |
621 } | |
622 | |
623 class VerifyStrongCodeRootOopClosure: public OopClosure { | |
624 const HeapRegion* _hr; | |
625 nmethod* _nm; | |
626 bool _failures; | |
627 bool _has_oops_in_region; | |
628 | |
629 template <class T> void do_oop_work(T* p) { | |
630 T heap_oop = oopDesc::load_heap_oop(p); | |
631 if (!oopDesc::is_null(heap_oop)) { | |
632 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); | |
633 | |
634 // Note: not all the oops embedded in the nmethod are in the | |
635 // current region. We only look at those which are. | |
636 if (_hr->is_in(obj)) { | |
637 // Object is in the region. Check that it's less than top | |
638 if (_hr->top() <= (HeapWord*)obj) { | |
639 // Object is above top | |
640 gclog_or_tty->print_cr("Object "PTR_FORMAT" in region " | |
641 "["PTR_FORMAT", "PTR_FORMAT") is above " | |
642 "top "PTR_FORMAT, | |
643 obj, _hr->bottom(), _hr->end(), _hr->top()); | |
644 _failures = true; | |
645 return; | |
646 } | |
647 // Nmethod has at least one oop in the current region | |
648 _has_oops_in_region = true; | |
649 } | |
650 } | |
651 } | |
652 | |
653 public: | |
654 VerifyStrongCodeRootOopClosure(const HeapRegion* hr, nmethod* nm): | |
655 _hr(hr), _failures(false), _has_oops_in_region(false) {} | |
656 | |
657 void do_oop(narrowOop* p) { do_oop_work(p); } | |
658 void do_oop(oop* p) { do_oop_work(p); } | |
659 | |
660 bool failures() { return _failures; } | |
661 bool has_oops_in_region() { return _has_oops_in_region; } | |
662 }; | |
663 | |
664 class VerifyStrongCodeRootCodeBlobClosure: public CodeBlobClosure { | |
665 const HeapRegion* _hr; | |
666 bool _failures; | |
667 public: | |
668 VerifyStrongCodeRootCodeBlobClosure(const HeapRegion* hr) : | |
669 _hr(hr), _failures(false) {} | |
670 | |
671 void do_code_blob(CodeBlob* cb) { | |
672 nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null(); | |
673 if (nm != NULL) { | |
674 // Verify that the nmethod is live | |
675 if (!nm->is_alive()) { | |
676 gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has dead nmethod " | |
677 PTR_FORMAT" in its strong code roots", | |
678 _hr->bottom(), _hr->end(), nm); | |
679 _failures = true; | |
680 } else { | |
681 VerifyStrongCodeRootOopClosure oop_cl(_hr, nm); | |
682 nm->oops_do(&oop_cl); | |
683 if (!oop_cl.has_oops_in_region()) { | |
684 gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has nmethod " | |
685 PTR_FORMAT" in its strong code roots " | |
686 "with no pointers into region", | |
687 _hr->bottom(), _hr->end(), nm); | |
688 _failures = true; | |
689 } else if (oop_cl.failures()) { | |
690 gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has other " | |
691 "failures for nmethod "PTR_FORMAT, | |
692 _hr->bottom(), _hr->end(), nm); | |
693 _failures = true; | |
694 } | |
695 } | |
696 } | |
697 } | |
698 | |
699 bool failures() { return _failures; } | |
700 }; | |
701 | |
702 void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const { | |
703 if (!G1VerifyHeapRegionCodeRoots) { | |
704 // We're not verifying code roots. | |
705 return; | |
706 } | |
707 if (vo == VerifyOption_G1UseMarkWord) { | |
708 // Marking verification during a full GC is performed after class | |
709 // unloading, code cache unloading, etc so the strong code roots | |
710 // attached to each heap region are in an inconsistent state. They won't | |
711 // be consistent until the strong code roots are rebuilt after the | |
712 // actual GC. Skip verifying the strong code roots in this particular | |
713 // time. | |
714 assert(VerifyDuringGC, "only way to get here"); | |
715 return; | |
716 } | |
717 | |
718 HeapRegionRemSet* hrrs = rem_set(); | |
719 int strong_code_roots_length = hrrs->strong_code_roots_list_length(); | |
720 | |
721 // if this region is empty then there should be no entries | |
722 // on its strong code root list | |
723 if (is_empty()) { | |
724 if (strong_code_roots_length > 0) { | |
725 gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is empty " | |
726 "but has "INT32_FORMAT" code root entries", | |
727 bottom(), end(), strong_code_roots_length); | |
728 *failures = true; | |
729 } | |
730 return; | |
731 } | |
732 | |
733 // An H-region should have an empty strong code root list | |
734 if (isHumongous()) { | |
735 if (strong_code_roots_length > 0) { | |
736 gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous " | |
737 "but has "INT32_FORMAT" code root entries", | |
738 bottom(), end(), strong_code_roots_length); | |
739 *failures = true; | |
740 } | |
741 return; | |
742 } | |
743 | |
744 VerifyStrongCodeRootCodeBlobClosure cb_cl(this); | |
745 strong_code_roots_do(&cb_cl); | |
746 | |
747 if (cb_cl.failures()) { | |
748 *failures = true; | |
749 } | |
750 } | |
751 | |
752 void HeapRegion::print() const { print_on(gclog_or_tty); } | |
753 void HeapRegion::print_on(outputStream* st) const { | |
754 if (isHumongous()) { | |
755 if (startsHumongous()) | |
756 st->print(" HS"); | |
757 else | |
758 st->print(" HC"); | |
759 } else { | |
760 st->print(" "); | |
761 } | |
762 if (in_collection_set()) | |
763 st->print(" CS"); | |
764 else | |
765 st->print(" "); | |
766 if (is_young()) | |
767 st->print(is_survivor() ? " SU" : " Y "); | |
768 else | |
769 st->print(" "); | |
770 if (is_empty()) | |
771 st->print(" F"); | |
772 else | |
773 st->print(" "); | |
774 st->print(" TS %5d", _gc_time_stamp); | |
775 st->print(" PTAMS "PTR_FORMAT" NTAMS "PTR_FORMAT, | |
776 prev_top_at_mark_start(), next_top_at_mark_start()); | |
777 G1OffsetTableContigSpace::print_on(st); | |
778 } | |
52 | 779 |
53 class VerifyLiveClosure: public OopClosure { | 780 class VerifyLiveClosure: public OopClosure { |
54 private: | 781 private: |
55 G1CollectedHeap* _g1h; | 782 G1CollectedHeap* _g1h; |
56 CardTableModRefBS* _bs; | 783 CardTableModRefBS* _bs; |
186 } | 913 } |
187 } | 914 } |
188 } | 915 } |
189 }; | 916 }; |
190 | 917 |
191 template<class ClosureType> | |
192 HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h, | |
193 HeapRegion* hr, | |
194 HeapWord* cur, HeapWord* top) { | |
195 oop cur_oop = oop(cur); | |
196 int oop_size = cur_oop->size(); | |
197 HeapWord* next_obj = cur + oop_size; | |
198 while (next_obj < top) { | |
199 // Keep filtering the remembered set. | |
200 if (!g1h->is_obj_dead(cur_oop, hr)) { | |
201 // Bottom lies entirely below top, so we can call the | |
202 // non-memRegion version of oop_iterate below. | |
203 cur_oop->oop_iterate(cl); | |
204 } | |
205 cur = next_obj; | |
206 cur_oop = oop(cur); | |
207 oop_size = cur_oop->size(); | |
208 next_obj = cur + oop_size; | |
209 } | |
210 return cur; | |
211 } | |
212 | |
213 void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr, | |
214 HeapWord* bottom, | |
215 HeapWord* top, | |
216 ExtendedOopClosure* cl) { | |
217 G1CollectedHeap* g1h = _g1; | |
218 int oop_size; | |
219 ExtendedOopClosure* cl2 = NULL; | |
220 | |
221 FilterIntoCSClosure intoCSFilt(this, g1h, cl); | |
222 FilterOutOfRegionClosure outOfRegionFilt(_hr, cl); | |
223 | |
224 switch (_fk) { | |
225 case NoFilterKind: cl2 = cl; break; | |
226 case IntoCSFilterKind: cl2 = &intoCSFilt; break; | |
227 case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break; | |
228 default: ShouldNotReachHere(); | |
229 } | |
230 | |
231 // Start filtering what we add to the remembered set. If the object is | |
232 // not considered dead, either because it is marked (in the mark bitmap) | |
233 // or it was allocated after marking finished, then we add it. Otherwise | |
234 // we can safely ignore the object. | |
235 if (!g1h->is_obj_dead(oop(bottom), _hr)) { | |
236 oop_size = oop(bottom)->oop_iterate(cl2, mr); | |
237 } else { | |
238 oop_size = oop(bottom)->size(); | |
239 } | |
240 | |
241 bottom += oop_size; | |
242 | |
243 if (bottom < top) { | |
244 // We replicate the loop below for several kinds of possible filters. | |
245 switch (_fk) { | |
246 case NoFilterKind: | |
247 bottom = walk_mem_region_loop(cl, g1h, _hr, bottom, top); | |
248 break; | |
249 | |
250 case IntoCSFilterKind: { | |
251 FilterIntoCSClosure filt(this, g1h, cl); | |
252 bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top); | |
253 break; | |
254 } | |
255 | |
256 case OutOfRegionFilterKind: { | |
257 FilterOutOfRegionClosure filt(_hr, cl); | |
258 bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top); | |
259 break; | |
260 } | |
261 | |
262 default: | |
263 ShouldNotReachHere(); | |
264 } | |
265 | |
266 // Last object. Need to do dead-obj filtering here too. | |
267 if (!g1h->is_obj_dead(oop(bottom), _hr)) { | |
268 oop(bottom)->oop_iterate(cl2, mr); | |
269 } | |
270 } | |
271 } | |
272 | |
273 // Minimum region size; we won't go lower than that. | |
274 // We might want to decrease this in the future, to deal with small | |
275 // heaps a bit more efficiently. | |
276 #define MIN_REGION_SIZE ( 1024 * 1024 ) | |
277 | |
278 // Maximum region size; we don't go higher than that. There's a good | |
279 // reason for having an upper bound. We don't want regions to get too | |
280 // large, otherwise cleanup's effectiveness would decrease as there | |
281 // will be fewer opportunities to find totally empty regions after | |
282 // marking. | |
283 #define MAX_REGION_SIZE ( 32 * 1024 * 1024 ) | |
284 | |
285 // The automatic region size calculation will try to have around this | |
286 // many regions in the heap (based on the min heap size). | |
287 #define TARGET_REGION_NUMBER 2048 | |
288 | |
289 void HeapRegion::setup_heap_region_size(uintx min_heap_size) { | |
290 // region_size in bytes | |
291 uintx region_size = G1HeapRegionSize; | |
292 if (FLAG_IS_DEFAULT(G1HeapRegionSize)) { | |
293 // We base the automatic calculation on the min heap size. This | |
294 // can be problematic if the spread between min and max is quite | |
295 // wide, imagine -Xms128m -Xmx32g. But, if we decided it based on | |
296 // the max size, the region size might be way too large for the | |
297 // min size. Either way, some users might have to set the region | |
298 // size manually for some -Xms / -Xmx combos. | |
299 | |
300 region_size = MAX2(min_heap_size / TARGET_REGION_NUMBER, | |
301 (uintx) MIN_REGION_SIZE); | |
302 } | |
303 | |
304 int region_size_log = log2_long((jlong) region_size); | |
305 // Recalculate the region size to make sure it's a power of | |
306 // 2. This means that region_size is the largest power of 2 that's | |
307 // <= what we've calculated so far. | |
308 region_size = ((uintx)1 << region_size_log); | |
309 | |
310 // Now make sure that we don't go over or under our limits. | |
311 if (region_size < MIN_REGION_SIZE) { | |
312 region_size = MIN_REGION_SIZE; | |
313 } else if (region_size > MAX_REGION_SIZE) { | |
314 region_size = MAX_REGION_SIZE; | |
315 } | |
316 | |
317 if (region_size != G1HeapRegionSize) { | |
318 // Update the flag to make sure that PrintFlagsFinal logs the correct value | |
319 FLAG_SET_ERGO(uintx, G1HeapRegionSize, region_size); | |
320 } | |
321 | |
322 // And recalculate the log. | |
323 region_size_log = log2_long((jlong) region_size); | |
324 | |
325 // Now, set up the globals. | |
326 guarantee(LogOfHRGrainBytes == 0, "we should only set it once"); | |
327 LogOfHRGrainBytes = region_size_log; | |
328 | |
329 guarantee(LogOfHRGrainWords == 0, "we should only set it once"); | |
330 LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize; | |
331 | |
332 guarantee(GrainBytes == 0, "we should only set it once"); | |
333 // The cast to int is safe, given that we've bounded region_size by | |
334 // MIN_REGION_SIZE and MAX_REGION_SIZE. | |
335 GrainBytes = (size_t)region_size; | |
336 | |
337 guarantee(GrainWords == 0, "we should only set it once"); | |
338 GrainWords = GrainBytes >> LogHeapWordSize; | |
339 guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity"); | |
340 | |
341 guarantee(CardsPerRegion == 0, "we should only set it once"); | |
342 CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift; | |
343 } | |
344 | |
345 void HeapRegion::reset_after_compaction() { | |
346 G1OffsetTableContigSpace::reset_after_compaction(); | |
347 // After a compaction the mark bitmap is invalid, so we must | |
348 // treat all objects as being inside the unmarked area. | |
349 zero_marked_bytes(); | |
350 init_top_at_mark_start(); | |
351 } | |
352 | |
353 void HeapRegion::hr_clear(bool par, bool clear_space) { | |
354 assert(_humongous_type == NotHumongous, | |
355 "we should have already filtered out humongous regions"); | |
356 assert(_humongous_start_region == NULL, | |
357 "we should have already filtered out humongous regions"); | |
358 assert(_end == _orig_end, | |
359 "we should have already filtered out humongous regions"); | |
360 | |
361 _in_collection_set = false; | |
362 | |
363 set_young_index_in_cset(-1); | |
364 uninstall_surv_rate_group(); | |
365 set_young_type(NotYoung); | |
366 reset_pre_dummy_top(); | |
367 | |
368 if (!par) { | |
369 // If this is parallel, this will be done later. | |
370 HeapRegionRemSet* hrrs = rem_set(); | |
371 if (hrrs != NULL) hrrs->clear(); | |
372 _claimed = InitialClaimValue; | |
373 } | |
374 zero_marked_bytes(); | |
375 | |
376 _offsets.resize(HeapRegion::GrainWords); | |
377 init_top_at_mark_start(); | |
378 if (clear_space) clear(SpaceDecorator::Mangle); | |
379 } | |
380 | |
381 void HeapRegion::par_clear() { | |
382 assert(used() == 0, "the region should have been already cleared"); | |
383 assert(capacity() == HeapRegion::GrainBytes, "should be back to normal"); | |
384 HeapRegionRemSet* hrrs = rem_set(); | |
385 hrrs->clear(); | |
386 CardTableModRefBS* ct_bs = | |
387 (CardTableModRefBS*)G1CollectedHeap::heap()->barrier_set(); | |
388 ct_bs->clear(MemRegion(bottom(), end())); | |
389 } | |
390 | |
391 void HeapRegion::calc_gc_efficiency() { | |
392 // GC efficiency is the ratio of how much space would be | |
393 // reclaimed over how long we predict it would take to reclaim it. | |
394 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
395 G1CollectorPolicy* g1p = g1h->g1_policy(); | |
396 | |
397 // Retrieve a prediction of the elapsed time for this region for | |
398 // a mixed gc because the region will only be evacuated during a | |
399 // mixed gc. | |
400 double region_elapsed_time_ms = | |
401 g1p->predict_region_elapsed_time_ms(this, false /* for_young_gc */); | |
402 _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms; | |
403 } | |
404 | |
405 void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) { | |
406 assert(!isHumongous(), "sanity / pre-condition"); | |
407 assert(end() == _orig_end, | |
408 "Should be normal before the humongous object allocation"); | |
409 assert(top() == bottom(), "should be empty"); | |
410 assert(bottom() <= new_top && new_top <= new_end, "pre-condition"); | |
411 | |
412 _humongous_type = StartsHumongous; | |
413 _humongous_start_region = this; | |
414 | |
415 set_end(new_end); | |
416 _offsets.set_for_starts_humongous(new_top); | |
417 } | |
418 | |
419 void HeapRegion::set_continuesHumongous(HeapRegion* first_hr) { | |
420 assert(!isHumongous(), "sanity / pre-condition"); | |
421 assert(end() == _orig_end, | |
422 "Should be normal before the humongous object allocation"); | |
423 assert(top() == bottom(), "should be empty"); | |
424 assert(first_hr->startsHumongous(), "pre-condition"); | |
425 | |
426 _humongous_type = ContinuesHumongous; | |
427 _humongous_start_region = first_hr; | |
428 } | |
429 | |
430 void HeapRegion::set_notHumongous() { | |
431 assert(isHumongous(), "pre-condition"); | |
432 | |
433 if (startsHumongous()) { | |
434 assert(top() <= end(), "pre-condition"); | |
435 set_end(_orig_end); | |
436 if (top() > end()) { | |
437 // at least one "continues humongous" region after it | |
438 set_top(end()); | |
439 } | |
440 } else { | |
441 // continues humongous | |
442 assert(end() == _orig_end, "sanity"); | |
443 } | |
444 | |
445 assert(capacity() == HeapRegion::GrainBytes, "pre-condition"); | |
446 _humongous_type = NotHumongous; | |
447 _humongous_start_region = NULL; | |
448 } | |
449 | |
450 bool HeapRegion::claimHeapRegion(jint claimValue) { | |
451 jint current = _claimed; | |
452 if (current != claimValue) { | |
453 jint res = Atomic::cmpxchg(claimValue, &_claimed, current); | |
454 if (res == current) { | |
455 return true; | |
456 } | |
457 } | |
458 return false; | |
459 } | |
460 | |
461 HeapWord* HeapRegion::next_block_start_careful(HeapWord* addr) { | |
462 HeapWord* low = addr; | |
463 HeapWord* high = end(); | |
464 while (low < high) { | |
465 size_t diff = pointer_delta(high, low); | |
466 // Must add one below to bias toward the high amount. Otherwise, if | |
467 // "high" were at the desired value, and "low" were one less, we | |
468 // would not converge on "high". This is not symmetric, because | |
469 // we set "high" to a block start, which might be the right one, | |
470 // which we don't do for "low". | |
471 HeapWord* middle = low + (diff+1)/2; | |
472 if (middle == high) return high; | |
473 HeapWord* mid_bs = block_start_careful(middle); | |
474 if (mid_bs < addr) { | |
475 low = middle; | |
476 } else { | |
477 high = mid_bs; | |
478 } | |
479 } | |
480 assert(low == high && low >= addr, "Didn't work."); | |
481 return low; | |
482 } | |
483 | |
484 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away | |
485 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list | |
486 #endif // _MSC_VER | |
487 | |
488 | |
489 HeapRegion::HeapRegion(uint hrs_index, | |
490 G1BlockOffsetSharedArray* sharedOffsetArray, | |
491 MemRegion mr) : | |
492 G1OffsetTableContigSpace(sharedOffsetArray, mr), | |
493 _hrs_index(hrs_index), | |
494 _humongous_type(NotHumongous), _humongous_start_region(NULL), | |
495 _in_collection_set(false), | |
496 _next_in_special_set(NULL), _orig_end(NULL), | |
497 _claimed(InitialClaimValue), _evacuation_failed(false), | |
498 _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0), | |
499 _young_type(NotYoung), _next_young_region(NULL), | |
500 _next_dirty_cards_region(NULL), _next(NULL), _pending_removal(false), | |
501 #ifdef ASSERT | |
502 _containing_set(NULL), | |
503 #endif // ASSERT | |
504 _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1), | |
505 _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0), | |
506 _predicted_bytes_to_copy(0) | |
507 { | |
508 _orig_end = mr.end(); | |
509 // Note that initialize() will set the start of the unmarked area of the | |
510 // region. | |
511 hr_clear(false /*par*/, false /*clear_space*/); | |
512 set_top(bottom()); | |
513 set_saved_mark(); | |
514 | |
515 _rem_set = new HeapRegionRemSet(sharedOffsetArray, this); | |
516 | |
517 assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant."); | |
518 } | |
519 | |
520 CompactibleSpace* HeapRegion::next_compaction_space() const { | |
521 // We're not using an iterator given that it will wrap around when | |
522 // it reaches the last region and this is not what we want here. | |
523 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
524 uint index = hrs_index() + 1; | |
525 while (index < g1h->n_regions()) { | |
526 HeapRegion* hr = g1h->region_at(index); | |
527 if (!hr->isHumongous()) { | |
528 return hr; | |
529 } | |
530 index += 1; | |
531 } | |
532 return NULL; | |
533 } | |
534 | |
535 void HeapRegion::save_marks() { | |
536 set_saved_mark(); | |
537 } | |
538 | |
539 void HeapRegion::oops_in_mr_iterate(MemRegion mr, ExtendedOopClosure* cl) { | |
540 HeapWord* p = mr.start(); | |
541 HeapWord* e = mr.end(); | |
542 oop obj; | |
543 while (p < e) { | |
544 obj = oop(p); | |
545 p += obj->oop_iterate(cl); | |
546 } | |
547 assert(p == e, "bad memregion: doesn't end on obj boundary"); | |
548 } | |
549 | |
550 #define HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \ | |
551 void HeapRegion::oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \ | |
552 ContiguousSpace::oop_since_save_marks_iterate##nv_suffix(cl); \ | |
553 } | |
554 SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN) | |
555 | |
556 | |
557 void HeapRegion::oop_before_save_marks_iterate(ExtendedOopClosure* cl) { | |
558 oops_in_mr_iterate(MemRegion(bottom(), saved_mark_word()), cl); | |
559 } | |
560 | |
561 void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark, | |
562 bool during_conc_mark) { | |
563 // We always recreate the prev marking info and we'll explicitly | |
564 // mark all objects we find to be self-forwarded on the prev | |
565 // bitmap. So all objects need to be below PTAMS. | |
566 _prev_top_at_mark_start = top(); | |
567 _prev_marked_bytes = 0; | |
568 | |
569 if (during_initial_mark) { | |
570 // During initial-mark, we'll also explicitly mark all objects | |
571 // we find to be self-forwarded on the next bitmap. So all | |
572 // objects need to be below NTAMS. | |
573 _next_top_at_mark_start = top(); | |
574 _next_marked_bytes = 0; | |
575 } else if (during_conc_mark) { | |
576 // During concurrent mark, all objects in the CSet (including | |
577 // the ones we find to be self-forwarded) are implicitly live. | |
578 // So all objects need to be above NTAMS. | |
579 _next_top_at_mark_start = bottom(); | |
580 _next_marked_bytes = 0; | |
581 } | |
582 } | |
583 | |
584 void HeapRegion::note_self_forwarding_removal_end(bool during_initial_mark, | |
585 bool during_conc_mark, | |
586 size_t marked_bytes) { | |
587 assert(0 <= marked_bytes && marked_bytes <= used(), | |
588 err_msg("marked: "SIZE_FORMAT" used: "SIZE_FORMAT, | |
589 marked_bytes, used())); | |
590 _prev_marked_bytes = marked_bytes; | |
591 } | |
592 | |
593 HeapWord* | |
594 HeapRegion::object_iterate_mem_careful(MemRegion mr, | |
595 ObjectClosure* cl) { | |
596 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
597 // We used to use "block_start_careful" here. But we're actually happy | |
598 // to update the BOT while we do this... | |
599 HeapWord* cur = block_start(mr.start()); | |
600 mr = mr.intersection(used_region()); | |
601 if (mr.is_empty()) return NULL; | |
602 // Otherwise, find the obj that extends onto mr.start(). | |
603 | |
604 assert(cur <= mr.start() | |
605 && (oop(cur)->klass_or_null() == NULL || | |
606 cur + oop(cur)->size() > mr.start()), | |
607 "postcondition of block_start"); | |
608 oop obj; | |
609 while (cur < mr.end()) { | |
610 obj = oop(cur); | |
611 if (obj->klass_or_null() == NULL) { | |
612 // Ran into an unparseable point. | |
613 return cur; | |
614 } else if (!g1h->is_obj_dead(obj)) { | |
615 cl->do_object(obj); | |
616 } | |
617 if (cl->abort()) return cur; | |
618 // The check above must occur before the operation below, since an | |
619 // abort might invalidate the "size" operation. | |
620 cur += obj->size(); | |
621 } | |
622 return NULL; | |
623 } | |
624 | |
625 HeapWord* | |
626 HeapRegion:: | |
627 oops_on_card_seq_iterate_careful(MemRegion mr, | |
628 FilterOutOfRegionClosure* cl, | |
629 bool filter_young, | |
630 jbyte* card_ptr) { | |
631 // Currently, we should only have to clean the card if filter_young | |
632 // is true and vice versa. | |
633 if (filter_young) { | |
634 assert(card_ptr != NULL, "pre-condition"); | |
635 } else { | |
636 assert(card_ptr == NULL, "pre-condition"); | |
637 } | |
638 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
639 | |
640 // If we're within a stop-world GC, then we might look at a card in a | |
641 // GC alloc region that extends onto a GC LAB, which may not be | |
642 // parseable. Stop such at the "saved_mark" of the region. | |
643 if (g1h->is_gc_active()) { | |
644 mr = mr.intersection(used_region_at_save_marks()); | |
645 } else { | |
646 mr = mr.intersection(used_region()); | |
647 } | |
648 if (mr.is_empty()) return NULL; | |
649 // Otherwise, find the obj that extends onto mr.start(). | |
650 | |
651 // The intersection of the incoming mr (for the card) and the | |
652 // allocated part of the region is non-empty. This implies that | |
653 // we have actually allocated into this region. The code in | |
654 // G1CollectedHeap.cpp that allocates a new region sets the | |
655 // is_young tag on the region before allocating. Thus we | |
656 // safely know if this region is young. | |
657 if (is_young() && filter_young) { | |
658 return NULL; | |
659 } | |
660 | |
661 assert(!is_young(), "check value of filter_young"); | |
662 | |
663 // We can only clean the card here, after we make the decision that | |
664 // the card is not young. And we only clean the card if we have been | |
665 // asked to (i.e., card_ptr != NULL). | |
666 if (card_ptr != NULL) { | |
667 *card_ptr = CardTableModRefBS::clean_card_val(); | |
668 // We must complete this write before we do any of the reads below. | |
669 OrderAccess::storeload(); | |
670 } | |
671 | |
672 // Cache the boundaries of the memory region in some const locals | |
673 HeapWord* const start = mr.start(); | |
674 HeapWord* const end = mr.end(); | |
675 | |
676 // We used to use "block_start_careful" here. But we're actually happy | |
677 // to update the BOT while we do this... | |
678 HeapWord* cur = block_start(start); | |
679 assert(cur <= start, "Postcondition"); | |
680 | |
681 oop obj; | |
682 | |
683 HeapWord* next = cur; | |
684 while (next <= start) { | |
685 cur = next; | |
686 obj = oop(cur); | |
687 if (obj->klass_or_null() == NULL) { | |
688 // Ran into an unparseable point. | |
689 return cur; | |
690 } | |
691 // Otherwise... | |
692 next = (cur + obj->size()); | |
693 } | |
694 | |
695 // If we finish the above loop, we have a parseable object that | |
696 // begins on or before the start of the memory region, and ends | |
697 // inside or spans the entire region. | |
698 | |
699 assert(obj == oop(cur), "sanity"); | |
700 assert(cur <= start && | |
701 obj->klass_or_null() != NULL && | |
702 (cur + obj->size()) > start, | |
703 "Loop postcondition"); | |
704 | |
705 if (!g1h->is_obj_dead(obj)) { | |
706 obj->oop_iterate(cl, mr); | |
707 } | |
708 | |
709 while (cur < end) { | |
710 obj = oop(cur); | |
711 if (obj->klass_or_null() == NULL) { | |
712 // Ran into an unparseable point. | |
713 return cur; | |
714 } | |
715 | |
716 // Otherwise: | |
717 next = (cur + obj->size()); | |
718 | |
719 if (!g1h->is_obj_dead(obj)) { | |
720 if (next < end || !obj->is_objArray()) { | |
721 // This object either does not span the MemRegion | |
722 // boundary, or if it does it's not an array. | |
723 // Apply closure to whole object. | |
724 obj->oop_iterate(cl); | |
725 } else { | |
726 // This obj is an array that spans the boundary. | |
727 // Stop at the boundary. | |
728 obj->oop_iterate(cl, mr); | |
729 } | |
730 } | |
731 cur = next; | |
732 } | |
733 return NULL; | |
734 } | |
735 | |
736 void HeapRegion::print() const { print_on(gclog_or_tty); } | |
737 void HeapRegion::print_on(outputStream* st) const { | |
738 if (isHumongous()) { | |
739 if (startsHumongous()) | |
740 st->print(" HS"); | |
741 else | |
742 st->print(" HC"); | |
743 } else { | |
744 st->print(" "); | |
745 } | |
746 if (in_collection_set()) | |
747 st->print(" CS"); | |
748 else | |
749 st->print(" "); | |
750 if (is_young()) | |
751 st->print(is_survivor() ? " SU" : " Y "); | |
752 else | |
753 st->print(" "); | |
754 if (is_empty()) | |
755 st->print(" F"); | |
756 else | |
757 st->print(" "); | |
758 st->print(" TS %5d", _gc_time_stamp); | |
759 st->print(" PTAMS "PTR_FORMAT" NTAMS "PTR_FORMAT, | |
760 prev_top_at_mark_start(), next_top_at_mark_start()); | |
761 G1OffsetTableContigSpace::print_on(st); | |
762 } | |
763 | |
764 void HeapRegion::verify() const { | |
765 bool dummy = false; | |
766 verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy); | |
767 } | |
768 | |
769 // This really ought to be commoned up into OffsetTableContigSpace somehow. | 918 // This really ought to be commoned up into OffsetTableContigSpace somehow. |
770 // We would need a mechanism to make that code skip dead objects. | 919 // We would need a mechanism to make that code skip dead objects. |
771 | 920 |
772 void HeapRegion::verify(VerifyOption vo, | 921 void HeapRegion::verify(VerifyOption vo, |
773 bool* failures) const { | 922 bool* failures) const { |
902 "but has "SIZE_FORMAT", objects", | 1051 "but has "SIZE_FORMAT", objects", |
903 bottom(), end(), object_num); | 1052 bottom(), end(), object_num); |
904 *failures = true; | 1053 *failures = true; |
905 return; | 1054 return; |
906 } | 1055 } |
1056 | |
1057 verify_strong_code_roots(vo, failures); | |
1058 } | |
1059 | |
1060 void HeapRegion::verify() const { | |
1061 bool dummy = false; | |
1062 verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy); | |
907 } | 1063 } |
908 | 1064 |
909 // G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go | 1065 // G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go |
910 // away eventually. | 1066 // away eventually. |
911 | 1067 |