comparison src/share/vm/gc_implementation/g1/heapRegion.cpp @ 1886:72a161e62cc4

6991377: G1: race between concurrent refinement and humongous object allocation
Summary: There is a race between the concurrent refinement threads and humongous object allocation that can cause the concurrent refinement threads to corrupt the part of the BOT that is being initialized by the humongous object allocation operation. The solution is to do the humongous object allocation in careful steps so that the concurrent refinement threads always have a consistent view of the BOT, the region contents, and top. The fix also includes some very minor tidying up in sparsePRT.
Reviewed-by: jcoomes, johnc, ysr
author tonyp
date Sat, 16 Oct 2010 17:12:19 -0400
parents bb847e31b836
children 2d26b0046e0d f95d63e2154a
comparing 1885:a5c514e74487 with 1886:72a161e62cc4
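The race and the fix hinge on one invariant: concurrent refinement only examines a region up to top(), so as long as the BOT and the region metadata are made fully consistent before top is advanced, refinement can never observe a half-initialized BOT entry. The following self-contained toy model illustrates that ordering; it is not HotSpot code, and every name in it (ToyRegion, top_card, and so on) is invented for illustration.

#include <atomic>
#include <cassert>
#include <cstddef>
#include <thread>

// Toy model of the invariant behind the fix: the "refinement" reader only
// consults BOT entries below top, and the "allocator" writer finishes BOT
// initialization before publishing a new top.
struct ToyRegion {
  static constexpr size_t kCards = 1024;
  int bot[kCards];                      // stand-in for the block offset table
  std::atomic<size_t> top_card{0};      // cards below this are published

  // Allocator side: initialize BOT entries first, publish top last.
  void allocate(size_t new_top_card) {
    for (size_t i = top_card.load(std::memory_order_relaxed);
         i < new_top_card; i++) {
      bot[i] = (int) i;                                       // step 1
    }
    top_card.store(new_top_card, std::memory_order_release);  // step 2
  }

  // Refinement side: never look past top.
  void refine(size_t card) {
    size_t t = top_card.load(std::memory_order_acquire);
    if (card < t) {
      assert(bot[card] == (int) card && "refinement saw a stale BOT entry");
    }
  }
};

int main() {
  ToyRegion r;
  std::thread refiner([&r] {
    for (int pass = 0; pass < 10000; pass++) {
      for (size_t c = 0; c < ToyRegion::kCards; c++) {
        r.refine(c);
      }
    }
  });
  for (size_t t = 64; t <= ToyRegion::kCards; t += 64) {
    r.allocate(t);
  }
  refiner.join();
  return 0;
}

Reversing the two steps in allocate() reintroduces exactly the kind of window the summary describes.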
@@ -375,14 +375,30 @@
   _gc_efficiency = (double) garbage_bytes() /
                    g1h->predict_region_elapsed_time_ms(this, false);
 }
 // </PREDICTION>
 
-void HeapRegion::set_startsHumongous() {
+void HeapRegion::set_startsHumongous(HeapWord* new_end) {
+  assert(end() == _orig_end,
+         "Should be normal before the humongous object allocation");
+  assert(top() == bottom(), "should be empty");
+
   _humongous_type = StartsHumongous;
   _humongous_start_region = this;
-  assert(end() == _orig_end, "Should be normal before alloc.");
+
+  set_end(new_end);
+  _offsets.set_for_starts_humongous(new_end);
+}
+
+void HeapRegion::set_continuesHumongous(HeapRegion* start) {
+  assert(end() == _orig_end,
+         "Should be normal before the humongous object allocation");
+  assert(top() == bottom(), "should be empty");
+  assert(start->startsHumongous(), "pre-condition");
+
+  _humongous_type = ContinuesHumongous;
+  _humongous_start_region = start;
 }
 
 bool HeapRegion::claimHeapRegion(jint claimValue) {
   jint current = _claimed;
   if (current != claimValue) {
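With this hunk applied, a humongous series is set up in two phases while every region still has top() == bottom(), which the new asserts enforce. Below is a sketch of the intended call order; the real allocation path lives in g1CollectedHeap.cpp and is not part of this diff, so the helper and parameter names are hypothetical.

// Hypothetical caller, sketched only from the API visible in this hunk.
void setup_humongous_series(HeapRegion* first, HeapRegion** rest,
                            size_t num_rest, HeapWord* series_end) {
  // Fix up end() and the BOT of the first region while it is still empty.
  first->set_startsHumongous(series_end);
  // Link each follow-on region to the first; all of them are still empty,
  // so concurrent refinement has nothing to scan in any of them yet.
  for (size_t i = 0; i < num_rest; i++) {
    rest[i]->set_continuesHumongous(first);
  }
  // Only after the object itself has been laid out does the caller advance
  // top, publishing the object to concurrent refinement.
}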
@@ -496,27 +512,10 @@
   // cast away const-ness
   HeapRegion* r = (HeapRegion*) this;
   NextCompactionHeapRegionClosure blk(r);
   g1h->heap_region_iterate_from(r, &blk);
   return blk.result();
 }
 
-void HeapRegion::set_continuesHumongous(HeapRegion* start) {
-  // The order is important here.
-  start->add_continuingHumongousRegion(this);
-  _humongous_type = ContinuesHumongous;
-  _humongous_start_region = start;
-}
-
-void HeapRegion::add_continuingHumongousRegion(HeapRegion* cont) {
-  // Must join the blocks of the current H region seq with the block of the
-  // added region.
-  offsets()->join_blocks(bottom(), cont->bottom());
-  arrayOop obj = (arrayOop)(bottom());
-  obj->set_length((int) (obj->length() + cont->capacity()/jintSize));
-  set_end(cont->end());
-  set_top(cont->end());
-}
-
 void HeapRegion::save_marks() {
   set_saved_mark();
 }
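For contrast, the deleted expansion path above grew an already-published startsHumongous region in place. Here is the removed function re-quoted with the race window annotated; the interleaving notes are my reading of the summary, not something the changeset states.

// Removed code, re-quoted with annotations. A concurrent refinement thread
// may run at either point marked (!).
void HeapRegion::add_continuingHumongousRegion(HeapRegion* cont) {
  // Rewrites BOT entries of a region whose top() already covers live data.
  offsets()->join_blocks(bottom(), cont->bottom());
  // (!) Refinement can consult these BOT entries mid-update.
  arrayOop obj = (arrayOop)(bottom());
  obj->set_length((int) (obj->length() + cont->capacity()/jintSize));
  // (!) The object's length now reaches past end() and top(), so a thread
  //     walking the region can compute an inconsistent view of its contents.
  set_end(cont->end());
  set_top(cont->end());
}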