comparison src/share/vm/gc_implementation/g1/heapRegion.cpp @ 355:0edda524b58c

6722565: G1: assert !r->is_on_unclean_list() fires
Summary: Under certain circumstances, two cleanup threads can claim and process the same region.
Reviewed-by: apetrusenko, ysr
author tonyp
date Wed, 06 Aug 2008 11:57:31 -0400
parents 9bb2c10ac07b
children 1ee8caae33af
diff -r c0f8f7790199 -r 0edda524b58c src/share/vm/gc_implementation/g1/heapRegion.cpp
@@ -261,12 +261,11 @@
   return new HeapRegionDCTOC(G1CollectedHeap::heap(),
                              this, cl, precision, fk);
 }
 
 void HeapRegion::hr_clear(bool par, bool clear_space) {
-  _humongous = false;
-  _humongous_start = false;
+  _humongous_type = NotHumongous;
   _humongous_start_region = NULL;
   _in_collection_set = false;
   _is_gc_alloc_region = false;
 
   // Age stuff (if parallel, this will be done separately, since it needs
@@ -282,11 +281,11 @@
 
   if (!par) {
     // If this is parallel, this will be done later.
     HeapRegionRemSet* hrrs = rem_set();
     if (hrrs != NULL) hrrs->clear();
-    _claimed = 0;
+    _claimed = InitialClaimValue;
   }
   zero_marked_bytes();
   set_sort_index(-1);
   if ((uintptr_t)bottom() >= (uintptr_t)g1h->popular_object_boundary())
     set_popular(false);
@@ -303,11 +302,11 @@
       g1h->predict_region_elapsed_time_ms(this, false);
 }
 // </PREDICTION>
 
 void HeapRegion::set_startsHumongous() {
-  _humongous_start = true; _humongous = true;
+  _humongous_type = StartsHumongous;
   _humongous_start_region = this;
   assert(end() == _orig_end, "Should be normal before alloc.");
 }
 
 bool HeapRegion::claimHeapRegion(jint claimValue) {
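
The summary above describes two cleanup threads claiming and processing the same region; the guard against that is the claim protocol behind claimHeapRegion(jint claimValue), whose body is not shown in these hunks. The following is only a minimal, self-contained sketch of such a protocol, assuming a compare-and-swap on the _claimed field; the RegionSketch type, the std::atomic stand-in and hr_clear_claim() are illustrative names, not HotSpot code. It shows why at most one thread per claim value can win a region, and why hr_clear() resetting _claimed to the shared InitialClaimValue rather than a literal 0 keeps later claims well-defined.

#include <atomic>

// Illustrative stand-in for a HeapRegion's claim state; not the HotSpot code.
struct RegionSketch {
  static const int InitialClaimValue = 0;
  std::atomic<int> _claimed{InitialClaimValue};

  // At most one caller per (region, claimValue) returns true: the
  // compare-exchange lets only one thread move _claimed away from the
  // value it observed, so two workers presenting the same claim value
  // cannot both win the same region.
  bool claimHeapRegion(int claimValue) {
    int current = _claimed.load();
    if (current != claimValue) {
      if (_claimed.compare_exchange_strong(current, claimValue)) {
        return true;
      }
    }
    return false;
  }

  // What hr_clear() does for the claim: reset to the shared constant so the
  // next parallel phase starts from a known value on every region.
  void hr_clear_claim() { _claimed.store(InitialClaimValue); }
};
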
@@ -366,15 +365,15 @@
 HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
            MemRegion mr, bool is_zeroed)
   : G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed),
     _next_fk(HeapRegionDCTOC::NoFilterKind),
     _hrs_index(-1),
-    _humongous(false), _humongous_start(false), _humongous_start_region(NULL),
+    _humongous_type(NotHumongous), _humongous_start_region(NULL),
     _in_collection_set(false), _is_gc_alloc_region(false),
     _is_on_free_list(false), _is_on_unclean_list(false),
     _next_in_special_set(NULL), _orig_end(NULL),
-    _claimed(0), _evacuation_failed(false),
+    _claimed(InitialClaimValue), _evacuation_failed(false),
     _prev_marked_bytes(0), _next_marked_bytes(0), _sort_index(-1),
     _popularity(NotPopular),
     _young_type(NotYoung), _next_young_region(NULL),
     _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
     _rem_set(NULL), _zfs(NotZeroFilled)
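
The constructor hunk above replaces the _humongous(false), _humongous_start(false) pair with a single _humongous_type(NotHumongous) initializer. The enum itself lives in heapRegion.hpp, which is not part of this excerpt; a minimal sketch of the three-valued type and the predicates it implies could look like the following, where only the three enumerator names are taken from the diff and the surrounding names are assumptions.

// Sketch of the three-valued humongous state implied by this changeset;
// NotHumongous, StartsHumongous and ContinuesHumongous appear in the diff,
// the type and predicate names are illustrative.
enum HumongousType {
  NotHumongous = 0,
  StartsHumongous,
  ContinuesHumongous
};

struct HumongousStateSketch {
  HumongousType _humongous_type = NotHumongous;

  // One field answers everything the old boolean pair encoded, and it cannot
  // express the contradictory state (_humongous == false, _humongous_start == true).
  bool isHumongous()        const { return _humongous_type != NotHumongous; }
  bool startsHumongous()    const { return _humongous_type == StartsHumongous; }
  bool continuesHumongous() const { return _humongous_type == ContinuesHumongous; }
};
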
@@ -424,11 +423,11 @@
 }
 
 void HeapRegion::set_continuesHumongous(HeapRegion* start) {
   // The order is important here.
   start->add_continuingHumongousRegion(this);
-  _humongous = true; _humongous_start = false;
+  _humongous_type = ContinuesHumongous;
   _humongous_start_region = start;
 }
 
 void HeapRegion::add_continuingHumongousRegion(HeapRegion* cont) {
   // Must join the blocks of the current H region seq with the block of the