comparison src/share/vm/memory/cardTableModRefBS.cpp @ 616:4f360ec815ba

6720309: G1: don't synchronously update RSet during evacuation pauses 6720334: G1: don't update RSets of collection set regions during an evacuation pause Summary: Introduced a deferred update mechanism for delaying the rset updates during the collection pause Reviewed-by: apetrusenko, tonyp
author iveresov
date Fri, 06 Mar 2009 13:50:14 -0800
parents 9e5a6ed08fc9
children 7bb995fbd3c0
comparison
equal deleted inserted replaced
615:c6c601a0f2d6 616:4f360ec815ba
354 354
355 void CardTableModRefBS::write_ref_field_work(void* field, oop newVal) { 355 void CardTableModRefBS::write_ref_field_work(void* field, oop newVal) {
356 inline_write_ref_field(field, newVal); 356 inline_write_ref_field(field, newVal);
357 } 357 }
358 358
/*
  Claimed and deferred bits are used together in G1 during the evacuation
  pause. These bits can have the following state transitions:
  1. The claimed bit can be put over any other card state, except that
     the "dirty -> dirty and claimed" transition is checked for in
     G1 code and is not used.
  2. The deferred bit can be set only if the previous state of the card
     was either clean or claimed. mark_card_deferred() is wait-free.
     We do not care whether the operation succeeds, because if it does
     not, the only consequence is a duplicate entry in the update buffer
     (a "cache miss"). So it is not worth spinning.
*/
371
359 372
360 bool CardTableModRefBS::claim_card(size_t card_index) { 373 bool CardTableModRefBS::claim_card(size_t card_index) {
361 jbyte val = _byte_map[card_index]; 374 jbyte val = _byte_map[card_index];
362 if (val != claimed_card_val()) { 375 assert(val != dirty_card_val(), "Shouldn't claim a dirty card");
363 jbyte res = Atomic::cmpxchg((jbyte) claimed_card_val(), &_byte_map[card_index], val); 376 while (val == clean_card_val() ||
364 if (res == val) 377 (val & (clean_card_mask_val() | claimed_card_val())) != claimed_card_val()) {
378 jbyte new_val = val;
379 if (val == clean_card_val()) {
380 new_val = (jbyte)claimed_card_val();
381 } else {
382 new_val = val | (jbyte)claimed_card_val();
383 }
384 jbyte res = Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
385 if (res == val) {
365 return true; 386 return true;
366 else return false; 387 }
388 val = res;
367 } 389 }
368 return false; 390 return false;
369 } 391 }
392
393 bool CardTableModRefBS::mark_card_deferred(size_t card_index) {
394 jbyte val = _byte_map[card_index];
395 // It's already processed
396 if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
397 return false;
398 }
399 // Cached bit can be installed either on a clean card or on a claimed card.
400 jbyte new_val = val;
401 if (val == clean_card_val()) {
402 new_val = (jbyte)deferred_card_val();
403 } else {
404 if (val & claimed_card_val()) {
405 new_val = val | (jbyte)deferred_card_val();
406 }
407 }
408 if (new_val != val) {
409 Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
410 }
411 return true;
412 }
413
370 414
371 void CardTableModRefBS::non_clean_card_iterate(Space* sp, 415 void CardTableModRefBS::non_clean_card_iterate(Space* sp,
372 MemRegion mr, 416 MemRegion mr,
373 DirtyCardToOopClosure* dcto_cl, 417 DirtyCardToOopClosure* dcto_cl,
374 MemRegionClosure* cl, 418 MemRegionClosure* cl,