comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp @ 3830:f44782f04dd4

7039627: G1: avoid BOT updates for survivor allocations and dirty survivor regions incrementally
Summary: Refactor the allocation code during GC to use the G1AllocRegion abstraction. Use separate subclasses of G1AllocRegion for survivor and old regions. Avoid BOT updates and dirty survivor cards incrementally for the former.
Reviewed-by: brutisso, johnc, ysr
author tonyp
date Fri, 12 Aug 2011 11:31:06 -0400
parents c3f1170908be
children c9814fadeb38
comparing 3829:87e40b34bc2b with 3830:f44782f04dd4
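The summary above centers on splitting GC-time allocation into per-destination G1AllocRegion subclasses, with block offset table (BOT) maintenance enabled only for old regions. A minimal sketch of how such subclasses might be declared, assuming a G1AllocRegion constructor that takes a region name and a bot_updates flag plus virtual hooks for acquiring and retiring regions; the exact names and signatures below are illustrative, not quoted from this changeset:

class SurvivorGCAllocRegion : public G1AllocRegion {
protected:
  // Hooks for getting a fresh survivor region and retiring the current one.
  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
public:
  // Survivor copies skip BOT maintenance; their cards are dirtied wholesale.
  SurvivorGCAllocRegion()
    : G1AllocRegion("Survivor GC Alloc Region", false /* bot_updates */) { }
};

class OldGCAllocRegion : public G1AllocRegion {
protected:
  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
public:
  // Old (tenured) copies keep the block offset table up to date.
  OldGCAllocRegion()
    : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
};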
    dirty_young_block(result, word_size);
  }
  return result;
}

inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t
                                                              word_size) {
  assert(!isHumongous(word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = _survivor_gc_alloc_region.attempt_allocation(word_size,
                                                      false /* bot_updates */);
  if (result == NULL) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = _survivor_gc_alloc_region.attempt_allocation_locked(word_size,
                                                      false /* bot_updates */);
  }
  if (result != NULL) {
    dirty_young_block(result, word_size);
  }
  return result;
}

inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size) {
  assert(!isHumongous(word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = _old_gc_alloc_region.attempt_allocation(word_size,
                                                    true /* bot_updates */);
  if (result == NULL) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = _old_gc_alloc_region.attempt_allocation_locked(word_size,
                                                    true /* bot_updates */);
  }
  return result;
}

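The two new helpers are deliberately parallel: survivor allocations pass false for bot_updates and dirty the whole block instead, while old allocations pass true and keep the BOT current. A caller on the GC copy path might dispatch on the copy destination along the lines of the sketch below; GCAllocPurpose and its values are assumed from the surrounding G1 code, and par_allocate_during_gc is used here only as an illustrative name, not quoted from this changeset:

inline HeapWord*
G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
                                        size_t word_size) {
  // Copies into survivor regions avoid BOT updates; copies into old regions
  // need them so later heap walks can find object starts.
  if (purpose == GCAllocForSurvived) {
    return survivor_attempt_allocation(word_size);
  } else {
    assert(purpose == GCAllocForTenured, "sanity");
    return old_attempt_allocation(word_size);
  }
}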
// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block
// belongs to a young region.
inline void