comparison src/share/vm/gc_implementation/g1/g1AllocRegion.cpp @ 20404:227a9e5e4b4a

8057536: Refactor G1 to allow context specific allocations
Summary: Splitting out a g1 allocator class to simplify specialized allocators which can associate each allocation with a given context.
Reviewed-by: mgerdin, brutisso
author sjohanss
date Fri, 05 Sep 2014 09:49:19 +0200
parents ce8f6bb717c9
children c132be0fb74d
comparing 20403:8ec8971f511a with 20404:227a9e5e4b4a
@@ -127,12 +127,11 @@
 
     OrderAccess::storestore();
     // Note that we first perform the allocation and then we store the
     // region in _alloc_region. This is the reason why an active region
     // can never be empty.
-    _alloc_region = new_alloc_region;
-    _count += 1;
+    update_alloc_region(new_alloc_region);
     trace("region allocation successful");
     return result;
   } else {
     trace("region allocation failed");
     return NULL;
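
The comment in this hunk describes a publish-after-initialize pattern: the first allocation makes the region non-empty before the region is stored into _alloc_region, and OrderAccess::storestore() keeps the two stores ordered, so a concurrent observer never sees an empty active region. A minimal, self-contained sketch of the same pattern using std::atomic in place of HotSpot's OrderAccess; all names below are illustrative, not HotSpot code:

#include <atomic>
#include <cstddef>

struct Region {
  size_t top = 0;                                 // words already used in this region
  size_t allocate(size_t words) { size_t r = top; top += words; return r; }
  bool is_empty() const { return top == 0; }
};

struct AllocRegion {
  std::atomic<Region*> current{nullptr};          // the published "active" region

  size_t refill_and_allocate(Region* fresh, size_t words) {
    size_t result = fresh->allocate(words);       // 1. make the region non-empty
    // 2. publish only afterwards; the release store plays the role of
    //    OrderAccess::storestore(), so a reader of 'current' never
    //    observes an empty active region.
    current.store(fresh, std::memory_order_release);
    return result;
  }
};
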
@@ -168,10 +167,23 @@
 
   _used_bytes_before = alloc_region->used();
   _alloc_region = alloc_region;
   _count += 1;
   trace("set");
+}
+
+void G1AllocRegion::update_alloc_region(HeapRegion* alloc_region) {
+  trace("update");
+  // We explicitly check that the region is not empty to make sure we
+  // maintain the "the alloc region cannot be empty" invariant.
+  assert(alloc_region != NULL && !alloc_region->is_empty(),
+         ar_ext_msg(this, "pre-condition"));
+
+  _alloc_region = alloc_region;
+  _alloc_region->set_allocation_context(allocation_context());
+  _count += 1;
+  trace("updated");
 }
 
 HeapRegion* G1AllocRegion::release() {
   trace("releasing");
   HeapRegion* alloc_region = _alloc_region;
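
The new update_alloc_region() is where the context association from the change summary happens: before a region becomes the active alloc region it is stamped with the wrapper's allocation context. A simplified, self-contained model of that idea; the types below are assumed stand-ins, not the HotSpot classes:

#include <cassert>
#include <cstddef>

typedef unsigned char AllocationContext_t;        // assumed representation

struct HeapRegionModel {
  AllocationContext_t context = 0;
  size_t used_words = 0;
  bool is_empty() const { return used_words == 0; }
  void set_allocation_context(AllocationContext_t c) { context = c; }
};

struct AllocRegionModel {
  HeapRegionModel* _alloc_region = nullptr;
  AllocationContext_t _allocation_context = 0;    // e.g. a default "system" context
  unsigned _count = 0;

  AllocationContext_t allocation_context() const { return _allocation_context; }

  // Mirrors update_alloc_region() above: check the non-empty invariant,
  // publish the region, and tag it with this wrapper's context.
  void update_alloc_region(HeapRegionModel* region) {
    assert(region != nullptr && !region->is_empty());
    _alloc_region = region;
    _alloc_region->set_allocation_context(allocation_context());
    _count += 1;
  }
};
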
@@ -223,7 +235,72 @@
 #endif // G1_ALLOC_REGION_TRACING
 
 G1AllocRegion::G1AllocRegion(const char* name,
                              bool bot_updates)
   : _name(name), _bot_updates(bot_updates),
-    _alloc_region(NULL), _count(0), _used_bytes_before(0) { }
-
+    _alloc_region(NULL), _count(0), _used_bytes_before(0),
+    _allocation_context(AllocationContext::system()) { }
+
+
+HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
+                                                    bool force) {
+  return _g1h->new_mutator_alloc_region(word_size, force);
+}
+
+void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
+                                       size_t allocated_bytes) {
+  _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
+}
+
+HeapRegion* SurvivorGCAllocRegion::allocate_new_region(size_t word_size,
+                                                       bool force) {
+  assert(!force, "not supported for GC alloc regions");
+  return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForSurvived);
+}
+
+void SurvivorGCAllocRegion::retire_region(HeapRegion* alloc_region,
+                                          size_t allocated_bytes) {
+  _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
+                               GCAllocForSurvived);
+}
+
+HeapRegion* OldGCAllocRegion::allocate_new_region(size_t word_size,
+                                                  bool force) {
+  assert(!force, "not supported for GC alloc regions");
+  return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForTenured);
+}
+
+void OldGCAllocRegion::retire_region(HeapRegion* alloc_region,
+                                     size_t allocated_bytes) {
+  _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
+                               GCAllocForTenured);
+}
+
+HeapRegion* OldGCAllocRegion::release() {
+  HeapRegion* cur = get();
+  if (cur != NULL) {
+    // Determine how far we are from the next card boundary. If it is smaller than
+    // the minimum object size we can allocate into, expand into the next card.
+    HeapWord* top = cur->top();
+    HeapWord* aligned_top = (HeapWord*)align_ptr_up(top, G1BlockOffsetSharedArray::N_bytes);
+
+    size_t to_allocate_words = pointer_delta(aligned_top, top, HeapWordSize);
+
+    if (to_allocate_words != 0) {
+      // We are not at a card boundary. Fill up, possibly into the next, taking the
+      // end of the region and the minimum object size into account.
+      to_allocate_words = MIN2(pointer_delta(cur->end(), cur->top(), HeapWordSize),
+                               MAX2(to_allocate_words, G1CollectedHeap::min_fill_size()));
+
+      // Skip allocation if there is not enough space to allocate even the smallest
+      // possible object. In this case this region will not be retained, so the
+      // original problem cannot occur.
+      if (to_allocate_words >= G1CollectedHeap::min_fill_size()) {
+        HeapWord* dummy = attempt_allocation(to_allocate_words, true /* bot_updates */);
+        CollectedHeap::fill_with_object(dummy, to_allocate_words);
+      }
+    }
+  }
+  return G1AllocRegion::release();
+}
+
+
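
The padding logic in OldGCAllocRegion::release() above rounds the current allocation position up to the next block-offset-table card boundary (never past the region end, and never with less than the smallest fillable object) before the region may be retained, so a retained old region does not end mid-card. A standalone sketch of that arithmetic; the card size, word size, minimum fill size and positions below are assumed example values, not read from a real VM:

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Round 'value' up to a multiple of 'alignment' (alignment must be a power of two).
static size_t align_up(size_t value, size_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

int main() {
  const size_t card_bytes     = 512;   // assumed block-offset-table card size
  const size_t word_bytes     = 8;     // assumed 64-bit HeapWord
  const size_t card_words     = card_bytes / word_bytes;
  const size_t min_fill_words = 2;     // assumed minimum fillable object size
  const size_t region_end     = 8192;  // region end, in words from region start

  size_t top = 1000;                   // current allocation position, in words
  size_t aligned_top = align_up(top, card_words);
  size_t to_allocate = aligned_top - top;

  if (to_allocate != 0) {
    // Pad at least up to the next card boundary, but never past the region
    // end, and never with less than the smallest fillable object.
    to_allocate = std::min(region_end - top,
                           std::max(to_allocate, min_fill_words));
    if (to_allocate >= min_fill_words) {
      printf("fill %zu words so the retained region ends on a card boundary\n",
             to_allocate);
    }
  }
  return 0;
}

With the example values this prints a fill of 24 words: top = 1000 is 24 words short of the next 64-word card boundary at 1024.
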