comparison src/share/vm/memory/space.cpp @ 2433:abdfc822206f

7023069: G1: Introduce symmetric locking in the slow allocation path 7023151: G1: refactor the code that operates on _cur_alloc_region to be re-used for allocs by the GC threads 7018286: G1: humongous allocation attempts should take the GC locker into account Summary: First, this change replaces the asymmetric locking scheme in the G1 slow alloc path by a symmetric one. Second, it factors out the code that operates on _cur_alloc_region so that it can be re-used for allocations by the GC threads in the future. Reviewed-by: stefank, brutisso, johnc
author tonyp
date Wed, 30 Mar 2011 10:26:59 -0400
parents f95d63e2154a
children fc2b798ab316
comparison
equal deleted inserted replaced
2432:455328d90876 2433:abdfc822206f
816 } 816 }
817 817
818 // This version requires locking. 818 // This version requires locking.
819 inline HeapWord* ContiguousSpace::allocate_impl(size_t size, 819 inline HeapWord* ContiguousSpace::allocate_impl(size_t size,
820 HeapWord* const end_value) { 820 HeapWord* const end_value) {
821 // In G1 there are places where a GC worker can allocate into a
822 // region using this serial allocation code without being prone to a
823 // race with other GC workers (we ensure that no other GC worker can
824 // access the same region at the same time). So the assert below is
825 // too strong in the case of G1.
821 assert(Heap_lock->owned_by_self() || 826 assert(Heap_lock->owned_by_self() ||
822 (SafepointSynchronize::is_at_safepoint() && 827 (SafepointSynchronize::is_at_safepoint() &&
823 Thread::current()->is_VM_thread()), 828 (Thread::current()->is_VM_thread() || UseG1GC)),
824 "not locked"); 829 "not locked");
825 HeapWord* obj = top(); 830 HeapWord* obj = top();
826 if (pointer_delta(end_value, obj) >= size) { 831 if (pointer_delta(end_value, obj) >= size) {
827 HeapWord* new_top = obj + size; 832 HeapWord* new_top = obj + size;
828 set_top(new_top); 833 set_top(new_top);