comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp @ 2433:abdfc822206f
7023069: G1: Introduce symmetric locking in the slow allocation path
7023151: G1: refactor the code that operates on _cur_alloc_region to be re-used for allocs by the GC threads
7018286: G1: humongous allocation attempts should take the GC locker into account
Summary: First, this change replaces the asymmetric locking scheme in the G1 slow alloc path with a symmetric one. Second, it factors out the code that operates on _cur_alloc_region so that it can be re-used for allocations by the GC threads in the future.
Reviewed-by: stefank, brutisso, johnc
author   | tonyp
date     | Wed, 30 Mar 2011 10:26:59 -0400
parents  | 0fa27f37d4d4
children | c3f1170908be
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	2432:455328d90876
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	2433:abdfc822206f
@@ -25,10 +25,11 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
 
 #include "gc_implementation/g1/concurrentMark.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.hpp"
+#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
 #include "utilities/taskqueue.hpp"
 
 // Inline functions for G1CollectedHeap
@@ -57,135 +58,27 @@
 inline bool G1CollectedHeap::obj_in_cs(oop obj) {
   HeapRegion* r = _hrs->addr_to_region(obj);
   return r != NULL && r->in_collection_set();
 }
 
-// See the comment in the .hpp file about the locking protocol and
-// assumptions of this method (and other related ones).
 inline HeapWord*
-G1CollectedHeap::allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region,
-                                                size_t word_size,
-                                                bool with_heap_lock) {
-  assert_not_at_safepoint();
-  assert(with_heap_lock == Heap_lock->owned_by_self(),
-         "with_heap_lock and Heap_lock->owned_by_self() should be a tautology");
-  assert(cur_alloc_region != NULL, "pre-condition of the method");
-  assert(cur_alloc_region->is_young(),
-         "we only support young current alloc regions");
-  assert(!isHumongous(word_size), "allocate_from_cur_alloc_region() "
-         "should not be used for humongous allocations");
-  assert(!cur_alloc_region->isHumongous(), "Catch a regression of this bug.");
-
-  assert(!cur_alloc_region->is_empty(),
-         err_msg("region ["PTR_FORMAT","PTR_FORMAT"] should not be empty",
-                 cur_alloc_region->bottom(), cur_alloc_region->end()));
-  HeapWord* result = cur_alloc_region->par_allocate_no_bot_updates(word_size);
-  if (result != NULL) {
-    assert(is_in(result), "result should be in the heap");
-
-    if (with_heap_lock) {
-      Heap_lock->unlock();
-    }
-    assert_heap_not_locked();
-    // Do the dirtying after we release the Heap_lock.
-    dirty_young_block(result, word_size);
-    return result;
-  }
-
-  if (with_heap_lock) {
-    assert_heap_locked();
-  } else {
-    assert_heap_not_locked();
-  }
-  return NULL;
-}
-
-// See the comment in the .hpp file about the locking protocol and
-// assumptions of this method (and other related ones).
-inline HeapWord*
-G1CollectedHeap::attempt_allocation(size_t word_size) {
-  assert_heap_not_locked_and_not_at_safepoint();
-  assert(!isHumongous(word_size), "attempt_allocation() should not be called "
-         "for humongous allocation requests");
-
-  HeapRegion* cur_alloc_region = _cur_alloc_region;
-  if (cur_alloc_region != NULL) {
-    HeapWord* result = allocate_from_cur_alloc_region(cur_alloc_region,
-                                                      word_size,
-                                                      false /* with_heap_lock */);
-    assert_heap_not_locked();
-    if (result != NULL) {
-      return result;
-    }
-  }
-
-  // Our attempt to allocate lock-free failed as the current
-  // allocation region is either NULL or full. So, we'll now take the
-  // Heap_lock and retry.
-  Heap_lock->lock();
-
-  HeapWord* result = attempt_allocation_locked(word_size);
-  if (result != NULL) {
-    assert_heap_not_locked();
-    return result;
-  }
-
-  assert_heap_locked();
-  return NULL;
-}
-
-inline void
-G1CollectedHeap::retire_cur_alloc_region_common(HeapRegion* cur_alloc_region) {
-  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
-  assert(cur_alloc_region != NULL && cur_alloc_region == _cur_alloc_region,
-         "pre-condition of the call");
-  assert(cur_alloc_region->is_young(),
-         "we only support young current alloc regions");
-
-  // The region is guaranteed to be young
-  g1_policy()->add_region_to_incremental_cset_lhs(cur_alloc_region);
-  _summary_bytes_used += cur_alloc_region->used();
-  _cur_alloc_region = NULL;
-}
-
-inline HeapWord*
-G1CollectedHeap::attempt_allocation_locked(size_t word_size) {
-  assert_heap_locked_and_not_at_safepoint();
-  assert(!isHumongous(word_size), "attempt_allocation_locked() "
-         "should not be called for humongous allocation requests");
-
-  // First, reread the current alloc region and retry the allocation
-  // in case somebody replaced it while we were waiting to get the
-  // Heap_lock.
-  HeapRegion* cur_alloc_region = _cur_alloc_region;
-  if (cur_alloc_region != NULL) {
-    HeapWord* result = allocate_from_cur_alloc_region(
-                                                  cur_alloc_region, word_size,
-                                                  true /* with_heap_lock */);
-    if (result != NULL) {
-      assert_heap_not_locked();
-      return result;
-    }
-
-    // We failed to allocate out of the current alloc region, so let's
-    // retire it before getting a new one.
-    retire_cur_alloc_region(cur_alloc_region);
-  }
-
-  assert_heap_locked();
-  // Try to get a new region and allocate out of it
-  HeapWord* result = replace_cur_alloc_region_and_allocate(word_size,
-                                                           false, /* at_safepoint */
-                                                           true,  /* do_dirtying */
-                                                           false  /* can_expand */);
-  if (result != NULL) {
-    assert_heap_not_locked();
-    return result;
-  }
-
-  assert_heap_locked();
-  return NULL;
-}
+G1CollectedHeap::attempt_allocation(size_t word_size,
+                                    unsigned int* gc_count_before_ret) {
+  assert_heap_not_locked_and_not_at_safepoint();
+  assert(!isHumongous(word_size), "attempt_allocation() should not "
+         "be called for humongous allocation requests");
+
+  HeapWord* result = _mutator_alloc_region.attempt_allocation(word_size,
+                                                      false /* bot_updates */);
+  if (result == NULL) {
+    result = attempt_allocation_slow(word_size, gc_count_before_ret);
+  }
+  assert_heap_not_locked();
+  if (result != NULL) {
+    dirty_young_block(result, word_size);
+  }
+  return result;
+}
 
 // It dirties the cards that cover the block so that so that the post
 // write barrier never queues anything when updating objects on this
 // block. It is assumed (and in fact we assert) that the block
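After this change the mutator's fast path never touches the Heap_lock: _mutator_alloc_region.attempt_allocation() hands out memory lock-free, and only a NULL result falls through to attempt_allocation_slow(). A minimal sketch of the kind of CAS bump-pointer allocation such a fast path performs (hypothetical Region type, not the actual G1AllocRegion/HeapRegion code):

#include <atomic>
#include <cstddef>

// Hypothetical Region type (illustration only): the fast path bumps the
// region's top pointer with a compare-and-swap, so mutator threads
// allocate concurrently without taking any lock.
struct Region {
  std::atomic<char*> top;   // next free byte in the region
  char* end;                // one past the last usable byte

  void* par_allocate(std::size_t byte_size) {
    char* old_top = top.load(std::memory_order_relaxed);
    while (true) {
      if (static_cast<std::size_t>(end - old_top) < byte_size) {
        return nullptr;     // region full: caller falls back to the slow path
      }
      char* new_top = old_top + byte_size;
      // On failure old_top is refreshed with the winning thread's value and
      // we retry; compare_exchange_weak may also fail spuriously, which the
      // loop absorbs.
      if (top.compare_exchange_weak(old_top, new_top)) {
        return old_top;     // CAS won: [old_top, new_top) is ours
      }
    }
  }
};

// Usage sketch:
//   static char buf[1024];
//   Region r; r.top = buf; r.end = buf + sizeof(buf);
//   void* p = r.par_allocate(64);

dirty_young_block() then marks the cards covering the newly allocated block, which is what the (truncated) comment above begins to explain: the post write barrier never needs to queue anything for stores into a block the collector already knows is young.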