src/share/vm/gc_implementation/shared/parGCAllocBuffer.cpp @ 6595:aaf61e68b255
6818524: G1: use ergonomic resizing of PLABs
Summary: Employ PLABStats instances to record information about survivor and old PLABs, and use the recorded stats to adjust the sizes of survivor and old PLABs.
Reviewed-by: johnc, ysr
Contributed-by: Brandon Mitchell <brandon@twitter.com>
author: johnc
date: Mon, 06 Aug 2012 12:20:14 -0700
parents: src/share/vm/gc_implementation/parNew/parGCAllocBuffer.cpp@f95d63e2154a
children: b2ef234911c9
/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
#include "memory/sharedHeap.hpp"
#include "oops/arrayOop.hpp"
#include "oops/oop.inline.hpp"

ParGCAllocBuffer::ParGCAllocBuffer(size_t desired_plab_sz_) :
  _word_sz(desired_plab_sz_), _bottom(NULL), _top(NULL),
  _end(NULL), _hard_end(NULL),
  _retained(false), _retained_filler(),
  _allocated(0), _wasted(0)
{
  assert(min_size() > AlignmentReserve, "Inconsistency!");
  // arrayOopDesc::header_size depends on command line initialization.
  FillerHeaderSize = align_object_size(arrayOopDesc::header_size(T_INT));
  AlignmentReserve = oopDesc::header_size() > MinObjAlignment ? FillerHeaderSize : 0;
}

size_t ParGCAllocBuffer::FillerHeaderSize;

// If the minimum object size is greater than MinObjAlignment, we can
// end up with a shard at the end of the buffer that's smaller than
// the smallest object.  We can't allow that because the buffer must
// look like it's full of objects when we retire it, so we make
// sure we have enough space for a filler int array object.
size_t ParGCAllocBuffer::AlignmentReserve;
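
// Illustrative example (hedged; exact word counts depend on the
// platform and on flags such as UseCompressedOops): suppose the tail
// of a buffer were 3 words but the smallest fillable int-array object
// needed 4 words.  Retiring the buffer would then leave a shard that
// no filler object can cover.  Keeping _end at least AlignmentReserve
// words below _hard_end rules that situation out.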

void ParGCAllocBuffer::retire(bool end_of_gc, bool retain) {
  assert(!retain || end_of_gc, "Can only retain at GC end.");
  if (_retained) {
    // If the buffer had been retained shorten the previous filler object.
    assert(_retained_filler.end() <= _top, "INVARIANT");
    CollectedHeap::fill_with_object(_retained_filler);
    // Wasted space book-keeping, otherwise (normally) done in invalidate()
    _wasted += _retained_filler.word_size();
    _retained = false;
  }
  assert(!end_of_gc || !_retained, "At this point, end_of_gc ==> !_retained.");
  if (_top < _hard_end) {
    CollectedHeap::fill_with_object(_top, _hard_end);
    if (!retain) {
      invalidate();
    } else {
      // Is there wasted space we'd like to retain for the next GC?
      if (pointer_delta(_end, _top) > FillerHeaderSize) {
        _retained = true;
        _retained_filler = MemRegion(_top, FillerHeaderSize);
        _top = _top + FillerHeaderSize;
      } else {
        invalidate();
      }
    }
  }
}
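
// A sketch of the calling convention (inferred from the assertions
// above, not from a caller in this file): a worker that exhausts its
// buffer mid-scavenge retires without retaining and then acquires a
// fresh buffer, while at the end of the scavenge it may ask to keep
// the buffer for the next round:
//
//   buf->retire(false, false);             // mid-scavenge refill
//   buf->retire(true,  should_retain);     // end of scavenge
//
// "should_retain" is a hypothetical policy flag, not an identifier
// from this file; callers decide the retention policy.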

void ParGCAllocBuffer::flush_stats(PLABStats* stats) {
  assert(ResizePLAB, "Wasted work");
  stats->add_allocated(_allocated);
  stats->add_wasted(_wasted);
  stats->add_unused(pointer_delta(_end, _top));
}
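
// Intended flow (a sketch; the variable names below are assumptions,
// not taken from this file): each GC worker flushes its own buffer's
// counters into a shared PLABStats instance, and once every worker
// has flushed, adjust_desired_plab_sz() below folds the totals into
// the buffer size used for the next scavenge:
//
//   buffer->flush_stats(&shared_stats);    // once per worker
//   shared_stats.adjust_desired_plab_sz(); // once, at scavenge end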

// Compute desired plab size and latch result for later
// use.  This should be called once at the end of parallel
// scavenge; it clears the sensor accumulators.
void PLABStats::adjust_desired_plab_sz() {
  assert(ResizePLAB, "Not set");
  if (_allocated == 0) {
    assert(_unused == 0, "Inconsistency in PLAB stats");
    _allocated = 1;
  }
  double wasted_frac = (double)_unused/(double)_allocated;
  size_t target_refills = (size_t)((wasted_frac*TargetSurvivorRatio)/
                                   TargetPLABWastePct);
  if (target_refills == 0) {
    target_refills = 1;
  }
  _used = _allocated - _wasted - _unused;
  size_t plab_sz = _used/(target_refills*ParallelGCThreads);
  if (PrintPLAB) gclog_or_tty->print(" (plab_sz = " SIZE_FORMAT " ", plab_sz);
  // Take historical weighted average
  _filter.sample(plab_sz);
  // Clip from above and below, and align to object boundary
  plab_sz = MAX2(min_size(), (size_t)_filter.average());
  plab_sz = MIN2(max_size(), plab_sz);
  plab_sz = align_object_size(plab_sz);
  // Latch the result
  if (PrintPLAB) gclog_or_tty->print(" desired_plab_sz = " SIZE_FORMAT ") ", plab_sz);
  _desired_plab_sz = plab_sz;
  // Now clear the accumulators for next round:
  // note this needs to be fixed in the case where we
  // are retaining across scavenges. FIX ME !!! XXX
  _allocated = 0;
  _wasted = 0;
  _unused = 0;
}
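
// Worked example (illustrative numbers only, not measured data): with
// _allocated = 100000 words, _unused = 2000, _wasted = 500,
// TargetSurvivorRatio = 50 and TargetPLABWastePct = 10:
//   wasted_frac    = 2000 / 100000         = 0.02
//   target_refills = (0.02 * 50) / 10      = 0.1, clipped up to 1
//   _used          = 100000 - 500 - 2000   = 97500
//   plab_sz        = 97500 / (1 * ParallelGCThreads)
// so with 8 GC worker threads each would aim at roughly 12187 words,
// before the exponential smoothing, clipping and alignment above.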

#ifndef PRODUCT
void ParGCAllocBuffer::print() {
  gclog_or_tty->print("parGCAllocBuffer: _bottom: %p _top: %p _end: %p _hard_end: %p "
                      "_retained: %c _retained_filler: [%p,%p)\n",
                      _bottom, _top, _end, _hard_end,
                      "FT"[_retained], _retained_filler.start(), _retained_filler.end());
}
#endif // !PRODUCT

const size_t ParGCAllocBufferWithBOT::ChunkSizeInWords =
  MIN2(CardTableModRefBS::par_chunk_heapword_alignment(),
       ((size_t)Generation::GenGrain)/HeapWordSize);
const size_t ParGCAllocBufferWithBOT::ChunkSizeInBytes =
  MIN2(CardTableModRefBS::par_chunk_heapword_alignment() * HeapWordSize,
       (size_t)Generation::GenGrain);
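
// Note: the two constants express the same chunk size, once in heap
// words and once in bytes; since GenGrain is a multiple of
// HeapWordSize, ChunkSizeInBytes == ChunkSizeInWords * HeapWordSize
// whichever operand of the MIN2s wins.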

ParGCAllocBufferWithBOT::ParGCAllocBufferWithBOT(size_t word_sz,
                                                 BlockOffsetSharedArray* bsa) :
  ParGCAllocBuffer(word_sz),
  _bsa(bsa),
  _bt(bsa, MemRegion(_bottom, _hard_end)),
  _true_end(_hard_end)
{}

// The buffer comes with its own BOT, with a shared (obviously) underlying
// BlockOffsetSharedArray.  We manipulate this BOT in the normal way
// as we would for any contiguous space.  However, on occasion we
// need to do some buffer surgery at the extremities before we
// start using the body of the buffer for allocations.  Such surgery
// (as explained elsewhere) is to prevent allocation on a card that
// is in the process of being walked concurrently by another GC thread.
// When such surgery happens at a point that is far removed (to the
// right of the current allocation point, top), we use the "contig"
// parameter below to directly manipulate the shared array without
// modifying the _next_threshold state in the BOT.
void ParGCAllocBufferWithBOT::fill_region_with_block(MemRegion mr,
                                                     bool contig) {
  CollectedHeap::fill_with_object(mr);
  if (contig) {
    _bt.alloc_block(mr.start(), mr.end());
  } else {
    _bt.BlockOffsetArray::alloc_block(mr.start(), mr.end());
  }
}
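
// The distinction in practice (a sketch inferred from the comment
// above, not an excerpt from a caller): fillers laid down contiguously
// past _top use contig = true, so the BOT's _next_threshold advances
// as for ordinary allocation, while the card/chunk surgery in retire()
// and allocate_slow() passes contig = false because those fillers sit
// away from the allocation point and must leave the threshold alone.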

HeapWord* ParGCAllocBufferWithBOT::allocate_slow(size_t word_sz) {
  HeapWord* res = NULL;
  if (_true_end > _hard_end) {
    assert((HeapWord*)align_size_down(intptr_t(_hard_end),
                                      ChunkSizeInBytes) == _hard_end,
           "or else _true_end should be equal to _hard_end");
    assert(_retained, "or else _true_end should be equal to _hard_end");
    assert(_retained_filler.end() <= _top, "INVARIANT");
    CollectedHeap::fill_with_object(_retained_filler);
    if (_top < _hard_end) {
      fill_region_with_block(MemRegion(_top, _hard_end), true);
    }
    HeapWord* next_hard_end = MIN2(_true_end, _hard_end + ChunkSizeInWords);
    _retained_filler = MemRegion(_hard_end, FillerHeaderSize);
    _bt.alloc_block(_retained_filler.start(), _retained_filler.word_size());
    _top = _retained_filler.end();
    _hard_end = next_hard_end;
    _end = _hard_end - AlignmentReserve;
    res = ParGCAllocBuffer::allocate(word_sz);
    if (res != NULL) {
      _bt.alloc_block(res, word_sz);
    }
  }
  return res;
}
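
// In effect allocate_slow() grows the usable window one parallel-card
// chunk at a time: each call moves _hard_end right by ChunkSizeInWords
// (never past _true_end), plants a fresh retained filler at the old
// boundary, and retries the fast path.  A buffer retained across a
// scavenge is thus exposed to concurrent card scanners chunk by chunk
// rather than all at once.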

void
ParGCAllocBufferWithBOT::undo_allocation(HeapWord* obj, size_t word_sz) {
  ParGCAllocBuffer::undo_allocation(obj, word_sz);
  // This may back us up beyond the previous threshold, so reset.
  _bt.set_region(MemRegion(_top, _hard_end));
  _bt.initialize_threshold();
}

void ParGCAllocBufferWithBOT::retire(bool end_of_gc, bool retain) {
  assert(!retain || end_of_gc, "Can only retain at GC end.");
  if (_retained) {
    // We're about to make the retained_filler into a block.
    _bt.BlockOffsetArray::alloc_block(_retained_filler.start(),
                                      _retained_filler.end());
  }
  // Reset _hard_end to _true_end (and update _end)
  if (retain && _hard_end != NULL) {
    assert(_hard_end <= _true_end, "Invariant.");
    _hard_end = _true_end;
    _end = MAX2(_top, _hard_end - AlignmentReserve);
    assert(_end <= _hard_end, "Invariant.");
  }
  _true_end = _hard_end;
  HeapWord* pre_top = _top;

  ParGCAllocBuffer::retire(end_of_gc, retain);
  // Now any old _retained_filler is cut back to size, the free part is
  // filled with a filler object, and top is past the header of that
  // object.

  if (retain && _top < _end) {
    assert(end_of_gc && retain, "Or else retain should be false.");
    // If the lab does not start on a card boundary, we don't want to
    // allocate onto that card, since that might lead to concurrent
    // allocation and card scanning, which we don't support.  So we fill
    // the first card with a garbage object.
    size_t first_card_index = _bsa->index_for(pre_top);
    HeapWord* first_card_start = _bsa->address_for_index(first_card_index);
    if (first_card_start < pre_top) {
      HeapWord* second_card_start =
        _bsa->inc_by_region_size(first_card_start);

      // Ensure enough room to fill with the smallest block
      second_card_start = MAX2(second_card_start, pre_top + AlignmentReserve);

      // If the end is already in the first card, don't go beyond it!
      // Or if the remainder is too small for a filler object, gobble it up.
      if (_hard_end < second_card_start ||
          pointer_delta(_hard_end, second_card_start) < AlignmentReserve) {
        second_card_start = _hard_end;
      }
      if (pre_top < second_card_start) {
        MemRegion first_card_suffix(pre_top, second_card_start);
        fill_region_with_block(first_card_suffix, true);
      }
      pre_top = second_card_start;
      _top = pre_top;
      _end = MAX2(_top, _hard_end - AlignmentReserve);
    }

    // If the lab does not end on a card boundary, we don't want to
    // allocate onto that card, since that might lead to concurrent
    // allocation and card scanning, which we don't support.  So we fill
    // the last card with a garbage object.
    size_t last_card_index = _bsa->index_for(_hard_end);
    HeapWord* last_card_start = _bsa->address_for_index(last_card_index);
    if (last_card_start < _hard_end) {

      // Ensure enough room to fill with the smallest block
      last_card_start = MIN2(last_card_start, _hard_end - AlignmentReserve);

      // If the top is already in the last card, don't go back beyond it!
      // Or if the remainder is too small for a filler object, gobble it up.
      if (_top > last_card_start ||
          pointer_delta(last_card_start, _top) < AlignmentReserve) {
        last_card_start = _top;
      }
      if (last_card_start < _hard_end) {
        MemRegion last_card_prefix(last_card_start, _hard_end);
        fill_region_with_block(last_card_prefix, false);
      }
      _hard_end = last_card_start;
      _end = MAX2(_top, _hard_end - AlignmentReserve);
      _true_end = _hard_end;
      assert(_end <= _hard_end, "Invariant.");
    }

    // At this point:
    // 1) we had a filler object from the original top to hard_end.
    // 2) We've filled in any partial cards at the front and back.
    if (pre_top < _hard_end) {
      // Now we can reset the _bt to do allocation in the given area.
      MemRegion new_filler(pre_top, _hard_end);
      fill_region_with_block(new_filler, false);
      _top = pre_top + ParGCAllocBuffer::FillerHeaderSize;
      // If there's no space left, don't retain.
      if (_top >= _end) {
        _retained = false;
        invalidate();
        return;
      }
      _retained_filler = MemRegion(pre_top, _top);
      _bt.set_region(MemRegion(_top, _hard_end));
      _bt.initialize_threshold();
      assert(_bt.threshold() > _top, "initialize_threshold failed!");

      // There may be other reasons for queries into the middle of the
      // filler object.  When such queries are done in parallel with
      // allocation, bad things can happen, if the query involves object
      // iteration.  So we ensure that such queries do not involve object
      // iteration, by putting another filler object on the boundaries of
      // such queries.  One such is the object spanning a parallel card
      // chunk boundary.

      // "chunk_boundary" is the address of the first chunk boundary less
      // than "hard_end".
      HeapWord* chunk_boundary =
        (HeapWord*)align_size_down(intptr_t(_hard_end-1), ChunkSizeInBytes);
      assert(chunk_boundary < _hard_end, "Or else above did not work.");
      assert(pointer_delta(_true_end, chunk_boundary) >= AlignmentReserve,
             "Consequence of last card handling above.");

      if (_top <= chunk_boundary) {
        assert(_true_end == _hard_end, "Invariant.");
        while (_top <= chunk_boundary) {
          assert(pointer_delta(_hard_end, chunk_boundary) >= AlignmentReserve,
                 "Consequence of last card handling above.");
          _bt.BlockOffsetArray::alloc_block(chunk_boundary, _hard_end);
          CollectedHeap::fill_with_object(chunk_boundary, _hard_end);
          _hard_end = chunk_boundary;
          chunk_boundary -= ChunkSizeInWords;
        }
        _end = _hard_end - AlignmentReserve;
        assert(_top <= _end, "Invariant.");
        // Now reset the initial filler chunk so it doesn't overlap with
        // the one(s) inserted above.
        MemRegion new_filler(pre_top, _hard_end);
        fill_region_with_block(new_filler, false);
      }
    } else {
      _retained = false;
      invalidate();
    }
  } else {
    assert(!end_of_gc ||
           (!_retained && _true_end == _hard_end), "Checking.");
  }
  assert(_end <= _hard_end, "Invariant.");
  assert(_top < _end || _top == _hard_end, "Invariant");
}