Mercurial > hg > truffle
annotate: src/share/vm/gc_implementation/parNew/parGCAllocBuffer.hpp @ 963:9601152ccfc1
6875393: 2/3 JNI itable index cache is broken
Summary: Add missing initialization of cache size.
Reviewed-by: tbell
author:   dcubed
date:     Fri, 28 Aug 2009 12:25:46 -0600
parents:  d1605aabd0a1
children: 09f82af55c3e
(annotated line source follows; left column is the revision that last touched each line)
0 | 1 /* |
196 | 2 * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, | |
20 * CA 95054 USA or visit www.sun.com if you need additional information or | |
21 * have any questions. | |
22 * | |
23 */ | |
24 | |
25 // Forward decl. | |
26 | |
27 class PLABStats; | |
28 | |
29 // A per-thread allocation buffer used during GC. | |
30 class ParGCAllocBuffer: public CHeapObj { | |
31 protected: | |
32 char head[32]; | |
33 size_t _word_sz; // in HeapWord units | |
34 HeapWord* _bottom; | |
35 HeapWord* _top; | |
36 HeapWord* _end; // last allocatable address + 1 | |
37 HeapWord* _hard_end; // _end + AlignmentReserve | |
38 bool _retained; // whether we hold a _retained_filler | |
39 MemRegion _retained_filler; | |
40 // In support of ergonomic sizing of PLAB's | |
41 size_t _allocated; // in HeapWord units | |
42 size_t _wasted; // in HeapWord units | |
43 char tail[32]; | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
44 static size_t FillerHeaderSize; |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
45 static size_t AlignmentReserve; |
0 | 46 |
47 public: | |
48 // Initializes the buffer to be empty, but with the given "word_sz". | |
49 // Must get initialized with "set_buf" for an allocation to succeed. | |
50 ParGCAllocBuffer(size_t word_sz); | |
51 | |
52 static const size_t min_size() { | |
53 return ThreadLocalAllocBuffer::min_size(); | |
54 } | |
55 | |
56 static const size_t max_size() { | |
57 return ThreadLocalAllocBuffer::max_size(); | |
58 } | |
59 | |
60 // If an allocation of the given "word_sz" can be satisfied within the | |
61 // buffer, do the allocation, returning a pointer to the start of the | |
62 // allocated block. If the allocation request cannot be satisfied, | |
63 // return NULL. | |
64 HeapWord* allocate(size_t word_sz) { | |
65 HeapWord* res = _top; | |
66 HeapWord* new_top = _top + word_sz; | |
67 if (new_top <= _end) { | |
68 _top = new_top; | |
69 return res; | |
70 } else { | |
71 return NULL; | |
72 } | |
73 } | |
74 | |
75 // Undo the last allocation in the buffer, which is required to be of the | |
76 // "obj" of the given "word_sz". | |
77 void undo_allocation(HeapWord* obj, size_t word_sz) { | |
78 assert(_top - word_sz >= _bottom | |
79 && _top - word_sz == obj, | |
80 "Bad undo_allocation"); | |
81 _top = _top - word_sz; | |
82 } | |
83 | |
84 // The total (word) size of the buffer, including both allocated and | |
85 // unallocted space. | |
86 size_t word_sz() { return _word_sz; } | |
87 | |
88 // Should only be done if we are about to reset with a new buffer of the | |
89 // given size. | |
90 void set_word_size(size_t new_word_sz) { | |
91 assert(new_word_sz > AlignmentReserve, "Too small"); | |
92 _word_sz = new_word_sz; | |
93 } | |
94 | |
95 // The number of words of unallocated space remaining in the buffer. | |
96 size_t words_remaining() { | |
97 assert(_end >= _top, "Negative buffer"); | |
98 return pointer_delta(_end, _top, HeapWordSize); | |
99 } | |
100 | |
101 bool contains(void* addr) { | |
102 return (void*)_bottom <= addr && addr < (void*)_hard_end; | |
103 } | |
104 | |
105 // Sets the space of the buffer to be [buf, space+word_sz()). | |
106 void set_buf(HeapWord* buf) { | |
107 _bottom = buf; | |
108 _top = _bottom; | |
109 _hard_end = _bottom + word_sz(); | |
110 _end = _hard_end - AlignmentReserve; | |
111 assert(_end >= _top, "Negative buffer"); | |
112 // In support of ergonomic sizing | |
113 _allocated += word_sz(); | |
114 } | |
115 | |
116 // Flush the stats supporting ergonomic sizing of PLAB's | |
117 void flush_stats(PLABStats* stats); | |
118 void flush_stats_and_retire(PLABStats* stats, bool retain) { | |
119 // We flush the stats first in order to get a reading of | |
120 // unused space in the last buffer. | |
121 if (ResizePLAB) { | |
122 flush_stats(stats); | |
123 } | |
124 // Retire the last allocation buffer. | |
125 retire(true, retain); | |
126 } | |
127 | |
128 // Force future allocations to fail and queries for contains() | |
129 // to return false | |
130 void invalidate() { | |
131 assert(!_retained, "Shouldn't retain an invalidated buffer."); | |
132 _end = _hard_end; | |
133 _wasted += pointer_delta(_end, _top); // unused space | |
134 _top = _end; // force future allocations to fail | |
135 _bottom = _end; // force future contains() queries to return false | |
136 } | |
137 | |
138 // Fills in the unallocated portion of the buffer with a garbage object. | |
139 // If "end_of_gc" is TRUE, is after the last use in the GC. IF "retain" | |
140 // is true, attempt to re-use the unused portion in the next GC. | |
141 void retire(bool end_of_gc, bool retain); | |
142 | |
143 void print() PRODUCT_RETURN; | |
144 }; | |
145 | |
146 // PLAB stats book-keeping | |
147 class PLABStats VALUE_OBJ_CLASS_SPEC { | |
148 size_t _allocated; // total allocated | |
149 size_t _wasted; // of which wasted (internal fragmentation) | |
150 size_t _unused; // Unused in last buffer | |
151 size_t _used; // derived = allocated - wasted - unused | |
152 size_t _desired_plab_sz;// output of filter (below), suitably trimmed and quantized | |
153 AdaptiveWeightedAverage | |
154 _filter; // integrator with decay | |
155 | |
156 public: | |
157 PLABStats(size_t desired_plab_sz_, unsigned wt) : | |
158 _allocated(0), | |
159 _wasted(0), | |
160 _unused(0), | |
161 _used(0), | |
162 _desired_plab_sz(desired_plab_sz_), | |
163 _filter(wt) | |
164 { | |
165 size_t min_sz = min_size(); | |
166 size_t max_sz = max_size(); | |
167 size_t aligned_min_sz = align_object_size(min_sz); | |
168 size_t aligned_max_sz = align_object_size(max_sz); | |
169 assert(min_sz <= aligned_min_sz && max_sz >= aligned_max_sz && | |
170 min_sz <= max_sz, | |
171 "PLAB clipping computation in adjust_desired_plab_sz()" | |
172 " may be incorrect"); | |
173 } | |
174 | |
175 static const size_t min_size() { | |
176 return ParGCAllocBuffer::min_size(); | |
177 } | |
178 | |
179 static const size_t max_size() { | |
180 return ParGCAllocBuffer::max_size(); | |
181 } | |
182 | |
183 size_t desired_plab_sz() { | |
184 return _desired_plab_sz; | |
185 } | |
186 | |
187 void adjust_desired_plab_sz(); // filter computation, latches output to | |
188 // _desired_plab_sz, clears sensor accumulators | |
189 | |
190 void add_allocated(size_t v) { | |
191 Atomic::add_ptr(v, &_allocated); | |
192 } | |
193 | |
194 void add_unused(size_t v) { | |
195 Atomic::add_ptr(v, &_unused); | |
196 } | |
197 | |
198 void add_wasted(size_t v) { | |
199 Atomic::add_ptr(v, &_wasted); | |
200 } | |
201 }; | |
202 | |
// A ParGCAllocBuffer that also maintains a block-offset table (_bt) over
// the space it covers, so blocks allocated in the buffer can be located
// by address.
class ParGCAllocBufferWithBOT: public ParGCAllocBuffer {
  BlockOffsetArrayContigSpace _bt;
  BlockOffsetSharedArray* _bsa;
  HeapWord* _true_end;  // end of the whole ParGCAllocBuffer

  static const size_t ChunkSizeInWords;
  static const size_t ChunkSizeInBytes;
  // Slow-path allocation used when the base-class bump-pointer allocation
  // fails; defined elsewhere.
  HeapWord* allocate_slow(size_t word_sz);

  void fill_region_with_block(MemRegion mr, bool contig);

public:
  ParGCAllocBufferWithBOT(size_t word_sz, BlockOffsetSharedArray* bsa);

  // Fast path: bump-pointer allocate via the base class and record the new
  // block in the BOT; on failure, fall back to allocate_slow().
  HeapWord* allocate(size_t word_sz) {
    HeapWord* res = ParGCAllocBuffer::allocate(word_sz);
    if (res != NULL) {
      _bt.alloc_block(res, word_sz);
    } else {
      res = allocate_slow(word_sz);
    }
    return res;
  }

  void undo_allocation(HeapWord* obj, size_t word_sz);

  // Re-targets the buffer at buf_start (see base-class set_buf), remembers
  // the true end, and resets the BOT to cover the new region.
  void set_buf(HeapWord* buf_start) {
    ParGCAllocBuffer::set_buf(buf_start);
    _true_end = _hard_end;
    _bt.set_region(MemRegion(buf_start, word_sz()));
    _bt.initialize_threshold();
  }

  void retire(bool end_of_gc, bool retain);

  // The region from the current allocation point to the true end of the
  // buffer.
  MemRegion range() {
    return MemRegion(_top, _true_end);
  }
};
241 }; |