comparison src/share/vm/memory/freeList.hpp @ 6026:9f059abe8cf2

7131629: Generalize the CMS free list code
Summary: Make the FreeChunk, FreeList, TreeList, and BinaryTreeDictionary classes usable outside CMS.
Reviewed-by: brutisso, johnc, jwilhelm
Contributed-by: coleen.phillimore@oracle.com
author jmasa
date Thu, 29 Mar 2012 19:46:24 -0700
parents src/share/vm/gc_implementation/concurrentMarkSweep/freeList.hpp@f95d63e2154a
children f69a5d43dc19
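The changeset description says these free list classes are being made usable outside CMS; in this header that shows up as FreeList becoming a template over a Chunk type. Purely as an illustrative sketch (the MyChunk name and its members are hypothetical, not part of this changeset), a chunk type only needs to supply the operations the header calls on its template parameter, such as size(), linkPrev(), and clearNext():

    #include <cstddef>

    // Hypothetical chunk type providing the operations that
    // FreeList<Chunk> invokes on its template parameter.
    class MyChunk {
      MyChunk* _prev;
      MyChunk* _next;
      size_t   _size;                               // size in heap words
     public:
      explicit MyChunk(size_t sz) : _prev(NULL), _next(NULL), _size(sz) {}
      size_t   size() const         { return _size; }
      void     linkPrev(MyChunk* p) { _prev = p;    }
      void     clearNext()          { _next = NULL; }
      MyChunk* prev() const         { return _prev; }
      MyChunk* next() const         { return _next; }
    };

    // Inside a HotSpot build that includes this header, the generalized
    // template could then be instantiated along the lines of:
    //   FreeList<MyChunk> fl;
    //   fl.set_size(chunk_size_in_words);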
1 /*
2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_MEMORY_FREELIST_HPP
26 #define SHARE_VM_MEMORY_FREELIST_HPP
27
28 #include "gc_implementation/shared/allocationStats.hpp"
29
30 class CompactibleFreeListSpace;
31
32 // A class for maintaining a free list of Chunk's. The FreeList
33 // maintains the structure of the list (head, tail, etc.) plus
34 // statistics for allocations from the list. The links between items
35 // are not part of FreeList. The statistics are
36 // used to make decisions about coalescing Chunk's when they
37 // are swept during collection.
38 //
39 // See the corresponding .cpp file for a description of the specifics
40 // for that implementation.
41
42 class Mutex;
43 template <class Chunk> class TreeList;
44 template <class Chunk> class PrintTreeCensusClosure;
45
46 template <class Chunk>
47 class FreeList VALUE_OBJ_CLASS_SPEC {
48 friend class CompactibleFreeListSpace;
49 friend class VMStructs;
50 friend class PrintTreeCensusClosure<Chunk>;
51
52 private:
53 Chunk* _head; // Head of list of free chunks
54 Chunk* _tail; // Tail of list of free chunks
55 size_t _size; // Size in heap words of each chunk
56 ssize_t _count; // Number of entries in list
57 size_t _hint; // Next larger size list with a positive surplus
58
59 AllocationStats _allocation_stats; // allocation-related statistics
60
61 #ifdef ASSERT
62 Mutex* _protecting_lock;
63 #endif
64
65 // Asserts false if the protecting lock (if any) is not held.
66 void assert_proper_lock_protection_work() const PRODUCT_RETURN;
67 void assert_proper_lock_protection() const {
68 #ifdef ASSERT
69 if (_protecting_lock != NULL)
70 assert_proper_lock_protection_work();
71 #endif
72 }
73
74 // Initialize the allocation statistics.
75 protected:
76 void init_statistics(bool split_birth = false);
77 void set_count(ssize_t v) { _count = v;}
78 void increment_count() {
79 _count++;
80 }
81
82 void decrement_count() {
83 _count--;
84 assert(_count >= 0, "Count should not be negative");
85 }
86
87 public:
88 // Constructor
89 // Construct a list without any entries.
90 FreeList();
91 // Construct a list with "fc" as the first (and lone) entry in the list.
92 FreeList(Chunk* fc);
93
94 // Reset the head, tail, hint, and count of a free list.
95 void reset(size_t hint);
96
97 // Declare the current free list to be protected by the given lock.
98 #ifdef ASSERT
99 void set_protecting_lock(Mutex* protecting_lock) {
100 _protecting_lock = protecting_lock;
101 }
102 #endif
103
104 // Accessors.
105 Chunk* head() const {
106 assert_proper_lock_protection();
107 return _head;
108 }
109 void set_head(Chunk* v) {
110 assert_proper_lock_protection();
111 _head = v;
112 assert(!_head || _head->size() == _size, "bad chunk size");
113 }
114 // Set the head of the list and set the prev field of non-null
115 // values to NULL.
116 void link_head(Chunk* v) {
117 assert_proper_lock_protection();
118 set_head(v);
119 // If this method is not used (just set the head instead),
120 // this check can be avoided.
121 if (v != NULL) {
122 v->linkPrev(NULL);
123 }
124 }
125
126 Chunk* tail() const {
127 assert_proper_lock_protection();
128 return _tail;
129 }
130 void set_tail(Chunk* v) {
131 assert_proper_lock_protection();
132 _tail = v;
133 assert(!_tail || _tail->size() == _size, "bad chunk size");
134 }
135 // Set the tail of the list and set the next field of non-null
136 // values to NULL.
137 void link_tail(Chunk* v) {
138 assert_proper_lock_protection();
139 set_tail(v);
140 if (v != NULL) {
141 v->clearNext();
142 }
143 }
144
145 // No locking checks in read-accessors: lock-free reads (only) are benign.
146 // Readers are expected to hold the lock in sections of code that
147 // require atomicity guarantees.
148 size_t size() const {
149 return _size;
150 }
151 void set_size(size_t v) {
152 assert_proper_lock_protection();
153 _size = v;
154 }
155 ssize_t count() const {
156 return _count;
157 }
158 size_t hint() const {
159 return _hint;
160 }
161 void set_hint(size_t v) {
162 assert_proper_lock_protection();
163 assert(v == 0 || _size < v, "Bad hint"); _hint = v;
164 }
165
166 // Accessors for statistics
167 AllocationStats* allocation_stats() {
168 assert_proper_lock_protection();
169 return &_allocation_stats;
170 }
171
172 ssize_t desired() const {
173 return _allocation_stats.desired();
174 }
175 void set_desired(ssize_t v) {
176 assert_proper_lock_protection();
177 _allocation_stats.set_desired(v);
178 }
179 void compute_desired(float inter_sweep_current,
180 float inter_sweep_estimate,
181 float intra_sweep_estimate) {
182 assert_proper_lock_protection();
183 _allocation_stats.compute_desired(_count,
184 inter_sweep_current,
185 inter_sweep_estimate,
186 intra_sweep_estimate);
187 }
188 ssize_t coalDesired() const {
189 return _allocation_stats.coalDesired();
190 }
191 void set_coalDesired(ssize_t v) {
192 assert_proper_lock_protection();
193 _allocation_stats.set_coalDesired(v);
194 }
195
196 ssize_t surplus() const {
197 return _allocation_stats.surplus();
198 }
199 void set_surplus(ssize_t v) {
200 assert_proper_lock_protection();
201 _allocation_stats.set_surplus(v);
202 }
203 void increment_surplus() {
204 assert_proper_lock_protection();
205 _allocation_stats.increment_surplus();
206 }
207 void decrement_surplus() {
208 assert_proper_lock_protection();
209 _allocation_stats.decrement_surplus();
210 }
211
212 ssize_t bfrSurp() const {
213 return _allocation_stats.bfrSurp();
214 }
215 void set_bfrSurp(ssize_t v) {
216 assert_proper_lock_protection();
217 _allocation_stats.set_bfrSurp(v);
218 }
219 ssize_t prevSweep() const {
220 return _allocation_stats.prevSweep();
221 }
222 void set_prevSweep(ssize_t v) {
223 assert_proper_lock_protection();
224 _allocation_stats.set_prevSweep(v);
225 }
226 ssize_t beforeSweep() const {
227 return _allocation_stats.beforeSweep();
228 }
229 void set_beforeSweep(ssize_t v) {
230 assert_proper_lock_protection();
231 _allocation_stats.set_beforeSweep(v);
232 }
233
234 ssize_t coalBirths() const {
235 return _allocation_stats.coalBirths();
236 }
237 void set_coalBirths(ssize_t v) {
238 assert_proper_lock_protection();
239 _allocation_stats.set_coalBirths(v);
240 }
241 void increment_coalBirths() {
242 assert_proper_lock_protection();
243 _allocation_stats.increment_coalBirths();
244 }
245
246 ssize_t coalDeaths() const {
247 return _allocation_stats.coalDeaths();
248 }
249 void set_coalDeaths(ssize_t v) {
250 assert_proper_lock_protection();
251 _allocation_stats.set_coalDeaths(v);
252 }
253 void increment_coalDeaths() {
254 assert_proper_lock_protection();
255 _allocation_stats.increment_coalDeaths();
256 }
257
258 ssize_t splitBirths() const {
259 return _allocation_stats.splitBirths();
260 }
261 void set_splitBirths(ssize_t v) {
262 assert_proper_lock_protection();
263 _allocation_stats.set_splitBirths(v);
264 }
265 void increment_splitBirths() {
266 assert_proper_lock_protection();
267 _allocation_stats.increment_splitBirths();
268 }
269
270 ssize_t splitDeaths() const {
271 return _allocation_stats.splitDeaths();
272 }
273 void set_splitDeaths(ssize_t v) {
274 assert_proper_lock_protection();
275 _allocation_stats.set_splitDeaths(v);
276 }
277 void increment_splitDeaths() {
278 assert_proper_lock_protection();
279 _allocation_stats.increment_splitDeaths();
280 }
281
282 NOT_PRODUCT(
283 // For debugging. The "_returnedBytes" in all the lists are summed
284 // and compared with the total number of bytes swept during a
285 // collection.
286 size_t returnedBytes() const { return _allocation_stats.returnedBytes(); }
287 void set_returnedBytes(size_t v) { _allocation_stats.set_returnedBytes(v); }
288 void increment_returnedBytes_by(size_t v) {
289 _allocation_stats.set_returnedBytes(_allocation_stats.returnedBytes() + v);
290 }
291 )
292
293 // Unlink head of list and return it. Returns NULL if
294 // the list is empty.
295 Chunk* getChunkAtHead();
296
297 // Remove the first min(n, count()) chunks from the list, setting "fl",
298 // which is required to be empty, to point to them.
299 void getFirstNChunksFromList(size_t n, FreeList<Chunk>* fl);
300
301 // Unlink this chunk from its free list
302 void removeChunk(Chunk* fc);
303
304 // Add this chunk to this free list.
305 void returnChunkAtHead(Chunk* fc);
306 void returnChunkAtTail(Chunk* fc);
307
308 // Similar to returnChunk* but also records some diagnostic
309 // information.
310 void returnChunkAtHead(Chunk* fc, bool record_return);
311 void returnChunkAtTail(Chunk* fc, bool record_return);
312
313 // Prepend "fl" (whose size is required to be the same as that of "this")
314 // to the front of "this" list.
315 void prepend(FreeList<Chunk>* fl);
316
317 // Verify that the chunk is in the list.
318 // Returns true if "fc" is found, false otherwise.
319 bool verifyChunkInFreeLists(Chunk* fc) const;
320
321 // Stats verification
322 void verify_stats() const PRODUCT_RETURN;
323
324 // Printing support
325 static void print_labels_on(outputStream* st, const char* c);
326 void print_on(outputStream* st, const char* c = NULL) const;
327 };
328
329 #endif // SHARE_VM_MEMORY_FREELIST_HPP
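The class comment at the top of this header notes that the per-list allocation statistics are used to decide whether to coalesce chunks during sweeping. The actual policy lives in the CMS collector sources, not in this file; purely as an illustration of how the public accessors declared above could feed such a decision (the helper name below is hypothetical, and this is not the CMS policy), one might compare the current population of a list against its computed demand:

    // Assumes this header (memory/freeList.hpp) is included.
    template <class Chunk>
    bool list_is_over_populated(const FreeList<Chunk>& fl) {
      // desired() is the demand estimate maintained by compute_desired();
      // count() is the number of chunks currently on the list.  A list that
      // already holds more chunks than it expects to hand out before the
      // next sweep is a cheap place to give one up to coalescing.
      return fl.count() > fl.desired();
    }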