src/share/vm/gc_implementation/concurrentMarkSweep/freeList.hpp @ 0:a61af66fc99e jdk7-b24 (initial load; author duke, Sat, 01 Dec 2007; child 6432c3bb6240)

/*
 * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

class CompactibleFreeListSpace;

// A class for maintaining a free list of FreeChunk's.  The FreeList
// maintains the structure of the list (head, tail, etc.) plus
// statistics for allocations from the list.  The links between items
// are not part of the FreeList; they are maintained in the FreeChunk's
// themselves.  The statistics are used to make decisions about
// coalescing FreeChunk's when they are swept during collection.
//
// See the corresponding .cpp file for a description of the specifics
// for that implementation.

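// Illustrative usage (a hypothetical sketch only -- the name "fc" and the
// 8-word chunk size are made up for the example):
//
//   FreeList fl;                         // empty list
//   fl.set_size(8);                      // this list holds 8-heap-word chunks
//   fl.returnChunkAtHead(fc);            // fc->size() must equal 8
//   FreeChunk* c = fl.getChunkAtHead();  // c == fc; list is empty again
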
class Mutex;

class FreeList VALUE_OBJ_CLASS_SPEC {
  friend class CompactibleFreeListSpace;
  FreeChunk*    _head;   // List of free chunks
  FreeChunk*    _tail;   // Tail of list of free chunks
  size_t        _size;   // Size in Heap words of each chunk
  ssize_t       _count;  // Number of entries in list
  size_t        _hint;   // next larger size list with a positive surplus
  AllocationStats _allocation_stats;  // statistics for smart allocation

#ifdef ASSERT
  Mutex*        _protecting_lock;
#endif

  // Asserts false if the protecting lock (if any) is not held.
  void assert_proper_lock_protection_work() const PRODUCT_RETURN;
  void assert_proper_lock_protection() const {
#ifdef ASSERT
    if (_protecting_lock != NULL)
      assert_proper_lock_protection_work();
#endif
  }

  // Initialize the allocation statistics.
 protected:
  void init_statistics();
  void set_count(ssize_t v) { _count = v; }
  void increment_count() { _count++; }
  void decrement_count() {
    _count--;
    assert(_count >= 0, "Count should not be negative");
  }

 public:
  // Constructor
  // Construct a list without any entries.
  FreeList();
  // Construct a list with "fc" as the first (and lone) entry in the list.
  FreeList(FreeChunk* fc);
  // Construct a list which will have a FreeChunk at address "addr" and
  // of size "size" as the first (and lone) entry in the list.
  FreeList(HeapWord* addr, size_t size);

  // Reset the head, tail, hint, and count of a free list.
  void reset(size_t hint);

  // Declare the current free list to be protected by the given lock.
#ifdef ASSERT
  void set_protecting_lock(Mutex* protecting_lock) {
    _protecting_lock = protecting_lock;
  }
#endif
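
  // Illustrative sketch only: in a debug build a caller can register the
  // lock that guards this list ("fl" and "FreeList_lock" are hypothetical
  // names; DEBUG_ONLY compiles its argument only when ASSERT is defined):
  //
  //   DEBUG_ONLY(fl.set_protecting_lock(&FreeList_lock);)
  //   // Mutating accessors below now assert that FreeList_lock is held.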

  // Accessors.
  FreeChunk* head() const {
    assert_proper_lock_protection();
    return _head;
  }
  void set_head(FreeChunk* v) {
    assert_proper_lock_protection();
    _head = v;
    assert(!_head || _head->size() == _size, "bad chunk size");
  }
  // Set the head of the list and set the prev field of non-null
  // values to NULL.
  void link_head(FreeChunk* v) {
    assert_proper_lock_protection();
    set_head(v);
    // If this method is not used (just set the head instead),
    // this check can be avoided.
    if (v != NULL) {
      v->linkPrev(NULL);
    }
  }

  FreeChunk* tail() const {
    assert_proper_lock_protection();
    return _tail;
  }
  void set_tail(FreeChunk* v) {
    assert_proper_lock_protection();
    _tail = v;
    assert(!_tail || _tail->size() == _size, "bad chunk size");
  }
  // Set the tail of the list and set the next field of non-null
  // values to NULL.
  void link_tail(FreeChunk* v) {
    assert_proper_lock_protection();
    set_tail(v);
    if (v != NULL) {
      v->clearNext();
    }
  }
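
  // Illustrative sketch only ("fc" is a hypothetical lone chunk): linking
  // through these helpers terminates the list at both ends.
  //
  //   fl.link_head(fc);   // fc->prev() becomes NULL
  //   fl.link_tail(fc);   // fc->next() becomes NULL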

  // No locking checks in read-accessors: lock-free reads (only) are benign.
  // Readers are expected to have the lock if they are doing work that
  // requires atomicity guarantees in sections of code.
  size_t size() const {
    return _size;
  }
  void set_size(size_t v) {
    assert_proper_lock_protection();
    _size = v;
  }
  ssize_t count() const {
    return _count;
  }
  size_t hint() const {
    return _hint;
  }
  void set_hint(size_t v) {
    assert_proper_lock_protection();
    assert(v == 0 || _size < v, "Bad hint");
    _hint = v;
  }
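
  // Illustrative sketch only ("fl8" is a hypothetical list of 8-word
  // chunks): the hint names a larger size whose list tends to run a
  // surplus, so allocation can look there when this list is empty.
  //
  //   fl8.set_hint(16);   // legal: 16 > fl8.size(); a hint of 0 clears it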

  // Accessors for statistics
  AllocationStats* allocation_stats() {
    assert_proper_lock_protection();
    return &_allocation_stats;
  }

  ssize_t desired() const {
    return _allocation_stats.desired();
  }
  void compute_desired(float inter_sweep_current,
                       float inter_sweep_estimate) {
    assert_proper_lock_protection();
    _allocation_stats.compute_desired(_count,
                                      inter_sweep_current,
                                      inter_sweep_estimate);
  }
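
  // Illustrative sketch only (both argument names are hypothetical): a
  // sweeper might refresh the demand estimate with the time elapsed since
  // the last sweep and its prediction for the next inter-sweep interval.
  //
  //   fl.compute_desired(elapsed_since_last_sweep, predicted_interval);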
  ssize_t coalDesired() const {
    return _allocation_stats.coalDesired();
  }
  void set_coalDesired(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_coalDesired(v);
  }

  ssize_t surplus() const {
    return _allocation_stats.surplus();
  }
  void set_surplus(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_surplus(v);
  }
  void increment_surplus() {
    assert_proper_lock_protection();
    _allocation_stats.increment_surplus();
  }
  void decrement_surplus() {
    assert_proper_lock_protection();
    _allocation_stats.decrement_surplus();
  }

  ssize_t bfrSurp() const {
    return _allocation_stats.bfrSurp();
  }
  void set_bfrSurp(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_bfrSurp(v);
  }
  ssize_t prevSweep() const {
    return _allocation_stats.prevSweep();
  }
  void set_prevSweep(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_prevSweep(v);
  }
  ssize_t beforeSweep() const {
    return _allocation_stats.beforeSweep();
  }
  void set_beforeSweep(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_beforeSweep(v);
  }

  ssize_t coalBirths() const {
    return _allocation_stats.coalBirths();
  }
  void set_coalBirths(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_coalBirths(v);
  }
  void increment_coalBirths() {
    assert_proper_lock_protection();
    _allocation_stats.increment_coalBirths();
  }

  ssize_t coalDeaths() const {
    return _allocation_stats.coalDeaths();
  }
  void set_coalDeaths(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_coalDeaths(v);
  }
  void increment_coalDeaths() {
    assert_proper_lock_protection();
    _allocation_stats.increment_coalDeaths();
  }

  ssize_t splitBirths() const {
    return _allocation_stats.splitBirths();
  }
  void set_splitBirths(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_splitBirths(v);
  }
  void increment_splitBirths() {
    assert_proper_lock_protection();
    _allocation_stats.increment_splitBirths();
  }

  ssize_t splitDeaths() const {
    return _allocation_stats.splitDeaths();
  }
  void set_splitDeaths(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_splitDeaths(v);
  }
  void increment_splitDeaths() {
    assert_proper_lock_protection();
    _allocation_stats.increment_splitDeaths();
  }

  NOT_PRODUCT(
    // For debugging.  The "_returnedBytes" in all the lists are summed
    // and compared with the total number of bytes swept during a
    // collection.
    size_t returnedBytes() const { return _allocation_stats.returnedBytes(); }
    void set_returnedBytes(size_t v) { _allocation_stats.set_returnedBytes(v); }
    void increment_returnedBytes_by(size_t v) {
      _allocation_stats.set_returnedBytes(_allocation_stats.returnedBytes() + v);
    }
  )
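
  // Illustrative sketch only ("lists", "num_lists" and "bytes_swept" are
  // hypothetical): a debug build could cross-check sweep accounting as
  // the comment above describes.
  //
  //   size_t sum = 0;
  //   for (size_t i = 0; i < num_lists; i++) {
  //     sum += lists[i].returnedBytes();
  //   }
  //   assert(sum == bytes_swept, "sweep accounting mismatch");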

  // Unlink head of list and return it.  Returns NULL if
  // the list is empty.
  FreeChunk* getChunkAtHead();

  // Remove the first "n" chunks from the list (or all of them, if the
  // list holds fewer than "n"), setting "fl", which is required to be
  // empty, to point to them.
  void getFirstNChunksFromList(size_t n, FreeList* fl);
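
  // Illustrative sketch only ("batch" is a hypothetical receiver): carve
  // up to 16 chunks off the front of "fl" into an empty list of the same
  // chunk size.
  //
  //   FreeList batch;
  //   batch.set_size(fl.size());
  //   fl.getFirstNChunksFromList(16, &batch);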

  // Unlink chunk "fc" from this free list.
  void removeChunk(FreeChunk* fc);

  // Add chunk "fc" to this free list.
  void returnChunkAtHead(FreeChunk* fc);
  void returnChunkAtTail(FreeChunk* fc);

  // Similar to returnChunk* but also records some diagnostic
  // information.
  void returnChunkAtHead(FreeChunk* fc, bool record_return);
  void returnChunkAtTail(FreeChunk* fc, bool record_return);

  // Prepend "fl" (whose size is required to be the same as that of "this")
  // to the front of "this" list.
  void prepend(FreeList* fl);

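  // Illustrative sketch only ("donor" is a hypothetical list of the same
  // chunk size): splice another list's chunks onto the front of this one.
  //
  //   fl.prepend(&donor);   // donor's chunks now head fl's list
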
  // Verify that chunk "fc" is in this free list.  Returns true if
  // found, false otherwise.
  bool verifyChunkInFreeLists(FreeChunk* fc) const;
};