comparison src/share/vm/memory/freeList.hpp @ 6885:685df3c6f84b

7045397: NPG: Add freelists to class loader arenas. Reviewed-by: coleenp, stefank, jprovino, ohair
author jmasa
date Tue, 18 Sep 2012 23:35:42 -0700
parents b9a9ed0f8eeb
children 7c5a1b62f53d
--- src/share/vm/memory/freeList.hpp  6877:d0e7716b179e
+++ src/share/vm/memory/freeList.hpp  6885:685df3c6f84b
@@ -38,27 +38,23 @@
 //
 // See the corresponding .cpp file for a description of the specifics
 // for that implementation.
 
 class Mutex;
-template <class Chunk> class TreeList;
-template <class Chunk> class PrintTreeCensusClosure;
 
-template <class Chunk>
+template <class Chunk_t>
 class FreeList VALUE_OBJ_CLASS_SPEC {
   friend class CompactibleFreeListSpace;
   friend class VMStructs;
-  friend class PrintTreeCensusClosure<Chunk>;
 
  private:
-  Chunk*       _head;          // Head of list of free chunks
-  Chunk*       _tail;          // Tail of list of free chunks
+  Chunk_t*     _head;          // Head of list of free chunks
+  Chunk_t*     _tail;          // Tail of list of free chunks
   size_t       _size;          // Size in Heap words of each chunk
   ssize_t      _count;         // Number of entries in list
-  size_t       _hint;          // next larger size list with a positive surplus
 
-  AllocationStats _allocation_stats; // allocation-related statistics
+ protected:
 
 #ifdef ASSERT
   Mutex*       _protecting_lock;
 #endif
 
@@ -69,14 +65,10 @@
     if (_protecting_lock != NULL)
       assert_proper_lock_protection_work();
 #endif
   }
 
-  // Initialize the allocation statistics.
- protected:
-  void init_statistics(bool split_birth = false);
-  void set_count(ssize_t v) { _count = v;}
   void increment_count() {
     _count++;
   }
 
   void decrement_count() {
@@ -87,56 +79,52 @@
  public:
   // Constructor
   // Construct a list without any entries.
   FreeList();
   // Construct a list with "fc" as the first (and lone) entry in the list.
-  FreeList(Chunk* fc);
+  FreeList(Chunk_t* fc);
 
-  // Reset the head, tail, hint, and count of a free list.
-  void reset(size_t hint);
+  // Do initialization
+  void initialize();
+
+  // Reset the head, tail, and count of a free list.
+  void reset();
 
   // Declare the current free list to be protected by the given lock.
 #ifdef ASSERT
-  void set_protecting_lock(Mutex* protecting_lock) {
-    _protecting_lock = protecting_lock;
+  Mutex* protecting_lock() const { return _protecting_lock; }
+  void set_protecting_lock(Mutex* v) {
+    _protecting_lock = v;
   }
 #endif
 
   // Accessors.
-  Chunk* head() const {
+  Chunk_t* head() const {
     assert_proper_lock_protection();
     return _head;
   }
-  void set_head(Chunk* v) {
+  void set_head(Chunk_t* v) {
     assert_proper_lock_protection();
     _head = v;
     assert(!_head || _head->size() == _size, "bad chunk size");
   }
   // Set the head of the list and set the prev field of non-null
   // values to NULL.
-  void link_head(Chunk* v) {
-    assert_proper_lock_protection();
-    set_head(v);
-    // If this method is not used (just set the head instead),
-    // this check can be avoided.
-    if (v != NULL) {
-      v->link_prev(NULL);
-    }
-  }
+  void link_head(Chunk_t* v);
 
-  Chunk* tail() const {
+  Chunk_t* tail() const {
     assert_proper_lock_protection();
     return _tail;
   }
-  void set_tail(Chunk* v) {
+  void set_tail(Chunk_t* v) {
     assert_proper_lock_protection();
     _tail = v;
     assert(!_tail || _tail->size() == _size, "bad chunk size");
   }
   // Set the tail of the list and set the next field of non-null
   // values to NULL.
-  void link_tail(Chunk* v) {
+  void link_tail(Chunk_t* v) {
     assert_proper_lock_protection();
     set_tail(v);
     if (v != NULL) {
       v->clear_next();
     }
@@ -150,178 +138,49 @@
   }
   void set_size(size_t v) {
     assert_proper_lock_protection();
     _size = v;
   }
-  ssize_t count() const {
-    return _count;
-  }
-  size_t hint() const {
-    return _hint;
-  }
-  void set_hint(size_t v) {
-    assert_proper_lock_protection();
-    assert(v == 0 || _size < v, "Bad hint"); _hint = v;
-  }
+  ssize_t count() const { return _count; }
+  void set_count(ssize_t v) { _count = v;}
 
-  // Accessors for statistics
-  AllocationStats* allocation_stats() {
-    assert_proper_lock_protection();
-    return &_allocation_stats;
-  }
+  size_t get_better_size() { return size(); }
 
-  ssize_t desired() const {
-    return _allocation_stats.desired();
-  }
-  void set_desired(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_desired(v);
-  }
-  void compute_desired(float inter_sweep_current,
-                       float inter_sweep_estimate,
-                       float intra_sweep_estimate) {
-    assert_proper_lock_protection();
-    _allocation_stats.compute_desired(_count,
-                                      inter_sweep_current,
-                                      inter_sweep_estimate,
-                                      intra_sweep_estimate);
-  }
-  ssize_t coal_desired() const {
-    return _allocation_stats.coal_desired();
-  }
-  void set_coal_desired(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_coal_desired(v);
-  }
-
-  ssize_t surplus() const {
-    return _allocation_stats.surplus();
-  }
-  void set_surplus(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_surplus(v);
-  }
-  void increment_surplus() {
-    assert_proper_lock_protection();
-    _allocation_stats.increment_surplus();
-  }
-  void decrement_surplus() {
-    assert_proper_lock_protection();
-    _allocation_stats.decrement_surplus();
-  }
-
-  ssize_t bfr_surp() const {
-    return _allocation_stats.bfr_surp();
-  }
-  void set_bfr_surp(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_bfr_surp(v);
-  }
-  ssize_t prev_sweep() const {
-    return _allocation_stats.prev_sweep();
-  }
-  void set_prev_sweep(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_prev_sweep(v);
-  }
-  ssize_t before_sweep() const {
-    return _allocation_stats.before_sweep();
-  }
-  void set_before_sweep(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_before_sweep(v);
-  }
-
-  ssize_t coal_births() const {
-    return _allocation_stats.coal_births();
-  }
-  void set_coal_births(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_coal_births(v);
-  }
-  void increment_coal_births() {
-    assert_proper_lock_protection();
-    _allocation_stats.increment_coal_births();
-  }
-
-  ssize_t coal_deaths() const {
-    return _allocation_stats.coal_deaths();
-  }
-  void set_coal_deaths(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_coal_deaths(v);
-  }
-  void increment_coal_deaths() {
-    assert_proper_lock_protection();
-    _allocation_stats.increment_coal_deaths();
-  }
-
-  ssize_t split_births() const {
-    return _allocation_stats.split_births();
-  }
-  void set_split_births(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_split_births(v);
-  }
-  void increment_split_births() {
-    assert_proper_lock_protection();
-    _allocation_stats.increment_split_births();
-  }
-
-  ssize_t split_deaths() const {
-    return _allocation_stats.split_deaths();
-  }
-  void set_split_deaths(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_split_deaths(v);
-  }
-  void increment_split_deaths() {
-    assert_proper_lock_protection();
-    _allocation_stats.increment_split_deaths();
-  }
-
-  NOT_PRODUCT(
-    // For debugging. The "_returned_bytes" in all the lists are summed
-    // and compared with the total number of bytes swept during a
-    // collection.
-    size_t returned_bytes() const { return _allocation_stats.returned_bytes(); }
-    void set_returned_bytes(size_t v) { _allocation_stats.set_returned_bytes(v); }
-    void increment_returned_bytes_by(size_t v) {
-      _allocation_stats.set_returned_bytes(_allocation_stats.returned_bytes() + v);
-    }
-  )
+  size_t returned_bytes() const { ShouldNotReachHere(); return 0; }
+  void set_returned_bytes(size_t v) {}
+  void increment_returned_bytes_by(size_t v) {}
 
   // Unlink head of list and return it. Returns NULL if
   // the list is empty.
-  Chunk* get_chunk_at_head();
+  Chunk_t* get_chunk_at_head();
 
   // Remove the first "n" or "count", whichever is smaller, chunks from the
   // list, setting "fl", which is required to be empty, to point to them.
-  void getFirstNChunksFromList(size_t n, FreeList<Chunk>* fl);
+  void getFirstNChunksFromList(size_t n, FreeList<Chunk_t>* fl);
 
   // Unlink this chunk from it's free list
-  void remove_chunk(Chunk* fc);
+  void remove_chunk(Chunk_t* fc);
 
   // Add this chunk to this free list.
-  void return_chunk_at_head(Chunk* fc);
-  void return_chunk_at_tail(Chunk* fc);
+  void return_chunk_at_head(Chunk_t* fc);
+  void return_chunk_at_tail(Chunk_t* fc);
 
   // Similar to returnChunk* but also records some diagnostic
   // information.
-  void return_chunk_at_head(Chunk* fc, bool record_return);
-  void return_chunk_at_tail(Chunk* fc, bool record_return);
+  void return_chunk_at_head(Chunk_t* fc, bool record_return);
+  void return_chunk_at_tail(Chunk_t* fc, bool record_return);
 
   // Prepend "fl" (whose size is required to be the same as that of "this")
   // to the front of "this" list.
-  void prepend(FreeList<Chunk>* fl);
+  void prepend(FreeList<Chunk_t>* fl);
 
   // Verify that the chunk is in the list.
   // found. Return NULL if "fc" is not found.
-  bool verify_chunk_in_free_list(Chunk* fc) const;
+  bool verify_chunk_in_free_list(Chunk_t* fc) const;
 
   // Stats verification
-  void verify_stats() const PRODUCT_RETURN;
+  // void verify_stats() const { ShouldNotReachHere(); };
 
   // Printing support
   static void print_labels_on(outputStream* st, const char* c);
   void print_on(outputStream* st, const char* c = NULL) const;
 };
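
The net effect of the hunks above is that FreeList is now templatized on Chunk_t and no longer carries the AllocationStats bookkeeping, so the same list code can back the new class-loader-arena chunk lists as well as the CMS free chunks. The standalone sketch below is not HotSpot code; it is a minimal illustration of the list shape this header declares (a size-keyed, doubly linked list with head/tail/count and return_chunk_at_head / get_chunk_at_head operations). ToyChunk and ToyFreeList are invented names standing in for a real Chunk_t; the lock-protection asserts and statistics are omitted.

// Standalone sketch, not HotSpot code: ToyChunk/ToyFreeList are illustrative only.
#include <cassert>
#include <cstddef>
#include <cstdio>

struct ToyChunk {
  size_t    _size;   // every chunk on a given list has that list's size
  ToyChunk* _prev;
  ToyChunk* _next;

  explicit ToyChunk(size_t sz) : _size(sz), _prev(NULL), _next(NULL) {}
  size_t size() const          { return _size; }
  void link_prev(ToyChunk* p)  { _prev = p; }
  void link_next(ToyChunk* n)  { _next = n; }
  void clear_next()            { _next = NULL; }
};

template <class Chunk_t>
class ToyFreeList {
  Chunk_t* _head;   // head of list of free chunks
  Chunk_t* _tail;   // tail of list of free chunks
  size_t   _size;   // size of each chunk on this list
  long     _count;  // number of entries in list

 public:
  explicit ToyFreeList(size_t size)
    : _head(NULL), _tail(NULL), _size(size), _count(0) {}

  long   count() const { return _count; }
  size_t size()  const { return _size; }

  // Add a chunk of the matching size class to the front of the list.
  void return_chunk_at_head(Chunk_t* fc) {
    assert(fc->size() == _size && "bad chunk size");
    fc->link_prev(NULL);
    fc->link_next(_head);
    if (_head != NULL) {
      _head->link_prev(fc);
    } else {
      _tail = fc;               // list was empty, so fc is also the tail
    }
    _head = fc;
    _count++;
  }

  // Unlink the head of the list and return it; NULL if the list is empty.
  Chunk_t* get_chunk_at_head() {
    Chunk_t* fc = _head;
    if (fc == NULL) return NULL;
    _head = fc->_next;
    if (_head != NULL) {
      _head->link_prev(NULL);
    } else {
      _tail = NULL;             // list is now empty
    }
    fc->clear_next();
    _count--;
    return fc;
  }
};

int main() {
  ToyFreeList<ToyChunk> list(64);   // a free list of 64-word chunks
  ToyChunk a(64), b(64);

  list.return_chunk_at_head(&a);
  list.return_chunk_at_head(&b);
  printf("count = %ld\n", list.count());                // count = 2

  ToyChunk* fc = list.get_chunk_at_head();
  printf("head was b: %s\n", fc == &b ? "yes" : "no");  // head was b: yes
  printf("count = %ld\n", list.count());                // count = 1
  return 0;
}

The real FreeList<Chunk_t> additionally wraps its accessors in assert_proper_lock_protection(), as the diff shows, and the statistics accessors removed here are no longer part of this base class.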