Mercurial > hg > graal-compiler
annotate src/share/vm/memory/referenceProcessor.hpp @ 263:12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
Summary: Maintain a high water mark for the allocations in a space and mangle only up to that high water mark.
Reviewed-by: ysr, apetrusenko
author | jmasa |
---|---|
date | Wed, 09 Jul 2008 15:08:55 -0700 |
parents | ba764ed4b6f2 |
children | d1605aabd0a1 37f87013dfd8 |
rev | line source |
---|---|
0 | 1 /* |
2 * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. | |
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, | |
20 * CA 95054 USA or visit www.sun.com if you need additional information or | |
21 * have any questions. | |
22 * | |
23 */ | |
24 | |
25 // ReferenceProcessor class encapsulates the per-"collector" processing | |
26 // of "weak" references for GC. The interface is useful for supporting | |
27 // a generational abstraction, in particular when there are multiple | |
28 // generations that are being independently collected -- possibly | |
29 // concurrently and/or incrementally. Note, however, that the | |
30 // ReferenceProcessor class abstracts away from a generational setting | |
31 // by using only a heap interval (called "span" below), thus allowing | |
32 // its use in a straightforward manner in a general, non-generational | |
33 // setting. | |
34 // | |
35 // The basic idea is that each ReferenceProcessor object concerns | |
36 // itself with ("weak") reference processing in a specific "span" | |
37 // of the heap of interest to a specific collector. Currently, | |
38 // the span is a convex interval of the heap, but, efficiency | |
39 // apart, there seems to be no reason it couldn't be extended | |
40 // (with appropriate modifications) to any "non-convex interval". | |
41 | |
42 // forward references | |
43 class ReferencePolicy; | |
44 class AbstractRefProcTaskExecutor; | |
45 class DiscoveredList; | |
46 | |
// Per-collector weak-reference state: the heap span subject to
// discovery, discovery/processing mode flags, and the per-type
// discovered reference lists (soft/weak/final/phantom).
class ReferenceProcessor : public CHeapObj {
 protected:
  // End of list marker
  static oop  _sentinelRef;
  MemRegion   _span;                  // (right-open) interval of heap
                                      // subject to wkref discovery
  bool        _discovering_refs;      // true when discovery enabled
  bool        _discovery_is_atomic;   // if discovery is atomic wrt
                                      // other collectors in configuration
  bool        _discovery_is_mt;       // true if reference discovery is MT.
  bool        _enqueuing_is_done;     // true if all weak references enqueued
  bool        _processing_is_mt;      // true during phases when
                                      // reference processing is MT.
  int         _next_id;               // round-robin counter in
                                      // support of work distribution

  // For collectors that do not keep GC marking information
  // in the object header, this field holds a closure that
  // helps the reference processor determine the reachability
  // of an oop (the field is currently initialized to NULL for
  // all collectors but the CMS collector).
  BoolObjectClosure* _is_alive_non_header;

  // The discovered ref lists themselves

  // The MT'ness degree of the queues below
  int             _num_q;
  // Arrays of lists of oops, one per thread
  DiscoveredList* _discoveredSoftRefs;
  DiscoveredList* _discoveredWeakRefs;
  DiscoveredList* _discoveredFinalRefs;
  DiscoveredList* _discoveredPhantomRefs;

 public:
  int             num_q()                { return _num_q; }
  DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; }
  static oop      sentinel_ref()         { return _sentinelRef; }
  static oop*     adr_sentinel_ref()     { return &_sentinelRef; }

 public:
  // Process references with a certain reachability level.
  void process_discovered_reflist(DiscoveredList refs_lists[],
                                  ReferencePolicy* policy,
                                  bool clear_referent,
                                  BoolObjectClosure* is_alive,
                                  OopClosure* keep_alive,
                                  VoidClosure* complete_gc,
                                  AbstractRefProcTaskExecutor* task_executor);

  // Process the JNI weak global handles.
  void process_phaseJNI(BoolObjectClosure* is_alive,
                        OopClosure* keep_alive,
                        VoidClosure* complete_gc);

  // Work methods used by the method process_discovered_reflist
  // Phase1: keep alive all those referents that are otherwise
  // dead but which must be kept alive by policy (and their closure).
  void process_phase1(DiscoveredList& refs_list,
                      ReferencePolicy* policy,
                      BoolObjectClosure* is_alive,
                      OopClosure* keep_alive,
                      VoidClosure* complete_gc);
  // Phase2: remove all those references whose referents are
  // reachable.
  inline void process_phase2(DiscoveredList& refs_list,
                             BoolObjectClosure* is_alive,
                             OopClosure* keep_alive,
                             VoidClosure* complete_gc) {
    if (discovery_is_atomic()) {
      // complete_gc is ignored in this case for this phase
      pp2_work(refs_list, is_alive, keep_alive);
    } else {
      assert(complete_gc != NULL, "Error");
      pp2_work_concurrent_discovery(refs_list, is_alive,
                                    keep_alive, complete_gc);
    }
  }
  // Work methods in support of process_phase2
  void pp2_work(DiscoveredList& refs_list,
                BoolObjectClosure* is_alive,
                OopClosure* keep_alive);
  void pp2_work_concurrent_discovery(
                DiscoveredList& refs_list,
                BoolObjectClosure* is_alive,
                OopClosure* keep_alive,
                VoidClosure* complete_gc);
  // Phase3: process the referents by either clearing them
  // or keeping them alive (and their closure)
  void process_phase3(DiscoveredList& refs_list,
                      bool clear_referent,
                      BoolObjectClosure* is_alive,
                      OopClosure* keep_alive,
                      VoidClosure* complete_gc);

  // Enqueue references with a certain reachability level
  void enqueue_discovered_reflist(DiscoveredList& refs_list, HeapWord* pending_list_addr);

  // "Preclean" all the discovered reference lists
  // by removing references with strongly reachable referents.
  // The first argument is a predicate on an oop that indicates
  // its (strong) reachability and the second is a closure that
  // may be used to incrementalize or abort the precleaning process.
  // The caller is responsible for taking care of potential
  // interference with concurrent operations on these lists
  // (or predicates involved) by other threads. Currently
  // only used by the CMS collector.
  void preclean_discovered_references(BoolObjectClosure* is_alive,
                                      OopClosure* keep_alive,
                                      VoidClosure* complete_gc,
                                      YieldClosure* yield);

  // Delete entries in the discovered lists that have
  // either a null referent or are not active. Such
  // Reference objects can result from the clearing
  // or enqueueing of Reference objects concurrent
  // with their discovery by a (concurrent) collector.
  // For a definition of "active" see java.lang.ref.Reference;
  // Refs are born active, become inactive when enqueued,
  // and never become active again. The state of being
  // active is encoded as follows: A Ref is active
  // if and only if its "next" field is NULL.
  void clean_up_discovered_references();
  void clean_up_discovered_reflist(DiscoveredList& refs_list);

  // Returns the name of the discovered reference list
  // occupying the i / _num_q slot.
  const char* list_name(int i);

  void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);

 protected:
  // "Preclean" the given discovered reference list
  // by removing references with strongly reachable referents.
  // Currently used in support of CMS only.
  void preclean_discovered_reflist(DiscoveredList& refs_list,
                                   BoolObjectClosure* is_alive,
                                   OopClosure* keep_alive,
                                   VoidClosure* complete_gc,
                                   YieldClosure* yield);

  // Round-robin queue id dispenser, wrapping at _num_q; used to
  // spread discovered references across the per-thread lists.
  int next_id() {
    int id = _next_id;
    if (++_next_id == _num_q) {
      _next_id = 0;
    }
    return id;
  }
  // Selects the list (by reference type) a newly discovered ref goes on.
  DiscoveredList* get_discovered_list(ReferenceType rt);
  inline void add_to_discovered_list_mt(DiscoveredList& refs_list, oop obj,
                                        HeapWord* discovered_addr);
  void verify_ok_to_handle_reflists() PRODUCT_RETURN;

  void abandon_partial_discovered_list(DiscoveredList& refs_list);
  void abandon_partial_discovered_list_arr(DiscoveredList refs_lists[]);

  // Calculate the number of jni handles.
  unsigned int count_jni_refs();

  // Balances reference queues.
  void balance_queues(DiscoveredList ref_lists[]);

  // Update (advance) the soft ref master clock field.
  void update_soft_ref_master_clock();

 public:
  // constructor
  ReferenceProcessor():
    _span((HeapWord*)NULL, (HeapWord*)NULL),
    _discoveredSoftRefs(NULL),  _discoveredWeakRefs(NULL),
    _discoveredFinalRefs(NULL), _discoveredPhantomRefs(NULL),
    _discovering_refs(false),
    _discovery_is_atomic(true),
    _enqueuing_is_done(false),
    _discovery_is_mt(false),
    _is_alive_non_header(NULL),
    _num_q(0),
    _processing_is_mt(false),
    _next_id(0)
  {}

  ReferenceProcessor(MemRegion span, bool atomic_discovery,
                     bool mt_discovery, int mt_degree = 1,
                     bool mt_processing = false);

  // Allocates and initializes a reference processor.
  static ReferenceProcessor* create_ref_processor(
    MemRegion          span,
    bool               atomic_discovery,
    bool               mt_discovery,
    BoolObjectClosure* is_alive_non_header = NULL,
    int                parallel_gc_threads = 1,
    bool               mt_processing = false);

  // RefDiscoveryPolicy values
  enum {
    ReferenceBasedDiscovery = 0,
    ReferentBasedDiscovery  = 1
  };

  static void init_statics();

 public:
  // get and set "is_alive_non_header" field
  BoolObjectClosure* is_alive_non_header() {
    return _is_alive_non_header;
  }
  void set_is_alive_non_header(BoolObjectClosure* is_alive_non_header) {
    _is_alive_non_header = is_alive_non_header;
  }

  // get and set span
  MemRegion span()                   { return _span; }
  void      set_span(MemRegion span) { _span = span; }

  // start and stop weak ref discovery
  void enable_discovery()  { _discovering_refs = true;  }
  void disable_discovery() { _discovering_refs = false; }
  bool discovery_enabled() { return _discovering_refs;  }

  // whether discovery is atomic wrt other collectors
  bool discovery_is_atomic() const       { return _discovery_is_atomic; }
  void set_atomic_discovery(bool atomic) { _discovery_is_atomic = atomic; }

  // whether discovery is done by multiple threads same-old-timeously
  bool discovery_is_mt() const   { return _discovery_is_mt; }
  void set_mt_discovery(bool mt) { _discovery_is_mt = mt; }

  // Whether we are in a phase when _processing_ is MT.
  bool processing_is_mt() const   { return _processing_is_mt; }
  void set_mt_processing(bool mt) { _processing_is_mt = mt; }

  // whether all enqueuing of weak references is complete
  bool enqueuing_is_done()           { return _enqueuing_is_done; }
  void set_enqueuing_is_done(bool v) { _enqueuing_is_done = v; }

  // iterate over oops
  void weak_oops_do(OopClosure* f);        // weak roots
  static void oops_do(OopClosure* f);      // strong root(s)

  // Discover a Reference object, using appropriate discovery criteria
  bool discover_reference(oop obj, ReferenceType rt);

  // Process references found during GC (called by the garbage collector)
  void process_discovered_references(ReferencePolicy* policy,
                                     BoolObjectClosure* is_alive,
                                     OopClosure* keep_alive,
                                     VoidClosure* complete_gc,
                                     AbstractRefProcTaskExecutor* task_executor);

 public:
  // Enqueue references at end of GC (called by the garbage collector)
  bool enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor = NULL);

  // debugging
  void verify_no_references_recorded() PRODUCT_RETURN;
  static void verify();

  // clear the discovered lists (unlinking each entry).
  void clear_discovered_references() PRODUCT_RETURN;
};
306 | |
307 // A utility class to disable reference discovery in | |
308 // the scope which contains it, for given ReferenceProcessor. | |
309 class NoRefDiscovery: StackObj { | |
310 private: | |
311 ReferenceProcessor* _rp; | |
312 bool _was_discovering_refs; | |
313 public: | |
314 NoRefDiscovery(ReferenceProcessor* rp) : _rp(rp) { | |
315 if (_was_discovering_refs = _rp->discovery_enabled()) { | |
316 _rp->disable_discovery(); | |
317 } | |
318 } | |
319 | |
320 ~NoRefDiscovery() { | |
321 if (_was_discovering_refs) { | |
322 _rp->enable_discovery(); | |
323 } | |
324 } | |
325 }; | |
326 | |
327 | |
328 // A utility class to temporarily mutate the span of the | |
329 // given ReferenceProcessor in the scope that contains it. | |
330 class ReferenceProcessorSpanMutator: StackObj { | |
331 private: | |
332 ReferenceProcessor* _rp; | |
333 MemRegion _saved_span; | |
334 | |
335 public: | |
336 ReferenceProcessorSpanMutator(ReferenceProcessor* rp, | |
337 MemRegion span): | |
338 _rp(rp) { | |
339 _saved_span = _rp->span(); | |
340 _rp->set_span(span); | |
341 } | |
342 | |
343 ~ReferenceProcessorSpanMutator() { | |
344 _rp->set_span(_saved_span); | |
345 } | |
346 }; | |
347 | |
348 // A utility class to temporarily change the MT'ness of | |
349 // reference discovery for the given ReferenceProcessor | |
350 // in the scope that contains it. | |
351 class ReferenceProcessorMTMutator: StackObj { | |
352 private: | |
353 ReferenceProcessor* _rp; | |
354 bool _saved_mt; | |
355 | |
356 public: | |
357 ReferenceProcessorMTMutator(ReferenceProcessor* rp, | |
358 bool mt): | |
359 _rp(rp) { | |
360 _saved_mt = _rp->discovery_is_mt(); | |
361 _rp->set_mt_discovery(mt); | |
362 } | |
363 | |
364 ~ReferenceProcessorMTMutator() { | |
365 _rp->set_mt_discovery(_saved_mt); | |
366 } | |
367 }; | |
368 | |
369 | |
370 // A utility class to temporarily change the disposition | |
371 // of the "is_alive_non_header" closure field of the | |
372 // given ReferenceProcessor in the scope that contains it. | |
373 class ReferenceProcessorIsAliveMutator: StackObj { | |
374 private: | |
375 ReferenceProcessor* _rp; | |
376 BoolObjectClosure* _saved_cl; | |
377 | |
378 public: | |
379 ReferenceProcessorIsAliveMutator(ReferenceProcessor* rp, | |
380 BoolObjectClosure* cl): | |
381 _rp(rp) { | |
382 _saved_cl = _rp->is_alive_non_header(); | |
383 _rp->set_is_alive_non_header(cl); | |
384 } | |
385 | |
386 ~ReferenceProcessorIsAliveMutator() { | |
387 _rp->set_is_alive_non_header(_saved_cl); | |
388 } | |
389 }; | |
390 | |
391 // A utility class to temporarily change the disposition | |
392 // of the "discovery_is_atomic" field of the | |
393 // given ReferenceProcessor in the scope that contains it. | |
394 class ReferenceProcessorAtomicMutator: StackObj { | |
395 private: | |
396 ReferenceProcessor* _rp; | |
397 bool _saved_atomic_discovery; | |
398 | |
399 public: | |
400 ReferenceProcessorAtomicMutator(ReferenceProcessor* rp, | |
401 bool atomic): | |
402 _rp(rp) { | |
403 _saved_atomic_discovery = _rp->discovery_is_atomic(); | |
404 _rp->set_atomic_discovery(atomic); | |
405 } | |
406 | |
407 ~ReferenceProcessorAtomicMutator() { | |
408 _rp->set_atomic_discovery(_saved_atomic_discovery); | |
409 } | |
410 }; | |
411 | |
412 | |
413 // A utility class to temporarily change the MT processing | |
414 // disposition of the given ReferenceProcessor instance | |
415 // in the scope that contains it. | |
416 class ReferenceProcessorMTProcMutator: StackObj { | |
417 private: | |
418 ReferenceProcessor* _rp; | |
419 bool _saved_mt; | |
420 | |
421 public: | |
422 ReferenceProcessorMTProcMutator(ReferenceProcessor* rp, | |
423 bool mt): | |
424 _rp(rp) { | |
425 _saved_mt = _rp->processing_is_mt(); | |
426 _rp->set_mt_processing(mt); | |
427 } | |
428 | |
429 ~ReferenceProcessorMTProcMutator() { | |
430 _rp->set_mt_processing(_saved_mt); | |
431 } | |
432 }; | |
433 | |
434 | |
// This class is an interface used to implement task execution for the
// reference processing.
class AbstractRefProcTaskExecutor {
 public:

  // Abstract tasks to execute.
  class ProcessTask;
  class EnqueueTask;

  // Virtual destructor: this is a polymorphic interface, so deleting a
  // concrete executor through a base pointer must run the derived
  // destructor (otherwise undefined behavior).
  virtual ~AbstractRefProcTaskExecutor() { }

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task) = 0;
  virtual void execute(EnqueueTask& task) = 0;

  // Switch to single threaded mode.
  virtual void set_single_threaded_mode() { }
};
451 | |
452 // Abstract reference processing task to execute. | |
453 class AbstractRefProcTaskExecutor::ProcessTask { | |
454 protected: | |
455 ProcessTask(ReferenceProcessor& ref_processor, | |
456 DiscoveredList refs_lists[], | |
457 bool marks_oops_alive) | |
458 : _ref_processor(ref_processor), | |
459 _refs_lists(refs_lists), | |
460 _marks_oops_alive(marks_oops_alive) | |
461 { } | |
462 | |
463 public: | |
464 virtual void work(unsigned int work_id, BoolObjectClosure& is_alive, | |
465 OopClosure& keep_alive, | |
466 VoidClosure& complete_gc) = 0; | |
467 | |
468 // Returns true if a task marks some oops as alive. | |
469 bool marks_oops_alive() const | |
470 { return _marks_oops_alive; } | |
471 | |
472 protected: | |
473 ReferenceProcessor& _ref_processor; | |
474 DiscoveredList* _refs_lists; | |
475 const bool _marks_oops_alive; | |
476 }; | |
477 | |
478 // Abstract reference processing task to execute. | |
479 class AbstractRefProcTaskExecutor::EnqueueTask { | |
480 protected: | |
481 EnqueueTask(ReferenceProcessor& ref_processor, | |
482 DiscoveredList refs_lists[], | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
483 HeapWord* pending_list_addr, |
0 | 484 oop sentinel_ref, |
485 int n_queues) | |
486 : _ref_processor(ref_processor), | |
487 _refs_lists(refs_lists), | |
488 _pending_list_addr(pending_list_addr), | |
489 _sentinel_ref(sentinel_ref), | |
490 _n_queues(n_queues) | |
491 { } | |
492 | |
493 public: | |
494 virtual void work(unsigned int work_id) = 0; | |
495 | |
496 protected: | |
497 ReferenceProcessor& _ref_processor; | |
498 DiscoveredList* _refs_lists; | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
499 HeapWord* _pending_list_addr; |
0 | 500 oop _sentinel_ref; |
501 int _n_queues; | |
502 }; |