Mercurial > hg > graal-compiler
annotate src/share/vm/memory/referenceProcessor.hpp @ 1190:4788266644c1
6895236: CMS: cmsOopClosures.inline.hpp:43 assert(..., "Should remember klasses in this context")
Summary: Adjust assertion checking for ExplicitGCInvokesConcurrentAndUnloadsClasses as a reason for class unloading
Reviewed-by: ysr
author | jmasa |
---|---|
date | Thu, 21 Jan 2010 11:33:32 -0800 |
parents | 27a80744a83b |
children | 745c853ee57f |
rev | line source |
---|---|
0 | 1 /* |
196 | 2 * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, | |
20 * CA 95054 USA or visit www.sun.com if you need additional information or | |
21 * have any questions. | |
22 * | |
23 */ | |
24 | |
25 // ReferenceProcessor class encapsulates the per-"collector" processing | |
453
c96030fff130
6684579: SoftReference processing can be made more efficient
ysr
parents:
356
diff
changeset
|
26 // of java.lang.Reference objects for GC. The interface is useful for supporting |
0 | 27 // a generational abstraction, in particular when there are multiple |
28 // generations that are being independently collected -- possibly | |
29 // concurrently and/or incrementally. Note, however, that the | |
30 // ReferenceProcessor class abstracts away from a generational setting | |
31 // by using only a heap interval (called "span" below), thus allowing | |
32 // its use in a straightforward manner in a general, non-generational | |
33 // setting. | |
34 // | |
35 // The basic idea is that each ReferenceProcessor object concerns | |
36 // itself with ("weak") reference processing in a specific "span" | |
37 // of the heap of interest to a specific collector. Currently, | |
38 // the span is a convex interval of the heap, but, efficiency | |
39 // apart, there seems to be no reason it couldn't be extended | |
40 // (with appropriate modifications) to any "non-convex interval". | |
41 | |
42 // forward references | |
43 class ReferencePolicy; | |
44 class AbstractRefProcTaskExecutor; | |
45 class DiscoveredList; | |
46 | |
47 class ReferenceProcessor : public CHeapObj { | |
48 protected: | |
49 // End of list marker | |
50 static oop _sentinelRef; | |
51 MemRegion _span; // (right-open) interval of heap | |
52 // subject to wkref discovery | |
53 bool _discovering_refs; // true when discovery enabled | |
54 bool _discovery_is_atomic; // if discovery is atomic wrt | |
55 // other collectors in configuration | |
56 bool _discovery_is_mt; // true if reference discovery is MT. | |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
57 // If true, setting "next" field of a discovered refs list requires |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
58 // write barrier(s). (Must be true if used in a collector in which |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
59 // elements of a discovered list may be moved during discovery: for |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
60 // example, a collector like Garbage-First that moves objects during a |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
61 // long-term concurrent marking phase that does weak reference |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
62 // discovery.) |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
63 bool _discovered_list_needs_barrier; |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
64 BarrierSet* _bs; // Cached copy of BarrierSet. |
0 | 65 bool _enqueuing_is_done; // true if all weak references enqueued |
66 bool _processing_is_mt; // true during phases when | |
67 // reference processing is MT. | |
68 int _next_id; // round-robin counter in | |
69 // support of work distribution | |
70 | |
71 // For collectors that do not keep GC marking information | |
72 // in the object header, this field holds a closure that | |
73 // helps the reference processor determine the reachability | |
74 // of an oop (the field is currently initialized to NULL for | |
75 // all collectors but the CMS collector). | |
76 BoolObjectClosure* _is_alive_non_header; | |
77 | |
453
c96030fff130
6684579: SoftReference processing can be made more efficient
ysr
parents:
356
diff
changeset
|
78 // Soft ref clearing policies |
c96030fff130
6684579: SoftReference processing can be made more efficient
ysr
parents:
356
diff
changeset
|
79 // . the default policy |
c96030fff130
6684579: SoftReference processing can be made more efficient
ysr
parents:
356
diff
changeset
|
80 static ReferencePolicy* _default_soft_ref_policy; |
c96030fff130
6684579: SoftReference processing can be made more efficient
ysr
parents:
356
diff
changeset
|
81 // . the "clear all" policy |
c96030fff130
6684579: SoftReference processing can be made more efficient
ysr
parents:
356
diff
changeset
|
82 static ReferencePolicy* _always_clear_soft_ref_policy; |
c96030fff130
6684579: SoftReference processing can be made more efficient
ysr
parents:
356
diff
changeset
|
83 // . the current policy below is either one of the above |
c96030fff130
6684579: SoftReference processing can be made more efficient
ysr
parents:
356
diff
changeset
|
84 ReferencePolicy* _current_soft_ref_policy; |
c96030fff130
6684579: SoftReference processing can be made more efficient
ysr
parents:
356
diff
changeset
|
85 |
0 | 86 // The discovered ref lists themselves |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
87 |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
88 // The MT'ness degree of the queues below |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
89 int _num_q; |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
90 // Arrays of lists of oops, one per thread |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
91 DiscoveredList* _discoveredSoftRefs; |
0 | 92 DiscoveredList* _discoveredWeakRefs; |
93 DiscoveredList* _discoveredFinalRefs; | |
94 DiscoveredList* _discoveredPhantomRefs; | |
95 | |
96 public: | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
97 int num_q() { return _num_q; } |
0 | 98 DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; } |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
99 static oop sentinel_ref() { return _sentinelRef; } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
100 static oop* adr_sentinel_ref() { return &_sentinelRef; } |
457
27a80744a83b
6778647: snap(), snap_policy() should be renamed setup(), setup_policy()
ysr
parents:
453
diff
changeset
|
101 ReferencePolicy* setup_policy(bool always_clear) { |
453
c96030fff130
6684579: SoftReference processing can be made more efficient
ysr
parents:
356
diff
changeset
|
102 _current_soft_ref_policy = always_clear ? |
c96030fff130
6684579: SoftReference processing can be made more efficient
ysr
parents:
356
diff
changeset
|
103 _always_clear_soft_ref_policy : _default_soft_ref_policy; |
457
27a80744a83b
6778647: snap(), snap_policy() should be renamed setup(), setup_policy()
ysr
parents:
453
diff
changeset
|
104 _current_soft_ref_policy->setup(); // snapshot the policy threshold |
453
c96030fff130
6684579: SoftReference processing can be made more efficient
ysr
parents:
356
diff
changeset
|
105 return _current_soft_ref_policy; |
c96030fff130
6684579: SoftReference processing can be made more efficient
ysr
parents:
356
diff
changeset
|
106 } |
0 | 107 |
108 public: | |
109 // Process references with a certain reachability level. | |
110 void process_discovered_reflist(DiscoveredList refs_lists[], | |
111 ReferencePolicy* policy, | |
112 bool clear_referent, | |
113 BoolObjectClosure* is_alive, | |
114 OopClosure* keep_alive, | |
115 VoidClosure* complete_gc, | |
116 AbstractRefProcTaskExecutor* task_executor); | |
117 | |
118 void process_phaseJNI(BoolObjectClosure* is_alive, | |
119 OopClosure* keep_alive, | |
120 VoidClosure* complete_gc); | |
121 | |
122 // Work methods used by the method process_discovered_reflist | |
123 // Phase1: keep alive all those referents that are otherwise | |
124 // dead but which must be kept alive by policy (and their closure). | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
125 void process_phase1(DiscoveredList& refs_list, |
0 | 126 ReferencePolicy* policy, |
127 BoolObjectClosure* is_alive, | |
128 OopClosure* keep_alive, | |
129 VoidClosure* complete_gc); | |
130 // Phase2: remove all those references whose referents are | |
131 // reachable. | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
132 inline void process_phase2(DiscoveredList& refs_list, |
0 | 133 BoolObjectClosure* is_alive, |
134 OopClosure* keep_alive, | |
135 VoidClosure* complete_gc) { | |
136 if (discovery_is_atomic()) { | |
137 // complete_gc is ignored in this case for this phase | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
138 pp2_work(refs_list, is_alive, keep_alive); |
0 | 139 } else { |
140 assert(complete_gc != NULL, "Error"); | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
141 pp2_work_concurrent_discovery(refs_list, is_alive, |
0 | 142 keep_alive, complete_gc); |
143 } | |
144 } | |
145 // Work methods in support of process_phase2 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
146 void pp2_work(DiscoveredList& refs_list, |
0 | 147 BoolObjectClosure* is_alive, |
148 OopClosure* keep_alive); | |
149 void pp2_work_concurrent_discovery( | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
150 DiscoveredList& refs_list, |
0 | 151 BoolObjectClosure* is_alive, |
152 OopClosure* keep_alive, | |
153 VoidClosure* complete_gc); | |
154 // Phase3: process the referents by either clearing them | |
155 // or keeping them alive (and their closure) | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
156 void process_phase3(DiscoveredList& refs_list, |
0 | 157 bool clear_referent, |
158 BoolObjectClosure* is_alive, | |
159 OopClosure* keep_alive, | |
160 VoidClosure* complete_gc); | |
161 | |
162 // Enqueue references with a certain reachability level | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
163 void enqueue_discovered_reflist(DiscoveredList& refs_list, HeapWord* pending_list_addr); |
0 | 164 |
165 // "Preclean" all the discovered reference lists | |
166 // by removing references with strongly reachable referents. | |
167 // The first argument is a predicate on an oop that indicates | |
168 // its (strong) reachability and the second is a closure that | |
169 // may be used to incrementalize or abort the precleaning process. | |
170 // The caller is responsible for taking care of potential | |
171 // interference with concurrent operations on these lists | |
172 // (or predicates involved) by other threads. Currently | |
1190
4788266644c1
6895236: CMS: cmsOopClosures.inline.hpp:43 assert(..., "Should remember klasses in this context")
jmasa
parents:
457
diff
changeset
|
173 // only used by the CMS collector. should_unload_classes is |
4788266644c1
6895236: CMS: cmsOopClosures.inline.hpp:43 assert(..., "Should remember klasses in this context")
jmasa
parents:
457
diff
changeset
|
174 // used to aid assertion checking when classes are collected. |
0 | 175 void preclean_discovered_references(BoolObjectClosure* is_alive, |
176 OopClosure* keep_alive, | |
177 VoidClosure* complete_gc, | |
1190
4788266644c1
6895236: CMS: cmsOopClosures.inline.hpp:43 assert(..., "Should remember klasses in this context")
jmasa
parents:
457
diff
changeset
|
178 YieldClosure* yield, |
4788266644c1
6895236: CMS: cmsOopClosures.inline.hpp:43 assert(..., "Should remember klasses in this context")
jmasa
parents:
457
diff
changeset
|
179 bool should_unload_classes); |
0 | 180 |
181 // Delete entries in the discovered lists that have | |
182 // either a null referent or are not active. Such | |
183 // Reference objects can result from the clearing | |
184 // or enqueueing of Reference objects concurrent | |
185 // with their discovery by a (concurrent) collector. | |
186 // For a definition of "active" see java.lang.ref.Reference; | |
187 // Refs are born active, become inactive when enqueued, | |
188 // and never become active again. The state of being | |
189 // active is encoded as follows: A Ref is active | |
190 // if and only if its "next" field is NULL. | |
191 void clean_up_discovered_references(); | |
192 void clean_up_discovered_reflist(DiscoveredList& refs_list); | |
193 | |
194 // Returns the name of the discovered reference list | |
195 // occupying the i / _num_q slot. | |
196 const char* list_name(int i); | |
197 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
198 void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
199 |
0 | 200 protected: |
201 // "Preclean" the given discovered reference list | |
202 // by removing references with strongly reachable referents. | |
203 // Currently used in support of CMS only. | |
204 void preclean_discovered_reflist(DiscoveredList& refs_list, | |
205 BoolObjectClosure* is_alive, | |
206 OopClosure* keep_alive, | |
207 VoidClosure* complete_gc, | |
208 YieldClosure* yield); | |
209 | |
210 int next_id() { | |
211 int id = _next_id; | |
212 if (++_next_id == _num_q) { | |
213 _next_id = 0; | |
214 } | |
215 return id; | |
216 } | |
217 DiscoveredList* get_discovered_list(ReferenceType rt); | |
218 inline void add_to_discovered_list_mt(DiscoveredList& refs_list, oop obj, | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
219 HeapWord* discovered_addr); |
0 | 220 void verify_ok_to_handle_reflists() PRODUCT_RETURN; |
221 | |
222 void abandon_partial_discovered_list(DiscoveredList& refs_list); | |
223 | |
224 // Calculate the number of jni handles. | |
225 unsigned int count_jni_refs(); | |
226 | |
227 // Balances reference queues. | |
228 void balance_queues(DiscoveredList ref_lists[]); | |
229 | |
230 // Update (advance) the soft ref master clock field. | |
231 void update_soft_ref_master_clock(); | |
232 | |
233 public: | |
234 // constructor | |
235 ReferenceProcessor(): | |
236 _span((HeapWord*)NULL, (HeapWord*)NULL), | |
237 _discoveredSoftRefs(NULL), _discoveredWeakRefs(NULL), | |
238 _discoveredFinalRefs(NULL), _discoveredPhantomRefs(NULL), | |
239 _discovering_refs(false), | |
240 _discovery_is_atomic(true), | |
241 _enqueuing_is_done(false), | |
242 _discovery_is_mt(false), | |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
243 _discovered_list_needs_barrier(false), |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
244 _bs(NULL), |
0 | 245 _is_alive_non_header(NULL), |
246 _num_q(0), | |
247 _processing_is_mt(false), | |
248 _next_id(0) | |
249 {} | |
250 | |
251 ReferenceProcessor(MemRegion span, bool atomic_discovery, | |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
252 bool mt_discovery, |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
253 int mt_degree = 1, |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
254 bool mt_processing = false, |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
255 bool discovered_list_needs_barrier = false); |
0 | 256 |
257 // Allocates and initializes a reference processor. | |
258 static ReferenceProcessor* create_ref_processor( | |
259 MemRegion span, | |
260 bool atomic_discovery, | |
261 bool mt_discovery, | |
262 BoolObjectClosure* is_alive_non_header = NULL, | |
263 int parallel_gc_threads = 1, | |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
264 bool mt_processing = false, |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
265 bool discovered_list_needs_barrier = false); |
0 | 266 // RefDiscoveryPolicy values |
267 enum { | |
268 ReferenceBasedDiscovery = 0, | |
269 ReferentBasedDiscovery = 1 | |
270 }; | |
271 | |
272 static void init_statics(); | |
273 | |
274 public: | |
275 // get and set "is_alive_non_header" field | |
276 BoolObjectClosure* is_alive_non_header() { | |
277 return _is_alive_non_header; | |
278 } | |
279 void set_is_alive_non_header(BoolObjectClosure* is_alive_non_header) { | |
280 _is_alive_non_header = is_alive_non_header; | |
281 } | |
282 | |
283 // get and set span | |
284 MemRegion span() { return _span; } | |
285 void set_span(MemRegion span) { _span = span; } | |
286 | |
287 // start and stop weak ref discovery | |
288 void enable_discovery() { _discovering_refs = true; } | |
289 void disable_discovery() { _discovering_refs = false; } | |
290 bool discovery_enabled() { return _discovering_refs; } | |
291 | |
292 // whether discovery is atomic wrt other collectors | |
293 bool discovery_is_atomic() const { return _discovery_is_atomic; } | |
294 void set_atomic_discovery(bool atomic) { _discovery_is_atomic = atomic; } | |
295 | |
296 // whether discovery is done by multiple threads same-old-timeously | |
297 bool discovery_is_mt() const { return _discovery_is_mt; } | |
298 void set_mt_discovery(bool mt) { _discovery_is_mt = mt; } | |
299 | |
300 // Whether we are in a phase when _processing_ is MT. | |
301 bool processing_is_mt() const { return _processing_is_mt; } | |
302 void set_mt_processing(bool mt) { _processing_is_mt = mt; } | |
303 | |
304 // whether all enqueuing of weak references is complete | |
305 bool enqueuing_is_done() { return _enqueuing_is_done; } | |
306 void set_enqueuing_is_done(bool v) { _enqueuing_is_done = v; } | |
307 | |
308 // iterate over oops | |
309 void weak_oops_do(OopClosure* f); // weak roots | |
310 static void oops_do(OopClosure* f); // strong root(s) | |
311 | |
312 // Discover a Reference object, using appropriate discovery criteria | |
313 bool discover_reference(oop obj, ReferenceType rt); | |
314 | |
315 // Process references found during GC (called by the garbage collector) | |
453
c96030fff130
6684579: SoftReference processing can be made more efficient
ysr
parents:
356
diff
changeset
|
316 void process_discovered_references(BoolObjectClosure* is_alive, |
0 | 317 OopClosure* keep_alive, |
318 VoidClosure* complete_gc, | |
319 AbstractRefProcTaskExecutor* task_executor); | |
320 | |
321 public: | |
322 // Enqueue references at end of GC (called by the garbage collector) | |
323 bool enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor = NULL); | |
324 | |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
325 // If a discovery is in process that is being superseded, abandon it: all |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
326 // the discovered lists will be empty, and all the objects on them will |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
327 // have NULL discovered fields. Must be called only at a safepoint. |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
328 void abandon_partial_discovery(); |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
329 |
0 | 330 // debugging |
331 void verify_no_references_recorded() PRODUCT_RETURN; | |
332 static void verify(); | |
333 | |
334 // clear the discovered lists (unlinking each entry). | |
335 void clear_discovered_references() PRODUCT_RETURN; | |
336 }; | |
337 | |
338 // A utility class to disable reference discovery in | |
339 // the scope which contains it, for given ReferenceProcessor. | |
340 class NoRefDiscovery: StackObj { | |
341 private: | |
342 ReferenceProcessor* _rp; | |
343 bool _was_discovering_refs; | |
344 public: | |
345 NoRefDiscovery(ReferenceProcessor* rp) : _rp(rp) { | |
346 if (_was_discovering_refs = _rp->discovery_enabled()) { | |
347 _rp->disable_discovery(); | |
348 } | |
349 } | |
350 | |
351 ~NoRefDiscovery() { | |
352 if (_was_discovering_refs) { | |
353 _rp->enable_discovery(); | |
354 } | |
355 } | |
356 }; | |
357 | |
358 | |
359 // A utility class to temporarily mutate the span of the | |
360 // given ReferenceProcessor in the scope that contains it. | |
361 class ReferenceProcessorSpanMutator: StackObj { | |
362 private: | |
363 ReferenceProcessor* _rp; | |
364 MemRegion _saved_span; | |
365 | |
366 public: | |
367 ReferenceProcessorSpanMutator(ReferenceProcessor* rp, | |
368 MemRegion span): | |
369 _rp(rp) { | |
370 _saved_span = _rp->span(); | |
371 _rp->set_span(span); | |
372 } | |
373 | |
374 ~ReferenceProcessorSpanMutator() { | |
375 _rp->set_span(_saved_span); | |
376 } | |
377 }; | |
378 | |
379 // A utility class to temporarily change the MT'ness of | |
380 // reference discovery for the given ReferenceProcessor | |
381 // in the scope that contains it. | |
382 class ReferenceProcessorMTMutator: StackObj { | |
383 private: | |
384 ReferenceProcessor* _rp; | |
385 bool _saved_mt; | |
386 | |
387 public: | |
388 ReferenceProcessorMTMutator(ReferenceProcessor* rp, | |
389 bool mt): | |
390 _rp(rp) { | |
391 _saved_mt = _rp->discovery_is_mt(); | |
392 _rp->set_mt_discovery(mt); | |
393 } | |
394 | |
395 ~ReferenceProcessorMTMutator() { | |
396 _rp->set_mt_discovery(_saved_mt); | |
397 } | |
398 }; | |
399 | |
400 | |
401 // A utility class to temporarily change the disposition | |
402 // of the "is_alive_non_header" closure field of the | |
403 // given ReferenceProcessor in the scope that contains it. | |
404 class ReferenceProcessorIsAliveMutator: StackObj { | |
405 private: | |
406 ReferenceProcessor* _rp; | |
407 BoolObjectClosure* _saved_cl; | |
408 | |
409 public: | |
410 ReferenceProcessorIsAliveMutator(ReferenceProcessor* rp, | |
411 BoolObjectClosure* cl): | |
412 _rp(rp) { | |
413 _saved_cl = _rp->is_alive_non_header(); | |
414 _rp->set_is_alive_non_header(cl); | |
415 } | |
416 | |
417 ~ReferenceProcessorIsAliveMutator() { | |
418 _rp->set_is_alive_non_header(_saved_cl); | |
419 } | |
420 }; | |
421 | |
422 // A utility class to temporarily change the disposition | |
423 // of the "discovery_is_atomic" field of the | |
424 // given ReferenceProcessor in the scope that contains it. | |
425 class ReferenceProcessorAtomicMutator: StackObj { | |
426 private: | |
427 ReferenceProcessor* _rp; | |
428 bool _saved_atomic_discovery; | |
429 | |
430 public: | |
431 ReferenceProcessorAtomicMutator(ReferenceProcessor* rp, | |
432 bool atomic): | |
433 _rp(rp) { | |
434 _saved_atomic_discovery = _rp->discovery_is_atomic(); | |
435 _rp->set_atomic_discovery(atomic); | |
436 } | |
437 | |
438 ~ReferenceProcessorAtomicMutator() { | |
439 _rp->set_atomic_discovery(_saved_atomic_discovery); | |
440 } | |
441 }; | |
442 | |
443 | |
444 // A utility class to temporarily change the MT processing | |
445 // disposition of the given ReferenceProcessor instance | |
446 // in the scope that contains it. | |
447 class ReferenceProcessorMTProcMutator: StackObj { | |
448 private: | |
449 ReferenceProcessor* _rp; | |
450 bool _saved_mt; | |
451 | |
452 public: | |
453 ReferenceProcessorMTProcMutator(ReferenceProcessor* rp, | |
454 bool mt): | |
455 _rp(rp) { | |
456 _saved_mt = _rp->processing_is_mt(); | |
457 _rp->set_mt_processing(mt); | |
458 } | |
459 | |
460 ~ReferenceProcessorMTProcMutator() { | |
461 _rp->set_mt_processing(_saved_mt); | |
462 } | |
463 }; | |
464 | |
465 | |
466 // This class is an interface used to implement task execution for the | |
467 // reference processing. | |
468 class AbstractRefProcTaskExecutor { | |
469 public: | |
470 | |
471 // Abstract tasks to execute. | |
472 class ProcessTask; | |
473 class EnqueueTask; | |
474 | |
475 // Executes a task using worker threads. | |
476 virtual void execute(ProcessTask& task) = 0; | |
477 virtual void execute(EnqueueTask& task) = 0; | |
478 | |
479 // Switch to single threaded mode. | |
480 virtual void set_single_threaded_mode() { }; | |
481 }; | |
482 | |
483 // Abstract reference processing task to execute. | |
484 class AbstractRefProcTaskExecutor::ProcessTask { | |
485 protected: | |
486 ProcessTask(ReferenceProcessor& ref_processor, | |
487 DiscoveredList refs_lists[], | |
488 bool marks_oops_alive) | |
489 : _ref_processor(ref_processor), | |
490 _refs_lists(refs_lists), | |
491 _marks_oops_alive(marks_oops_alive) | |
492 { } | |
493 | |
494 public: | |
495 virtual void work(unsigned int work_id, BoolObjectClosure& is_alive, | |
496 OopClosure& keep_alive, | |
497 VoidClosure& complete_gc) = 0; | |
498 | |
499 // Returns true if a task marks some oops as alive. | |
500 bool marks_oops_alive() const | |
501 { return _marks_oops_alive; } | |
502 | |
503 protected: | |
504 ReferenceProcessor& _ref_processor; | |
505 DiscoveredList* _refs_lists; | |
506 const bool _marks_oops_alive; | |
507 }; | |
508 | |
509 // Abstract reference processing task to execute. | |
510 class AbstractRefProcTaskExecutor::EnqueueTask { | |
511 protected: | |
512 EnqueueTask(ReferenceProcessor& ref_processor, | |
513 DiscoveredList refs_lists[], | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
514 HeapWord* pending_list_addr, |
0 | 515 oop sentinel_ref, |
516 int n_queues) | |
517 : _ref_processor(ref_processor), | |
518 _refs_lists(refs_lists), | |
519 _pending_list_addr(pending_list_addr), | |
520 _sentinel_ref(sentinel_ref), | |
521 _n_queues(n_queues) | |
522 { } | |
523 | |
524 public: | |
525 virtual void work(unsigned int work_id) = 0; | |
526 | |
527 protected: | |
528 ReferenceProcessor& _ref_processor; | |
529 DiscoveredList* _refs_lists; | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
530 HeapWord* _pending_list_addr; |
0 | 531 oop _sentinel_ref; |
532 int _n_queues; | |
533 }; |