comparison src/share/vm/memory/referenceProcessor.hpp @ 3979:4dfb2df418f2

6484982: G1: process references during evacuation pauses
Summary: G1 now uses two reference processors - one is used by concurrent marking and the other is used by STW GCs (both full and incremental evacuation pauses). In an evacuation pause, the reference processor is embedded into the closures used to scan objects. Doing so causes reference objects to be 'discovered' by the reference processor. At the end of the evacuation pause, these discovered reference objects are processed - preserving (and copying) referent objects (and their reachable graphs) as appropriate.
Reviewed-by: ysr, jwilhelm, brutisso, stefank, tonyp
author johnc
date Thu, 22 Sep 2011 10:57:37 -0700
parents eca1193ca245
children d1bdeef3e3e2
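Before the diff itself, a note on the flow the summary describes. The sketch below is an illustration only: it assumes the ReferenceProcessor API visible in this diff (enable_discovery, setup_policy, disable_discovery) together with the long-standing discover_reference/process_discovered_references entry points; the function name and closures are hypothetical, not code from this changeset.

    // Hypothetical outline of an evacuation pause (illustrative only).
    void evacuation_pause(ReferenceProcessor* rp) {
      // Start STW discovery; assert discovery was off and the lists are empty.
      rp->enable_discovery(true /* verify_disabled */, true /* check_no_refs */);
      // ... evacuate roots and live objects; the copy closures call
      // rp->discover_reference(obj, ref_type) for each j.l.r.Reference
      // whose referent has not yet been marked/copied ...
      rp->setup_policy(false /* always_clear soft refs */);
      // Process what was discovered: keep or clear referents, copying
      // referents (and their reachable graphs) that must be preserved.
      // rp->process_discovered_references(&is_alive, &keep_alive, &drain, NULL);
      rp->disable_discovery();
    }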
@@ -46,41 +46,199 @@
 // (with appropriate modifications) to any "non-convex interval".
 
 // forward references
 class ReferencePolicy;
 class AbstractRefProcTaskExecutor;
-class DiscoveredList;
+
+// List of discovered references.
+class DiscoveredList {
+public:
+  DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
+  oop head() const {
+    return UseCompressedOops ? oopDesc::decode_heap_oop(_compressed_head) :
+                               _oop_head;
+  }
+  HeapWord* adr_head() {
+    return UseCompressedOops ? (HeapWord*)&_compressed_head :
+                               (HeapWord*)&_oop_head;
+  }
+  void set_head(oop o) {
+    if (UseCompressedOops) {
+      // Must compress the head ptr.
+      _compressed_head = oopDesc::encode_heap_oop(o);
+    } else {
+      _oop_head = o;
+    }
+  }
+  bool is_empty() const { return head() == NULL; }
+  size_t length() { return _len; }
+  void set_length(size_t len) { _len = len; }
+  void inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
+  void dec_length(size_t dec) { _len -= dec; }
+private:
+  // Set value depending on UseCompressedOops. This could be a template class
+  // but then we have to fix all the instantiations and declarations that use this class.
+  oop       _oop_head;
+  narrowOop _compressed_head;
+  size_t    _len;
+};
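One property of these lists worth noting here: the last element of a discovered list points back at itself, which is how move_to_next() in the iterator below detects end-of-list (its "_ref == _next" check). A hedged sketch of prepending a Reference to a list with the API above; the helper and the java_lang_ref_Reference accessor are assumptions for illustration, not part of this header:

    // Illustrative only; not code from this changeset.
    void prepend_to(DiscoveredList& refs_list, oop ref) {
      oop current_head = refs_list.head();
      // An empty list is terminated by a self-loop rather than NULL.
      oop next_discovered = (current_head != NULL) ? current_head : ref;
      java_lang_ref_Reference::set_discovered_raw(ref, next_discovered); // assumed accessor
      refs_list.set_head(ref);
      refs_list.inc_length(1);
    }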
+
+// Iterator for the list of discovered references.
+class DiscoveredListIterator {
+private:
+  DiscoveredList&    _refs_list;
+  HeapWord*          _prev_next;
+  oop                _prev;
+  oop                _ref;
+  HeapWord*          _discovered_addr;
+  oop                _next;
+  HeapWord*          _referent_addr;
+  oop                _referent;
+  OopClosure*        _keep_alive;
+  BoolObjectClosure* _is_alive;
+
+  DEBUG_ONLY(
+  oop                _first_seen; // cyclic linked list check
+  )
+
+  NOT_PRODUCT(
+  size_t             _processed;
+  size_t             _removed;
+  )
+
+public:
+  inline DiscoveredListIterator(DiscoveredList&    refs_list,
+                                OopClosure*        keep_alive,
+                                BoolObjectClosure* is_alive):
+    _refs_list(refs_list),
+    _prev_next(refs_list.adr_head()),
+    _prev(NULL),
+    _ref(refs_list.head()),
+#ifdef ASSERT
+    _first_seen(refs_list.head()),
+#endif
+#ifndef PRODUCT
+    _processed(0),
+    _removed(0),
+#endif
+    _next(NULL),
+    _keep_alive(keep_alive),
+    _is_alive(is_alive)
+  { }
+
+  // End Of List.
+  inline bool has_next() const { return _ref != NULL; }
+
+  // Get oop to the Reference object.
+  inline oop obj() const { return _ref; }
+
+  // Get oop to the referent object.
+  inline oop referent() const { return _referent; }
+
+  // Returns true if referent is alive.
+  inline bool is_referent_alive() const {
+    return _is_alive->do_object_b(_referent);
+  }
+
+  // Loads data for the current reference.
+  // The "allow_null_referent" argument tells us to allow for the possibility
+  // of a NULL referent in the discovered Reference object. This typically
+  // happens in the case of concurrent collectors that may have done the
+  // discovery concurrently, or interleaved, with mutator execution.
+  void load_ptrs(DEBUG_ONLY(bool allow_null_referent));
+
+  // Move to the next discovered reference.
+  inline void next() {
+    _prev_next = _discovered_addr;
+    _prev = _ref;
+    move_to_next();
+  }
+
+  // Remove the current reference from the list
+  void remove();
+
+  // Make the Reference object active again.
+  void make_active();
+
+  // Make the referent alive.
+  inline void make_referent_alive() {
+    if (UseCompressedOops) {
+      _keep_alive->do_oop((narrowOop*)_referent_addr);
+    } else {
+      _keep_alive->do_oop((oop*)_referent_addr);
+    }
+  }
+
+  // Update the discovered field.
+  inline void update_discovered() {
+    // First _prev_next ref actually points into DiscoveredList (gross).
+    if (UseCompressedOops) {
+      if (!oopDesc::is_null(*(narrowOop*)_prev_next)) {
+        _keep_alive->do_oop((narrowOop*)_prev_next);
+      }
+    } else {
+      if (!oopDesc::is_null(*(oop*)_prev_next)) {
+        _keep_alive->do_oop((oop*)_prev_next);
+      }
+    }
+  }
+
+  // NULL out referent pointer.
+  void clear_referent();
+
+  // Statistics
+  NOT_PRODUCT(
+  inline size_t processed() const { return _processed; }
+  inline size_t removed() const   { return _removed; }
+  )
+
+  inline void move_to_next() {
+    if (_ref == _next) {
+      // End of the list.
+      _ref = NULL;
+    } else {
+      _ref = _next;
+    }
+    assert(_ref != _first_seen, "cyclic ref_list found");
+    NOT_PRODUCT(_processed++);
+  }
+
+};
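This iterator is built for a filter-and-unlink traversal of a discovered list. A hedged sketch of that idiom, patterned on how the processing phases in the matching .cpp typically use it (closure setup and list selection omitted; not copied from this diff):

    // Illustrative traversal of one discovered list.
    DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
    while (iter.has_next()) {
      iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
      if (iter.is_referent_alive()) {
        // The referent is reachable after all: unlink the Reference from
        // the discovered list and keep the referent (and its graph) alive.
        iter.remove();
        iter.make_referent_alive();
        iter.move_to_next();
      } else {
        iter.next();
      }
    }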
 
 class ReferenceProcessor : public CHeapObj {
  protected:
   // Compatibility with pre-4965777 JDK's
   static bool _pending_list_uses_discovered_field;
-  MemRegion   _span;                // (right-open) interval of heap
-                                    // subject to wkref discovery
-  bool        _discovering_refs;    // true when discovery enabled
-  bool        _discovery_is_atomic; // if discovery is atomic wrt
-                                    // other collectors in configuration
-  bool        _discovery_is_mt;     // true if reference discovery is MT.
+
+  MemRegion   _span;                // (right-open) interval of heap
+                                    // subject to wkref discovery
+
+  bool        _discovering_refs;    // true when discovery enabled
+  bool        _discovery_is_atomic; // if discovery is atomic wrt
+                                    // other collectors in configuration
+  bool        _discovery_is_mt;     // true if reference discovery is MT.
+
   // If true, setting "next" field of a discovered refs list requires
   // write barrier(s). (Must be true if used in a collector in which
   // elements of a discovered list may be moved during discovery: for
   // example, a collector like Garbage-First that moves objects during a
   // long-term concurrent marking phase that does weak reference
   // discovery.)
   bool        _discovered_list_needs_barrier;
-  BarrierSet* _bs;                  // Cached copy of BarrierSet.
-  bool        _enqueuing_is_done;   // true if all weak references enqueued
-  bool        _processing_is_mt;    // true during phases when
-                                    // reference processing is MT.
-  int         _next_id;             // round-robin mod _num_q counter in
-                                    // support of work distribution
-
-  // For collectors that do not keep GC marking information
+
+  BarrierSet* _bs;                  // Cached copy of BarrierSet.
+  bool        _enqueuing_is_done;   // true if all weak references enqueued
+  bool        _processing_is_mt;    // true during phases when
+                                    // reference processing is MT.
+  int         _next_id;             // round-robin mod _num_q counter in
+                                    // support of work distribution
+
+  // For collectors that do not keep GC liveness information
   // in the object header, this field holds a closure that
   // helps the reference processor determine the reachability
-  // of an oop (the field is currently initialized to NULL for
-  // all collectors but the CMS collector).
+  // of an oop. It is currently initialized to NULL for all
+  // collectors except for CMS and G1.
   BoolObjectClosure* _is_alive_non_header;
 
   // Soft ref clearing policies
   // . the default policy
   static ReferencePolicy* _default_soft_ref_policy;
@@ -100,14 +258,17 @@
   DiscoveredList* _discoveredWeakRefs;
   DiscoveredList* _discoveredFinalRefs;
   DiscoveredList* _discoveredPhantomRefs;
 
  public:
-  int num_q()                            { return _num_q; }
-  int max_num_q()                        { return _max_num_q; }
-  void set_active_mt_degree(int v)       { _num_q = v; }
-  DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; }
+  static int number_of_subclasses_of_ref() { return (REF_PHANTOM - REF_OTHER); }
+
+  int num_q()                              { return _num_q; }
+  int max_num_q()                          { return _max_num_q; }
+  void set_active_mt_degree(int v)         { _num_q = v; }
+  DiscoveredList* discovered_soft_refs()   { return _discoveredSoftRefs; }
+
   ReferencePolicy* setup_policy(bool always_clear) {
     _current_soft_ref_policy = always_clear ?
       _always_clear_soft_ref_policy : _default_soft_ref_policy;
     _current_soft_ref_policy->setup();   // snapshot the policy threshold
     return _current_soft_ref_policy;
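Usage note (hedged): a collector typically snapshots the soft-reference policy once per collection cycle, before reference processing begins, passing whether all SoftReferences must be cleared. An illustrative call site in collector code (names assumed):

    ReferencePolicy* policy = ref_processor()->setup_policy(clear_all_soft_refs);
    // 'policy' is then consulted while the SoftReference lists are processed.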
@@ -203,10 +364,15 @@
   const char* list_name(int i);
 
   void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);
 
  protected:
+  // Set the 'discovered' field of the given reference to
+  // the given value - emitting barriers depending upon
+  // the value of _discovered_list_needs_barrier.
+  void set_discovered(oop ref, oop value);
+
   // "Preclean" the given discovered reference list
   // by removing references with strongly reachable referents.
   // Currently used in support of CMS only.
   void preclean_discovered_reflist(DiscoveredList&    refs_list,
                                    BoolObjectClosure* is_alive,
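The new set_discovered() declared above funnels all stores to a Reference's discovered field through one place, so the barrier decision is made once. A hedged sketch of a plausible implementation switching on _discovered_list_needs_barrier; the java_lang_ref_Reference accessors are assumptions here, as the .cpp side is not part of this hunk:

    // Illustrative only; assumes barriered/raw accessors exist.
    void ReferenceProcessor::set_discovered(oop ref, oop value) {
      if (_discovered_list_needs_barrier) {
        // Barriered store, so a collector such as G1 that moves objects
        // or scans concurrently observes a consistent discovered list.
        java_lang_ref_Reference::set_discovered(ref, value);
      } else {
        // A raw store suffices when no concurrent phase can observe it.
        java_lang_ref_Reference::set_discovered_raw(ref, value);
      }
    }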
@@ -288,11 +454,23 @@
   // get and set span
   MemRegion span()                   { return _span; }
   void      set_span(MemRegion span) { _span = span; }
 
   // start and stop weak ref discovery
-  void enable_discovery()  { _discovering_refs = true; }
+  void enable_discovery(bool verify_disabled, bool check_no_refs) {
+#ifdef ASSERT
+    // Verify that we're not currently discovering refs
+    assert(!verify_disabled || !_discovering_refs, "nested call?");
+
+    if (check_no_refs) {
+      // Verify that the discovered lists are empty
+      verify_no_references_recorded();
+    }
+#endif // ASSERT
+    _discovering_refs = true;
+  }
+
   void disable_discovery() { _discovering_refs = false; }
   bool discovery_enabled() { return _discovering_refs; }
 
   // whether discovery is atomic wrt other collectors
   bool discovery_is_atomic() const { return _discovery_is_atomic; }
@@ -363,11 +541,11 @@
     }
   }
 
   ~NoRefDiscovery() {
     if (_was_discovering_refs) {
-      _rp->enable_discovery();
+      _rp->enable_discovery(true /*verify_disabled*/, false /*check_no_refs*/);
     }
   }
 };
 
 
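A hedged usage sketch for the NoRefDiscovery guard above; its constructor sits outside this hunk, so the behavior is inferred from the destructor shown:

    {
      // Suspend discovery for the duration of this scope.
      NoRefDiscovery no_discovery(ref_processor);
      // ... work during which references must not be discovered ...
    } // destructor re-enables discovery iff it was enabled on entry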