comparison: src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp @ 935:05f89f00a864

6798898: CMS: bugs related to class unloading
Summary: Override should_remember_klasses() and remember_klass() as needed.
Reviewed-by: ysr, jcoomes

author:   jmasa
date:     Mon, 24 Aug 2009 10:36:31 -0700
parents:  00b023ae2d78
children: 8b46c4d82093
comparing 912:308762b2bf14 with 935:05f89f00a864

--- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp
@@ -90,21 +90,54 @@
   Prefetch::style prefetch_style() {
     return Prefetch::do_read;
   }
 };
 
+// KlassRememberingOopClosure is used when marking of the permanent generation
+// is being done. It adds fields to support revisiting of klasses
+// for class unloading. _should_remember_klasses should be set to
+// indicate if klasses should be remembered. Currently that is whenever
+// CMS class unloading is turned on. The _revisit_stack is used
+// to save the klasses for later processing.
+class KlassRememberingOopClosure : public OopClosure {
+ protected:
+  CMSCollector* _collector;
+  CMSMarkStack* _revisit_stack;
+  bool const    _should_remember_klasses;
+ public:
+  void check_remember_klasses() const PRODUCT_RETURN;
+  virtual const bool should_remember_klasses() const {
+    check_remember_klasses();
+    return _should_remember_klasses;
+  }
+  virtual void remember_klass(Klass* k);
+
+  KlassRememberingOopClosure(CMSCollector* collector,
+                             ReferenceProcessor* rp,
+                             CMSMarkStack* revisit_stack);
+};
+
+// Similar to KlassRememberingOopClosure, for use when multiple
+// GC threads will execute the closure.
+
+class Par_KlassRememberingOopClosure : public KlassRememberingOopClosure {
+ public:
+  Par_KlassRememberingOopClosure(CMSCollector* collector,
+                                 ReferenceProcessor* rp,
+                                 CMSMarkStack* revisit_stack):
+    KlassRememberingOopClosure(collector, rp, revisit_stack) {}
+  virtual void remember_klass(Klass* k);
+};
+
 // The non-parallel version (the parallel version appears further below).
-class PushAndMarkClosure: public OopClosure {
+class PushAndMarkClosure: public KlassRememberingOopClosure {
  private:
-  CMSCollector* _collector;
   MemRegion     _span;
   CMSBitMap*    _bit_map;
   CMSBitMap*    _mod_union_table;
   CMSMarkStack* _mark_stack;
-  CMSMarkStack* _revisit_stack;
   bool          _concurrent_precleaning;
-  bool const    _should_remember_klasses;
  protected:
   DO_OOP_WORK_DEFN
  public:
   PushAndMarkClosure(CMSCollector* collector,
                      MemRegion span,
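
The comment block introducing KlassRememberingOopClosure is the crux of the change: whether a closure remembers klasses is fixed at construction (on exactly when CMS class unloading is enabled), and remembered klasses are parked on a revisit stack for a later pass. A minimal standalone sketch of that pattern follows; Klass, the vector standing in for CMSMarkStack, and every other name here are illustrative stand-ins, not the HotSpot declarations.

// Sketch of the remember-klass pattern: a flag fixed at construction plus
// a revisit stack that defers per-class work to a later phase.
#include <cstdio>
#include <vector>

struct Klass { const char* name; };    // stand-in for HotSpot's Klass

class RememberingClosure {
 protected:
  std::vector<Klass*>* _revisit_stack;   // filled during marking, drained later
  bool const           _should_remember; // set once, per collection cycle
 public:
  RememberingClosure(std::vector<Klass*>* revisit_stack, bool should_remember)
    : _revisit_stack(revisit_stack), _should_remember(should_remember) {}
  bool should_remember_klasses() const { return _should_remember; }
  // Called for each klass reached during marking; the real work is deferred.
  void remember_klass(Klass* k) { _revisit_stack->push_back(k); }
};

int main() {
  std::vector<Klass*> revisit_stack;
  RememberingClosure cl(&revisit_stack, /* class unloading enabled */ true);
  Klass a = {"java/lang/String"};
  Klass b = {"java/util/HashMap"};
  if (cl.should_remember_klasses()) {
    cl.remember_klass(&a);
    cl.remember_klass(&b);
  }
  for (Klass* k : revisit_stack)        // the later revisit pass
    std::printf("revisit %s\n", k->name);
  return 0;
}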
@@ -120,30 +153,23 @@
   inline void do_oop_nv(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
   bool do_header() { return true; }
   Prefetch::style prefetch_style() {
     return Prefetch::do_read;
   }
-  virtual const bool should_remember_klasses() const {
-    return _should_remember_klasses;
-  }
-  virtual void remember_klass(Klass* k);
 };
 
 // In the parallel case, the revisit stack, the bit map and the
 // reference processor are currently all shared. Access to
 // these shared mutable structures must use appropriate
 // synchronization (for instance, via CAS). The marking stack
 // used in the non-parallel case above is here replaced with
 // an OopTaskQueue structure to allow efficient work stealing.
-class Par_PushAndMarkClosure: public OopClosure {
+class Par_PushAndMarkClosure: public Par_KlassRememberingOopClosure {
  private:
-  CMSCollector* _collector;
   MemRegion     _span;
   CMSBitMap*    _bit_map;
   OopTaskQueue* _work_queue;
-  CMSMarkStack* _revisit_stack;
-  bool const    _should_remember_klasses;
  protected:
   DO_OOP_WORK_DEFN
  public:
   Par_PushAndMarkClosure(CMSCollector* collector,
                          MemRegion span,
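
The six-line comment in this hunk is the reason the Par_ subclass exists: the revisit stack, bit map, and reference processor are shared among GC threads, so updates must be properly synchronized, for instance via CAS. A hedged sketch of CAS-style marking on a shared bit-map word, with std::atomic standing in for whatever primitive the real CMSBitMap uses:

// Claim one mark bit in a shared word with compare-and-swap. Returns true
// iff this thread set the bit, i.e. "won" the object; a false return means
// another thread marked it first and this thread can skip it.
#include <atomic>
#include <cstdint>
#include <cstdio>

bool par_mark(std::atomic<std::uintptr_t>& word, unsigned bit) {
  std::uintptr_t mask = std::uintptr_t(1) << bit;
  std::uintptr_t old_word = word.load(std::memory_order_relaxed);
  do {
    if (old_word & mask) return false;          // already marked
  } while (!word.compare_exchange_weak(old_word, old_word | mask));
  return true;
}

int main() {
  std::atomic<std::uintptr_t> bitmap_word{0};
  std::printf("first attempt won: %d\n", par_mark(bitmap_word, 5));   // 1
  std::printf("second attempt won: %d\n", par_mark(bitmap_word, 5));  // 0
  return 0;
}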
@@ -157,14 +183,10 @@
   inline void do_oop_nv(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
   bool do_header() { return true; }
   Prefetch::style prefetch_style() {
     return Prefetch::do_read;
   }
-  virtual const bool should_remember_klasses() const {
-    return _should_remember_klasses;
-  }
-  virtual void remember_klass(Klass* k);
 };
 
 // The non-parallel version (the parallel version appears further below).
 class MarkRefsIntoAndScanClosure: public OopsInGenClosure {
  private:
@@ -199,10 +221,16 @@
     return Prefetch::do_read;
   }
   void set_freelistLock(Mutex* m) {
     _freelistLock = m;
   }
+  virtual const bool should_remember_klasses() const {
+    return _pushAndMarkClosure.should_remember_klasses();
+  }
+  virtual void remember_klass(Klass* k) {
+    _pushAndMarkClosure.remember_klass(k);
+  }
 
  private:
   inline void do_yield_check();
   void do_yield_work();
   bool take_from_overflow_list();
@@ -232,30 +260,37 @@
   virtual void do_oop(narrowOop* p);
   inline void do_oop_nv(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
   inline void do_oop_nv(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
   bool do_header() { return true; }
   virtual const bool do_nmethods() const { return true; }
+  // When ScanMarkedObjectsAgainClosure is used,
+  // it passes [Par_]MarkRefsIntoAndScanClosure to oop_oop_iterate(),
+  // and this delegation is used.
+  virtual const bool should_remember_klasses() const {
+    return _par_pushAndMarkClosure.should_remember_klasses();
+  }
+  // See comment on should_remember_klasses() above.
+  virtual void remember_klass(Klass* k) {
+    _par_pushAndMarkClosure.remember_klass(k);
+  }
   Prefetch::style prefetch_style() {
     return Prefetch::do_read;
   }
   void trim_queue(uint size);
 };
 
 // This closure is used during the concurrent marking phase
 // following the first checkpoint. Its use is buried in
 // the closure MarkFromRootsClosure.
-class PushOrMarkClosure: public OopClosure {
+class PushOrMarkClosure: public KlassRememberingOopClosure {
  private:
-  CMSCollector*   _collector;
   MemRegion       _span;
   CMSBitMap*      _bitMap;
   CMSMarkStack*   _markStack;
-  CMSMarkStack*   _revisitStack;
   HeapWord* const _finger;
   MarkFromRootsClosure* const
                   _parent;
-  bool const      _should_remember_klasses;
  protected:
   DO_OOP_WORK_DEFN
  public:
   PushOrMarkClosure(CMSCollector* cms_collector,
                     MemRegion span,
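
The delegation added in the previous two hunks follows from which object gets handed to oop_oop_iterate(): the iterator only sees the outer [Par_]MarkRefsIntoAndScanClosure, while the revisit state lives in the push-and-mark closure embedded inside it, so the outer closure must forward both virtuals. A minimal sketch of that forwarding shape, again with illustrative stand-in types rather than the HotSpot classes:

// Outer/inner closure delegation: the outer closure is what the iteration
// machinery holds, so klass-remembering calls forward to the inner closure
// that owns the revisit stack.
#include <vector>

struct Klass {};

class InnerPushAndMark {
  std::vector<Klass*>* _revisit_stack;
  bool const           _should_remember;
 public:
  InnerPushAndMark(std::vector<Klass*>* s, bool r)
    : _revisit_stack(s), _should_remember(r) {}
  bool should_remember_klasses() const { return _should_remember; }
  void remember_klass(Klass* k)        { _revisit_stack->push_back(k); }
};

class OuterMarkRefsIntoAndScan {
  InnerPushAndMark _inner;   // embedded by value, as in the CMS closures
 public:
  OuterMarkRefsIntoAndScan(std::vector<Klass*>* s, bool r) : _inner(s, r) {}
  bool should_remember_klasses() const { return _inner.should_remember_klasses(); }
  void remember_klass(Klass* k)        { _inner.remember_klass(k); }
};

int main() {
  std::vector<Klass*> revisit_stack;
  Klass k;
  OuterMarkRefsIntoAndScan outer(&revisit_stack, true);
  if (outer.should_remember_klasses()) outer.remember_klass(&k);
  return revisit_stack.size() == 1 ? 0 : 1;   // 0 on success
}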
@@ -266,38 +301,31 @@
                     MarkFromRootsClosure* parent);
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
   inline void do_oop_nv(oop* p) { PushOrMarkClosure::do_oop_work(p); }
   inline void do_oop_nv(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
-  virtual const bool should_remember_klasses() const {
-    return _should_remember_klasses;
-  }
-  virtual void remember_klass(Klass* k);
   // Deal with a stack overflow condition
   void handle_stack_overflow(HeapWord* lost);
  private:
   inline void do_yield_check();
 };
 
 // A parallel (MT) version of the above.
 // This closure is used during the concurrent marking phase
 // following the first checkpoint. Its use is buried in
 // the closure Par_MarkFromRootsClosure.
-class Par_PushOrMarkClosure: public OopClosure {
+class Par_PushOrMarkClosure: public Par_KlassRememberingOopClosure {
  private:
-  CMSCollector*    _collector;
   MemRegion        _whole_span;
   MemRegion        _span;        // local chunk
   CMSBitMap*       _bit_map;
   OopTaskQueue*    _work_queue;
   CMSMarkStack*    _overflow_stack;
-  CMSMarkStack*    _revisit_stack;
   HeapWord* const  _finger;
   HeapWord** const _global_finger_addr;
   Par_MarkFromRootsClosure* const
                    _parent;
-  bool const       _should_remember_klasses;
  protected:
   DO_OOP_WORK_DEFN
  public:
   Par_PushOrMarkClosure(CMSCollector* cms_collector,
                         MemRegion span,
@@ -310,14 +338,10 @@
                         Par_MarkFromRootsClosure* parent);
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
   inline void do_oop_nv(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
   inline void do_oop_nv(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
-  virtual const bool should_remember_klasses() const {
-    return _should_remember_klasses;
-  }
-  virtual void remember_klass(Klass* k);
   // Deal with a stack overflow condition
   void handle_stack_overflow(HeapWord* lost);
  private:
   inline void do_yield_check();
 };
@@ -326,65 +350,52 @@
 // given objects (transitively) as being reachable/live.
 // This is currently used during the (weak) reference object
 // processing phase of the CMS final checkpoint step, as
 // well as during the concurrent precleaning of the discovered
 // reference lists.
-class CMSKeepAliveClosure: public OopClosure {
+class CMSKeepAliveClosure: public KlassRememberingOopClosure {
  private:
-  CMSCollector*   _collector;
   const MemRegion _span;
   CMSMarkStack*   _mark_stack;
   CMSBitMap*      _bit_map;
   bool            _concurrent_precleaning;
  protected:
   DO_OOP_WORK_DEFN
  public:
   CMSKeepAliveClosure(CMSCollector* collector, MemRegion span,
                       CMSBitMap* bit_map, CMSMarkStack* mark_stack,
-                      bool cpc):
-    _collector(collector),
-    _span(span),
-    _bit_map(bit_map),
-    _mark_stack(mark_stack),
-    _concurrent_precleaning(cpc) {
-    assert(!_span.is_empty(), "Empty span could spell trouble");
-  }
+                      CMSMarkStack* revisit_stack, bool cpc);
   bool concurrent_precleaning() const { return _concurrent_precleaning; }
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
   inline void do_oop_nv(oop* p) { CMSKeepAliveClosure::do_oop_work(p); }
   inline void do_oop_nv(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
 };
 
-class CMSInnerParMarkAndPushClosure: public OopClosure {
+class CMSInnerParMarkAndPushClosure: public Par_KlassRememberingOopClosure {
  private:
-  CMSCollector* _collector;
   MemRegion     _span;
   OopTaskQueue* _work_queue;
   CMSBitMap*    _bit_map;
  protected:
   DO_OOP_WORK_DEFN
  public:
   CMSInnerParMarkAndPushClosure(CMSCollector* collector,
                                 MemRegion span, CMSBitMap* bit_map,
-                                OopTaskQueue* work_queue):
-    _collector(collector),
-    _span(span),
-    _bit_map(bit_map),
-    _work_queue(work_queue) { }
+                                CMSMarkStack* revisit_stack,
+                                OopTaskQueue* work_queue);
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
   inline void do_oop_nv(oop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
   inline void do_oop_nv(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
 };
 
 // A parallel (MT) version of the above, used when
 // reference processing is parallel; the only difference
 // is in the do_oop method.
-class CMSParKeepAliveClosure: public OopClosure {
+class CMSParKeepAliveClosure: public Par_KlassRememberingOopClosure {
  private:
-  CMSCollector* _collector;
   MemRegion     _span;
   OopTaskQueue* _work_queue;
   CMSBitMap*    _bit_map;
   CMSInnerParMarkAndPushClosure
                 _mark_and_push;
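
For context on the keep-alive closures being retrofitted above: during reference processing, the keep-alive closure is applied to each referent that must survive, and it marks that object and, transitively, everything reachable from it. Below is a toy model of that contract; the real CMSKeepAliveClosure marks a CMS bit map and pushes onto a mark stack, and all names here are stand-ins.

// Toy keep-alive contract: do_oop() marks an object live and queues it so
// its fields get marked too; a set stands in for the mark bit map.
#include <cstdio>
#include <unordered_set>
#include <vector>

struct obj_t { int id; std::vector<obj_t*> fields; };

class KeepAliveClosure {
  std::unordered_set<obj_t*>* _marked;   // stand-in for the mark bit map
  std::vector<obj_t*>*        _stack;    // stand-in for the mark stack
 public:
  KeepAliveClosure(std::unordered_set<obj_t*>* m, std::vector<obj_t*>* s)
    : _marked(m), _stack(s) {}
  void do_oop(obj_t* p) {
    if (_marked->insert(p).second)       // newly marked: scan it later
      _stack->push_back(p);
  }
};

int main() {
  obj_t leaf{2, {}};
  obj_t referent{1, {&leaf}};
  std::unordered_set<obj_t*> marked;
  std::vector<obj_t*> stack;
  KeepAliveClosure keep_alive(&marked, &stack);
  keep_alive.do_oop(&referent);          // reference processor keeps this alive
  while (!stack.empty()) {               // transitive closure over fields
    obj_t* o = stack.back(); stack.pop_back();
    for (obj_t* f : o->fields) keep_alive.do_oop(f);
  }
  std::printf("marked %zu objects\n", marked.size());   // prints 2
  return 0;
}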
@@ -392,11 +403,12 @@
   void trim_queue(uint max);
  protected:
   DO_OOP_WORK_DEFN
  public:
   CMSParKeepAliveClosure(CMSCollector* collector, MemRegion span,
-                         CMSBitMap* bit_map, OopTaskQueue* work_queue);
+                         CMSBitMap* bit_map, CMSMarkStack* revisit_stack,
+                         OopTaskQueue* work_queue);
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
   inline void do_oop_nv(oop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
   inline void do_oop_nv(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
 };
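
A note on trim_queue(uint), visible in this hunk and in Par_MarkRefsIntoAndScanClosure earlier: a parallel worker drains its local queue down to a threshold, processing the excess, presumably so the queue stays bounded while still leaving some entries available for work stealing. A hedged sketch of that shape, with std::deque standing in for OopTaskQueue:

// trim_queue(): process local work until at most 'max' entries remain.
#include <cstdio>
#include <deque>

struct task_t { int id; };

void process(task_t t) { std::printf("scan task %d\n", t.id); }

void trim_queue(std::deque<task_t>& q, std::size_t max) {
  while (q.size() > max) {
    task_t t = q.back();   // take from the private end of the queue
    q.pop_back();
    process(t);            // in the real code, scanning may push new tasks
  }
}

int main() {
  std::deque<task_t> work_queue{{1}, {2}, {3}, {4}, {5}};
  trim_queue(work_queue, 2);   // leaves two entries for potential stealing
  std::printf("entries left: %zu\n", work_queue.size());   // prints 2
  return 0;
}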