comparison src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp @ 993:54b3b351d6f9

Merge
author jrose
date Wed, 23 Sep 2009 23:56:15 -0700
parents 148e5441d916 8b46c4d82093
children 753cf9794df9
comparing 992:6a8ccac44f41 with 993:54b3b351d6f9
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp
@@ -86,21 +86,54 @@
   Prefetch::style prefetch_style() {
     return Prefetch::do_read;
   }
 };

+// KlassRememberingOopClosure is used when marking of the permanent generation
+// is being done.  It adds fields to support revisiting of klasses
+// for class unloading.  _should_remember_klasses should be set to
+// indicate if klasses should be remembered.  Currently that is whenever
+// CMS class unloading is turned on.  The _revisit_stack is used
+// to save the klasses for later processing.
+class KlassRememberingOopClosure : public OopClosure {
+ protected:
+  CMSCollector* _collector;
+  CMSMarkStack* _revisit_stack;
+  bool const    _should_remember_klasses;
+ public:
+  void check_remember_klasses() const PRODUCT_RETURN;
+  virtual const bool should_remember_klasses() const {
+    check_remember_klasses();
+    return _should_remember_klasses;
+  }
+  virtual void remember_klass(Klass* k);
+
+  KlassRememberingOopClosure(CMSCollector* collector,
+                             ReferenceProcessor* rp,
+                             CMSMarkStack* revisit_stack);
+};
+
+// Similar to KlassRememberingOopClosure for use when multiple
+// GC threads will execute the closure.
+
+class Par_KlassRememberingOopClosure : public KlassRememberingOopClosure {
+ public:
+  Par_KlassRememberingOopClosure(CMSCollector* collector,
+                                 ReferenceProcessor* rp,
+                                 CMSMarkStack* revisit_stack):
+    KlassRememberingOopClosure(collector, rp, revisit_stack) {}
+  virtual void remember_klass(Klass* k);
+};
+
 // The non-parallel version (the parallel version appears further below).
-class PushAndMarkClosure: public OopClosure {
+class PushAndMarkClosure: public KlassRememberingOopClosure {
  private:
-  CMSCollector* _collector;
   MemRegion     _span;
   CMSBitMap*    _bit_map;
   CMSBitMap*    _mod_union_table;
   CMSMarkStack* _mark_stack;
-  CMSMarkStack* _revisit_stack;
   bool          _concurrent_precleaning;
-  bool const    _should_remember_klasses;
  protected:
   DO_OOP_WORK_DEFN
  public:
   PushAndMarkClosure(CMSCollector* collector,
                      MemRegion span,
@@ -116,30 +149,29 @@
   inline void do_oop_nv(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
   bool do_header() { return true; }
   Prefetch::style prefetch_style() {
     return Prefetch::do_read;
   }
-  virtual const bool should_remember_klasses() const {
-    return _should_remember_klasses;
-  }
-  virtual void remember_klass(Klass* k);
+  // In support of class unloading
+  virtual const bool should_remember_mdo() const {
+    return false;
+    // return _should_remember_klasses;
+  }
+  virtual void remember_mdo(DataLayout* v);
 };

 // In the parallel case, the revisit stack, the bit map and the
 // reference processor are currently all shared. Access to
 // these shared mutable structures must use appropriate
 // synchronization (for instance, via CAS). The marking stack
 // used in the non-parallel case above is here replaced with
 // an OopTaskQueue structure to allow efficient work stealing.
-class Par_PushAndMarkClosure: public OopClosure {
+class Par_PushAndMarkClosure: public Par_KlassRememberingOopClosure {
  private:
-  CMSCollector* _collector;
   MemRegion     _span;
   CMSBitMap*    _bit_map;
   OopTaskQueue* _work_queue;
-  CMSMarkStack* _revisit_stack;
-  bool const    _should_remember_klasses;
  protected:
   DO_OOP_WORK_DEFN
  public:
   Par_PushAndMarkClosure(CMSCollector* collector,
                          MemRegion span,
@@ -153,14 +185,16 @@
   inline void do_oop_nv(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
   bool do_header() { return true; }
   Prefetch::style prefetch_style() {
     return Prefetch::do_read;
   }
-  virtual const bool should_remember_klasses() const {
-    return _should_remember_klasses;
-  }
-  virtual void remember_klass(Klass* k);
+  // In support of class unloading
+  virtual const bool should_remember_mdo() const {
+    return false;
+    // return _should_remember_klasses;
+  }
+  virtual void remember_mdo(DataLayout* v);
 };

 // The non-parallel version (the parallel version appears further below).
 class MarkRefsIntoAndScanClosure: public OopsInGenClosure {
  private:
@@ -194,10 +228,16 @@
     return Prefetch::do_read;
   }
   void set_freelistLock(Mutex* m) {
     _freelistLock = m;
   }
+  virtual const bool should_remember_klasses() const {
+    return _pushAndMarkClosure.should_remember_klasses();
+  }
+  virtual void remember_klass(Klass* k) {
+    _pushAndMarkClosure.remember_klass(k);
+  }

  private:
   inline void do_yield_check();
   void do_yield_work();
   bool take_from_overflow_list();
@@ -227,30 +267,37 @@
   virtual void do_oop(narrowOop* p);
   inline void do_oop_nv(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
   inline void do_oop_nv(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
   bool do_header() { return true; }
   virtual const bool do_nmethods() const { return true; }
+  // When ScanMarkedObjectsAgainClosure is used,
+  // it passes [Par_]MarkRefsIntoAndScanClosure to oop_oop_iterate(),
+  // and this delegation is used.
+  virtual const bool should_remember_klasses() const {
+    return _par_pushAndMarkClosure.should_remember_klasses();
+  }
+  // See comment on should_remember_klasses() above.
+  virtual void remember_klass(Klass* k) {
+    _par_pushAndMarkClosure.remember_klass(k);
+  }
   Prefetch::style prefetch_style() {
     return Prefetch::do_read;
   }
   void trim_queue(uint size);
 };

 // This closure is used during the concurrent marking phase
 // following the first checkpoint. Its use is buried in
 // the closure MarkFromRootsClosure.
-class PushOrMarkClosure: public OopClosure {
+class PushOrMarkClosure: public KlassRememberingOopClosure {
  private:
-  CMSCollector*   _collector;
   MemRegion       _span;
   CMSBitMap*      _bitMap;
   CMSMarkStack*   _markStack;
-  CMSMarkStack*   _revisitStack;
   HeapWord* const _finger;
   MarkFromRootsClosure* const
                   _parent;
-  bool const      _should_remember_klasses;
  protected:
   DO_OOP_WORK_DEFN
  public:
   PushOrMarkClosure(CMSCollector* cms_collector,
                     MemRegion span,
@@ -261,38 +308,38 @@
                     MarkFromRootsClosure* parent);
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
   inline void do_oop_nv(oop* p) { PushOrMarkClosure::do_oop_work(p); }
   inline void do_oop_nv(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
-  virtual const bool should_remember_klasses() const {
-    return _should_remember_klasses;
-  }
-  virtual void remember_klass(Klass* k);
+  // In support of class unloading
+  virtual const bool should_remember_mdo() const {
+    return false;
+    // return _should_remember_klasses;
+  }
+  virtual void remember_mdo(DataLayout* v);
+
   // Deal with a stack overflow condition
   void handle_stack_overflow(HeapWord* lost);
  private:
   inline void do_yield_check();
 };

 // A parallel (MT) version of the above.
 // This closure is used during the concurrent marking phase
 // following the first checkpoint. Its use is buried in
 // the closure Par_MarkFromRootsClosure.
-class Par_PushOrMarkClosure: public OopClosure {
+class Par_PushOrMarkClosure: public Par_KlassRememberingOopClosure {
  private:
-  CMSCollector*    _collector;
   MemRegion        _whole_span;
   MemRegion        _span;        // local chunk
   CMSBitMap*       _bit_map;
   OopTaskQueue*    _work_queue;
   CMSMarkStack*    _overflow_stack;
-  CMSMarkStack*    _revisit_stack;
   HeapWord*  const _finger;
   HeapWord** const _global_finger_addr;
   Par_MarkFromRootsClosure* const
                    _parent;
-  bool const       _should_remember_klasses;
  protected:
   DO_OOP_WORK_DEFN
  public:
   Par_PushOrMarkClosure(CMSCollector* cms_collector,
                         MemRegion span,
@@ -305,14 +352,17 @@
                         Par_MarkFromRootsClosure* parent);
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
   inline void do_oop_nv(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
   inline void do_oop_nv(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
-  virtual const bool should_remember_klasses() const {
-    return _should_remember_klasses;
-  }
-  virtual void remember_klass(Klass* k);
+  // In support of class unloading
+  virtual const bool should_remember_mdo() const {
+    return false;
+    // return _should_remember_klasses;
+  }
+  virtual void remember_mdo(DataLayout* v);
+
   // Deal with a stack overflow condition
   void handle_stack_overflow(HeapWord* lost);
  private:
   inline void do_yield_check();
 };
@@ -321,65 +371,52 @@
 // given objects (transitively) as being reachable/live.
 // This is currently used during the (weak) reference object
 // processing phase of the CMS final checkpoint step, as
 // well as during the concurrent precleaning of the discovered
 // reference lists.
-class CMSKeepAliveClosure: public OopClosure {
+class CMSKeepAliveClosure: public KlassRememberingOopClosure {
  private:
-  CMSCollector* _collector;
   const MemRegion _span;
   CMSMarkStack* _mark_stack;
   CMSBitMap*    _bit_map;
   bool          _concurrent_precleaning;
  protected:
   DO_OOP_WORK_DEFN
  public:
-  CMSKeepAliveClosure(CMSCollector* collector, MemRegion span,
-                      CMSBitMap* bit_map, CMSMarkStack* mark_stack,
-                      bool cpc):
-    _collector(collector),
-    _span(span),
-    _bit_map(bit_map),
-    _mark_stack(mark_stack),
-    _concurrent_precleaning(cpc) {
-    assert(!_span.is_empty(), "Empty span could spell trouble");
-  }
+  CMSKeepAliveClosure(CMSCollector* collector, MemRegion span,
+                      CMSBitMap* bit_map, CMSMarkStack* mark_stack,
+                      CMSMarkStack* revisit_stack, bool cpc);
   bool concurrent_precleaning() const { return _concurrent_precleaning; }
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
   inline void do_oop_nv(oop* p) { CMSKeepAliveClosure::do_oop_work(p); }
   inline void do_oop_nv(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
 };

-class CMSInnerParMarkAndPushClosure: public OopClosure {
+class CMSInnerParMarkAndPushClosure: public Par_KlassRememberingOopClosure {
  private:
-  CMSCollector* _collector;
   MemRegion     _span;
   OopTaskQueue* _work_queue;
   CMSBitMap*    _bit_map;
  protected:
   DO_OOP_WORK_DEFN
  public:
-  CMSInnerParMarkAndPushClosure(CMSCollector* collector,
-                                MemRegion span, CMSBitMap* bit_map,
-                                OopTaskQueue* work_queue):
-    _collector(collector),
-    _span(span),
-    _bit_map(bit_map),
-    _work_queue(work_queue) { }
+  CMSInnerParMarkAndPushClosure(CMSCollector* collector,
+                                MemRegion span, CMSBitMap* bit_map,
+                                CMSMarkStack* revisit_stack,
+                                OopTaskQueue* work_queue);
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
   inline void do_oop_nv(oop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
   inline void do_oop_nv(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
 };

 // A parallel (MT) version of the above, used when
 // reference processing is parallel; the only difference
 // is in the do_oop method.
-class CMSParKeepAliveClosure: public OopClosure {
+class CMSParKeepAliveClosure: public Par_KlassRememberingOopClosure {
  private:
-  CMSCollector* _collector;
   MemRegion     _span;
   OopTaskQueue* _work_queue;
   CMSBitMap*    _bit_map;
   CMSInnerParMarkAndPushClosure
                 _mark_and_push;
@@ -387,11 +424,12 @@
   void trim_queue(uint max);
  protected:
   DO_OOP_WORK_DEFN
  public:
-  CMSParKeepAliveClosure(CMSCollector* collector, MemRegion span,
-                         CMSBitMap* bit_map, OopTaskQueue* work_queue);
+  CMSParKeepAliveClosure(CMSCollector* collector, MemRegion span,
+                         CMSBitMap* bit_map, CMSMarkStack* revisit_stack,
+                         OopTaskQueue* work_queue);
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
   inline void do_oop_nv(oop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
   inline void do_oop_nv(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
 };
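
For readers skimming the hunks above, the shape of the change may be easier to see outside the diff. The sketch below is a minimal, self-contained C++ model of the refactoring, assuming simplified stand-in types (a plain MarkStack and an empty Klass) rather than the real CMSCollector/CMSMarkStack machinery: a base class owns the revisit stack and the remember flag once, subclasses inherit them, and wrapper closures delegate to an embedded instance, mirroring [Par_]MarkRefsIntoAndScanClosure.

// Sketch only: models the refactoring pattern in the changeset above with
// simplified stand-in types; it is not the HotSpot code itself.
#include <cassert>
#include <cstdio>
#include <vector>

struct Klass {};                         // stand-in for HotSpot's Klass
typedef std::vector<Klass*> MarkStack;   // stand-in for CMSMarkStack

class OopClosure {
 public:
  virtual ~OopClosure() {}
  virtual bool should_remember_klasses() const { return false; }
  virtual void remember_klass(Klass*) { assert(false && "not supported"); }
};

// Before the change, each marking closure carried its own _revisit_stack
// and _should_remember_klasses; the change hoists both into one base class.
class KlassRememberingOopClosure : public OopClosure {
 protected:
  MarkStack* _revisit_stack;
  const bool _should_remember_klasses;
 public:
  KlassRememberingOopClosure(MarkStack* revisit_stack, bool remember)
      : _revisit_stack(revisit_stack), _should_remember_klasses(remember) {}
  virtual bool should_remember_klasses() const {
    return _should_remember_klasses;
  }
  virtual void remember_klass(Klass* k) { _revisit_stack->push_back(k); }
};

// Closures that embed a marking closure rather than inherit from the base
// forward both calls to it, as [Par_]MarkRefsIntoAndScanClosure does above.
class MarkRefsIntoAndScan : public OopClosure {
 private:
  KlassRememberingOopClosure _push_and_mark;
 public:
  explicit MarkRefsIntoAndScan(MarkStack* revisit_stack)
      : _push_and_mark(revisit_stack, /* remember= */ true) {}
  virtual bool should_remember_klasses() const {
    return _push_and_mark.should_remember_klasses();
  }
  virtual void remember_klass(Klass* k) { _push_and_mark.remember_klass(k); }
};

int main() {
  MarkStack revisit_stack;
  Klass k;
  MarkRefsIntoAndScan cl(&revisit_stack);
  if (cl.should_remember_klasses()) {
    cl.remember_klass(&k);  // klass saved for later revisiting
  }
  printf("klasses remembered for revisit: %u\n",
         (unsigned) revisit_stack.size());
  return 0;
}

One further detail visible in the hunks: the Push[And|Or]Mark closures now expose should_remember_mdo()/remember_mdo(DataLayout*), with should_remember_mdo() hard-coded to return false and the old _should_remember_klasses return left behind as a comment, which reads like a staged transition toward remembering method-data (MDO) entries alongside klasses.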