Mercurial > hg > truffle
comparison src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp @ 6948:e522a00b91aa
Merge with http://hg.openjdk.java.net/hsx/hsx25/hotspot/ after NPG - C++ build works
author | Doug Simon <doug.simon@oracle.com> |
---|---|
date | Mon, 12 Nov 2012 23:14:12 +0100 |
parents | da91efe96a93 |
children | ca9dedeebdec |
comparison
equal
deleted
inserted
replaced
6711:ae13cc658b80 | 6948:e522a00b91aa |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. | 2 * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved. |
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | 4 * |
5 * This code is free software; you can redistribute it and/or modify it | 5 * This code is free software; you can redistribute it and/or modify it |
6 * under the terms of the GNU General Public License version 2 only, as | 6 * under the terms of the GNU General Public License version 2 only, as |
7 * published by the Free Software Foundation. | 7 * published by the Free Software Foundation. |
46 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); \ | 46 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); \ |
47 do_oop(obj); \ | 47 do_oop(obj); \ |
48 } \ | 48 } \ |
49 } | 49 } |
50 | 50 |
51 class MarkRefsIntoClosure: public OopsInGenClosure { | 51 // Applies the given oop closure to all oops in all klasses visited. |
52 class CMKlassClosure : public KlassClosure { | |
53 friend class CMSOopClosure; | |
54 friend class CMSOopsInGenClosure; | |
55 | |
56 OopClosure* _oop_closure; | |
57 | |
58 // Used when _oop_closure couldn't be set in an initialization list. | |
59 void initialize(OopClosure* oop_closure) { | |
60 assert(_oop_closure == NULL, "Should only be called once"); | |
61 _oop_closure = oop_closure; | |
62 } | |
63 public: | |
64 CMKlassClosure(OopClosure* oop_closure = NULL) : _oop_closure(oop_closure) { } | |
65 | |
66 void do_klass(Klass* k); | |
67 }; | |
68 | |
69 // The base class for all CMS marking closures. | |
70 // It's used to proxy through the metadata to the oops defined in them. | |
71 class CMSOopClosure: public ExtendedOopClosure { | |
72 CMKlassClosure _klass_closure; | |
73 public: | |
74 CMSOopClosure() : ExtendedOopClosure() { | |
75 _klass_closure.initialize(this); | |
76 } | |
77 CMSOopClosure(ReferenceProcessor* rp) : ExtendedOopClosure(rp) { | |
78 _klass_closure.initialize(this); | |
79 } | |
80 | |
81 virtual bool do_metadata() { return do_metadata_nv(); } | |
82 inline bool do_metadata_nv() { return true; } | |
83 | |
84 virtual void do_klass(Klass* k); | |
85 void do_klass_nv(Klass* k); | |
86 | |
87 virtual void do_class_loader_data(ClassLoaderData* cld); | |
88 }; | |
89 | |
90 // TODO: This duplication of the CMSOopClosure class is only needed because | |
91 // some CMS OopClosures derive from OopsInGenClosure. It would be good | |
92 // to get rid of them completely. | |
93 class CMSOopsInGenClosure: public OopsInGenClosure { | |
94 CMKlassClosure _klass_closure; | |
95 public: | |
96 CMSOopsInGenClosure() { | |
97 _klass_closure.initialize(this); | |
98 } | |
99 | |
100 virtual bool do_metadata() { return do_metadata_nv(); } | |
101 inline bool do_metadata_nv() { return true; } | |
102 | |
103 virtual void do_klass(Klass* k); | |
104 void do_klass_nv(Klass* k); | |
105 | |
106 virtual void do_class_loader_data(ClassLoaderData* cld); | |
107 }; | |
108 | |
109 class MarkRefsIntoClosure: public CMSOopsInGenClosure { | |
52 private: | 110 private: |
53 const MemRegion _span; | 111 const MemRegion _span; |
54 CMSBitMap* _bitMap; | 112 CMSBitMap* _bitMap; |
55 protected: | 113 protected: |
56 DO_OOP_WORK_DEFN | 114 DO_OOP_WORK_DEFN |
57 public: | 115 public: |
58 MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap); | 116 MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap); |
59 virtual void do_oop(oop* p); | 117 virtual void do_oop(oop* p); |
60 virtual void do_oop(narrowOop* p); | 118 virtual void do_oop(narrowOop* p); |
61 inline void do_oop_nv(oop* p) { MarkRefsIntoClosure::do_oop_work(p); } | 119 |
62 inline void do_oop_nv(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); } | |
63 bool do_header() { return true; } | |
64 Prefetch::style prefetch_style() { | 120 Prefetch::style prefetch_style() { |
65 return Prefetch::do_read; | 121 return Prefetch::do_read; |
66 } | 122 } |
67 }; | 123 }; |
68 | 124 |
69 // A variant of the above used in certain kinds of CMS | 125 // A variant of the above used in certain kinds of CMS |
70 // marking verification. | 126 // marking verification. |
71 class MarkRefsIntoVerifyClosure: public OopsInGenClosure { | 127 class MarkRefsIntoVerifyClosure: public CMSOopsInGenClosure { |
72 private: | 128 private: |
73 const MemRegion _span; | 129 const MemRegion _span; |
74 CMSBitMap* _verification_bm; | 130 CMSBitMap* _verification_bm; |
75 CMSBitMap* _cms_bm; | 131 CMSBitMap* _cms_bm; |
76 protected: | 132 protected: |
78 public: | 134 public: |
79 MarkRefsIntoVerifyClosure(MemRegion span, CMSBitMap* verification_bm, | 135 MarkRefsIntoVerifyClosure(MemRegion span, CMSBitMap* verification_bm, |
80 CMSBitMap* cms_bm); | 136 CMSBitMap* cms_bm); |
81 virtual void do_oop(oop* p); | 137 virtual void do_oop(oop* p); |
82 virtual void do_oop(narrowOop* p); | 138 virtual void do_oop(narrowOop* p); |
83 inline void do_oop_nv(oop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); } | 139 |
84 inline void do_oop_nv(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); } | 140 Prefetch::style prefetch_style() { |
85 bool do_header() { return true; } | 141 return Prefetch::do_read; |
86 Prefetch::style prefetch_style() { | 142 } |
87 return Prefetch::do_read; | 143 }; |
88 } | 144 |
89 }; | 145 // The non-parallel version (the parallel version appears further below). |
90 | 146 class PushAndMarkClosure: public CMSOopClosure { |
91 // KlassRememberingOopClosure is used when marking of the permanent generation | 147 private: |
92 // is being done. It adds fields to support revisiting of klasses | |
93 // for class unloading. _should_remember_klasses should be set to | |
94 // indicate if klasses should be remembered. Currently that is whenever | |
95 // CMS class unloading is turned on. The _revisit_stack is used | |
96 // to save the klasses for later processing. | |
97 class KlassRememberingOopClosure : public OopClosure { | |
98 protected: | |
99 CMSCollector* _collector; | 148 CMSCollector* _collector; |
100 CMSMarkStack* _revisit_stack; | |
101 bool const _should_remember_klasses; | |
102 public: | |
103 void check_remember_klasses() const PRODUCT_RETURN; | |
104 virtual const bool should_remember_klasses() const { | |
105 check_remember_klasses(); | |
106 return _should_remember_klasses; | |
107 } | |
108 virtual void remember_klass(Klass* k); | |
109 | |
110 KlassRememberingOopClosure(CMSCollector* collector, | |
111 ReferenceProcessor* rp, | |
112 CMSMarkStack* revisit_stack); | |
113 }; | |
114 | |
115 // Similar to KlassRememberingOopClosure for use when multiple | |
116 // GC threads will execute the closure. | |
117 | |
118 class Par_KlassRememberingOopClosure : public KlassRememberingOopClosure { | |
119 public: | |
120 Par_KlassRememberingOopClosure(CMSCollector* collector, | |
121 ReferenceProcessor* rp, | |
122 CMSMarkStack* revisit_stack): | |
123 KlassRememberingOopClosure(collector, rp, revisit_stack) {} | |
124 virtual void remember_klass(Klass* k); | |
125 }; | |
126 | |
127 // The non-parallel version (the parallel version appears further below). | |
128 class PushAndMarkClosure: public KlassRememberingOopClosure { | |
129 private: | |
130 MemRegion _span; | 149 MemRegion _span; |
131 CMSBitMap* _bit_map; | 150 CMSBitMap* _bit_map; |
132 CMSBitMap* _mod_union_table; | 151 CMSBitMap* _mod_union_table; |
133 CMSMarkStack* _mark_stack; | 152 CMSMarkStack* _mark_stack; |
134 bool _concurrent_precleaning; | 153 bool _concurrent_precleaning; |
139 MemRegion span, | 158 MemRegion span, |
140 ReferenceProcessor* rp, | 159 ReferenceProcessor* rp, |
141 CMSBitMap* bit_map, | 160 CMSBitMap* bit_map, |
142 CMSBitMap* mod_union_table, | 161 CMSBitMap* mod_union_table, |
143 CMSMarkStack* mark_stack, | 162 CMSMarkStack* mark_stack, |
144 CMSMarkStack* revisit_stack, | |
145 bool concurrent_precleaning); | 163 bool concurrent_precleaning); |
146 virtual void do_oop(oop* p); | 164 virtual void do_oop(oop* p); |
147 virtual void do_oop(narrowOop* p); | 165 virtual void do_oop(narrowOop* p); |
148 inline void do_oop_nv(oop* p) { PushAndMarkClosure::do_oop_work(p); } | 166 inline void do_oop_nv(oop* p) { PushAndMarkClosure::do_oop_work(p); } |
149 inline void do_oop_nv(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); } | 167 inline void do_oop_nv(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); } |
150 bool do_header() { return true; } | 168 |
151 Prefetch::style prefetch_style() { | 169 Prefetch::style prefetch_style() { |
152 return Prefetch::do_read; | 170 return Prefetch::do_read; |
153 } | 171 } |
154 // In support of class unloading | 172 }; |
155 virtual const bool should_remember_mdo() const { | 173 |
156 return false; | 174 // In the parallel case, the bit map and the |
157 // return _should_remember_klasses; | |
158 } | |
159 virtual void remember_mdo(DataLayout* v); | |
160 }; | |
161 | |
162 // In the parallel case, the revisit stack, the bit map and the | |
163 // reference processor are currently all shared. Access to | 175 // reference processor are currently all shared. Access to |
164 // these shared mutable structures must use appropriate | 176 // these shared mutable structures must use appropriate |
165 // synchronization (for instance, via CAS). The marking stack | 177 // synchronization (for instance, via CAS). The marking stack |
166 // used in the non-parallel case above is here replaced with | 178 // used in the non-parallel case above is here replaced with |
167 // an OopTaskQueue structure to allow efficient work stealing. | 179 // an OopTaskQueue structure to allow efficient work stealing. |
168 class Par_PushAndMarkClosure: public Par_KlassRememberingOopClosure { | 180 class Par_PushAndMarkClosure: public CMSOopClosure { |
169 private: | 181 private: |
182 CMSCollector* _collector; | |
170 MemRegion _span; | 183 MemRegion _span; |
171 CMSBitMap* _bit_map; | 184 CMSBitMap* _bit_map; |
172 OopTaskQueue* _work_queue; | 185 OopTaskQueue* _work_queue; |
173 protected: | 186 protected: |
174 DO_OOP_WORK_DEFN | 187 DO_OOP_WORK_DEFN |
175 public: | 188 public: |
176 Par_PushAndMarkClosure(CMSCollector* collector, | 189 Par_PushAndMarkClosure(CMSCollector* collector, |
177 MemRegion span, | 190 MemRegion span, |
178 ReferenceProcessor* rp, | 191 ReferenceProcessor* rp, |
179 CMSBitMap* bit_map, | 192 CMSBitMap* bit_map, |
180 OopTaskQueue* work_queue, | 193 OopTaskQueue* work_queue); |
181 CMSMarkStack* revisit_stack); | |
182 virtual void do_oop(oop* p); | 194 virtual void do_oop(oop* p); |
183 virtual void do_oop(narrowOop* p); | 195 virtual void do_oop(narrowOop* p); |
184 inline void do_oop_nv(oop* p) { Par_PushAndMarkClosure::do_oop_work(p); } | 196 inline void do_oop_nv(oop* p) { Par_PushAndMarkClosure::do_oop_work(p); } |
185 inline void do_oop_nv(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); } | 197 inline void do_oop_nv(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); } |
186 bool do_header() { return true; } | 198 |
187 Prefetch::style prefetch_style() { | 199 Prefetch::style prefetch_style() { |
188 return Prefetch::do_read; | 200 return Prefetch::do_read; |
189 } | 201 } |
190 // In support of class unloading | |
191 virtual const bool should_remember_mdo() const { | |
192 return false; | |
193 // return _should_remember_klasses; | |
194 } | |
195 virtual void remember_mdo(DataLayout* v); | |
196 }; | 202 }; |
197 | 203 |
198 // The non-parallel version (the parallel version appears further below). | 204 // The non-parallel version (the parallel version appears further below). |
199 class MarkRefsIntoAndScanClosure: public OopsInGenClosure { | 205 class MarkRefsIntoAndScanClosure: public CMSOopsInGenClosure { |
200 private: | 206 private: |
201 MemRegion _span; | 207 MemRegion _span; |
202 CMSBitMap* _bit_map; | 208 CMSBitMap* _bit_map; |
203 CMSMarkStack* _mark_stack; | 209 CMSMarkStack* _mark_stack; |
204 PushAndMarkClosure _pushAndMarkClosure; | 210 PushAndMarkClosure _pushAndMarkClosure; |
213 MarkRefsIntoAndScanClosure(MemRegion span, | 219 MarkRefsIntoAndScanClosure(MemRegion span, |
214 ReferenceProcessor* rp, | 220 ReferenceProcessor* rp, |
215 CMSBitMap* bit_map, | 221 CMSBitMap* bit_map, |
216 CMSBitMap* mod_union_table, | 222 CMSBitMap* mod_union_table, |
217 CMSMarkStack* mark_stack, | 223 CMSMarkStack* mark_stack, |
218 CMSMarkStack* revisit_stack, | |
219 CMSCollector* collector, | 224 CMSCollector* collector, |
220 bool should_yield, | 225 bool should_yield, |
221 bool concurrent_precleaning); | 226 bool concurrent_precleaning); |
222 virtual void do_oop(oop* p); | 227 virtual void do_oop(oop* p); |
223 virtual void do_oop(narrowOop* p); | 228 virtual void do_oop(narrowOop* p); |
224 inline void do_oop_nv(oop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); } | 229 inline void do_oop_nv(oop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); } |
225 inline void do_oop_nv(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); } | 230 inline void do_oop_nv(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); } |
226 bool do_header() { return true; } | 231 |
227 Prefetch::style prefetch_style() { | 232 Prefetch::style prefetch_style() { |
228 return Prefetch::do_read; | 233 return Prefetch::do_read; |
229 } | 234 } |
230 void set_freelistLock(Mutex* m) { | 235 void set_freelistLock(Mutex* m) { |
231 _freelistLock = m; | 236 _freelistLock = m; |
232 } | |
233 virtual const bool should_remember_klasses() const { | |
234 return _pushAndMarkClosure.should_remember_klasses(); | |
235 } | |
236 virtual void remember_klass(Klass* k) { | |
237 _pushAndMarkClosure.remember_klass(k); | |
238 } | 237 } |
239 | 238 |
240 private: | 239 private: |
241 inline void do_yield_check(); | 240 inline void do_yield_check(); |
242 void do_yield_work(); | 241 void do_yield_work(); |
245 | 244 |
246 // In this, the parallel avatar of MarkRefsIntoAndScanClosure, the revisit | 245 // In this, the parallel avatar of MarkRefsIntoAndScanClosure, the revisit |
247 // stack and the bitMap are shared, so access needs to be suitably | 246 // stack and the bitMap are shared, so access needs to be suitably |
248 // synchronized. An OopTaskQueue structure, supporting efficient | 247 // synchronized. An OopTaskQueue structure, supporting efficient |
249 // work stealing, replaces a CMSMarkStack for storing grey objects. | 248 // work stealing, replaces a CMSMarkStack for storing grey objects. |
250 class Par_MarkRefsIntoAndScanClosure: public OopsInGenClosure { | 249 class Par_MarkRefsIntoAndScanClosure: public CMSOopsInGenClosure { |
251 private: | 250 private: |
252 MemRegion _span; | 251 MemRegion _span; |
253 CMSBitMap* _bit_map; | 252 CMSBitMap* _bit_map; |
254 OopTaskQueue* _work_queue; | 253 OopTaskQueue* _work_queue; |
255 const uint _low_water_mark; | 254 const uint _low_water_mark; |
259 public: | 258 public: |
260 Par_MarkRefsIntoAndScanClosure(CMSCollector* collector, | 259 Par_MarkRefsIntoAndScanClosure(CMSCollector* collector, |
261 MemRegion span, | 260 MemRegion span, |
262 ReferenceProcessor* rp, | 261 ReferenceProcessor* rp, |
263 CMSBitMap* bit_map, | 262 CMSBitMap* bit_map, |
264 OopTaskQueue* work_queue, | 263 OopTaskQueue* work_queue); |
265 CMSMarkStack* revisit_stack); | |
266 virtual void do_oop(oop* p); | 264 virtual void do_oop(oop* p); |
267 virtual void do_oop(narrowOop* p); | 265 virtual void do_oop(narrowOop* p); |
268 inline void do_oop_nv(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); } | 266 inline void do_oop_nv(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); } |
269 inline void do_oop_nv(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); } | 267 inline void do_oop_nv(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); } |
270 bool do_header() { return true; } | 268 |
271 // When ScanMarkedObjectsAgainClosure is used, | |
272 // it passes [Par_]MarkRefsIntoAndScanClosure to oop_oop_iterate(), | |
273 // and this delegation is used. | |
274 virtual const bool should_remember_klasses() const { | |
275 return _par_pushAndMarkClosure.should_remember_klasses(); | |
276 } | |
277 // See comment on should_remember_klasses() above. | |
278 virtual void remember_klass(Klass* k) { | |
279 _par_pushAndMarkClosure.remember_klass(k); | |
280 } | |
281 Prefetch::style prefetch_style() { | 269 Prefetch::style prefetch_style() { |
282 return Prefetch::do_read; | 270 return Prefetch::do_read; |
283 } | 271 } |
284 void trim_queue(uint size); | 272 void trim_queue(uint size); |
285 }; | 273 }; |
286 | 274 |
287 // This closure is used during the concurrent marking phase | 275 // This closure is used during the concurrent marking phase |
288 // following the first checkpoint. Its use is buried in | 276 // following the first checkpoint. Its use is buried in |
289 // the closure MarkFromRootsClosure. | 277 // the closure MarkFromRootsClosure. |
290 class PushOrMarkClosure: public KlassRememberingOopClosure { | 278 class PushOrMarkClosure: public CMSOopClosure { |
291 private: | 279 private: |
280 CMSCollector* _collector; | |
292 MemRegion _span; | 281 MemRegion _span; |
293 CMSBitMap* _bitMap; | 282 CMSBitMap* _bitMap; |
294 CMSMarkStack* _markStack; | 283 CMSMarkStack* _markStack; |
295 HeapWord* const _finger; | 284 HeapWord* const _finger; |
296 MarkFromRootsClosure* const | 285 MarkFromRootsClosure* const |
300 public: | 289 public: |
301 PushOrMarkClosure(CMSCollector* cms_collector, | 290 PushOrMarkClosure(CMSCollector* cms_collector, |
302 MemRegion span, | 291 MemRegion span, |
303 CMSBitMap* bitMap, | 292 CMSBitMap* bitMap, |
304 CMSMarkStack* markStack, | 293 CMSMarkStack* markStack, |
305 CMSMarkStack* revisitStack, | |
306 HeapWord* finger, | 294 HeapWord* finger, |
307 MarkFromRootsClosure* parent); | 295 MarkFromRootsClosure* parent); |
308 virtual void do_oop(oop* p); | 296 virtual void do_oop(oop* p); |
309 virtual void do_oop(narrowOop* p); | 297 virtual void do_oop(narrowOop* p); |
310 inline void do_oop_nv(oop* p) { PushOrMarkClosure::do_oop_work(p); } | 298 inline void do_oop_nv(oop* p) { PushOrMarkClosure::do_oop_work(p); } |
311 inline void do_oop_nv(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); } | 299 inline void do_oop_nv(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); } |
312 // In support of class unloading | |
313 virtual const bool should_remember_mdo() const { | |
314 return false; | |
315 // return _should_remember_klasses; | |
316 } | |
317 virtual void remember_mdo(DataLayout* v); | |
318 | 300 |
319 // Deal with a stack overflow condition | 301 // Deal with a stack overflow condition |
320 void handle_stack_overflow(HeapWord* lost); | 302 void handle_stack_overflow(HeapWord* lost); |
321 private: | 303 private: |
322 inline void do_yield_check(); | 304 inline void do_yield_check(); |
324 | 306 |
325 // A parallel (MT) version of the above. | 307 // A parallel (MT) version of the above. |
326 // This closure is used during the concurrent marking phase | 308 // This closure is used during the concurrent marking phase |
327 // following the first checkpoint. Its use is buried in | 309 // following the first checkpoint. Its use is buried in |
328 // the closure Par_MarkFromRootsClosure. | 310 // the closure Par_MarkFromRootsClosure. |
329 class Par_PushOrMarkClosure: public Par_KlassRememberingOopClosure { | 311 class Par_PushOrMarkClosure: public CMSOopClosure { |
330 private: | 312 private: |
313 CMSCollector* _collector; | |
331 MemRegion _whole_span; | 314 MemRegion _whole_span; |
332 MemRegion _span; // local chunk | 315 MemRegion _span; // local chunk |
333 CMSBitMap* _bit_map; | 316 CMSBitMap* _bit_map; |
334 OopTaskQueue* _work_queue; | 317 OopTaskQueue* _work_queue; |
335 CMSMarkStack* _overflow_stack; | 318 CMSMarkStack* _overflow_stack; |
343 Par_PushOrMarkClosure(CMSCollector* cms_collector, | 326 Par_PushOrMarkClosure(CMSCollector* cms_collector, |
344 MemRegion span, | 327 MemRegion span, |
345 CMSBitMap* bit_map, | 328 CMSBitMap* bit_map, |
346 OopTaskQueue* work_queue, | 329 OopTaskQueue* work_queue, |
347 CMSMarkStack* mark_stack, | 330 CMSMarkStack* mark_stack, |
348 CMSMarkStack* revisit_stack, | |
349 HeapWord* finger, | 331 HeapWord* finger, |
350 HeapWord** global_finger_addr, | 332 HeapWord** global_finger_addr, |
351 Par_MarkFromRootsClosure* parent); | 333 Par_MarkFromRootsClosure* parent); |
352 virtual void do_oop(oop* p); | 334 virtual void do_oop(oop* p); |
353 virtual void do_oop(narrowOop* p); | 335 virtual void do_oop(narrowOop* p); |
354 inline void do_oop_nv(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); } | 336 inline void do_oop_nv(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); } |
355 inline void do_oop_nv(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); } | 337 inline void do_oop_nv(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); } |
356 // In support of class unloading | |
357 virtual const bool should_remember_mdo() const { | |
358 return false; | |
359 // return _should_remember_klasses; | |
360 } | |
361 virtual void remember_mdo(DataLayout* v); | |
362 | 338 |
363 // Deal with a stack overflow condition | 339 // Deal with a stack overflow condition |
364 void handle_stack_overflow(HeapWord* lost); | 340 void handle_stack_overflow(HeapWord* lost); |
365 private: | 341 private: |
366 inline void do_yield_check(); | 342 inline void do_yield_check(); |
370 // given objects (transitively) as being reachable/live. | 346 // given objects (transitively) as being reachable/live. |
371 // This is currently used during the (weak) reference object | 347 // This is currently used during the (weak) reference object |
372 // processing phase of the CMS final checkpoint step, as | 348 // processing phase of the CMS final checkpoint step, as |
373 // well as during the concurrent precleaning of the discovered | 349 // well as during the concurrent precleaning of the discovered |
374 // reference lists. | 350 // reference lists. |
375 class CMSKeepAliveClosure: public KlassRememberingOopClosure { | 351 class CMSKeepAliveClosure: public CMSOopClosure { |
376 private: | 352 private: |
353 CMSCollector* _collector; | |
377 const MemRegion _span; | 354 const MemRegion _span; |
378 CMSMarkStack* _mark_stack; | 355 CMSMarkStack* _mark_stack; |
379 CMSBitMap* _bit_map; | 356 CMSBitMap* _bit_map; |
380 bool _concurrent_precleaning; | 357 bool _concurrent_precleaning; |
381 protected: | 358 protected: |
382 DO_OOP_WORK_DEFN | 359 DO_OOP_WORK_DEFN |
383 public: | 360 public: |
384 CMSKeepAliveClosure(CMSCollector* collector, MemRegion span, | 361 CMSKeepAliveClosure(CMSCollector* collector, MemRegion span, |
385 CMSBitMap* bit_map, CMSMarkStack* mark_stack, | 362 CMSBitMap* bit_map, CMSMarkStack* mark_stack, |
386 CMSMarkStack* revisit_stack, bool cpc); | 363 bool cpc); |
387 bool concurrent_precleaning() const { return _concurrent_precleaning; } | 364 bool concurrent_precleaning() const { return _concurrent_precleaning; } |
388 virtual void do_oop(oop* p); | 365 virtual void do_oop(oop* p); |
389 virtual void do_oop(narrowOop* p); | 366 virtual void do_oop(narrowOop* p); |
390 inline void do_oop_nv(oop* p) { CMSKeepAliveClosure::do_oop_work(p); } | 367 inline void do_oop_nv(oop* p) { CMSKeepAliveClosure::do_oop_work(p); } |
391 inline void do_oop_nv(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); } | 368 inline void do_oop_nv(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); } |
392 }; | 369 }; |
393 | 370 |
394 class CMSInnerParMarkAndPushClosure: public Par_KlassRememberingOopClosure { | 371 class CMSInnerParMarkAndPushClosure: public CMSOopClosure { |
395 private: | 372 private: |
373 CMSCollector* _collector; | |
396 MemRegion _span; | 374 MemRegion _span; |
397 OopTaskQueue* _work_queue; | 375 OopTaskQueue* _work_queue; |
398 CMSBitMap* _bit_map; | 376 CMSBitMap* _bit_map; |
399 protected: | 377 protected: |
400 DO_OOP_WORK_DEFN | 378 DO_OOP_WORK_DEFN |
401 public: | 379 public: |
402 CMSInnerParMarkAndPushClosure(CMSCollector* collector, | 380 CMSInnerParMarkAndPushClosure(CMSCollector* collector, |
403 MemRegion span, CMSBitMap* bit_map, | 381 MemRegion span, CMSBitMap* bit_map, |
404 CMSMarkStack* revisit_stack, | |
405 OopTaskQueue* work_queue); | 382 OopTaskQueue* work_queue); |
406 virtual void do_oop(oop* p); | 383 virtual void do_oop(oop* p); |
407 virtual void do_oop(narrowOop* p); | 384 virtual void do_oop(narrowOop* p); |
408 inline void do_oop_nv(oop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); } | 385 inline void do_oop_nv(oop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); } |
409 inline void do_oop_nv(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); } | 386 inline void do_oop_nv(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); } |
410 }; | 387 }; |
411 | 388 |
412 // A parallel (MT) version of the above, used when | 389 // A parallel (MT) version of the above, used when |
413 // reference processing is parallel; the only difference | 390 // reference processing is parallel; the only difference |
414 // is in the do_oop method. | 391 // is in the do_oop method. |
415 class CMSParKeepAliveClosure: public Par_KlassRememberingOopClosure { | 392 class CMSParKeepAliveClosure: public CMSOopClosure { |
416 private: | 393 private: |
417 MemRegion _span; | 394 MemRegion _span; |
418 OopTaskQueue* _work_queue; | 395 OopTaskQueue* _work_queue; |
419 CMSBitMap* _bit_map; | 396 CMSBitMap* _bit_map; |
420 CMSInnerParMarkAndPushClosure | 397 CMSInnerParMarkAndPushClosure |
423 void trim_queue(uint max); | 400 void trim_queue(uint max); |
424 protected: | 401 protected: |
425 DO_OOP_WORK_DEFN | 402 DO_OOP_WORK_DEFN |
426 public: | 403 public: |
427 CMSParKeepAliveClosure(CMSCollector* collector, MemRegion span, | 404 CMSParKeepAliveClosure(CMSCollector* collector, MemRegion span, |
428 CMSBitMap* bit_map, CMSMarkStack* revisit_stack, | 405 CMSBitMap* bit_map, OopTaskQueue* work_queue); |
429 OopTaskQueue* work_queue); | 406 virtual void do_oop(oop* p); |
430 virtual void do_oop(oop* p); | 407 virtual void do_oop(narrowOop* p); |
431 virtual void do_oop(narrowOop* p); | |
432 inline void do_oop_nv(oop* p) { CMSParKeepAliveClosure::do_oop_work(p); } | |
433 inline void do_oop_nv(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); } | |
434 }; | 408 }; |
435 | 409 |
436 #endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSOOPCLOSURES_HPP | 410 #endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSOOPCLOSURES_HPP |