comparison src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp @ 113:ba764ed4b6f2

6420645: Create a vm that uses compressed oops for up to 32gb heapsizes Summary: Compressed oops in instances, arrays, and headers. Code contributors are coleenp, phh, never, swamyv Reviewed-by: jmasa, kamg, acorn, tbell, kvn, rasbold
author coleenp
date Sun, 13 Apr 2008 17:43:42 -0400
parents a61af66fc99e
children b5489bb705c9
comparison
equal deleted inserted replaced
110:a49a647afe9a 113:ba764ed4b6f2
27 ///////////////////////////////////////////////////////////////// 27 /////////////////////////////////////////////////////////////////
28 class ConcurrentMarkSweepGeneration; 28 class ConcurrentMarkSweepGeneration;
29 class CMSBitMap; 29 class CMSBitMap;
30 class CMSMarkStack; 30 class CMSMarkStack;
31 class CMSCollector; 31 class CMSCollector;
32 template<class E> class GenericTaskQueue;
33 typedef GenericTaskQueue<oop> OopTaskQueue;
34 template<class E> class GenericTaskQueueSet;
35 typedef GenericTaskQueueSet<oop> OopTaskQueueSet;
36 class MarkFromRootsClosure; 32 class MarkFromRootsClosure;
37 class Par_MarkFromRootsClosure; 33 class Par_MarkFromRootsClosure;
38 34
35 // Decode the oop and call do_oop on it.
36 #define DO_OOP_WORK_DEFN \
37 void do_oop(oop obj); \
38 template <class T> inline void do_oop_work(T* p) { \
39 T heap_oop = oopDesc::load_heap_oop(p); \
40 if (!oopDesc::is_null(heap_oop)) { \
41 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); \
42 do_oop(obj); \
43 } \
44 }
45
39 class MarkRefsIntoClosure: public OopsInGenClosure { 46 class MarkRefsIntoClosure: public OopsInGenClosure {
40 const MemRegion _span; 47 private:
41 CMSBitMap* _bitMap; 48 const MemRegion _span;
42 const bool _should_do_nmethods; 49 CMSBitMap* _bitMap;
50 const bool _should_do_nmethods;
51 protected:
52 DO_OOP_WORK_DEFN
43 public: 53 public:
44 MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap, 54 MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap,
45 bool should_do_nmethods); 55 bool should_do_nmethods);
46 void do_oop(oop* p); 56 virtual void do_oop(oop* p);
47 void do_oop_nv(oop* p) { MarkRefsIntoClosure::do_oop(p); } 57 virtual void do_oop(narrowOop* p);
58 inline void do_oop_nv(oop* p) { MarkRefsIntoClosure::do_oop_work(p); }
59 inline void do_oop_nv(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
48 bool do_header() { return true; } 60 bool do_header() { return true; }
49 virtual const bool do_nmethods() const { 61 virtual const bool do_nmethods() const {
50 return _should_do_nmethods; 62 return _should_do_nmethods;
51 } 63 }
52 Prefetch::style prefetch_style() { 64 Prefetch::style prefetch_style() {
55 }; 67 };
56 68
57 // A variant of the above used in certain kinds of CMS 69 // A variant of the above used in certain kinds of CMS
58 // marking verification. 70 // marking verification.
59 class MarkRefsIntoVerifyClosure: public OopsInGenClosure { 71 class MarkRefsIntoVerifyClosure: public OopsInGenClosure {
60 const MemRegion _span; 72 private:
61 CMSBitMap* _verification_bm; 73 const MemRegion _span;
62 CMSBitMap* _cms_bm; 74 CMSBitMap* _verification_bm;
63 const bool _should_do_nmethods; 75 CMSBitMap* _cms_bm;
76 const bool _should_do_nmethods;
77 protected:
78 DO_OOP_WORK_DEFN
64 public: 79 public:
65 MarkRefsIntoVerifyClosure(MemRegion span, CMSBitMap* verification_bm, 80 MarkRefsIntoVerifyClosure(MemRegion span, CMSBitMap* verification_bm,
66 CMSBitMap* cms_bm, bool should_do_nmethods); 81 CMSBitMap* cms_bm, bool should_do_nmethods);
67 void do_oop(oop* p); 82 virtual void do_oop(oop* p);
68 void do_oop_nv(oop* p) { MarkRefsIntoVerifyClosure::do_oop(p); } 83 virtual void do_oop(narrowOop* p);
84 inline void do_oop_nv(oop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
85 inline void do_oop_nv(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
69 bool do_header() { return true; } 86 bool do_header() { return true; }
70 virtual const bool do_nmethods() const { 87 virtual const bool do_nmethods() const {
71 return _should_do_nmethods; 88 return _should_do_nmethods;
72 } 89 }
73 Prefetch::style prefetch_style() { 90 Prefetch::style prefetch_style() {
74 return Prefetch::do_read; 91 return Prefetch::do_read;
75 } 92 }
76 }; 93 };
77 94
78
79 // The non-parallel version (the parallel version appears further below). 95 // The non-parallel version (the parallel version appears further below).
80 class PushAndMarkClosure: public OopClosure { 96 class PushAndMarkClosure: public OopClosure {
81 CMSCollector* _collector; 97 private:
82 MemRegion _span; 98 CMSCollector* _collector;
83 CMSBitMap* _bit_map; 99 MemRegion _span;
84 CMSBitMap* _mod_union_table; 100 CMSBitMap* _bit_map;
85 CMSMarkStack* _mark_stack; 101 CMSBitMap* _mod_union_table;
86 CMSMarkStack* _revisit_stack; 102 CMSMarkStack* _mark_stack;
87 bool _concurrent_precleaning; 103 CMSMarkStack* _revisit_stack;
88 bool const _should_remember_klasses; 104 bool _concurrent_precleaning;
105 bool const _should_remember_klasses;
106 protected:
107 DO_OOP_WORK_DEFN
89 public: 108 public:
90 PushAndMarkClosure(CMSCollector* collector, 109 PushAndMarkClosure(CMSCollector* collector,
91 MemRegion span, 110 MemRegion span,
92 ReferenceProcessor* rp, 111 ReferenceProcessor* rp,
93 CMSBitMap* bit_map, 112 CMSBitMap* bit_map,
94 CMSBitMap* mod_union_table, 113 CMSBitMap* mod_union_table,
95 CMSMarkStack* mark_stack, 114 CMSMarkStack* mark_stack,
96 CMSMarkStack* revisit_stack, 115 CMSMarkStack* revisit_stack,
97 bool concurrent_precleaning); 116 bool concurrent_precleaning);
98 117 virtual void do_oop(oop* p);
99 void do_oop(oop* p); 118 virtual void do_oop(narrowOop* p);
100 void do_oop_nv(oop* p) { PushAndMarkClosure::do_oop(p); } 119 inline void do_oop_nv(oop* p) { PushAndMarkClosure::do_oop_work(p); }
101 bool do_header() { return true; } 120 inline void do_oop_nv(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
102 Prefetch::style prefetch_style() { 121 bool do_header() { return true; }
103 return Prefetch::do_read; 122 Prefetch::style prefetch_style() {
104 } 123 return Prefetch::do_read;
105 const bool should_remember_klasses() const { 124 }
125 virtual const bool should_remember_klasses() const {
106 return _should_remember_klasses; 126 return _should_remember_klasses;
107 } 127 }
108 void remember_klass(Klass* k); 128 virtual void remember_klass(Klass* k);
109 }; 129 };
110 130
111 // In the parallel case, the revisit stack, the bit map and the 131 // In the parallel case, the revisit stack, the bit map and the
112 // reference processor are currently all shared. Access to 132 // reference processor are currently all shared. Access to
113 // these shared mutable structures must use appropriate 133 // these shared mutable structures must use appropriate
114 // synchronization (for instance, via CAS). The marking stack 134 // synchronization (for instance, via CAS). The marking stack
115 // used in the non-parallel case above is here replaced with 135 // used in the non-parallel case above is here replaced with
116 // an OopTaskQueue structure to allow efficient work stealing. 136 // an OopTaskQueue structure to allow efficient work stealing.
117 class Par_PushAndMarkClosure: public OopClosure { 137 class Par_PushAndMarkClosure: public OopClosure {
118 CMSCollector* _collector; 138 private:
119 MemRegion _span; 139 CMSCollector* _collector;
120 CMSBitMap* _bit_map; 140 MemRegion _span;
121 OopTaskQueue* _work_queue; 141 CMSBitMap* _bit_map;
122 CMSMarkStack* _revisit_stack; 142 OopTaskQueue* _work_queue;
123 bool const _should_remember_klasses; 143 CMSMarkStack* _revisit_stack;
144 bool const _should_remember_klasses;
145 protected:
146 DO_OOP_WORK_DEFN
124 public: 147 public:
125 Par_PushAndMarkClosure(CMSCollector* collector, 148 Par_PushAndMarkClosure(CMSCollector* collector,
126 MemRegion span, 149 MemRegion span,
127 ReferenceProcessor* rp, 150 ReferenceProcessor* rp,
128 CMSBitMap* bit_map, 151 CMSBitMap* bit_map,
129 OopTaskQueue* work_queue, 152 OopTaskQueue* work_queue,
130 CMSMarkStack* revisit_stack); 153 CMSMarkStack* revisit_stack);
131 154 virtual void do_oop(oop* p);
132 void do_oop(oop* p); 155 virtual void do_oop(narrowOop* p);
133 void do_oop_nv(oop* p) { Par_PushAndMarkClosure::do_oop(p); } 156 inline void do_oop_nv(oop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
134 bool do_header() { return true; } 157 inline void do_oop_nv(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
135 Prefetch::style prefetch_style() { 158 bool do_header() { return true; }
136 return Prefetch::do_read; 159 Prefetch::style prefetch_style() {
137 } 160 return Prefetch::do_read;
138 const bool should_remember_klasses() const { 161 }
162 virtual const bool should_remember_klasses() const {
139 return _should_remember_klasses; 163 return _should_remember_klasses;
140 } 164 }
141 void remember_klass(Klass* k); 165 virtual void remember_klass(Klass* k);
142 }; 166 };
143
144 167
145 // The non-parallel version (the parallel version appears further below). 168 // The non-parallel version (the parallel version appears further below).
146 class MarkRefsIntoAndScanClosure: public OopsInGenClosure { 169 class MarkRefsIntoAndScanClosure: public OopsInGenClosure {
147 MemRegion _span; 170 private:
148 CMSBitMap* _bit_map; 171 MemRegion _span;
149 CMSMarkStack* _mark_stack; 172 CMSBitMap* _bit_map;
150 PushAndMarkClosure _pushAndMarkClosure; 173 CMSMarkStack* _mark_stack;
151 CMSCollector* _collector; 174 PushAndMarkClosure _pushAndMarkClosure;
152 bool _yield; 175 CMSCollector* _collector;
176 Mutex* _freelistLock;
177 bool _yield;
153 // Whether closure is being used for concurrent precleaning 178 // Whether closure is being used for concurrent precleaning
154 bool _concurrent_precleaning; 179 bool _concurrent_precleaning;
155 Mutex* _freelistLock; 180 protected:
181 DO_OOP_WORK_DEFN
156 public: 182 public:
157 MarkRefsIntoAndScanClosure(MemRegion span, 183 MarkRefsIntoAndScanClosure(MemRegion span,
158 ReferenceProcessor* rp, 184 ReferenceProcessor* rp,
159 CMSBitMap* bit_map, 185 CMSBitMap* bit_map,
160 CMSBitMap* mod_union_table, 186 CMSBitMap* mod_union_table,
161 CMSMarkStack* mark_stack, 187 CMSMarkStack* mark_stack,
162 CMSMarkStack* revisit_stack, 188 CMSMarkStack* revisit_stack,
163 CMSCollector* collector, 189 CMSCollector* collector,
164 bool should_yield, 190 bool should_yield,
165 bool concurrent_precleaning); 191 bool concurrent_precleaning);
166 void do_oop(oop* p); 192 virtual void do_oop(oop* p);
167 void do_oop_nv(oop* p) { MarkRefsIntoAndScanClosure::do_oop(p); } 193 virtual void do_oop(narrowOop* p);
194 inline void do_oop_nv(oop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
195 inline void do_oop_nv(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
168 bool do_header() { return true; } 196 bool do_header() { return true; }
169 virtual const bool do_nmethods() const { return true; } 197 virtual const bool do_nmethods() const { return true; }
170 Prefetch::style prefetch_style() { 198 Prefetch::style prefetch_style() {
171 return Prefetch::do_read; 199 return Prefetch::do_read;
172 } 200 }
183 // In this, the parallel avatar of MarkRefsIntoAndScanClosure, the revisit 211 // In this, the parallel avatar of MarkRefsIntoAndScanClosure, the revisit
184 // stack and the bitMap are shared, so access needs to be suitably 212 // stack and the bitMap are shared, so access needs to be suitably
185 // synchronized. An OopTaskQueue structure, supporting efficient 213 // synchronized. An OopTaskQueue structure, supporting efficient
186 // workstealing, replaces a CMSMarkStack for storing grey objects. 214 // workstealing, replaces a CMSMarkStack for storing grey objects.
187 class Par_MarkRefsIntoAndScanClosure: public OopsInGenClosure { 215 class Par_MarkRefsIntoAndScanClosure: public OopsInGenClosure {
188 MemRegion _span; 216 private:
189 CMSBitMap* _bit_map; 217 MemRegion _span;
190 OopTaskQueue* _work_queue; 218 CMSBitMap* _bit_map;
191 const uint _low_water_mark; 219 OopTaskQueue* _work_queue;
192 Par_PushAndMarkClosure _par_pushAndMarkClosure; 220 const uint _low_water_mark;
221 Par_PushAndMarkClosure _par_pushAndMarkClosure;
222 protected:
223 DO_OOP_WORK_DEFN
193 public: 224 public:
194 Par_MarkRefsIntoAndScanClosure(CMSCollector* collector, 225 Par_MarkRefsIntoAndScanClosure(CMSCollector* collector,
195 MemRegion span, 226 MemRegion span,
196 ReferenceProcessor* rp, 227 ReferenceProcessor* rp,
197 CMSBitMap* bit_map, 228 CMSBitMap* bit_map,
198 OopTaskQueue* work_queue, 229 OopTaskQueue* work_queue,
199 CMSMarkStack* revisit_stack); 230 CMSMarkStack* revisit_stack);
200 void do_oop(oop* p); 231 virtual void do_oop(oop* p);
201 void do_oop_nv(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop(p); } 232 virtual void do_oop(narrowOop* p);
233 inline void do_oop_nv(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
234 inline void do_oop_nv(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
202 bool do_header() { return true; } 235 bool do_header() { return true; }
203 virtual const bool do_nmethods() const { return true; } 236 virtual const bool do_nmethods() const { return true; }
204 Prefetch::style prefetch_style() { 237 Prefetch::style prefetch_style() {
205 return Prefetch::do_read; 238 return Prefetch::do_read;
206 } 239 }
209 242
210 // This closure is used during the concurrent marking phase 243 // This closure is used during the concurrent marking phase
211 // following the first checkpoint. Its use is buried in 244 // following the first checkpoint. Its use is buried in
212 // the closure MarkFromRootsClosure. 245 // the closure MarkFromRootsClosure.
213 class PushOrMarkClosure: public OopClosure { 246 class PushOrMarkClosure: public OopClosure {
214 CMSCollector* _collector; 247 private:
215 MemRegion _span; 248 CMSCollector* _collector;
216 CMSBitMap* _bitMap; 249 MemRegion _span;
217 CMSMarkStack* _markStack; 250 CMSBitMap* _bitMap;
218 CMSMarkStack* _revisitStack; 251 CMSMarkStack* _markStack;
219 HeapWord* const _finger; 252 CMSMarkStack* _revisitStack;
220 MarkFromRootsClosure* const _parent; 253 HeapWord* const _finger;
221 bool const _should_remember_klasses; 254 MarkFromRootsClosure* const
255 _parent;
256 bool const _should_remember_klasses;
257 protected:
258 DO_OOP_WORK_DEFN
222 public: 259 public:
223 PushOrMarkClosure(CMSCollector* cms_collector, 260 PushOrMarkClosure(CMSCollector* cms_collector,
224 MemRegion span, 261 MemRegion span,
225 CMSBitMap* bitMap, 262 CMSBitMap* bitMap,
226 CMSMarkStack* markStack, 263 CMSMarkStack* markStack,
227 CMSMarkStack* revisitStack, 264 CMSMarkStack* revisitStack,
228 HeapWord* finger, 265 HeapWord* finger,
229 MarkFromRootsClosure* parent); 266 MarkFromRootsClosure* parent);
230 void do_oop(oop* p); 267 virtual void do_oop(oop* p);
231 void do_oop_nv(oop* p) { PushOrMarkClosure::do_oop(p); } 268 virtual void do_oop(narrowOop* p);
232 const bool should_remember_klasses() const { 269 inline void do_oop_nv(oop* p) { PushOrMarkClosure::do_oop_work(p); }
270 inline void do_oop_nv(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
271 virtual const bool should_remember_klasses() const {
233 return _should_remember_klasses; 272 return _should_remember_klasses;
234 } 273 }
235 void remember_klass(Klass* k); 274 virtual void remember_klass(Klass* k);
236 // Deal with a stack overflow condition 275 // Deal with a stack overflow condition
237 void handle_stack_overflow(HeapWord* lost); 276 void handle_stack_overflow(HeapWord* lost);
238 private: 277 private:
239 inline void do_yield_check(); 278 inline void do_yield_check();
240 }; 279 };
242 // A parallel (MT) version of the above. 281 // A parallel (MT) version of the above.
243 // This closure is used during the concurrent marking phase 282 // This closure is used during the concurrent marking phase
244 // following the first checkpoint. Its use is buried in 283 // following the first checkpoint. Its use is buried in
245 // the closure Par_MarkFromRootsClosure. 284 // the closure Par_MarkFromRootsClosure.
246 class Par_PushOrMarkClosure: public OopClosure { 285 class Par_PushOrMarkClosure: public OopClosure {
286 private:
247 CMSCollector* _collector; 287 CMSCollector* _collector;
248 MemRegion _whole_span; 288 MemRegion _whole_span;
249 MemRegion _span; // local chunk 289 MemRegion _span; // local chunk
250 CMSBitMap* _bit_map; 290 CMSBitMap* _bit_map;
251 OopTaskQueue* _work_queue; 291 OopTaskQueue* _work_queue;
252 CMSMarkStack* _overflow_stack; 292 CMSMarkStack* _overflow_stack;
253 CMSMarkStack* _revisit_stack; 293 CMSMarkStack* _revisit_stack;
254 HeapWord* const _finger; 294 HeapWord* const _finger;
255 HeapWord** const _global_finger_addr; 295 HeapWord** const _global_finger_addr;
256 Par_MarkFromRootsClosure* const _parent; 296 Par_MarkFromRootsClosure* const
257 bool const _should_remember_klasses; 297 _parent;
298 bool const _should_remember_klasses;
299 protected:
300 DO_OOP_WORK_DEFN
258 public: 301 public:
259 Par_PushOrMarkClosure(CMSCollector* cms_collector, 302 Par_PushOrMarkClosure(CMSCollector* cms_collector,
260 MemRegion span, 303 MemRegion span,
261 CMSBitMap* bit_map, 304 CMSBitMap* bit_map,
262 OopTaskQueue* work_queue, 305 OopTaskQueue* work_queue,
263 CMSMarkStack* mark_stack, 306 CMSMarkStack* mark_stack,
264 CMSMarkStack* revisit_stack, 307 CMSMarkStack* revisit_stack,
265 HeapWord* finger, 308 HeapWord* finger,
266 HeapWord** global_finger_addr, 309 HeapWord** global_finger_addr,
267 Par_MarkFromRootsClosure* parent); 310 Par_MarkFromRootsClosure* parent);
268 void do_oop(oop* p); 311 virtual void do_oop(oop* p);
269 void do_oop_nv(oop* p) { Par_PushOrMarkClosure::do_oop(p); } 312 virtual void do_oop(narrowOop* p);
270 const bool should_remember_klasses() const { 313 inline void do_oop_nv(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
314 inline void do_oop_nv(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
315 virtual const bool should_remember_klasses() const {
271 return _should_remember_klasses; 316 return _should_remember_klasses;
272 } 317 }
273 void remember_klass(Klass* k); 318 virtual void remember_klass(Klass* k);
274 // Deal with a stack overflow condition 319 // Deal with a stack overflow condition
275 void handle_stack_overflow(HeapWord* lost); 320 void handle_stack_overflow(HeapWord* lost);
276 private: 321 private:
277 inline void do_yield_check(); 322 inline void do_yield_check();
278 }; 323 };
280 // For objects in CMS generation, this closure marks 325 // For objects in CMS generation, this closure marks
281 // given objects (transitively) as being reachable/live. 326 // given objects (transitively) as being reachable/live.
282 // This is currently used during the (weak) reference object 327 // This is currently used during the (weak) reference object
283 // processing phase of the CMS final checkpoint step. 328 // processing phase of the CMS final checkpoint step.
284 class CMSKeepAliveClosure: public OopClosure { 329 class CMSKeepAliveClosure: public OopClosure {
330 private:
285 CMSCollector* _collector; 331 CMSCollector* _collector;
286 MemRegion _span; 332 MemRegion _span;
287 CMSMarkStack* _mark_stack; 333 CMSMarkStack* _mark_stack;
288 CMSBitMap* _bit_map; 334 CMSBitMap* _bit_map;
335 protected:
336 DO_OOP_WORK_DEFN
289 public: 337 public:
290 CMSKeepAliveClosure(CMSCollector* collector, MemRegion span, 338 CMSKeepAliveClosure(CMSCollector* collector, MemRegion span,
291 CMSBitMap* bit_map, CMSMarkStack* mark_stack): 339 CMSBitMap* bit_map, CMSMarkStack* mark_stack):
292 _collector(collector), 340 _collector(collector),
293 _span(span), 341 _span(span),
294 _bit_map(bit_map), 342 _bit_map(bit_map),
295 _mark_stack(mark_stack) { } 343 _mark_stack(mark_stack) { }
296 344 virtual void do_oop(oop* p);
297 void do_oop(oop* p); 345 virtual void do_oop(narrowOop* p);
298 void do_oop_nv(oop* p) { CMSKeepAliveClosure::do_oop(p); } 346 inline void do_oop_nv(oop* p) { CMSKeepAliveClosure::do_oop_work(p); }
347 inline void do_oop_nv(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
299 }; 348 };
300 349
301 class CMSInnerParMarkAndPushClosure: public OopClosure { 350 class CMSInnerParMarkAndPushClosure: public OopClosure {
351 private:
302 CMSCollector* _collector; 352 CMSCollector* _collector;
303 MemRegion _span; 353 MemRegion _span;
304 OopTaskQueue* _work_queue; 354 OopTaskQueue* _work_queue;
305 CMSBitMap* _bit_map; 355 CMSBitMap* _bit_map;
356 protected:
357 DO_OOP_WORK_DEFN
306 public: 358 public:
307 CMSInnerParMarkAndPushClosure(CMSCollector* collector, 359 CMSInnerParMarkAndPushClosure(CMSCollector* collector,
308 MemRegion span, CMSBitMap* bit_map, 360 MemRegion span, CMSBitMap* bit_map,
309 OopTaskQueue* work_queue): 361 OopTaskQueue* work_queue):
310 _collector(collector), 362 _collector(collector),
311 _span(span), 363 _span(span),
312 _bit_map(bit_map), 364 _bit_map(bit_map),
313 _work_queue(work_queue) { } 365 _work_queue(work_queue) { }
314 void do_oop(oop* p); 366 virtual void do_oop(oop* p);
315 void do_oop_nv(oop* p) { CMSInnerParMarkAndPushClosure::do_oop(p); } 367 virtual void do_oop(narrowOop* p);
368 inline void do_oop_nv(oop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
369 inline void do_oop_nv(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
316 }; 370 };
317 371
318 // A parallel (MT) version of the above, used when 372 // A parallel (MT) version of the above, used when
319 // reference processing is parallel; the only difference 373 // reference processing is parallel; the only difference
320 // is in the do_oop method. 374 // is in the do_oop method.
321 class CMSParKeepAliveClosure: public OopClosure { 375 class CMSParKeepAliveClosure: public OopClosure {
376 private:
322 CMSCollector* _collector; 377 CMSCollector* _collector;
323 MemRegion _span; 378 MemRegion _span;
324 OopTaskQueue* _work_queue; 379 OopTaskQueue* _work_queue;
325 CMSBitMap* _bit_map; 380 CMSBitMap* _bit_map;
326 CMSInnerParMarkAndPushClosure _mark_and_push; 381 CMSInnerParMarkAndPushClosure
382 _mark_and_push;
327 const uint _low_water_mark; 383 const uint _low_water_mark;
328 void trim_queue(uint max); 384 void trim_queue(uint max);
385 protected:
386 DO_OOP_WORK_DEFN
329 public: 387 public:
330 CMSParKeepAliveClosure(CMSCollector* collector, MemRegion span, 388 CMSParKeepAliveClosure(CMSCollector* collector, MemRegion span,
331 CMSBitMap* bit_map, OopTaskQueue* work_queue); 389 CMSBitMap* bit_map, OopTaskQueue* work_queue);
332 void do_oop(oop* p); 390 virtual void do_oop(oop* p);
333 void do_oop_nv(oop* p) { CMSParKeepAliveClosure::do_oop(p); } 391 virtual void do_oop(narrowOop* p);
334 }; 392 inline void do_oop_nv(oop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
393 inline void do_oop_nv(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
394 };