Mercurial > hg > truffle
comparison src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp @ 1706:9d7a8ab3736b
6962589: remove breadth first scanning code from parallel gc
Summary: Remove the breadth-first copying order from ParallelScavenge and use depth-first by default.
Reviewed-by: jcoomes, ysr, johnc
author: tonyp
date: Thu, 22 Jul 2010 10:27:41 -0400
parents: a93a9eda13f7
children: 894b1d7c7e01
comparison legend: equal | deleted | inserted | replaced
1705:2d160770d2e5 | 1706:9d7a8ab3736b |
---|---|
25 #include "incls/_precompiled.incl" | 25 #include "incls/_precompiled.incl" |
26 #include "incls/_psPromotionManager.cpp.incl" | 26 #include "incls/_psPromotionManager.cpp.incl" |
27 | 27 |
28 PSPromotionManager** PSPromotionManager::_manager_array = NULL; | 28 PSPromotionManager** PSPromotionManager::_manager_array = NULL; |
29 OopStarTaskQueueSet* PSPromotionManager::_stack_array_depth = NULL; | 29 OopStarTaskQueueSet* PSPromotionManager::_stack_array_depth = NULL; |
30 OopTaskQueueSet* PSPromotionManager::_stack_array_breadth = NULL; | |
31 PSOldGen* PSPromotionManager::_old_gen = NULL; | 30 PSOldGen* PSPromotionManager::_old_gen = NULL; |
32 MutableSpace* PSPromotionManager::_young_space = NULL; | 31 MutableSpace* PSPromotionManager::_young_space = NULL; |
33 | 32 |
34 void PSPromotionManager::initialize() { | 33 void PSPromotionManager::initialize() { |
35 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); | 34 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); |
40 | 39 |
41 assert(_manager_array == NULL, "Attempt to initialize twice"); | 40 assert(_manager_array == NULL, "Attempt to initialize twice"); |
42 _manager_array = NEW_C_HEAP_ARRAY(PSPromotionManager*, ParallelGCThreads+1 ); | 41 _manager_array = NEW_C_HEAP_ARRAY(PSPromotionManager*, ParallelGCThreads+1 ); |
43 guarantee(_manager_array != NULL, "Could not initialize promotion manager"); | 42 guarantee(_manager_array != NULL, "Could not initialize promotion manager"); |
44 | 43 |
45 if (UseDepthFirstScavengeOrder) { | 44 _stack_array_depth = new OopStarTaskQueueSet(ParallelGCThreads); |
46 _stack_array_depth = new OopStarTaskQueueSet(ParallelGCThreads); | 45 guarantee(_stack_array_depth != NULL, "Cound not initialize promotion manager"); |
47 guarantee(_stack_array_depth != NULL, "Count not initialize promotion manager"); | |
48 } else { | |
49 _stack_array_breadth = new OopTaskQueueSet(ParallelGCThreads); | |
50 guarantee(_stack_array_breadth != NULL, "Count not initialize promotion manager"); | |
51 } | |
52 | 46 |
53 // Create and register the PSPromotionManager(s) for the worker threads. | 47 // Create and register the PSPromotionManager(s) for the worker threads. |
54 for(uint i=0; i<ParallelGCThreads; i++) { | 48 for(uint i=0; i<ParallelGCThreads; i++) { |
55 _manager_array[i] = new PSPromotionManager(); | 49 _manager_array[i] = new PSPromotionManager(); |
56 guarantee(_manager_array[i] != NULL, "Could not create PSPromotionManager"); | 50 guarantee(_manager_array[i] != NULL, "Could not create PSPromotionManager"); |
57 if (UseDepthFirstScavengeOrder) { | 51 stack_array_depth()->register_queue(i, _manager_array[i]->claimed_stack_depth()); |
58 stack_array_depth()->register_queue(i, _manager_array[i]->claimed_stack_depth()); | |
59 } else { | |
60 stack_array_breadth()->register_queue(i, _manager_array[i]->claimed_stack_breadth()); | |
61 } | |
62 } | 52 } |
63 | 53 |
64 // The VMThread gets its own PSPromotionManager, which is not available | 54 // The VMThread gets its own PSPromotionManager, which is not available |
65 // for work stealing. | 55 // for work stealing. |
66 _manager_array[ParallelGCThreads] = new PSPromotionManager(); | 56 _manager_array[ParallelGCThreads] = new PSPromotionManager(); |
91 | 81 |
92 void PSPromotionManager::post_scavenge() { | 82 void PSPromotionManager::post_scavenge() { |
93 TASKQUEUE_STATS_ONLY(if (PrintGCDetails && ParallelGCVerbose) print_stats()); | 83 TASKQUEUE_STATS_ONLY(if (PrintGCDetails && ParallelGCVerbose) print_stats()); |
94 for (uint i = 0; i < ParallelGCThreads + 1; i++) { | 84 for (uint i = 0; i < ParallelGCThreads + 1; i++) { |
95 PSPromotionManager* manager = manager_array(i); | 85 PSPromotionManager* manager = manager_array(i); |
96 if (UseDepthFirstScavengeOrder) { | 86 assert(manager->claimed_stack_depth()->is_empty(), "should be empty"); |
97 assert(manager->claimed_stack_depth()->is_empty(), "should be empty"); | |
98 } else { | |
99 assert(manager->claimed_stack_breadth()->is_empty(), "should be empty"); | |
100 } | |
101 manager->flush_labs(); | 87 manager->flush_labs(); |
102 } | 88 } |
103 } | 89 } |
104 | 90 |
105 #if TASKQUEUE_STATS | 91 #if TASKQUEUE_STATS |
106 void | 92 void |
107 PSPromotionManager::print_taskqueue_stats(uint i) const { | 93 PSPromotionManager::print_taskqueue_stats(uint i) const { |
108 const TaskQueueStats& stats = depth_first() ? | |
109 _claimed_stack_depth.stats : _claimed_stack_breadth.stats; | |
110 tty->print("%3u ", i); | 94 tty->print("%3u ", i); |
111 stats.print(); | 95 _claimed_stack_depth.stats.print(); |
112 tty->cr(); | 96 tty->cr(); |
113 } | 97 } |
114 | 98 |
115 void | 99 void |
116 PSPromotionManager::print_local_stats(uint i) const { | 100 PSPromotionManager::print_local_stats(uint i) const { |
126 "--- ---------- ---------- ---------- ----------" | 110 "--- ---------- ---------- ---------- ----------" |
127 }; | 111 }; |
128 | 112 |
129 void | 113 void |
130 PSPromotionManager::print_stats() { | 114 PSPromotionManager::print_stats() { |
131 const bool df = UseDepthFirstScavengeOrder; | 115 tty->print_cr("== GC Tasks Stats, GC %3d", |
132 tty->print_cr("== GC Task Stats (%s-First), GC %3d", df ? "Depth" : "Breadth", | |
133 Universe::heap()->total_collections()); | 116 Universe::heap()->total_collections()); |
134 | 117 |
135 tty->print("thr "); TaskQueueStats::print_header(1); tty->cr(); | 118 tty->print("thr "); TaskQueueStats::print_header(1); tty->cr(); |
136 tty->print("--- "); TaskQueueStats::print_header(2); tty->cr(); | 119 tty->print("--- "); TaskQueueStats::print_header(2); tty->cr(); |
137 for (uint i = 0; i < ParallelGCThreads + 1; ++i) { | 120 for (uint i = 0; i < ParallelGCThreads + 1; ++i) { |
145 } | 128 } |
146 } | 129 } |
147 | 130 |
148 void | 131 void |
149 PSPromotionManager::reset_stats() { | 132 PSPromotionManager::reset_stats() { |
150 TaskQueueStats& stats = depth_first() ? | 133 claimed_stack_depth()->stats.reset(); |
151 claimed_stack_depth()->stats : claimed_stack_breadth()->stats; | |
152 stats.reset(); | |
153 _masked_pushes = _masked_steals = 0; | 134 _masked_pushes = _masked_steals = 0; |
154 _arrays_chunked = _array_chunks_processed = 0; | 135 _arrays_chunked = _array_chunks_processed = 0; |
155 } | 136 } |
156 #endif // TASKQUEUE_STATS | 137 #endif // TASKQUEUE_STATS |
157 | 138 |
158 PSPromotionManager::PSPromotionManager() { | 139 PSPromotionManager::PSPromotionManager() { |
159 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); | 140 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); |
160 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); | 141 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); |
161 _depth_first = UseDepthFirstScavengeOrder; | |
162 | 142 |
163 // We set the old lab's start array. | 143 // We set the old lab's start array. |
164 _old_lab.set_start_array(old_gen()->start_array()); | 144 _old_lab.set_start_array(old_gen()->start_array()); |
165 | 145 |
166 uint queue_size; | 146 uint queue_size; |
167 if (depth_first()) { | 147 claimed_stack_depth()->initialize(); |
168 claimed_stack_depth()->initialize(); | 148 queue_size = claimed_stack_depth()->max_elems(); |
169 queue_size = claimed_stack_depth()->max_elems(); | |
170 } else { | |
171 claimed_stack_breadth()->initialize(); | |
172 queue_size = claimed_stack_breadth()->max_elems(); | |
173 } | |
174 | 149 |
175 _totally_drain = (ParallelGCThreads == 1) || (GCDrainStackTargetSize == 0); | 150 _totally_drain = (ParallelGCThreads == 1) || (GCDrainStackTargetSize == 0); |
176 if (_totally_drain) { | 151 if (_totally_drain) { |
177 _target_stack_size = 0; | 152 _target_stack_size = 0; |
178 } else { | 153 } else { |
203 | 178 |
204 lab_base = old_gen()->object_space()->top(); | 179 lab_base = old_gen()->object_space()->top(); |
205 _old_lab.initialize(MemRegion(lab_base, (size_t)0)); | 180 _old_lab.initialize(MemRegion(lab_base, (size_t)0)); |
206 _old_gen_is_full = false; | 181 _old_gen_is_full = false; |
207 | 182 |
208 _prefetch_queue.clear(); | |
209 | |
210 TASKQUEUE_STATS_ONLY(reset_stats()); | 183 TASKQUEUE_STATS_ONLY(reset_stats()); |
211 } | 184 } |
212 | 185 |
213 | 186 |
214 void PSPromotionManager::drain_stacks_depth(bool totally_drain) { | 187 void PSPromotionManager::drain_stacks_depth(bool totally_drain) { |
215 assert(depth_first(), "invariant"); | |
216 assert(claimed_stack_depth()->overflow_stack() != NULL, "invariant"); | 188 assert(claimed_stack_depth()->overflow_stack() != NULL, "invariant"); |
217 totally_drain = totally_drain || _totally_drain; | 189 totally_drain = totally_drain || _totally_drain; |
218 | 190 |
219 #ifdef ASSERT | 191 #ifdef ASSERT |
220 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); | 192 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); |
248 assert(!totally_drain || tq->taskqueue_empty(), "Sanity"); | 220 assert(!totally_drain || tq->taskqueue_empty(), "Sanity"); |
249 assert(totally_drain || tq->size() <= _target_stack_size, "Sanity"); | 221 assert(totally_drain || tq->size() <= _target_stack_size, "Sanity"); |
250 assert(tq->overflow_empty(), "Sanity"); | 222 assert(tq->overflow_empty(), "Sanity"); |
251 } | 223 } |
252 | 224 |
253 void PSPromotionManager::drain_stacks_breadth(bool totally_drain) { | |
254 assert(!depth_first(), "invariant"); | |
255 assert(claimed_stack_breadth()->overflow_stack() != NULL, "invariant"); | |
256 totally_drain = totally_drain || _totally_drain; | |
257 | |
258 #ifdef ASSERT | |
259 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); | |
260 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); | |
261 MutableSpace* to_space = heap->young_gen()->to_space(); | |
262 MutableSpace* old_space = heap->old_gen()->object_space(); | |
263 MutableSpace* perm_space = heap->perm_gen()->object_space(); | |
264 #endif /* ASSERT */ | |
265 | |
266 OverflowTaskQueue<oop>* const tq = claimed_stack_breadth(); | |
267 do { | |
268 oop obj; | |
269 | |
270 // Drain overflow stack first, so other threads can steal from | |
271 // claimed stack while we work. | |
272 while (tq->pop_overflow(obj)) { | |
273 obj->copy_contents(this); | |
274 } | |
275 | |
276 if (totally_drain) { | |
277 while (tq->pop_local(obj)) { | |
278 obj->copy_contents(this); | |
279 } | |
280 } else { | |
281 while (tq->size() > _target_stack_size && tq->pop_local(obj)) { | |
282 obj->copy_contents(this); | |
283 } | |
284 } | |
285 | |
286 // If we could not find any other work, flush the prefetch queue | |
287 if (tq->is_empty()) { | |
288 flush_prefetch_queue(); | |
289 } | |
290 } while (totally_drain && !tq->taskqueue_empty() || !tq->overflow_empty()); | |
291 | |
292 assert(!totally_drain || tq->taskqueue_empty(), "Sanity"); | |
293 assert(totally_drain || tq->size() <= _target_stack_size, "Sanity"); | |
294 assert(tq->overflow_empty(), "Sanity"); | |
295 } | |
296 | |
297 void PSPromotionManager::flush_labs() { | 225 void PSPromotionManager::flush_labs() { |
298 assert(stacks_empty(), "Attempt to flush lab with live stack"); | 226 assert(stacks_empty(), "Attempt to flush lab with live stack"); |
299 | 227 |
300 // If either promotion lab fills up, we can flush the | 228 // If either promotion lab fills up, we can flush the |
301 // lab but not refill it, so check first. | 229 // lab but not refill it, so check first. |
317 // This method is pretty bulky. It would be nice to split it up | 245 // This method is pretty bulky. It would be nice to split it up |
318 // into smaller submethods, but we need to be careful not to hurt | 246 // into smaller submethods, but we need to be careful not to hurt |
319 // performance. | 247 // performance. |
320 // | 248 // |
321 | 249 |
322 oop PSPromotionManager::copy_to_survivor_space(oop o, bool depth_first) { | 250 oop PSPromotionManager::copy_to_survivor_space(oop o) { |
323 assert(PSScavenge::should_scavenge(&o), "Sanity"); | 251 assert(PSScavenge::should_scavenge(&o), "Sanity"); |
324 | 252 |
325 oop new_obj = NULL; | 253 oop new_obj = NULL; |
326 | 254 |
327 // NOTE! We must be very careful with any methods that access the mark | 255 // NOTE! We must be very careful with any methods that access the mark |
421 if (!new_obj_is_tenured) { | 349 if (!new_obj_is_tenured) { |
422 new_obj->incr_age(); | 350 new_obj->incr_age(); |
423 assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj"); | 351 assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj"); |
424 } | 352 } |
425 | 353 |
426 if (depth_first) { | 354 // Do the size comparison first with new_obj_size, which we |
427 // Do the size comparison first with new_obj_size, which we | 355 // already have. Hopefully, only a few objects are larger than |
428 // already have. Hopefully, only a few objects are larger than | 356 // _min_array_size_for_chunking, and most of them will be arrays. |
429 // _min_array_size_for_chunking, and most of them will be arrays. | 357 // So, the is->objArray() test would be very infrequent. |
430 // So, the is->objArray() test would be very infrequent. | 358 if (new_obj_size > _min_array_size_for_chunking && |
431 if (new_obj_size > _min_array_size_for_chunking && | 359 new_obj->is_objArray() && |
432 new_obj->is_objArray() && | 360 PSChunkLargeArrays) { |
433 PSChunkLargeArrays) { | 361 // we'll chunk it |
434 // we'll chunk it | 362 oop* const masked_o = mask_chunked_array_oop(o); |
435 oop* const masked_o = mask_chunked_array_oop(o); | 363 push_depth(masked_o); |
436 push_depth(masked_o); | 364 TASKQUEUE_STATS_ONLY(++_arrays_chunked; ++_masked_pushes); |
437 TASKQUEUE_STATS_ONLY(++_arrays_chunked; ++_masked_pushes); | |
438 } else { | |
439 // we'll just push its contents | |
440 new_obj->push_contents(this); | |
441 } | |
442 } else { | 365 } else { |
443 push_breadth(new_obj); | 366 // we'll just push its contents |
367 new_obj->push_contents(this); | |
444 } | 368 } |
445 } else { | 369 } else { |
446 // We lost, someone else "owns" this object | 370 // We lost, someone else "owns" this object |
447 guarantee(o->is_forwarded(), "Object must be forwarded if the cas failed."); | 371 guarantee(o->is_forwarded(), "Object must be forwarded if the cas failed."); |
448 | 372 |
535 // it. | 459 // it. |
536 if (obj->cas_forward_to(obj, obj_mark)) { | 460 if (obj->cas_forward_to(obj, obj_mark)) { |
537 // We won any races, we "own" this object. | 461 // We won any races, we "own" this object. |
538 assert(obj == obj->forwardee(), "Sanity"); | 462 assert(obj == obj->forwardee(), "Sanity"); |
539 | 463 |
540 if (depth_first()) { | 464 obj->push_contents(this); |
541 obj->push_contents(this); | |
542 } else { | |
543 // Don't bother incrementing the age, just push | |
544 // onto the claimed_stack.. | |
545 push_breadth(obj); | |
546 } | |
547 | 465 |
548 // Save the mark if needed | 466 // Save the mark if needed |
549 PSScavenge::oop_promotion_failed(obj, obj_mark); | 467 PSScavenge::oop_promotion_failed(obj, obj_mark); |
550 } else { | 468 } else { |
551 // We lost, someone else "owns" this object | 469 // We lost, someone else "owns" this object |