Mercurial > hg > graal-compiler
comparison src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp @ 1638:b2a00dd3117c
6957084: simplify TaskQueue overflow handling
Reviewed-by: ysr, jmasa
author | jcoomes |
---|---|
date | Thu, 01 Jul 2010 21:40:45 -0700 |
parents | c18cbe5936b8 |
children | a93a9eda13f7 |
comparison view — legend: equal | deleted | inserted | replaced
1628:a00567c82f02 | 1638:b2a00dd3117c |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2002, 2009, Oracle and/or its affiliates. All rights reserved. | 2 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. |
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | 4 * |
5 * This code is free software; you can redistribute it and/or modify it | 5 * This code is free software; you can redistribute it and/or modify it |
6 * under the terms of the GNU General Public License version 2 only, as | 6 * under the terms of the GNU General Public License version 2 only, as |
7 * published by the Free Software Foundation. | 7 * published by the Free Software Foundation. |
92 void PSPromotionManager::post_scavenge() { | 92 void PSPromotionManager::post_scavenge() { |
93 #if PS_PM_STATS | 93 #if PS_PM_STATS |
94 print_stats(); | 94 print_stats(); |
95 #endif // PS_PM_STATS | 95 #endif // PS_PM_STATS |
96 | 96 |
97 for(uint i=0; i<ParallelGCThreads+1; i++) { | 97 for (uint i = 0; i < ParallelGCThreads + 1; i++) { |
98 PSPromotionManager* manager = manager_array(i); | 98 PSPromotionManager* manager = manager_array(i); |
99 | 99 if (UseDepthFirstScavengeOrder) { |
100 // the guarantees are a bit gratuitous but, if one fires, we'll | 100 assert(manager->claimed_stack_depth()->is_empty(), "should be empty"); |
101 // have a better idea of what went wrong | |
102 if (i < ParallelGCThreads) { | |
103 guarantee((!UseDepthFirstScavengeOrder || | |
104 manager->overflow_stack_depth()->length() <= 0), | |
105 "promotion manager overflow stack must be empty"); | |
106 guarantee((UseDepthFirstScavengeOrder || | |
107 manager->overflow_stack_breadth()->length() <= 0), | |
108 "promotion manager overflow stack must be empty"); | |
109 | |
110 guarantee((!UseDepthFirstScavengeOrder || | |
111 manager->claimed_stack_depth()->size() <= 0), | |
112 "promotion manager claimed stack must be empty"); | |
113 guarantee((UseDepthFirstScavengeOrder || | |
114 manager->claimed_stack_breadth()->size() <= 0), | |
115 "promotion manager claimed stack must be empty"); | |
116 } else { | 101 } else { |
117 guarantee((!UseDepthFirstScavengeOrder || | 102 assert(manager->claimed_stack_breadth()->is_empty(), "should be empty"); |
118 manager->overflow_stack_depth()->length() <= 0), | 103 } |
119 "VM Thread promotion manager overflow stack " | |
120 "must be empty"); | |
121 guarantee((UseDepthFirstScavengeOrder || | |
122 manager->overflow_stack_breadth()->length() <= 0), | |
123 "VM Thread promotion manager overflow stack " | |
124 "must be empty"); | |
125 | |
126 guarantee((!UseDepthFirstScavengeOrder || | |
127 manager->claimed_stack_depth()->size() <= 0), | |
128 "VM Thread promotion manager claimed stack " | |
129 "must be empty"); | |
130 guarantee((UseDepthFirstScavengeOrder || | |
131 manager->claimed_stack_breadth()->size() <= 0), | |
132 "VM Thread promotion manager claimed stack " | |
133 "must be empty"); | |
134 } | |
135 | |
136 manager->flush_labs(); | 104 manager->flush_labs(); |
137 } | 105 } |
138 } | 106 } |
139 | 107 |
140 #if PS_PM_STATS | 108 #if PS_PM_STATS |
179 | 147 |
180 uint queue_size; | 148 uint queue_size; |
181 if (depth_first()) { | 149 if (depth_first()) { |
182 claimed_stack_depth()->initialize(); | 150 claimed_stack_depth()->initialize(); |
183 queue_size = claimed_stack_depth()->max_elems(); | 151 queue_size = claimed_stack_depth()->max_elems(); |
184 // We want the overflow stack to be permanent | |
185 _overflow_stack_depth = new (ResourceObj::C_HEAP) GrowableArray<StarTask>(10, true); | |
186 _overflow_stack_breadth = NULL; | |
187 } else { | 152 } else { |
188 claimed_stack_breadth()->initialize(); | 153 claimed_stack_breadth()->initialize(); |
189 queue_size = claimed_stack_breadth()->max_elems(); | 154 queue_size = claimed_stack_breadth()->max_elems(); |
190 // We want the overflow stack to be permanent | |
191 _overflow_stack_breadth = new (ResourceObj::C_HEAP) GrowableArray<oop>(10, true); | |
192 _overflow_stack_depth = NULL; | |
193 } | 155 } |
194 | 156 |
195 _totally_drain = (ParallelGCThreads == 1) || (GCDrainStackTargetSize == 0); | 157 _totally_drain = (ParallelGCThreads == 1) || (GCDrainStackTargetSize == 0); |
196 if (_totally_drain) { | 158 if (_totally_drain) { |
197 _target_stack_size = 0; | 159 _target_stack_size = 0; |
207 | 169 |
208 reset(); | 170 reset(); |
209 } | 171 } |
210 | 172 |
211 void PSPromotionManager::reset() { | 173 void PSPromotionManager::reset() { |
212 assert(claimed_stack_empty(), "reset of non-empty claimed stack"); | 174 assert(stacks_empty(), "reset of non-empty stack"); |
213 assert(overflow_stack_empty(), "reset of non-empty overflow stack"); | |
214 | 175 |
215 // We need to get an assert in here to make sure the labs are always flushed. | 176 // We need to get an assert in here to make sure the labs are always flushed. |
216 | 177 |
217 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); | 178 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); |
218 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); | 179 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); |
241 } | 202 } |
242 | 203 |
243 | 204 |
244 void PSPromotionManager::drain_stacks_depth(bool totally_drain) { | 205 void PSPromotionManager::drain_stacks_depth(bool totally_drain) { |
245 assert(depth_first(), "invariant"); | 206 assert(depth_first(), "invariant"); |
246 assert(overflow_stack_depth() != NULL, "invariant"); | 207 assert(claimed_stack_depth()->overflow_stack() != NULL, "invariant"); |
247 totally_drain = totally_drain || _totally_drain; | 208 totally_drain = totally_drain || _totally_drain; |
248 | 209 |
249 #ifdef ASSERT | 210 #ifdef ASSERT |
250 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); | 211 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); |
251 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); | 212 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); |
252 MutableSpace* to_space = heap->young_gen()->to_space(); | 213 MutableSpace* to_space = heap->young_gen()->to_space(); |
253 MutableSpace* old_space = heap->old_gen()->object_space(); | 214 MutableSpace* old_space = heap->old_gen()->object_space(); |
254 MutableSpace* perm_space = heap->perm_gen()->object_space(); | 215 MutableSpace* perm_space = heap->perm_gen()->object_space(); |
255 #endif /* ASSERT */ | 216 #endif /* ASSERT */ |
256 | 217 |
218 OopStarTaskQueue* const tq = claimed_stack_depth(); | |
257 do { | 219 do { |
258 StarTask p; | 220 StarTask p; |
259 | 221 |
260 // Drain overflow stack first, so other threads can steal from | 222 // Drain overflow stack first, so other threads can steal from |
261 // claimed stack while we work. | 223 // claimed stack while we work. |
262 while(!overflow_stack_depth()->is_empty()) { | 224 while (tq->pop_overflow(p)) { |
263 // linux compiler wants different overloaded operator= in taskqueue to | 225 process_popped_location_depth(p); |
264 // assign to p that the other compilers don't like. | |
265 StarTask ptr = overflow_stack_depth()->pop(); | |
266 process_popped_location_depth(ptr); | |
267 } | 226 } |
268 | 227 |
269 if (totally_drain) { | 228 if (totally_drain) { |
270 while (claimed_stack_depth()->pop_local(p)) { | 229 while (tq->pop_local(p)) { |
271 process_popped_location_depth(p); | 230 process_popped_location_depth(p); |
272 } | 231 } |
273 } else { | 232 } else { |
274 while (claimed_stack_depth()->size() > _target_stack_size && | 233 while (tq->size() > _target_stack_size && tq->pop_local(p)) { |
275 claimed_stack_depth()->pop_local(p)) { | |
276 process_popped_location_depth(p); | 234 process_popped_location_depth(p); |
277 } | 235 } |
278 } | 236 } |
279 } while( (totally_drain && claimed_stack_depth()->size() > 0) || | 237 } while (totally_drain && !tq->taskqueue_empty() || !tq->overflow_empty()); |
280 (overflow_stack_depth()->length() > 0) ); | 238 |
281 | 239 assert(!totally_drain || tq->taskqueue_empty(), "Sanity"); |
282 assert(!totally_drain || claimed_stack_empty(), "Sanity"); | 240 assert(totally_drain || tq->size() <= _target_stack_size, "Sanity"); |
283 assert(totally_drain || | 241 assert(tq->overflow_empty(), "Sanity"); |
284 claimed_stack_depth()->size() <= _target_stack_size, | |
285 "Sanity"); | |
286 assert(overflow_stack_empty(), "Sanity"); | |
287 } | 242 } |
288 | 243 |
289 void PSPromotionManager::drain_stacks_breadth(bool totally_drain) { | 244 void PSPromotionManager::drain_stacks_breadth(bool totally_drain) { |
290 assert(!depth_first(), "invariant"); | 245 assert(!depth_first(), "invariant"); |
291 assert(overflow_stack_breadth() != NULL, "invariant"); | 246 assert(claimed_stack_breadth()->overflow_stack() != NULL, "invariant"); |
292 totally_drain = totally_drain || _totally_drain; | 247 totally_drain = totally_drain || _totally_drain; |
293 | 248 |
294 #ifdef ASSERT | 249 #ifdef ASSERT |
295 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); | 250 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); |
296 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); | 251 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); |
297 MutableSpace* to_space = heap->young_gen()->to_space(); | 252 MutableSpace* to_space = heap->young_gen()->to_space(); |
298 MutableSpace* old_space = heap->old_gen()->object_space(); | 253 MutableSpace* old_space = heap->old_gen()->object_space(); |
299 MutableSpace* perm_space = heap->perm_gen()->object_space(); | 254 MutableSpace* perm_space = heap->perm_gen()->object_space(); |
300 #endif /* ASSERT */ | 255 #endif /* ASSERT */ |
301 | 256 |
257 OverflowTaskQueue<oop>* const tq = claimed_stack_breadth(); | |
302 do { | 258 do { |
303 oop obj; | 259 oop obj; |
304 | 260 |
305 // Drain overflow stack first, so other threads can steal from | 261 // Drain overflow stack first, so other threads can steal from |
306 // claimed stack while we work. | 262 // claimed stack while we work. |
307 while(!overflow_stack_breadth()->is_empty()) { | 263 while (tq->pop_overflow(obj)) { |
308 obj = overflow_stack_breadth()->pop(); | |
309 obj->copy_contents(this); | 264 obj->copy_contents(this); |
310 } | 265 } |
311 | 266 |
312 if (totally_drain) { | 267 if (totally_drain) { |
313 // obj is a reference!!! | 268 while (tq->pop_local(obj)) { |
314 while (claimed_stack_breadth()->pop_local(obj)) { | |
315 // It would be nice to assert about the type of objects we might | |
316 // pop, but they can come from anywhere, unfortunately. | |
317 obj->copy_contents(this); | 269 obj->copy_contents(this); |
318 } | 270 } |
319 } else { | 271 } else { |
320 // obj is a reference!!! | 272 while (tq->size() > _target_stack_size && tq->pop_local(obj)) { |
321 while (claimed_stack_breadth()->size() > _target_stack_size && | |
322 claimed_stack_breadth()->pop_local(obj)) { | |
323 // It would be nice to assert about the type of objects we might | |
324 // pop, but they can come from anywhere, unfortunately. | |
325 obj->copy_contents(this); | 273 obj->copy_contents(this); |
326 } | 274 } |
327 } | 275 } |
328 | 276 |
329 // If we could not find any other work, flush the prefetch queue | 277 // If we could not find any other work, flush the prefetch queue |
330 if (claimed_stack_breadth()->size() == 0 && | 278 if (tq->is_empty()) { |
331 (overflow_stack_breadth()->length() == 0)) { | |
332 flush_prefetch_queue(); | 279 flush_prefetch_queue(); |
333 } | 280 } |
334 } while((totally_drain && claimed_stack_breadth()->size() > 0) || | 281 } while (totally_drain && !tq->taskqueue_empty() || !tq->overflow_empty()); |
335 (overflow_stack_breadth()->length() > 0)); | 282 |
336 | 283 assert(!totally_drain || tq->taskqueue_empty(), "Sanity"); |
337 assert(!totally_drain || claimed_stack_empty(), "Sanity"); | 284 assert(totally_drain || tq->size() <= _target_stack_size, "Sanity"); |
338 assert(totally_drain || | 285 assert(tq->overflow_empty(), "Sanity"); |
339 claimed_stack_breadth()->size() <= _target_stack_size, | |
340 "Sanity"); | |
341 assert(overflow_stack_empty(), "Sanity"); | |
342 } | 286 } |
343 | 287 |
344 void PSPromotionManager::flush_labs() { | 288 void PSPromotionManager::flush_labs() { |
345 assert(claimed_stack_empty(), "Attempt to flush lab with live stack"); | 289 assert(stacks_empty(), "Attempt to flush lab with live stack"); |
346 assert(overflow_stack_empty(), "Attempt to flush lab with live overflow stack"); | |
347 | 290 |
348 // If either promotion lab fills up, we can flush the | 291 // If either promotion lab fills up, we can flush the |
349 // lab but not refill it, so check first. | 292 // lab but not refill it, so check first. |
350 assert(!_young_lab.is_flushed() || _young_gen_is_full, "Sanity"); | 293 assert(!_young_lab.is_flushed() || _young_gen_is_full, "Sanity"); |
351 if (!_young_lab.is_flushed()) | 294 if (!_young_lab.is_flushed()) |