comparison src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp @ 375:81cd571500b0

6725697: par compact - rename class ChunkData to RegionData
Reviewed-by: iveresov, tonyp
author jcoomes
date Tue, 30 Sep 2008 12:20:22 -0700
parents a61af66fc99e
children ad8c8ca4ab0f
--- a/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp (374:a4b729f5b611)
+++ b/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp (375:81cd571500b0)
@@ -28,11 +28,11 @@
 PSOldGen* ParCompactionManager::_old_gen = NULL;
 ParCompactionManager** ParCompactionManager::_manager_array = NULL;
 OopTaskQueueSet* ParCompactionManager::_stack_array = NULL;
 ObjectStartArray* ParCompactionManager::_start_array = NULL;
 ParMarkBitMap* ParCompactionManager::_mark_bitmap = NULL;
-ChunkTaskQueueSet* ParCompactionManager::_chunk_array = NULL;
+RegionTaskQueueSet* ParCompactionManager::_region_array = NULL;
 
 ParCompactionManager::ParCompactionManager() :
     _action(CopyAndUpdate) {
 
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
@@ -44,17 +44,17 @@
 
   marking_stack()->initialize();
 
   // We want the overflow stack to be permanent
   _overflow_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(10, true);
-#ifdef USE_ChunkTaskQueueWithOverflow
-  chunk_stack()->initialize();
+#ifdef USE_RegionTaskQueueWithOverflow
+  region_stack()->initialize();
 #else
-  chunk_stack()->initialize();
+  region_stack()->initialize();
 
   // We want the overflow stack to be permanent
-  _chunk_overflow_stack =
+  _region_overflow_stack =
     new (ResourceObj::C_HEAP) GrowableArray<size_t>(10, true);
 #endif
 
   // Note that _revisit_klass_stack is allocated out of the
   // C heap (as opposed to out of ResourceArena).
@@ -84,22 +84,22 @@
   _manager_array = NEW_C_HEAP_ARRAY(ParCompactionManager*, parallel_gc_threads+1 );
   guarantee(_manager_array != NULL, "Could not initialize promotion manager");
 
   _stack_array = new OopTaskQueueSet(parallel_gc_threads);
   guarantee(_stack_array != NULL, "Count not initialize promotion manager");
-  _chunk_array = new ChunkTaskQueueSet(parallel_gc_threads);
-  guarantee(_chunk_array != NULL, "Count not initialize promotion manager");
+  _region_array = new RegionTaskQueueSet(parallel_gc_threads);
+  guarantee(_region_array != NULL, "Count not initialize promotion manager");
 
   // Create and register the ParCompactionManager(s) for the worker threads.
   for(uint i=0; i<parallel_gc_threads; i++) {
     _manager_array[i] = new ParCompactionManager();
     guarantee(_manager_array[i] != NULL, "Could not create ParCompactionManager");
     stack_array()->register_queue(i, _manager_array[i]->marking_stack());
-#ifdef USE_ChunkTaskQueueWithOverflow
-    chunk_array()->register_queue(i, _manager_array[i]->chunk_stack()->task_queue());
+#ifdef USE_RegionTaskQueueWithOverflow
+    region_array()->register_queue(i, _manager_array[i]->region_stack()->task_queue());
 #else
-    chunk_array()->register_queue(i, _manager_array[i]->chunk_stack());
+    region_array()->register_queue(i, _manager_array[i]->region_stack());
 #endif
   }
 
   // The VMThread gets its own ParCompactionManager, which is not available
   // for work stealing.
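
The hunk above registers each worker's region queue in a shared RegionTaskQueueSet so that idle GC workers can steal regions from busy ones. The following is a minimal, single-threaded C++ sketch of that register-then-steal shape; SimpleTaskQueue and SimpleTaskQueueSet are hypothetical stand-ins, not HotSpot's GenericTaskQueueSet, and all synchronization is omitted.

#include <cstddef>
#include <cstdlib>
#include <deque>
#include <vector>

typedef std::deque<size_t> SimpleTaskQueue;

class SimpleTaskQueueSet {
  std::vector<SimpleTaskQueue*> _queues;
public:
  explicit SimpleTaskQueueSet(size_t n) : _queues(n) {}

  // Analogue of region_array()->register_queue(i, region_stack()):
  // make worker i's queue visible to every other worker.
  void register_queue(size_t i, SimpleTaskQueue* q) { _queues[i] = q; }

  // Try to take one task from a randomly probed victim's queue.
  bool steal(size_t thief, size_t& task) {
    for (size_t probe = 0; probe < 2 * _queues.size(); ++probe) {
      size_t victim = (size_t)rand() % _queues.size();
      if (victim == thief) continue;
      SimpleTaskQueue* q = _queues[victim];
      if (q != NULL && !q->empty()) {
        task = q->front();  // thieves take the end the owner does not use
        q->pop_front();
        return true;
      }
    }
    return false;  // nothing to steal right now
  }
};

HotSpot's real task queues are bounded lock-free deques and stealing is coordinated with a termination protocol; the sketch only shows why register_queue() must run for every worker before any stealing starts.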
@@ -151,35 +151,35 @@
   // Should not be used in the parallel case
   ShouldNotReachHere();
   return NULL;
 }
 
-// Save chunk on a stack
-void ParCompactionManager::save_for_processing(size_t chunk_index) {
+// Save region on a stack
+void ParCompactionManager::save_for_processing(size_t region_index) {
 #ifdef ASSERT
   const ParallelCompactData& sd = PSParallelCompact::summary_data();
-  ParallelCompactData::ChunkData* const chunk_ptr = sd.chunk(chunk_index);
-  assert(chunk_ptr->claimed(), "must be claimed");
-  assert(chunk_ptr->_pushed++ == 0, "should only be pushed once");
+  ParallelCompactData::RegionData* const region_ptr = sd.region(region_index);
+  assert(region_ptr->claimed(), "must be claimed");
+  assert(region_ptr->_pushed++ == 0, "should only be pushed once");
 #endif
-  chunk_stack_push(chunk_index);
+  region_stack_push(region_index);
 }
 
-void ParCompactionManager::chunk_stack_push(size_t chunk_index) {
+void ParCompactionManager::region_stack_push(size_t region_index) {
 
-#ifdef USE_ChunkTaskQueueWithOverflow
-  chunk_stack()->save(chunk_index);
+#ifdef USE_RegionTaskQueueWithOverflow
+  region_stack()->save(region_index);
 #else
-  if(!chunk_stack()->push(chunk_index)) {
-    chunk_overflow_stack()->push(chunk_index);
+  if(!region_stack()->push(region_index)) {
+    region_overflow_stack()->push(region_index);
   }
 #endif
 }
 
-bool ParCompactionManager::retrieve_for_processing(size_t& chunk_index) {
-#ifdef USE_ChunkTaskQueueWithOverflow
-  return chunk_stack()->retrieve(chunk_index);
+bool ParCompactionManager::retrieve_for_processing(size_t& region_index) {
+#ifdef USE_RegionTaskQueueWithOverflow
+  return region_stack()->retrieve(region_index);
 #else
   // Should not be used in the parallel case
   ShouldNotReachHere();
   return false;
 #endif
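
When USE_RegionTaskQueueWithOverflow is not defined, region_stack_push() above follows a common pattern: try the fixed-capacity stealable queue first, and absorb any overflow into a private growable stack, so a push never blocks and never drops a task. A self-contained sketch of just that pattern (illustrative names, no synchronization):

#include <cstddef>
#include <vector>

// Fixed-capacity queue whose push() fails rather than grows, standing in
// for the bounded, stealable region task queue.
class BoundedTaskQueue {
  std::vector<size_t> _elems;
  size_t _capacity;
public:
  explicit BoundedTaskQueue(size_t capacity) : _capacity(capacity) {}
  bool push(size_t v) {
    if (_elems.size() >= _capacity) return false;  // full: caller must cope
    _elems.push_back(v);
    return true;
  }
};

class CompactionWorker {
  BoundedTaskQueue _region_stack;
  std::vector<size_t> _region_overflow_stack;  // private, grows as needed
public:
  CompactionWorker() : _region_stack(1024) {}

  // Mirrors region_stack_push(): a failed push into the bounded queue is
  // absorbed locally instead of blocking or losing the region.
  void region_stack_push(size_t region_index) {
    if (!_region_stack.push(region_index)) {
      _region_overflow_stack.push_back(region_index);
    }
  }
};

The trade-off is that overflowed regions are invisible to thieves, which is why the drain code below empties the overflow stack first.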
@@ -228,18 +228,18 @@
 
   assert(marking_stack()->size() == 0, "Sanity");
   assert(overflow_stack()->length() == 0, "Sanity");
 }
 
-void ParCompactionManager::drain_chunk_overflow_stack() {
-  size_t chunk_index = (size_t) -1;
-  while(chunk_stack()->retrieve_from_overflow(chunk_index)) {
-    PSParallelCompact::fill_and_update_chunk(this, chunk_index);
+void ParCompactionManager::drain_region_overflow_stack() {
+  size_t region_index = (size_t) -1;
+  while(region_stack()->retrieve_from_overflow(region_index)) {
+    PSParallelCompact::fill_and_update_region(this, region_index);
   }
 }
 
-void ParCompactionManager::drain_chunk_stacks() {
+void ParCompactionManager::drain_region_stacks() {
 #ifdef ASSERT
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
   MutableSpace* to_space = heap->young_gen()->to_space();
   MutableSpace* old_space = heap->old_gen()->object_space();
@@ -247,46 +247,46 @@
 #endif /* ASSERT */
 
 #if 1 // def DO_PARALLEL - the serial code hasn't been updated
   do {
 
-#ifdef USE_ChunkTaskQueueWithOverflow
+#ifdef USE_RegionTaskQueueWithOverflow
     // Drain overflow stack first, so other threads can steal from
     // claimed stack while we work.
-    size_t chunk_index = (size_t) -1;
-    while(chunk_stack()->retrieve_from_overflow(chunk_index)) {
-      PSParallelCompact::fill_and_update_chunk(this, chunk_index);
+    size_t region_index = (size_t) -1;
+    while(region_stack()->retrieve_from_overflow(region_index)) {
+      PSParallelCompact::fill_and_update_region(this, region_index);
     }
 
-    while (chunk_stack()->retrieve_from_stealable_queue(chunk_index)) {
-      PSParallelCompact::fill_and_update_chunk(this, chunk_index);
+    while (region_stack()->retrieve_from_stealable_queue(region_index)) {
+      PSParallelCompact::fill_and_update_region(this, region_index);
     }
-  } while (!chunk_stack()->is_empty());
+  } while (!region_stack()->is_empty());
 #else
     // Drain overflow stack first, so other threads can steal from
     // claimed stack while we work.
-    while(!chunk_overflow_stack()->is_empty()) {
-      size_t chunk_index = chunk_overflow_stack()->pop();
-      PSParallelCompact::fill_and_update_chunk(this, chunk_index);
+    while(!region_overflow_stack()->is_empty()) {
+      size_t region_index = region_overflow_stack()->pop();
+      PSParallelCompact::fill_and_update_region(this, region_index);
     }
 
-    size_t chunk_index = -1;
+    size_t region_index = -1;
     // obj is a reference!!!
-    while (chunk_stack()->pop_local(chunk_index)) {
+    while (region_stack()->pop_local(region_index)) {
       // It would be nice to assert about the type of objects we might
       // pop, but they can come from anywhere, unfortunately.
-      PSParallelCompact::fill_and_update_chunk(this, chunk_index);
+      PSParallelCompact::fill_and_update_region(this, region_index);
    }
-  } while((chunk_stack()->size() != 0) ||
-          (chunk_overflow_stack()->length() != 0));
+  } while((region_stack()->size() != 0) ||
+          (region_overflow_stack()->length() != 0));
 #endif
 
-#ifdef USE_ChunkTaskQueueWithOverflow
-  assert(chunk_stack()->is_empty(), "Sanity");
+#ifdef USE_RegionTaskQueueWithOverflow
+  assert(region_stack()->is_empty(), "Sanity");
 #else
-  assert(chunk_stack()->size() == 0, "Sanity");
-  assert(chunk_overflow_stack()->length() == 0, "Sanity");
+  assert(region_stack()->size() == 0, "Sanity");
+  assert(region_overflow_stack()->length() == 0, "Sanity");
 #endif
 #else
   oop obj;
   while (obj = retrieve_for_scanning()) {
     obj->follow_contents(this);
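
The drain loops above encode a deliberate ordering: the private overflow stack is emptied before the stealable queue, so that regions sitting in the shared queue remain visible to thieves for as long as possible, and the outer do/while repeats because fill_and_update_region() can push further regions. A self-contained sketch of that drain discipline, with hypothetical single-threaded stand-ins for the real queues:

#include <cstddef>
#include <deque>
#include <vector>

static std::vector<size_t> overflow_stack;   // private to this worker
static std::deque<size_t>  stealable_queue;  // visible to thieves

static void fill_and_update_region(size_t region_index) {
  // Placeholder for the real work; in HotSpot this may push new regions
  // onto either stack, which is why the caller loops until both are empty.
  (void)region_index;
}

static void drain_region_stacks() {
  do {
    // Drain the overflow stack first, so other threads can keep stealing
    // from the shared queue while this worker is busy.
    while (!overflow_stack.empty()) {
      size_t region_index = overflow_stack.back();
      overflow_stack.pop_back();
      fill_and_update_region(region_index);
    }
    // Then drain this worker's own end of the stealable queue.
    while (!stealable_queue.empty()) {
      size_t region_index = stealable_queue.back();
      stealable_queue.pop_back();
      fill_and_update_region(region_index);
    }
  } while (!overflow_stack.empty() || !stealable_queue.empty());
}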