src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp @ 1638:b2a00dd3117c

6957084: simplify TaskQueue overflow handling
Reviewed-by: ysr, jmasa
author jcoomes
date Thu, 01 Jul 2010 21:40:45 -0700
parents c18cbe5936b8
children 894b1d7c7e01
comparing 1628:a00567c82f02 with 1638:b2a00dd3117c
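What the change does: each compaction manager used to pair every fixed-size, stealable TaskQueue with a separately allocated GrowableArray that caught pushes the queue rejected when full. This changeset moves that fallback into the queue type itself: push() always succeeds and spills to a private overflow list when the bounded part is full, and consumers drain the two parts with pop_overflow() and pop_local(). A minimal sketch of the pattern, using standard containers in place of HotSpot's lock-free GenericTaskQueue (the names, bound, and containers here are illustrative, not the taskqueue.hpp implementation):

    #include <cstddef>
    #include <deque>

    // Sketch only: mirrors the push / pop_local / pop_overflow protocol
    // the patch adopts. HotSpot's real queue keeps the bounded part
    // lock-free so other GC workers can steal from it concurrently.
    template <typename E, std::size_t N = 1024>
    class OverflowTaskQueueSketch {
      std::deque<E> _elems;     // bounded part; visible to work stealing
      std::deque<E> _overflow;  // unbounded private spill area

    public:
      // push() never fails: when the bounded part is full, spill privately.
      void push(const E& e) {
        if (_elems.size() < N) _elems.push_back(e);
        else                   _overflow.push_back(e);
      }

      // Drain the private overflow first, so the stealable part stays
      // populated for idle workers (see follow_marking_stacks() below).
      bool pop_overflow(E& e) {
        if (_overflow.empty()) return false;
        e = _overflow.back(); _overflow.pop_back();
        return true;
      }

      bool pop_local(E& e) {
        if (_elems.empty()) return false;
        e = _elems.back(); _elems.pop_back();
        return true;
      }

      bool is_empty() const { return _elems.empty() && _overflow.empty(); }
    };

Because the queue now owns its overflow storage, the per-manager GrowableArray members, their C-heap allocation in the constructor, and their explicit deletes in the destructor all disappear, which accounts for most of the deletions below.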
@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 2005, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.
@@ -30,55 +30,36 @@
 OopTaskQueueSet* ParCompactionManager::_stack_array = NULL;
 ParCompactionManager::ObjArrayTaskQueueSet*
   ParCompactionManager::_objarray_queues = NULL;
 ObjectStartArray* ParCompactionManager::_start_array = NULL;
 ParMarkBitMap* ParCompactionManager::_mark_bitmap = NULL;
 RegionTaskQueueSet* ParCompactionManager::_region_array = NULL;
 
 ParCompactionManager::ParCompactionManager() :
     _action(CopyAndUpdate) {
 
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
 
   _old_gen = heap->old_gen();
   _start_array = old_gen()->start_array();
 
-
   marking_stack()->initialize();
-
-  // We want the overflow stack to be permanent
-  _overflow_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(10, true);
-
-  _objarray_queue.initialize();
-  _objarray_overflow_stack =
-    new (ResourceObj::C_HEAP) ObjArrayOverflowStack(10, true);
-
-#ifdef USE_RegionTaskQueueWithOverflow
+  _objarray_stack.initialize();
   region_stack()->initialize();
-#else
-  region_stack()->initialize();
-
-  // We want the overflow stack to be permanent
-  _region_overflow_stack =
-    new (ResourceObj::C_HEAP) GrowableArray<size_t>(10, true);
-#endif
 
   // Note that _revisit_klass_stack is allocated out of the
   // C heap (as opposed to out of ResourceArena).
   int size =
     (SystemDictionary::number_of_classes() * 2) * 2 / ParallelGCThreads;
   _revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
   // From some experiments (#klass/k)^2 for k = 10 seems a better fit, but this will
   // have to do for now until we are able to investigate a more optimal setting.
   _revisit_mdo_stack = new (ResourceObj::C_HEAP) GrowableArray<DataLayout*>(size*2, true);
-
 }
 
 ParCompactionManager::~ParCompactionManager() {
-  delete _overflow_stack;
-  delete _objarray_overflow_stack;
   delete _revisit_klass_stack;
   delete _revisit_mdo_stack;
   // _manager_array and _stack_array are statics
   // shared with all instances of ParCompactionManager
   // should not be deallocated.
@@ -106,16 +87,12 @@
   // Create and register the ParCompactionManager(s) for the worker threads.
   for(uint i=0; i<parallel_gc_threads; i++) {
     _manager_array[i] = new ParCompactionManager();
    guarantee(_manager_array[i] != NULL, "Could not create ParCompactionManager");
     stack_array()->register_queue(i, _manager_array[i]->marking_stack());
-    _objarray_queues->register_queue(i, &_manager_array[i]->_objarray_queue);
-#ifdef USE_RegionTaskQueueWithOverflow
-    region_array()->register_queue(i, _manager_array[i]->region_stack()->task_queue());
-#else
+    _objarray_queues->register_queue(i, &_manager_array[i]->_objarray_stack);
     region_array()->register_queue(i, _manager_array[i]->region_stack());
-#endif
   }
 
   // The VMThread gets its own ParCompactionManager, which is not available
   // for work stealing.
   _manager_array[parallel_gc_threads] = new ParCompactionManager();
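The registration loop keeps its shape: each worker's queues go into a shared queue set so idle workers can steal, and the renamed _objarray_stack now registers directly instead of exposing an inner task_queue() as the old USE_RegionTaskQueueWithOverflow wrapper did. A rough sketch of what such a set provides (illustrative only; HotSpot's GenericTaskQueueSet picks steal victims by randomized sampling rather than a linear scan):

    #include <cstddef>
    #include <vector>

    // Sketch of a queue set: queues register by worker index, and an idle
    // worker tries other queues' stealable parts. Q is assumed to expose
    // pop_local() as in the OverflowTaskQueueSketch above; real stealing
    // takes from the opposite end of the victim's bounded deque, lock-free.
    template <typename Q>
    class TaskQueueSetSketch {
      std::vector<Q*> _queues;
    public:
      explicit TaskQueueSetSketch(std::size_t n) : _queues(n, nullptr) {}

      void register_queue(std::size_t i, Q* q) { _queues[i] = q; }

      template <typename E>
      bool steal(std::size_t self, E& e) {
        for (std::size_t i = 0; i < _queues.size(); ++i) {
          if (i != self && _queues[i] != nullptr && _queues[i]->pop_local(e))
            return true;
        }
        return false;
      }
    };

Note that only the worker queues are registered; the VMThread's manager, created just above, deliberately stays outside the set so its work cannot be stolen.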
@@ -147,61 +124,10 @@
 bool ParCompactionManager::should_reset_only() {
   assert(action() != NotValid, "Action is not set");
   return action() == ParCompactionManager::ResetObjects;
 }
 
-// For now save on a stack
-void ParCompactionManager::save_for_scanning(oop m) {
-  stack_push(m);
-}
-
-void ParCompactionManager::stack_push(oop obj) {
-
-  if(!marking_stack()->push(obj)) {
-    overflow_stack()->push(obj);
-  }
-}
-
-oop ParCompactionManager::retrieve_for_scanning() {
-
-  // Should not be used in the parallel case
-  ShouldNotReachHere();
-  return NULL;
-}
-
-// Save region on a stack
-void ParCompactionManager::save_for_processing(size_t region_index) {
-#ifdef ASSERT
-  const ParallelCompactData& sd = PSParallelCompact::summary_data();
-  ParallelCompactData::RegionData* const region_ptr = sd.region(region_index);
-  assert(region_ptr->claimed(), "must be claimed");
-  assert(region_ptr->_pushed++ == 0, "should only be pushed once");
-#endif
-  region_stack_push(region_index);
-}
-
-void ParCompactionManager::region_stack_push(size_t region_index) {
-
-#ifdef USE_RegionTaskQueueWithOverflow
-  region_stack()->save(region_index);
-#else
-  if(!region_stack()->push(region_index)) {
-    region_overflow_stack()->push(region_index);
-  }
-#endif
-}
-
-bool ParCompactionManager::retrieve_for_processing(size_t& region_index) {
-#ifdef USE_RegionTaskQueueWithOverflow
-  return region_stack()->retrieve(region_index);
-#else
-  // Should not be used in the parallel case
-  ShouldNotReachHere();
-  return false;
-#endif
-}
-
 ParCompactionManager*
 ParCompactionManager::gc_thread_compaction_manager(int index) {
   assert(index >= 0 && index < (int)ParallelGCThreads, "index out of range");
   assert(_manager_array != NULL, "Sanity");
   return _manager_array[index];
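All of the deleted functions above were overflow plumbing: save_for_scanning()/stack_push() retried a failed push against the side GrowableArray, and the region variants did the same behind the USE_RegionTaskQueueWithOverflow conditional. With overflow folded into the queue, callers push directly and a drain loop handles both parts. Hypothetical call sites for contrast, written against the sketch class above (not code from the patch):

    // Producer: no failure path to handle any more.
    //   Before:  if (!queue.push(t)) overflow_stack.push(t);
    //   After:   queue.push(t);   // spills internally when full
    template <typename E>
    void produce(OverflowTaskQueueSketch<E>& q, const E& t) {
      q.push(t);
    }

    // Consumer: drain the private overflow first, then the stealable part,
    // mirroring follow_marking_stacks() and drain_region_stacks() below.
    template <typename E, typename Fn>
    void drain(OverflowTaskQueueSketch<E>& q, Fn process) {
      E t;
      while (q.pop_overflow(t)) process(t);
      while (q.pop_local(t))    process(t);
    }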
@@ -216,94 +142,43 @@
 
 void ParCompactionManager::follow_marking_stacks() {
   do {
     // Drain the overflow stack first, to allow stealing from the marking stack.
     oop obj;
-    while (!overflow_stack()->is_empty()) {
-      overflow_stack()->pop()->follow_contents(this);
+    while (marking_stack()->pop_overflow(obj)) {
+      obj->follow_contents(this);
     }
     while (marking_stack()->pop_local(obj)) {
       obj->follow_contents(this);
     }
 
     // Process ObjArrays one at a time to avoid marking stack bloat.
     ObjArrayTask task;
-    if (!_objarray_overflow_stack->is_empty()) {
-      task = _objarray_overflow_stack->pop();
+    if (_objarray_stack.pop_overflow(task)) {
       objArrayKlass* const k = (objArrayKlass*)task.obj()->blueprint();
       k->oop_follow_contents(this, task.obj(), task.index());
-    } else if (_objarray_queue.pop_local(task)) {
+    } else if (_objarray_stack.pop_local(task)) {
       objArrayKlass* const k = (objArrayKlass*)task.obj()->blueprint();
       k->oop_follow_contents(this, task.obj(), task.index());
     }
   } while (!marking_stacks_empty());
 
   assert(marking_stacks_empty(), "Sanity");
 }
 
-void ParCompactionManager::drain_region_overflow_stack() {
-  size_t region_index = (size_t) -1;
-  while(region_stack()->retrieve_from_overflow(region_index)) {
-    PSParallelCompact::fill_and_update_region(this, region_index);
-  }
-}
-
 void ParCompactionManager::drain_region_stacks() {
-#ifdef ASSERT
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-  MutableSpace* to_space = heap->young_gen()->to_space();
-  MutableSpace* old_space = heap->old_gen()->object_space();
-  MutableSpace* perm_space = heap->perm_gen()->object_space();
-#endif /* ASSERT */
-
-#if 1 // def DO_PARALLEL - the serial code hasn't been updated
   do {
-
-#ifdef USE_RegionTaskQueueWithOverflow
-    // Drain overflow stack first, so other threads can steal from
-    // claimed stack while we work.
-    size_t region_index = (size_t) -1;
-    while(region_stack()->retrieve_from_overflow(region_index)) {
+    // Drain overflow stack first so other threads can steal.
+    size_t region_index;
+    while (region_stack()->pop_overflow(region_index)) {
       PSParallelCompact::fill_and_update_region(this, region_index);
     }
 
-    while (region_stack()->retrieve_from_stealable_queue(region_index)) {
+    while (region_stack()->pop_local(region_index)) {
      PSParallelCompact::fill_and_update_region(this, region_index);
     }
   } while (!region_stack()->is_empty());
-#else
-  // Drain overflow stack first, so other threads can steal from
-  // claimed stack while we work.
-  while(!region_overflow_stack()->is_empty()) {
-    size_t region_index = region_overflow_stack()->pop();
-    PSParallelCompact::fill_and_update_region(this, region_index);
-  }
-
-  size_t region_index = -1;
-  // obj is a reference!!!
-  while (region_stack()->pop_local(region_index)) {
-    // It would be nice to assert about the type of objects we might
-    // pop, but they can come from anywhere, unfortunately.
-    PSParallelCompact::fill_and_update_region(this, region_index);
-  }
-  } while((region_stack()->size() != 0) ||
-          (region_overflow_stack()->length() != 0));
-#endif
-
-#ifdef USE_RegionTaskQueueWithOverflow
-  assert(region_stack()->is_empty(), "Sanity");
-#else
-  assert(region_stack()->size() == 0, "Sanity");
-  assert(region_overflow_stack()->length() == 0, "Sanity");
-#endif
-#else
-  oop obj;
-  while (obj = retrieve_for_scanning()) {
-    obj->follow_contents(this);
-  }
-#endif
 }
 
 #ifdef ASSERT
 bool ParCompactionManager::stacks_have_been_allocated() {
   return (revisit_klass_stack()->data_addr() != NULL &&