src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp @ 0:a61af66fc99e (jdk7-b24)

Initial load
author duke
date Sat, 01 Dec 2007 00:00:00 +0000
parents
children 81cd571500b0

/*
 * Copyright 2005-2006 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_psCompactionManager.cpp.incl"

PSOldGen*              ParCompactionManager::_old_gen = NULL;
ParCompactionManager** ParCompactionManager::_manager_array = NULL;
OopTaskQueueSet*       ParCompactionManager::_stack_array = NULL;
ObjectStartArray*      ParCompactionManager::_start_array = NULL;
ParMarkBitMap*         ParCompactionManager::_mark_bitmap = NULL;
ChunkTaskQueueSet*     ParCompactionManager::_chunk_array = NULL;

ParCompactionManager::ParCompactionManager() :
    _action(CopyAndUpdate) {

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  _old_gen = heap->old_gen();
  _start_array = old_gen()->start_array();

  marking_stack()->initialize();

  // We want the overflow stack to be permanent
  _overflow_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(10, true);
#ifdef USE_ChunkTaskQueueWithOverflow
  chunk_stack()->initialize();
#else
  chunk_stack()->initialize();

  // We want the overflow stack to be permanent
  _chunk_overflow_stack =
    new (ResourceObj::C_HEAP) GrowableArray<size_t>(10, true);
#endif

  // Note that _revisit_klass_stack is allocated out of the
  // C heap (as opposed to out of ResourceArena).
  int size =
    (SystemDictionary::number_of_classes() * 2) * 2 / ParallelGCThreads;
  _revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
}

ParCompactionManager::~ParCompactionManager() {
  delete _overflow_stack;
  delete _revisit_klass_stack;
  // _manager_array and _stack_array are statics shared by all instances of
  // ParCompactionManager and should not be deallocated.
}

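// One-time setup for parallel compaction: record the mark bitmap, then create
// one ParCompactionManager per GC worker thread (plus one for the VMThread)
// and register each worker's marking and chunk queues for work stealing.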
void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
  assert(PSParallelCompact::gc_task_manager() != NULL,
    "Needed for initialization");

  _mark_bitmap = mbm;

  uint parallel_gc_threads = PSParallelCompact::gc_task_manager()->workers();

  assert(_manager_array == NULL, "Attempt to initialize twice");
  _manager_array = NEW_C_HEAP_ARRAY(ParCompactionManager*, parallel_gc_threads+1);
  guarantee(_manager_array != NULL, "Could not initialize compaction manager");

  _stack_array = new OopTaskQueueSet(parallel_gc_threads);
  guarantee(_stack_array != NULL, "Could not initialize compaction manager");
  _chunk_array = new ChunkTaskQueueSet(parallel_gc_threads);
  guarantee(_chunk_array != NULL, "Could not initialize compaction manager");

  // Create and register the ParCompactionManager(s) for the worker threads.
  for (uint i = 0; i < parallel_gc_threads; i++) {
    _manager_array[i] = new ParCompactionManager();
    guarantee(_manager_array[i] != NULL, "Could not create ParCompactionManager");
    stack_array()->register_queue(i, _manager_array[i]->marking_stack());
#ifdef USE_ChunkTaskQueueWithOverflow
    chunk_array()->register_queue(i, _manager_array[i]->chunk_stack()->task_queue());
#else
    chunk_array()->register_queue(i, _manager_array[i]->chunk_stack());
#endif
  }

  // The VMThread gets its own ParCompactionManager, which is not available
  // for work stealing.
  _manager_array[parallel_gc_threads] = new ParCompactionManager();
  guarantee(_manager_array[parallel_gc_threads] != NULL,
    "Could not create ParCompactionManager");
  assert(PSParallelCompact::gc_task_manager()->workers() != 0,
    "Not initialized?");
}

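// The predicates below test the manager's current Action, which selects the
// work a compaction phase performs (copying, updating references,
// verification only, or resetting objects).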
bool ParCompactionManager::should_update() {
  assert(action() != NotValid, "Action is not set");
  return (action() == ParCompactionManager::Update) ||
         (action() == ParCompactionManager::CopyAndUpdate) ||
         (action() == ParCompactionManager::UpdateAndCopy);
}

bool ParCompactionManager::should_copy() {
  assert(action() != NotValid, "Action is not set");
  return (action() == ParCompactionManager::Copy) ||
         (action() == ParCompactionManager::CopyAndUpdate) ||
         (action() == ParCompactionManager::UpdateAndCopy);
}

bool ParCompactionManager::should_verify_only() {
  assert(action() != NotValid, "Action is not set");
  return action() == ParCompactionManager::VerifyUpdate;
}

bool ParCompactionManager::should_reset_only() {
  assert(action() != NotValid, "Action is not set");
  return action() == ParCompactionManager::ResetObjects;
}

// For now save on a stack
void ParCompactionManager::save_for_scanning(oop m) {
  stack_push(m);
}

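// Push onto the stealable marking stack; if the push fails because the task
// queue is full, fall back to the unbounded C-heap overflow stack.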
void ParCompactionManager::stack_push(oop obj) {
  if (!marking_stack()->push(obj)) {
    overflow_stack()->push(obj);
  }
}

oop ParCompactionManager::retrieve_for_scanning() {
  // Should not be used in the parallel case
  ShouldNotReachHere();
  return NULL;
}

// Save chunk on a stack
void ParCompactionManager::save_for_processing(size_t chunk_index) {
#ifdef ASSERT
  const ParallelCompactData& sd = PSParallelCompact::summary_data();
  ParallelCompactData::ChunkData* const chunk_ptr = sd.chunk(chunk_index);
  assert(chunk_ptr->claimed(), "must be claimed");
  assert(chunk_ptr->_pushed++ == 0, "should only be pushed once");
#endif
  chunk_stack_push(chunk_index);
}

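// With USE_ChunkTaskQueueWithOverflow the chunk task queue handles overflow
// itself via save(); otherwise a failed push spills into the separate chunk
// overflow stack.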
void ParCompactionManager::chunk_stack_push(size_t chunk_index) {
#ifdef USE_ChunkTaskQueueWithOverflow
  chunk_stack()->save(chunk_index);
#else
  if (!chunk_stack()->push(chunk_index)) {
    chunk_overflow_stack()->push(chunk_index);
  }
#endif
}

bool ParCompactionManager::retrieve_for_processing(size_t& chunk_index) {
#ifdef USE_ChunkTaskQueueWithOverflow
  return chunk_stack()->retrieve(chunk_index);
#else
  // Should not be used in the parallel case
  ShouldNotReachHere();
  return false;
#endif
}

ParCompactionManager*
ParCompactionManager::gc_thread_compaction_manager(int index) {
  assert(index >= 0 && index < (int)ParallelGCThreads, "index out of range");
  assert(_manager_array != NULL, "Sanity");
  return _manager_array[index];
}

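// Clear the revisit klass stack of every manager, including the extra one
// owned by the VMThread (hence ParallelGCThreads + 1).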
void ParCompactionManager::reset() {
  for (uint i = 0; i < ParallelGCThreads + 1; i++) {
    manager_array(i)->revisit_klass_stack()->clear();
  }
}

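// Follow the contents of every object on this manager's marking stacks,
// draining the overflow stack first so that other workers can steal from the
// stealable marking stack; loop until both are empty.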
void ParCompactionManager::drain_marking_stacks(OopClosure* blk) {
#ifdef ASSERT
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  MutableSpace* to_space = heap->young_gen()->to_space();
  MutableSpace* old_space = heap->old_gen()->object_space();
  MutableSpace* perm_space = heap->perm_gen()->object_space();
#endif /* ASSERT */

  do {
    // Drain overflow stack first, so other threads can steal from
    // claimed stack while we work.
    while (!overflow_stack()->is_empty()) {
      oop obj = overflow_stack()->pop();
      obj->follow_contents(this);
    }

    oop obj;
    // obj is a reference!!!
    while (marking_stack()->pop_local(obj)) {
      // It would be nice to assert about the type of objects we might
      // pop, but they can come from anywhere, unfortunately.
      obj->follow_contents(this);
    }
  } while ((marking_stack()->size() != 0) || (overflow_stack()->length() != 0));

  assert(marking_stack()->size() == 0, "Sanity");
  assert(overflow_stack()->length() == 0, "Sanity");
}

void ParCompactionManager::drain_chunk_overflow_stack() {
  size_t chunk_index = (size_t) -1;
  while (chunk_stack()->retrieve_from_overflow(chunk_index)) {
    PSParallelCompact::fill_and_update_chunk(this, chunk_index);
  }
}

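// Fill and update every chunk on this manager's chunk stacks, again draining
// the overflow portion first so that the stealable queue stays available to
// other workers.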
void ParCompactionManager::drain_chunk_stacks() {
#ifdef ASSERT
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  MutableSpace* to_space = heap->young_gen()->to_space();
  MutableSpace* old_space = heap->old_gen()->object_space();
  MutableSpace* perm_space = heap->perm_gen()->object_space();
#endif /* ASSERT */

#if 1 // def DO_PARALLEL - the serial code hasn't been updated
  do {
#ifdef USE_ChunkTaskQueueWithOverflow
    // Drain overflow stack first, so other threads can steal from
    // claimed stack while we work.
    size_t chunk_index = (size_t) -1;
    while (chunk_stack()->retrieve_from_overflow(chunk_index)) {
      PSParallelCompact::fill_and_update_chunk(this, chunk_index);
    }

    while (chunk_stack()->retrieve_from_stealable_queue(chunk_index)) {
      PSParallelCompact::fill_and_update_chunk(this, chunk_index);
    }
  } while (!chunk_stack()->is_empty());
#else
    // Drain overflow stack first, so other threads can steal from
    // claimed stack while we work.
    while (!chunk_overflow_stack()->is_empty()) {
      size_t chunk_index = chunk_overflow_stack()->pop();
      PSParallelCompact::fill_and_update_chunk(this, chunk_index);
    }

    size_t chunk_index = (size_t) -1;
    // chunk_index is an index into the summary data's chunks, not a reference
    while (chunk_stack()->pop_local(chunk_index)) {
      // It would be nice to assert about the type of objects we might
      // pop, but they can come from anywhere, unfortunately.
      PSParallelCompact::fill_and_update_chunk(this, chunk_index);
    }
  } while ((chunk_stack()->size() != 0) ||
           (chunk_overflow_stack()->length() != 0));
#endif

#ifdef USE_ChunkTaskQueueWithOverflow
  assert(chunk_stack()->is_empty(), "Sanity");
#else
  assert(chunk_stack()->size() == 0, "Sanity");
  assert(chunk_overflow_stack()->length() == 0, "Sanity");
#endif
#else
  oop obj;
  while ((obj = retrieve_for_scanning()) != NULL) {
    obj->follow_contents(this);
  }
#endif
}

#ifdef ASSERT
bool ParCompactionManager::stacks_have_been_allocated() {
  return (revisit_klass_stack()->data_addr() != NULL);
}
#endif