0
|
1 /*
|
|
2 * Copyright 2005-2007 Sun Microsystems, Inc. All Rights Reserved.
|
|
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
|
4 *
|
|
5 * This code is free software; you can redistribute it and/or modify it
|
|
6 * under the terms of the GNU General Public License version 2 only, as
|
|
7 * published by the Free Software Foundation.
|
|
8 *
|
|
9 * This code is distributed in the hope that it will be useful, but WITHOUT
|
|
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
|
12 * version 2 for more details (a copy is included in the LICENSE file that
|
|
13 * accompanied this code).
|
|
14 *
|
|
15 * You should have received a copy of the GNU General Public License version
|
|
16 * 2 along with this work; if not, write to the Free Software Foundation,
|
|
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
|
18 *
|
|
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
|
|
20 * CA 95054 USA or visit www.sun.com if you need additional information or
|
|
21 * have any questions.
|
|
22 *
|
|
23 */
|
|
24
|
|
25 # include "incls/_precompiled.incl"
|
|
26 # include "incls/_pcTasks.cpp.incl"
|
|
27
|
|
28 //
|
|
29 // ThreadRootsMarkingTask
|
|
30 //
|
|
31
|
|
32 void ThreadRootsMarkingTask::do_it(GCTaskManager* manager, uint which) {
|
|
33 assert(Universe::heap()->is_gc_active(), "called outside gc");
|
|
34
|
|
35 ResourceMark rm;
|
|
36
|
|
37 NOT_PRODUCT(TraceTime tm("ThreadRootsMarkingTask",
|
|
38 PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
|
|
39 ParCompactionManager* cm =
|
|
40 ParCompactionManager::gc_thread_compaction_manager(which);
|
|
41 PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
|
|
42
|
|
43 if (_java_thread != NULL)
|
|
44 _java_thread->oops_do(&mark_and_push_closure);
|
|
45
|
|
46 if (_vm_thread != NULL)
|
|
47 _vm_thread->oops_do(&mark_and_push_closure);
|
|
48
|
|
49 // Do the real work
|
|
50 cm->drain_marking_stacks(&mark_and_push_closure);
|
|
51 }
|
|
52
|
|
53
|
|
54 void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {
|
|
55 assert(Universe::heap()->is_gc_active(), "called outside gc");
|
|
56
|
|
57 NOT_PRODUCT(TraceTime tm("MarkFromRootsTask",
|
|
58 PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
|
|
59 ParCompactionManager* cm =
|
|
60 ParCompactionManager::gc_thread_compaction_manager(which);
|
|
61 // cm->allocate_stacks();
|
|
62 assert(cm->stacks_have_been_allocated(),
|
|
63 "Stack space has not been allocated");
|
|
64 PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
|
|
65
|
|
66 switch (_root_type) {
|
|
67 case universe:
|
|
68 Universe::oops_do(&mark_and_push_closure);
|
|
69 break;
|
|
70
|
|
71 case reference_processing:
|
|
72 ReferenceProcessor::oops_do(&mark_and_push_closure);
|
|
73 break;
|
|
74
|
|
75 case jni_handles:
|
|
76 JNIHandles::oops_do(&mark_and_push_closure);
|
|
77 break;
|
|
78
|
|
79 case threads:
|
|
80 {
|
|
81 ResourceMark rm;
|
|
82 Threads::oops_do(&mark_and_push_closure);
|
|
83 }
|
|
84 break;
|
|
85
|
|
86 case object_synchronizer:
|
|
87 ObjectSynchronizer::oops_do(&mark_and_push_closure);
|
|
88 break;
|
|
89
|
|
90 case flat_profiler:
|
|
91 FlatProfiler::oops_do(&mark_and_push_closure);
|
|
92 break;
|
|
93
|
|
94 case management:
|
|
95 Management::oops_do(&mark_and_push_closure);
|
|
96 break;
|
|
97
|
|
98 case jvmti:
|
|
99 JvmtiExport::oops_do(&mark_and_push_closure);
|
|
100 break;
|
|
101
|
|
102 case system_dictionary:
|
|
103 SystemDictionary::always_strong_oops_do(&mark_and_push_closure);
|
|
104 break;
|
|
105
|
|
106 case vm_symbols:
|
|
107 vmSymbols::oops_do(&mark_and_push_closure);
|
|
108 break;
|
|
109
|
|
110 default:
|
|
111 fatal("Unknown root type");
|
|
112 }
|
|
113
|
|
114 // Do the real work
|
|
115 cm->drain_marking_stacks(&mark_and_push_closure);
|
|
116 // cm->deallocate_stacks();
|
|
117 }
|
|
118
|
|
119
|
|
120 //
|
|
121 // RefProcTaskProxy
|
|
122 //
|
|
123
|
|
124 void RefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
|
|
125 {
|
|
126 assert(Universe::heap()->is_gc_active(), "called outside gc");
|
|
127
|
|
128 NOT_PRODUCT(TraceTime tm("RefProcTask",
|
|
129 PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
|
|
130 ParCompactionManager* cm =
|
|
131 ParCompactionManager::gc_thread_compaction_manager(which);
|
|
132 // cm->allocate_stacks();
|
|
133 assert(cm->stacks_have_been_allocated(),
|
|
134 "Stack space has not been allocated");
|
|
135 PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
|
|
136 PSParallelCompact::FollowStackClosure follow_stack_closure(cm);
|
|
137 _rp_task.work(_work_id, *PSParallelCompact::is_alive_closure(),
|
|
138 mark_and_push_closure, follow_stack_closure);
|
|
139 }
|
|
140
|
|
141 //
|
|
142 // RefProcTaskExecutor
|
|
143 //
|
|
144
|
|
145 void RefProcTaskExecutor::execute(ProcessTask& task)
|
|
146 {
|
|
147 ParallelScavengeHeap* heap = PSParallelCompact::gc_heap();
|
|
148 uint parallel_gc_threads = heap->gc_task_manager()->workers();
|
|
149 TaskQueueSetSuper* qset = ParCompactionManager::chunk_array();
|
|
150 ParallelTaskTerminator terminator(parallel_gc_threads, qset);
|
|
151 GCTaskQueue* q = GCTaskQueue::create();
|
|
152 for(uint i=0; i<parallel_gc_threads; i++) {
|
|
153 q->enqueue(new RefProcTaskProxy(task, i));
|
|
154 }
|
|
155 if (task.marks_oops_alive()) {
|
|
156 if (parallel_gc_threads>1) {
|
|
157 for (uint j=0; j<parallel_gc_threads; j++) {
|
|
158 q->enqueue(new StealMarkingTask(&terminator));
|
|
159 }
|
|
160 }
|
|
161 }
|
|
162 PSParallelCompact::gc_task_manager()->execute_and_wait(q);
|
|
163 }
|
|
164
|
|
165 void RefProcTaskExecutor::execute(EnqueueTask& task)
|
|
166 {
|
|
167 ParallelScavengeHeap* heap = PSParallelCompact::gc_heap();
|
|
168 uint parallel_gc_threads = heap->gc_task_manager()->workers();
|
|
169 GCTaskQueue* q = GCTaskQueue::create();
|
|
170 for(uint i=0; i<parallel_gc_threads; i++) {
|
|
171 q->enqueue(new RefEnqueueTaskProxy(task, i));
|
|
172 }
|
|
173 PSParallelCompact::gc_task_manager()->execute_and_wait(q);
|
|
174 }
|
|
175
|
|
176 //
|
|
177 // StealMarkingTask
|
|
178 //
|
|
179
|
|
// Remember the terminator used to coordinate shutdown of the stealing loop.
StealMarkingTask::StealMarkingTask(ParallelTaskTerminator* t) :
  _terminator(t) {}
|
|
182
|
|
183 void StealMarkingTask::do_it(GCTaskManager* manager, uint which) {
|
|
184 assert(Universe::heap()->is_gc_active(), "called outside gc");
|
|
185
|
|
186 NOT_PRODUCT(TraceTime tm("StealMarkingTask",
|
|
187 PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
|
|
188
|
|
189 ParCompactionManager* cm =
|
|
190 ParCompactionManager::gc_thread_compaction_manager(which);
|
|
191 PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
|
|
192
|
|
193 oop obj = NULL;
|
|
194 int random_seed = 17;
|
|
195 while(true) {
|
|
196 if (ParCompactionManager::steal(which, &random_seed, obj)) {
|
|
197 obj->follow_contents(cm);
|
|
198 cm->drain_marking_stacks(&mark_and_push_closure);
|
|
199 } else {
|
|
200 if (terminator()->offer_termination()) {
|
|
201 break;
|
|
202 }
|
|
203 }
|
|
204 }
|
|
205 }
|
|
206
|
|
207 //
|
|
208 // StealChunkCompactionTask
|
|
209 //
|
|
210
|
|
211
|
|
212 StealChunkCompactionTask::StealChunkCompactionTask(ParallelTaskTerminator* t) :
|
|
213 _terminator(t) {};
|
|
214
|
|
215 void StealChunkCompactionTask::do_it(GCTaskManager* manager, uint which) {
|
|
216 assert(Universe::heap()->is_gc_active(), "called outside gc");
|
|
217
|
|
218 NOT_PRODUCT(TraceTime tm("StealChunkCompactionTask",
|
|
219 PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
|
|
220
|
|
221 ParCompactionManager* cm =
|
|
222 ParCompactionManager::gc_thread_compaction_manager(which);
|
|
223
|
|
224 // Has to drain stacks first because there may be chunks on
|
|
225 // preloaded onto the stack and this thread may never have
|
|
226 // done a draining task. Are the draining tasks needed?
|
|
227
|
|
228 cm->drain_chunk_stacks();
|
|
229
|
|
230 size_t chunk_index = 0;
|
|
231 int random_seed = 17;
|
|
232
|
|
233 // If we're the termination task, try 10 rounds of stealing before
|
|
234 // setting the termination flag
|
|
235
|
|
236 while(true) {
|
|
237 if (ParCompactionManager::steal(which, &random_seed, chunk_index)) {
|
|
238 PSParallelCompact::fill_and_update_chunk(cm, chunk_index);
|
|
239 cm->drain_chunk_stacks();
|
|
240 } else {
|
|
241 if (terminator()->offer_termination()) {
|
|
242 break;
|
|
243 }
|
|
244 // Go around again.
|
|
245 }
|
|
246 }
|
|
247 return;
|
|
248 }
|
|
249
|
|
250 UpdateDensePrefixTask::UpdateDensePrefixTask(
|
|
251 PSParallelCompact::SpaceId space_id,
|
|
252 size_t chunk_index_start,
|
|
253 size_t chunk_index_end) :
|
|
254 _space_id(space_id), _chunk_index_start(chunk_index_start),
|
|
255 _chunk_index_end(chunk_index_end)
|
|
256 {}
|
|
257
|
|
258 void UpdateDensePrefixTask::do_it(GCTaskManager* manager, uint which) {
|
|
259
|
|
260 NOT_PRODUCT(TraceTime tm("UpdateDensePrefixTask",
|
|
261 PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
|
|
262
|
|
263 ParCompactionManager* cm =
|
|
264 ParCompactionManager::gc_thread_compaction_manager(which);
|
|
265
|
|
266 PSParallelCompact::update_and_deadwood_in_dense_prefix(cm,
|
|
267 _space_id,
|
|
268 _chunk_index_start,
|
|
269 _chunk_index_end);
|
|
270 }
|
|
271
|
|
272 void DrainStacksCompactionTask::do_it(GCTaskManager* manager, uint which) {
|
|
273 assert(Universe::heap()->is_gc_active(), "called outside gc");
|
|
274
|
|
275 NOT_PRODUCT(TraceTime tm("DrainStacksCompactionTask",
|
|
276 PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
|
|
277
|
|
278 ParCompactionManager* cm =
|
|
279 ParCompactionManager::gc_thread_compaction_manager(which);
|
|
280
|
|
281 // Process any chunks already in the compaction managers stacks.
|
|
282 cm->drain_chunk_stacks();
|
|
283 }
|