comparison src/share/vm/gc_implementation/g1/concurrentMark.cpp @ 8506:c3657d00e343

Merge with tip
author Christos Kotselidis <christos.kotselidis@oracle.com>
date Thu, 21 Mar 2013 14:11:13 +0100
parents 5e401ef52ec0
children 47bc9800972c 9def4075da6d
8505:dee7c8b578c7 8506:c3657d00e343
569 _parallel_marking_threads = 0; 569 _parallel_marking_threads = 0;
570 _max_parallel_marking_threads = 0; 570 _max_parallel_marking_threads = 0;
571 _sleep_factor = 0.0; 571 _sleep_factor = 0.0;
572 _marking_task_overhead = 1.0; 572 _marking_task_overhead = 1.0;
573 } else { 573 } else {
574 if (ConcGCThreads > 0) { 574 if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
575 // notice that ConcGCThreads overwrites G1MarkingOverheadPercent 575 // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
576 // if both are set 576 // if both are set
577
578 _parallel_marking_threads = (uint) ConcGCThreads;
579 _max_parallel_marking_threads = _parallel_marking_threads;
580 _sleep_factor = 0.0; 577 _sleep_factor = 0.0;
581 _marking_task_overhead = 1.0; 578 _marking_task_overhead = 1.0;
582 } else if (G1MarkingOverheadPercent > 0) { 579 } else if (G1MarkingOverheadPercent > 0) {
583 // we will calculate the number of parallel marking threads 580 // We will calculate the number of parallel marking threads based
584 // based on a target overhead with respect to the soft real-time 581 // on a target overhead with respect to the soft real-time goal
585 // goal
586
587 double marking_overhead = (double) G1MarkingOverheadPercent / 100.0; 582 double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
588 double overall_cm_overhead = 583 double overall_cm_overhead =
589 (double) MaxGCPauseMillis * marking_overhead / 584 (double) MaxGCPauseMillis * marking_overhead /
590 (double) GCPauseIntervalMillis; 585 (double) GCPauseIntervalMillis;
591 double cpu_ratio = 1.0 / (double) os::processor_count(); 586 double cpu_ratio = 1.0 / (double) os::processor_count();
594 overall_cm_overhead / marking_thread_num * 589 overall_cm_overhead / marking_thread_num *
595 (double) os::processor_count(); 590 (double) os::processor_count();
596 double sleep_factor = 591 double sleep_factor =
597 (1.0 - marking_task_overhead) / marking_task_overhead; 592 (1.0 - marking_task_overhead) / marking_task_overhead;
598 593
599 _parallel_marking_threads = (uint) marking_thread_num; 594 FLAG_SET_ERGO(uintx, ConcGCThreads, (uint) marking_thread_num);
600 _max_parallel_marking_threads = _parallel_marking_threads;
601 _sleep_factor = sleep_factor; 595 _sleep_factor = sleep_factor;
602 _marking_task_overhead = marking_task_overhead; 596 _marking_task_overhead = marking_task_overhead;
603 } else { 597 } else {
604 _parallel_marking_threads = scale_parallel_threads((uint)ParallelGCThreads); 598 // Calculate the number of parallel marking threads by scaling
605 _max_parallel_marking_threads = _parallel_marking_threads; 599 // the number of parallel GC threads.
600 uint marking_thread_num = scale_parallel_threads((uint) ParallelGCThreads);
601 FLAG_SET_ERGO(uintx, ConcGCThreads, marking_thread_num);
606 _sleep_factor = 0.0; 602 _sleep_factor = 0.0;
607 _marking_task_overhead = 1.0; 603 _marking_task_overhead = 1.0;
608 } 604 }
605
606 assert(ConcGCThreads > 0, "Should have been set");
607 _parallel_marking_threads = (uint) ConcGCThreads;
608 _max_parallel_marking_threads = _parallel_marking_threads;
609 609
610 if (parallel_marking_threads() > 1) { 610 if (parallel_marking_threads() > 1) {
611 _cleanup_task_overhead = 1.0; 611 _cleanup_task_overhead = 1.0;
612 } else { 612 } else {
613 _cleanup_task_overhead = marking_task_overhead(); 613 _cleanup_task_overhead = marking_task_overhead();
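
The hunk above consolidates how many concurrent marking threads G1 uses: an explicitly set ConcGCThreads wins, otherwise the count is derived from G1MarkingOverheadPercent or by scaling ParallelGCThreads, and the chosen value is written back with FLAG_SET_ERGO so the later assert and the _parallel_marking_threads assignment always see a non-zero ConcGCThreads. A minimal standalone sketch of that decision flow (hypothetical struct and helper names, not the HotSpot sources; the overhead-based thread count comes from lines elided in this hunk, so it is taken here as an input):

#include <cassert>

// Illustrative model only -- Flags and choose_conc_marking_threads() are
// hypothetical names, not HotSpot APIs.
struct Flags {
  bool     conc_gc_threads_is_default;   // FLAG_IS_DEFAULT(ConcGCThreads)
  unsigned conc_gc_threads;              // ConcGCThreads
  unsigned marking_overhead_percent;     // G1MarkingOverheadPercent
};

unsigned choose_conc_marking_threads(Flags& f,
                                     unsigned scaled_parallel_gc_threads,
                                     unsigned overhead_based_thread_num) {
  if (!f.conc_gc_threads_is_default && f.conc_gc_threads > 0) {
    // ConcGCThreads has precedence over G1MarkingOverheadPercent if both are set.
  } else if (f.marking_overhead_percent > 0) {
    // Overhead-driven count (the exact computation is elided in the hunk above);
    // recorded ergonomically, as FLAG_SET_ERGO(uintx, ConcGCThreads, ...) does.
    f.conc_gc_threads = overhead_based_thread_num;
  } else {
    // Fall back to scaling the number of parallel GC threads.
    f.conc_gc_threads = scaled_parallel_gc_threads;
  }
  assert(f.conc_gc_threads > 0 && "Should have been set");
  return f.conc_gc_threads;   // becomes _parallel_marking_threads and its max
}
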
1188 assert(parallel_marking_threads() <= max_parallel_marking_threads(), 1188 assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1189 "Maximum number of marking threads exceeded"); 1189 "Maximum number of marking threads exceeded");
1190 uint active_workers = MAX2(1U, parallel_marking_threads()); 1190 uint active_workers = MAX2(1U, parallel_marking_threads());
1191 1191
1192 CMRootRegionScanTask task(this); 1192 CMRootRegionScanTask task(this);
1193 if (parallel_marking_threads() > 0) { 1193 if (use_parallel_marking_threads()) {
1194 _parallel_workers->set_active_workers((int) active_workers); 1194 _parallel_workers->set_active_workers((int) active_workers);
1195 _parallel_workers->run_task(&task); 1195 _parallel_workers->run_task(&task);
1196 } else { 1196 } else {
1197 task.work(0); 1197 task.work(0);
1198 } 1198 }
1224 1224
1225 // Parallel task terminator is set in "set_phase()" 1225 // Parallel task terminator is set in "set_phase()"
1226 set_phase(active_workers, true /* concurrent */); 1226 set_phase(active_workers, true /* concurrent */);
1227 1227
1228 CMConcurrentMarkingTask markingTask(this, cmThread()); 1228 CMConcurrentMarkingTask markingTask(this, cmThread());
1229 if (parallel_marking_threads() > 0) { 1229 if (use_parallel_marking_threads()) {
1230 _parallel_workers->set_active_workers((int)active_workers); 1230 _parallel_workers->set_active_workers((int)active_workers);
1231 // Don't set _n_par_threads because it affects MT in process_strong_roots() 1231 // Don't set _n_par_threads because it affects MT in process_strong_roots()
1232 // and the decisions on that MT processing are made elsewhere. 1232 // and the decisions on that MT processing are made elsewhere.
1233 assert(_parallel_workers->active_workers() > 0, "Should have been set"); 1233 assert(_parallel_workers->active_workers() > 0, "Should have been set");
1234 _parallel_workers->run_task(&markingTask); 1234 _parallel_workers->run_task(&markingTask);
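
Both call sites above now dispatch through use_parallel_marking_threads() rather than testing parallel_marking_threads() > 0 directly: when marking is multi-threaded the task runs on the marking work gang, otherwise it runs inline on the calling thread as worker 0. A small standalone sketch of that dispatch shape (MarkingTask and WorkGang here are stand-ins, not the real HotSpot classes):

// Stand-in types, illustrative only.
struct MarkingTask {
  virtual void work(unsigned worker_id) = 0;
  virtual ~MarkingTask() {}
};

struct WorkGang {
  int active;
  void set_active_workers(int n) { active = n; }
  // The real gang runs the task on 'active' worker threads; a serial stand-in:
  void run_task(MarkingTask* t) { for (int i = 0; i < active; i++) t->work((unsigned) i); }
};

void run_marking_task(bool use_parallel_marking_threads,
                      WorkGang* parallel_workers,
                      unsigned active_workers,
                      MarkingTask& task) {
  if (use_parallel_marking_threads) {
    parallel_workers->set_active_workers((int) active_workers);
    parallel_workers->run_task(&task);
  } else {
    task.work(0);   // single-threaded: the calling thread does the work as worker 0
  }
}
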
2165 } 2165 }
2166 } 2166 }
2167 assert(tmp_free_list.is_empty(), "post-condition"); 2167 assert(tmp_free_list.is_empty(), "post-condition");
2168 } 2168 }
2169 2169
2170 // Support closures for reference processing in G1 2170 // Supporting Object and Oop closures for reference discovery
2171 // and processing during marking
2171 2172
2172 bool G1CMIsAliveClosure::do_object_b(oop obj) { 2173 bool G1CMIsAliveClosure::do_object_b(oop obj) {
2173 HeapWord* addr = (HeapWord*)obj; 2174 HeapWord* addr = (HeapWord*)obj;
2174 return addr != NULL && 2175 return addr != NULL &&
2175 (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj)); 2176 (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
2176 } 2177 }
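
The predicate above treats an oop as alive for reference-processing purposes when it is non-NULL and either lies outside the G1 reserved heap or is not considered "ill" by the heap (_g1->is_obj_ill()). A one-line restatement of that boolean (plain C++, not HotSpot code; obj_is_ill stands in for the is_obj_ill() query):

// Illustrative restatement of G1CMIsAliveClosure::do_object_b() above.
bool g1_cm_is_alive(const void* addr, bool in_g1_reserved, bool obj_is_ill) {
  return addr != 0 && (!in_g1_reserved || !obj_is_ill);
}
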
2177 2178
2178 class G1CMKeepAliveClosure: public ExtendedOopClosure { 2179 // 'Keep Alive' oop closure used by both serial and parallel reference processing.
2179 G1CollectedHeap* _g1; 2180 // Uses the CMTask associated with a worker thread (for serial reference
2180 ConcurrentMark* _cm; 2181 // processing the CMTask for worker 0 is used) to preserve (mark) and
2181 public: 2182 // trace referent objects.
2182 G1CMKeepAliveClosure(G1CollectedHeap* g1, ConcurrentMark* cm) : 2183 //
2183 _g1(g1), _cm(cm) { 2184 // Using the CMTask and embedded local queues avoids having the worker
2184 assert(Thread::current()->is_VM_thread(), "otherwise fix worker id"); 2185 // threads operating on the global mark stack. This reduces the risk
2185 } 2186 // of overflowing the stack - which we would rather avoid at this late
2186 2187 // stage. Also using the tasks' local queues removes the potential
2187 virtual void do_oop(narrowOop* p) { do_oop_work(p); } 2188 // of the workers interfering with each other that could occur if
2188 virtual void do_oop( oop* p) { do_oop_work(p); } 2189 // operating on the global stack.
2189 2190
2190 template <class T> void do_oop_work(T* p) { 2191 class G1CMKeepAliveAndDrainClosure: public OopClosure {
2191 oop obj = oopDesc::load_decode_heap_oop(p);
2192 HeapWord* addr = (HeapWord*)obj;
2193
2194 if (_cm->verbose_high()) {
2195 gclog_or_tty->print_cr("\t[0] we're looking at location "
2196 "*"PTR_FORMAT" = "PTR_FORMAT,
2197 p, (void*) obj);
2198 }
2199
2200 if (_g1->is_in_g1_reserved(addr) && _g1->is_obj_ill(obj)) {
2201 _cm->mark_and_count(obj);
2202 _cm->mark_stack_push(obj);
2203 }
2204 }
2205 };
2206
2207 class G1CMDrainMarkingStackClosure: public VoidClosure {
2208 ConcurrentMark* _cm;
2209 CMMarkStack* _markStack;
2210 G1CMKeepAliveClosure* _oopClosure;
2211 public:
2212 G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMMarkStack* markStack,
2213 G1CMKeepAliveClosure* oopClosure) :
2214 _cm(cm),
2215 _markStack(markStack),
2216 _oopClosure(oopClosure) { }
2217
2218 void do_void() {
2219 _markStack->drain(_oopClosure, _cm->nextMarkBitMap(), false);
2220 }
2221 };
2222
2223 // 'Keep Alive' closure used by parallel reference processing.
2224 // An instance of this closure is used in the parallel reference processing
2225 // code rather than an instance of G1CMKeepAliveClosure. We could have used
2226 // the G1CMKeepAliveClosure as it is MT-safe. Also reference objects are
2227 // placed on to discovered ref lists once so we can mark and push with no
2228 // need to check whether the object has already been marked. Using the
2229 // G1CMKeepAliveClosure would mean, however, having all the worker threads
2230 // operating on the global mark stack. This means that an individual
2231 // worker would be doing lock-free pushes while it processes its own
2232 // discovered ref list followed by drain call. If the discovered ref lists
2233 // are unbalanced then this could cause interference with the other
2234 // workers. Using a CMTask (and its embedded local data structures)
2235 // avoids that potential interference.
2236 class G1CMParKeepAliveAndDrainClosure: public OopClosure {
2237 ConcurrentMark* _cm; 2192 ConcurrentMark* _cm;
2238 CMTask* _task; 2193 CMTask* _task;
2239 int _ref_counter_limit; 2194 int _ref_counter_limit;
2240 int _ref_counter; 2195 int _ref_counter;
2241 public: 2196 public:
2242 G1CMParKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task) : 2197 G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task) :
2243 _cm(cm), _task(task), 2198 _cm(cm), _task(task), _ref_counter_limit(G1RefProcDrainInterval) {
2244 _ref_counter_limit(G1RefProcDrainInterval) {
2245 assert(_ref_counter_limit > 0, "sanity"); 2199 assert(_ref_counter_limit > 0, "sanity");
2246 _ref_counter = _ref_counter_limit; 2200 _ref_counter = _ref_counter_limit;
2247 } 2201 }
2248 2202
2249 virtual void do_oop(narrowOop* p) { do_oop_work(p); } 2203 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
2260 2214
2261 _task->deal_with_reference(obj); 2215 _task->deal_with_reference(obj);
2262 _ref_counter--; 2216 _ref_counter--;
2263 2217
2264 if (_ref_counter == 0) { 2218 if (_ref_counter == 0) {
2265 // We have dealt with _ref_counter_limit references, pushing them and objects 2219 // We have dealt with _ref_counter_limit references, pushing them
2266 // reachable from them on to the local stack (and possibly the global stack). 2220 // and objects reachable from them on to the local stack (and
2267 // Call do_marking_step() to process these entries. We call the routine in a 2221 // possibly the global stack). Call CMTask::do_marking_step() to
2268 // loop, which we'll exit if there's nothing more to do (i.e. we're done 2222 // process these entries.
2269 // with the entries that we've pushed as a result of the deal_with_reference 2223 //
2270 // calls above) or we overflow. 2224 // We call CMTask::do_marking_step() in a loop, which we'll exit if
2271 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted() flag 2225 // there's nothing more to do (i.e. we're done with the entries that
2272 // while there may still be some work to do. (See the comment at the 2226 // were pushed as a result of the CMTask::deal_with_reference() calls
2273 // beginning of CMTask::do_marking_step() for those conditions - one of which 2227 // above) or we overflow.
2274 // is reaching the specified time target.) It is only when 2228 //
2275 // CMTask::do_marking_step() returns without setting the has_aborted() flag 2229 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2276 // that the marking has completed. 2230 // flag while there may still be some work to do. (See the comment at
2231 // the beginning of CMTask::do_marking_step() for those conditions -
2232 // one of which is reaching the specified time target.) It is only
2233 // when CMTask::do_marking_step() returns without setting the
2234 // has_aborted() flag that the marking step has completed.
2277 do { 2235 do {
2278 double mark_step_duration_ms = G1ConcMarkStepDurationMillis; 2236 double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
2279 _task->do_marking_step(mark_step_duration_ms, 2237 _task->do_marking_step(mark_step_duration_ms,
2280 false /* do_stealing */, 2238 false /* do_stealing */,
2281 false /* do_termination */); 2239 false /* do_termination */);
2288 } 2246 }
2289 } 2247 }
2290 } 2248 }
2291 }; 2249 };
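
The keep-alive closure above throttles how often it drains: after every G1RefProcDrainInterval calls to CMTask::deal_with_reference() it loops CMTask::do_marking_step() (no stealing, no termination) until the step completes without aborting or the global mark stack overflows. A condensed sketch of that counter-and-drain pattern (CMTaskModel is a hypothetical stand-in; the exact loop-exit test is elided in the hunk above and is reconstructed here from the surrounding comments):

// Illustrative model of the ref-counter throttling in
// G1CMKeepAliveAndDrainClosure::do_oop_work() -- not the HotSpot class.
struct CMTaskModel {
  void deal_with_reference(void* obj) { /* mark obj, push it on the local queues */ }
  void do_marking_step(double target_ms, bool do_stealing, bool do_termination) { /* ... */ }
  bool has_aborted() const { return false; }   // stub
};

struct KeepAliveModel {
  CMTaskModel* task;
  bool*        cm_has_overflown;   // models _cm->has_overflown()
  int          ref_counter_limit;  // G1RefProcDrainInterval
  int          ref_counter;

  void keep_alive(void* obj, double step_duration_ms) {
    task->deal_with_reference(obj);
    if (--ref_counter == 0) {
      // Drain what deal_with_reference() pushed; keep stepping until the task
      // finishes without aborting (e.g. on hitting its time target), unless
      // the global mark stack has overflowed (per the comments above).
      do {
        task->do_marking_step(step_duration_ms,
                              false /* do_stealing */,
                              false /* do_termination */);
      } while (task->has_aborted() && !*cm_has_overflown);
      ref_counter = ref_counter_limit;
    }
  }
};
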
2292 2250
2293 class G1CMParDrainMarkingStackClosure: public VoidClosure { 2251 // 'Drain' closure used by both serial and parallel reference processing.
2252 // Uses the CMTask associated with a given worker thread (for serial
2253 // reference processing the CMTask for worker 0 is used). Calls the
2254 // do_marking_step routine, with an unbelievably large timeout value,
2255 // to drain the marking data structures of the remaining entries
2256 // added by the 'keep alive' oop closure above.
2257
2258 class G1CMDrainMarkingStackClosure: public VoidClosure {
2294 ConcurrentMark* _cm; 2259 ConcurrentMark* _cm;
2295 CMTask* _task; 2260 CMTask* _task;
2261 bool _do_stealing;
2262 bool _do_termination;
2296 public: 2263 public:
2297 G1CMParDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task) : 2264 G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task, bool is_par) :
2298 _cm(cm), _task(task) { } 2265 _cm(cm), _task(task) {
2266 assert(is_par || _task->worker_id() == 0,
2267 "Only task for worker 0 should be used if ref processing is single threaded");
2268 // We only allow stealing and only enter the termination protocol
2269 // in CMTask::do_marking_step() if this closure is being instantiated
2270 // for parallel reference processing.
2271 _do_stealing = _do_termination = is_par;
2272 }
2299 2273
2300 void do_void() { 2274 void do_void() {
2301 do { 2275 do {
2302 if (_cm->verbose_high()) { 2276 if (_cm->verbose_high()) {
2303 gclog_or_tty->print_cr("\t[%u] Drain: Calling do marking_step", 2277 gclog_or_tty->print_cr("\t[%u] Drain: Calling do_marking_step - "
2304 _task->worker_id()); 2278 "stealing: %s, termination: %s",
2279 _task->worker_id(),
2280 BOOL_TO_STR(_do_stealing),
2281 BOOL_TO_STR(_do_termination));
2305 } 2282 }
2306 2283
2307 // We call CMTask::do_marking_step() to completely drain the local and 2284 // We call CMTask::do_marking_step() to completely drain the local
2308 // global marking stacks. The routine is called in a loop, which we'll 2285 // and global marking stacks of entries pushed by the 'keep alive'
2309 // exit if there's nothing more to do (i.e. we'completely drained the 2286 // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
2310 // entries that were pushed as a result of applying the 2287 //
2311 // G1CMParKeepAliveAndDrainClosure to the entries on the discovered ref 2288 // CMTask::do_marking_step() is called in a loop, which we'll exit
2312 // lists above) or we overflow the global marking stack. 2289 // if there's nothing more to do (i.e. we've completely drained the
2313 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted() flag 2290 // entries that were pushed as a result of applying the 'keep alive'
2314 // while there may still be some work to do. (See the comment at the 2291 // closure to the entries on the discovered ref lists) or we overflow
2315 // beginning of CMTask::do_marking_step() for those conditions - one of which 2292 // the global marking stack.
2316 // is reaching the specified time target.) It is only when 2293 //
2317 // CMTask::do_marking_step() returns without setting the has_aborted() flag 2294 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2318 // that the marking has completed. 2295 // flag while there may still be some work to do. (See the comment at
2296 // the beginning of CMTask::do_marking_step() for those conditions -
2297 // one of which is reaching the specified time target.) It is only
2298 // when CMTask::do_marking_step() returns without setting the
2299 // has_aborted() flag that the marking step has completed.
2319 2300
2320 _task->do_marking_step(1000000000.0 /* something very large */, 2301 _task->do_marking_step(1000000000.0 /* something very large */,
2321 true /* do_stealing */, 2302 _do_stealing,
2322 true /* do_termination */); 2303 _do_termination);
2323 } while (_task->has_aborted() && !_cm->has_overflown()); 2304 } while (_task->has_aborted() && !_cm->has_overflown());
2324 } 2305 }
2325 }; 2306 };
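
With the separate Par* variants removed, the same two closure classes now serve both reference-processing modes; only the constructor arguments differ. For reference, this is how the two instantiation sites later in this change configure them: the serial path in weakRefsWork() binds worker 0's CMTask with is_par == false, while the parallel proxy binds each worker's task and the reference processor's processing_is_mt() flag.

// Serial reference processing (ConcurrentMark::weakRefsWork()):
G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0));
G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), false /* is_par */);

// Parallel reference processing (G1CMRefProcTaskProxy::work(worker_id)):
G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, marking_task);
G1CMDrainMarkingStackClosure g1_par_drain(_cm, marking_task, _processing_is_mt);
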
2326 2307
2327 // Implementation of AbstractRefProcTaskExecutor for parallel 2308 // Implementation of AbstractRefProcTaskExecutor for parallel
2350 class G1CMRefProcTaskProxy: public AbstractGangTask { 2331 class G1CMRefProcTaskProxy: public AbstractGangTask {
2351 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask; 2332 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
2352 ProcessTask& _proc_task; 2333 ProcessTask& _proc_task;
2353 G1CollectedHeap* _g1h; 2334 G1CollectedHeap* _g1h;
2354 ConcurrentMark* _cm; 2335 ConcurrentMark* _cm;
2336 bool _processing_is_mt;
2355 2337
2356 public: 2338 public:
2357 G1CMRefProcTaskProxy(ProcessTask& proc_task, 2339 G1CMRefProcTaskProxy(ProcessTask& proc_task,
2358 G1CollectedHeap* g1h, 2340 G1CollectedHeap* g1h,
2359 ConcurrentMark* cm) : 2341 ConcurrentMark* cm) :
2360 AbstractGangTask("Process reference objects in parallel"), 2342 AbstractGangTask("Process reference objects in parallel"),
2361 _proc_task(proc_task), _g1h(g1h), _cm(cm) { } 2343 _proc_task(proc_task), _g1h(g1h), _cm(cm) {
2344 ReferenceProcessor* rp = _g1h->ref_processor_cm();
2345 _processing_is_mt = rp->processing_is_mt();
2346 }
2362 2347
2363 virtual void work(uint worker_id) { 2348 virtual void work(uint worker_id) {
2364 CMTask* marking_task = _cm->task(worker_id); 2349 CMTask* marking_task = _cm->task(worker_id);
2365 G1CMIsAliveClosure g1_is_alive(_g1h); 2350 G1CMIsAliveClosure g1_is_alive(_g1h);
2366 G1CMParKeepAliveAndDrainClosure g1_par_keep_alive(_cm, marking_task); 2351 G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, marking_task);
2367 G1CMParDrainMarkingStackClosure g1_par_drain(_cm, marking_task); 2352 G1CMDrainMarkingStackClosure g1_par_drain(_cm, marking_task, _processing_is_mt);
2368 2353
2369 _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain); 2354 _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
2370 } 2355 }
2371 }; 2356 };
2372 2357
2373 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) { 2358 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
2374 assert(_workers != NULL, "Need parallel worker threads."); 2359 assert(_workers != NULL, "Need parallel worker threads.");
2360 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
2375 2361
2376 G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm); 2362 G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
2377 2363
2378 // We need to reset the phase for each task execution so that 2364 // We need to reset the phase for each task execution so that
2379 // the termination protocol of CMTask::do_marking_step works. 2365 // the termination protocol of CMTask::do_marking_step works.
2397 } 2383 }
2398 }; 2384 };
2399 2385
2400 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) { 2386 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
2401 assert(_workers != NULL, "Need parallel worker threads."); 2387 assert(_workers != NULL, "Need parallel worker threads.");
2388 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
2402 2389
2403 G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task); 2390 G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
2404 2391
2405 _g1h->set_par_threads(_active_workers); 2392 _g1h->set_par_threads(_active_workers);
2406 _workers->run_task(&enq_task_proxy); 2393 _workers->run_task(&enq_task_proxy);
2427 ReferenceProcessor* rp = g1h->ref_processor_cm(); 2414 ReferenceProcessor* rp = g1h->ref_processor_cm();
2428 2415
2429 // See the comment in G1CollectedHeap::ref_processing_init() 2416 // See the comment in G1CollectedHeap::ref_processing_init()
2430 // about how reference processing currently works in G1. 2417 // about how reference processing currently works in G1.
2431 2418
2432 // Process weak references. 2419 // Set the soft reference policy
2433 rp->setup_policy(clear_all_soft_refs); 2420 rp->setup_policy(clear_all_soft_refs);
2434 assert(_markStack.isEmpty(), "mark stack should be empty"); 2421 assert(_markStack.isEmpty(), "mark stack should be empty");
2435 2422
2436 G1CMKeepAliveClosure g1_keep_alive(g1h, this); 2423 // Non-MT instances of the 'Keep Alive' and 'Complete GC' closures.
2437 G1CMDrainMarkingStackClosure 2424 G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0));
2438 g1_drain_mark_stack(this, &_markStack, &g1_keep_alive); 2425 G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), false);
2439 2426
2440 // We use the work gang from the G1CollectedHeap and we utilize all 2427 // We need at least one active thread. If reference processing is
2441 // the worker threads. 2428 // not multi-threaded we use the current (ConcurrentMarkThread) thread,
2442 uint active_workers = g1h->workers() ? g1h->workers()->active_workers() : 1U; 2429 // otherwise we use the work gang from the G1CollectedHeap and we
2430 // utilize all the worker threads we can.
2431 uint active_workers = (rp->processing_is_mt() && g1h->workers() != NULL
2432 ? g1h->workers()->active_workers()
2433 : 1U);
2434
2443 active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U); 2435 active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
2444 2436
2445 G1CMRefProcTaskExecutor par_task_executor(g1h, this, 2437 G1CMRefProcTaskExecutor par_task_executor(g1h, this,
2446 g1h->workers(), active_workers); 2438 g1h->workers(), active_workers);
2447 2439
2448 if (rp->processing_is_mt()) { 2440 AbstractRefProcTaskExecutor* executor = (rp->processing_is_mt()
2449 // Set the degree of MT here. If the discovery is done MT, there 2441 ? &par_task_executor
2450 // may have been a different number of threads doing the discovery 2442 : NULL);
2451 // and a different number of discovered lists may have Ref objects. 2443
2452 // That is OK as long as the Reference lists are balanced (see 2444 // Set the degree of MT processing here. If the discovery was done MT,
2453 // balance_all_queues() and balance_queues()). 2445 // the number of threads involved during discovery could differ from
2454 rp->set_active_mt_degree(active_workers); 2446 // the number of active workers. This is OK as long as the discovered
2455 2447 // Reference lists are balanced (see balance_all_queues() and balance_queues()).
2456 rp->process_discovered_references(&g1_is_alive, 2448 rp->set_active_mt_degree(active_workers);
2449
2450 // Process the weak references.
2451 rp->process_discovered_references(&g1_is_alive,
2457 &g1_keep_alive, 2452 &g1_keep_alive,
2458 &g1_drain_mark_stack, 2453 &g1_drain_mark_stack,
2459 &par_task_executor); 2454 executor);
2460 2455
2461 // The work routines of the parallel keep_alive and drain_marking_stack 2456 // The do_oop work routines of the keep_alive and drain_marking_stack
2462 // will set the has_overflown flag if we overflow the global marking 2457 // oop closures will set the has_overflown flag if we overflow the
2463 // stack. 2458 // global marking stack.
2464 } else {
2465 rp->process_discovered_references(&g1_is_alive,
2466 &g1_keep_alive,
2467 &g1_drain_mark_stack,
2468 NULL);
2469 }
2470 2459
2471 assert(_markStack.overflow() || _markStack.isEmpty(), 2460 assert(_markStack.overflow() || _markStack.isEmpty(),
2472 "mark stack should be empty (unless it overflowed)"); 2461 "mark stack should be empty (unless it overflowed)");
2473 if (_markStack.overflow()) { 2462 if (_markStack.overflow()) {
2474 // Should have been done already when we tried to push an 2463 // This should have been done already when we tried to push an
2475 // entry on to the global mark stack. But let's do it again. 2464 // entry on to the global mark stack. But let's do it again.
2476 set_has_overflown(); 2465 set_has_overflown();
2477 } 2466 }
2478 2467
2479 if (rp->processing_is_mt()) { 2468 assert(rp->num_q() == active_workers, "why not");
2480 assert(rp->num_q() == active_workers, "why not"); 2469
2481 rp->enqueue_discovered_references(&par_task_executor); 2470 rp->enqueue_discovered_references(executor);
2482 } else {
2483 rp->enqueue_discovered_references();
2484 }
2485 2471
2486 rp->verify_no_references_recorded(); 2472 rp->verify_no_references_recorded();
2487 assert(!rp->discovery_enabled(), "Post condition"); 2473 assert(!rp->discovery_enabled(), "Post condition");
2488 } 2474 }
2489 2475
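
The rewritten weakRefsWork() above collapses the old MT/non-MT branches into a single call path: it picks either the parallel task executor or NULL (which ReferenceProcessor treats as "process on the calling thread", exactly what the removed serial branch did) and then makes one process/enqueue call pair. A condensed sketch of that shape (stand-in types, not the HotSpot ReferenceProcessor API):

#include <cstddef>

// Stand-in types, illustrative only.
struct Executor {};

struct RefProcessorModel {
  bool mt;
  bool processing_is_mt() const { return mt; }
  void set_active_mt_degree(unsigned n) { /* ... */ }
  // A NULL executor means "run serially on the calling thread".
  void process_discovered_references(Executor* executor) { /* is_alive / keep_alive / drain closures omitted */ }
  void enqueue_discovered_references(Executor* executor) { /* ... */ }
};

void weak_refs_work_shape(RefProcessorModel* rp,
                          Executor* par_task_executor,
                          unsigned active_workers) {
  Executor* executor = rp->processing_is_mt() ? par_task_executor : NULL;

  // If discovery was done MT the thread counts may differ; that is fine as
  // long as the discovered Reference lists are balanced.
  rp->set_active_mt_degree(active_workers);

  rp->process_discovered_references(executor);   // closures may set has_overflown
  rp->enqueue_discovered_references(executor);
}
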
3240 cmThread()->vtime_accum(), 3226 cmThread()->vtime_accum(),
3241 cmThread()->vtime_mark_accum()); 3227 cmThread()->vtime_mark_accum());
3242 } 3228 }
3243 3229
3244 void ConcurrentMark::print_worker_threads_on(outputStream* st) const { 3230 void ConcurrentMark::print_worker_threads_on(outputStream* st) const {
3245 _parallel_workers->print_worker_threads_on(st); 3231 if (use_parallel_marking_threads()) {
3232 _parallel_workers->print_worker_threads_on(st);
3233 }
3246 } 3234 }
3247 3235
3248 // We take a break if someone is trying to stop the world. 3236 // We take a break if someone is trying to stop the world.
3249 bool ConcurrentMark::do_yield_check(uint worker_id) { 3237 bool ConcurrentMark::do_yield_check(uint worker_id) {
3250 if (should_yield()) { 3238 if (should_yield()) {
4072 MemRegion mr = MemRegion(_finger, _region_limit); 4060 MemRegion mr = MemRegion(_finger, _region_limit);
4073 4061
4074 if (_cm->verbose_low()) { 4062 if (_cm->verbose_low()) {
4075 gclog_or_tty->print_cr("[%u] we're scanning part " 4063 gclog_or_tty->print_cr("[%u] we're scanning part "
4076 "["PTR_FORMAT", "PTR_FORMAT") " 4064 "["PTR_FORMAT", "PTR_FORMAT") "
4077 "of region "PTR_FORMAT, 4065 "of region "HR_FORMAT,
4078 _worker_id, _finger, _region_limit, _curr_region); 4066 _worker_id, _finger, _region_limit,
4067 HR_FORMAT_PARAMS(_curr_region));
4079 } 4068 }
4080 4069
4081 // Let's iterate over the bitmap of the part of the 4070 assert(!_curr_region->isHumongous() || mr.start() == _curr_region->bottom(),
4082 // region that is left. 4071 "humongous regions should go around loop once only");
4083 if (mr.is_empty() || _nextMarkBitMap->iterate(&bitmap_closure, mr)) { 4072
4084 // We successfully completed iterating over the region. Now, 4073 // Some special cases:
4085 // let's give up the region. 4074 // If the memory region is empty, we can just give up the region.
4075 // If the current region is humongous then we only need to check
4076 // the bitmap for the bit associated with the start of the object,
4077 // scan the object if it's live, and give up the region.
4078 // Otherwise, let's iterate over the bitmap of the part of the region
4079 // that is left.
4080 // If the iteration is successful, give up the region.
4081 if (mr.is_empty()) {
4082 giveup_current_region();
4083 regular_clock_call();
4084 } else if (_curr_region->isHumongous() && mr.start() == _curr_region->bottom()) {
4085 if (_nextMarkBitMap->isMarked(mr.start())) {
4086 // The object is marked - apply the closure
4087 BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start());
4088 bitmap_closure.do_bit(offset);
4089 }
4090 // Even if this task aborted while scanning the humongous object
4091 // we can (and should) give up the current region.
4092 giveup_current_region();
4093 regular_clock_call();
4094 } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) {
4086 giveup_current_region(); 4095 giveup_current_region();
4087 regular_clock_call(); 4096 regular_clock_call();
4088 } else { 4097 } else {
4089 assert(has_aborted(), "currently the only way to do so"); 4098 assert(has_aborted(), "currently the only way to do so");
4090 // The only way to abort the bitmap iteration is to return 4099 // The only way to abort the bitmap iteration is to return