Mercurial > hg > truffle
comparison src/share/vm/gc_implementation/g1/concurrentMark.cpp @ 8787:fa08949fe0cb
8009536: G1: Apache Lucene hang during reference processing
Summary: In CMTask::do_marking_step(), skip offering termination and entering the first and second synchronization barriers if called from a serial context, i.e. the VM thread.
Reviewed-by: brutisso, tschatzl
author | johnc |
---|---|
date | Mon, 18 Mar 2013 11:05:27 -0700 |
parents | 15401203db6b |
children | e864cc14ca75 |
comparison
equal
deleted
inserted
replaced
8786:19f9fabd94cc | 8787:fa08949fe0cb |
---|---|
1063 double start_vtime_sec = os::elapsedVTime(); | 1063 double start_vtime_sec = os::elapsedVTime(); |
1064 double start_time_sec = os::elapsedTime(); | 1064 double start_time_sec = os::elapsedTime(); |
1065 double mark_step_duration_ms = G1ConcMarkStepDurationMillis; | 1065 double mark_step_duration_ms = G1ConcMarkStepDurationMillis; |
1066 | 1066 |
1067 the_task->do_marking_step(mark_step_duration_ms, | 1067 the_task->do_marking_step(mark_step_duration_ms, |
1068 true /* do_stealing */, | 1068 true /* do_termination */, |
1069 true /* do_termination */); | 1069 false /* is_serial*/); |
1070 | 1070 |
1071 double end_time_sec = os::elapsedTime(); | 1071 double end_time_sec = os::elapsedTime(); |
1072 double end_vtime_sec = os::elapsedVTime(); | 1072 double end_vtime_sec = os::elapsedVTime(); |
1073 double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec; | 1073 double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec; |
1074 double elapsed_time_sec = end_time_sec - start_time_sec; | 1074 double elapsed_time_sec = end_time_sec - start_time_sec; |
2182 // state. Also using the tasks' local queues removes the potential | 2182 // state. Also using the tasks' local queues removes the potential |
2183 // of the workers interfering with each other that could occur if | 2183 // of the workers interfering with each other that could occur if |
2184 // operating on the global stack. | 2184 // operating on the global stack. |
2185 | 2185 |
2186 class G1CMKeepAliveAndDrainClosure: public OopClosure { | 2186 class G1CMKeepAliveAndDrainClosure: public OopClosure { |
2187 ConcurrentMark* _cm; | 2187 ConcurrentMark* _cm; |
2188 CMTask* _task; | 2188 CMTask* _task; |
2189 int _ref_counter_limit; | 2189 int _ref_counter_limit; |
2190 int _ref_counter; | 2190 int _ref_counter; |
2191 bool _is_serial; | |
2191 public: | 2192 public: |
2192 G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task) : | 2193 G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) : |
2193 _cm(cm), _task(task), _ref_counter_limit(G1RefProcDrainInterval) { | 2194 _cm(cm), _task(task), _is_serial(is_serial), |
2195 _ref_counter_limit(G1RefProcDrainInterval) { | |
2194 assert(_ref_counter_limit > 0, "sanity"); | 2196 assert(_ref_counter_limit > 0, "sanity"); |
2197 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code"); | |
2195 _ref_counter = _ref_counter_limit; | 2198 _ref_counter = _ref_counter_limit; |
2196 } | 2199 } |
2197 | 2200 |
2198 virtual void do_oop(narrowOop* p) { do_oop_work(p); } | 2201 virtual void do_oop(narrowOop* p) { do_oop_work(p); } |
2199 virtual void do_oop( oop* p) { do_oop_work(p); } | 2202 virtual void do_oop( oop* p) { do_oop_work(p); } |
2228 // when CMTask::do_marking_step() returns without setting the | 2231 // when CMTask::do_marking_step() returns without setting the |
2229 // has_aborted() flag that the marking step has completed. | 2232 // has_aborted() flag that the marking step has completed. |
2230 do { | 2233 do { |
2231 double mark_step_duration_ms = G1ConcMarkStepDurationMillis; | 2234 double mark_step_duration_ms = G1ConcMarkStepDurationMillis; |
2232 _task->do_marking_step(mark_step_duration_ms, | 2235 _task->do_marking_step(mark_step_duration_ms, |
2233 false /* do_stealing */, | 2236 false /* do_termination */, |
2234 false /* do_termination */); | 2237 _is_serial); |
2235 } while (_task->has_aborted() && !_cm->has_overflown()); | 2238 } while (_task->has_aborted() && !_cm->has_overflown()); |
2236 _ref_counter = _ref_counter_limit; | 2239 _ref_counter = _ref_counter_limit; |
2237 } | 2240 } |
2238 } else { | 2241 } else { |
2239 if (_cm->verbose_high()) { | 2242 if (_cm->verbose_high()) { |
2251 // added by the 'keep alive' oop closure above. | 2254 // added by the 'keep alive' oop closure above. |
2252 | 2255 |
2253 class G1CMDrainMarkingStackClosure: public VoidClosure { | 2256 class G1CMDrainMarkingStackClosure: public VoidClosure { |
2254 ConcurrentMark* _cm; | 2257 ConcurrentMark* _cm; |
2255 CMTask* _task; | 2258 CMTask* _task; |
2256 bool _do_stealing; | 2259 bool _is_serial; |
2257 bool _do_termination; | |
2258 public: | 2260 public: |
2259 G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task, bool is_par) : | 2261 G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) : |
2260 _cm(cm), _task(task) { | 2262 _cm(cm), _task(task), _is_serial(is_serial) { |
2261 assert(is_par || _task->worker_id() == 0, | 2263 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code"); |
2262 "Only task for worker 0 should be used if ref processing is single threaded"); | |
2263 // We only allow stealing and only enter the termination protocol | |
2264 // in CMTask::do_marking_step() if this closure is being instantiated | |
2265 // for parallel reference processing. | |
2266 _do_stealing = _do_termination = is_par; | |
2267 } | 2264 } |
2268 | 2265 |
2269 void do_void() { | 2266 void do_void() { |
2270 do { | 2267 do { |
2271 if (_cm->verbose_high()) { | 2268 if (_cm->verbose_high()) { |
2272 gclog_or_tty->print_cr("\t[%u] Drain: Calling do_marking_step - " | 2269 gclog_or_tty->print_cr("\t[%u] Drain: Calling do_marking_step - serial: %s", |
2273 "stealing: %s, termination: %s", | 2270 _task->worker_id(), BOOL_TO_STR(_is_serial)); |
2274 _task->worker_id(), | |
2275 BOOL_TO_STR(_do_stealing), | |
2276 BOOL_TO_STR(_do_termination)); | |
2277 } | 2271 } |
2278 | 2272 |
2279 // We call CMTask::do_marking_step() to completely drain the local | 2273 // We call CMTask::do_marking_step() to completely drain the local |
2280 // and global marking stacks of entries pushed by the 'keep alive' | 2274 // and global marking stacks of entries pushed by the 'keep alive' |
2281 // oop closure (an instance of G1CMKeepAliveAndDrainClosure above). | 2275 // oop closure (an instance of G1CMKeepAliveAndDrainClosure above). |
2292 // one of which is reaching the specified time target.) It is only | 2286 // one of which is reaching the specified time target.) It is only |
2293 // when CMTask::do_marking_step() returns without setting the | 2287 // when CMTask::do_marking_step() returns without setting the |
2294 // has_aborted() flag that the marking step has completed. | 2288 // has_aborted() flag that the marking step has completed. |
2295 | 2289 |
2296 _task->do_marking_step(1000000000.0 /* something very large */, | 2290 _task->do_marking_step(1000000000.0 /* something very large */, |
2297 _do_stealing, | 2291 true /* do_termination */, |
2298 _do_termination); | 2292 _is_serial); |
2299 } while (_task->has_aborted() && !_cm->has_overflown()); | 2293 } while (_task->has_aborted() && !_cm->has_overflown()); |
2300 } | 2294 } |
2301 }; | 2295 }; |
2302 | 2296 |
2303 // Implementation of AbstractRefProcTaskExecutor for parallel | 2297 // Implementation of AbstractRefProcTaskExecutor for parallel |
2326 class G1CMRefProcTaskProxy: public AbstractGangTask { | 2320 class G1CMRefProcTaskProxy: public AbstractGangTask { |
2327 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask; | 2321 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask; |
2328 ProcessTask& _proc_task; | 2322 ProcessTask& _proc_task; |
2329 G1CollectedHeap* _g1h; | 2323 G1CollectedHeap* _g1h; |
2330 ConcurrentMark* _cm; | 2324 ConcurrentMark* _cm; |
2331 bool _processing_is_mt; | |
2332 | 2325 |
2333 public: | 2326 public: |
2334 G1CMRefProcTaskProxy(ProcessTask& proc_task, | 2327 G1CMRefProcTaskProxy(ProcessTask& proc_task, |
2335 G1CollectedHeap* g1h, | 2328 G1CollectedHeap* g1h, |
2336 ConcurrentMark* cm) : | 2329 ConcurrentMark* cm) : |
2337 AbstractGangTask("Process reference objects in parallel"), | 2330 AbstractGangTask("Process reference objects in parallel"), |
2338 _proc_task(proc_task), _g1h(g1h), _cm(cm) { | 2331 _proc_task(proc_task), _g1h(g1h), _cm(cm) { |
2339 ReferenceProcessor* rp = _g1h->ref_processor_cm(); | 2332 ReferenceProcessor* rp = _g1h->ref_processor_cm(); |
2340 _processing_is_mt = rp->processing_is_mt(); | 2333 assert(rp->processing_is_mt(), "shouldn't be here otherwise"); |
2341 } | 2334 } |
2342 | 2335 |
2343 virtual void work(uint worker_id) { | 2336 virtual void work(uint worker_id) { |
2344 CMTask* marking_task = _cm->task(worker_id); | 2337 CMTask* task = _cm->task(worker_id); |
2345 G1CMIsAliveClosure g1_is_alive(_g1h); | 2338 G1CMIsAliveClosure g1_is_alive(_g1h); |
2346 G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, marking_task); | 2339 G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */); |
2347 G1CMDrainMarkingStackClosure g1_par_drain(_cm, marking_task, _processing_is_mt); | 2340 G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */); |
2348 | 2341 |
2349 _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain); | 2342 _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain); |
2350 } | 2343 } |
2351 }; | 2344 }; |
2352 | 2345 |
2413 | 2406 |
2414 // Set the soft reference policy | 2407 // Set the soft reference policy |
2415 rp->setup_policy(clear_all_soft_refs); | 2408 rp->setup_policy(clear_all_soft_refs); |
2416 assert(_markStack.isEmpty(), "mark stack should be empty"); | 2409 assert(_markStack.isEmpty(), "mark stack should be empty"); |
2417 | 2410 |
2418 // Non-MT instances 'Keep Alive' and 'Complete GC' oop closures. | 2411 // Instances of the 'Keep Alive' and 'Complete GC' closures used |
2419 G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0)); | 2412 // in serial reference processing. Note these closures are also |
2420 G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), false); | 2413 // used for serially processing (by the the current thread) the |
2421 | 2414 // JNI references during parallel reference processing. |
2422 // We need at least one active thread. If reference processing is | 2415 // |
2423 // not multi-threaded we use the current (ConcurrentMarkThread) thread, | 2416 // These closures do not need to synchronize with the worker |
2424 // otherwise we use the work gang from the G1CollectedHeap and we | 2417 // threads involved in parallel reference processing as these |
2425 // utilize all the worker threads we can. | 2418 // instances are executed serially by the current thread (e.g. |
2426 uint active_workers = (rp->processing_is_mt() && g1h->workers() != NULL | 2419 // reference processing is not multi-threaded and is thus |
2427 ? g1h->workers()->active_workers() | 2420 // performed by the current thread instead of a gang worker). |
2428 : 1U); | 2421 // |
2429 | 2422 // The gang tasks involved in parallel reference procssing create |
2423 // their own instances of these closures, which do their own | |
2424 // synchronization among themselves. | |
2425 G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */); | |
2426 G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */); | |
2427 | |
2428 // We need at least one active thread. If reference processing | |
2429 // is not multi-threaded we use the current (VMThread) thread, | |
2430 // otherwise we use the work gang from the G1CollectedHeap and | |
2431 // we utilize all the worker threads we can. | |
2432 bool processing_is_mt = rp->processing_is_mt() && g1h->workers() != NULL; | |
2433 uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U); | |
2430 active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U); | 2434 active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U); |
2431 | 2435 |
2436 // Parallel processing task executor. | |
2432 G1CMRefProcTaskExecutor par_task_executor(g1h, this, | 2437 G1CMRefProcTaskExecutor par_task_executor(g1h, this, |
2433 g1h->workers(), active_workers); | 2438 g1h->workers(), active_workers); |
2434 | 2439 AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL); |
2435 AbstractRefProcTaskExecutor* executor = (rp->processing_is_mt() | |
2436 ? &par_task_executor | |
2437 : NULL); | |
2438 | 2440 |
2439 // Set the degree of MT processing here. If the discovery was done MT, | 2441 // Set the degree of MT processing here. If the discovery was done MT, |
2440 // the number of threads involved during discovery could differ from | 2442 // the number of threads involved during discovery could differ from |
2441 // the number of active workers. This is OK as long as the discovered | 2443 // the number of active workers. This is OK as long as the discovered |
2442 // Reference lists are balanced (see balance_all_queues() and balance_queues()). | 2444 // Reference lists are balanced (see balance_all_queues() and balance_queues()). |
2452 // oop closures will set the has_overflown flag if we overflow the | 2454 // oop closures will set the has_overflown flag if we overflow the |
2453 // global marking stack. | 2455 // global marking stack. |
2454 | 2456 |
2455 assert(_markStack.overflow() || _markStack.isEmpty(), | 2457 assert(_markStack.overflow() || _markStack.isEmpty(), |
2456 "mark stack should be empty (unless it overflowed)"); | 2458 "mark stack should be empty (unless it overflowed)"); |
2459 | |
2457 if (_markStack.overflow()) { | 2460 if (_markStack.overflow()) { |
2458 // This should have been done already when we tried to push an | 2461 // This should have been done already when we tried to push an |
2459 // entry on to the global mark stack. But let's do it again. | 2462 // entry on to the global mark stack. But let's do it again. |
2460 set_has_overflown(); | 2463 set_has_overflown(); |
2461 } | 2464 } |
2480 _nextMarkBitMap = (CMBitMap*) temp; | 2483 _nextMarkBitMap = (CMBitMap*) temp; |
2481 } | 2484 } |
2482 | 2485 |
2483 class CMRemarkTask: public AbstractGangTask { | 2486 class CMRemarkTask: public AbstractGangTask { |
2484 private: | 2487 private: |
2485 ConcurrentMark *_cm; | 2488 ConcurrentMark* _cm; |
2486 | 2489 bool _is_serial; |
2487 public: | 2490 public: |
2488 void work(uint worker_id) { | 2491 void work(uint worker_id) { |
2489 // Since all available tasks are actually started, we should | 2492 // Since all available tasks are actually started, we should |
2490 // only proceed if we're supposed to be actived. | 2493 // only proceed if we're supposed to be actived. |
2491 if (worker_id < _cm->active_tasks()) { | 2494 if (worker_id < _cm->active_tasks()) { |
2492 CMTask* task = _cm->task(worker_id); | 2495 CMTask* task = _cm->task(worker_id); |
2493 task->record_start_time(); | 2496 task->record_start_time(); |
2494 do { | 2497 do { |
2495 task->do_marking_step(1000000000.0 /* something very large */, | 2498 task->do_marking_step(1000000000.0 /* something very large */, |
2496 true /* do_stealing */, | 2499 true /* do_termination */, |
2497 true /* do_termination */); | 2500 _is_serial); |
2498 } while (task->has_aborted() && !_cm->has_overflown()); | 2501 } while (task->has_aborted() && !_cm->has_overflown()); |
2499 // If we overflow, then we do not want to restart. We instead | 2502 // If we overflow, then we do not want to restart. We instead |
2500 // want to abort remark and do concurrent marking again. | 2503 // want to abort remark and do concurrent marking again. |
2501 task->record_end_time(); | 2504 task->record_end_time(); |
2502 } | 2505 } |
2503 } | 2506 } |
2504 | 2507 |
2505 CMRemarkTask(ConcurrentMark* cm, int active_workers) : | 2508 CMRemarkTask(ConcurrentMark* cm, int active_workers, bool is_serial) : |
2506 AbstractGangTask("Par Remark"), _cm(cm) { | 2509 AbstractGangTask("Par Remark"), _cm(cm), _is_serial(is_serial) { |
2507 _cm->terminator()->reset_for_reuse(active_workers); | 2510 _cm->terminator()->reset_for_reuse(active_workers); |
2508 } | 2511 } |
2509 }; | 2512 }; |
2510 | 2513 |
2511 void ConcurrentMark::checkpointRootsFinalWork() { | 2514 void ConcurrentMark::checkpointRootsFinalWork() { |
2528 // Leave _parallel_marking_threads at it's | 2531 // Leave _parallel_marking_threads at it's |
2529 // value originally calculated in the ConcurrentMark | 2532 // value originally calculated in the ConcurrentMark |
2530 // constructor and pass values of the active workers | 2533 // constructor and pass values of the active workers |
2531 // through the gang in the task. | 2534 // through the gang in the task. |
2532 | 2535 |
2533 CMRemarkTask remarkTask(this, active_workers); | 2536 CMRemarkTask remarkTask(this, active_workers, false /* is_serial */); |
2537 // We will start all available threads, even if we decide that the | |
2538 // active_workers will be fewer. The extra ones will just bail out | |
2539 // immediately. | |
2534 g1h->set_par_threads(active_workers); | 2540 g1h->set_par_threads(active_workers); |
2535 g1h->workers()->run_task(&remarkTask); | 2541 g1h->workers()->run_task(&remarkTask); |
2536 g1h->set_par_threads(0); | 2542 g1h->set_par_threads(0); |
2537 } else { | 2543 } else { |
2538 G1CollectedHeap::StrongRootsScope srs(g1h); | 2544 G1CollectedHeap::StrongRootsScope srs(g1h); |
2539 // this is remark, so we'll use up all available threads | |
2540 uint active_workers = 1; | 2545 uint active_workers = 1; |
2541 set_phase(active_workers, false /* concurrent */); | 2546 set_phase(active_workers, false /* concurrent */); |
2542 | 2547 |
2543 CMRemarkTask remarkTask(this, active_workers); | 2548 // Note - if there's no work gang then the VMThread will be |
2544 // We will start all available threads, even if we decide that the | 2549 // the thread to execute the remark - serially. We have |
2545 // active_workers will be fewer. The extra ones will just bail out | 2550 // to pass true for the is_serial parameter so that |
2546 // immediately. | 2551 // CMTask::do_marking_step() doesn't enter the sync |
2552 // barriers in the event of an overflow. Doing so will | |
2553 // cause an assert that the current thread is not a | |
2554 // concurrent GC thread. | |
2555 CMRemarkTask remarkTask(this, active_workers, true /* is_serial*/); | |
2547 remarkTask.work(0); | 2556 remarkTask.work(0); |
2548 } | 2557 } |
2549 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); | 2558 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); |
2550 guarantee(satb_mq_set.completed_buffers_num() == 0, "invariant"); | 2559 guarantee(satb_mq_set.completed_buffers_num() == 0, "invariant"); |
2551 | 2560 |
3852 #endif // _MARKING_STATS_ | 3861 #endif // _MARKING_STATS_ |
3853 } | 3862 } |
3854 | 3863 |
3855 /***************************************************************************** | 3864 /***************************************************************************** |
3856 | 3865 |
3857 The do_marking_step(time_target_ms) method is the building block | 3866 The do_marking_step(time_target_ms, ...) method is the building |
3858 of the parallel marking framework. It can be called in parallel | 3867 block of the parallel marking framework. It can be called in parallel |
3859 with other invocations of do_marking_step() on different tasks | 3868 with other invocations of do_marking_step() on different tasks |
3860 (but only one per task, obviously) and concurrently with the | 3869 (but only one per task, obviously) and concurrently with the |
3861 mutator threads, or during remark, hence it eliminates the need | 3870 mutator threads, or during remark, hence it eliminates the need |
3862 for two versions of the code. When called during remark, it will | 3871 for two versions of the code. When called during remark, it will |
3863 pick up from where the task left off during the concurrent marking | 3872 pick up from where the task left off during the concurrent marking |
3864 phase. Interestingly, tasks are also claimable during evacuation | 3873 phase. Interestingly, tasks are also claimable during evacuation |
3865 pauses too, since do_marking_step() ensures that it aborts before | 3874 pauses too, since do_marking_step() ensures that it aborts before |
3866 it needs to yield. | 3875 it needs to yield. |
3867 | 3876 |
3868 The data structures that is uses to do marking work are the | 3877 The data structures that it uses to do marking work are the |
3869 following: | 3878 following: |
3870 | 3879 |
3871 (1) Marking Bitmap. If there are gray objects that appear only | 3880 (1) Marking Bitmap. If there are gray objects that appear only |
3872 on the bitmap (this happens either when dealing with an overflow | 3881 on the bitmap (this happens either when dealing with an overflow |
3873 or when the initial marking phase has simply marked the roots | 3882 or when the initial marking phase has simply marked the roots |
3949 too. The initial reason for the clock method was to avoid calling | 3958 too. The initial reason for the clock method was to avoid calling |
3950 vtime too regularly, as it is quite expensive. So, once it was in | 3959 vtime too regularly, as it is quite expensive. So, once it was in |
3951 place, it was natural to piggy-back all the other conditions on it | 3960 place, it was natural to piggy-back all the other conditions on it |
3952 too and not constantly check them throughout the code. | 3961 too and not constantly check them throughout the code. |
3953 | 3962 |
3963 If do_termination is true then do_marking_step will enter its | |
3964 termination protocol. | |
3965 | |
3966 The value of is_serial must be true when do_marking_step is being | |
3967 called serially (i.e. by the VMThread) and do_marking_step should | |
3968 skip any synchronization in the termination and overflow code. | |
3969 Examples include the serial remark code and the serial reference | |
3970 processing closures. | |
3971 | |
3972 The value of is_serial must be false when do_marking_step is | |
3973 being called by any of the worker threads in a work gang. | |
3974 Examples include the concurrent marking code (CMMarkingTask), | |
3975 the MT remark code, and the MT reference processing closures. | |
3976 | |
3954 *****************************************************************************/ | 3977 *****************************************************************************/ |
3955 | 3978 |
3956 void CMTask::do_marking_step(double time_target_ms, | 3979 void CMTask::do_marking_step(double time_target_ms, |
3957 bool do_stealing, | 3980 bool do_termination, |
3958 bool do_termination) { | 3981 bool is_serial) { |
3959 assert(time_target_ms >= 1.0, "minimum granularity is 1ms"); | 3982 assert(time_target_ms >= 1.0, "minimum granularity is 1ms"); |
3960 assert(concurrent() == _cm->concurrent(), "they should be the same"); | 3983 assert(concurrent() == _cm->concurrent(), "they should be the same"); |
3961 | 3984 |
3962 G1CollectorPolicy* g1_policy = _g1h->g1_policy(); | 3985 G1CollectorPolicy* g1_policy = _g1h->g1_policy(); |
3963 assert(_task_queues != NULL, "invariant"); | 3986 assert(_task_queues != NULL, "invariant"); |
3973 // catch most problems. | 3996 // catch most problems. |
3974 _claimed = true; | 3997 _claimed = true; |
3975 | 3998 |
3976 _start_time_ms = os::elapsedVTime() * 1000.0; | 3999 _start_time_ms = os::elapsedVTime() * 1000.0; |
3977 statsOnly( _interval_start_time_ms = _start_time_ms ); | 4000 statsOnly( _interval_start_time_ms = _start_time_ms ); |
4001 | |
4002 // If do_stealing is true then do_marking_step will attempt to | |
4003 // steal work from the other CMTasks. It only makes sense to | |
4004 // enable stealing when the termination protocol is enabled | |
4005 // and do_marking_step() is not being called serially. | |
4006 bool do_stealing = do_termination && !is_serial; | |
3978 | 4007 |
3979 double diff_prediction_ms = | 4008 double diff_prediction_ms = |
3980 g1_policy->get_new_prediction(&_marking_step_diffs_ms); | 4009 g1_policy->get_new_prediction(&_marking_step_diffs_ms); |
3981 _time_target_ms = time_target_ms - diff_prediction_ms; | 4010 _time_target_ms = time_target_ms - diff_prediction_ms; |
3982 | 4011 |
4235 if (_cm->verbose_low()) { | 4264 if (_cm->verbose_low()) { |
4236 gclog_or_tty->print_cr("[%u] starting termination protocol", _worker_id); | 4265 gclog_or_tty->print_cr("[%u] starting termination protocol", _worker_id); |
4237 } | 4266 } |
4238 | 4267 |
4239 _termination_start_time_ms = os::elapsedVTime() * 1000.0; | 4268 _termination_start_time_ms = os::elapsedVTime() * 1000.0; |
4269 | |
4240 // The CMTask class also extends the TerminatorTerminator class, | 4270 // The CMTask class also extends the TerminatorTerminator class, |
4241 // hence its should_exit_termination() method will also decide | 4271 // hence its should_exit_termination() method will also decide |
4242 // whether to exit the termination protocol or not. | 4272 // whether to exit the termination protocol or not. |
4243 bool finished = _cm->terminator()->offer_termination(this); | 4273 bool finished = (is_serial || |
4274 _cm->terminator()->offer_termination(this)); | |
4244 double termination_end_time_ms = os::elapsedVTime() * 1000.0; | 4275 double termination_end_time_ms = os::elapsedVTime() * 1000.0; |
4245 _termination_time_ms += | 4276 _termination_time_ms += |
4246 termination_end_time_ms - _termination_start_time_ms; | 4277 termination_end_time_ms - _termination_start_time_ms; |
4247 | 4278 |
4248 if (finished) { | 4279 if (finished) { |
4318 | 4349 |
4319 if (_cm->verbose_low()) { | 4350 if (_cm->verbose_low()) { |
4320 gclog_or_tty->print_cr("[%u] detected overflow", _worker_id); | 4351 gclog_or_tty->print_cr("[%u] detected overflow", _worker_id); |
4321 } | 4352 } |
4322 | 4353 |
4323 _cm->enter_first_sync_barrier(_worker_id); | 4354 if (!is_serial) { |
4324 // When we exit this sync barrier we know that all tasks have | 4355 // We only need to enter the sync barrier if being called |
4325 // stopped doing marking work. So, it's now safe to | 4356 // from a parallel context |
4326 // re-initialise our data structures. At the end of this method, | 4357 _cm->enter_first_sync_barrier(_worker_id); |
4327 // task 0 will clear the global data structures. | 4358 |
4359 // When we exit this sync barrier we know that all tasks have | |
4360 // stopped doing marking work. So, it's now safe to | |
4361 // re-initialise our data structures. At the end of this method, | |
4362 // task 0 will clear the global data structures. | |
4363 } | |
4328 | 4364 |
4329 statsOnly( ++_aborted_overflow ); | 4365 statsOnly( ++_aborted_overflow ); |
4330 | 4366 |
4331 // We clear the local state of this task... | 4367 // We clear the local state of this task... |
4332 clear_region_fields(); | 4368 clear_region_fields(); |
4333 | 4369 |
4334 // ...and enter the second barrier. | 4370 if (!is_serial) { |
4335 _cm->enter_second_sync_barrier(_worker_id); | 4371 // ...and enter the second barrier. |
4372 _cm->enter_second_sync_barrier(_worker_id); | |
4373 } | |
4336 // At this point everything has bee re-initialised and we're | 4374 // At this point everything has bee re-initialised and we're |
4337 // ready to restart. | 4375 // ready to restart. |
4338 } | 4376 } |
4339 | 4377 |
4340 if (_cm->verbose_low()) { | 4378 if (_cm->verbose_low()) { |