comparison src/share/vm/gc_implementation/g1/concurrentMark.cpp @ 14309:63a4eb8bcd23

8025856: Fix typos in the GC code Summary: Fix about 440 typos in comments in the VM code Reviewed-by: mgerdin, tschatzl, coleenp, kmo, jcoomes
author jwilhelm
date Thu, 23 Jan 2014 14:47:23 +0100
parents a2866d45e99e
children 4ca6dc0799b6
comparison
equal deleted inserted replaced
14308:870aedf4ba4f 14309:63a4eb8bcd23
907 print_reachable("at-cycle-start", 907 print_reachable("at-cycle-start",
908 VerifyOption_G1UsePrevMarking, true /* all */); 908 VerifyOption_G1UsePrevMarking, true /* all */);
909 } 909 }
910 #endif 910 #endif
911 911
912 // Initialise marking structures. This has to be done in a STW phase. 912 // Initialize marking structures. This has to be done in a STW phase.
913 reset(); 913 reset();
914 914
915 // For each region note start of marking. 915 // For each region note start of marking.
916 NoteStartOfMarkHRClosure startcl; 916 NoteStartOfMarkHRClosure startcl;
917 g1h->heap_region_iterate(&startcl); 917 g1h->heap_region_iterate(&startcl);
921 void ConcurrentMark::checkpointRootsInitialPost() { 921 void ConcurrentMark::checkpointRootsInitialPost() {
922 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 922 G1CollectedHeap* g1h = G1CollectedHeap::heap();
923 923
924 // If we force an overflow during remark, the remark operation will 924 // If we force an overflow during remark, the remark operation will
925 // actually abort and we'll restart concurrent marking. If we always 925 // actually abort and we'll restart concurrent marking. If we always
926 // force an oveflow during remark we'll never actually complete the 926 // force an overflow during remark we'll never actually complete the
927 // marking phase. So, we initilize this here, at the start of the 927 // marking phase. So, we initialize this here, at the start of the
928 // cycle, so that the remaining overflow number will decrease at 928 // cycle, so that the remaining overflow number will decrease at
929 // every remark and we'll eventually not need to cause one. 929 // every remark and we'll eventually not need to cause one.
930 force_overflow_stw()->init(); 930 force_overflow_stw()->init();
931 931
932 // Start Concurrent Marking weak-reference discovery. 932 // Start Concurrent Marking weak-reference discovery.
957 * sync up, whereas another one could be trying to yield, while also 957 * sync up, whereas another one could be trying to yield, while also
958 * waiting for the other threads to sync up too. 958 * waiting for the other threads to sync up too.
959 * 959 *
960 * Note, however, that this code is also used during remark and in 960 * Note, however, that this code is also used during remark and in
961 * this case we should not attempt to leave / enter the STS, otherwise 961 * this case we should not attempt to leave / enter the STS, otherwise
962 * we'll either hit an asseert (debug / fastdebug) or deadlock 962 * we'll either hit an assert (debug / fastdebug) or deadlock
963 * (product). So we should only leave / enter the STS if we are 963 * (product). So we should only leave / enter the STS if we are
964 * operating concurrently. 964 * operating concurrently.
965 * 965 *
966 * Because the thread that does the sync barrier has left the STS, it 966 * Because the thread that does the sync barrier has left the STS, it
967 * is possible to be suspended for a Full GC or an evacuation pause 967 * is possible to be suspended for a Full GC or an evacuation pause
999 // let the task associated with worker 0 do this 999 // let the task associated with worker 0 do this
1000 if (worker_id == 0) { 1000 if (worker_id == 0) {
1001 // task 0 is responsible for clearing the global data structures 1001 // task 0 is responsible for clearing the global data structures
1002 // We should be here because of an overflow. During STW we should 1002 // We should be here because of an overflow. During STW we should
1003 // not clear the overflow flag since we rely on it being true when 1003 // not clear the overflow flag since we rely on it being true when
1004 // we exit this method to abort the pause and restart concurent 1004 // we exit this method to abort the pause and restart concurrent
1005 // marking. 1005 // marking.
1006 reset_marking_state(true /* clear_overflow */); 1006 reset_marking_state(true /* clear_overflow */);
1007 force_overflow()->update(); 1007 force_overflow()->update();
1008 1008
1009 if (G1Log::fine()) { 1009 if (G1Log::fine()) {
1249 set_concurrency_and_phase(active_workers, true /* concurrent */); 1249 set_concurrency_and_phase(active_workers, true /* concurrent */);
1250 1250
1251 CMConcurrentMarkingTask markingTask(this, cmThread()); 1251 CMConcurrentMarkingTask markingTask(this, cmThread());
1252 if (use_parallel_marking_threads()) { 1252 if (use_parallel_marking_threads()) {
1253 _parallel_workers->set_active_workers((int)active_workers); 1253 _parallel_workers->set_active_workers((int)active_workers);
1254 // Don't set _n_par_threads because it affects MT in proceess_strong_roots() 1254 // Don't set _n_par_threads because it affects MT in process_strong_roots()
1255 // and the decisions on that MT processing is made elsewhere. 1255 // and the decisions on that MT processing is made elsewhere.
1256 assert(_parallel_workers->active_workers() > 0, "Should have been set"); 1256 assert(_parallel_workers->active_workers() > 0, "Should have been set");
1257 _parallel_workers->run_task(&markingTask); 1257 _parallel_workers->run_task(&markingTask);
1258 } else { 1258 } else {
1259 markingTask.work(0); 1259 markingTask.work(0);
1482 if (marked_bytes > 0) { 1482 if (marked_bytes > 0) {
1483 set_bit_for_region(hr); 1483 set_bit_for_region(hr);
1484 } 1484 }
1485 1485
1486 // Set the marked bytes for the current region so that 1486 // Set the marked bytes for the current region so that
1487 // it can be queried by a calling verificiation routine 1487 // it can be queried by a calling verification routine
1488 _region_marked_bytes = marked_bytes; 1488 _region_marked_bytes = marked_bytes;
1489 1489
1490 return false; 1490 return false;
1491 } 1491 }
1492 1492
2304 // We call CMTask::do_marking_step() to completely drain the local 2304 // We call CMTask::do_marking_step() to completely drain the local
2305 // and global marking stacks of entries pushed by the 'keep alive' 2305 // and global marking stacks of entries pushed by the 'keep alive'
2306 // oop closure (an instance of G1CMKeepAliveAndDrainClosure above). 2306 // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
2307 // 2307 //
2308 // CMTask::do_marking_step() is called in a loop, which we'll exit 2308 // CMTask::do_marking_step() is called in a loop, which we'll exit
2309 // if there's nothing more to do (i.e. we'completely drained the 2309 // if there's nothing more to do (i.e. we've completely drained the
2310 // entries that were pushed as a result of applying the 'keep alive' 2310 // entries that were pushed as a result of applying the 'keep alive'
2311 // closure to the entries on the discovered ref lists) or we overflow 2311 // closure to the entries on the discovered ref lists) or we overflow
2312 // the global marking stack. 2312 // the global marking stack.
2313 // 2313 //
2314 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted() 2314 // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
2467 // threads involved in parallel reference processing as these 2467 // threads involved in parallel reference processing as these
2468 // instances are executed serially by the current thread (e.g. 2468 // instances are executed serially by the current thread (e.g.
2469 // reference processing is not multi-threaded and is thus 2469 // reference processing is not multi-threaded and is thus
2470 // performed by the current thread instead of a gang worker). 2470 // performed by the current thread instead of a gang worker).
2471 // 2471 //
2472 // The gang tasks involved in parallel reference procssing create 2472 // The gang tasks involved in parallel reference processing create
2473 // their own instances of these closures, which do their own 2473 // their own instances of these closures, which do their own
2474 // synchronization among themselves. 2474 // synchronization among themselves.
2475 G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */); 2475 G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
2476 G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */); 2476 G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
2477 2477
2544 ConcurrentMark* _cm; 2544 ConcurrentMark* _cm;
2545 bool _is_serial; 2545 bool _is_serial;
2546 public: 2546 public:
2547 void work(uint worker_id) { 2547 void work(uint worker_id) {
2548 // Since all available tasks are actually started, we should 2548 // Since all available tasks are actually started, we should
2549 // only proceed if we're supposed to be actived. 2549 // only proceed if we're supposed to be active.
2550 if (worker_id < _cm->active_tasks()) { 2550 if (worker_id < _cm->active_tasks()) {
2551 CMTask* task = _cm->task(worker_id); 2551 CMTask* task = _cm->task(worker_id);
2552 task->record_start_time(); 2552 task->record_start_time();
2553 do { 2553 do {
2554 task->do_marking_step(1000000000.0 /* something very large */, 2554 task->do_marking_step(1000000000.0 /* something very large */,
3064 return false; 3064 return false;
3065 } 3065 }
3066 3066
3067 // 'start' should be in the heap. 3067 // 'start' should be in the heap.
3068 assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity"); 3068 assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
3069 // 'end' *may* be just beyone the end of the heap (if hr is the last region) 3069 // 'end' *may* be just beyond the end of the heap (if hr is the last region)
3070 assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity"); 3070 assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");
3071 3071
3072 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start); 3072 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
3073 BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit); 3073 BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
3074 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end); 3074 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);
4412 if (_cm->has_overflown()) { 4412 if (_cm->has_overflown()) {
4413 // This is the interesting one. We aborted because a global 4413 // This is the interesting one. We aborted because a global
4414 // overflow was raised. This means we have to restart the 4414 // overflow was raised. This means we have to restart the
4415 // marking phase and start iterating over regions. However, in 4415 // marking phase and start iterating over regions. However, in
4416 // order to do this we have to make sure that all tasks stop 4416 // order to do this we have to make sure that all tasks stop
4417 // what they are doing and re-initialise in a safe manner. We 4417 // what they are doing and re-initialize in a safe manner. We
4418 // will achieve this with the use of two barrier sync points. 4418 // will achieve this with the use of two barrier sync points.
4419 4419
4420 if (_cm->verbose_low()) { 4420 if (_cm->verbose_low()) {
4421 gclog_or_tty->print_cr("[%u] detected overflow", _worker_id); 4421 gclog_or_tty->print_cr("[%u] detected overflow", _worker_id);
4422 } 4422 }
4426 // from a parallel context 4426 // from a parallel context
4427 _cm->enter_first_sync_barrier(_worker_id); 4427 _cm->enter_first_sync_barrier(_worker_id);
4428 4428
4429 // When we exit this sync barrier we know that all tasks have 4429 // When we exit this sync barrier we know that all tasks have
4430 // stopped doing marking work. So, it's now safe to 4430 // stopped doing marking work. So, it's now safe to
4431 // re-initialise our data structures. At the end of this method, 4431 // re-initialize our data structures. At the end of this method,
4432 // task 0 will clear the global data structures. 4432 // task 0 will clear the global data structures.
4433 } 4433 }
4434 4434
4435 statsOnly( ++_aborted_overflow ); 4435 statsOnly( ++_aborted_overflow );
4436 4436