Mercurial > hg > truffle
comparison src/share/vm/gc_implementation/g1/concurrentMark.hpp @ 14309:63a4eb8bcd23
8025856: Fix typos in the GC code
Summary: Fix about 440 typos in comments in the VM code
Reviewed-by: mgerdin, tschatzl, coleenp, kmo, jcoomes
author | jwilhelm |
---|---|
date | Thu, 23 Jan 2014 14:47:23 +0100 |
parents | 5888334c9c24 |
children | 4ca6dc0799b6 |
comparison
equal
deleted
inserted
replaced
14308:870aedf4ba4f | 14309:63a4eb8bcd23 |
---|---|
376 friend class G1CMRefProcTaskExecutor; | 376 friend class G1CMRefProcTaskExecutor; |
377 friend class G1CMKeepAliveAndDrainClosure; | 377 friend class G1CMKeepAliveAndDrainClosure; |
378 friend class G1CMDrainMarkingStackClosure; | 378 friend class G1CMDrainMarkingStackClosure; |
379 | 379 |
380 protected: | 380 protected: |
381 ConcurrentMarkThread* _cmThread; // the thread doing the work | 381 ConcurrentMarkThread* _cmThread; // The thread doing the work |
382 G1CollectedHeap* _g1h; // the heap. | 382 G1CollectedHeap* _g1h; // The heap |
383 uint _parallel_marking_threads; // the number of marking | 383 uint _parallel_marking_threads; // The number of marking |
384 // threads we're use | 384 // threads we're using |
385 uint _max_parallel_marking_threads; // max number of marking | 385 uint _max_parallel_marking_threads; // Max number of marking |
386 // threads we'll ever use | 386 // threads we'll ever use |
387 double _sleep_factor; // how much we have to sleep, with | 387 double _sleep_factor; // How much we have to sleep, with |
388 // respect to the work we just did, to | 388 // respect to the work we just did, to |
389 // meet the marking overhead goal | 389 // meet the marking overhead goal |
390 double _marking_task_overhead; // marking target overhead for | 390 double _marking_task_overhead; // Marking target overhead for |
391 // a single task | 391 // a single task |
392 | 392 |
393 // same as the two above, but for the cleanup task | 393 // Same as the two above, but for the cleanup task |
394 double _cleanup_sleep_factor; | 394 double _cleanup_sleep_factor; |
395 double _cleanup_task_overhead; | 395 double _cleanup_task_overhead; |
396 | 396 |
397 FreeRegionList _cleanup_list; | 397 FreeRegionList _cleanup_list; |
398 | 398 |
399 // Concurrent marking support structures | 399 // Concurrent marking support structures |
400 CMBitMap _markBitMap1; | 400 CMBitMap _markBitMap1; |
401 CMBitMap _markBitMap2; | 401 CMBitMap _markBitMap2; |
402 CMBitMapRO* _prevMarkBitMap; // completed mark bitmap | 402 CMBitMapRO* _prevMarkBitMap; // Completed mark bitmap |
403 CMBitMap* _nextMarkBitMap; // under-construction mark bitmap | 403 CMBitMap* _nextMarkBitMap; // Under-construction mark bitmap |
404 | 404 |
405 BitMap _region_bm; | 405 BitMap _region_bm; |
406 BitMap _card_bm; | 406 BitMap _card_bm; |
407 | 407 |
408 // Heap bounds | 408 // Heap bounds |
409 HeapWord* _heap_start; | 409 HeapWord* _heap_start; |
410 HeapWord* _heap_end; | 410 HeapWord* _heap_end; |
411 | 411 |
412 // Root region tracking and claiming. | 412 // Root region tracking and claiming |
413 CMRootRegions _root_regions; | 413 CMRootRegions _root_regions; |
414 | 414 |
415 // For gray objects | 415 // For gray objects |
416 CMMarkStack _markStack; // Grey objects behind global finger. | 416 CMMarkStack _markStack; // Grey objects behind global finger |
417 HeapWord* volatile _finger; // the global finger, region aligned, | 417 HeapWord* volatile _finger; // The global finger, region aligned, |
418 // always points to the end of the | 418 // always points to the end of the |
419 // last claimed region | 419 // last claimed region |
420 | 420 |
421 // marking tasks | 421 // Marking tasks |
422 uint _max_worker_id;// maximum worker id | 422 uint _max_worker_id;// Maximum worker id |
423 uint _active_tasks; // task num currently active | 423 uint _active_tasks; // Task num currently active |
424 CMTask** _tasks; // task queue array (max_worker_id len) | 424 CMTask** _tasks; // Task queue array (max_worker_id len) |
425 CMTaskQueueSet* _task_queues; // task queue set | 425 CMTaskQueueSet* _task_queues; // Task queue set |
426 ParallelTaskTerminator _terminator; // for termination | 426 ParallelTaskTerminator _terminator; // For termination |
427 | 427 |
428 // Two sync barriers that are used to synchronise tasks when an | 428 // Two sync barriers that are used to synchronize tasks when an |
429 // overflow occurs. The algorithm is the following. All tasks enter | 429 // overflow occurs. The algorithm is the following. All tasks enter |
430 // the first one to ensure that they have all stopped manipulating | 430 // the first one to ensure that they have all stopped manipulating |
431 // the global data structures. After they exit it, they re-initialise | 431 // the global data structures. After they exit it, they re-initialize |
432 // their data structures and task 0 re-initialises the global data | 432 // their data structures and task 0 re-initializes the global data |
433 // structures. Then, they enter the second sync barrier. This | 433 // structures. Then, they enter the second sync barrier. This |
434 // ensures that no task starts doing work before all data | 434 // ensures that no task starts doing work before all data |
435 // structures (local and global) have been re-initialised. When they | 435 // structures (local and global) have been re-initialized. When they |
436 // exit it, they are free to start working again. | 436 // exit it, they are free to start working again. |
437 WorkGangBarrierSync _first_overflow_barrier_sync; | 437 WorkGangBarrierSync _first_overflow_barrier_sync; |
438 WorkGangBarrierSync _second_overflow_barrier_sync; | 438 WorkGangBarrierSync _second_overflow_barrier_sync; |
439 | 439 |
440 // this is set by any task, when an overflow on the global data | 440 // This is set by any task, when an overflow on the global data |
441 // structures is detected. | 441 // structures is detected |
442 volatile bool _has_overflown; | 442 volatile bool _has_overflown; |
443 // true: marking is concurrent, false: we're in remark | 443 // True: marking is concurrent, false: we're in remark |
444 volatile bool _concurrent; | 444 volatile bool _concurrent; |
445 // set at the end of a Full GC so that marking aborts | 445 // Set at the end of a Full GC so that marking aborts |
446 volatile bool _has_aborted; | 446 volatile bool _has_aborted; |
447 | 447 |
448 // used when remark aborts due to an overflow to indicate that | 448 // Used when remark aborts due to an overflow to indicate that |
449 // another concurrent marking phase should start | 449 // another concurrent marking phase should start |
450 volatile bool _restart_for_overflow; | 450 volatile bool _restart_for_overflow; |
451 | 451 |
452 // This is true from the very start of concurrent marking until the | 452 // This is true from the very start of concurrent marking until the |
453 // point when all the tasks complete their work. It is really used | 453 // point when all the tasks complete their work. It is really used |
454 // to determine the points between the end of concurrent marking and | 454 // to determine the points between the end of concurrent marking and |
455 // time of remark. | 455 // time of remark. |
456 volatile bool _concurrent_marking_in_progress; | 456 volatile bool _concurrent_marking_in_progress; |
457 | 457 |
458 // verbose level | 458 // Verbose level |
459 CMVerboseLevel _verbose_level; | 459 CMVerboseLevel _verbose_level; |
460 | 460 |
461 // All of these times are in ms. | 461 // All of these times are in ms |
462 NumberSeq _init_times; | 462 NumberSeq _init_times; |
463 NumberSeq _remark_times; | 463 NumberSeq _remark_times; |
464 NumberSeq _remark_mark_times; | 464 NumberSeq _remark_mark_times; |
465 NumberSeq _remark_weak_ref_times; | 465 NumberSeq _remark_weak_ref_times; |
466 NumberSeq _cleanup_times; | 466 NumberSeq _cleanup_times; |
467 double _total_counting_time; | 467 double _total_counting_time; |
468 double _total_rs_scrub_time; | 468 double _total_rs_scrub_time; |
469 | 469 |
470 double* _accum_task_vtime; // accumulated task vtime | 470 double* _accum_task_vtime; // Accumulated task vtime |
471 | 471 |
472 FlexibleWorkGang* _parallel_workers; | 472 FlexibleWorkGang* _parallel_workers; |
473 | 473 |
474 ForceOverflowSettings _force_overflow_conc; | 474 ForceOverflowSettings _force_overflow_conc; |
475 ForceOverflowSettings _force_overflow_stw; | 475 ForceOverflowSettings _force_overflow_stw; |
485 // Resets all the marking data structures. Called when we have to restart | 485 // Resets all the marking data structures. Called when we have to restart |
486 // marking or when marking completes (via set_non_marking_state below). | 486 // marking or when marking completes (via set_non_marking_state below). |
487 void reset_marking_state(bool clear_overflow = true); | 487 void reset_marking_state(bool clear_overflow = true); |
488 | 488 |
489 // We do this after we're done with marking so that the marking data | 489 // We do this after we're done with marking so that the marking data |
490 // structures are initialised to a sensible and predictable state. | 490 // structures are initialized to a sensible and predictable state. |
491 void set_non_marking_state(); | 491 void set_non_marking_state(); |
492 | 492 |
493 // Called to indicate how many threads are currently active. | 493 // Called to indicate how many threads are currently active. |
494 void set_concurrency(uint active_tasks); | 494 void set_concurrency(uint active_tasks); |
495 | 495 |
496 // It should be called to indicate which phase we're in (concurrent | 496 // It should be called to indicate which phase we're in (concurrent |
497 // mark or remark) and how many threads are currently active. | 497 // mark or remark) and how many threads are currently active. |
498 void set_concurrency_and_phase(uint active_tasks, bool concurrent); | 498 void set_concurrency_and_phase(uint active_tasks, bool concurrent); |
499 | 499 |
500 // prints all gathered CM-related statistics | 500 // Prints all gathered CM-related statistics |
501 void print_stats(); | 501 void print_stats(); |
502 | 502 |
503 bool cleanup_list_is_empty() { | 503 bool cleanup_list_is_empty() { |
504 return _cleanup_list.is_empty(); | 504 return _cleanup_list.is_empty(); |
505 } | 505 } |
506 | 506 |
507 // accessor methods | 507 // Accessor methods |
508 uint parallel_marking_threads() const { return _parallel_marking_threads; } | 508 uint parallel_marking_threads() const { return _parallel_marking_threads; } |
509 uint max_parallel_marking_threads() const { return _max_parallel_marking_threads;} | 509 uint max_parallel_marking_threads() const { return _max_parallel_marking_threads;} |
510 double sleep_factor() { return _sleep_factor; } | 510 double sleep_factor() { return _sleep_factor; } |
511 double marking_task_overhead() { return _marking_task_overhead;} | 511 double marking_task_overhead() { return _marking_task_overhead;} |
512 double cleanup_sleep_factor() { return _cleanup_sleep_factor; } | 512 double cleanup_sleep_factor() { return _cleanup_sleep_factor; } |
540 // method. So, this way, each task will spend very little time in | 540 // method. So, this way, each task will spend very little time in |
541 // claim_region() and is allowed to call the regular clock method | 541 // claim_region() and is allowed to call the regular clock method |
542 // frequently. | 542 // frequently. |
543 HeapRegion* claim_region(uint worker_id); | 543 HeapRegion* claim_region(uint worker_id); |
544 | 544 |
545 // It determines whether we've run out of regions to scan. | 545 // It determines whether we've run out of regions to scan |
546 bool out_of_regions() { return _finger == _heap_end; } | 546 bool out_of_regions() { return _finger == _heap_end; } |
547 | 547 |
548 // Returns the task with the given id | 548 // Returns the task with the given id |
549 CMTask* task(int id) { | 549 CMTask* task(int id) { |
550 assert(0 <= id && id < (int) _active_tasks, | 550 assert(0 <= id && id < (int) _active_tasks, |
814 } | 814 } |
815 | 815 |
816 inline bool do_yield_check(uint worker_i = 0); | 816 inline bool do_yield_check(uint worker_i = 0); |
817 inline bool should_yield(); | 817 inline bool should_yield(); |
818 | 818 |
819 // Called to abort the marking cycle after a Full GC takes palce. | 819 // Called to abort the marking cycle after a Full GC takes place. |
820 void abort(); | 820 void abort(); |
821 | 821 |
822 bool has_aborted() { return _has_aborted; } | 822 bool has_aborted() { return _has_aborted; } |
823 | 823 |
824 // This prints the global/local fingers. It is used for debugging. | 824 // This prints the global/local fingers. It is used for debugging. |
931 // contains the object to be marked/counted, which this routine looks up. | 931 // contains the object to be marked/counted, which this routine looks up. |
932 inline bool par_mark_and_count(oop obj, uint worker_id); | 932 inline bool par_mark_and_count(oop obj, uint worker_id); |
933 | 933 |
934 // Similar to the above routine but there are times when we cannot | 934 // Similar to the above routine but there are times when we cannot |
935 // safely calculate the size of obj due to races and we, therefore, | 935 // safely calculate the size of obj due to races and we, therefore, |
936 // pass the size in as a parameter. It is the caller's reponsibility | 936 // pass the size in as a parameter. It is the caller's responsibility |
937 // to ensure that the size passed in for obj is valid. | 937 // to ensure that the size passed in for obj is valid. |
938 inline bool par_mark_and_count(oop obj, size_t word_size, uint worker_id); | 938 inline bool par_mark_and_count(oop obj, size_t word_size, uint worker_id); |
939 | 939 |
940 // Unconditionally mark the given object, and unconditinally count | 940 // Unconditionally mark the given object, and unconditionally count |
941 // the object in the counting structures for worker id 0. | 941 // the object in the counting structures for worker id 0. |
942 // Should *not* be called from parallel code. | 942 // Should *not* be called from parallel code. |
943 inline bool mark_and_count(oop obj, HeapRegion* hr); | 943 inline bool mark_and_count(oop obj, HeapRegion* hr); |
944 | 944 |
945 // Similar to the above routine but we don't know the heap region that | 945 // Similar to the above routine but we don't know the heap region that |