Mercurial > hg > truffle
diff src/share/vm/gc_implementation/g1/concurrentMark.hpp @ 14909:4ca6dc0799b6
Backout jdk9 merge
author | Gilles Duboscq <duboscq@ssw.jku.at> |
---|---|
date | Tue, 01 Apr 2014 13:57:07 +0200 |
parents | 63a4eb8bcd23 |
children | 52b4284cb496 |
line wrap: on
line diff
--- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp Tue Apr 01 14:09:03 2014 +0200 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp Tue Apr 01 13:57:07 2014 +0200 @@ -378,19 +378,19 @@ friend class G1CMDrainMarkingStackClosure; protected: - ConcurrentMarkThread* _cmThread; // The thread doing the work - G1CollectedHeap* _g1h; // The heap - uint _parallel_marking_threads; // The number of marking - // threads we're using - uint _max_parallel_marking_threads; // Max number of marking - // threads we'll ever use - double _sleep_factor; // How much we have to sleep, with + ConcurrentMarkThread* _cmThread; // the thread doing the work + G1CollectedHeap* _g1h; // the heap. + uint _parallel_marking_threads; // the number of marking + // threads we're using + uint _max_parallel_marking_threads; // max number of marking + // threads we'll ever use + double _sleep_factor; // how much we have to sleep, with // respect to the work we just did, to // meet the marking overhead goal - double _marking_task_overhead; // Marking target overhead for + double _marking_task_overhead; // marking target overhead for // a single task - // Same as the two above, but for the cleanup task + // same as the two above, but for the cleanup task double _cleanup_sleep_factor; double _cleanup_task_overhead; @@ -399,8 +399,8 @@ // Concurrent marking support structures CMBitMap _markBitMap1; CMBitMap _markBitMap2; - CMBitMapRO* _prevMarkBitMap; // Completed mark bitmap - CMBitMap* _nextMarkBitMap; // Under-construction mark bitmap + CMBitMapRO* _prevMarkBitMap; // completed mark bitmap + CMBitMap* _nextMarkBitMap; // under-construction mark bitmap BitMap _region_bm; BitMap _card_bm; @@ -409,43 +409,43 @@ HeapWord* _heap_start; HeapWord* _heap_end; - // Root region tracking and claiming + // Root region tracking and claiming. 
CMRootRegions _root_regions; // For gray objects - CMMarkStack _markStack; // Grey objects behind global finger - HeapWord* volatile _finger; // The global finger, region aligned, + CMMarkStack _markStack; // Grey objects behind global finger. + HeapWord* volatile _finger; // the global finger, region aligned, // always points to the end of the // last claimed region - // Marking tasks - uint _max_worker_id;// Maximum worker id - uint _active_tasks; // Task num currently active - CMTask** _tasks; // Task queue array (max_worker_id len) - CMTaskQueueSet* _task_queues; // Task queue set - ParallelTaskTerminator _terminator; // For termination + // marking tasks + uint _max_worker_id;// maximum worker id + uint _active_tasks; // task num currently active + CMTask** _tasks; // task queue array (max_worker_id len) + CMTaskQueueSet* _task_queues; // task queue set + ParallelTaskTerminator _terminator; // for termination - // Two sync barriers that are used to synchronize tasks when an + // Two sync barriers that are used to synchronise tasks when an // overflow occurs. The algorithm is the following. All tasks enter // the first one to ensure that they have all stopped manipulating - // the global data structures. After they exit it, they re-initialize - // their data structures and task 0 re-initializes the global data + // the global data structures. After they exit it, they re-initialise + // their data structures and task 0 re-initialises the global data // structures. Then, they enter the second sync barrier. This // ensure, that no task starts doing work before all data - // structures (local and global) have been re-initialized. When they + // structures (local and global) have been re-initialised. When they // exit it, they are free to start working again. 
WorkGangBarrierSync _first_overflow_barrier_sync; WorkGangBarrierSync _second_overflow_barrier_sync; - // This is set by any task, when an overflow on the global data - // structures is detected + // this is set by any task, when an overflow on the global data + // structures is detected. volatile bool _has_overflown; - // True: marking is concurrent, false: we're in remark + // true: marking is concurrent, false: we're in remark volatile bool _concurrent; - // Set at the end of a Full GC so that marking aborts + // set at the end of a Full GC so that marking aborts volatile bool _has_aborted; - // Used when remark aborts due to an overflow to indicate that + // used when remark aborts due to an overflow to indicate that // another concurrent marking phase should start volatile bool _restart_for_overflow; @@ -455,10 +455,10 @@ // time of remark. volatile bool _concurrent_marking_in_progress; - // Verbose level + // verbose level CMVerboseLevel _verbose_level; - // All of these times are in ms + // All of these times are in ms. NumberSeq _init_times; NumberSeq _remark_times; NumberSeq _remark_mark_times; @@ -467,7 +467,7 @@ double _total_counting_time; double _total_rs_scrub_time; - double* _accum_task_vtime; // Accumulated task vtime + double* _accum_task_vtime; // accumulated task vtime FlexibleWorkGang* _parallel_workers; @@ -487,7 +487,7 @@ void reset_marking_state(bool clear_overflow = true); // We do this after we're done with marking so that the marking data - // structures are initialized to a sensible and predictable state. + // structures are initialised to a sensible and predictable state. void set_non_marking_state(); // Called to indicate how many threads are currently active. @@ -497,14 +497,14 @@ // mark or remark) and how many threads are currently active. 
void set_concurrency_and_phase(uint active_tasks, bool concurrent); - // Prints all gathered CM-related statistics + // prints all gathered CM-related statistics void print_stats(); bool cleanup_list_is_empty() { return _cleanup_list.is_empty(); } - // Accessor methods + // accessor methods uint parallel_marking_threads() const { return _parallel_marking_threads; } uint max_parallel_marking_threads() const { return _max_parallel_marking_threads;} double sleep_factor() { return _sleep_factor; } @@ -542,7 +542,7 @@ // frequently. HeapRegion* claim_region(uint worker_id); - // It determines whether we've run out of regions to scan + // It determines whether we've run out of regions to scan. bool out_of_regions() { return _finger == _heap_end; } // Returns the task with the given id @@ -816,7 +816,7 @@ inline bool do_yield_check(uint worker_i = 0); inline bool should_yield(); - // Called to abort the marking cycle after a Full GC takes place. + // Called to abort the marking cycle after a Full GC takes place. void abort(); bool has_aborted() { return _has_aborted; } @@ -933,11 +933,11 @@ // Similar to the above routine but there are times when we cannot // safely calculate the size of obj due to races and we, therefore, - // pass the size in as a parameter. It is the caller's responsibility + // pass the size in as a parameter. It is the caller's responsibility // to ensure that the size passed in for obj is valid. inline bool par_mark_and_count(oop obj, size_t word_size, uint worker_id); - // Unconditionally mark the given object, and unconditionally count + // Unconditionally mark the given object, and unconditionally count // the object in the counting structures for worker id 0. // Should *not* be called from parallel code. inline bool mark_and_count(oop obj, HeapRegion* hr);