comparison src/share/vm/gc_implementation/g1/concurrentMark.hpp @ 6862:8a5ea0a9ccc4

7127708: G1: change task num types from int to uint in concurrent mark
Summary: Change the type of various task num fields, parameters etc to unsigned and rename them to be more consistent with the other collectors. Code changes were also reviewed by Vitaly Davidovich.
Reviewed-by: johnc
Contributed-by: Kaushik Srenevasan <kaushik@twitter.com>
author johnc
date Sat, 06 Oct 2012 01:17:44 -0700
parents 988bf00cc564
children 442f942757c0
comparing 6830:81e878c53615 with 6862:8a5ea0a9ccc4
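
The gist of the change, condensed into one before/after pair lifted from the hunks below (everything else about the declarations is elided):

    // before: signed "task numbers", inconsistently named
    HeapRegion* claim_region(int task);
    void enter_first_sync_barrier(int task_num);

    // after: unsigned worker ids, named consistently with the other collectors
    HeapRegion* claim_region(uint worker_id);
    void enter_first_sync_barrier(uint worker_id);
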
@@ -397,13 +397,13 @@
   HeapWord* volatile _finger; // the global finger, region aligned,
                               // always points to the end of the
                               // last claimed region
 
   // marking tasks
-  uint _max_task_num;          // maximum task number
+  uint _max_worker_id;         // maximum worker id
   uint _active_tasks;          // task num currently active
-  CMTask** _tasks;             // task queue array (max_task_num len)
+  CMTask** _tasks;             // task queue array (max_worker_id len)
   CMTaskQueueSet* _task_queues;       // task queue set
   ParallelTaskTerminator _terminator; // for termination
 
   // Two sync barriers that are used to synchronise tasks when an
   // overflow occurs. The algorithm is the following. All tasks enter
@@ -490,24 +490,24 @@
   bool concurrent() { return _concurrent; }
   uint active_tasks() { return _active_tasks; }
   ParallelTaskTerminator* terminator() { return &_terminator; }
 
   // It claims the next available region to be scanned by a marking
-  // task. It might return NULL if the next region is empty or we have
-  // run out of regions. In the latter case, out_of_regions()
+  // task/thread. It might return NULL if the next region is empty or
+  // we have run out of regions. In the latter case, out_of_regions()
   // determines whether we've really run out of regions or the task
   // should call claim_region() again. This might seem a bit
   // awkward. Originally, the code was written so that claim_region()
   // either successfully returned with a non-empty region or there
   // were no more regions to be claimed. The problem with this was
   // that, in certain circumstances, it iterated over large chunks of
   // the heap finding only empty regions and, while it was working, it
   // was preventing the calling task to call its regular clock
   // method. So, this way, each task will spend very little time in
   // claim_region() and is allowed to call the regular clock method
   // frequently.
-  HeapRegion* claim_region(int task);
+  HeapRegion* claim_region(uint worker_id);
 
   // It determines whether we've run out of regions to scan.
   bool out_of_regions() { return _finger == _heap_end; }
 
   // Returns the task with the given id
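
Below is a hedged sketch of the calling pattern the comment above describes, i.e. why claim_region() is allowed to return NULL even when regions remain. The method name claim_and_scan_regions() and the helpers scan_region() and regular_clock_call() are stand-ins for the task's real machinery, not code from this changeset:

    // Illustrative only: a marking task keeps asking for regions, and because
    // claim_region() returns quickly (possibly empty-handed), the task reaches
    // its clock method frequently.
    void CMTask::claim_and_scan_regions() {
      HeapRegion* hr = NULL;
      while (hr == NULL && !_cm->out_of_regions()) {
        hr = _cm->claim_region(_worker_id);  // may be NULL for an empty region
        regular_clock_call();                // stand-in for the "regular clock method"
      }
      if (hr != NULL) {
        scan_region(hr);                     // stand-in: set up and scan the claimed region
      }
    }
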
@@ -535,12 +535,12 @@
   bool restart_for_overflow() { return _restart_for_overflow; }
 
   bool has_aborted() { return _has_aborted; }
 
   // Methods to enter the two overflow sync barriers
-  void enter_first_sync_barrier(int task_num);
-  void enter_second_sync_barrier(int task_num);
+  void enter_first_sync_barrier(uint worker_id);
+  void enter_second_sync_barrier(uint worker_id);
 
   ForceOverflowSettings* force_overflow_conc() {
     return &_force_overflow_conc;
   }
 
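
The comment that introduces these barriers (end of the first hunk) is cut off in this comparison, so the following is only a hedged sketch of how a worker might use the two entry points when the global marking state overflows; the handle_overflow() wrapper and the reset step between the barriers are assumptions, not quoted from the changeset:

    // Hypothetical wrapper, for illustration only.
    void CMTask::handle_overflow() {
      _cm->enter_first_sync_barrier(_worker_id);   // wait until every active task has
                                                   // stopped touching the global state
      // ...local (and, by one designated task, global) data structures are reset...
      _cm->enter_second_sync_barrier(_worker_id);  // wait until the reset is complete
                                                   // before marking is restarted
    }
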
@@ -624,18 +624,18 @@
     _accum_task_vtime[i] += vtime;
   }
 
   double all_task_accum_vtime() {
     double ret = 0.0;
-    for (int i = 0; i < (int)_max_task_num; ++i)
+    for (uint i = 0; i < _max_worker_id; ++i)
       ret += _accum_task_vtime[i];
     return ret;
   }
 
   // Attempts to steal an object from the task queues of other tasks
-  bool try_stealing(int task_num, int* hash_seed, oop& obj) {
-    return _task_queues->steal(task_num, hash_seed, obj);
+  bool try_stealing(uint worker_id, int* hash_seed, oop& obj) {
+    return _task_queues->steal(worker_id, hash_seed, obj);
   }
 
   ConcurrentMark(ReservedSpace rs, uint max_regions);
   ~ConcurrentMark();
 
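
A hedged sketch of the stealing loop try_stealing() exists for, once a task's own queue is empty: keep stealing from other tasks' queues and only give up (and head for the termination protocol) when stealing fails. _hash_seed, process_object() and the enclosing method are assumed names, not taken from the hunks:

    // Illustrative only, inside a CMTask method.
    void CMTask::steal_work() {
      oop obj;
      while (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
        process_object(obj);   // assumed helper: scan the stolen entry, which may
                               // push more work onto this task's local queue
      }
      // only when stealing also fails does the task offer termination
    }
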
@@ -821,21 +821,21 @@
     return _heap_bottom_card_num;
   }
 
   // Returns the card bitmap for a given task or worker id.
   BitMap* count_card_bitmap_for(uint worker_id) {
-    assert(0 <= worker_id && worker_id < _max_task_num, "oob");
+    assert(0 <= worker_id && worker_id < _max_worker_id, "oob");
     assert(_count_card_bitmaps != NULL, "uninitialized");
     BitMap* task_card_bm = &_count_card_bitmaps[worker_id];
     assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
     return task_card_bm;
   }
 
   // Returns the array containing the marked bytes for each region,
   // for the given worker or task id.
   size_t* count_marked_bytes_array_for(uint worker_id) {
-    assert(0 <= worker_id && worker_id < _max_task_num, "oob");
+    assert(0 <= worker_id && worker_id < _max_worker_id, "oob");
     assert(_count_marked_bytes != NULL, "uninitialized");
     size_t* marked_bytes_array = _count_marked_bytes[worker_id];
     assert(marked_bytes_array != NULL, "uninitialized");
     return marked_bytes_array;
   }
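
A hedged sketch of how the two accessors above are meant to be used together: each worker id selects a private card bitmap and marked-bytes array, so liveness can be counted without synchronisation and aggregated into the global structures later. The helper name and the index parameters are assumptions, not code from this changeset:

    // Hypothetical helper, for illustration only.
    void count_live_object(ConcurrentMark* cm, uint worker_id,
                           size_t region_index, size_t card_index,
                           size_t obj_size_in_words) {
      size_t* marked_bytes = cm->count_marked_bytes_array_for(worker_id);
      BitMap* card_bm      = cm->count_card_bitmap_for(worker_id);
      marked_bytes[region_index] += obj_size_in_words * HeapWordSize; // per-region bytes
      card_bm->set_bit(card_index);                                   // per-card liveness bit
    }
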
@@ -937,11 +937,11 @@
     // how many entries will be transferred between global stack and
     // local queues
     global_stack_transfer_size = 16
   };
 
-  int _task_id;
+  uint _worker_id;
   G1CollectedHeap* _g1h;
   ConcurrentMark* _cm;
   CMBitMap* _nextMarkBitMap;
   // the task queue of this task
   CMTaskQueue* _task_queue;
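
The enum value above fixes the batch size used when shuffling entries between the global mark stack and a task's local queue. A hedged sketch of one direction of that transfer; mark_stack_pop() taking a buffer, and push(), are assumed helpers:

    // Illustrative only: pull up to one batch from the global stack into the
    // local queue.
    oop buffer[global_stack_transfer_size];
    int n;
    _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);  // assumed helper
    for (int i = 0; i < n; ++i) {
      push(buffer[i]);  // assumed helper: enqueue on this task's local queue
    }
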
@@ -1113,12 +1113,12 @@
   }
   void record_end_time() {
     _elapsed_time_ms = os::elapsedTime() * 1000.0 - _elapsed_time_ms;
   }
 
-  // returns the task ID
-  int task_id() { return _task_id; }
+  // returns the worker ID associated with this task.
+  uint worker_id() { return _worker_id; }
 
   // From TerminatorTerminator. It determines whether this task should
   // exit the termination protocol after it's entered it.
   virtual bool should_exit_termination();
 
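
record_end_time() above only works if a matching start method stashed the start timestamp in the same field, so that the subtraction leaves the elapsed interval behind. A minimal sketch of that assumed counterpart:

    void record_start_time() {
      _elapsed_time_ms = os::elapsedTime() * 1000.0;  // stash start time in ms;
                                                      // record_end_time() later
                                                      // overwrites it with (end - start)
    }
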
@@ -1168,11 +1168,11 @@
   inline void move_finger_to(HeapWord* new_finger) {
     assert(new_finger >= _finger && new_finger < _region_limit, "invariant");
     _finger = new_finger;
   }
 
-  CMTask(int task_num, ConcurrentMark *cm,
+  CMTask(uint worker_id, ConcurrentMark *cm,
          size_t* marked_bytes, BitMap* card_bm,
          CMTaskQueue* task_queue, CMTaskQueueSet* task_queues);
 
   // it prints statistics associated with this task
   void print_stats();
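
Finally, a hedged sketch of how ConcurrentMark might wire up one CMTask per worker id using the constructor above, tying together _max_worker_id, _tasks, _task_queues and the per-worker counting accessors. The register_queue()/initialize() calls and the allocation details are assumptions, not part of this changeset:

    for (uint i = 0; i < _max_worker_id; ++i) {
      CMTaskQueue* queue = new CMTaskQueue();
      queue->initialize();                     // assumed queue setup
      _task_queues->register_queue(i, queue);  // assumed task-queue-set API
      _tasks[i] = new CMTask(i, this,
                             count_marked_bytes_array_for(i),
                             count_card_bitmap_for(i),
                             queue, _task_queues);
    }
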