Mercurial > hg > graal-jvmci-8
comparison src/share/vm/gc_implementation/g1/concurrentMark.hpp @ 4837:eff609af17d7
7127706: G1: re-enable survivors during the initial-mark pause
Summary: Re-enable survivors during the initial-mark pause. Afterwards, the concurrent marking threads have to scan them and mark everything reachable from them. The next GC will have to wait for the survivors to be scanned.
Reviewed-by: brutisso, johnc
author | tonyp |
---|---|
date | Wed, 25 Jan 2012 12:58:23 -0500 |
parents | d30fa85f9994 |
children | 2a0172480595 |
comparison
equal
deleted
inserted
replaced
4836:d30fa85f9994 | 4837:eff609af17d7 |
---|---|
347 low_verbose, // low verbose, mostly per region and per major event | 347 low_verbose, // low verbose, mostly per region and per major event |
348 medium_verbose, // a bit more detailed than low | 348 medium_verbose, // a bit more detailed than low |
349 high_verbose // per object verbose | 349 high_verbose // per object verbose |
350 } CMVerboseLevel; | 350 } CMVerboseLevel; |
351 | 351 |
352 class YoungList; | |
353 | |
354 // Root Regions are regions that are not empty at the beginning of a | |
355 // marking cycle and which we might collect during an evacuation pause | |
356 // while the cycle is active. Given that, during evacuation pauses, we | |
357 // do not copy objects that are explicitly marked, what we have to do | |
358 // for the root regions is to scan them and mark all objects reachable | |
359 // from them. According to the SATB assumptions, we only need to visit | |
360 // each object once during marking. So, as long as we finish this scan | |
361 // before the next evacuation pause, we can copy the objects from the | |
362 // root regions without having to mark them or do anything else to them. | |
363 // | |
364 // Currently, we only support root region scanning once (at the start | |
365 // of the marking cycle) and the root regions are all the survivor | |
366 // regions populated during the initial-mark pause. | |
367 class CMRootRegions VALUE_OBJ_CLASS_SPEC { | |
368 private: | |
369 YoungList* _young_list; | |
370 ConcurrentMark* _cm; | |
371 | |
372 volatile bool _scan_in_progress; | |
373 volatile bool _should_abort; | |
374 HeapRegion* volatile _next_survivor; | |
375 | |
376 public: | |
377 CMRootRegions(); | |
378 // We actually do most of the initialization in this method. | |
379 void init(G1CollectedHeap* g1h, ConcurrentMark* cm); | |
380 | |
381 // Reset the claiming / scanning of the root regions. | |
382 void prepare_for_scan(); | |
383 | |
384 // Forces claim_next() to return NULL so that the iteration aborts early. | |
385 void abort() { _should_abort = true; } | |
386 | |
387 // Return true if the CM threads are actively scanning root regions, | |
388 // false otherwise. | |
389 bool scan_in_progress() { return _scan_in_progress; } | |
390 | |
391 // Claim the next root region to scan atomically, or return NULL if | |
392 // all have been claimed. | |
393 HeapRegion* claim_next(); | |
394 | |
395 // Flag that we're done with root region scanning and notify anyone | |
396 // who's waiting on it. If aborted is false, assume that all regions | |
397 // have been claimed. | |
398 void scan_finished(); | |
399 | |
400 // If CM threads are still scanning root regions, wait until they | |
401 // are done. Return true if we had to wait, false otherwise. | |
402 bool wait_until_scan_finished(); | |
403 }; | |
352 | 404 |
353 class ConcurrentMarkThread; | 405 class ConcurrentMarkThread; |
354 | 406 |
355 class ConcurrentMark: public CHeapObj { | 407 class ConcurrentMark : public CHeapObj { |
356 friend class ConcurrentMarkThread; | 408 friend class ConcurrentMarkThread; |
357 friend class CMTask; | 409 friend class CMTask; |
358 friend class CMBitMapClosure; | 410 friend class CMBitMapClosure; |
359 friend class CSetMarkOopClosure; | 411 friend class CSetMarkOopClosure; |
360 friend class CMGlobalObjectClosure; | 412 friend class CMGlobalObjectClosure; |
397 BitMap _card_bm; | 449 BitMap _card_bm; |
398 | 450 |
399 // Heap bounds | 451 // Heap bounds |
400 HeapWord* _heap_start; | 452 HeapWord* _heap_start; |
401 HeapWord* _heap_end; | 453 HeapWord* _heap_end; |
454 | |
455 // Root region tracking and claiming. | |
456 CMRootRegions _root_regions; | |
402 | 457 |
403 // For gray objects | 458 // For gray objects |
404 CMMarkStack _markStack; // Grey objects behind global finger. | 459 CMMarkStack _markStack; // Grey objects behind global finger. |
405 CMRegionStack _regionStack; // Grey regions behind global finger. | 460 CMRegionStack _regionStack; // Grey regions behind global finger. |
406 HeapWord* volatile _finger; // the global finger, region aligned, | 461 HeapWord* volatile _finger; // the global finger, region aligned, |
551 // Access / manipulation of the overflow flag which is set to | 606 // Access / manipulation of the overflow flag which is set to |
552 // indicate that the global stack or region stack has overflown | 607 // indicate that the global stack or region stack has overflown |
553 bool has_overflown() { return _has_overflown; } | 608 bool has_overflown() { return _has_overflown; } |
554 void set_has_overflown() { _has_overflown = true; } | 609 void set_has_overflown() { _has_overflown = true; } |
555 void clear_has_overflown() { _has_overflown = false; } | 610 void clear_has_overflown() { _has_overflown = false; } |
611 bool restart_for_overflow() { return _restart_for_overflow; } | |
556 | 612 |
557 bool has_aborted() { return _has_aborted; } | 613 bool has_aborted() { return _has_aborted; } |
558 bool restart_for_overflow() { return _restart_for_overflow; } | |
559 | 614 |
560 // Methods to enter the two overflow sync barriers | 615 // Methods to enter the two overflow sync barriers |
561 void enter_first_sync_barrier(int task_num); | 616 void enter_first_sync_barrier(int task_num); |
562 void enter_second_sync_barrier(int task_num); | 617 void enter_second_sync_barrier(int task_num); |
563 | 618 |
689 bool invalidate_aborted_regions_in_cset(); | 744 bool invalidate_aborted_regions_in_cset(); |
690 | 745 |
691 // Returns true if there are any aborted memory regions. | 746 // Returns true if there are any aborted memory regions. |
692 bool has_aborted_regions(); | 747 bool has_aborted_regions(); |
693 | 748 |
749 CMRootRegions* root_regions() { return &_root_regions; } | |
750 | |
694 bool concurrent_marking_in_progress() { | 751 bool concurrent_marking_in_progress() { |
695 return _concurrent_marking_in_progress; | 752 return _concurrent_marking_in_progress; |
696 } | 753 } |
697 void set_concurrent_marking_in_progress() { | 754 void set_concurrent_marking_in_progress() { |
698 _concurrent_marking_in_progress = true; | 755 _concurrent_marking_in_progress = true; |
739 | 796 |
740 // The following three are interaction between CM and | 797 // The following three are interaction between CM and |
741 // G1CollectedHeap | 798 // G1CollectedHeap |
742 | 799 |
743 // This notifies CM that a root during initial-mark needs to be | 800 // This notifies CM that a root during initial-mark needs to be |
744 // grayed. It is MT-safe. | 801 // grayed. It is MT-safe. word_size is the size of the object in |
745 inline void grayRoot(oop obj, size_t word_size, uint worker_id); | 802 // words. It is passed explicitly as sometimes we cannot calculate |
803 // it from the given object because it might be in an inconsistent | |
804 // state (e.g., in to-space and being copied). So the caller is | |
805 // responsible for dealing with this issue (e.g., get the size from | |
806 // the from-space image when the to-space image might be | |
807 // inconsistent) and always passing the size. hr is the region that | |
808 // contains the object and it's passed optionally from callers who | |
809 // might already have it (no point in recalculating it). | |
810 inline void grayRoot(oop obj, size_t word_size, | |
811 uint worker_id, HeapRegion* hr = NULL); | |
746 | 812 |
747 // It's used during evacuation pauses to gray a region, if | 813 // It's used during evacuation pauses to gray a region, if |
748 // necessary, and it's MT-safe. It assumes that the caller has | 814 // necessary, and it's MT-safe. It assumes that the caller has |
749 // marked any objects on that region. If _should_gray_objects is | 815 // marked any objects on that region. If _should_gray_objects is |
750 // true and we're still doing concurrent marking, the region is | 816 // true and we're still doing concurrent marking, the region is |
791 // pre/post code. It might be the case that we can put everything in | 857 // pre/post code. It might be the case that we can put everything in |
792 // the post method. TP | 858 // the post method. TP |
793 void checkpointRootsInitialPre(); | 859 void checkpointRootsInitialPre(); |
794 void checkpointRootsInitialPost(); | 860 void checkpointRootsInitialPost(); |
795 | 861 |
862 // Scan all the root regions and mark everything reachable from | |
863 // them. | |
864 void scanRootRegions(); | |
865 | |
866 // Scan a single root region and mark everything reachable from it. | |
867 void scanRootRegion(HeapRegion* hr, uint worker_id); | |
868 | |
796 // Do concurrent phase of marking, to a tentative transitive closure. | 869 // Do concurrent phase of marking, to a tentative transitive closure. |
797 void markFromRoots(); | 870 void markFromRoots(); |
798 | 871 |
799 // Process all unprocessed SATB buffers. It is called at the | 872 // Process all unprocessed SATB buffers. It is called at the |
800 // beginning of an evacuation pause. | 873 // beginning of an evacuation pause. |
972 size_t* marked_bytes_array, | 1045 size_t* marked_bytes_array, |
973 BitMap* task_card_bm); | 1046 BitMap* task_card_bm); |
974 | 1047 |
975 // Counts the given memory region in the task/worker counting | 1048 // Counts the given memory region in the task/worker counting |
976 // data structures for the given worker id. | 1049 // data structures for the given worker id. |
1050 inline void count_region(MemRegion mr, HeapRegion* hr, uint worker_id); | |
1051 | |
1052 // Counts the given memory region in the task/worker counting | |
1053 // data structures for the given worker id. | |
977 inline void count_region(MemRegion mr, uint worker_id); | 1054 inline void count_region(MemRegion mr, uint worker_id); |
978 | 1055 |
979 // Counts the given object in the given task/worker counting | 1056 // Counts the given object in the given task/worker counting |
980 // data structures. | 1057 // data structures. |
981 inline void count_object(oop obj, HeapRegion* hr, | 1058 inline void count_object(oop obj, HeapRegion* hr, |
989 // Attempts to mark the given object and, if successful, counts | 1066 // Attempts to mark the given object and, if successful, counts |
990 // the object in the given task/worker counting structures. | 1067 // the object in the given task/worker counting structures. |
991 inline bool par_mark_and_count(oop obj, HeapRegion* hr, | 1068 inline bool par_mark_and_count(oop obj, HeapRegion* hr, |
992 size_t* marked_bytes_array, | 1069 size_t* marked_bytes_array, |
993 BitMap* task_card_bm); | 1070 BitMap* task_card_bm); |
1071 | |
1072 // Attempts to mark the given object and, if successful, counts | |
1073 // the object in the task/worker counting structures for the | |
1074 // given worker id. | |
1075 inline bool par_mark_and_count(oop obj, size_t word_size, | |
1076 HeapRegion* hr, uint worker_id); | |
994 | 1077 |
995 // Attempts to mark the given object and, if successful, counts | 1078 // Attempts to mark the given object and, if successful, counts |
996 // the object in the task/worker counting structures for the | 1079 // the object in the task/worker counting structures for the |
997 // given worker id. | 1080 // given worker id. |
998 inline bool par_mark_and_count(oop obj, HeapRegion* hr, uint worker_id); | 1081 inline bool par_mark_and_count(oop obj, HeapRegion* hr, uint worker_id); |