comparison src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp @ 4013:074f0252cc13

7088680: G1: Cleanup in the G1CollectorPolicy class
Summary: Removed unused fields and methods, removed the G1CollectorPolicy_BestRegionsFirst class and folded its functionality into the G1CollectorPolicy class.
Reviewed-by: ysr, brutisso, jcoomes
author tonyp
date Fri, 14 Oct 2011 11:12:24 -0400
parents 8229bd737950
children c6a6e936dc68
comparing 4012:ec4b032a4977 (old) with 4013:074f0252cc13 (new)
@@ old 82-92 / new 82-92 @@
 public:
   virtual MainBodySummary* main_body_summary() { return this; }
 };

 class G1CollectorPolicy: public CollectorPolicy {
-protected:
+private:
   // The number of pauses during the execution.
   long _n_pauses;

   // either equal to the number of parallel threads, if ParallelGCThreads
   // has been set, or 1 otherwise

@@ old 104-117 / new 104-114 @@
     initialize_flags();
     initialize_size_info();
     initialize_perm_generation(PermGen::MarkSweepCompact);
   }

-  virtual size_t default_init_heap_size() {
-    // Pick some reasonable default.
-    return 8*M;
-  }
+  CollectionSetChooser* _collectionSetChooser;

   double _cur_collection_start_sec;
   size_t _cur_collection_pause_used_at_start_bytes;
   size_t _cur_collection_pause_used_regions_at_start;
   size_t _prev_collection_pause_used_at_end_bytes;
@@ old 314-324 / new 311-320 @@

   void adjust_concurrent_refinement(double update_rs_time,
                                     double update_rs_processed_buffers,
                                     double goal_ms);

-protected:
   double _pause_time_target_ms;
   double _recorded_young_cset_choice_time_ms;
   double _recorded_non_young_cset_choice_time_ms;
   bool _within_target;
   size_t _pending_cards;
@@ old 552-562 / new 548-558 @@

   double accum_yg_surv_rate_pred(int age) {
     return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
   }

-protected:
+private:
   void print_stats(int level, const char* str, double value);
   void print_stats(int level, const char* str, int value);

   void print_par_stats(int level, const char* str, double* data);
   void print_par_sizes(int level, const char* str, double* data);
@@ old 586-599 / new 582-591 @@
   friend class CountCSClosure;

   // Statistics kept per GC stoppage, pause or full.
   TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;

-  // We track markings.
-  int _num_markings;
-  double _mark_thread_startup_sec; // Time at startup of marking thread
-
   // Add a new GC of the given duration and end time to the record.
   void update_recent_gc_times(double end_time_sec, double elapsed_ms);

   // The head of the list (via "next_in_collection_set()") representing the
   // current collection set. Set from the incrementally built collection
@@ old 661-676 / new 653-662 @@

   // The predicted bytes to copy for the regions in the collection
   // set (updated by the periodic sampling of the regions in the
   // young list/collection set).
   size_t _inc_cset_predicted_bytes_to_copy;
-
-  // Info about marking.
-  int _n_marks; // Sticky at 2, so we know when we've done at least 2.
-
-  // The number of collection pauses at the end of the last mark.
-  size_t _n_pauses_at_mark_end;

   // Stash a pointer to the g1 heap.
   G1CollectedHeap* _g1;

   // The average time in ms per collection pause, averaged over recent pauses.
@@ old 734-745 / new 720-729 @@
     return _recent_avg_pause_time_ratio;
   }

   // Number of pauses between concurrent marking.
   size_t _pauses_btwn_concurrent_mark;
-
-  size_t _n_marks_since_last_pause;

   // At the end of a pause we check the heap occupancy and we decide
   // whether we will start a marking cycle during the next pause. If
   // we decide that we want to do that, we will set this parameter to
   // true. So, this parameter will stay true between the end of a
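Editor's note: the comment above (cut off here by the comparison's context window) describes a flag that is set at the end of a pause when heap occupancy suggests a marking cycle should start, and that stays set until a later pause acts on it. A minimal sketch of that end-of-pause/next-pause handshake follows; the class and member names are illustrative stand-ins, not the actual HotSpot fields.

  #include <cstddef>

  // Illustrative only: models the "decide at pause end, act at next pause"
  // flag described in the comment above, not the real G1CollectorPolicy state.
  class MarkingTrigger {
   public:
    explicit MarkingTrigger(double occupancy_trigger_fraction)
        : occupancy_trigger_fraction_(occupancy_trigger_fraction),
          should_initiate_marking_(false) {}

    // Called at the end of a collection pause: remember the decision.
    // Assumes capacity_bytes > 0.
    void record_pause_end(size_t used_bytes, size_t capacity_bytes) {
      double occupancy = (double) used_bytes / (double) capacity_bytes;
      if (occupancy >= occupancy_trigger_fraction_) {
        should_initiate_marking_ = true;   // stays true until consumed
      }
    }

    // Called at the start of a later pause: consume the decision.
    bool consume_marking_request() {
      bool initiate = should_initiate_marking_;
      should_initiate_marking_ = false;
      return initiate;
    }

   private:
    double occupancy_trigger_fraction_;
    bool should_initiate_marking_;
  };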
@@ old 808-817 / new 792-806 @@
   // given free space (expressed by base_free_regions). It is used by
   // calculate_young_list_target_length().
   bool predict_will_fit(size_t young_length, double base_time_ms,
                         size_t base_free_regions, double target_pause_time_ms);

+  // Count the number of bytes used in the CS.
+  void count_CS_bytes_used();
+
+  void update_young_list_size_using_newratio(size_t number_of_heap_regions);
+
 public:

   G1CollectorPolicy();

   virtual G1CollectorPolicy* as_g1_policy() { return this; }
@@ old 834-859 / new 823-835 @@
   }

   // This should be called after the heap is resized.
   void record_new_heap_size(size_t new_number_of_regions);

-protected:
-
-  // Count the number of bytes used in the CS.
-  void count_CS_bytes_used();
-
-  // Together these do the base cleanup-recording work. Subclasses might
-  // want to put something between them.
-  void record_concurrent_mark_cleanup_end_work1(size_t freed_bytes,
-                                                size_t max_live_bytes);
-  void record_concurrent_mark_cleanup_end_work2();
-
-  void update_young_list_size_using_newratio(size_t number_of_heap_regions);
-
 public:

-  virtual void init();
+  void init();

   // Create jstat counters for the policy.
   virtual void initialize_gc_policy_counters();

   virtual HeapWord* mem_allocate_work(size_t size,
@@ old 874-911 / new 850-885 @@

   // Update the heuristic info to record a collection pause of the given
   // start time, where the given number of bytes were used at the start.
   // This may involve changing the desired size of a collection set.

-  virtual void record_stop_world_start();
+  void record_stop_world_start();

-  virtual void record_collection_pause_start(double start_time_sec,
-                                             size_t start_used);
+  void record_collection_pause_start(double start_time_sec, size_t start_used);

   // Must currently be called while the world is stopped.
   void record_concurrent_mark_init_end(double
                                           mark_init_elapsed_time_ms);

   void record_mark_closure_time(double mark_closure_time_ms);

-  virtual void record_concurrent_mark_remark_start();
-  virtual void record_concurrent_mark_remark_end();
+  void record_concurrent_mark_remark_start();
+  void record_concurrent_mark_remark_end();

-  virtual void record_concurrent_mark_cleanup_start();
-  virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
-                                                  size_t max_live_bytes);
-  virtual void record_concurrent_mark_cleanup_completed();
-
-  virtual void record_concurrent_pause();
-  virtual void record_concurrent_pause_end();
-
-  virtual void record_collection_pause_end();
+  void record_concurrent_mark_cleanup_start();
+  void record_concurrent_mark_cleanup_end();
+  void record_concurrent_mark_cleanup_completed();
+
+  void record_concurrent_pause();
+  void record_concurrent_pause_end();
+
+  void record_collection_pause_end();
   void print_heap_transition();

   // Record the fact that a full collection occurred.
-  virtual void record_full_collection_start();
-  virtual void record_full_collection_end();
+  void record_full_collection_start();
+  void record_full_collection_end();

   void record_gc_worker_start_time(int worker_i, double ms) {
     _par_last_gc_worker_start_times_ms[worker_i] = ms;
   }
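Editor's note: the record_* calls above form a per-pause protocol: note the stop-world request, the pause start with the bytes in use at that point, then the pause end once evacuation is done. A paraphrased sketch of that call order, using the post-change (non-virtual) signatures from this hunk against a stub policy, is below; the caller shown is not the actual G1CollectedHeap code.

  #include <cstddef>

  // Minimal stand-in for the policy with the signatures shown in this hunk.
  // Bodies are empty stubs for illustration only.
  struct PolicyStub {
    void record_stop_world_start() {}
    void record_collection_pause_start(double /*start_time_sec*/, size_t /*start_used*/) {}
    void record_mark_closure_time(double /*mark_closure_time_ms*/) {}
    void record_collection_pause_end() {}
  };

  // Paraphrased call order for an evacuation pause (not the real call sites):
  // stop-world request, start-of-pause bookkeeping, the evacuation work,
  // then end-of-pause accounting that feeds the pause predictors.
  void do_collection_pause(PolicyStub& policy, double now_sec, size_t used_bytes) {
    policy.record_stop_world_start();
    policy.record_collection_pause_start(now_sec, used_bytes);
    // ... evacuate the collection set, update remembered sets, etc. ...
    policy.record_collection_pause_end();
  }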
@@ old 1020-1030 / new 994-1004 @@
   }

   // Choose a new collection set. Marks the chosen regions as being
   // "in_collection_set", and links them together. The head and number of
   // the collection set are available via access methods.
-  virtual void choose_collection_set(double target_pause_time_ms) = 0;
+  void choose_collection_set(double target_pause_time_ms);

   // The head of the list (via "next_in_collection_set()") representing the
   // current collection set.
   HeapRegion* collection_set() { return _collection_set; }
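Editor's note: choose_collection_set() links the chosen regions through next_in_collection_set(), and collection_set() exposes the head of that list. A small sketch of walking the resulting singly linked list follows; the Region type is an illustrative stand-in for HeapRegion, only the linking idea is taken from the comment above.

  #include <cstddef>

  // Illustrative stand-in for HeapRegion: only the intrusive collection-set
  // link and a byte count matter for this sketch.
  struct Region {
    Region* next_in_collection_set;
    size_t  used_bytes;
  };

  // Walk the collection set as the comment describes: follow the
  // next_in_collection_set chain from the head returned by collection_set().
  size_t collection_set_used_bytes(Region* collection_set_head) {
    size_t total = 0;
    for (Region* r = collection_set_head; r != NULL; r = r->next_in_collection_set) {
      total += r->used_bytes;
    }
    return total;
  }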
@@ old 1105-1127 / new 1079-1094 @@
   // the initial-mark work and start a marking cycle.
   void decide_on_conc_mark_initiation();

   // If an expansion would be appropriate, because recent GC overhead had
   // exceeded the desired limit, return an amount to expand by.
-  virtual size_t expansion_amount();
-
-  // note start of mark thread
-  void note_start_of_mark_thread();
-
-  // The marked bytes of the "r" has changed; reclassify it's desirability
-  // for marking. Also asserts that "r" is eligible for a CS.
-  virtual void note_change_in_marked_bytes(HeapRegion* r) = 0;
+  size_t expansion_amount();

 #ifndef PRODUCT
   // Check any appropriate marked bytes info, asserting false if
   // something's wrong, else returning "true".
-  virtual bool assertMarkedBytesDataOK() = 0;
+  bool assertMarkedBytesDataOK();
 #endif

   // Print tracing information.
   void print_tracing_info() const;
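Editor's note: expansion_amount() is documented as returning how much to grow the heap when recent GC overhead exceeds the desired limit. The sketch below shows one plausible shape for such a heuristic; the ratio inputs and the fixed 20% expansion step are assumptions for illustration, not the values G1 actually uses.

  #include <cstddef>

  // Illustrative heuristic: if the recent GC-time ratio is above the target
  // ratio, propose expanding by a fixed fraction of the committed heap,
  // capped by what is still uncommitted. Not the actual G1 computation.
  // Assumes max_heap_bytes >= committed_bytes.
  size_t expansion_amount(double recent_gc_time_ratio,
                          double target_gc_time_ratio,
                          size_t committed_bytes,
                          size_t max_heap_bytes) {
    if (recent_gc_time_ratio <= target_gc_time_ratio) {
      return 0;  // overhead within budget: no expansion suggested
    }
    size_t headroom = max_heap_bytes - committed_bytes;
    size_t proposal = committed_bytes / 5;   // expand by ~20% (assumed step)
    return proposal < headroom ? proposal : headroom;
  }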
@@ old 1180-1193 / new 1147-1160 @@
 #endif // 0
     guarantee(0.0 <= ret && ret < 10.0, "invariant!");
     return ret;
   }

+private:
   //
   // Survivor regions policy.
   //
-protected:

   // Current tenuring threshold, set to 0 if the collector reaches the
   // maximum amount of suvivors regions.
   int _tenuring_threshold;

@@ old 1261-1315 / new 1228-1237 @@
   void update_max_gc_locker_expansion();

   // Calculates survivor space parameters.
   void update_survivors_policy();

-};
-
-// This encapsulates a particular strategy for a g1 Collector.
-//
-// Start a concurrent mark when our heap size is n bytes
-// greater then our heap size was at the last concurrent
-// mark. Where n is a function of the CMSTriggerRatio
-// and the MinHeapFreeRatio.
-//
-// Start a g1 collection pause when we have allocated the
-// average number of bytes currently being freed in
-// a collection, but only if it is at least one region
-// full
-//
-// Resize Heap based on desired
-// allocation space, where desired allocation space is
-// a function of survival rate and desired future to size.
-//
-// Choose collection set by first picking all older regions
-// which have a survival rate which beats our projected young
-// survival rate. Then fill out the number of needed regions
-// with young regions.
-
-class G1CollectorPolicy_BestRegionsFirst: public G1CollectorPolicy {
-  CollectionSetChooser* _collectionSetChooser;
-
-  virtual void choose_collection_set(double target_pause_time_ms);
-  virtual void record_collection_pause_start(double start_time_sec,
-                                             size_t start_used);
-  virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
-                                                  size_t max_live_bytes);
-  virtual void record_full_collection_end();
-
-public:
-  G1CollectorPolicy_BestRegionsFirst() {
-    _collectionSetChooser = new CollectionSetChooser();
-  }
-  void record_collection_pause_end();
-  // This is not needed any more, after the CSet choosing code was
-  // changed to use the pause prediction work. But let's leave the
-  // hook in just in case.
-  void note_change_in_marked_bytes(HeapRegion* r) { }
-#ifndef PRODUCT
-  bool assertMarkedBytesDataOK();
-#endif
 };

 // This should move to some place more general...

 // If we have "n" measurements, and we've kept track of their "sum" and the