comparison src/share/vm/gc_implementation/shared/allocationStats.hpp @ 1145:e018e6884bd8

6631166: CMS: better heuristics when combatting fragmentation Summary: Autonomic per-worker free block cache sizing, tunable coalition policies, fixes to per-size block statistics, retuned gain and bandwidth of some feedback loop filters to allow quicker reactivity to abrupt changes in ambient demand, and other heuristics to reduce fragmentation of the CMS old gen. Also tightened some assertions, including those related to locking. Reviewed-by: jmasa
author ysr
date Wed, 23 Dec 2009 09:23:54 -0800
parents d1605aabd0a1
children c18cbe5936b8
comparison
equal deleted inserted replaced
1111:44f61c24ddab 1145:e018e6884bd8
29 29
30 // We measure the demand between the end of the previous sweep and 30 // We measure the demand between the end of the previous sweep and
31 // beginning of this sweep: 31 // beginning of this sweep:
32 // Count(end_last_sweep) - Count(start_this_sweep) 32 // Count(end_last_sweep) - Count(start_this_sweep)
33 // + splitBirths(between) - splitDeaths(between) 33 // + splitBirths(between) - splitDeaths(between)
34 // The above number divided by the time since the start [END???] of the 34 // The above number divided by the time since the end of the
35 // previous sweep gives us a time rate of demand for blocks 35 // previous sweep gives us a time rate of demand for blocks
36 // of this size. We compute a padded average of this rate as 36 // of this size. We compute a padded average of this rate as
37 // our current estimate for the time rate of demand for blocks 37 // our current estimate for the time rate of demand for blocks
38 // of this size. Similarly, we keep a padded average for the time 38 // of this size. Similarly, we keep a padded average for the time
39 // between sweeps. Our current estimate for demand for blocks of 39 // between sweeps. Our current estimate for demand for blocks of
40 // this size is then simply computed as the product of these two 40 // this size is then simply computed as the product of these two
41 // estimates. 41 // estimates.
42 AdaptivePaddedAverage _demand_rate_estimate; 42 AdaptivePaddedAverage _demand_rate_estimate;
43 43
44 ssize_t _desired; // Estimate computed as described above 44 ssize_t _desired; // Demand estimate computed as described above
45 ssize_t _coalDesired; // desired +/- small-percent for tuning coalescing 45 ssize_t _coalDesired; // desired +/- small-percent for tuning coalescing
46 46
47 ssize_t _surplus; // count - (desired +/- small-percent), 47 ssize_t _surplus; // count - (desired +/- small-percent),
48 // used to tune splitting in best fit 48 // used to tune splitting in best fit
49 ssize_t _bfrSurp; // surplus at start of current sweep 49 ssize_t _bfrSurp; // surplus at start of current sweep
51 ssize_t _beforeSweep; // count from before current sweep 51 ssize_t _beforeSweep; // count from before current sweep
52 ssize_t _coalBirths; // additional chunks from coalescing 52 ssize_t _coalBirths; // additional chunks from coalescing
53 ssize_t _coalDeaths; // loss from coalescing 53 ssize_t _coalDeaths; // loss from coalescing
54 ssize_t _splitBirths; // additional chunks from splitting 54 ssize_t _splitBirths; // additional chunks from splitting
55 ssize_t _splitDeaths; // loss from splitting 55 ssize_t _splitDeaths; // loss from splitting
56 size_t _returnedBytes; // number of bytes returned to list. 56 size_t _returnedBytes; // number of bytes returned to list.
57 public: 57 public:
58 void initialize() { 58 void initialize(bool split_birth = false) {
59 AdaptivePaddedAverage* dummy = 59 AdaptivePaddedAverage* dummy =
60 new (&_demand_rate_estimate) AdaptivePaddedAverage(CMS_FLSWeight, 60 new (&_demand_rate_estimate) AdaptivePaddedAverage(CMS_FLSWeight,
61 CMS_FLSPadding); 61 CMS_FLSPadding);
62 _desired = 0; 62 _desired = 0;
63 _coalDesired = 0; 63 _coalDesired = 0;
65 _bfrSurp = 0; 65 _bfrSurp = 0;
66 _prevSweep = 0; 66 _prevSweep = 0;
67 _beforeSweep = 0; 67 _beforeSweep = 0;
68 _coalBirths = 0; 68 _coalBirths = 0;
69 _coalDeaths = 0; 69 _coalDeaths = 0;
70 _splitBirths = 0; 70 _splitBirths = split_birth? 1 : 0;
71 _splitDeaths = 0; 71 _splitDeaths = 0;
72 _returnedBytes = 0; 72 _returnedBytes = 0;
73 } 73 }
74 74
75 AllocationStats() { 75 AllocationStats() {
76 initialize(); 76 initialize();
77 } 77 }
78
78 // The rate estimate is in blocks per second. 79 // The rate estimate is in blocks per second.
79 void compute_desired(size_t count, 80 void compute_desired(size_t count,
80 float inter_sweep_current, 81 float inter_sweep_current,
81 float inter_sweep_estimate) { 82 float inter_sweep_estimate,
83 float intra_sweep_estimate) {
82 // If the latest inter-sweep time is below our granularity 84 // If the latest inter-sweep time is below our granularity
83 // of measurement, we may call in here with 85 // of measurement, we may call in here with
84 // inter_sweep_current == 0. However, even for suitably small 86 // inter_sweep_current == 0. However, even for suitably small
85 // but non-zero inter-sweep durations, we may not trust the accuracy 87 // but non-zero inter-sweep durations, we may not trust the accuracy
86 // of accumulated data, since it has not been "integrated" 88 // of accumulated data, since it has not been "integrated"
87 // (read "low-pass-filtered") long enough, and would be 89 // (read "low-pass-filtered") long enough, and would be
88 // vulnerable to noisy glitches. In such cases, we 90 // vulnerable to noisy glitches. In such cases, we
89 // ignore the current sample and use currently available 91 // ignore the current sample and use currently available
90 // historical estimates. 92 // historical estimates.
93 // XXX NEEDS TO BE FIXED
94 // assert(prevSweep() + splitBirths() >= splitDeaths() + (ssize_t)count, "Conservation Principle");
95 // ^^^^^^^^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
96 // "Total Stock" "Not used at this block size"
91 if (inter_sweep_current > _threshold) { 97 if (inter_sweep_current > _threshold) {
92 ssize_t demand = prevSweep() - count + splitBirths() - splitDeaths(); 98 ssize_t demand = prevSweep() - (ssize_t)count + splitBirths() - splitDeaths();
99 // XXX NEEDS TO BE FIXED
100 // assert(demand >= 0, "Demand should be non-negative");
101 // Defensive: adjust for imprecision in event counting
102 if (demand < 0) {
103 demand = 0;
104 }
105 float old_rate = _demand_rate_estimate.padded_average();
93 float rate = ((float)demand)/inter_sweep_current; 106 float rate = ((float)demand)/inter_sweep_current;
94 _demand_rate_estimate.sample(rate); 107 _demand_rate_estimate.sample(rate);
95 _desired = (ssize_t)(_demand_rate_estimate.padded_average() 108 float new_rate = _demand_rate_estimate.padded_average();
96 *inter_sweep_estimate); 109 ssize_t old_desired = _desired;
110 _desired = (ssize_t)(new_rate * (inter_sweep_estimate
111 + (CMSExtrapolateSweep
112 ? intra_sweep_estimate
113 : 0.0)));
114 if (PrintFLSStatistics > 1) {
115 gclog_or_tty->print_cr("demand: " SSIZE_FORMAT ", old_rate: %f, current_rate: %f, new_rate: %f, old_desired: " SSIZE_FORMAT ", new_desired: " SSIZE_FORMAT,
116 demand, old_rate, rate, new_rate, old_desired, _desired);
117 }
97 } 118 }
98 } 119 }
99 120
100 ssize_t desired() const { return _desired; } 121 ssize_t desired() const { return _desired; }
101 void set_desired(ssize_t v) { _desired = v; } 122 void set_desired(ssize_t v) { _desired = v; }