comparison src/share/vm/memory/allocationStats.hpp @ 0:a61af66fc99e jdk7-b24

Initial load
author duke
date Sat, 01 Dec 2007 00:00:00 +0000
/*
 * Copyright 2001-2005 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

class AllocationStats VALUE_OBJ_CLASS_SPEC {
  // A duration threshold (in ms) used to filter
  // possibly unreliable samples.
  static float _threshold;

  // We measure the demand between the end of the previous sweep and
  // beginning of this sweep:
  //   Count(end_last_sweep) - Count(start_this_sweep)
  //     + splitBirths(between) - splitDeaths(between)
  // The above number divided by the time since the start [END???] of the
  // previous sweep gives us a time rate of demand for blocks
  // of this size. We compute a padded average of this rate as
  // our current estimate for the time rate of demand for blocks
  // of this size. Similarly, we keep a padded average for the time
  // between sweeps. Our current estimate for demand for blocks of
  // this size is then simply computed as the product of these two
  // estimates.
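  //
  // Illustrative example (not from this file; the numbers are made up): if
  // the previous sweep ended with 500 free blocks of this size, this sweep
  // starts with 420, and in between 30 blocks were created by splitting and
  // 10 were consumed by splitting, the demand over that interval is
  //   500 - 420 + 30 - 10 = 100 blocks.
  // Dividing by the measured inter-sweep time gives one sample of the
  // demand rate, which feeds the padded average below; see compute_desired()
  // for the actual arithmetic.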
  AdaptivePaddedAverage _demand_rate_estimate;

  ssize_t     _desired;         // Estimate computed as described above
  ssize_t     _coalDesired;     // desired +/- small-percent for tuning coalescing

  ssize_t     _surplus;         // count - (desired +/- small-percent),
                                // used to tune splitting in best fit
  ssize_t     _bfrSurp;         // surplus at start of current sweep
  ssize_t     _prevSweep;       // count from end of previous sweep
  ssize_t     _beforeSweep;     // count from before current sweep
  ssize_t     _coalBirths;      // additional chunks from coalescing
  ssize_t     _coalDeaths;      // loss from coalescing
  ssize_t     _splitBirths;     // additional chunks from splitting
  ssize_t     _splitDeaths;     // loss from splitting
  size_t      _returnedBytes;   // number of bytes returned to list.
 public:
  void initialize() {
    AdaptivePaddedAverage* dummy =
      new (&_demand_rate_estimate) AdaptivePaddedAverage(CMS_FLSWeight,
                                                         CMS_FLSPadding);
    _desired = 0;
    _coalDesired = 0;
    _surplus = 0;
    _bfrSurp = 0;
    _prevSweep = 0;
    _beforeSweep = 0;
    _coalBirths = 0;
    _coalDeaths = 0;
    _splitBirths = 0;
    _splitDeaths = 0;
    _returnedBytes = 0;
  }

  AllocationStats() {
    initialize();
  }
  // The rate estimate is in blocks per second.
  void compute_desired(size_t count,
                       float inter_sweep_current,
                       float inter_sweep_estimate) {
    // If the latest inter-sweep time is below our granularity
    // of measurement, we may call in here with
    // inter_sweep_current == 0. However, even for suitably small
    // but non-zero inter-sweep durations, we may not trust the accuracy
    // of accumulated data, since it has not been "integrated"
    // (read "low-pass-filtered") long enough, and would be
    // vulnerable to noisy glitches. In such cases, we
    // ignore the current sample and use currently available
    // historical estimates.
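    // For instance (hypothetical values): with _threshold at 10 ms, a 3 ms
    // inter-sweep interval would be ignored here and the previously
    // accumulated padded-average estimate would continue to be used.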
    if (inter_sweep_current > _threshold) {
      ssize_t demand = prevSweep() - count + splitBirths() - splitDeaths();
      float rate = ((float)demand)/inter_sweep_current;
      _demand_rate_estimate.sample(rate);
      _desired = (ssize_t)(_demand_rate_estimate.padded_average()
                           *inter_sweep_estimate);
    }
  }
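  // A minimal usage sketch (hypothetical caller, not part of this file),
  // assuming the sweeper records the block count at the end of each sweep
  // and tracks the measured and estimated inter-sweep times:
  //   stats.set_prevSweep(count_at_end_of_last_sweep);
  //   ...                                    // mutator runs; blocks split/coalesce
  //   stats.compute_desired(current_count, inter_sweep_measured,
  //                         inter_sweep_estimated);
  //   ssize_t target = stats.desired();      // blocks to aim for this cycle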

  ssize_t desired() const { return _desired; }
  ssize_t coalDesired() const { return _coalDesired; }
  void set_coalDesired(ssize_t v) { _coalDesired = v; }

  ssize_t surplus() const { return _surplus; }
  void set_surplus(ssize_t v) { _surplus = v; }
  void increment_surplus() { _surplus++; }
  void decrement_surplus() { _surplus--; }

  ssize_t bfrSurp() const { return _bfrSurp; }
  void set_bfrSurp(ssize_t v) { _bfrSurp = v; }
  ssize_t prevSweep() const { return _prevSweep; }
  void set_prevSweep(ssize_t v) { _prevSweep = v; }
  ssize_t beforeSweep() const { return _beforeSweep; }
  void set_beforeSweep(ssize_t v) { _beforeSweep = v; }

  ssize_t coalBirths() const { return _coalBirths; }
  void set_coalBirths(ssize_t v) { _coalBirths = v; }
  void increment_coalBirths() { _coalBirths++; }

  ssize_t coalDeaths() const { return _coalDeaths; }
  void set_coalDeaths(ssize_t v) { _coalDeaths = v; }
  void increment_coalDeaths() { _coalDeaths++; }

  ssize_t splitBirths() const { return _splitBirths; }
  void set_splitBirths(ssize_t v) { _splitBirths = v; }
  void increment_splitBirths() { _splitBirths++; }

  ssize_t splitDeaths() const { return _splitDeaths; }
  void set_splitDeaths(ssize_t v) { _splitDeaths = v; }
  void increment_splitDeaths() { _splitDeaths++; }

  NOT_PRODUCT(
    size_t returnedBytes() const { return _returnedBytes; }
    void set_returnedBytes(size_t v) { _returnedBytes = v; }
  )
};