Mercurial > hg > truffle
comparison src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp @ 3289:b52782ae3880
6946417: G1: Java VisualVM does not support G1 properly.
Summary: Added counters for jstat
Reviewed-by: tonyp, jwilhelm, stefank, ysr, johnc
author | jmasa |
---|---|
date | Thu, 21 Apr 2011 10:23:44 -0700 |
parents | |
children | 8229bd737950 |
comparison
equal
deleted
inserted
replaced
3288:c0dcda80820f | 3289:b52782ae3880 |
---|---|
1 /* | |
2 * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. | |
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA | |
20 * or visit www.oracle.com if you need additional information or have any | |
21 * questions. | |
22 * | |
23 */ | |
24 | |
25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1MONITORINGSUPPORT_HPP | |
26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1MONITORINGSUPPORT_HPP | |
27 | |
28 #include "gc_implementation/shared/hSpaceCounters.hpp" | |
29 | |
30 class G1CollectedHeap; | |
31 class G1SpaceMonitoringSupport; | |
32 | |
33 // Class for monitoring logical spaces in G1. | |
34 // G1 defines a set of regions as a young | |
35 // collection (analogous to a young generation). | |
36 // The young collection is a logical generation | |
37 // with no fixed chunk (see space.hpp) reflecting | |
38 // the address space for the generation. In addition | |
39 // to the young collection there is its complement | |
40 // the non-young collection that is simply the regions | |
41 // not in the young collection. The non-young collection | |
42 // is treated here as a logical old generation only | |
43 // because the monitoring tools expect a generational | |
44 // heap. The monitoring tools expect that a Space | |
45 // (see space.hpp) exists that describes the | |
46 // address space of young collection and non-young | |
47 // collection and such a view is provided here. | |
48 // | |
49 // This class provides interfaces to access | |
50 // the value of variables for the young collection | |
51 // that include the "capacity" and "used" of the | |
52 // young collection along with constant values | |
53 // for the minimum and maximum capacities for | |
54 // the logical spaces. Similarly for the non-young | |
55 // collection. | |
56 // | |
57 // Also provided are counters for G1 concurrent collections | |
58 // and stop-the-world full heap collections. | |
59 // | |
60 // Below is a description of how "used" and "capacity" | |
61 // (or committed) is calculated for the logical spaces. | |
62 // | |
63 // 1) The used space calculation for a pool is not necessarily | |
64 // independent of the others. We can easily get from G1 the overall | |
65 // used space in the entire heap, the number of regions in the young | |
66 // generation (includes both eden and survivors), and the number of | |
67 // survivor regions. So, from that we calculate: | |
68 // | |
69 // survivor_used = survivor_num * region_size | |
70 // eden_used = young_region_num * region_size - survivor_used | |
71 // old_gen_used = overall_used - eden_used - survivor_used | |
72 // | |
73 // Note that survivor_used and eden_used are upper bounds. To get the | |
74 // actual value we would have to iterate over the regions and add up | |
75 // ->used(). But that'd be expensive. So, we'll accept some lack of | |
76 // accuracy for those two. But, we have to be careful when calculating | |
77 // old_gen_used, in case we subtract from overall_used more than the | |
78 // actual number and our result goes negative. | |
79 // | |
80 // 2) Calculating the used space is straightforward, as described | |
81 // above. However, how do we calculate the committed space, given that | |
82 // we allocate space for the eden, survivor, and old gen out of the | |
83 // same pool of regions? One way to do this is to use the used value | |
84 // as also the committed value for the eden and survivor spaces and | |
85 // then calculate the old gen committed space as follows: | |
86 // | |
87 // old_gen_committed = overall_committed - eden_committed - survivor_committed | |
88 // | |
89 // Maybe a better way to do that would be to calculate used for eden | |
90 // and survivor as a sum of ->used() over their regions and then | |
91 // calculate committed as region_num * region_size (i.e., what we use | |
92 // to calculate the used space now). This is something to consider | |
93 // in the future. | |
94 // | |
95 // 3) Another decision that is again not straightforward is what is | |
96 // the max size that each memory pool can grow to. One way to do this | |
97 // would be to use the committed size for the max for the eden and | |
98 // survivors and calculate the old gen max as follows (basically, it's | |
99 // a similar pattern to what we use for the committed space, as | |
100 // described above): | |
101 // | |
102 // old_gen_max = overall_max - eden_max - survivor_max | |
103 // | |
104 // Unfortunately, the above makes the max of each pool fluctuate over | |
105 // time and, even though this is allowed according to the spec, it | |
106 // broke several assumptions in the M&M framework (there were cases | |
107 // where used would reach a value greater than max). So, for max we | |
108 // use -1, which means "undefined" according to the spec. | |
109 // | |
110 // 4) Now, there is a very subtle issue with all the above. The | |
111 // framework will call get_memory_usage() on the three pools | |
112 // asynchronously. As a result, each call might get a different value | |
113 // for, say, survivor_num which will yield inconsistent values for | |
114 // eden_used, survivor_used, and old_gen_used (as survivor_num is used | |
115 // in the calculation of all three). This would normally be | |
116 // ok. However, it's possible that this might cause the sum of | |
117 // eden_used, survivor_used, and old_gen_used to go over the max heap | |
118 // size and this seems to sometimes cause JConsole (and maybe other | |
119 // clients) to get confused. There isn't really an easy / clean | |
120 // solution to this problem, due to the asynchronous nature of the | |
121 // framework. | |
122 | |
123 class G1MonitoringSupport : public CHeapObj { | |
124 G1CollectedHeap* _g1h; | |
125 VirtualSpace* _g1_storage_addr; | |
126 | |
127 // jstat performance counters | |
128 // incremental collections both fully and partially young | |
129 CollectorCounters* _incremental_collection_counters; | |
130 // full stop-the-world collections | |
131 CollectorCounters* _full_collection_counters; | |
132 // young collection set counters. The _eden_counters, | |
133 // _from_counters, and _to_counters are associated with | |
134 // this "generational" counter. | |
135 GenerationCounters* _young_collection_counters; | |
136 // non-young collection set counters. The _old_space_counters | |
137 // below are associated with this "generational" counter. | |
138 GenerationCounters* _non_young_collection_counters; | |
139 // Counters for the capacity and used for | |
140 // the whole heap | |
141 HSpaceCounters* _old_space_counters; | |
142 // the young collection | |
143 HSpaceCounters* _eden_counters; | |
144 // the survivor collection (only one, _to_counters, is actively used) | |
145 HSpaceCounters* _from_counters; | |
146 HSpaceCounters* _to_counters; | |
147 | |
148 // It returns x - y if x > y, 0 otherwise. | |
149 // As described in the comment above, some of the inputs to the | |
150 // calculations we have to do are obtained concurrently and hence | |
151 // may be inconsistent with each other. So, this provides a | |
152 // defensive way of performing the subtraction and avoids the value | |
153 // going negative (which would mean a very large result, given that | |
154 // the parameter are size_t). | |
155 static size_t subtract_up_to_zero(size_t x, size_t y) { | |
156 if (x > y) { | |
157 return x - y; | |
158 } else { | |
159 return 0; | |
160 } | |
161 } | |
162 | |
163 public: | |
164 G1MonitoringSupport(G1CollectedHeap* g1h, VirtualSpace* g1_storage_addr); | |
165 | |
166 G1CollectedHeap* g1h() { return _g1h; } | |
167 VirtualSpace* g1_storage_addr() { return _g1_storage_addr; } | |
168 | |
169 // Performance Counter accessors | |
170 void update_counters(); | |
171 void update_eden_counters(); | |
172 | |
173 CollectorCounters* incremental_collection_counters() { | |
174 return _incremental_collection_counters; | |
175 } | |
176 CollectorCounters* full_collection_counters() { | |
177 return _full_collection_counters; | |
178 } | |
179 GenerationCounters* non_young_collection_counters() { | |
180 return _non_young_collection_counters; | |
181 } | |
182 HSpaceCounters* old_space_counters() { return _old_space_counters; } | |
183 HSpaceCounters* eden_counters() { return _eden_counters; } | |
184 HSpaceCounters* from_counters() { return _from_counters; } | |
185 HSpaceCounters* to_counters() { return _to_counters; } | |
186 | |
187 // Monitoring support used by | |
188 // MemoryService | |
189 // jstat counters | |
190 size_t overall_committed(); | |
191 size_t overall_used(); | |
192 | |
193 size_t eden_space_committed(); | |
194 size_t eden_space_used(); | |
195 | |
196 size_t survivor_space_committed(); | |
197 size_t survivor_space_used(); | |
198 | |
199 size_t old_space_committed(); | |
200 size_t old_space_used(); | |
201 }; | |
202 | |
203 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1MONITORINGSUPPORT_HPP |