src/share/vm/opto/callGenerator.hpp @ 0:a61af66fc99e jdk7-b24

Initial load
author duke
date Sat, 01 Dec 2007 00:00:00 +0000
parents
children 7c57aead6d3e
/*
 * Copyright 2000-2005 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

//---------------------------CallGenerator-------------------------------------
// The subclasses of this class handle generation of ideal nodes for
// call sites and method entry points.

class CallGenerator : public ResourceObj {
 public:
  enum {
    xxxunusedxxx
  };

 private:
  ciMethod* _method;  // The method being called.

 protected:
  CallGenerator(ciMethod* method);

 public:
  // Accessors
  ciMethod* method() const { return _method; }

  // is_inline: At least some code implementing the method is copied here.
  virtual bool is_inline() const { return false; }
  // is_intrinsic: There's a method-specific way of generating the inline code.
  virtual bool is_intrinsic() const { return false; }
  // is_parse: Bytecodes implementing the specific method are copied here.
  virtual bool is_parse() const { return false; }
  // is_virtual: The call uses the receiver type to select or check the method.
  virtual bool is_virtual() const { return false; }
  // is_deferred: The decision whether to inline or not is deferred.
  virtual bool is_deferred() const { return false; }
  // is_predicted: Uses an explicit check against a predicted type.
  virtual bool is_predicted() const { return false; }
  // is_trap: Does not return to the caller. (E.g., uncommon trap.)
  virtual bool is_trap() const { return false; }

  // Note: It is possible for a CG to be both inline and virtual.
  // (The hashCode intrinsic does a vtable check and an inlined fast path.)

  // Utilities:
  const TypeFunc* tf() const;

  // The given jvms has state and arguments for a call to my method.
  // Edges after jvms->argoff() carry all (pre-popped) argument values.
  //
  // Update the map with state and return values (if any) and return it.
  // The return values (0, 1, or 2) must be pushed on the map's stack,
  // and the sp of the jvms incremented accordingly.
  //
  // The jvms is returned on success. Alternatively, a copy of the
  // given jvms, suitably updated, may be returned, in which case the
  // caller should discard the original jvms.
  //
  // The non-Parm edges of the returned map will contain updated global state,
  // and one or two edges before jvms->sp() will carry any return values.
  // Other map edges may contain locals or monitors, and should not
  // be changed in meaning.
  //
  // If the call traps, the returned map must have a control edge of top.
  // If the call can throw, the returned map must report has_exceptions().
  //
  // If the result is NULL, it means that this CallGenerator was unable
  // to handle the given call, and another CallGenerator should be consulted.
  virtual JVMState* generate(JVMState* jvms) = 0;
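
  // Illustrative use of this contract (an assumption sketched in the style
  // of the call-site code in doCall.cpp, not a quote of it):
  //   JVMState* new_jvms = cg->generate(jvms);
  //   if (new_jvms == NULL) {
  //     // This generator declined the site; consult another CallGenerator.
  //   } else {
  //     // new_jvms->map() carries the updated global state, and any
  //     // return value has been pushed, with new_jvms->sp() adjusted.
  //   }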

  // How to generate a call site that is inlined:
  static CallGenerator* for_inline(ciMethod* m, float expected_uses = -1);
  // How to generate code for an on-stack replacement handler.
  static CallGenerator* for_osr(ciMethod* m, int osr_bci);

  // How to generate vanilla out-of-line call sites:
  static CallGenerator* for_direct_call(ciMethod* m);  // static, special
  static CallGenerator* for_virtual_call(ciMethod* m, int vtable_index);  // virtual, interface

  // How to make a call but defer the decision whether to inline or not.
  static CallGenerator* for_warm_call(WarmCallInfo* ci,
                                      CallGenerator* if_cold,
                                      CallGenerator* if_hot);

  // How to make a call that optimistically assumes a receiver type:
  static CallGenerator* for_predicted_call(ciKlass* predicted_receiver,
                                           CallGenerator* if_missed,
                                           CallGenerator* if_hit,
                                           float hit_prob);

  // How to make a call that gives up and goes back to the interpreter:
  static CallGenerator* for_uncommon_trap(ciMethod* m,
                                          Deoptimization::DeoptReason reason,
                                          Deoptimization::DeoptAction action);

  // Registry for intrinsics:
  static CallGenerator* for_intrinsic(ciMethod* m);
  static void register_intrinsic(ciMethod* m, CallGenerator* cg);
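
  // Illustrative selection of a generator (an assumption sketched in the
  // style of the selection logic in doCall.cpp, not a quote of it):
  //   CallGenerator* cg = CallGenerator::for_intrinsic(callee);
  //   if (cg == NULL && can_inline)
  //     cg = CallGenerator::for_inline(callee, expected_uses);
  //   if (cg == NULL && call_is_virtual)
  //     cg = CallGenerator::for_virtual_call(callee, vtable_index);
  //   if (cg == NULL)
  //     cg = CallGenerator::for_direct_call(callee);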
};

class InlineCallGenerator : public CallGenerator {
  virtual bool is_inline() const { return true; }

 protected:
  InlineCallGenerator(ciMethod* method) : CallGenerator(method) { }
};
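
// Illustrative sketch only, not part of the original header: a minimal
// concrete generator showing how the generate() contract is typically met.
// The class name is hypothetical; the real parser-based inliner is
// ParseGenerator in callGenerator.cpp, where Parse, GraphKit, and Compile
// are in scope, and this sketch assumes that context.
class ExampleParseGenerator : public InlineCallGenerator {
  float _expected_uses;
 public:
  ExampleParseGenerator(ciMethod* method, float expected_uses)
    : InlineCallGenerator(method), _expected_uses(expected_uses) { }

  virtual bool is_parse() const { return true; }

  virtual JVMState* generate(JVMState* jvms) {
    // Parse the callee's bytecodes directly into the caller's graph.
    Parse parser(jvms, method(), _expected_uses);
    if (Compile::current()->failing()) {
      return NULL;  // compilation is bailing out; report failure per the contract
    }
    // Merge normal exits and collect pending exceptions into a fresh jvms.
    return parser.exits().transfer_exceptions_into_jvms();
  }
};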


//---------------------------WarmCallInfo--------------------------------------
// A struct to collect information about a given call site.
// Helps sort call sites into "hot", "medium", and "cold".
// Participates in the queueing of "medium" call sites for possible inlining.
class WarmCallInfo : public ResourceObj {
 private:

  CallNode*      _call;    // The CallNode which may be inlined.
  CallGenerator* _hot_cg;  // CG for expanding the call node.

  // These are the metrics we use to evaluate call sites:

  float          _count;   // How often do we expect to reach this site?
  float          _profit;  // How much time do we expect to save by inlining?
  float          _work;    // How long do we expect the average call to take?
  float          _size;    // How big do we expect the inlined code to be?

  float          _heat;    // Combined score inducing total order on call sites.
  WarmCallInfo*  _next;    // Next cooler call info in pending queue.

  // Count is the number of times this call site is expected to be executed.
  // Large count is favorable for inlining, because the extra compilation
  // work will be amortized more completely.

  // Profit is a rough measure of the amount of time we expect to save
  // per execution of this site if we inline it. (1.0 == call overhead)
  // Large profit favors inlining. Negative profit disables inlining.

  // Work is a rough measure of the amount of time a typical out-of-line
  // call from this site is expected to take. (1.0 == call, no-op, return)
  // Small work is somewhat favorable for inlining, since methods with
  // short "hot" traces are more likely to inline smoothly.

  // Size is the number of graph nodes we expect this method to produce,
  // not counting the inlining of any further warm calls it may include.
  // Small size favors inlining, since small methods are more likely to
  // inline smoothly. The size is estimated by examining the native code
  // if available. The method bytecodes are also examined, assuming
  // empirically observed node counts for each kind of bytecode.

  // Heat is the combined "goodness" of a site's inlining. If we were
  // omniscient, it would be the difference of two sums of future execution
  // times of code emitted for this site (amortized across multiple sites if
  // sharing applies). The two sums are for versions of this call site with
  // and without inlining.

  // We approximate this mythical quantity by playing with averages,
  // rough estimates, and assumptions that history repeats itself.
  // The basic formula count * profit is heuristically adjusted
  // by looking at the expected compilation and execution times
  // of the inlined call.

  // Note: Some of these metrics may not be present in the final product,
  // but exist in development builds to experiment with inline policy tuning.

  // This heuristic framework does not model well the very significant
  // effects of multiple-level inlining. It is possible to see no immediate
  // profit from inlining X->Y, but to get great profit from a subsequent
  // inlining X->Y->Z.
  // Nor does this framework adequately account for the problem of N**2
  // code size in a clique of mutually inlinable methods.
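
  // As a rough sketch (an assumption about the shape of the heuristic; the
  // actual adjustment lives in WarmCallInfo::compute_heat() in
  // callGenerator.cpp):
  //   heat ~= count * profit, scaled down as size and work grow,
  // so that bulky or slow callees need proportionally stronger evidence
  // of hotness before they are inlined.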

  WarmCallInfo* next() const { return _next; }
  void set_next(WarmCallInfo* n) { _next = n; }

  static WarmCallInfo* _always_hot;
  static WarmCallInfo* _always_cold;

 public:
  // Because WarmCallInfo objects live over the entire lifetime of the
  // Compile object, they are allocated into the comp_arena, which
  // does not get resource-marked or reset during the compile process.
  void* operator new(size_t x, Compile* C) { return C->comp_arena()->Amalloc(x); }
  void operator delete(void*) { }  // fast deallocation
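
  // For example (illustrative only), a per-site record is created with the
  // placement form declared above and is simply never freed:
  //   WarmCallInfo* ci = new (C) WarmCallInfo();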

  static WarmCallInfo* always_hot();
  static WarmCallInfo* always_cold();

  WarmCallInfo() {
    _call = NULL;
    _hot_cg = NULL;
    _next = NULL;
    _count = _profit = _work = _size = _heat = 0;
  }

  CallNode* call() const { return _call; }
  float count()  const { return _count; }
  float size()   const { return _size; }
  float work()   const { return _work; }
  float profit() const { return _profit; }
  float heat()   const { return _heat; }

  void set_count(float x)  { _count = x; }
  void set_size(float x)   { _size = x; }
  void set_work(float x)   { _work = x; }
  void set_profit(float x) { _profit = x; }
  void set_heat(float x)   { _heat = x; }

  // Load initial heuristics from profiles, etc.
  // The heuristics can be tweaked further by the caller.
  void init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor);

  static float MAX_VALUE() { return +1.0e10; }
  static float MIN_VALUE() { return -1.0e10; }

  float compute_heat() const;

  void set_call(CallNode* call)      { _call = call; }
  void set_hot_cg(CallGenerator* cg) { _hot_cg = cg; }

  // Do not queue very hot or very cold calls.
  // Make very cold ones out of line immediately.
  // Inline very hot ones immediately.
  // These queries apply various tunable limits
  // to the above metrics in a systematic way.
  // Test for coldness before testing for hotness.
  bool is_cold() const;
  bool is_hot() const;
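
  // The tunable limits are, as far as we can tell, the WarmCall* develop
  // flags (WarmCallMinCount, WarmCallMinProfit, WarmCallMaxWork,
  // WarmCallMaxSize) in c2_globals.hpp; e.g., a site whose count falls
  // below WarmCallMinCount would be classified as cold.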

  // Force a warm call to be hot. This worklists the call node for inlining.
  void make_hot();

  // Force a warm call to be cold. This worklists the call node for out-of-lining.
  void make_cold();

  // A reproducible total ordering, in which heat is the major key.
  bool warmer_than(WarmCallInfo* that);

  // List management. These methods are called with the list head,
  // and return the new list head, inserting or removing the receiver.
  WarmCallInfo* insert_into(WarmCallInfo* head);
  WarmCallInfo* remove_from(WarmCallInfo* head);
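
  // Typical use (illustrative only): the caller re-binds its head pointer,
  // since the receiver may become or displace the head of the queue:
  //   head = ci->insert_into(head);
  //   ...
  //   head = ci->remove_from(head);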

#ifndef PRODUCT
  void print() const;
  void print_all() const;
  int count_all() const;
#endif
};
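
// Illustrative sketch only, not part of the original header: how the
// WarmCallInfo API above fits together at one call site. The function
// name and the way its arguments are obtained are hypothetical; the real
// queueing logic lives in Compile and the parser.
static WarmCallInfo* example_queue_warm_call(Compile* C, WarmCallInfo* head,
                                             CallNode* call, CallGenerator* hot_cg,
                                             float count, float profit,
                                             float work, float size) {
  WarmCallInfo* ci = new (C) WarmCallInfo();  // arena-allocated, never freed
  ci->set_call(call);
  ci->set_hot_cg(hot_cg);
  ci->set_count(count);
  ci->set_profit(profit);
  ci->set_work(work);
  ci->set_size(size);
  ci->set_heat(ci->compute_heat());
  if (ci->is_cold()) { ci->make_cold(); return head; }  // out-of-line immediately
  if (ci->is_hot())  { ci->make_hot();  return head; }  // inline immediately
  return ci->insert_into(head);  // queue a "medium" site, ordered by heat
}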