src/share/vm/c1/c1_GraphBuilder.hpp @ 1994:6cd6d394f280

7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
7002546: regression on SpecJbb2005 on 7b118 comparing to 7b117 on small heaps
Summary: Relaxed assertion checking related to the incremental_collection_failed flag to allow for ExplicitGCInvokesConcurrent behaviour, where we do not want a failing scavenge to bail to a stop-world collection. Parameterized incremental_collection_will_fail() so we can selectively use, or not use, the statistical prediction at specific use sites. This essentially reverts the scavenge bail-out logic to what it was prior to some recent changes that had inadvertently started using the statistical prediction, which can be noisy in the presence of bursty loads. Added some associated verbose non-product debugging messages.
Reviewed-by: johnc, tonyp
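
A minimal sketch of the parameterized query described in the summary, assuming the GenCollectedHeap API of this era; the parameter name consult_young is an assumption and the call site below is hypothetical:

    // Call sites now choose whether the (noisy) statistical prediction is
    // consulted, instead of always using it.
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    if (gch->incremental_collection_will_fail(false /* consult_young */)) {
      // only hard evidence (e.g. a previous failure) bails the scavenge
      // out to a stop-world collection
    }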

author    ysr
date      Tue, 07 Dec 2010 21:55:53 -0800
parents   f95d63e2154a
children  a32de5085326

/*
 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_C1_C1_GRAPHBUILDER_HPP
#define SHARE_VM_C1_C1_GRAPHBUILDER_HPP

#include "c1/c1_IR.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_ValueMap.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciMethodData.hpp"
#include "ci/ciStreams.hpp"

class MemoryBuffer;

class GraphBuilder VALUE_OBJ_CLASS_SPEC {
 private:
  // Per-scope data. These are pushed and popped as we descend into
  // inlined methods. Currently in order to generate good code in the
  // inliner we have to attempt to inline methods directly into the
  // basic block we are parsing; this adds complexity.
  class ScopeData: public CompilationResourceObj {
   private:
    ScopeData*        _parent;
    // bci-to-block mapping
    BlockList*        _bci2block;
    // Scope
    IRScope*          _scope;
    // Whether this scope or any parent scope has exception handlers
    bool              _has_handler;
    // The bytecodes
    ciBytecodeStream* _stream;

    // Work list
    BlockList*        _work_list;

    // Maximum inline size for this scope
    intx              _max_inline_size;
    // Expression stack depth at point where inline occurred
    int               _caller_stack_size;

    // The continuation point for the inline. Currently only used in
    // multi-block inlines, but we would eventually like to use it for
    // all inlines for uniformity and simplicity; in that case we would
    // get the continuation point from the BlockList instead of
    // fabricating it anew, because Invokes would be considered to be
    // BlockEnds.
    BlockBegin*       _continuation;

    // Was this ScopeData created only for the parsing and inlining of
    // a jsr?
    bool              _parsing_jsr;
    // We track the destination bci of the jsr only to determine
    // bailout conditions, since we only handle a subset of all of the
    // possible jsr-ret control structures. Recursive invocations of a
    // jsr are disallowed by the verifier.
    int               _jsr_entry_bci;
    // We need to track the local variable in which the return address
    // was stored to ensure we can handle inlining the jsr, because we
    // don't handle arbitrary jsr/ret constructs.
    int               _jsr_ret_addr_local;
    // If we are parsing a jsr, the continuation point for rets
    BlockBegin*       _jsr_continuation;
    // Cloned XHandlers for jsr-related ScopeDatas
    XHandlers*        _jsr_xhandlers;
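
    // Illustrative sketch (not part of the original header): the kind of
    // jsr/ret shape these fields describe, as emitted by older javac for
    // try/finally. The label and local slot below are hypothetical.
    //     jsr   L_finally      // pushes the return address; the bci of
    //                          // L_finally is recorded in _jsr_entry_bci
    //   L_finally:
    //     astore_1             // slot 1 is recorded in _jsr_ret_addr_local
    //     ...                  // finally body
    //     ret   1              // returns through the stored address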

    // Number of returns seen in this scope
    int               _num_returns;

    // In order to generate profitable code for inlining, we currently
    // have to perform an optimization for single-block inlined
    // methods where we continue parsing into the same block. This
    // allows us to perform CSE across inlined scopes and to avoid
    // storing parameters to the stack. Having a global register
    // allocator and being able to perform global CSE would allow this
    // code to be removed and thereby simplify the inliner.
    BlockBegin*       _cleanup_block;       // The block to which the return was added
    Instruction*      _cleanup_return_prev; // Instruction before return instruction
    ValueStack*       _cleanup_state;       // State of that block (not yet pinned)
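
    // Illustrative sketch (not from this file): after a single-block inline,
    // the inliner can use this cleanup info to strip the callee's Return and
    // keep appending into the same block, roughly along these lines
    // (hypothetical surrounding code in GraphBuilder):
    //   _block = scope_data()->inline_cleanup_block();
    //   _last  = scope_data()->inline_cleanup_return_prev(); // drops the Return
    //   _state = scope_data()->inline_cleanup_state();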

   public:
    ScopeData(ScopeData* parent);

    ScopeData* parent() const { return _parent; }

    BlockList* bci2block() const { return _bci2block; }
    void       set_bci2block(BlockList* bci2block) { _bci2block = bci2block; }

    // NOTE: this has a different effect when parsing jsrs
    BlockBegin* block_at(int bci);

    IRScope* scope() const { return _scope; }
    // Has side-effect of setting has_handler flag
    void set_scope(IRScope* scope);

    // Whether this or any parent scope has exception handlers
    bool has_handler() const { return _has_handler; }
    void set_has_handler() { _has_handler = true; }

    // Exception handlers list to be used for this scope
    XHandlers* xhandlers() const;

    // How to get a block to be parsed
    void add_to_work_list(BlockBegin* block);
    // How to remove the next block to be parsed; returns NULL if none left
    BlockBegin* remove_from_work_list();
    // Indicates parse is over
    bool is_work_list_empty() const;

    ciBytecodeStream* stream() { return _stream; }
    void set_stream(ciBytecodeStream* stream) { _stream = stream; }

    intx max_inline_size() const { return _max_inline_size; }

    BlockBegin* continuation() const { return _continuation; }
    void set_continuation(BlockBegin* cont) { _continuation = cont; }

    // Indicates whether this ScopeData was pushed only for the
    // parsing and inlining of a jsr
    bool parsing_jsr() const { return _parsing_jsr; }
    void set_parsing_jsr() { _parsing_jsr = true; }
    int  jsr_entry_bci() const { return _jsr_entry_bci; }
    void set_jsr_entry_bci(int bci) { _jsr_entry_bci = bci; }
    void set_jsr_return_address_local(int local_no) { _jsr_ret_addr_local = local_no; }
    int  jsr_return_address_local() const { return _jsr_ret_addr_local; }
    // Must be called after scope is set up for jsr ScopeData
    void setup_jsr_xhandlers();

    // The jsr continuation is only used when parsing_jsr is true, and
    // is different from the "normal" continuation since we can end up
    // doing a return (rather than a ret) from within a subroutine
    BlockBegin* jsr_continuation() const { return _jsr_continuation; }
    void set_jsr_continuation(BlockBegin* cont) { _jsr_continuation = cont; }

    int num_returns();
    void incr_num_returns();

    void set_inline_cleanup_info(BlockBegin* block,
                                 Instruction* return_prev,
                                 ValueStack* return_state);
    BlockBegin*  inline_cleanup_block() const { return _cleanup_block; }
    Instruction* inline_cleanup_return_prev() const { return _cleanup_return_prev; }
    ValueStack*  inline_cleanup_state() const { return _cleanup_state; }
  };

  // for all GraphBuilders
  static bool _can_trap[Bytecodes::number_of_java_codes];

  // for each instance of GraphBuilder
  ScopeData*    _scope_data;          // Per-scope data; used for inlining
  Compilation*  _compilation;         // the current compilation
  ValueMap*     _vmap;                // the map of values encountered (for CSE)
  MemoryBuffer* _memory;
  const char*   _inline_bailout_msg;  // non-null if most recent inline attempt failed
  int           _instruction_count;   // for bailing out in pathological jsr/ret cases
  BlockBegin*   _start;               // the start block
  BlockBegin*   _osr_entry;           // the OSR entry block
  ValueStack*   _initial_state;       // The state for the start block

  // for each call to connect_to_end; can also be set by inliner
  BlockBegin*   _block;               // the current block
  ValueStack*   _state;               // the current execution state
  Instruction*  _last;                // the last instruction added
  bool          _skip_block;          // skip processing of the rest of this block

  // accessors
  ScopeData*        scope_data() const  { return _scope_data; }
  Compilation*      compilation() const { return _compilation; }
  BlockList*        bci2block() const   { return scope_data()->bci2block(); }
  ValueMap*         vmap() const        { assert(UseLocalValueNumbering, "should not access otherwise"); return _vmap; }
  bool              has_handler() const { return scope_data()->has_handler(); }

  BlockBegin*       block() const       { return _block; }
  ValueStack*       state() const       { return _state; }
  void              set_state(ValueStack* state) { _state = state; }
  IRScope*          scope() const       { return scope_data()->scope(); }
  ciMethod*         method() const      { return scope()->method(); }
  ciBytecodeStream* stream() const      { return scope_data()->stream(); }
  Instruction*      last() const        { return _last; }
  Bytecodes::Code   code() const        { return stream()->cur_bc(); }
  int               bci() const         { return stream()->cur_bci(); }
  int               next_bci() const    { return stream()->next_bci(); }

  // unified bailout support
  void bailout(const char* msg) const { compilation()->bailout(msg); }
  bool bailed_out() const { return compilation()->bailed_out(); }

  // stack manipulation helpers
  void ipush(Value t) const { state()->ipush(t); }
  void lpush(Value t) const { state()->lpush(t); }
  void fpush(Value t) const { state()->fpush(t); }
  void dpush(Value t) const { state()->dpush(t); }
  void apush(Value t) const { state()->apush(t); }
  void push(ValueType* type, Value t) const { state()->push(type, t); }

  Value ipop() { return state()->ipop(); }
  Value lpop() { return state()->lpop(); }
  Value fpop() { return state()->fpop(); }
  Value dpop() { return state()->dpop(); }
  Value apop() { return state()->apop(); }
  Value pop(ValueType* type) { return state()->pop(type); }
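
  // Illustrative sketch (not part of the original header): how a binary
  // bytecode such as iadd is typically lowered with these helpers. The
  // right operand is popped first; the ArithmeticOp constructor shape
  // below is approximate.
  //   Value y = ipop();
  //   Value x = ipop();
  //   ipush(append(new ArithmeticOp(Bytecodes::_iadd, x, y,
  //                                 method()->is_strict(), NULL)));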

  // instruction helpers
  void load_constant();
  void load_local(ValueType* type, int index);
  void store_local(ValueType* type, int index);
  void store_local(ValueStack* state, Value value, ValueType* type, int index);
  void load_indexed (BasicType type);
  void store_indexed(BasicType type);
  void stack_op(Bytecodes::Code code);
  void arithmetic_op(ValueType* type, Bytecodes::Code code, ValueStack* state_before = NULL);
  void negate_op(ValueType* type);
  void shift_op(ValueType* type, Bytecodes::Code code);
  void logic_op(ValueType* type, Bytecodes::Code code);
  void compare_op(ValueType* type, Bytecodes::Code code);
  void convert(Bytecodes::Code op, BasicType from, BasicType to);
  void increment();
  void _goto(int from_bci, int to_bci);
  void if_node(Value x, If::Condition cond, Value y, ValueStack* stack_before);
  void if_zero(ValueType* type, If::Condition cond);
  void if_null(ValueType* type, If::Condition cond);
  void if_same(ValueType* type, If::Condition cond);
  void jsr(int dest);
  void ret(int local_index);
  void table_switch();
  void lookup_switch();
  void method_return(Value x);
  void call_register_finalizer();
  void access_field(Bytecodes::Code code);
  void invoke(Bytecodes::Code code);
  void new_instance(int klass_index);
  void new_type_array();
  void new_object_array();
  void check_cast(int klass_index);
  void instance_of(int klass_index);
  void monitorenter(Value x, int bci);
  void monitorexit(Value x, int bci);
  void new_multi_array(int dimensions);
  void throw_op(int bci);
  Value round_fp(Value fp_value);

  // stack/code manipulation helpers
  Instruction* append_with_bci(Instruction* instr, int bci);
  Instruction* append(Instruction* instr);
  Instruction* append_split(StateSplit* instr);

  // other helpers
  BlockBegin* block_at(int bci) { return scope_data()->block_at(bci); }
  XHandlers* handle_exception(Instruction* instruction);
  void connect_to_end(BlockBegin* beg);
  void null_check(Value value);
  void eliminate_redundant_phis(BlockBegin* start);
  BlockEnd* iterate_bytecodes_for_block(int bci);
  void iterate_all_blocks(bool start_in_current_block_for_inlining = false);
  Dependencies* dependency_recorder() const; // = compilation()->dependencies()
  bool direct_compare(ciKlass* k);

  void kill_all();

  // use of state copy routines (try to minimize unnecessary state
  // object allocations):

  // - if the instruction unconditionally needs a full copy of the
  //   state (for patching, for example), then use copy_state_before*

  // - if the instruction needs a full copy of the state only for
  //   handler generation (Instruction::needs_exception_state() returns
  //   false), then use copy_state_exhandling*

  // - if the instruction needs both a full copy of the state for
  //   handler generation and at least a minimal copy of the state (as
  //   returned by Instruction::exception_state()) for debug info
  //   generation (that is, when Instruction::needs_exception_state()
  //   returns true), then use copy_state_for_exception*

  ValueStack* copy_state_before_with_bci(int bci);
  ValueStack* copy_state_before();
  ValueStack* copy_state_exhandling_with_bci(int bci);
  ValueStack* copy_state_exhandling();
  ValueStack* copy_state_for_exception_with_bci(int bci);
  ValueStack* copy_state_for_exception();
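
  // Illustrative sketch (hypothetical call sites, not from this file),
  // matching the three cases above:
  //   ValueStack* s1 = copy_state_before();        // unconditional full copy
  //                                                // (e.g. for patching)
  //   ValueStack* s2 = copy_state_exhandling();    // full copy only if a
  //                                                // handler needs it
  //   ValueStack* s3 = copy_state_for_exception(); // full copy for handlers,
  //                                                // minimal copy for debug info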

  //
  // Inlining support
  //

  // accessors
  bool parsing_jsr() const { return scope_data()->parsing_jsr(); }
  BlockBegin* continuation() const { return scope_data()->continuation(); }
  BlockBegin* jsr_continuation() const { return scope_data()->jsr_continuation(); }
  void set_continuation(BlockBegin* continuation) { scope_data()->set_continuation(continuation); }
  void set_inline_cleanup_info(BlockBegin* block,
                               Instruction* return_prev,
                               ValueStack* return_state) { scope_data()->set_inline_cleanup_info(block,
                                                                                                 return_prev,
                                                                                                 return_state); }
  BlockBegin*  inline_cleanup_block() const { return scope_data()->inline_cleanup_block(); }
  Instruction* inline_cleanup_return_prev() const { return scope_data()->inline_cleanup_return_prev(); }
  ValueStack*  inline_cleanup_state() const { return scope_data()->inline_cleanup_state(); }
  void incr_num_returns() { scope_data()->incr_num_returns(); }
  int  num_returns() const { return scope_data()->num_returns(); }
  intx max_inline_size() const { return scope_data()->max_inline_size(); }
  int  inline_level() const { return scope()->level(); }
  int  recursive_inline_level(ciMethod* callee) const;

  // inlining of synchronized methods
  void inline_sync_entry(Value lock, BlockBegin* sync_handler);
  void fill_sync_handler(Value lock, BlockBegin* sync_handler, bool default_handler = false);

  // inliners
  bool try_inline(ciMethod* callee, bool holder_known);
  bool try_inline_intrinsics(ciMethod* callee);
  bool try_inline_full(ciMethod* callee, bool holder_known);
  bool try_inline_jsr(int jsr_dest_bci);

  // helpers
  void inline_bailout(const char* msg);
  BlockBegin* header_block(BlockBegin* entry, BlockBegin::Flag f, ValueStack* state);
  BlockBegin* setup_start_block(int osr_bci, BlockBegin* std_entry, BlockBegin* osr_entry, ValueStack* init_state);
  void setup_osr_entry_block();
  void clear_inline_bailout();
  ValueStack* state_at_entry();
  void push_root_scope(IRScope* scope, BlockList* bci2block, BlockBegin* start);
  void push_scope(ciMethod* callee, BlockBegin* continuation);
  void push_scope_for_jsr(BlockBegin* jsr_continuation, int jsr_dest_bci);
  void pop_scope();
  void pop_scope_for_jsr();

  bool append_unsafe_get_obj(ciMethod* callee, BasicType t, bool is_volatile);
  bool append_unsafe_put_obj(ciMethod* callee, BasicType t, bool is_volatile);
  bool append_unsafe_get_raw(ciMethod* callee, BasicType t);
  bool append_unsafe_put_raw(ciMethod* callee, BasicType t);
  bool append_unsafe_prefetch(ciMethod* callee, bool is_store, bool is_static);
  void append_unsafe_CAS(ciMethod* callee);

  NOT_PRODUCT(void print_inline_result(ciMethod* callee, bool res);)

  void profile_call(Value recv, ciKlass* predicted_holder);
  void profile_invocation(ciMethod* inlinee, ValueStack* state);

  // Shortcuts to profiling control.
  bool is_profiling()          { return _compilation->is_profiling(); }
  bool count_invocations()     { return _compilation->count_invocations(); }
  bool count_backedges()       { return _compilation->count_backedges(); }
  bool profile_branches()      { return _compilation->profile_branches(); }
  bool profile_calls()         { return _compilation->profile_calls(); }
  bool profile_inlined_calls() { return _compilation->profile_inlined_calls(); }
  bool profile_checkcasts()    { return _compilation->profile_checkcasts(); }

 public:
  NOT_PRODUCT(void print_stats();)

  // initialization
  static void initialize();

  // public
  static bool can_trap(ciMethod* method, Bytecodes::Code code) {
    assert(0 <= code && code < Bytecodes::number_of_java_codes, "illegal bytecode");
    if (_can_trap[code]) return true;
    // special handling for finalizer registration
    return code == Bytecodes::_return && method->intrinsic_id() == vmIntrinsics::_Object_init;
  }
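
  // Illustrative note (not in the original header): the special case above
  // means a return from java.lang.Object.<init> can trap, since it may have
  // to register a finalizer. Hypothetical call site:
  //   bool may_trap = GraphBuilder::can_trap(method, Bytecodes::_return);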

  // creation
  GraphBuilder(Compilation* compilation, IRScope* scope);
  static void sort_top_into_worklist(BlockList* worklist, BlockBegin* top);

  BlockBegin* start() const { return _start; }
};

#endif // SHARE_VM_C1_C1_GRAPHBUILDER_HPP