Mercurial > hg > truffle
annotate src/share/vm/opto/parse.hpp @ 1994:6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
7002546: regression on SpecJbb2005 on 7b118 comparing to 7b117 on small heaps
Summary: Relaxed assertion checking related to incremental_collection_failed flag to allow for ExplicitGCInvokesConcurrent behaviour where we do not want a failing scavenge to bail to a stop-world collection. Parameterized incremental_collection_will_fail() so we can selectively use, or not use, as appropriate, the statistical prediction at specific use sites. This essentially reverts the scavenge bail-out logic to what it was prior to some recent changes that had inadvertently started using the statistical prediction which can be noisy in the presence of bursty loads. Added some associated verbose non-product debugging messages.
Reviewed-by: johnc, tonyp
author | ysr |
---|---|
date | Tue, 07 Dec 2010 21:55:53 -0800 |
parents | f95d63e2154a |
children | c7f3d0b4570f 9dc311b8473e |
rev | line source |
---|---|
/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
24 | |
1972 | 25 #ifndef SHARE_VM_OPTO_PARSE_HPP |
26 #define SHARE_VM_OPTO_PARSE_HPP | |
27 | |
28 #include "ci/ciMethodData.hpp" | |
29 #include "ci/ciTypeFlow.hpp" | |
30 #include "compiler/methodLiveness.hpp" | |
31 #include "libadt/vectset.hpp" | |
32 #include "oops/generateOopMap.hpp" | |
33 #include "opto/graphKit.hpp" | |
34 #include "opto/subnode.hpp" | |
35 | |
0 | 36 class BytecodeParseHistogram; |
37 class InlineTree; | |
38 class Parse; | |
39 class SwitchRange; | |
40 | |
41 | |
42 //------------------------------InlineTree------------------------------------- | |
43 class InlineTree : public ResourceObj { | |
44 Compile* C; // cache | |
45 JVMState* _caller_jvms; // state of caller | |
46 ciMethod* _method; // method being called by the caller_jvms | |
47 InlineTree* _caller_tree; | |
48 uint _count_inline_bcs; // Accumulated count of inlined bytecodes | |
49 // Call-site count / interpreter invocation count, scaled recursively. | |
50 // Always between 0.0 and 1.0. Represents the percentage of the method's | |
51 // total execution time used at this call site. | |
52 const float _site_invoke_ratio; | |
1157
c3b315a0d58a
6912063: inlining parameters need to be adjusted for some uses of the JVM
jrose
parents:
989
diff
changeset
|
53 const int _site_depth_adjust; |
0 | 54 float compute_callee_frequency( int caller_bci ) const; |
55 | |
56 GrowableArray<InlineTree*> _subtrees; | |
57 friend class Compile; | |
58 | |
59 protected: | |
60 InlineTree(Compile* C, | |
61 const InlineTree* caller_tree, | |
62 ciMethod* callee_method, | |
63 JVMState* caller_jvms, | |
64 int caller_bci, | |
1157
c3b315a0d58a
6912063: inlining parameters need to be adjusted for some uses of the JVM
jrose
parents:
989
diff
changeset
|
65 float site_invoke_ratio, |
c3b315a0d58a
6912063: inlining parameters need to be adjusted for some uses of the JVM
jrose
parents:
989
diff
changeset
|
66 int site_depth_adjust); |
0 | 67 InlineTree *build_inline_tree_for_callee(ciMethod* callee_method, |
68 JVMState* caller_jvms, | |
69 int caller_bci); | |
41
874b2c4f43d1
6667605: (Escape Analysis) inline java constructors when EA is on
kvn
parents:
0
diff
changeset
|
70 const char* try_to_inline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result); |
874b2c4f43d1
6667605: (Escape Analysis) inline java constructors when EA is on
kvn
parents:
0
diff
changeset
|
71 const char* shouldInline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) const; |
874b2c4f43d1
6667605: (Escape Analysis) inline java constructors when EA is on
kvn
parents:
0
diff
changeset
|
72 const char* shouldNotInline(ciMethod* callee_method, ciMethod* caller_method, WarmCallInfo* wci_result) const; |
0 | 73 void print_inlining(ciMethod *callee_method, int caller_bci, const char *failure_msg) const PRODUCT_RETURN; |
74 | |
75 InlineTree *caller_tree() const { return _caller_tree; } | |
76 InlineTree* callee_at(int bci, ciMethod* m) const; | |
1157
c3b315a0d58a
6912063: inlining parameters need to be adjusted for some uses of the JVM
jrose
parents:
989
diff
changeset
|
77 int inline_depth() const { return stack_depth() + _site_depth_adjust; } |
c3b315a0d58a
6912063: inlining parameters need to be adjusted for some uses of the JVM
jrose
parents:
989
diff
changeset
|
78 int stack_depth() const { return _caller_jvms ? _caller_jvms->depth() : 0; } |
0 | 79 |
80 public: | |
81 static InlineTree* build_inline_tree_root(); | |
82 static InlineTree* find_subtree_from_root(InlineTree* root, JVMState* jvms, ciMethod* callee, bool create_if_not_found = false); | |
83 | |
84 // For temporary (stack-allocated, stateless) ilts: | |
1157
c3b315a0d58a
6912063: inlining parameters need to be adjusted for some uses of the JVM
jrose
parents:
989
diff
changeset
|
85 InlineTree(Compile* c, ciMethod* callee_method, JVMState* caller_jvms, float site_invoke_ratio, int site_depth_adjust); |
0 | 86 |
87 // InlineTree enum | |
88 enum InlineStyle { | |
89 Inline_do_not_inline = 0, // | |
90 Inline_cha_is_monomorphic = 1, // | |
91 Inline_type_profile_monomorphic = 2 // | |
92 }; | |
93 | |
94 // See if it is OK to inline. | |
605 | 95 // The receiver is the inline tree for the caller. |
0 | 96 // |
97 // The result is a temperature indication. If it is hot or cold, | |
98 // inlining is immediate or undesirable. Otherwise, the info block | |
99 // returned is newly allocated and may be enqueued. | |
100 // | |
101 // If the method is inlinable, a new inline subtree is created on the fly, | |
102 // and may be accessed by find_subtree_from_root. | |
103 // The call_method is the dest_method for a special or static invocation. | |
104 // The call_method is an optimized virtual method candidate otherwise. | |
105 WarmCallInfo* ok_to_inline(ciMethod *call_method, JVMState* caller_jvms, ciCallProfile& profile, WarmCallInfo* wci); | |
106 | |
107 // Information about inlined method | |
108 JVMState* caller_jvms() const { return _caller_jvms; } | |
109 ciMethod *method() const { return _method; } | |
110 int caller_bci() const { return _caller_jvms ? _caller_jvms->bci() : InvocationEntryBci; } | |
111 uint count_inline_bcs() const { return _count_inline_bcs; } | |
112 float site_invoke_ratio() const { return _site_invoke_ratio; }; | |
113 | |
114 #ifndef PRODUCT | |
115 private: | |
116 uint _count_inlines; // Count of inlined methods | |
117 public: | |
118 // Debug information collected during parse | |
119 uint count_inlines() const { return _count_inlines; }; | |
120 #endif | |
121 GrowableArray<InlineTree*> subtrees() { return _subtrees; } | |
122 }; | |
123 | |
124 | |
125 //----------------------------------------------------------------------------- | |
126 //------------------------------Parse------------------------------------------ | |
127 // Parse bytecodes, build a Graph | |
128 class Parse : public GraphKit { | |
129 public: | |
130 // Per-block information needed by the parser: | |
131 class Block { | |
132 private: | |
133 ciTypeFlow::Block* _flow; | |
134 int _pred_count; // how many predecessors in CFG? | |
135 int _preds_parsed; // how many of these have been parsed? | |
136 uint _count; // how many times executed? Currently only set by _goto's | |
137 bool _is_parsed; // has this block been parsed yet? | |
138 bool _is_handler; // is this block an exception handler? | |
139 SafePointNode* _start_map; // all values flowing into this block | |
140 MethodLivenessResult _live_locals; // lazily initialized liveness bitmap | |
141 | |
142 int _num_successors; // Includes only normal control flow. | |
143 int _all_successors; // Include exception paths also. | |
144 Block** _successors; | |
145 | |
146 // Use init_node/init_graph to initialize Blocks. | |
147 // Block() : _live_locals((uintptr_t*)NULL,0) { ShouldNotReachHere(); } | |
148 Block() : _live_locals(NULL,0) { ShouldNotReachHere(); } | |
149 | |
150 public: | |
151 | |
152 // Set up the block data structure itself. | |
153 void init_node(Parse* outer, int po); | |
154 // Set up the block's relations to other blocks. | |
155 void init_graph(Parse* outer); | |
156 | |
157 ciTypeFlow::Block* flow() const { return _flow; } | |
158 int pred_count() const { return _pred_count; } | |
159 int preds_parsed() const { return _preds_parsed; } | |
160 bool is_parsed() const { return _is_parsed; } | |
161 bool is_handler() const { return _is_handler; } | |
162 void set_count( uint x ) { _count = x; } | |
163 uint count() const { return _count; } | |
164 | |
165 SafePointNode* start_map() const { assert(is_merged(),""); return _start_map; } | |
166 void set_start_map(SafePointNode* m) { assert(!is_merged(), ""); _start_map = m; } | |
167 | |
168 // True after any predecessor flows control into this block | |
169 bool is_merged() const { return _start_map != NULL; } | |
170 | |
171 // True when all non-exception predecessors have been parsed. | |
172 bool is_ready() const { return preds_parsed() == pred_count(); } | |
173 | |
174 int num_successors() const { return _num_successors; } | |
175 int all_successors() const { return _all_successors; } | |
176 Block* successor_at(int i) const { | |
177 assert((uint)i < (uint)all_successors(), ""); | |
178 return _successors[i]; | |
179 } | |
180 Block* successor_for_bci(int bci); | |
181 | |
182 int start() const { return flow()->start(); } | |
183 int limit() const { return flow()->limit(); } | |
367
194b8e3a2fc4
6384206: Phis which are later unneeded are impairing our ability to inline based on static types
never
parents:
248
diff
changeset
|
184 int rpo() const { return flow()->rpo(); } |
0 | 185 int start_sp() const { return flow()->stack_size(); } |
186 | |
367
194b8e3a2fc4
6384206: Phis which are later unneeded are impairing our ability to inline based on static types
never
parents:
248
diff
changeset
|
187 bool is_loop_head() const { return flow()->is_loop_head(); } |
194b8e3a2fc4
6384206: Phis which are later unneeded are impairing our ability to inline based on static types
never
parents:
248
diff
changeset
|
188 bool is_SEL_head() const { return flow()->is_single_entry_loop_head(); } |
194b8e3a2fc4
6384206: Phis which are later unneeded are impairing our ability to inline based on static types
never
parents:
248
diff
changeset
|
189 bool is_SEL_backedge(Block* pred) const{ return is_SEL_head() && pred->rpo() >= rpo(); } |
194b8e3a2fc4
6384206: Phis which are later unneeded are impairing our ability to inline based on static types
never
parents:
248
diff
changeset
|
190 bool is_invariant_local(uint i) const { |
194b8e3a2fc4
6384206: Phis which are later unneeded are impairing our ability to inline based on static types
never
parents:
248
diff
changeset
|
191 const JVMState* jvms = start_map()->jvms(); |
435
b1d6a3e95810
6766316: assert(!nocreate,"Cannot build a phi for a block already parsed.")
kvn
parents:
367
diff
changeset
|
192 if (!jvms->is_loc(i) || flow()->outer()->has_irreducible_entry()) return false; |
367
194b8e3a2fc4
6384206: Phis which are later unneeded are impairing our ability to inline based on static types
never
parents:
248
diff
changeset
|
193 return flow()->is_invariant_local(i - jvms->locoff()); |
194b8e3a2fc4
6384206: Phis which are later unneeded are impairing our ability to inline based on static types
never
parents:
248
diff
changeset
|
194 } |
194b8e3a2fc4
6384206: Phis which are later unneeded are impairing our ability to inline based on static types
never
parents:
248
diff
changeset
|
195 bool can_elide_SEL_phi(uint i) const { assert(is_SEL_head(),""); return is_invariant_local(i); } |
194b8e3a2fc4
6384206: Phis which are later unneeded are impairing our ability to inline based on static types
never
parents:
248
diff
changeset
|
196 |
0 | 197 const Type* peek(int off=0) const { return stack_type_at(start_sp() - (off+1)); } |
198 | |
199 const Type* stack_type_at(int i) const; | |
200 const Type* local_type_at(int i) const; | |
201 static const Type* get_type(ciType* t) { return Type::get_typeflow_type(t); } | |
202 | |
203 bool has_trap_at(int bci) const { return flow()->has_trap() && flow()->trap_bci() == bci; } | |
204 | |
205 // Call this just before parsing a block. | |
206 void mark_parsed() { | |
207 assert(!_is_parsed, "must parse each block exactly once"); | |
208 _is_parsed = true; | |
209 } | |
210 | |
211 // Return the phi/region input index for the "current" pred, | |
212 // and bump the pred number. For historical reasons these index | |
213 // numbers are handed out in descending order. The last index is | |
214 // always PhiNode::Input (i.e., 1). The value returned is known | |
215 // as a "path number" because it distinguishes by which path we are | |
216 // entering the block. | |
217 int next_path_num() { | |
218 assert(preds_parsed() < pred_count(), "too many preds?"); | |
219 return pred_count() - _preds_parsed++; | |
220 } | |
221 | |
222 // Add a previously unaccounted predecessor to this block. | |
223 // This operates by increasing the size of the block's region | |
224 // and all its phi nodes (if any). The value returned is a | |
225 // path number ("pnum"). | |
226 int add_new_path(); | |
227 | |
228 // Initialize me by recording the parser's map. My own map must be NULL. | |
229 void record_state(Parse* outer); | |
230 }; | |
231 | |
232 #ifndef PRODUCT | |
233 // BytecodeParseHistogram collects number of bytecodes parsed, nodes constructed, and transformations. | |
234 class BytecodeParseHistogram : public ResourceObj { | |
235 private: | |
236 enum BPHType { | |
237 BPH_transforms, | |
238 BPH_values | |
239 }; | |
240 static bool _initialized; | |
241 static uint _bytecodes_parsed [Bytecodes::number_of_codes]; | |
242 static uint _nodes_constructed[Bytecodes::number_of_codes]; | |
243 static uint _nodes_transformed[Bytecodes::number_of_codes]; | |
244 static uint _new_values [Bytecodes::number_of_codes]; | |
245 | |
246 Bytecodes::Code _initial_bytecode; | |
247 int _initial_node_count; | |
248 int _initial_transforms; | |
249 int _initial_values; | |
250 | |
251 Parse *_parser; | |
252 Compile *_compiler; | |
253 | |
254 // Initialization | |
255 static void reset(); | |
256 | |
257 // Return info being collected, select with global flag 'BytecodeParseInfo' | |
258 int current_count(BPHType info_selector); | |
259 | |
260 public: | |
261 BytecodeParseHistogram(Parse *p, Compile *c); | |
262 static bool initialized(); | |
263 | |
264 // Record info when starting to parse one bytecode | |
265 void set_initial_state( Bytecodes::Code bc ); | |
266 // Record results of parsing one bytecode | |
267 void record_change(); | |
268 | |
269 // Profile printing | |
270 static void print(float cutoff = 0.01F); // cutoff in percent | |
271 }; | |
272 | |
273 public: | |
274 // Record work done during parsing | |
275 BytecodeParseHistogram* _parse_histogram; | |
276 void set_parse_histogram(BytecodeParseHistogram *bph) { _parse_histogram = bph; } | |
277 BytecodeParseHistogram* parse_histogram() { return _parse_histogram; } | |
278 #endif | |
279 | |
280 private: | |
281 friend class Block; | |
282 | |
283 // Variables which characterize this compilation as a whole: | |
284 | |
285 JVMState* _caller; // JVMS which carries incoming args & state. | |
286 float _expected_uses; // expected number of calls to this code | |
287 float _prof_factor; // discount applied to my profile counts | |
288 int _depth; // Inline tree depth, for debug printouts | |
289 const TypeFunc*_tf; // My kind of function type | |
290 int _entry_bci; // the osr bci or InvocationEntryBci | |
291 | |
292 ciTypeFlow* _flow; // Results of previous flow pass. | |
293 Block* _blocks; // Array of basic-block structs. | |
294 int _block_count; // Number of elements in _blocks. | |
295 | |
296 GraphKit _exits; // Record all normal returns and throws here. | |
297 bool _wrote_final; // Did we write a final field? | |
298 bool _count_invocations; // update and test invocation counter | |
299 bool _method_data_update; // update method data oop | |
300 | |
301 // Variables which track Java semantics during bytecode parsing: | |
302 | |
303 Block* _block; // block currently getting parsed | |
304 ciBytecodeStream _iter; // stream of this method's bytecodes | |
305 | |
306 int _blocks_merged; // Progress meter: state merges from BB preds | |
307 int _blocks_parsed; // Progress meter: BBs actually parsed | |
308 | |
309 const FastLockNode* _synch_lock; // FastLockNode for synchronized method | |
310 | |
311 #ifndef PRODUCT | |
312 int _max_switch_depth; // Debugging SwitchRanges. | |
313 int _est_switch_depth; // Debugging SwitchRanges. | |
314 #endif | |
315 | |
316 public: | |
317 // Constructor | |
318 Parse(JVMState* caller, ciMethod* parse_method, float expected_uses); | |
319 | |
320 virtual Parse* is_Parse() const { return (Parse*)this; } | |
321 | |
322 public: | |
323 // Accessors. | |
324 JVMState* caller() const { return _caller; } | |
325 float expected_uses() const { return _expected_uses; } | |
326 float prof_factor() const { return _prof_factor; } | |
327 int depth() const { return _depth; } | |
328 const TypeFunc* tf() const { return _tf; } | |
329 // entry_bci() -- see osr_bci, etc. | |
330 | |
331 ciTypeFlow* flow() const { return _flow; } | |
367
194b8e3a2fc4
6384206: Phis which are later unneeded are impairing our ability to inline based on static types
never
parents:
248
diff
changeset
|
332 // blocks() -- see rpo_at, start_block, etc. |
0 | 333 int block_count() const { return _block_count; } |
334 | |
335 GraphKit& exits() { return _exits; } | |
336 bool wrote_final() const { return _wrote_final; } | |
337 void set_wrote_final(bool z) { _wrote_final = z; } | |
338 bool count_invocations() const { return _count_invocations; } | |
339 bool method_data_update() const { return _method_data_update; } | |
340 | |
341 Block* block() const { return _block; } | |
342 ciBytecodeStream& iter() { return _iter; } | |
343 Bytecodes::Code bc() const { return _iter.cur_bc(); } | |
344 | |
345 void set_block(Block* b) { _block = b; } | |
346 | |
347 // Derived accessors: | |
348 bool is_normal_parse() const { return _entry_bci == InvocationEntryBci; } | |
349 bool is_osr_parse() const { return _entry_bci != InvocationEntryBci; } | |
350 int osr_bci() const { assert(is_osr_parse(),""); return _entry_bci; } | |
351 | |
352 void set_parse_bci(int bci); | |
353 | |
354 // Must this parse be aborted? | |
355 bool failing() { return C->failing(); } | |
356 | |
367
194b8e3a2fc4
6384206: Phis which are later unneeded are impairing our ability to inline based on static types
never
parents:
248
diff
changeset
|
357 Block* rpo_at(int rpo) { |
194b8e3a2fc4
6384206: Phis which are later unneeded are impairing our ability to inline based on static types
never
parents:
248
diff
changeset
|
358 assert(0 <= rpo && rpo < _block_count, "oob"); |
194b8e3a2fc4
6384206: Phis which are later unneeded are impairing our ability to inline based on static types
never
parents:
248
diff
changeset
|
359 return &_blocks[rpo]; |
0 | 360 } |
361 Block* start_block() { | |
367
194b8e3a2fc4
6384206: Phis which are later unneeded are impairing our ability to inline based on static types
never
parents:
248
diff
changeset
|
362 return rpo_at(flow()->start_block()->rpo()); |
0 | 363 } |
364 // Can return NULL if the flow pass did not complete a block. | |
365 Block* successor_for_bci(int bci) { | |
366 return block()->successor_for_bci(bci); | |
367 } | |
368 | |
369 private: | |
370 // Create a JVMS & map for the initial state of this method. | |
371 SafePointNode* create_entry_map(); | |
372 | |
373 // OSR helpers | |
374 Node *fetch_interpreter_state(int index, BasicType bt, Node *local_addrs, Node *local_addrs_base); | |
375 Node* check_interpreter_type(Node* l, const Type* type, SafePointNode* &bad_type_exit); | |
376 void load_interpreter_state(Node* osr_buf); | |
377 | |
378 // Functions for managing basic blocks: | |
379 void init_blocks(); | |
380 void load_state_from(Block* b); | |
381 void store_state_to(Block* b) { b->record_state(this); } | |
382 | |
383 // Parse all the basic blocks. | |
384 void do_all_blocks(); | |
385 | |
386 // Parse the current basic block | |
387 void do_one_block(); | |
388 | |
389 // Raise an error if we get a bad ciTypeFlow CFG. | |
390 void handle_missing_successor(int bci); | |
391 | |
392 // first actions (before BCI 0) | |
393 void do_method_entry(); | |
394 | |
395 // implementation of monitorenter/monitorexit | |
396 void do_monitor_enter(); | |
397 void do_monitor_exit(); | |
398 | |
399 // Eagerly create phie throughout the state, to cope with back edges. | |
400 void ensure_phis_everywhere(); | |
401 | |
402 // Merge the current mapping into the basic block starting at bci | |
403 void merge( int target_bci); | |
404 // Same as plain merge, except that it allocates a new path number. | |
405 void merge_new_path( int target_bci); | |
406 // Merge the current mapping into an exception handler. | |
407 void merge_exception(int target_bci); | |
408 // Helper: Merge the current mapping into the given basic block | |
409 void merge_common(Block* target, int pnum); | |
410 // Helper functions for merging individual cells. | |
411 PhiNode *ensure_phi( int idx, bool nocreate = false); | |
412 PhiNode *ensure_memory_phi(int idx, bool nocreate = false); | |
413 // Helper to merge the current memory state into the given basic block | |
414 void merge_memory_edges(MergeMemNode* n, int pnum, bool nophi); | |
415 | |
416 // Parse this bytecode, and alter the Parsers JVM->Node mapping | |
417 void do_one_bytecode(); | |
418 | |
419 // helper function to generate array store check | |
420 void array_store_check(); | |
421 // Helper function to generate array load | |
422 void array_load(BasicType etype); | |
423 // Helper function to generate array store | |
424 void array_store(BasicType etype); | |
425 // Helper function to compute array addressing | |
426 Node* array_addressing(BasicType type, int vals, const Type* *result2=NULL); | |
427 | |
428 // Pass current map to exits | |
429 void return_current(Node* value); | |
430 | |
431 // Register finalizers on return from Object.<init> | |
432 void call_register_finalizer(); | |
433 | |
434 // Insert a compiler safepoint into the graph | |
435 void add_safepoint(); | |
436 | |
437 // Insert a compiler safepoint into the graph, if there is a back-branch. | |
438 void maybe_add_safepoint(int target_bci) { | |
439 if (UseLoopSafepoints && target_bci <= bci()) { | |
440 add_safepoint(); | |
441 } | |
442 } | |
443 | |
1172 | 444 // Return true if the parser should add a loop predicate |
445 bool should_add_predicate(int target_bci); | |
446 // Insert a loop predicate into the graph | |
447 void add_predicate(); | |
448 | |
0 | 449 // Note: Intrinsic generation routines may be found in library_call.cpp. |
450 | |
451 // Helper function to setup Ideal Call nodes | |
452 void do_call(); | |
453 | |
454 // Helper function to uncommon-trap or bailout for non-compilable call-sites | |
455 bool can_not_compile_call_site(ciMethod *dest_method, ciInstanceKlass *klass); | |
456 | |
457 // Helper function to identify inlining potential at call-site | |
458 ciMethod* optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* klass, | |
459 ciMethod *dest_method, const TypeOopPtr* receiver_type); | |
460 | |
461 // Helper function to setup for type-profile based inlining | |
462 bool prepare_type_profile_inline(ciInstanceKlass* prof_klass, ciMethod* prof_method); | |
463 | |
464 // Helper functions for type checking bytecodes: | |
465 void do_checkcast(); | |
466 void do_instanceof(); | |
467 | |
468 // Helper functions for shifting & arithmetic | |
469 void modf(); | |
470 void modd(); | |
471 void l2f(); | |
472 | |
473 void do_irem(); | |
474 | |
475 // implementation of _get* and _put* bytecodes | |
476 void do_getstatic() { do_field_access(true, false); } | |
477 void do_getfield () { do_field_access(true, true); } | |
478 void do_putstatic() { do_field_access(false, false); } | |
479 void do_putfield () { do_field_access(false, true); } | |
480 | |
481 // common code for making initial checks and forming addresses | |
482 void do_field_access(bool is_get, bool is_field); | |
483 bool static_field_ok_in_clinit(ciField *field, ciMethod *method); | |
484 | |
485 // common code for actually performing the load or store | |
486 void do_get_xxx(const TypePtr* obj_type, Node* obj, ciField* field, bool is_field); | |
487 void do_put_xxx(const TypePtr* obj_type, Node* obj, ciField* field, bool is_field); | |
488 | |
489 // loading from a constant field or the constant pool | |
490 // returns false if push failed (non-perm field constants only, not ldcs) | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
844
diff
changeset
|
491 bool push_constant(ciConstant con, bool require_constant = false); |
0 | 492 |
493 // implementation of object creation bytecodes | |
1645
3941674cc7fa
6958668: repeated uncommon trapping for new of klass which is being initialized
never
parents:
1552
diff
changeset
|
494 void emit_guard_for_new(ciInstanceKlass* klass); |
0 | 495 void do_new(); |
496 void do_newarray(BasicType elemtype); | |
497 void do_anewarray(); | |
498 void do_multianewarray(); | |
730
9c6be3edf0dc
6589834: deoptimization problem with -XX:+DeoptimizeALot
cfang
parents:
605
diff
changeset
|
499 Node* expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, int ndimensions, int nargs); |
0 | 500 |
501 // implementation of jsr/ret | |
502 void do_jsr(); | |
503 void do_ret(); | |
504 | |
505 float dynamic_branch_prediction(float &cnt); | |
506 float branch_prediction(float &cnt, BoolTest::mask btest, int target_bci); | |
507 bool seems_never_taken(float prob); | |
1746
4b29a725c43c
6912064: type profiles need to be exploited more for dynamic language support
jrose
parents:
1645
diff
changeset
|
508 bool seems_stable_comparison(BoolTest::mask btest, Node* c); |
0 | 509 |
248
18aab3cdd513
6726504: handle do_ifxxx calls in parser more uniformly
rasbold
parents:
196
diff
changeset
|
510 void do_ifnull(BoolTest::mask btest, Node* c); |
0 | 511 void do_if(BoolTest::mask btest, Node* c); |
1172 | 512 int repush_if_args(); |
0 | 513 void adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, |
514 Block* path, Block* other_path); | |
515 IfNode* jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask); | |
516 Node* jump_if_join(Node* iffalse, Node* iftrue); | |
517 void jump_if_true_fork(IfNode *ifNode, int dest_bci_if_true, int prof_table_index); | |
518 void jump_if_false_fork(IfNode *ifNode, int dest_bci_if_false, int prof_table_index); | |
519 void jump_if_always_fork(int dest_bci_if_true, int prof_table_index); | |
520 | |
521 friend class SwitchRange; | |
522 void do_tableswitch(); | |
523 void do_lookupswitch(); | |
524 void jump_switch_ranges(Node* a, SwitchRange* lo, SwitchRange* hi, int depth = 0); | |
525 bool create_jump_tables(Node* a, SwitchRange* lo, SwitchRange* hi); | |
526 | |
527 // helper functions for methodData style profiling | |
528 void test_counter_against_threshold(Node* cnt, int limit); | |
529 void increment_and_test_invocation_counter(int limit); | |
530 void test_for_osr_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize offset, int limit); | |
531 Node* method_data_addressing(ciMethodData* md, ciProfileData* data, ByteSize offset, Node* idx = NULL, uint stride = 0); | |
532 void increment_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize offset, Node* idx = NULL, uint stride = 0); | |
533 void set_md_flag_at(ciMethodData* md, ciProfileData* data, int flag_constant); | |
534 | |
535 void profile_method_entry(); | |
536 void profile_taken_branch(int target_bci, bool force_update = false); | |
537 void profile_not_taken_branch(bool force_update = false); | |
538 void profile_call(Node* receiver); | |
539 void profile_generic_call(); | |
540 void profile_receiver_type(Node* receiver); | |
541 void profile_ret(int target_bci); | |
542 void profile_null_checkcast(); | |
543 void profile_switch_case(int table_index); | |
544 | |
545 // helper function for call statistics | |
546 void count_compiled_calls(bool at_method_entry, bool is_inline) PRODUCT_RETURN; | |
547 | |
548 Node_Notes* make_node_notes(Node_Notes* caller_nn); | |
549 | |
550 // Helper functions for handling normal and abnormal exits. | |
551 void build_exits(); | |
552 | |
553 // Fix up all exceptional control flow exiting a single bytecode. | |
554 void do_exceptions(); | |
555 | |
556 // Fix up all exiting control flow at the end of the parse. | |
557 void do_exits(); | |
558 | |
559 // Add Catch/CatchProjs | |
560 // The call is either a Java call or the VM's rethrow stub | |
561 void catch_call_exceptions(ciExceptionHandlerStream&); | |
562 | |
563 // Handle all exceptions thrown by the inlined method. | |
564 // Also handles exceptions for individual bytecodes. | |
565 void catch_inline_exceptions(SafePointNode* ex_map); | |
566 | |
567 // Merge the given map into correct exceptional exit state. | |
568 // Assumes that there is no applicable local handler. | |
569 void throw_to_exit(SafePointNode* ex_map); | |
570 | |
571 public: | |
572 #ifndef PRODUCT | |
573 // Handle PrintOpto, etc. | |
574 void show_parse_info(); | |
575 void dump_map_adr_mem() const; | |
576 static void print_statistics(); // Print some performance counters | |
577 void dump(); | |
578 void dump_bci(int bci); | |
579 #endif | |
580 }; | |
1972 | 581 |
582 #endif // SHARE_VM_OPTO_PARSE_HPP |