Mercurial > hg > truffle
annotate src/share/vm/opto/parse.hpp @ 1721:413ad0331a0c
6977924: Changes for 6975078 produce build error with certain gcc versions
Summary: The changes introduced for 6975078 assign badHeapOopVal to the _allocation field in the ResourceObj class. In 32 bit linux builds with certain versions of gcc this assignment will be flagged as an error while compiling allocation.cpp. In 32 bit builds the constant value badHeapOopVal (which is cast to an intptr_t) is negative. The _allocation field is typed as an unsigned intptr_t and gcc catches this as an error.
Reviewed-by: jcoomes, ysr, phh
author | johnc |
---|---|
date | Wed, 18 Aug 2010 10:59:06 -0700 |
parents | 3941674cc7fa |
children | 4b29a725c43c |
rev | line source |
---|---|
0 | 1 /* |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1344
diff
changeset
|
2 * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1344
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1344
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1344
diff
changeset
|
21 * questions. |
0 | 22 * |
23 */ | |
24 | |
25 class BytecodeParseHistogram; | |
26 class InlineTree; | |
27 class Parse; | |
28 class SwitchRange; | |
29 | |
30 | |
//------------------------------InlineTree-------------------------------------
// A node in the tree of inlining decisions made while compiling one method.
// Each InlineTree records the callee method, the caller's JVM state, and the
// subtrees for calls inlined beneath it (see _subtrees / build_inline_tree_root).
class InlineTree : public ResourceObj {
  Compile*    C;                  // cache of the owning compilation
  JVMState*   _caller_jvms;       // state of caller
  ciMethod*   _method;            // method being called by the caller_jvms
  InlineTree* _caller_tree;       // parent node in the inline tree (NULL at the root, presumably — confirm against build_inline_tree_root)
  uint        _count_inline_bcs;  // Accumulated count of inlined bytecodes
  // Call-site count / interpreter invocation count, scaled recursively.
  // Always between 0.0 and 1.0.  Represents the percentage of the method's
  // total execution time used at this call site.
  const float _site_invoke_ratio;
  const int   _site_depth_adjust; // added to stack_depth() when computing inline_depth()
  float compute_callee_frequency( int caller_bci ) const;

  GrowableArray<InlineTree*> _subtrees;   // inline trees for calls inlined within this method
  friend class Compile;

 protected:
  InlineTree(Compile* C,
             const InlineTree* caller_tree,
             ciMethod* callee_method,
             JVMState* caller_jvms,
             int caller_bci,
             float site_invoke_ratio,
             int site_depth_adjust);
  InlineTree *build_inline_tree_for_callee(ciMethod* callee_method,
                                           JVMState* caller_jvms,
                                           int caller_bci);
  // The three functions below return NULL on success, or a failure-reason
  // string (used by print_inlining) when inlining is rejected.
  // NOTE(review): inferred from the const char* return + failure_msg param — confirm in parse1.cpp.
  const char* try_to_inline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result);
  const char* shouldInline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) const;
  const char* shouldNotInline(ciMethod* callee_method, ciMethod* caller_method, WarmCallInfo* wci_result) const;
  void        print_inlining(ciMethod *callee_method, int caller_bci, const char *failure_msg) const PRODUCT_RETURN;

  InlineTree* caller_tree()       const { return _caller_tree;  }
  InlineTree* callee_at(int bci, ciMethod* m) const;
  // Effective inline depth: interpreter-frame depth plus the per-site adjustment.
  int         inline_depth()      const { return stack_depth() + _site_depth_adjust; }
  int         stack_depth()       const { return _caller_jvms ? _caller_jvms->depth() : 0; }

 public:
  static InlineTree* build_inline_tree_root();
  static InlineTree* find_subtree_from_root(InlineTree* root, JVMState* jvms, ciMethod* callee, bool create_if_not_found = false);

  // For temporary (stack-allocated, stateless) ilts:
  InlineTree(Compile* c, ciMethod* callee_method, JVMState* caller_jvms, float site_invoke_ratio, int site_depth_adjust);

  // InlineTree enum
  enum InlineStyle {
    Inline_do_not_inline            =   0, //
    Inline_cha_is_monomorphic       =   1, //
    Inline_type_profile_monomorphic =   2  //
  };

  // See if it is OK to inline.
  // The receiver is the inline tree for the caller.
  //
  // The result is a temperature indication.  If it is hot or cold,
  // inlining is immediate or undesirable.  Otherwise, the info block
  // returned is newly allocated and may be enqueued.
  //
  // If the method is inlinable, a new inline subtree is created on the fly,
  // and may be accessed by find_subtree_from_root.
  // The call_method is the dest_method for a special or static invocation.
  // The call_method is an optimized virtual method candidate otherwise.
  WarmCallInfo* ok_to_inline(ciMethod *call_method, JVMState* caller_jvms, ciCallProfile& profile, WarmCallInfo* wci);

  // Information about inlined method
  JVMState*   caller_jvms()       const { return _caller_jvms; }
  ciMethod*   method()            const { return _method; }
  // bci of the call site in the caller, or InvocationEntryBci at the root.
  int         caller_bci()        const { return _caller_jvms ? _caller_jvms->bci() : InvocationEntryBci; }
  uint        count_inline_bcs()  const { return _count_inline_bcs; }
  float       site_invoke_ratio() const { return _site_invoke_ratio; };

#ifndef PRODUCT
 private:
  uint        _count_inlines;     // Count of inlined methods
 public:
  // Debug information collected during parse
  uint        count_inlines()     const { return _count_inlines; };
#endif
  // Note: returns the subtree array by value (a copy of the GrowableArray header).
  GrowableArray<InlineTree*> subtrees() { return _subtrees; }
};
112 | |
113 | |
//-----------------------------------------------------------------------------
//------------------------------Parse------------------------------------------
// Parse bytecodes, build a Graph
// Driven by ciTypeFlow results: one Parse::Block per flow basic block, visited
// in reverse post-order (see rpo_at / do_all_blocks).
class Parse : public GraphKit {
 public:
  // Per-block information needed by the parser:
  class Block {
   private:
    ciTypeFlow::Block* _flow;           // the typeflow basic block this wraps
    int                _pred_count;     // how many predecessors in CFG?
    int                _preds_parsed;   // how many of these have been parsed?
    uint               _count;          // how many times executed?  Currently only set by _goto's
    bool               _is_parsed;      // has this block been parsed yet?
    bool               _is_handler;     // is this block an exception handler?
    SafePointNode*     _start_map;      // all values flowing into this block
    MethodLivenessResult _live_locals;  // lazily initialized liveness bitmap

    int                _num_successors; // Includes only normal control flow.
    int                _all_successors; // Include exception paths also.
    Block**            _successors;

    // Use init_node/init_graph to initialize Blocks.
    // Block() : _live_locals((uintptr_t*)NULL,0) { ShouldNotReachHere(); }
    Block() : _live_locals(NULL,0) { ShouldNotReachHere(); }

   public:

    // Set up the block data structure itself.
    void init_node(Parse* outer, int po);
    // Set up the block's relations to other blocks.
    void init_graph(Parse* outer);

    ciTypeFlow::Block* flow() const        { return _flow; }
    int pred_count() const                 { return _pred_count; }
    int preds_parsed() const               { return _preds_parsed; }
    bool is_parsed() const                 { return _is_parsed; }
    bool is_handler() const                { return _is_handler; }
    void set_count( uint x )               { _count = x; }
    uint count() const                     { return _count; }

    SafePointNode* start_map() const       { assert(is_merged(),""); return _start_map; }
    void set_start_map(SafePointNode* m)   { assert(!is_merged(), ""); _start_map = m; }

    // True after any predecessor flows control into this block
    bool is_merged() const                 { return _start_map != NULL; }

    // True when all non-exception predecessors have been parsed.
    bool is_ready() const                  { return preds_parsed() == pred_count(); }

    int num_successors() const             { return _num_successors; }
    int all_successors() const             { return _all_successors; }
    Block* successor_at(int i) const {
      assert((uint)i < (uint)all_successors(), "");
      return _successors[i];
    }
    Block* successor_for_bci(int bci);

    // Bytecode range and entry stack size, delegated to the typeflow block.
    int start() const                      { return flow()->start(); }
    int limit() const                      { return flow()->limit(); }
    int rpo() const                        { return flow()->rpo(); }
    int start_sp() const                   { return flow()->stack_size(); }

    bool is_loop_head() const              { return flow()->is_loop_head(); }
    bool is_SEL_head() const               { return flow()->is_single_entry_loop_head(); }
    // A backedge into a single-entry loop: the predecessor comes later (or equal) in RPO.
    bool is_SEL_backedge(Block* pred) const{ return is_SEL_head() && pred->rpo() >= rpo(); }
    bool is_invariant_local(uint i) const  {
      const JVMState* jvms = start_map()->jvms();
      // Invariance is only tracked for locals, and not in irreducible CFGs.
      if (!jvms->is_loc(i) || flow()->outer()->has_irreducible_entry()) return false;
      return flow()->is_invariant_local(i - jvms->locoff());
    }
    bool can_elide_SEL_phi(uint i) const  { assert(is_SEL_head(),""); return is_invariant_local(i); }

    const Type* peek(int off=0) const      { return stack_type_at(start_sp() - (off+1)); }

    const Type* stack_type_at(int i) const;
    const Type* local_type_at(int i) const;
    static const Type* get_type(ciType* t) { return Type::get_typeflow_type(t); }

    bool has_trap_at(int bci) const        { return flow()->has_trap() && flow()->trap_bci() == bci; }

    // Call this just before parsing a block.
    void mark_parsed() {
      assert(!_is_parsed, "must parse each block exactly once");
      _is_parsed = true;
    }

    // Return the phi/region input index for the "current" pred,
    // and bump the pred number.  For historical reasons these index
    // numbers are handed out in descending order.  The last index is
    // always PhiNode::Input (i.e., 1).  The value returned is known
    // as a "path number" because it distinguishes by which path we are
    // entering the block.
    int next_path_num() {
      assert(preds_parsed() < pred_count(), "too many preds?");
      return pred_count() - _preds_parsed++;
    }

    // Add a previously unaccounted predecessor to this block.
    // This operates by increasing the size of the block's region
    // and all its phi nodes (if any).  The value returned is a
    // path number ("pnum").
    int add_new_path();

    // Initialize me by recording the parser's map.  My own map must be NULL.
    void record_state(Parse* outer);
  };

#ifndef PRODUCT
  // BytecodeParseHistogram collects number of bytecodes parsed, nodes constructed, and transformations.
  class BytecodeParseHistogram : public ResourceObj {
   private:
    enum BPHType {
      BPH_transforms,
      BPH_values
    };
    // Per-bytecode counters, indexed by Bytecodes::Code, shared across all parses.
    static bool _initialized;
    static uint _bytecodes_parsed [Bytecodes::number_of_codes];
    static uint _nodes_constructed[Bytecodes::number_of_codes];
    static uint _nodes_transformed[Bytecodes::number_of_codes];
    static uint _new_values       [Bytecodes::number_of_codes];

    // Snapshot taken at set_initial_state(); record_change() diffs against it.
    Bytecodes::Code _initial_bytecode;
    int             _initial_node_count;
    int             _initial_transforms;
    int             _initial_values;

    Parse     *_parser;
    Compile   *_compiler;

    // Initialization
    static void reset();

    // Return info being collected, select with global flag 'BytecodeParseInfo'
    int current_count(BPHType info_selector);

   public:
    BytecodeParseHistogram(Parse *p, Compile *c);
    static bool initialized();

    // Record info when starting to parse one bytecode
    void set_initial_state( Bytecodes::Code bc );
    // Record results of parsing one bytecode
    void record_change();

    // Profile printing
    static void print(float cutoff = 0.01F);  // cutoff in percent
  };

 public:
  // Record work done during parsing
  BytecodeParseHistogram* _parse_histogram;
  void set_parse_histogram(BytecodeParseHistogram *bph) { _parse_histogram = bph; }
  BytecodeParseHistogram* parse_histogram()      { return _parse_histogram; }
#endif

 private:
  friend class Block;

  // Variables which characterize this compilation as a whole:

  JVMState*     _caller;        // JVMS which carries incoming args & state.
  float         _expected_uses; // expected number of calls to this code
  float         _prof_factor;   // discount applied to my profile counts
  int           _depth;         // Inline tree depth, for debug printouts
  const TypeFunc*_tf;           // My kind of function type
  int           _entry_bci;     // the osr bci or InvocationEntryBci

  ciTypeFlow*   _flow;          // Results of previous flow pass.
  Block*        _blocks;        // Array of basic-block structs.
  int           _block_count;   // Number of elements in _blocks.

  GraphKit      _exits;         // Record all normal returns and throws here.
  bool          _wrote_final;   // Did we write a final field?
  bool          _count_invocations;  // update and test invocation counter
  bool          _method_data_update; // update method data oop

  // Variables which track Java semantics during bytecode parsing:

  Block*            _block;     // block currently getting parsed
  ciBytecodeStream  _iter;      // stream of this method's bytecodes

  int           _blocks_merged; // Progress meter: state merges from BB preds
  int           _blocks_parsed; // Progress meter: BBs actually parsed

  const FastLockNode* _synch_lock; // FastLockNode for synchronized method

#ifndef PRODUCT
  int _max_switch_depth;        // Debugging SwitchRanges.
  int _est_switch_depth;        // Debugging SwitchRanges.
#endif

 public:
  // Constructor
  Parse(JVMState* caller, ciMethod* parse_method, float expected_uses);

  virtual Parse* is_Parse() const { return (Parse*)this; }

 public:
  // Accessors.
  JVMState*     caller()        const { return _caller; }
  float         expected_uses() const { return _expected_uses; }
  float         prof_factor()   const { return _prof_factor; }
  int           depth()         const { return _depth; }
  const TypeFunc* tf()          const { return _tf; }
  //            entry_bci()     -- see osr_bci, etc.

  ciTypeFlow*   flow()          const { return _flow; }
  //            blocks()        -- see rpo_at, start_block, etc.
  int           block_count()   const { return _block_count; }

  GraphKit&     exits()               { return _exits; }
  bool          wrote_final() const   { return _wrote_final; }
  void      set_wrote_final(bool z)   { _wrote_final = z; }
  bool          count_invocations() const  { return _count_invocations; }
  bool          method_data_update() const { return _method_data_update; }

  Block*             block()    const { return _block; }
  ciBytecodeStream&  iter()           { return _iter; }
  Bytecodes::Code    bc()       const { return _iter.cur_bc(); }

  void set_block(Block* b)            { _block = b; }

  // Derived accessors:
  bool is_normal_parse() const  { return _entry_bci == InvocationEntryBci; }
  bool is_osr_parse() const     { return _entry_bci != InvocationEntryBci; }
  int osr_bci() const           { assert(is_osr_parse(),""); return _entry_bci; }

  void set_parse_bci(int bci);

  // Must this parse be aborted?
  bool failing()                { return C->failing(); }

  // Block lookup by reverse post-order index.
  Block* rpo_at(int rpo) {
    assert(0 <= rpo && rpo < _block_count, "oob");
    return &_blocks[rpo];
  }
  Block* start_block() {
    return rpo_at(flow()->start_block()->rpo());
  }
  // Can return NULL if the flow pass did not complete a block.
  Block* successor_for_bci(int bci) {
    return block()->successor_for_bci(bci);
  }

 private:
  // Create a JVMS & map for the initial state of this method.
  SafePointNode* create_entry_map();

  // OSR helpers
  Node *fetch_interpreter_state(int index, BasicType bt, Node *local_addrs, Node *local_addrs_base);
  Node* check_interpreter_type(Node* l, const Type* type, SafePointNode* &bad_type_exit);
  void  load_interpreter_state(Node* osr_buf);

  // Functions for managing basic blocks:
  void init_blocks();
  void load_state_from(Block* b);
  void store_state_to(Block* b) { b->record_state(this); }

  // Parse all the basic blocks.
  void do_all_blocks();

  // Parse the current basic block
  void do_one_block();

  // Raise an error if we get a bad ciTypeFlow CFG.
  void handle_missing_successor(int bci);

  // first actions (before BCI 0)
  void do_method_entry();

  // implementation of monitorenter/monitorexit
  void do_monitor_enter();
  void do_monitor_exit();

  // Eagerly create phis throughout the state, to cope with back edges.
  void ensure_phis_everywhere();

  // Merge the current mapping into the basic block starting at bci
  void merge(          int target_bci);
  // Same as plain merge, except that it allocates a new path number.
  void merge_new_path( int target_bci);
  // Merge the current mapping into an exception handler.
  void merge_exception(int target_bci);
  // Helper: Merge the current mapping into the given basic block
  void merge_common(Block* target, int pnum);
  // Helper functions for merging individual cells.
  PhiNode *ensure_phi(       int idx, bool nocreate = false);
  PhiNode *ensure_memory_phi(int idx, bool nocreate = false);
  // Helper to merge the current memory state into the given basic block
  void merge_memory_edges(MergeMemNode* n, int pnum, bool nophi);

  // Parse this bytecode, and alter the Parser's JVM->Node mapping
  void do_one_bytecode();

  // helper function to generate array store check
  void array_store_check();
  // Helper function to generate array load
  void array_load(BasicType etype);
  // Helper function to generate array store
  void array_store(BasicType etype);
  // Helper function to compute array addressing
  Node* array_addressing(BasicType type, int vals, const Type* *result2=NULL);

  // Pass current map to exits
  void return_current(Node* value);

  // Register finalizers on return from Object.<init>
  void call_register_finalizer();

  // Insert a compiler safepoint into the graph
  void add_safepoint();

  // Insert a compiler safepoint into the graph, if there is a back-branch.
  void maybe_add_safepoint(int target_bci) {
    if (UseLoopSafepoints && target_bci <= bci()) {
      add_safepoint();
    }
  }

  // Return true if the parser should add a loop predicate
  bool should_add_predicate(int target_bci);
  // Insert a loop predicate into the graph
  void add_predicate();

  // Note:  Intrinsic generation routines may be found in library_call.cpp.

  // Helper function to setup Ideal Call nodes
  void do_call();

  // Helper function to uncommon-trap or bailout for non-compilable call-sites
  bool can_not_compile_call_site(ciMethod *dest_method, ciInstanceKlass *klass);

  // Helper function to identify inlining potential at call-site
  ciMethod* optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* klass,
                              ciMethod *dest_method, const TypeOopPtr* receiver_type);

  // Helper function to setup for type-profile based inlining
  bool prepare_type_profile_inline(ciInstanceKlass* prof_klass, ciMethod* prof_method);

  // Helper functions for type checking bytecodes:
  void  do_checkcast();
  void  do_instanceof();

  // Helper functions for shifting & arithmetic
  void modf();
  void modd();
  void l2f();

  void do_irem();

  // implementation of _get* and _put* bytecodes
  void do_getstatic() { do_field_access(true,  false); }
  void do_getfield () { do_field_access(true,  true); }
  void do_putstatic() { do_field_access(false, false); }
  void do_putfield () { do_field_access(false, true); }

  // common code for making initial checks and forming addresses
  void do_field_access(bool is_get, bool is_field);
  bool static_field_ok_in_clinit(ciField *field, ciMethod *method);

  // common code for actually performing the load or store
  void do_get_xxx(const TypePtr* obj_type, Node* obj, ciField* field, bool is_field);
  void do_put_xxx(const TypePtr* obj_type, Node* obj, ciField* field, bool is_field);

  // loading from a constant field or the constant pool
  // returns false if push failed (non-perm field constants only, not ldcs)
  bool push_constant(ciConstant con, bool require_constant = false);

  // implementation of object creation bytecodes
  void emit_guard_for_new(ciInstanceKlass* klass);
  void do_new();
  void do_newarray(BasicType elemtype);
  void do_anewarray();
  void do_multianewarray();
  Node* expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, int ndimensions, int nargs);

  // implementation of jsr/ret
  void do_jsr();
  void do_ret();

  // Branch-probability helpers used when parsing conditionals.
  float   dynamic_branch_prediction(float &cnt);
  float   branch_prediction(float &cnt, BoolTest::mask btest, int target_bci);
  bool    seems_never_taken(float prob);

  void    do_ifnull(BoolTest::mask btest, Node* c);
  void    do_if(BoolTest::mask btest, Node* c);
  int     repush_if_args();
  void    adjust_map_after_if(BoolTest::mask btest, Node* c, float prob,
                              Block* path, Block* other_path);
  IfNode* jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask);
  Node*   jump_if_join(Node* iffalse, Node* iftrue);
  void    jump_if_true_fork (IfNode *ifNode, int dest_bci_if_true,  int prof_table_index);
  void    jump_if_false_fork(IfNode *ifNode, int dest_bci_if_false, int prof_table_index);
  void    jump_if_always_fork(int dest_bci_if_true,  int prof_table_index);

  friend class SwitchRange;
  void    do_tableswitch();
  void    do_lookupswitch();
  void    jump_switch_ranges(Node* a, SwitchRange* lo, SwitchRange* hi, int depth = 0);
  bool    create_jump_tables(Node* a, SwitchRange* lo, SwitchRange* hi);

  // helper functions for methodData style profiling
  void test_counter_against_threshold(Node* cnt, int limit);
  void increment_and_test_invocation_counter(int limit);
  void test_for_osr_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize offset, int limit);
  Node* method_data_addressing(ciMethodData* md, ciProfileData* data, ByteSize offset, Node* idx = NULL, uint stride = 0);
  void increment_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize offset, Node* idx = NULL, uint stride = 0);
  void set_md_flag_at(ciMethodData* md, ciProfileData* data, int flag_constant);

  void profile_method_entry();
  void profile_taken_branch(int target_bci, bool force_update = false);
  void profile_not_taken_branch(bool force_update = false);
  void profile_call(Node* receiver);
  void profile_generic_call();
  void profile_receiver_type(Node* receiver);
  void profile_ret(int target_bci);
  void profile_null_checkcast();
  void profile_switch_case(int table_index);

  // helper function for call statistics
  void count_compiled_calls(bool at_method_entry, bool is_inline) PRODUCT_RETURN;

  Node_Notes* make_node_notes(Node_Notes* caller_nn);

  // Helper functions for handling normal and abnormal exits.
  void build_exits();

  // Fix up all exceptional control flow exiting a single bytecode.
  void do_exceptions();

  // Fix up all exiting control flow at the end of the parse.
  void do_exits();

  // Add Catch/CatchProjs
  // The call is either a Java call or the VM's rethrow stub
  void catch_call_exceptions(ciExceptionHandlerStream&);

  // Handle all exceptions thrown by the inlined method.
  // Also handles exceptions for individual bytecodes.
  void catch_inline_exceptions(SafePointNode* ex_map);

  // Merge the given map into correct exceptional exit state.
  // Assumes that there is no applicable local handler.
  void throw_to_exit(SafePointNode* ex_map);

 public:
#ifndef PRODUCT
  // Handle PrintOpto, etc.
  void show_parse_info();
  void dump_map_adr_mem() const;
  static void print_statistics(); // Print some performance counters
  void dump();
  void dump_bci(int bci);
#endif
};