/*
 * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// Optimization - Graph Style

class Block;
class CFGLoop;
class MachCallNode;
class Matcher;
class RootNode;
class VectorSet;
struct Tarjan;

//------------------------------Block_Array------------------------------------
// Map dense integer indices to Blocks.  Uses classic doubling-array trick.
// Abstractly provides an infinite array of Block*'s, initialized to NULL.
// Note that the constructor just zeros things, and since I use Arena
// allocation I do not need a destructor to reclaim storage.
class Block_Array : public ResourceObj {
  uint _size;                   // allocated size, as opposed to formal limit
  debug_only(uint _limit;)      // limit to formal domain
protected:
  Block **_blocks;
  void grow( uint i );          // Grow array node to fit

public:
  Arena *_arena;                // Arena to allocate in

  Block_Array(Arena *a) : _arena(a), _size(OptoBlockListSize) {
    debug_only(_limit=0);
    _blocks = NEW_ARENA_ARRAY( a, Block *, OptoBlockListSize );
    for( int i = 0; i < OptoBlockListSize; i++ ) {
      _blocks[i] = NULL;
    }
  }
  Block *lookup( uint i ) const // Lookup, or NULL for not mapped
  { return (i<Max()) ? _blocks[i] : (Block*)NULL; }
  Block *operator[] ( uint i ) const // Lookup, or assert for not mapped
  { assert( i < Max(), "oob" ); return _blocks[i]; }
  // Extend the mapping: index i maps to Block *n.
  void map( uint i, Block *n ) { if( i>=Max() ) grow(i); _blocks[i] = n; }
  uint Max() const { debug_only(return _limit); return _size; }
};
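
// Illustrative usage sketch (not part of the original header; some_block is a
// placeholder for any Block*): map() grows the backing array on demand, so
// indices past the initial size are fine, and lookup() returns NULL for
// indices that were never mapped.
//
//   Arena *arena = Thread::current()->resource_area();
//   Block_Array bbs(arena);
//   bbs.map(100, some_block);      // grows past OptoBlockListSize if needed
//   Block *b = bbs.lookup(100);    // == some_block
//   Block *c = bbs.lookup(7);      // == NULL, never mapped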


class Block_List : public Block_Array {
public:
  uint _cnt;
  Block_List() : Block_Array(Thread::current()->resource_area()), _cnt(0) {}
  void push( Block *b ) { map(_cnt++,b); }
  Block *pop() { return _blocks[--_cnt]; }
  Block *rpop() { Block *b = _blocks[0]; _blocks[0]=_blocks[--_cnt]; return b;}
  void remove( uint i );
  void insert( uint i, Block *n );
  uint size() const { return _cnt; }
  void reset() { _cnt = 0; }
};
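
// Illustrative usage sketch (not part of the original header; start_block is
// a placeholder): Block_List doubles as a worklist type for CFG traversals.
// push/pop give LIFO order, while rpop() takes from the front.
//
//   Block_List worklist;
//   worklist.push(start_block);
//   while (worklist.size() > 0) {
//     Block *b = worklist.pop();
//     // ... visit b, push not-yet-visited successors ...
//   }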


class CFGElement : public ResourceObj {
public:
  float _freq;                  // Execution frequency (estimate)

  CFGElement() : _freq(0.0f) {}
  virtual bool is_block() { return false; }
  virtual bool is_loop()  { return false; }
  Block*   as_Block()   { assert(is_block(), "must be block"); return (Block*)this; }
  CFGLoop* as_CFGLoop() { assert(is_loop(),  "must be loop");  return (CFGLoop*)this; }
};

//------------------------------Block------------------------------------------
// This class defines a Basic Block.
// Basic blocks are used during the output routines, and are not used during
// any optimization pass.  They are created late in the game.
class Block : public CFGElement {
public:
  // Nodes in this block, in order
  Node_List _nodes;

  // Basic blocks have a Node which defines Control for all Nodes pinned in
  // this block.  This Node is a RegionNode.  Exception-causing Nodes
  // (division, subroutines) and Phi functions are always pinned.  Later,
  // every Node will get pinned to some block.
  Node *head() const { return _nodes[0]; }

  // CAUTION: num_preds() is ONE based, so that predecessor numbers match
  // input edges to Regions and Phis.
  uint num_preds() const { return head()->req(); }
  Node *pred(uint i) const { return head()->in(i); }
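
  // Illustrative note (not from the original header; b and phi are
  // placeholders): because num_preds() is ONE based, a predecessor walk
  // starts at 1 and lines up with Phi inputs:
  //
  //   for (uint i = 1; i < b->num_preds(); i++) {
  //     Node *value_in = phi->in(i);   // value flowing in from b->pred(i)
  //   }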

  // Array of successor blocks, same size as projs array
  Block_Array _succs;

  // Basic blocks have some number of Nodes which split control to all
  // following blocks.  These Nodes are always Projections.  The field in
  // the Projection and the block-ending Node determine which Block follows.
  uint _num_succs;

  // Basic blocks also carry all sorts of good old fashioned DFS information
  // used to find loops, loop nesting depth, dominators, etc.
  uint _pre_order;              // Pre-order DFS number

  // Dominator tree
  uint _dom_depth;              // Depth in dominator tree for fast LCA
  Block* _idom;                 // Immediate dominator block

  CFGLoop *_loop;               // Loop to which this block belongs
  uint _rpo;                    // Number in reverse post order walk

  virtual bool is_block() { return true; }
  float succ_prob(uint i);      // return probability of i'th successor

  Block* dom_lca(Block* that);  // Compute LCA in dominator tree.
#ifdef ASSERT
  bool dominates(Block* that) {
    int dom_diff = this->_dom_depth - that->_dom_depth;
    if (dom_diff > 0)  return false;
    for (; dom_diff < 0; dom_diff++)  that = that->_idom;
    return this == that;
  }
#endif

  // Report the alignment required by this block.  Must be a power of 2.
  // The previous block will insert nops to get this alignment.
  uint code_alignment();

  // BLOCK_FREQUENCY is a sentinel to mark uses of constant block frequencies.
  // It is currently also used to rescale such frequencies, written against
  // the old reference value of 1500, to the current FreqCountInvocations.
#define BLOCK_FREQUENCY(f) ((f * (float) 1500) / FreqCountInvocations)
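
  // Worked example (illustrative): if FreqCountInvocations still equals the
  // old value of 1500, BLOCK_FREQUENCY(0.5f) is just 0.5f; if it were raised
  // to 3000, the same constant would scale down to 0.25f, keeping hand-tuned
  // frequencies comparable.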

  // Register Pressure (estimate) for Splitting heuristic
  uint _reg_pressure;
  uint _ihrp_index;
  uint _freg_pressure;
  uint _fhrp_index;

  // Mark and visited bits for an LCA calculation in insert_anti_dependences.
  // Since they hold unique node indexes, they do not need reinitialization.
  node_idx_t _raise_LCA_mark;
  void set_raise_LCA_mark(node_idx_t x)    { _raise_LCA_mark = x; }
  node_idx_t  raise_LCA_mark() const       { return _raise_LCA_mark; }
  node_idx_t _raise_LCA_visited;
  void set_raise_LCA_visited(node_idx_t x) { _raise_LCA_visited = x; }
  node_idx_t  raise_LCA_visited() const    { return _raise_LCA_visited; }

  // Estimated size in bytes of first instructions in a loop.
  uint _first_inst_size;
  uint first_inst_size() const     { return _first_inst_size; }
  void set_first_inst_size(uint s) { _first_inst_size = s; }

  // Compute the size of first instructions in this block.
  uint compute_first_inst_size(uint& sum_size, uint inst_cnt, PhaseRegAlloc* ra);

  // Compute alignment padding if the block needs it.
  // Align a loop if the loop's padding is less than or equal to the padding
  // limit, or if the size of the loop's first instructions exceeds the padding.
  uint alignment_padding(int current_offset) {
    int block_alignment = code_alignment();
    int max_pad = block_alignment-relocInfo::addr_unit();
    if( max_pad > 0 ) {
      assert(is_power_of_2(max_pad+relocInfo::addr_unit()), "");
      int current_alignment = current_offset & max_pad;
      if( current_alignment != 0 ) {
        uint padding = (block_alignment-current_alignment) & max_pad;
        if( !head()->is_Loop() ||
            padding <= (uint)MaxLoopPad ||
            first_inst_size() > padding ) {
          return padding;
        }
      }
    }
    return 0;
  }
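
  // Worked example (illustrative): with code_alignment() == 16 and
  // relocInfo::addr_unit() == 1, max_pad == 15.  At current_offset == 0x43,
  // current_alignment == 3 and padding == (16 - 3) & 15 == 13.  The 13 bytes
  // of nops are skipped only when this is a loop head, 13 > MaxLoopPad, and
  // the loop's first instructions occupy 13 bytes or fewer.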

  // Connector blocks. Connector blocks are basic blocks devoid of
  // instructions, but may have relevant non-instruction Nodes, such as
  // Phis or MergeMems.  Such blocks are discovered and marked during the
  // RemoveEmpty phase, and elided during Output.
  bool _connector;
  void set_connector() { _connector = true; }
  bool is_connector() const { return _connector; }

  // Create a new Block with given head Node.
  // Creates the (empty) predecessor arrays.
  Block( Arena *a, Node *headnode )
    : CFGElement(),
      _nodes(a),
      _succs(a),
      _num_succs(0),
      _pre_order(0),
      _idom(0),
      _loop(NULL),
      _reg_pressure(0),
      _ihrp_index(1),
      _freg_pressure(0),
      _fhrp_index(1),
      _raise_LCA_mark(0),
      _raise_LCA_visited(0),
      _first_inst_size(999999),
      _connector(false) {
    _nodes.push(headnode);
  }

  // Index of 'end' Node
  uint end_idx() const {
    // %%%%% add a proj after every goto
    // so (last->is_block_proj() != last) always, then simplify this code
    // This will not give correct end_idx for block 0 when it only contains root.
    int last_idx = _nodes.size() - 1;
    Node *last = _nodes[last_idx];
    assert(last->is_block_proj() == last || last->is_block_proj() == _nodes[last_idx - _num_succs], "");
    return (last->is_block_proj() == last) ? last_idx : (last_idx - _num_succs);
  }

  // Basic blocks have a Node which ends them.  This Node determines which
  // basic block follows this one in the program flow.  This Node is either an
  // IfNode, a GotoNode, a JmpNode, or a ReturnNode.
  Node *end() const { return _nodes[end_idx()]; }

  // Add an instruction to an existing block.  It must go after the head
  // instruction and before the end instruction.
  void add_inst( Node *n ) { _nodes.insert(end_idx(),n); }
  // Find node in block
  uint find_node( const Node *n ) const;
  // Find and remove n from block list
  void find_remove( const Node *n );

  // Schedule a call next in the block
  uint sched_call(Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_List &worklist, int *ready_cnt, MachCallNode *mcall, VectorSet &next_call);

  // Perform basic-block local scheduling
  Node *select(PhaseCFG *cfg, Node_List &worklist, int *ready_cnt, VectorSet &next_call, uint sched_slot);
  void set_next_call( Node *n, VectorSet &next_call, Block_Array &bbs );
  void needed_for_next_call(Node *this_call, VectorSet &next_call, Block_Array &bbs);
  bool schedule_local(PhaseCFG *cfg, Matcher &m, int *ready_cnt, VectorSet &next_call);
  // Cleanup if any code lands between a Call and its Catch
  void call_catch_cleanup(Block_Array &bbs);
  // Detect implicit-null-check opportunities.  Basically, find NULL checks
  // with suitable memory ops nearby.  Use the memory op to do the NULL check.
  // I can generate a memory op if there is not one nearby.
  void implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowed_reasons);

  // Return the empty status of a block
  enum { not_empty, empty_with_goto, completely_empty };
  int is_Empty() const;

  // Forward through connectors
  Block* non_connector() {
    Block* s = this;
    while (s->is_connector()) {
      s = s->_succs[0];
    }
    return s;
  }

  // Successor block, after forwarding through connectors
  Block* non_connector_successor(int i) const {
    return _succs[i]->non_connector();
  }

  // Examine block's code shape to predict if it is not commonly executed.
  bool has_uncommon_code() const;

  // Use frequency calculations and code shape to predict if the block
  // is uncommon.
  bool is_uncommon( Block_Array &bbs ) const;

#ifndef PRODUCT
  // Debugging print of basic block
  void dump_bidx(const Block* orig) const;
  void dump_pred(const Block_Array *bbs, Block* orig) const;
  void dump_head( const Block_Array *bbs ) const;
  void dump( ) const;
  void dump( const Block_Array *bbs ) const;
#endif
};


//------------------------------PhaseCFG---------------------------------------
// Build an array of Basic Block pointers, one per Node.
class PhaseCFG : public Phase {
private:
  // Build a proper looking cfg.  Return count of basic blocks
  uint build_cfg();

  // Perform DFS search.
  // Setup 'vertex' as DFS to vertex mapping.
  // Setup 'semi' as vertex to DFS mapping.
  // Set 'parent' to DFS parent.
  uint DFS( Tarjan *tarjan );

  // Helper function to insert a node into a block
  void schedule_node_into_block( Node *n, Block *b );

  // Set the basic block for pinned Nodes
  void schedule_pinned_nodes( VectorSet &visited );

  // I'll need a few machine-specific GotoNodes.  Clone from this one.
  MachNode *_goto;
  void insert_goto_at(uint block_no, uint succ_no);

  Block* insert_anti_dependences(Block* LCA, Node* load, bool verify = false);
  void verify_anti_dependences(Block* LCA, Node* load) {
    assert(LCA == _bbs[load->_idx], "should already be scheduled");
    insert_anti_dependences(LCA, load, true);
  }

public:
  PhaseCFG( Arena *a, RootNode *r, Matcher &m );

  uint _num_blocks;             // Count of basic blocks
  Block_List _blocks;           // List of basic blocks
  RootNode *_root;              // Root of whole program
  Block_Array _bbs;             // Map Nodes to owning Basic Block
  Block *_broot;                // Basic block of root
  uint _rpo_ctr;
  CFGLoop* _root_loop;

  // Per node latency estimation, valid only during GCM
  GrowableArray<uint> _node_latency;

#ifndef PRODUCT
  bool _trace_opto_pipelining;  // tracing flag
#endif

  // Build dominators
  void Dominators();

  // Estimate block frequencies based on IfNode probabilities
  void Estimate_Block_Frequency();

  // Global Code Motion.  See Click's PLDI95 paper.  Place Nodes in specific
  // basic blocks; i.e. _bbs now maps _idx for all Nodes to some Block.
  void GlobalCodeMotion( Matcher &m, uint unique, Node_List &proj_list );

  // Compute the (backwards) latency of a node from the uses
  void latency_from_uses(Node *n);

  // Compute the (backwards) latency of a node from a single use
  int latency_from_use(Node *n, const Node *def, Node *use);

  // Compute the (backwards) latency of a node from the uses of this instruction
  void partial_latency_of_defs(Node *n);

  // Schedule Nodes early in their basic blocks.
  bool schedule_early(VectorSet &visited, Node_List &roots);

  // For each node, find the latest block it can be scheduled into
  // and then select the cheapest block between the latest and earliest
  // block to place the node.
  void schedule_late(VectorSet &visited, Node_List &stack);

  // Pick a block between early and late that is a cheaper alternative
  // to late.  Helper for schedule_late.
  Block* hoist_to_cheaper_block(Block* LCA, Block* early, Node* self);

  // Compute the instruction global latency with a backwards walk
  void ComputeLatenciesBackwards(VectorSet &visited, Node_List &stack);

  // Remove empty basic blocks
  void RemoveEmpty();
  bool MoveToNext(Block* bx, uint b_index);
  void MoveToEnd(Block* bx, uint b_index);

  // Check for NeverBranch at block end.  This needs to become a GOTO to the
  // true target.  NeverBranch nodes are treated as a conditional branch that
  // always goes the same direction for most of the optimizer, and are used to
  // give a fake exit path to infinite loops.  At this late stage they need to
  // turn into Gotos so that when you enter the infinite loop you indeed hang.
  void convert_NeverBranch_to_Goto(Block *b);

  CFGLoop* create_loop_tree();

  // Insert a node into a block, and update the _bbs
  void insert( Block *b, uint idx, Node *n ) {
    b->_nodes.insert( idx, n );
    _bbs.map( n->_idx, b );
  }

#ifndef PRODUCT
  bool trace_opto_pipelining() const { return _trace_opto_pipelining; }

  // Debugging print of CFG
  void dump( ) const;           // CFG only
  void _dump_cfg( const Node *end, VectorSet &visited ) const;
  void verify() const;
  void dump_headers();
#else
  bool trace_opto_pipelining() const { return false; }
#endif
};


//------------------------------UnionFind--------------------------------------
// Map Block indices to a block-index for a cfg-cover.
// Array lookup in the optimized case.
class UnionFind : public ResourceObj {
  uint _cnt, _max;
  uint* _indices;
  ReallocMark _nesting;         // assertion check for reallocations
public:
  UnionFind( uint max );
  void reset( uint max );       // Reset to identity map for [0..max]

  uint lookup( uint nidx ) const {
    return _indices[nidx];
  }
  uint operator[] (uint nidx) const { return lookup(nidx); }

  void map( uint from_idx, uint to_idx ) {
    assert( from_idx < _cnt, "oob" );
    _indices[from_idx] = to_idx;
  }
  void extend( uint from_idx, uint to_idx );

  uint Size() const { return _cnt; }

  uint Find( uint idx ) {
    assert( idx < 65536, "Must fit into 16 bits" );
    uint uf_idx = lookup(idx);
    return (uf_idx == idx) ? uf_idx : Find_compress(idx);
  }
  uint Find_compress( uint idx );
  uint Find_const( uint idx ) const;
  void Union( uint idx1, uint idx2 );

};
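
// Illustrative usage sketch (not part of the original header): the indices
// are whatever dense numbering the client chooses; n, i, and j below are
// placeholders.  Find() compresses paths, so chains flatten as queried.
//
//   UnionFind uf(n);
//   uf.reset(n);                     // identity map: each index its own set
//   uf.Union(i, j);                  // merge the sets holding i and j
//   bool same = (uf.Find(i) == uf.Find(j));   // now true for merged sets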

//----------------------------BlockProbPair---------------------------
// Ordered pair of a target Block* and the probability of the edge to it.
class BlockProbPair VALUE_OBJ_CLASS_SPEC {
protected:
  Block* _target;               // block target
  float  _prob;                 // probability of edge to block
public:
  BlockProbPair() : _target(NULL), _prob(0.0) {}
  BlockProbPair(Block* b, float p) : _target(b), _prob(p) {}

  Block* get_target() const { return _target; }
  float get_prob() const { return _prob; }
};

//------------------------------CFGLoop-------------------------------------------
class CFGLoop : public CFGElement {
  int _id;
  int _depth;
  CFGLoop *_parent;      // root of the loop tree is the method-level "pseudo" loop; its parent is null
  CFGLoop *_sibling;     // null terminated list
  CFGLoop *_child;       // first child, use child's sibling to visit all immediately nested loops
  GrowableArray<CFGElement*> _members; // list of members of loop
  GrowableArray<BlockProbPair> _exits; // list of successor blocks and their probabilities
  float _exit_prob;      // probability any loop exit is taken on a single loop iteration
  void update_succ_freq(Block* b, float freq);

public:
  CFGLoop(int id) :
    CFGElement(),
    _id(id),
    _depth(0),
    _parent(NULL),
    _sibling(NULL),
    _child(NULL),
    _exit_prob(1.0f) {}
  CFGLoop* parent() { return _parent; }
  void push_pred(Block* blk, int i, Block_List& worklist, Block_Array& node_to_blk);
  void add_member(CFGElement *s) { _members.push(s); }
  void add_nested_loop(CFGLoop* cl);
  Block* head() {
    assert(_members.at(0)->is_block(), "head must be a block");
    Block* hd = _members.at(0)->as_Block();
    assert(hd->_loop == this, "just checking");
    assert(hd->head()->is_Loop(), "must begin with loop head node");
    return hd;
  }
  Block* backedge_block(); // Return the block on the backedge of the loop (else NULL)
  void compute_loop_depth(int depth);
  void compute_freq(); // compute frequency with loop assuming head freq 1.0f
  void scale_freq();   // scale frequency by loop trip count (including outer loops)
  bool in_loop_nest(Block* b);
  float trip_count() const { return 1.0f / _exit_prob; }
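  // Worked example (illustrative): if each iteration leaves the loop with
  // probability 0.1 (_exit_prob == 0.1f), trip_count() == 1.0f/0.1f == 10
  // expected iterations.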
  virtual bool is_loop() { return true; }
  int id() { return _id; }

#ifndef PRODUCT
  void dump( ) const;
  void dump_tree() const;
#endif
};