annotate: src/share/vm/opto/block.hpp @ 909:b32a809aab08

6866585: debug code in ciObjectFactory too slow for large objects
Reviewed-by: ysr, never, kvn

author:   jcoomes
date:     Tue, 11 Aug 2009 23:24:41 -0700
parents:  bd02caa94611
children: c18cbe5936b8
/*
 * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// Optimization - Graph Style

class Block;
class CFGLoop;
class MachCallNode;
class Matcher;
class RootNode;
class VectorSet;
struct Tarjan;

//------------------------------Block_Array------------------------------------
// Map dense integer indices to Blocks.  Uses classic doubling-array trick.
// Abstractly provides an infinite array of Block*'s, initialized to NULL.
// Note that the constructor just zeros things, and since I use Arena
// allocation I do not need a destructor to reclaim storage.
class Block_Array : public ResourceObj {
  uint _size;                   // allocated size, as opposed to formal limit
  debug_only(uint _limit;)      // limit to formal domain
protected:
  Block **_blocks;
  void grow( uint i );          // Grow array node to fit

public:
  Arena *_arena;                // Arena to allocate in

  Block_Array(Arena *a) : _arena(a), _size(OptoBlockListSize) {
    debug_only(_limit=0);
    _blocks = NEW_ARENA_ARRAY( a, Block *, OptoBlockListSize );
    for( int i = 0; i < OptoBlockListSize; i++ ) {
      _blocks[i] = NULL;
    }
  }
  Block *lookup( uint i ) const // Lookup, or NULL for not mapped
  { return (i<Max()) ? _blocks[i] : (Block*)NULL; }
  Block *operator[] ( uint i ) const // Lookup, or assert for not mapped
  { assert( i < Max(), "oob" ); return _blocks[i]; }
  // Extend the mapping: index i maps to Block *n.
  void map( uint i, Block *n ) { if( i>=Max() ) grow(i); _blocks[i] = n; }
  uint Max() const { debug_only(return _limit); return _size; }
};
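
// A minimal usage sketch of the doubling-array trick above (index values are
// hypothetical; map() grows on demand, lookup() never does):
//
//   Block_Array bbs(Thread::current()->resource_area());
//   bbs.map(42, blk);              // grows past OptoBlockListSize if needed
//   Block* b = bbs.lookup(42);     // == blk
//   Block* c = bbs.lookup(9999);   // NULL: index never mapped
//   Block* d = bbs[9999];          // would assert "oob" in debug builds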

class Block_List : public Block_Array {
public:
  uint _cnt;
  Block_List() : Block_Array(Thread::current()->resource_area()), _cnt(0) {}
  void push( Block *b ) { map(_cnt++,b); }
  Block *pop() { return _blocks[--_cnt]; }
  Block *rpop() { Block *b = _blocks[0]; _blocks[0]=_blocks[--_cnt]; return b;}
  void remove( uint i );
  void insert( uint i, Block *n );
  uint size() const { return _cnt; }
  void reset() { _cnt = 0; }
  void print();
};
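
// Block_List doubles as a stack and a worklist; a hedged sketch of a typical
// traversal (visit() is a placeholder, not a real routine):
//
//   Block_List worklist;
//   worklist.push(start_block);
//   while (worklist.size() > 0) {
//     Block* b = worklist.pop();
//     visit(b);
//   }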


class CFGElement : public ResourceObj {
public:
  float _freq; // Execution frequency (estimate)

  CFGElement() : _freq(0.0f) {}
  virtual bool is_block() { return false; }
  virtual bool is_loop()  { return false; }
  Block*   as_Block()   { assert(is_block(), "must be block"); return (Block*)this; }
  CFGLoop* as_CFGLoop() { assert(is_loop(),  "must be loop");  return (CFGLoop*)this; }
};

//------------------------------Block------------------------------------------
// This class defines a Basic Block.
// Basic blocks are used during the output routines, and are not used during
// any optimization pass.  They are created late in the game.
class Block : public CFGElement {
public:
  // Nodes in this block, in order
  Node_List _nodes;

  // Basic blocks have a Node which defines Control for all Nodes pinned in
  // this block.  This Node is a RegionNode.  Exception-causing Nodes
  // (division, subroutines) and Phi functions are always pinned.  Later,
  // every Node will get pinned to some block.
  Node *head() const { return _nodes[0]; }

  // CAUTION: num_preds() is ONE based, so that predecessor numbers match
  // input edges to Regions and Phis.
  uint num_preds() const { return head()->req(); }
  Node *pred(uint i) const { return head()->in(i); }
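
  // Because num_preds() is one-based, a correct predecessor walk starts at
  // slot 1 (a hedged sketch; 'b' stands for any Block*):
  //
  //   for (uint i = 1; i < b->num_preds(); i++) {
  //     Node* p = b->pred(i);    // lines up with Region/Phi input edge i
  //   }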

  // Array of successor blocks, same size as projs array
  Block_Array _succs;

  // Basic blocks have some number of Nodes which split control to all
  // following blocks.  These Nodes are always Projections.  The field in
  // the Projection and the block-ending Node determine which Block follows.
  uint _num_succs;

  // Basic blocks also carry all sorts of good old fashioned DFS information
  // used to find loops, loop nesting depth, dominators, etc.
  uint _pre_order;              // Pre-order DFS number

  // Dominator tree
  uint _dom_depth;              // Depth in dominator tree for fast LCA
  Block* _idom;                 // Immediate dominator block

  CFGLoop *_loop;               // Loop to which this block belongs
  uint _rpo;                    // Number in reverse post order walk

  virtual bool is_block() { return true; }
  float succ_prob(uint i);      // return probability of i'th successor
  int num_fall_throughs();      // How many fall-through candidates this block has
  void update_uncommon_branch(Block* un); // Lower branch prob to uncommon code
  bool succ_fall_through(uint i); // Is successor "i" a fall-through candidate
  Block* lone_fall_through();   // Return lone fall-through Block or null

  Block* dom_lca(Block* that);  // Compute LCA in dominator tree.
#ifdef ASSERT
  bool dominates(Block* that) {
    int dom_diff = this->_dom_depth - that->_dom_depth;
    if (dom_diff > 0)  return false;
    for (; dom_diff < 0; dom_diff++)  that = that->_idom;
    return this == that;
  }
#endif
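
  // A hedged usage sketch: during scheduling, a node's legal placement is
  // bracketed between its earliest block and the deepest common dominator of
  // its uses, which dom_lca() can compute ('early' and 'late' are
  // hypothetical names here):
  //
  //   Block* lca = early->dom_lca(late);  // equalizes _dom_depth, then walks _idom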

  // Report the alignment required by this block.  Must be a power of 2.
  // The previous block will insert nops to get this alignment.
  uint code_alignment();
  uint compute_loop_alignment();

  // BLOCK_FREQUENCY is a sentinel to mark uses of constant block frequencies.
  // It is currently also used to scale such frequencies relative to
  // FreqCountInvocations, relative to the old default value of 1500.
#define BLOCK_FREQUENCY(f) ((f * (float) 1500) / FreqCountInvocations)
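  // For example, with FreqCountInvocations at its historical default of 1500,
  // BLOCK_FREQUENCY(0.5f) == (0.5f * 1500) / 1500 == 0.5f; doubling
  // FreqCountInvocations halves every such constant frequency.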

  // Register Pressure (estimate) for Splitting heuristic
  uint _reg_pressure;
  uint _ihrp_index;
  uint _freg_pressure;
  uint _fhrp_index;

  // Mark and visited bits for an LCA calculation in insert_anti_dependences.
  // Since they hold unique node indexes, they do not need reinitialization.
  node_idx_t _raise_LCA_mark;
  void set_raise_LCA_mark(node_idx_t x)    { _raise_LCA_mark = x; }
  node_idx_t  raise_LCA_mark() const       { return _raise_LCA_mark; }
  node_idx_t _raise_LCA_visited;
  void set_raise_LCA_visited(node_idx_t x) { _raise_LCA_visited = x; }
  node_idx_t  raise_LCA_visited() const    { return _raise_LCA_visited; }

  // Estimated size in bytes of first instructions in a loop.
  uint _first_inst_size;
  uint first_inst_size() const     { return _first_inst_size; }
  void set_first_inst_size(uint s) { _first_inst_size = s; }

  // Compute the size of first instructions in this block.
  uint compute_first_inst_size(uint& sum_size, uint inst_cnt, PhaseRegAlloc* ra);

  // Compute alignment padding if the block needs it.
  // Align a loop if the loop's padding is less than or equal to the padding
  // limit, or if the size of the loop's first instructions exceeds the padding.
  uint alignment_padding(int current_offset) {
    int block_alignment = code_alignment();
    int max_pad = block_alignment-relocInfo::addr_unit();
    if( max_pad > 0 ) {
      assert(is_power_of_2(max_pad+relocInfo::addr_unit()), "");
      int current_alignment = current_offset & max_pad;
      if( current_alignment != 0 ) {
        uint padding = (block_alignment-current_alignment) & max_pad;
        if( has_loop_alignment() &&
            padding > (uint)MaxLoopPad &&
            first_inst_size() <= padding ) {
          return 0;
        }
        return padding;
      }
    }
    return 0;
  }
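
  // A worked example with illustrative numbers: if code_alignment() == 16 and
  // relocInfo::addr_unit() == 1, then max_pad == 15.  At current_offset ==
  // 0x43 the masked low bits give current_alignment == 3, so padding == 13
  // nop bytes -- unless this is an aligned loop head where 13 > MaxLoopPad
  // and the loop's first instructions fit within those 13 bytes, in which
  // case the padding is judged not worthwhile and 0 is returned.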

  // Connector blocks.  Connector blocks are basic blocks devoid of
  // instructions, but may have relevant non-instruction Nodes, such as
  // Phis or MergeMems.  Such blocks are discovered and marked during the
  // RemoveEmpty phase, and elided during Output.
  bool _connector;
  void set_connector() { _connector = true; }
  bool is_connector() const { return _connector; }

  // Loop_alignment will be set for blocks which are at the top of loops.
  // The block layout pass may rotate loops such that the loop head may not
  // be the sequentially first block of the loop encountered in the linear
  // list of blocks.  If the layout pass is not run, loop alignment is set
  // for each block which is the head of a loop.
  uint _loop_alignment;
  void set_loop_alignment(Block *loop_top) {
    uint new_alignment = loop_top->compute_loop_alignment();
    if (new_alignment > _loop_alignment) {
      _loop_alignment = new_alignment;
    }
  }
  uint loop_alignment() const { return _loop_alignment; }
  bool has_loop_alignment() const { return loop_alignment() > 0; }

  // Create a new Block with given head Node.
  // Creates the (empty) predecessor arrays.
  Block( Arena *a, Node *headnode )
    : CFGElement(),
      _nodes(a),
      _succs(a),
      _num_succs(0),
      _pre_order(0),
      _idom(0),
      _loop(NULL),
      _reg_pressure(0),
      _ihrp_index(1),
      _freg_pressure(0),
      _fhrp_index(1),
      _raise_LCA_mark(0),
      _raise_LCA_visited(0),
      _first_inst_size(999999),
      _connector(false),
      _loop_alignment(0) {
    _nodes.push(headnode);
  }

  // Index of 'end' Node
  uint end_idx() const {
    // %%%%% add a proj after every goto
    // so (last->is_block_proj() != last) always, then simplify this code
    // This will not give correct end_idx for block 0 when it only contains root.
    int last_idx = _nodes.size() - 1;
    Node *last = _nodes[last_idx];
    assert(last->is_block_proj() == last || last->is_block_proj() == _nodes[last_idx - _num_succs], "");
    return (last->is_block_proj() == last) ? last_idx : (last_idx - _num_succs);
  }
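
  // Node layout this relies on (illustrative): a block ending in a multi-way
  // branch stores [head, ..., branch, proj_0, ..., proj_(_num_succs-1)], so
  // the branch sits at size()-1-_num_succs; a GotoNode is its own block
  // projection and is simply the last node.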

  // Basic blocks have a Node which ends them.  This Node determines which
  // basic block follows this one in the program flow.  This Node is either an
  // IfNode, a GotoNode, a JmpNode, or a ReturnNode.
  Node *end() const { return _nodes[end_idx()]; }

  // Add an instruction to an existing block.  It must go after the head
  // instruction and before the end instruction.
  void add_inst( Node *n ) { _nodes.insert(end_idx(),n); }
  // Find node in block
  uint find_node( const Node *n ) const;
  // Find and remove n from block list
  void find_remove( const Node *n );

  // Schedule a call next in the block
  uint sched_call(Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_List &worklist, int *ready_cnt, MachCallNode *mcall, VectorSet &next_call);

  // Perform basic-block local scheduling
  Node *select(PhaseCFG *cfg, Node_List &worklist, int *ready_cnt, VectorSet &next_call, uint sched_slot);
  void set_next_call( Node *n, VectorSet &next_call, Block_Array &bbs );
  void needed_for_next_call(Node *this_call, VectorSet &next_call, Block_Array &bbs);
  bool schedule_local(PhaseCFG *cfg, Matcher &m, int *ready_cnt, VectorSet &next_call);
  // Cleanup if any code lands between a Call and its Catch
  void call_catch_cleanup(Block_Array &bbs);
  // Detect implicit-null-check opportunities.  Basically, find NULL checks
  // with suitable memory ops nearby.  Use the memory op to do the NULL check.
  // I can generate a memory op if there is not one nearby.
  void implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowed_reasons);

  // Return the empty status of a block
  enum { not_empty, empty_with_goto, completely_empty };
  int is_Empty() const;

  // Forward through connectors
  Block* non_connector() {
    Block* s = this;
    while (s->is_connector()) {
      s = s->_succs[0];
    }
    return s;
  }

  // Return true if b is a successor of this block
  bool has_successor(Block* b) const {
    for (uint i = 0; i < _num_succs; i++ ) {
      if (non_connector_successor(i) == b) {
        return true;
      }
    }
    return false;
  }

  // Successor block, after forwarding through connectors
  Block* non_connector_successor(int i) const {
    return _succs[i]->non_connector();
  }

  // Examine block's code shape to predict if it is not commonly executed.
  bool has_uncommon_code() const;

  // Use frequency calculations and code shape to predict if the block
  // is uncommon.
  bool is_uncommon( Block_Array &bbs ) const;

#ifndef PRODUCT
  // Debugging print of basic block
  void dump_bidx(const Block* orig) const;
  void dump_pred(const Block_Array *bbs, Block* orig) const;
  void dump_head( const Block_Array *bbs ) const;
  void dump( ) const;
  void dump( const Block_Array *bbs ) const;
#endif
};


//------------------------------PhaseCFG---------------------------------------
// Build an array of Basic Block pointers, one per Node.
class PhaseCFG : public Phase {
private:
  // Build a proper looking cfg.  Return count of basic blocks
  uint build_cfg();

  // Perform DFS search.
  // Setup 'vertex' as DFS to vertex mapping.
  // Setup 'semi' as vertex to DFS mapping.
  // Set 'parent' to DFS parent.
  uint DFS( Tarjan *tarjan );

  // Helper function to insert a node into a block
  void schedule_node_into_block( Node *n, Block *b );

  void replace_block_proj_ctrl( Node *n );

  // Set the basic block for pinned Nodes
  void schedule_pinned_nodes( VectorSet &visited );

  // I'll need a few machine-specific GotoNodes.  Clone from this one.
  MachNode *_goto;

  Block* insert_anti_dependences(Block* LCA, Node* load, bool verify = false);
  void verify_anti_dependences(Block* LCA, Node* load) {
    assert(LCA == _bbs[load->_idx], "should already be scheduled");
    insert_anti_dependences(LCA, load, true);
  }

public:
  PhaseCFG( Arena *a, RootNode *r, Matcher &m );

  uint _num_blocks;             // Count of basic blocks
  Block_List _blocks;           // List of basic blocks
  RootNode *_root;              // Root of whole program
  Block_Array _bbs;             // Map Nodes to owning Basic Block
  Block *_broot;                // Basic block of root
  uint _rpo_ctr;
  CFGLoop* _root_loop;
  float _outer_loop_freq;       // Outermost loop frequency

  // Per node latency estimation, valid only during GCM
  GrowableArray<uint> _node_latency;

#ifndef PRODUCT
  bool _trace_opto_pipelining;  // tracing flag
#endif

#ifdef ASSERT
  Unique_Node_List _raw_oops;
#endif

  // Build dominators
  void Dominators();

  // Estimate block frequencies based on IfNode probabilities
  void Estimate_Block_Frequency();

  // Global Code Motion.  See Click's PLDI95 paper.  Place Nodes in specific
  // basic blocks; i.e. _bbs now maps _idx for all Nodes to some Block.
  void GlobalCodeMotion( Matcher &m, uint unique, Node_List &proj_list );

  // Compute the (backwards) latency of a node from the uses
  void latency_from_uses(Node *n);

  // Compute the (backwards) latency of a node from a single use
  int latency_from_use(Node *n, const Node *def, Node *use);

  // Compute the (backwards) latency of a node from the uses of this instruction
  void partial_latency_of_defs(Node *n);

  // Schedule Nodes early in their basic blocks.
  bool schedule_early(VectorSet &visited, Node_List &roots);

  // For each node, find the latest block it can be scheduled into
  // and then select the cheapest block between the latest and earliest
  // block to place the node.
  void schedule_late(VectorSet &visited, Node_List &stack);

  // Pick a block between early and late that is a cheaper alternative
  // to late.  Helper for schedule_late.
  Block* hoist_to_cheaper_block(Block* LCA, Block* early, Node* self);

  // Compute the instruction global latency with a backwards walk
  void ComputeLatenciesBackwards(VectorSet &visited, Node_List &stack);

  // Set loop alignment
  void set_loop_alignment();

  // Remove empty basic blocks
  void remove_empty();
  void fixup_flow();
  bool move_to_next(Block* bx, uint b_index);
  void move_to_end(Block* bx, uint b_index);
  void insert_goto_at(uint block_no, uint succ_no);

  // Check for NeverBranch at block end.  This needs to become a GOTO to the
  // true target.  A NeverBranch is treated as a conditional branch that
  // always goes the same direction for most of the optimizer, and is used to
  // give a fake exit path to infinite loops.  At this late stage it needs to
  // turn into a Goto so that when you enter the infinite loop you indeed hang.
  void convert_NeverBranch_to_Goto(Block *b);

  CFGLoop* create_loop_tree();

  // Insert a node into a block, and update the _bbs
  void insert( Block *b, uint idx, Node *n ) {
    b->_nodes.insert( idx, n );
    _bbs.map( n->_idx, b );
  }

#ifndef PRODUCT
  bool trace_opto_pipelining() const { return _trace_opto_pipelining; }

  // Debugging print of CFG
  void dump( ) const;           // CFG only
  void _dump_cfg( const Node *end, VectorSet &visited ) const;
  void verify() const;
  void dump_headers();
#else
  bool trace_opto_pipelining() const { return false; }
#endif
};
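
// A hedged sketch of how a compile might drive PhaseCFG; the authoritative
// sequencing lives in the .cpp files, so treat this as illustrative only:
//
//   PhaseCFG cfg(arena, root, matcher);       // build_cfg() populates _blocks
//   cfg.Dominators();                         // dominator tree for LCA queries
//   cfg.Estimate_Block_Frequency();           // per-block frequency estimates
//   cfg.GlobalCodeMotion(matcher, unique, proj_list); // pin every Node to a Block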


//------------------------------UnionFind--------------------------------------
// Map Block indices to a block-index for a cfg-cover.
// Array lookup in the optimized case.
class UnionFind : public ResourceObj {
  uint _cnt, _max;
  uint* _indices;
  ReallocMark _nesting;         // assertion check for reallocations
public:
  UnionFind( uint max );
  void reset( uint max );       // Reset to identity map for [0..max]

  uint lookup( uint nidx ) const {
    return _indices[nidx];
  }
  uint operator[] (uint nidx) const { return lookup(nidx); }

  void map( uint from_idx, uint to_idx ) {
    assert( from_idx < _cnt, "oob" );
    _indices[from_idx] = to_idx;
  }
  void extend( uint from_idx, uint to_idx );

  uint Size() const { return _cnt; }

  uint Find( uint idx ) {
    assert( idx < 65536, "Must fit into uint");
    uint uf_idx = lookup(idx);
    return (uf_idx == idx) ? uf_idx : Find_compress(idx);
  }
  uint Find_compress( uint idx );
  uint Find_const( uint idx ) const;
  void Union( uint idx1, uint idx2 );

};
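
// A small illustrative use of the union-find structure above (indices are
// hypothetical):
//
//   UnionFind uf(16);
//   uf.reset(16);          // identity: every index is its own set
//   uf.Union(3, 7);        // merge the sets containing 3 and 7
//   uint r = uf.Find(3);   // == uf.Find(7); Find compresses paths as it goes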

//----------------------------BlockProbPair---------------------------
// Ordered pair of a Block* target and the probability of the edge
// leading to that block.
class BlockProbPair VALUE_OBJ_CLASS_SPEC {
protected:
  Block* _target;      // block target
  float  _prob;        // probability of edge to block
public:
  BlockProbPair() : _target(NULL), _prob(0.0) {}
  BlockProbPair(Block* b, float p) : _target(b), _prob(p) {}

  Block* get_target() const { return _target; }
  float get_prob() const { return _prob; }
};

//------------------------------CFGLoop-------------------------------------------
class CFGLoop : public CFGElement {
  int _id;
  int _depth;
  CFGLoop *_parent;      // root of loop tree is the method level "pseudo" loop, its parent is null
  CFGLoop *_sibling;     // null terminated list
  CFGLoop *_child;       // first child, use child's sibling to visit all immediately nested loops
  GrowableArray<CFGElement*> _members; // list of members of loop
  GrowableArray<BlockProbPair> _exits; // list of successor blocks and their probabilities
  float _exit_prob;      // probability any loop exit is taken on a single loop iteration
  void update_succ_freq(Block* b, float freq);

public:
  CFGLoop(int id) :
    CFGElement(),
    _id(id),
    _depth(0),
    _parent(NULL),
    _sibling(NULL),
    _child(NULL),
    _exit_prob(1.0f) {}
  CFGLoop* parent() { return _parent; }
  void push_pred(Block* blk, int i, Block_List& worklist, Block_Array& node_to_blk);
  void add_member(CFGElement *s) { _members.push(s); }
  void add_nested_loop(CFGLoop* cl);
  Block* head() {
    assert(_members.at(0)->is_block(), "head must be a block");
    Block* hd = _members.at(0)->as_Block();
    assert(hd->_loop == this, "just checking");
    assert(hd->head()->is_Loop(), "must begin with loop head node");
    return hd;
  }
  Block* backedge_block(); // Return the block on the backedge of the loop (else NULL)
  void compute_loop_depth(int depth);
  void compute_freq(); // compute frequency with loop assuming head freq 1.0f
  void scale_freq();   // scale frequency by loop trip count (including outer loops)
  float outer_loop_freq() const; // frequency of outer loop
  bool in_loop_nest(Block* b);
  float trip_count() const { return 1.0f / _exit_prob; }
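  // For example (illustrative numbers only): _exit_prob == 0.1f means some
  // exit is taken once per ten iterations on average, so trip_count() == 10.0f.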
  virtual bool is_loop() { return true; }
  int id() { return _id; }

#ifndef PRODUCT
  void dump( ) const;
  void dump_tree() const;
#endif
};


//----------------------------------CFGEdge------------------------------------
// An edge between two basic blocks that will be embodied by a branch or a
// fall-through.
class CFGEdge : public ResourceObj {
private:
  Block * _from;        // Source basic block
  Block * _to;          // Destination basic block
  float _freq;          // Execution frequency (estimate)
  int   _state;
  bool  _infrequent;
  int   _from_pct;
  int   _to_pct;

  // Private accessors
  int  from_pct() const { return _from_pct; }
  int  to_pct()   const { return _to_pct;   }
  int  from_infrequent() const { return from_pct() < BlockLayoutMinDiamondPercentage; }
  int  to_infrequent()   const { return to_pct()   < BlockLayoutMinDiamondPercentage; }

public:
  enum {
    open,               // initial edge state; unprocessed
    connected,          // edge used to connect two traces together
    interior            // edge is interior to trace (could be backedge)
  };

  CFGEdge(Block *from, Block *to, float freq, int from_pct, int to_pct) :
    _from(from), _to(to), _freq(freq),
    _from_pct(from_pct), _to_pct(to_pct), _state(open) {
    _infrequent = from_infrequent() || to_infrequent();
  }

  float  freq() const { return _freq; }
  Block* from() const { return _from; }
  Block* to  () const { return _to;   }
  int  infrequent() const { return _infrequent; }
  int state() const { return _state; }

  void set_state(int state) { _state = state; }

#ifndef PRODUCT
  void dump( ) const;
#endif
};


//-----------------------------------Trace-------------------------------------
// An ordered list of basic blocks.
class Trace : public ResourceObj {
private:
  uint _id;             // Unique Trace id (derived from initial block)
  Block ** _next_list;  // Array mapping index to next block
  Block ** _prev_list;  // Array mapping index to previous block
  Block * _first;       // First block in the trace
  Block * _last;        // Last block in the trace

  // Return the block that follows "b" in the trace.
  Block * next(Block *b) const { return _next_list[b->_pre_order]; }
  void set_next(Block *b, Block *n) const { _next_list[b->_pre_order] = n; }

  // Return the block that precedes "b" in the trace.
  Block * prev(Block *b) const { return _prev_list[b->_pre_order]; }
  void set_prev(Block *b, Block *p) const { _prev_list[b->_pre_order] = p; }

  // We've discovered a loop in this trace.  Reset last to be "b", and first as
  // the block following "b".
  void break_loop_after(Block *b) {
    _last = b;
    _first = next(b);
    set_prev(_first, NULL);
    set_next(_last, NULL);
  }

public:

  Trace(Block *b, Block **next_list, Block **prev_list) :
    _first(b),
    _last(b),
    _next_list(next_list),
    _prev_list(prev_list),
    _id(b->_pre_order) {
    set_next(b, NULL);
    set_prev(b, NULL);
  }

  // Return the id number
  uint id() const { return _id; }
  void set_id(uint id) { _id = id; }

  // Return the first block in the trace
  Block * first_block() const { return _first; }

  // Return the last block in the trace
  Block * last_block() const { return _last; }

  // Insert a trace in the middle of this one after b
  void insert_after(Block *b, Trace *tr) {
    set_next(tr->last_block(), next(b));
    if (next(b) != NULL) {
      set_prev(next(b), tr->last_block());
    }

    set_next(b, tr->first_block());
    set_prev(tr->first_block(), b);

    if (b == _last) {
      _last = tr->last_block();
    }
  }
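
  // Illustrative list surgery (block letters are hypothetical):
  //   this trace:  A -> B -> C          tr:  X -> Y
  //   after this->insert_after(B, tr):  A -> B -> X -> Y -> C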

  void insert_before(Block *b, Trace *tr) {
    Block *p = prev(b);
    assert(p != NULL, "use append instead");
    insert_after(p, tr);
  }

  // Append another trace to this one.
  void append(Trace *tr) {
    insert_after(_last, tr);
  }

  // Append a block at the end of this trace
  void append(Block *b) {
    set_next(_last, b);
    set_prev(b, _last);
    _last = b;
  }

  // Adjust the blocks in this trace
  void fixup_blocks(PhaseCFG &cfg);
  bool backedge(CFGEdge *e);

#ifndef PRODUCT
  void dump( ) const;
#endif
};

//------------------------------PhaseBlockLayout-------------------------------
// Rearrange blocks into some canonical order, based on edges and their frequencies
class PhaseBlockLayout : public Phase {
  PhaseCFG &_cfg;               // Control flow graph

  GrowableArray<CFGEdge *> *edges;
  Trace **traces;
  Block **next;
  Block **prev;
  UnionFind *uf;

  // Given a block, find its encompassing Trace
  Trace * trace(Block *b) {
    return traces[uf->Find_compress(b->_pre_order)];
  }
public:
  PhaseBlockLayout(PhaseCFG &cfg);

  void find_edges();
  void grow_traces();
  void merge_traces(bool loose_connections);
  void reorder_traces(int count);
  void union_traces(Trace* from, Trace* to);
};
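
// A hedged note on usage: constructing the phase is expected to run the passes
// declared above roughly in order -- find_edges(), grow_traces(),
// merge_traces(), reorder_traces() -- but the authoritative sequencing is in
// block.cpp, so treat this as an assumption rather than a contract:
//
//   PhaseBlockLayout layout(cfg);  // reorders cfg._blocks in place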