annotate src/share/vm/opto/gcm.cpp @ 34:545c277a3ecf
6667581: Don't generate initialization (by 0) code for arrays with size 0
Summary: generate_arraycopy() does not check the size of allocated array.
Reviewed-by: jrose, never
author:   kvn
date:     Fri, 29 Feb 2008 11:22:27 -0800
parents:  6152cbb08ce9
children: d1605aabd0a1 273eaa04d9a1
/*
 * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

#include "incls/_precompiled.incl"
#include "incls/_gcm.cpp.incl"

//----------------------------schedule_node_into_block-------------------------
// Insert node n into block b. Look for projections of n and make sure they
// are in b also.
void PhaseCFG::schedule_node_into_block( Node *n, Block *b ) {
  // Set basic block of n, Add n to b,
  _bbs.map(n->_idx, b);
  b->add_inst(n);

  // After Matching, nearly any old Node may have projections trailing it.
  // These are usually machine-dependent flags.  In any case, they might
  // float to another block below this one.  Move them up.
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* use = n->fast_out(i);
    if (use->is_Proj()) {
      Block* buse = _bbs[use->_idx];
      if (buse != b) {              // In wrong block?
        if (buse != NULL)
          buse->find_remove(use);   // Remove from wrong block
        _bbs.map(use->_idx, b);     // Re-insert in this block
        b->add_inst(use);
      }
    }
  }
}


//------------------------------schedule_pinned_nodes--------------------------
// Set the basic block for Nodes pinned into blocks
void PhaseCFG::schedule_pinned_nodes( VectorSet &visited ) {
  // Allocate node stack of size C->unique()+8 to avoid frequent realloc
  GrowableArray <Node *> spstack(C->unique()+8);
  spstack.push(_root);
  while ( spstack.is_nonempty() ) {
    Node *n = spstack.pop();
    if( !visited.test_set(n->_idx) ) { // Test node and flag it as visited
      if( n->pinned() && !_bbs.lookup(n->_idx) ) {  // Pinned?  Nail it down!
        Node *input = n->in(0);
        assert( input, "pinned Node must have Control" );
        while( !input->is_block_start() )
          input = input->in(0);
        Block *b = _bbs[input->_idx]; // Basic block of controlling input
        schedule_node_into_block(n, b);
      }
      for( int i = n->req() - 1; i >= 0; --i ) {  // For all inputs
        if( n->in(i) != NULL )
          spstack.push(n->in(i));
      }
    }
  }
}
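
// Editorial sketch (not part of HotSpot): schedule_pinned_nodes pins a node
// by walking its control input up to the nearest block head. The walk, on
// hypothetical toy types, guarded out of compilation:
#ifdef GCM_EXPOSITORY_SKETCHES
struct ToyNode {
  ToyNode* control;      // stands in for n->in(0)
  bool     block_start;  // stands in for is_block_start()
};
// Follow control edges upward until a block head is reached; the pinned
// node belongs to that head's basic block.
static ToyNode* toy_find_block_head(ToyNode* n) {
  ToyNode* input = n->control;
  while (!input->block_start)
    input = input->control;
  return input;
}
#endif // GCM_EXPOSITORY_SKETCHES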

#ifdef ASSERT
// Assert that new input b2 is dominated by all previous inputs.
// Check this by seeing that b2 is dominated by b1, the deepest
// input observed before b2.
static void assert_dom(Block* b1, Block* b2, Node* n, Block_Array &bbs) {
  if (b1 == NULL)  return;
  assert(b1->_dom_depth < b2->_dom_depth, "sanity");
  Block* tmp = b2;
  while (tmp != b1 && tmp != NULL) {
    tmp = tmp->_idom;
  }
  if (tmp != b1) {
    // Detected an unschedulable graph.  Print some nice stuff and die.
    tty->print_cr("!!! Unschedulable graph !!!");
    for (uint j=0; j<n->len(); j++) { // For all inputs
      Node* inn = n->in(j); // Get input
      if (inn == NULL)  continue;  // Ignore NULL, missing inputs
      Block* inb = bbs[inn->_idx];
      tty->print("B%d idom=B%d depth=%2d ",inb->_pre_order,
                 inb->_idom ? inb->_idom->_pre_order : 0, inb->_dom_depth);
      inn->dump();
    }
    tty->print("Failing node: ");
    n->dump();
    assert(false, "unschedulable graph");
  }
}
#endif

static Block* find_deepest_input(Node* n, Block_Array &bbs) {
  // Find the last input dominated by all other inputs.
  Block* deepb           = NULL;        // Deepest block so far
  int    deepb_dom_depth = 0;
  for (uint k = 0; k < n->len(); k++) { // For all inputs
    Node* inn = n->in(k);               // Get input
    if (inn == NULL)  continue;         // Ignore NULL, missing inputs
    Block* inb = bbs[inn->_idx];
    assert(inb != NULL, "must already have scheduled this input");
    if (deepb_dom_depth < (int) inb->_dom_depth) {
      // The new inb must be dominated by the previous deepb.
      // The various inputs must be linearly ordered in the dom
      // tree, or else there will not be a unique deepest block.
      DEBUG_ONLY(assert_dom(deepb, inb, n, bbs));
      deepb = inb;                      // Save deepest block
      deepb_dom_depth = deepb->_dom_depth;
    }
  }
  assert(deepb != NULL, "must be at least one input to n");
  return deepb;
}
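
// Editorial sketch (illustrative only): find_deepest_input keeps a running
// maximum over dominator-tree depth. Under the same invariant (the input
// blocks are totally ordered by dominance), on a hypothetical toy type:
#ifdef GCM_EXPOSITORY_SKETCHES
struct ToyBlock {
  ToyBlock* idom;        // immediate dominator
  unsigned  dom_depth;   // depth in the dominator tree
};
// The deepest input block is dominated by all the others, so it is the
// earliest block where every input is available.
static ToyBlock* toy_deepest(ToyBlock** inputs, int n) {
  ToyBlock* deep = NULL;
  unsigned  deep_depth = 0;
  for (int k = 0; k < n; k++) {
    if (inputs[k] != NULL && inputs[k]->dom_depth > deep_depth) {
      deep = inputs[k];
      deep_depth = deep->dom_depth;
    }
  }
  return deep;
}
#endif // GCM_EXPOSITORY_SKETCHES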


//------------------------------schedule_early---------------------------------
// Find the earliest Block any instruction can be placed in.  Some instructions
// are pinned into Blocks.  Unpinned instructions can appear in the last block
// in which all their inputs occur.
bool PhaseCFG::schedule_early(VectorSet &visited, Node_List &roots) {
  // Allocate stack with enough space to avoid frequent realloc
  Node_Stack nstack(roots.Size() + 8); // (unique >> 1) + 24 from Java2D stats
  // roots.push(_root); _root will be processed among C->top() inputs
  roots.push(C->top());
  visited.set(C->top()->_idx);

  while (roots.size() != 0) {
    // Use local variables nstack_top_n & nstack_top_i to cache values
    // on stack's top.
    Node *nstack_top_n = roots.pop();
    uint  nstack_top_i = 0;
//while_nstack_nonempty:
    while (true) {
      // Get parent node and next input's index from stack's top.
      Node *n = nstack_top_n;
      uint  i = nstack_top_i;

      if (i == 0) {
        // Special control input processing.
        // While I am here, go ahead and look for Nodes which are taking control
        // from an is_block_proj Node.  After I inserted RegionNodes to make proper
        // blocks, the control at an is_block_proj more properly comes from the
        // Region being controlled by the block_proj Node.
        const Node *in0 = n->in(0);
        if (in0 != NULL) {              // Control-dependent?
          const Node *p = in0->is_block_proj();
          if (p != NULL && p != n) {    // Control from a block projection?
            // Find trailing Region
            Block *pb = _bbs[in0->_idx]; // Block-projection already has basic block
            uint j = 0;
            if (pb->_num_succs != 1) {  // More than 1 successor?
              // Search for successor
              uint max = pb->_nodes.size();
              assert( max > 1, "" );
              uint start = max - pb->_num_succs;
              // Find which output path belongs to projection
              for (j = start; j < max; j++) {
                if( pb->_nodes[j] == in0 )
                  break;
              }
              assert( j < max, "must find" );
              // Change control to match head of successor basic block
              j -= start;
            }
            n->set_req(0, pb->_succs[j]->head());
          }
        } else {               // n->in(0) == NULL
          if (n->req() == 1) { // This guy is a constant with NO inputs?
            n->set_req(0, _root);
          }
        }
      }

      // First, visit all inputs and force them to get a block.  If an
      // input is already in a block we quit following inputs (to avoid
      // cycles). Instead we put that Node on a worklist to be handled
      // later (since ITS inputs may not have a block yet).
      bool done = true;       // Assume all n's inputs will be processed
      while (i < n->len()) {  // For all inputs
        Node *in = n->in(i);  // Get input
        ++i;
        if (in == NULL)  continue;  // Ignore NULL, missing inputs
        int is_visited = visited.test_set(in->_idx);
        if (!_bbs.lookup(in->_idx)) { // Missing block selection?
          if (is_visited) {
            // assert( !visited.test(in->_idx), "did not schedule early" );
            return false;
          }
          nstack.push(n, i);  // Save parent node and next input's index.
          nstack_top_n = in;  // Process current input now.
          nstack_top_i = 0;
          done = false;       // Not all n's inputs processed.
          break; // continue while_nstack_nonempty;
        } else if (!is_visited) { // Input not yet visited?
          roots.push(in);         // Visit this guy later, using worklist
        }
      }
      if (done) {
        // All of n's inputs have been processed, complete post-processing.

        // Some instructions are pinned into a block.  These include Region,
        // Phi, Start, Return, and other control-dependent instructions and
        // any projections which depend on them.
        if (!n->pinned()) {
          // Set earliest legal block.
          _bbs.map(n->_idx, find_deepest_input(n, _bbs));
        }

        if (nstack.is_empty()) {
          // Finished all nodes on stack.
          // Process next node on the worklist 'roots'.
          break;
        }
        // Get saved parent node and next input's index.
        nstack_top_n = nstack.node();
        nstack_top_i = nstack.index();
        nstack.pop();
      } // if (done)
    } // while (true)
  } // while (roots.size() != 0)
  return true;
}
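
// Editorial sketch (illustrative only): schedule_early emulates recursion
// with an explicit (node, next-input-index) stack, caching the top frame in
// locals. The skeleton of that pattern on hypothetical toy types, assuming
// an acyclic input graph:
#ifdef GCM_EXPOSITORY_SKETCHES
struct ToyInNode {
  ToyInNode** in;        // NULL-terminated input list
  bool        scheduled; // post-order mark
};
struct ToyEarlyFrame { ToyInNode* node; int next_input; };
static void toy_schedule_inputs_first(ToyInNode* root, ToyEarlyFrame* stack) {
  int sp = 0;
  ToyInNode* node = root;
  int i = 0;
  while (true) {
    while (node->in[i] != NULL && node->in[i]->scheduled)  i++;
    if (node->in[i] != NULL) {
      // Unscheduled input: save the current frame and descend into it.
      stack[sp].node = node;  stack[sp].next_input = i;  sp++;
      node = node->in[i];  i = 0;
      continue;
    }
    node->scheduled = true;          // all inputs done: post-process 'node'
    if (sp == 0)  break;
    --sp;  node = stack[sp].node;  i = stack[sp].next_input;  // resume parent
  }
}
#endif // GCM_EXPOSITORY_SKETCHES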

//------------------------------dom_lca----------------------------------------
// Find least common ancestor in dominator tree
// LCA is a current notion of LCA, to be raised above 'this'.
// As a convenient boundary condition, return 'this' if LCA is NULL.
// Find the LCA of those two nodes.
Block* Block::dom_lca(Block* LCA) {
  if (LCA == NULL || LCA == this)  return this;

  Block* anc = this;
  while (anc->_dom_depth > LCA->_dom_depth)
    anc = anc->_idom;           // Walk up till anc is as high as LCA

  while (LCA->_dom_depth > anc->_dom_depth)
    LCA = LCA->_idom;           // Walk up till LCA is as high as anc

  while (LCA != anc) {          // Walk both up till they are the same
    LCA = LCA->_idom;
    anc = anc->_idom;
  }

  return LCA;
}
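
// Editorial sketch (illustrative only): Block::dom_lca above is the classic
// two-pointer LCA walk on the dominator tree. On the ToyBlock type from the
// earlier sketch:
#ifdef GCM_EXPOSITORY_SKETCHES
static ToyBlock* toy_dom_lca(ToyBlock* a, ToyBlock* b) {
  // Equalize depths first, then walk both pointers up in lock step.
  while (a->dom_depth > b->dom_depth)  a = a->idom;
  while (b->dom_depth > a->dom_depth)  b = b->idom;
  while (a != b) { a = a->idom;  b = b->idom; }
  return a;
}
#endif // GCM_EXPOSITORY_SKETCHES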

//--------------------------raise_LCA_above_use--------------------------------
// We are placing a definition, and have been given a def->use edge.
// The definition must dominate the use, so move the LCA upward in the
// dominator tree to dominate the use.  If the use is a phi, adjust
// the LCA only with the phi input paths which actually use this def.
static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, Block_Array &bbs) {
  Block* buse = bbs[use->_idx];
  if (buse == NULL) return LCA;   // Unused killing Projs have no use block
  if (!use->is_Phi())  return buse->dom_lca(LCA);
  uint pmax = use->req();       // Number of Phi inputs
  // Why doesn't this loop just break after finding the matching input to
  // the Phi?  Well...it's like this.  I do not have true def-use/use-def
  // chains.  Means I cannot distinguish, from the def-use direction, which
  // of many use-defs lead from the same use to the same def.  That is, this
  // Phi might have several uses of the same def.  Each use appears in a
  // different predecessor block.  But when I enter here, I cannot distinguish
  // which use-def edge I should find the predecessor block for.  So I find
  // them all.  Means I do a little extra work if a Phi uses the same value
  // more than once.
  for (uint j=1; j<pmax; j++) { // For all inputs
    if (use->in(j) == def) {    // Found matching input?
      Block* pred = bbs[buse->pred(j)->_idx];
      LCA = pred->dom_lca(LCA);
    }
  }
  return LCA;
}
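
// Editorial sketch (illustrative only): for a non-Phi use, raising the LCA
// reduces to folding the use's block into the running LCA (Phi uses fold in
// the matching predecessor blocks instead, as the loop above shows):
#ifdef GCM_EXPOSITORY_SKETCHES
static ToyBlock* toy_raise_above_use(ToyBlock* lca, ToyBlock* use_blk) {
  return (lca == NULL) ? use_blk : toy_dom_lca(lca, use_blk);
}
#endif // GCM_EXPOSITORY_SKETCHES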

//----------------------------raise_LCA_above_marks----------------------------
// Return a new LCA that dominates LCA and any of its marked predecessors.
// Search all my parents up to 'early' (exclusive), looking for predecessors
// which are marked with the given index.  Return the LCA (in the dom tree)
// of all marked blocks.  If there are none marked, return the original
// LCA.
static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark,
                                    Block* early, Block_Array &bbs) {
  Block_List worklist;
  worklist.push(LCA);
  while (worklist.size() > 0) {
    Block* mid = worklist.pop();
    if (mid == early)  continue;  // stop searching here

    // Test and set the visited bit.
    if (mid->raise_LCA_visited() == mark)  continue;  // already visited
    mid->set_raise_LCA_visited(mark);

    // Don't process the current LCA, otherwise the search may terminate early
    if (mid != LCA && mid->raise_LCA_mark() == mark) {
      // Raise the LCA.
      LCA = mid->dom_lca(LCA);
      if (LCA == early)  break;   // stop searching everywhere
      assert(early->dominates(LCA), "early is high enough");
      // Resume searching at that point, skipping intermediate levels.
      worklist.push(LCA);
    } else {
      // Keep searching through this block's predecessors.
      for (uint j = 1, jmax = mid->num_preds(); j < jmax; j++) {
        Block* mid_parent = bbs[ mid->pred(j)->_idx ];
        worklist.push(mid_parent);
      }
    }
  }
  return LCA;
}

//--------------------------memory_early_block--------------------------------
// This is a variation of find_deepest_input, the heart of schedule_early.
// Find the "early" block for a load, if we considered only memory and
// address inputs, that is, if other data inputs were ignored.
//
// Because a subset of edges are considered, the resulting block will
// be earlier (at a shallower dom_depth) than the true schedule_early
// point of the node. We compute this earlier block as a more permissive
// site for anti-dependency insertion, but only if subsume_loads is enabled.
static Block* memory_early_block(Node* load, Block* early, Block_Array &bbs) {
  Node* base;
  Node* index;
  Node* store = load->in(MemNode::Memory);
  load->as_Mach()->memory_inputs(base, index);

  assert(base != NodeSentinel && index != NodeSentinel,
         "unexpected base/index inputs");

  Node* mem_inputs[4];
  int mem_inputs_length = 0;
  if (base != NULL)  mem_inputs[mem_inputs_length++] = base;
  if (index != NULL) mem_inputs[mem_inputs_length++] = index;
  if (store != NULL) mem_inputs[mem_inputs_length++] = store;

  // In the comparison below, add one to account for the control input,
  // which may be null, but always takes up a spot in the in array.
  if (mem_inputs_length + 1 < (int) load->req()) {
    // This "load" has more inputs than just the memory, base and index inputs.
    // For purposes of checking anti-dependences, we need to start
    // from the early block of only the address portion of the instruction,
    // and ignore other blocks that may have factored into the wider
    // schedule_early calculation.
    if (load->in(0) != NULL) mem_inputs[mem_inputs_length++] = load->in(0);

    Block* deepb           = NULL;        // Deepest block so far
    int    deepb_dom_depth = 0;
    for (int i = 0; i < mem_inputs_length; i++) {
      Block* inb = bbs[mem_inputs[i]->_idx];
      if (deepb_dom_depth < (int) inb->_dom_depth) {
        // The new inb must be dominated by the previous deepb.
        // The various inputs must be linearly ordered in the dom
        // tree, or else there will not be a unique deepest block.
        DEBUG_ONLY(assert_dom(deepb, inb, load, bbs));
        deepb = inb;                      // Save deepest block
        deepb_dom_depth = deepb->_dom_depth;
      }
    }
    early = deepb;
  }

  return early;
}

//--------------------------insert_anti_dependences---------------------------
// A load may need to witness memory that nearby stores can overwrite.
// For each nearby store, either insert an "anti-dependence" edge
// from the load to the store, or else move LCA upward to force the
// load to (eventually) be scheduled in a block above the store.
//
// Do not add edges to stores on distinct control-flow paths;
// only add edges to stores which might interfere.
//
// Return the (updated) LCA.  There will not be any possibly interfering
// store between the load's "early block" and the updated LCA.
// Any stores in the updated LCA will have new precedence edges
// back to the load.  The caller is expected to schedule the load
// in the LCA, in which case the precedence edges will make LCM
// preserve anti-dependences.  The caller may also hoist the load
// above the LCA, if it is not the early block.
Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
  assert(load->needs_anti_dependence_check(), "must be a load of some sort");
  assert(LCA != NULL, "");
  DEBUG_ONLY(Block* LCA_orig = LCA);

  // Compute the alias index.  Loads and stores with different alias indices
  // do not need anti-dependence edges.
  uint load_alias_idx = C->get_alias_index(load->adr_type());
#ifdef ASSERT
  if (load_alias_idx == Compile::AliasIdxBot && C->AliasLevel() > 0 &&
      (PrintOpto || VerifyAliases ||
       PrintMiscellaneous && (WizardMode || Verbose))) {
    // Load nodes should not consume all of memory.
    // Reporting a bottom type indicates a bug in adlc.
    // If some particular type of node validly consumes all of memory,
    // sharpen the preceding "if" to exclude it, so we can catch bugs here.
    tty->print_cr("*** Possible Anti-Dependence Bug:  Load consumes all of memory.");
    load->dump(2);
    if (VerifyAliases)  assert(load_alias_idx != Compile::AliasIdxBot, "");
  }
#endif
  assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrComp),
         "String compare is only known 'load' that does not conflict with any stores");

  if (!C->alias_type(load_alias_idx)->is_rewritable()) {
    // It is impossible to spoil this load by putting stores before it,
    // because we know that the stores will never update the value
    // which 'load' must witness.
    return LCA;
  }

  node_idx_t load_index = load->_idx;

  // Note the earliest legal placement of 'load', as determined by
  // the unique point in the dom tree where all memory effects
  // and other inputs are first available.  (Computed by schedule_early.)
  // For normal loads, 'early' is the shallowest place (dom graph wise)
  // to look for anti-deps between this load and any store.
  Block* early = _bbs[load_index];

  // If we are subsuming loads, compute an "early" block that only considers
  // memory or address inputs. This block may be different than the
  // schedule_early block in that it could be at an even shallower depth in the
  // dominator tree, and allow for a broader discovery of anti-dependences.
  if (C->subsume_loads()) {
    early = memory_early_block(load, early, _bbs);
  }

  ResourceArea *area = Thread::current()->resource_area();
  Node_List worklist_mem(area);     // prior memory state to store
  Node_List worklist_store(area);   // possible-def to explore
  Node_List worklist_visited(area); // visited mergemem nodes
  Node_List non_early_stores(area); // all relevant stores outside of early
  bool must_raise_LCA = false;

#ifdef TRACK_PHI_INPUTS
  // %%% This extra checking fails because MergeMem nodes are not GVNed.
  // Provide "phi_inputs" to check if every input to a PhiNode is from the
  // original memory state.  This indicates a PhiNode which should not
  // prevent the load from sinking.  For such a block, set_raise_LCA_mark
  // may be overly conservative.
  // Mechanism: count inputs seen for each Phi encountered in worklist_store.
  DEBUG_ONLY(GrowableArray<uint> phi_inputs(area, C->unique(),0,0));
#endif

  // 'load' uses some memory state; look for users of the same state.
  // Recurse through MergeMem nodes to the stores that use them.

  // Each of these stores is a possible definition of memory
  // that 'load' needs to use.  We need to force 'load'
  // to occur before each such store.  When the store is in
  // the same block as 'load', we insert an anti-dependence
  // edge load->store.

  // The relevant stores "nearby" the load consist of a tree rooted
  // at initial_mem, with internal nodes of type MergeMem.
  // Therefore, the branches visited by the worklist are of this form:
  //    initial_mem -> (MergeMem ->)* store
  // The anti-dependence constraints apply only to the fringe of this tree.

  Node* initial_mem = load->in(MemNode::Memory);
  worklist_store.push(initial_mem);
  worklist_visited.push(initial_mem);
  worklist_mem.push(NULL);
  while (worklist_store.size() > 0) {
    // Examine a nearby store to see if it might interfere with our load.
    Node* mem   = worklist_mem.pop();
    Node* store = worklist_store.pop();
    uint op = store->Opcode();

    // MergeMems do not directly have anti-deps.
    // Treat them as internal nodes in a forward tree of memory states,
    // the leaves of which are each a 'possible-def'.
    if (store == initial_mem   // root (exclusive) of tree we are searching
        || op == Op_MergeMem   // internal node of tree we are searching
        ) {
      mem = store;   // It's not a possibly interfering store.
      if (store == initial_mem)
        initial_mem = NULL;  // only process initial memory once

      for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
        store = mem->fast_out(i);
        if (store->is_MergeMem()) {
          // Be sure we don't get into combinatorial problems.
          // (Allow phis to be repeated; they can merge two relevant states.)
          uint j = worklist_visited.size();
          for (; j > 0; j--) {
            if (worklist_visited.at(j-1) == store)  break;
          }
          if (j > 0)  continue; // already on work list; do not repeat
          worklist_visited.push(store);
        }
        worklist_mem.push(mem);
        worklist_store.push(store);
      }
      continue;
    }

    if (op == Op_MachProj || op == Op_Catch)   continue;
    if (store->needs_anti_dependence_check())  continue;  // not really a store

    // Compute the alias index.  Loads and stores with different alias
    // indices do not need anti-dependence edges.  Wide MemBar's are
    // anti-dependent on everything (except immutable memories).
    const TypePtr* adr_type = store->adr_type();
    if (!C->can_alias(adr_type, load_alias_idx))  continue;

    // Most slow-path runtime calls do NOT modify Java memory, but
    // they can block and so write Raw memory.
    if (store->is_Mach()) {
      MachNode* mstore = store->as_Mach();
      if (load_alias_idx != Compile::AliasIdxRaw) {
        // Check for call into the runtime using the Java calling
        // convention (and from there into a wrapper); it has no
        // _method.  Can't do this optimization for Native calls because
        // they CAN write to Java memory.
        if (mstore->ideal_Opcode() == Op_CallStaticJava) {
          assert(mstore->is_MachSafePoint(), "");
          MachSafePointNode* ms = (MachSafePointNode*) mstore;
          assert(ms->is_MachCallJava(), "");
          MachCallJavaNode* mcj = (MachCallJavaNode*) ms;
          if (mcj->_method == NULL) {
            // These runtime calls do not write to Java visible memory
            // (other than Raw) and so do not require anti-dependence edges.
            continue;
          }
        }
        // Same for SafePoints: they read/write Raw but only read otherwise.
        // This is basically a workaround for SafePoints only defining control
        // instead of control + memory.
        if (mstore->ideal_Opcode() == Op_SafePoint)
          continue;
      } else {
        // Some raw memory, such as the load of "top" at an allocation,
        // can be control dependent on the previous safepoint. See
        // comments in GraphKit::allocate_heap() about control input.
        // Inserting an anti-dep between such a safepoint and a use
        // creates a cycle, and will cause a subsequent failure in
        // local scheduling.  (BugId 4919904)
        // (%%% How can a control input be a safepoint and not a projection??)
        if (mstore->ideal_Opcode() == Op_SafePoint && load->in(0) == mstore)
          continue;
      }
    }

    // Identify a block that the current load must be above,
    // or else observe that 'store' is all the way up in the
    // earliest legal block for 'load'.  In the latter case,
    // immediately insert an anti-dependence edge.
    Block* store_block = _bbs[store->_idx];
    assert(store_block != NULL, "unused killing projections skipped above");

    if (store->is_Phi()) {
      // 'load' uses memory which is one (or more) of the Phi's inputs.
      // It must be scheduled not before the Phi, but rather before
      // each of the relevant Phi inputs.
      //
      // Instead of finding the LCA of all inputs to a Phi that match 'mem',
      // we mark each corresponding predecessor block and do a combined
      // hoisting operation later (raise_LCA_above_marks).
      //
      // Do not assert(store_block != early, "Phi merging memory after access")
      // PhiNode may be at start of block 'early' with backedge to 'early'
      DEBUG_ONLY(bool found_match = false);
      for (uint j = PhiNode::Input, jmax = store->req(); j < jmax; j++) {
        if (store->in(j) == mem) {   // Found matching input?
          DEBUG_ONLY(found_match = true);
          Block* pred_block = _bbs[store_block->pred(j)->_idx];
          if (pred_block != early) {
            // If any predecessor of the Phi matches the load's "early block",
            // we do not need a precedence edge between the Phi and 'load'
            // since the load will be forced into a block preceding the Phi.
            pred_block->set_raise_LCA_mark(load_index);
            assert(!LCA_orig->dominates(pred_block) ||
                   early->dominates(pred_block), "early is high enough");
            must_raise_LCA = true;
          }
        }
      }
      assert(found_match, "no worklist bug");
#ifdef TRACK_PHI_INPUTS
#ifdef ASSERT
      // This assert asks about correct handling of PhiNodes, which may not
      // have all input edges directly from 'mem'. See BugId 4621264
      int num_mem_inputs = phi_inputs.at_grow(store->_idx,0) + 1;
      // Increment by exactly one even if there are multiple copies of 'mem'
      // coming into the phi, because we will run this block several times
      // if there are several copies of 'mem'.  (That's how DU iterators work.)
      phi_inputs.at_put(store->_idx, num_mem_inputs);
      assert(PhiNode::Input + num_mem_inputs < store->req(),
             "Expect at least one phi input will not be from original memory state");
#endif //ASSERT
#endif //TRACK_PHI_INPUTS
    } else if (store_block != early) {
      // 'store' is between the current LCA and earliest possible block.
      // Label its block, and decide later on how to raise the LCA
      // to include the effect on LCA of this store.
      // If this store's block gets chosen as the raised LCA, we
      // will find him on the non_early_stores list and stick him
      // with a precedence edge.
      // (But, don't bother if LCA is already raised all the way.)
      if (LCA != early) {
        store_block->set_raise_LCA_mark(load_index);
        must_raise_LCA = true;
        non_early_stores.push(store);
      }
    } else {
      // Found a possibly-interfering store in the load's 'early' block.
      // This means 'load' cannot sink at all in the dominator tree.
      // Add an anti-dep edge, and squeeze 'load' into the highest block.
      assert(store != load->in(0), "dependence cycle found");
      if (verify) {
        assert(store->find_edge(load) != -1, "missing precedence edge");
      } else {
        store->add_prec(load);
      }
      LCA = early;
      // This turns off the process of gathering non_early_stores.
    }
  }
  // (Worklist is now empty; all nearby stores have been visited.)

  // Finished if 'load' must be scheduled in its 'early' block.
  // If we found any stores there, they have already been given
  // precedence edges.
  if (LCA == early)  return LCA;

  // We get here only if there are no possibly-interfering stores
  // in the load's 'early' block.  Move LCA up above all predecessors
  // which contain stores we have noted.
  //
  // The raised LCA block can be a home to such interfering stores,
  // but its predecessors must not contain any such stores.
  //
  // The raised LCA will be a lower bound for placing the load,
  // preventing the load from sinking past any block containing
  // a store that may invalidate the memory state required by 'load'.
  if (must_raise_LCA)
    LCA = raise_LCA_above_marks(LCA, load->_idx, early, _bbs);
  if (LCA == early)  return LCA;

  // Insert anti-dependence edges from 'load' to each store
  // in the non-early LCA block.
  // Mine the non_early_stores list for such stores.
  if (LCA->raise_LCA_mark() == load_index) {
    while (non_early_stores.size() > 0) {
      Node* store = non_early_stores.pop();
      Block* store_block = _bbs[store->_idx];
      if (store_block == LCA) {
        // add anti_dependence from store to load in its own block
        assert(store != load->in(0), "dependence cycle found");
        if (verify) {
          assert(store->find_edge(load) != -1, "missing precedence edge");
        } else {
          store->add_prec(load);
        }
      } else {
        assert(store_block->raise_LCA_mark() == load_index, "block was marked");
        // Any other stores we found must be either inside the new LCA
        // or else outside the original LCA.  In the latter case, they
        // did not interfere with any use of 'load'.
        assert(LCA->dominates(store_block)
               || !LCA_orig->dominates(store_block), "no stray stores");
      }
    }
  }

  // Return the highest block containing stores; any stores
  // within that block have been given anti-dependence edges.
  return LCA;
}
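
// Editorial sketch (illustrative only): for a non-Phi store, the body of the
// worklist loop above boils down to a three-way decision per interfering
// store (Phi stores apply the same marking per matching predecessor):
#ifdef GCM_EXPOSITORY_SKETCHES
enum ToyAntiDepAction {
  TOY_ADD_PREC_EDGE,   // store in 'early': load cannot sink; add load->store prec edge
  TOY_MARK_AND_RAISE,  // store below 'early': mark its block, raise LCA later
  TOY_IGNORE           // LCA already raised to 'early': marks would be pointless
};
static ToyAntiDepAction toy_classify_store(ToyBlock* store_blk,
                                           ToyBlock* early,
                                           ToyBlock* lca) {
  if (store_blk == early)  return TOY_ADD_PREC_EDGE;
  if (lca != early)        return TOY_MARK_AND_RAISE;
  return TOY_IGNORE;
}
#endif // GCM_EXPOSITORY_SKETCHES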

// This class is used to iterate backwards over the nodes in the graph.

class Node_Backward_Iterator {

private:
  Node_Backward_Iterator();

public:
  // Constructor for the iterator
  Node_Backward_Iterator(Node *root, VectorSet &visited, Node_List &stack, Block_Array &bbs);

  // Postincrement operator to iterate over the nodes
  Node *next();

private:
  VectorSet   &_visited;
  Node_List   &_stack;
  Block_Array &_bbs;
};

// Constructor for the Node_Backward_Iterator
Node_Backward_Iterator::Node_Backward_Iterator( Node *root, VectorSet &visited, Node_List &stack, Block_Array &bbs )
  : _visited(visited), _stack(stack), _bbs(bbs) {
  // The stack should contain exactly the root
  stack.clear();
  stack.push(root);

  // Clear the visited bits
  visited.Clear();
}

// Iterator for the Node_Backward_Iterator
Node *Node_Backward_Iterator::next() {

  // If the _stack is empty, then just return NULL: finished.
  if ( !_stack.size() )
    return NULL;

  // '_stack' is emulating a real _stack.  The 'visit-all-users' loop has been
  // made stateless, so I do not need to record the index 'i' on my _stack.
  // Instead I visit all users each time, scanning for unvisited users.
  // I visit unvisited not-anti-dependence users first, then anti-dependent
  // children next.
  Node *self = _stack.pop();

  // I cycle here when I am entering a deeper level of recursion.
  // The key variable 'self' was set prior to jumping here.
  while( 1 ) {

    _visited.set(self->_idx);

    // Now schedule all uses as late as possible.
    uint src     = self->is_Proj() ? self->in(0)->_idx : self->_idx;
    uint src_rpo = _bbs[src]->_rpo;

    // Schedule all nodes in a post-order visit
    Node *unvisited = NULL;  // Unvisited anti-dependent Node, if any

    // Scan for unvisited nodes
    for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
      // For all uses, schedule late
      Node* n = self->fast_out(i); // Use

      // Skip already visited children
      if ( _visited.test(n->_idx) )
        continue;

      // do not traverse backward control edges
      Node *use = n->is_Proj() ? n->in(0) : n;
      uint use_rpo = _bbs[use->_idx]->_rpo;

      if ( use_rpo < src_rpo )
        continue;

      // Phi nodes always precede uses in a basic block
      if ( use_rpo == src_rpo && use->is_Phi() )
        continue;

      unvisited = n;  // Found unvisited

      // Check for possible-anti-dependent
      if( !n->needs_anti_dependence_check() )
        break;  // Not visited, not anti-dep; schedule it NOW
    }

    // Did I find an unvisited not-anti-dependent Node?
    if ( !unvisited )
      break;  // All done with children; post-visit 'self'

    // Visit the unvisited Node.  Contains the obvious push to
    // indicate I'm entering a deeper level of recursion.  I push the
    // old state onto the _stack and set a new state and loop (recurse).
    _stack.push(self);
    self = unvisited;
  } // End recursion loop

  return self;
}
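
// Editorial sketch (illustrative only): the iterator above is a stateless
// post-order DFS; instead of recording a child index per frame, it rescans
// all users each time. The same pattern on a hypothetical toy node type
// (ignoring the RPO and anti-dependence ordering refinements):
#ifdef GCM_EXPOSITORY_SKETCHES
struct ToyDFSNode {
  ToyDFSNode** users;   // NULL-terminated user list
  bool         visited;
};
// Pop a node, descend into any unvisited user, and return the node only
// once all of its users have been visited (post-order).
static ToyDFSNode* toy_next(ToyDFSNode** stack, int* sp) {
  if (*sp == 0)  return NULL;
  ToyDFSNode* self = stack[--(*sp)];
  while (true) {
    self->visited = true;
    ToyDFSNode* unvisited = NULL;
    for (ToyDFSNode** u = self->users; *u != NULL; u++) {
      if (!(*u)->visited) { unvisited = *u;  break; }
    }
    if (unvisited == NULL)  break;       // post-visit 'self'
    stack[(*sp)++] = self;               // emulate recursion: push old state
    self = unvisited;
  }
  return self;
}
#endif // GCM_EXPOSITORY_SKETCHES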

//------------------------------ComputeLatenciesBackwards----------------------
// Compute the latency of all the instructions.
void PhaseCFG::ComputeLatenciesBackwards(VectorSet &visited, Node_List &stack) {
#ifndef PRODUCT
  if (trace_opto_pipelining())
    tty->print("\n#---- ComputeLatenciesBackwards ----\n");
#endif

  Node_Backward_Iterator iter((Node *)_root, visited, stack, _bbs);
  Node *n;

  // Walk over all the nodes from last to first
  while (n = iter.next()) {
    // Set the latency for the definitions of this instruction
    partial_latency_of_defs(n);
  }
} // end ComputeLatenciesBackwards

//------------------------------partial_latency_of_defs------------------------
// Compute the latency impact of this node on all defs.  This computes
// a number that increases as we approach the beginning of the routine.
void PhaseCFG::partial_latency_of_defs(Node *n) {
  // Set the latency for this instruction
#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("# latency_to_inputs: node_latency[%d] = %d for node",
               n->_idx, _node_latency.at_grow(n->_idx));
    dump();
  }
#endif

  if (n->is_Proj())
    n = n->in(0);

  if (n->is_Root())
    return;

  uint nlen = n->len();
  uint use_latency = _node_latency.at_grow(n->_idx);
  uint use_pre_order = _bbs[n->_idx]->_pre_order;

  for ( uint j=0; j<nlen; j++ ) {
    Node *def = n->in(j);

    if (!def || def == n)
      continue;

    // Walk backwards thru projections
    if (def->is_Proj())
      def = def->in(0);

#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print("# in(%2d): ", j);
      def->dump();
    }
#endif

    // If the defining block is not known, assume it is ok
    Block *def_block = _bbs[def->_idx];
    uint def_pre_order = def_block ? def_block->_pre_order : 0;

    if ( (use_pre_order <  def_pre_order) ||
         (use_pre_order == def_pre_order && n->is_Phi()) )
      continue;

    uint delta_latency = n->latency(j);
    uint current_latency = delta_latency + use_latency;

    if (_node_latency.at_grow(def->_idx) < current_latency) {
      _node_latency.at_put_grow(def->_idx, current_latency);
    }

#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("# %d + edge_latency(%d) == %d -> %d, node_latency[%d] = %d",
                    use_latency, j, delta_latency, current_latency, def->_idx,
                    _node_latency.at_grow(def->_idx));
    }
#endif
  }
}
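
// Editorial sketch (illustrative only): the relaxation step above, isolated.
// A def must be ready its edge latency before its latest use, so each
// def-use edge can only raise the def's latency:
#ifdef GCM_EXPOSITORY_SKETCHES
static void toy_update_def_latency(unsigned* latency, int def_idx,
                                   unsigned use_latency, unsigned edge_latency) {
  unsigned candidate = use_latency + edge_latency;
  if (latency[def_idx] < candidate)
    latency[def_idx] = candidate;   // keep the maximum over all uses
}
#endif // GCM_EXPOSITORY_SKETCHES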

//------------------------------latency_from_use-------------------------------
// Compute the latency of a specific use
int PhaseCFG::latency_from_use(Node *n, const Node *def, Node *use) {
  // If self-reference, return no latency
  if (use == n || use->is_Root())
    return 0;

  uint def_pre_order = _bbs[def->_idx]->_pre_order;
  uint latency = 0;

  // If the use is not a projection, then it is simple...
  if (!use->is_Proj()) {
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print("# out(): ");
      use->dump();
    }
#endif

    uint use_pre_order = _bbs[use->_idx]->_pre_order;

    if (use_pre_order < def_pre_order)
      return 0;

    if (use_pre_order == def_pre_order && use->is_Phi())
      return 0;

    uint nlen = use->len();
    uint nl = _node_latency.at_grow(use->_idx);

    for ( uint j=0; j<nlen; j++ ) {
      if (use->in(j) == n) {
        // Change this if we want local latencies
        uint ul = use->latency(j);
        uint  l = ul + nl;
        if (latency < l) latency = l;
#ifndef PRODUCT
        if (trace_opto_pipelining()) {
          tty->print_cr("# %d + edge_latency(%d) == %d -> %d, latency = %d",
                        nl, j, ul, l, latency);
        }
#endif
      }
    }
  } else {
    // This is a projection, just grab the latency of the use(s)
    for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
      uint l = latency_from_use(use, def, use->fast_out(j));
      if (latency < l) latency = l;
    }
  }

  return latency;
}

//------------------------------latency_from_uses------------------------------
// Compute the latency of this instruction relative to all of its uses.
// This computes a number that increases as we approach the beginning of the
// routine.
void PhaseCFG::latency_from_uses(Node *n) {
  // Set the latency for this instruction
#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("# latency_from_outputs: node_latency[%d] = %d for node",
               n->_idx, _node_latency.at_grow(n->_idx));
    dump();
  }
#endif
  uint latency=0;
  const Node *def = n->is_Proj() ? n->in(0): n;

  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    uint l = latency_from_use(n, def, n->fast_out(i));

    if (latency < l) latency = l;
  }

  _node_latency.at_put_grow(n->_idx, latency);
}

//------------------------------hoist_to_cheaper_block-------------------------
// Pick a block for node self, between early and LCA, that is a cheaper
// alternative to LCA.
Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
  const double delta = 1+PROB_UNLIKELY_MAG(4);
  Block* least       = LCA;
  double least_freq  = least->_freq;
  uint target        = _node_latency.at_grow(self->_idx);
  uint start_latency = _node_latency.at_grow(LCA->_nodes[0]->_idx);
  uint end_latency   = _node_latency.at_grow(LCA->_nodes[LCA->end_idx()]->_idx);
  bool in_latency    = (target <= start_latency);
  const Block* root_block = _bbs[_root->_idx];

  // Turn off latency scheduling if scheduling is just plain off
  if (!C->do_scheduling())
    in_latency = true;

  // Do not hoist (to cover latency) instructions which target a
  // single register.  Hoisting stretches the live range of the
  // single register and may force spilling.
  MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL;
  if (mach && mach->out_RegMask().is_bound1() && mach->out_RegMask().is_NotEmpty())
    in_latency = true;

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print("# Find cheaper block for latency %d: ",
               _node_latency.at_grow(self->_idx));
    self->dump();
    tty->print_cr("# B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
                  LCA->_pre_order,
                  LCA->_nodes[0]->_idx,
                  start_latency,
                  LCA->_nodes[LCA->end_idx()]->_idx,
                  end_latency,
                  least_freq);
  }
#endif

  // Walk up the dominator tree from LCA (Lowest common ancestor) to
  // the earliest legal location.  Capture the least execution frequency.
  while (LCA != early) {
    LCA = LCA->_idom;         // Follow up the dominator tree

    if (LCA == NULL) {
      // Bailout without retry
      C->record_method_not_compilable("late schedule failed: LCA == NULL");
      return least;
    }

    // Don't hoist machine instructions to the root basic block
    if (mach && LCA == root_block)
      break;

    uint start_lat = _node_latency.at_grow(LCA->_nodes[0]->_idx);
    uint end_idx   = LCA->end_idx();
    uint end_lat   = _node_latency.at_grow(LCA->_nodes[end_idx]->_idx);
    double LCA_freq = LCA->_freq;
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("# B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
                    LCA->_pre_order, LCA->_nodes[0]->_idx, start_lat, end_idx, end_lat, LCA_freq);
    }
#endif
    if (LCA_freq < least_freq              || // Better Frequency
        ( !in_latency                   &&    // No block containing latency
          LCA_freq < least_freq * delta &&    // No worse frequency
          target >= end_lat             &&    // within latency range
          !self->is_iteratively_computed() )  // But don't hoist IV increments
            // because they may end up above other uses of their phi forcing
            // their result register to be different from their input.
        ) {
      least = LCA;            // Found cheaper block
      least_freq = LCA_freq;
      start_latency = start_lat;
      end_latency = end_lat;
      if (target <= start_lat)
        in_latency = true;
    }
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print_cr("# Choose block B%d with start latency=%d and freq=%g",
                  least->_pre_order, start_latency, least_freq);
  }
#endif

  // See if the latency needs to be updated
  if (target < end_latency) {
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("# Change latency for [%4d] from %d to %d", self->_idx, target, end_latency);
    }
#endif
    _node_latency.at_put_grow(self->_idx, end_latency);
    partial_latency_of_defs(self);
  }

  return least;
}
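
// Editorial sketch (illustrative only): the acceptance test in the dominator
// walk above, as a predicate. A candidate block wins if it is strictly less
// frequent, or nearly as cheap while still covering the node's latency:
#ifdef GCM_EXPOSITORY_SKETCHES
static bool toy_is_cheaper(double cand_freq, double least_freq, double delta,
                           bool in_latency, unsigned target, unsigned end_lat,
                           bool iteratively_computed) {
  if (cand_freq < least_freq)  return true;   // better frequency
  return !in_latency &&                       // latency not yet covered
         cand_freq < least_freq * delta &&    // no worse frequency (delta ~ 1+eps)
         target >= end_lat &&                 // within latency range
         !iteratively_computed;               // don't hoist IV increments
}
#endif // GCM_EXPOSITORY_SKETCHES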


//------------------------------schedule_late-----------------------------------
// Now schedule all codes as LATE as possible.  This is the LCA in the
// dominator tree of all USES of a value.  Pick the block with the least
// loop nesting depth that is lowest in the dominator tree.
extern const char must_clone[];
void PhaseCFG::schedule_late(VectorSet &visited, Node_List &stack) {
#ifndef PRODUCT
  if (trace_opto_pipelining())
    tty->print("\n#---- schedule_late ----\n");
#endif

  Node_Backward_Iterator iter((Node *)_root, visited, stack, _bbs);
  Node *self;

  // Walk over all the nodes from last to first
  while (self = iter.next()) {
    Block* early = _bbs[self->_idx];   // Earliest legal placement

    if (self->is_top()) {
      // Top node goes in bb #2 with other constants.
      // It must be special-cased, because it has no out edges.
      early->add_inst(self);
      continue;
    }

    // No uses, just terminate
    if (self->outcnt() == 0) {
      assert(self->Opcode() == Op_MachProj, "sanity");
      continue;                   // Must be a dead machine projection
    }

    // If node is pinned in the block, then no scheduling can be done.
    if( self->pinned() )          // Pinned in block?
      continue;

    MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL;
    if (mach) {
      switch (mach->ideal_Opcode()) {
      case Op_CreateEx:
        // Don't move exception creation
        early->add_inst(self);
        continue;
        break;
      case Op_CheckCastPP:
        // Don't move CheckCastPP nodes away from their input, if the input
        // is a rawptr (5071820).
        Node *def = self->in(1);
        if (def != NULL && def->bottom_type()->base() == Type::RawPtr) {
          early->add_inst(self);
          continue;
        }
        break;
      }
    }

    // Gather LCA of all uses
    Block *LCA = NULL;
    {
      for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
        // For all uses, find LCA
        Node* use = self->fast_out(i);
        LCA = raise_LCA_above_use(LCA, use, self, _bbs);
      }
    }  // (Hide defs of imax, i from rest of block.)

    // Place temps in the block of their use.  This isn't a
    // requirement for correctness but it reduces useless
    // interference between temps and other nodes.
    if (mach != NULL && mach->is_MachTemp()) {
      _bbs.map(self->_idx, LCA);
      LCA->add_inst(self);
      continue;
    }

    // Check if 'self' could be anti-dependent on memory
    if (self->needs_anti_dependence_check()) {
      // Hoist LCA above possible-defs and insert anti-dependences to
      // defs in new LCA block.
      LCA = insert_anti_dependences(LCA, self);
    }

    if (early->_dom_depth > LCA->_dom_depth) {
      // Somehow the LCA has moved above the earliest legal point.
      // (One way this can happen is via memory_early_block.)
      if (C->subsume_loads() == true && !C->failing()) {
        // Retry with subsume_loads == false
        // If this is the first failure, the sentinel string will "stick"
        // to the Compile object, and the C2Compiler will see it and retry.
        C->record_failure(C2Compiler::retry_no_subsuming_loads());
      } else {
        // Bailout without retry when (early->_dom_depth > LCA->_dom_depth)
        C->record_method_not_compilable("late schedule failed: incorrect graph");
      }
      return;
    }

    // If there is no opportunity to hoist, then we're done.
    bool try_to_hoist = (LCA != early);

    // Must clone guys stay next to use; no hoisting allowed.
    // Also cannot hoist guys that alter memory or are otherwise not
    // allocatable (hoisting can make a value live longer, leading to
    // anti and output dependency problems which are normally resolved
    // by the register allocator giving everyone a different register).
    if (mach != NULL && must_clone[mach->ideal_Opcode()])
      try_to_hoist = false;

    Block* late = NULL;
    if (try_to_hoist) {
      // Now find the block with the least execution frequency.
      // Start at the latest schedule and work up to the earliest schedule
      // in the dominator tree.  Thus the Node will dominate all its uses.
      late = hoist_to_cheaper_block(LCA, early, self);
    } else {
      // Just use the LCA of the uses.
      late = LCA;
    }

    // Put the node into target block
    schedule_node_into_block(self, late);

#ifdef ASSERT
    if (self->needs_anti_dependence_check()) {
      // since precedence edges are only inserted when we're sure they
      // are needed make sure that after placement in a block we don't
      // need any new precedence edges.
      verify_anti_dependences(late, self);
    }
#endif
  } // Loop until all nodes have been visited

} // end ScheduleLate
1189 | |
1190 //------------------------------GlobalCodeMotion------------------------------- | |
1191 void PhaseCFG::GlobalCodeMotion( Matcher &matcher, uint unique, Node_List &proj_list ) { | |
1192 ResourceMark rm; | |
1193 | |
1194 #ifndef PRODUCT | |
1195 if (trace_opto_pipelining()) { | |
1196 tty->print("\n---- Start GlobalCodeMotion ----\n"); | |
1197 } | |
1198 #endif | |
1199 | |
1200 // Initialize the bbs.map for things on the proj_list | |
1201 uint i; | |
1202 for( i=0; i < proj_list.size(); i++ ) | |
1203 _bbs.map(proj_list[i]->_idx, NULL); | |
1204 | |
1205 // Set the basic block for Nodes pinned into blocks | |
1206 Arena *a = Thread::current()->resource_area(); | |
1207 VectorSet visited(a); | |
1208 schedule_pinned_nodes( visited ); | |
1209 | |
1210 // Find the earliest Block any instruction can be placed in. Some | |
1211 // instructions are pinned into Blocks. Unpinned instructions can | |
1212 // appear in last block in which all their inputs occur. | |
1213 visited.Clear(); | |
1214 Node_List stack(a); | |
1215 stack.map( (unique >> 1) + 16, NULL); // Pre-grow the list | |
1216 if (!schedule_early(visited, stack)) { | |
1217 // Bailout without retry | |
1218 C->record_method_not_compilable("early schedule failed"); | |
1219 return; | |
1220 } | |
1221 | |
1222 // Build Def-Use edges. | |
1223 proj_list.push(_root); // Add real root as another root | |
1224 proj_list.pop(); | |
1225 | |
1226 // Compute the latency information (via backwards walk) for all the | |
1227 // instructions in the graph | |
1228 GrowableArray<uint> node_latency; | |
1229 _node_latency = node_latency; | |
1230 | |
1231 if( C->do_scheduling() ) | |
1232 ComputeLatenciesBackwards(visited, stack); | |
1233 | |
1234 // Now schedule all codes as LATE as possible. This is the LCA in the | |
1235 // dominator tree of all USES of a value. Pick the block with the least | |
1236 // loop nesting depth that is lowest in the dominator tree. | |
1237 // ( visited.Clear() called in schedule_late()->Node_Backward_Iterator() ) | |
1238 schedule_late(visited, stack); | |
1239 if( C->failing() ) { | |
1240 // schedule_late fails only when graph is incorrect. | |
1241 assert(!VerifyGraphEdges, "verification should have failed"); | |
1242 return; | |
1243 } | |
1244 | |
1245 unique = C->unique(); | |
1246 | |
1247 #ifndef PRODUCT | |
1248 if (trace_opto_pipelining()) { | |
1249 tty->print("\n---- Detect implicit null checks ----\n"); | |
1250 } | |
1251 #endif | |
1252 | |
1253 // Detect implicit-null-check opportunities. Basically, find NULL checks | |
1254 // with suitable memory ops nearby. Use the memory op to do the NULL check. | |
1255 // I can generate a memory op if there is not one nearby. | |
1256 if (C->is_method_compilation()) { | |
1257 // Don't do it for natives, adapters, or runtime stubs | |
1258 int allowed_reasons = 0; | |
1259 // ...and don't do it when there have been too many traps, globally. | |
1260 for (int reason = (int)Deoptimization::Reason_none+1; | |
1261 reason < Compile::trapHistLength; reason++) { | |
1262 assert(reason < BitsPerInt, "recode bit map"); | |
1263 if (!C->too_many_traps((Deoptimization::DeoptReason) reason)) | |
1264 allowed_reasons |= nth_bit(reason); | |
1265 } | |
1266 // By reversing the loop direction we get a very minor gain on mpegaudio. | |
1267 // Feel free to revert to a forward loop for clarity. | |
1268 // for( int i=0; i < (int)matcher._null_check_tests.size(); i+=2 ) { | |
1269 for( int i= matcher._null_check_tests.size()-2; i>=0; i-=2 ) { | |
1270 Node *proj = matcher._null_check_tests[i ]; | |
1271 Node *val = matcher._null_check_tests[i+1]; | |
1272 _bbs[proj->_idx]->implicit_null_check(this, proj, val, allowed_reasons); | |
1273 // The implicit_null_check will only perform the transformation | |
1274 // if the null branch is truly uncommon, *and* it leads to an | |
1275 // uncommon trap. Combined with the too_many_traps guards | |
1276 // above, this prevents SEGV storms reported in 6366351, | |
1277 // by recompiling offending methods without this optimization. | |
1278 } | |
1279 } | |
1280 | |
1281 #ifndef PRODUCT | |
1282 if (trace_opto_pipelining()) { | |
1283 tty->print("\n---- Start Local Scheduling ----\n"); | |
1284 } | |
1285 #endif | |
1286 | |
1287 // Schedule locally. Right now a simple topological sort. | |
1288 // Later, do a real latency aware scheduler. | |
1289 int *ready_cnt = NEW_RESOURCE_ARRAY(int,C->unique()); | |
1290 memset( ready_cnt, -1, C->unique() * sizeof(int) ); | |
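// Note: memset with -1 writes 0xFF into every byte, so each int slot | |
// reads back as -1, marking every entry as not yet computed. | |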
1291 visited.Clear(); | |
1292 for (i = 0; i < _num_blocks; i++) { | |
1293 if (!_blocks[i]->schedule_local(this, matcher, ready_cnt, visited)) { | |
1294 if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) { | |
1295 C->record_method_not_compilable("local schedule failed"); | |
1296 } | |
1297 return; | |
1298 } | |
1299 } | |
1300 | |
1301 // If we inserted any instructions between a Call and its CatchNode, | |
1302 // clone the instructions on all paths below the Catch. | |
1303 for( i=0; i < _num_blocks; i++ ) | |
1304 _blocks[i]->call_catch_cleanup(_bbs); | |
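// The Catch must come right after the Call at the end of its block so | |
// exception dispatch can fan out immediately; any instruction scheduled | |
// between the two is therefore duplicated onto every control path below | |
// the Catch, keeping each path self-contained. | |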
1305 | |
1306 #ifndef PRODUCT | |
1307 if (trace_opto_pipelining()) { | |
1308 tty->print("\n---- After GlobalCodeMotion ----\n"); | |
1309 for (uint i = 0; i < _num_blocks; i++) { | |
1310 _blocks[i]->dump(); | |
1311 } | |
1312 } | |
1313 #endif | |
1314 } | |
1315 | |
1316 | |
1317 //------------------------------Estimate_Block_Frequency----------------------- | |
1318 // Estimate block frequencies based on IfNode probabilities. | |
1319 void PhaseCFG::Estimate_Block_Frequency() { | |
1320 int cnts = C->method() ? C->method()->interpreter_invocation_count() : 1; | |
1321 // Most of our algorithms will die horribly if frequency can become | |
1322 // negative, so make sure cnts is a sane value. | |
1323 if( cnts <= 0 ) cnts = 1; | |
1324 float f = (float)cnts/(float)FreqCountInvocations; | |
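// Example: if cnts is twice FreqCountInvocations, f = 2.0, and all the | |
// block frequencies computed below are ultimately scaled relative to | |
// that estimate of method-entry hotness. | |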
1325 | |
1326 // Create the loop tree and calculate loop depth. | |
1327 _root_loop = create_loop_tree(); | |
1328 _root_loop->compute_loop_depth(0); | |
1329 | |
1330 // Compute block frequency of each block, relative to a single loop entry. | |
1331 _root_loop->compute_freq(); | |
1332 | |
1333 // Adjust all frequencies to be relative to a single method entry | |
1334 _root_loop->_freq = f; | |
1335 _root_loop->scale_freq(); | |
1336 | |
1337 // Force paths ending at uncommon traps to be infrequent | |
1338 Block_List worklist; | |
1339 Block* root_blk = _blocks[0]; | |
1340 for (uint i = 0; i < root_blk->num_preds(); i++) { | |
1341 Block *pb = _bbs[root_blk->pred(i)->_idx]; | |
1342 if (pb->has_uncommon_code()) { | |
1343 worklist.push(pb); | |
1344 } | |
1345 } | |
1346 while (worklist.size() > 0) { | |
1347 Block* uct = worklist.pop(); | |
1348 uct->_freq = PROB_MIN; | |
1349 for (uint i = 0; i < uct->num_preds(); i++) { | |
1350 Block *pb = _bbs[uct->pred(i)->_idx]; | |
1351 if (pb->_num_succs == 1 && pb->_freq > PROB_MIN) { | |
1352 worklist.push(pb); | |
1353 } | |
1354 } | |
1355 } | |
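// Example (hypothetical block numbers): for a chain B7 -> B8 -> Root | |
// where B8 holds the uncommon trap and B7's only successor is B8, both | |
// B7 and B8 are clamped to PROB_MIN, so later phases treat the whole | |
// trap-bound tail as cold. | |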
1356 | |
1357 #ifndef PRODUCT | |
1358 if (PrintCFGBlockFreq) { | |
1359 tty->print_cr("CFG Block Frequencies"); | |
1360 _root_loop->dump_tree(); | |
1361 if (Verbose) { | |
1362 tty->print_cr("PhaseCFG dump"); | |
1363 dump(); | |
1364 tty->print_cr("Node dump"); | |
1365 _root->dump(99999); | |
1366 } | |
1367 } | |
1368 #endif | |
1369 } | |
1370 | |
1371 //----------------------------create_loop_tree-------------------------------- | |
1372 // Create a loop tree from the CFG | |
1373 CFGLoop* PhaseCFG::create_loop_tree() { | |
1374 | |
1375 #ifdef ASSERT | |
1376 assert( _blocks[0] == _broot, "" ); | |
1377 for (uint i = 0; i < _num_blocks; i++ ) { | |
1378 Block *b = _blocks[i]; | |
1379 // Check that the _loop fields are clear...we could clear them if not. | |
1380 assert(b->_loop == NULL, "clear _loop expected"); | |
1381 // Sanity check that the RPO numbering is reflected in the _blocks array. | |
1382 // It doesn't have to be for the loop tree to be built, but if it is not, | |
1383 // then the blocks have been reordered since dom graph building...which | |
1384 // calls the RPO numbering into question. | |
1385 assert(b->_rpo == i, "unexpected reverse post order number"); | |
1386 } | |
1387 #endif | |
1388 | |
1389 int idct = 0; | |
1390 CFGLoop* root_loop = new CFGLoop(idct++); | |
1391 | |
1392 Block_List worklist; | |
1393 | |
1394 // Assign blocks to loops | |
1395 for(uint i = _num_blocks - 1; i > 0; i-- ) { // skip Root block | |
1396 Block *b = _blocks[i]; | |
1397 | |
1398 if (b->head()->is_Loop()) { | |
1399 Block* loop_head = b; | |
1400 assert(loop_head->num_preds() - 1 == 2, "loop must have 2 predecessors"); | |
1401 Node* tail_n = loop_head->pred(LoopNode::LoopBackControl); | |
1402 Block* tail = _bbs[tail_n->_idx]; | |
1403 | |
1404 // Defensively filter out Loop nodes for non-single-entry loops. | |
1405 // For all reasonable loops, the head occurs before the tail in RPO. | |
1406 if (i <= tail->_rpo) { | |
1407 | |
1408 // The tail and (recursive) predecessors of the tail | |
1409 // are made members of a new loop. | |
1410 | |
1411 assert(worklist.size() == 0, "nonempty worklist"); | |
1412 CFGLoop* nloop = new CFGLoop(idct++); | |
1413 assert(loop_head->_loop == NULL, "just checking"); | |
1414 loop_head->_loop = nloop; | |
1415 // Add to nloop so push_pred() will skip over inner loops | |
1416 nloop->add_member(loop_head); | |
1417 nloop->push_pred(loop_head, LoopNode::LoopBackControl, worklist, _bbs); | |
1418 | |
1419 while (worklist.size() > 0) { | |
1420 Block* member = worklist.pop(); | |
1421 if (member != loop_head) { | |
1422 for (uint j = 1; j < member->num_preds(); j++) { | |
1423 nloop->push_pred(member, j, worklist, _bbs); | |
1424 } | |
1425 } | |
1426 } | |
1427 } | |
1428 } | |
1429 } | |
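// Illustrative outcome (hypothetical block numbers): for | |
// | |
//   for (...) {          // head B2, backedge from B5 | |
//     for (...) { ... }  // head B3, backedge from B4 | |
//   } | |
// | |
// the walk from B4 builds an inner CFGLoop containing B3 and B4, and | |
// the walk from B5 builds an outer CFGLoop containing B2 and B5; since | |
// blocks are visited in reverse RPO, the inner loop is formed first and | |
// push_pred() later attaches it as a child of the outer loop. | |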
1430 | |
1431 // Create a member list for each loop consisting | |
1432 // of both blocks and (immediate child) loops. | |
1433 for (uint i = 0; i < _num_blocks; i++) { | |
1434 Block *b = _blocks[i]; | |
1435 CFGLoop* lp = b->_loop; | |
1436 if (lp == NULL) { | |
1437 // Not assigned to a loop. Add it to the method's pseudo loop. | |
1438 b->_loop = root_loop; | |
1439 lp = root_loop; | |
1440 } | |
1441 if (lp == root_loop || b != lp->head()) { // loop heads are already members | |
1442 lp->add_member(b); | |
1443 } | |
1444 if (lp != root_loop) { | |
1445 if (lp->parent() == NULL) { | |
1446 // Not a nested loop. Make it a child of the method's pseudo loop. | |
1447 root_loop->add_nested_loop(lp); | |
1448 } | |
1449 if (b == lp->head()) { | |
1450 // Add nested loop to member list of parent loop. | |
1451 lp->parent()->add_member(lp); | |
1452 } | |
1453 } | |
1454 } | |
1455 | |
1456 return root_loop; | |
1457 } | |
1458 | |
1459 //------------------------------push_pred-------------------------------------- | |
1460 void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, Block_Array& node_to_blk) { | |
1461 Node* pred_n = blk->pred(i); | |
1462 Block* pred = node_to_blk[pred_n->_idx]; | |
1463 CFGLoop *pred_loop = pred->_loop; | |
1464 if (pred_loop == NULL) { | |
1465 // Filter out blocks for non-single-entry loops. | |
1466 // For all reasonable loops, the head occurs before the tail in RPO. | |
1467 if (pred->_rpo > head()->_rpo) { | |
1468 pred->_loop = this; | |
1469 worklist.push(pred); | |
1470 } | |
1471 } else if (pred_loop != this) { | |
1472 // Nested loop. | |
1473 while (pred_loop->_parent != NULL && pred_loop->_parent != this) { | |
1474 pred_loop = pred_loop->_parent; | |
1475 } | |
1476 // Make pred's loop be a child | |
1477 if (pred_loop->_parent == NULL) { | |
1478 add_nested_loop(pred_loop); | |
1479 // Continue with loop entry predecessor. | |
1480 Block* pred_head = pred_loop->head(); | |
1481 assert(pred_head->num_preds() - 1 == 2, "loop must have 2 predecessors"); | |
1482 assert(pred_head != head(), "loop head in only one loop"); | |
1483 push_pred(pred_head, LoopNode::EntryControl, worklist, node_to_blk); | |
1484 } else { | |
1485 assert(pred_loop->_parent == this && _parent == NULL, "just checking"); | |
1486 } | |
1487 } | |
1488 } | |
1489 | |
1490 //------------------------------add_nested_loop-------------------------------- | |
1491 // Make cl a child of the current loop in the loop tree. | |
1492 void CFGLoop::add_nested_loop(CFGLoop* cl) { | |
1493 assert(_parent == NULL, "no parent yet"); | |
1494 assert(cl != this, "not my own parent"); | |
1495 cl->_parent = this; | |
1496 CFGLoop* ch = _child; | |
1497 if (ch == NULL) { | |
1498 _child = cl; | |
1499 } else { | |
1500 while (ch->_sibling != NULL) { ch = ch->_sibling; } | |
1501 ch->_sibling = cl; | |
1502 } | |
1503 } | |
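// Example: adding loops A, B, C in that order yields the child chain | |
// _child -> A, A->_sibling -> B, B->_sibling -> C (names hypothetical). | |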
1504 | |
1505 //------------------------------compute_loop_depth----------------------------- | |
1506 // Store the loop depth in each CFGLoop object. | |
1507 // Recursively walk the children to do the same for them. | |
1508 void CFGLoop::compute_loop_depth(int depth) { | |
1509 _depth = depth; | |
1510 CFGLoop* ch = _child; | |
1511 while (ch != NULL) { | |
1512 ch->compute_loop_depth(depth + 1); | |
1513 ch = ch->_sibling; | |
1514 } | |
1515 } | |
1516 | |
1517 //------------------------------compute_freq----------------------------------- | |
1518 // Compute the frequency of each block and loop, relative to a single entry | |
1519 // into the dominating loop head. | |
1520 void CFGLoop::compute_freq() { | |
1521 // Bottom up traversal of loop tree (visit inner loops first.) | |
1522 // Set loop head frequency to 1.0, then transitively | |
1523 // compute frequency for all successors in the loop, | |
1524 // as well as for each exit edge. Inner loops are | |
1525 // treated as single blocks with loop exit targets | |
1526 // as the successor blocks. | |
1527 | |
1528 // Nested loops first | |
1529 CFGLoop* ch = _child; | |
1530 while (ch != NULL) { | |
1531 ch->compute_freq(); | |
1532 ch = ch->_sibling; | |
1533 } | |
1534 assert (_members.length() > 0, "no empty loops"); | |
1535 Block* hd = head(); | |
1536 hd->_freq = 1.0f; | |
1537 for (int i = 0; i < _members.length(); i++) { | |
1538 CFGElement* s = _members.at(i); | |
1539 float freq = s->_freq; | |
1540 if (s->is_block()) { | |
1541 Block* b = s->as_Block(); | |
1542 for (uint j = 0; j < b->_num_succs; j++) { | |
1543 Block* sb = b->_succs[j]; | |
1544 update_succ_freq(sb, freq * b->succ_prob(j)); | |
1545 } | |
1546 } else { | |
1547 CFGLoop* lp = s->as_CFGLoop(); | |
1548 assert(lp->_parent == this, "immediate child"); | |
1549 for (int k = 0; k < lp->_exits.length(); k++) { | |
1550 Block* eb = lp->_exits.at(k).get_target(); | |
1551 float prob = lp->_exits.at(k).get_prob(); | |
1552 update_succ_freq(eb, freq * prob); | |
1553 } | |
1554 } | |
1555 } | |
1556 | |
1557 #if 0 | |
1558 // Raise frequency of the loop backedge block, in an effort | |
1559 // to keep it empty. Skip the method level "loop". | |
1560 if (_parent != NULL) { | |
1561 CFGElement* s = _members.at(_members.length() - 1); | |
1562 if (s->is_block()) { | |
1563 Block* bk = s->as_Block(); | |
1564 if (bk->_num_succs == 1 && bk->_succs[0] == hd) { | |
1565 // almost any value >= 1.0f works | |
1566 // FIXME: raw constant | |
1567 bk->_freq = 1.05f; | |
1568 } | |
1569 } | |
1570 } | |
1571 #endif | |
1572 | |
1573 // For all loops other than the outer, "method" loop, | |
1574 // sum and normalize the exit probability. The "method" loop | |
1575 // should keep the initial exit probability of 1, so that | |
1576 // inner blocks do not get erroneously scaled. | |
1577 if (_depth != 0) { | |
1578 // Total the exit probabilities for this loop. | |
1579 float exits_sum = 0.0f; | |
1580 for (int i = 0; i < _exits.length(); i++) { | |
1581 exits_sum += _exits.at(i).get_prob(); | |
1582 } | |
1583 | |
1584 // Normalize the exit probabilities. Until now, the | |
1585 // probabilities estimate the chance of exit on any | |
1586 // single loop iteration; afterward, they estimate | |
1587 // the probability of exit per loop entry. | |
1588 for (int i = 0; i < _exits.length(); i++) { | |
1589 Block* et = _exits.at(i).get_target(); | |
1590 float new_prob = _exits.at(i).get_prob() / exits_sum; | |
1591 BlockProbPair bpp(et, new_prob); | |
1592 _exits.at_put(i, bpp); | |
1593 } | |
1594 | |
1595 // Save the total, but guard against unreasonable probability, | |
1596 // as the value is used to estimate the loop trip count. | |
1597 // An infinite trip count would blur relative block | |
1598 // frequencies. | |
1599 if (exits_sum > 1.0f) exits_sum = 1.0f; | |
1600 if (exits_sum < PROB_MIN) exits_sum = PROB_MIN; | |
1601 _exit_prob = exits_sum; | |
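// Worked example (hypothetical numbers): two exits with per-iteration | |
// probabilities 0.05 and 0.15 give exits_sum = 0.2; they normalize to | |
// 0.25 and 0.75, and _exit_prob = 0.2 implies an expected trip count | |
// of roughly 1/0.2 = 5 iterations per loop entry. | |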
1602 } | |
1603 } | |
1604 | |
1605 //------------------------------succ_prob------------------------------------- | |
1606 // Determine the probability of reaching successor 'i' from the receiver block. | |
1607 float Block::succ_prob(uint i) { | |
1608 int eidx = end_idx(); | |
1609 Node *n = _nodes[eidx]; // Get ending Node | |
1610 int op = n->is_Mach() ? n->as_Mach()->ideal_Opcode() : n->Opcode(); | |
1611 | |
1612 // Switch on branch type | |
1613 switch( op ) { | |
1614 case Op_CountedLoopEnd: | |
1615 case Op_If: { | |
1616 assert (i < 2, "just checking"); | |
1617 // Conditionals pass on only part of their frequency | |
1618 float prob = n->as_MachIf()->_prob; | |
1619 assert(prob >= 0.0 && prob <= 1.0, "out of range probability"); | |
1620 // If succ[i] is the FALSE branch, invert path info | |
1621 if( _nodes[i + eidx + 1]->Opcode() == Op_IfFalse ) { | |
1622 return 1.0f - prob; // not taken | |
1623 } else { | |
1624 return prob; // taken | |
1625 } | |
1626 } | |
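// Example: with _prob = 0.9, the IfTrue path reports 0.9 and the | |
// IfFalse path reports 1.0f - 0.9f = 0.1f, so the two successor | |
// frequencies always sum to the block's own frequency. | |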
1627 | |
1628 case Op_Jump: | |
1629 // Divide the frequency between all successors evenly | |
1630 return 1.0f/_num_succs; | |
1631 | |
1632 case Op_Catch: { | |
1633 const CatchProjNode *ci = _nodes[i + eidx + 1]->as_CatchProj(); | |
1634 if (ci->_con == CatchProjNode::fall_through_index) { | |
1635 // Fall-thru path gets the lion's share. | |
1636 return 1.0f - PROB_UNLIKELY_MAG(5)*_num_succs; | |
1637 } else { | |
1638 // Presume exceptional paths are equally unlikely | |
1639 return PROB_UNLIKELY_MAG(5); | |
1640 } | |
1641 } | |
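// Example (assuming PROB_UNLIKELY_MAG(5) is 1e-5): a Catch with three | |
// successors gives each of the two exceptional paths 1e-5 and the | |
// fall-through path 1.0f - 3e-5, so nearly all frequency falls through. | |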
1642 | |
1643 case Op_Root: | |
1644 case Op_Goto: | |
1645 // Pass frequency straight thru to target | |
1646 return 1.0f; | |
1647 | |
1648 case Op_NeverBranch: | |
1649 return 0.0f; | |
1650 | |
1651 case Op_TailCall: | |
1652 case Op_TailJump: | |
1653 case Op_Return: | |
1654 case Op_Halt: | |
1655 case Op_Rethrow: | |
1656 // Do not push out freq to root block | |
1657 return 0.0f; | |
1658 | |
1659 default: | |
1660 ShouldNotReachHere(); | |
1661 } | |
1662 | |
1663 return 0.0f; | |
1664 } | |
1665 | |
1666 //------------------------------update_succ_freq------------------------------- | |
1667 // Update the appropriate frequency associated with block 'b', a successor of | |
1668 // a block in this loop. | |
1669 void CFGLoop::update_succ_freq(Block* b, float freq) { | |
1670 if (b->_loop == this) { | |
1671 if (b == head()) { | |
1672 // back branch within the loop | |
1673 // Do nothing now; the loop-carried frequency will be | |
1674 // adjusted later in scale_freq(). | |
1675 } else { | |
1676 // simple branch within the loop | |
1677 b->_freq += freq; | |
1678 } | |
1679 } else if (!in_loop_nest(b)) { | |
1680 // branch is exit from this loop | |
1681 BlockProbPair bpp(b, freq); | |
1682 _exits.append(bpp); | |
1683 } else { | |
1684 // branch into nested loop | |
1685 CFGLoop* ch = b->_loop; | |
1686 ch->_freq += freq; | |
1687 } | |
1688 } | |
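// Example: a branch out of a block with _freq = 0.8, taken with | |
// probability 0.25, contributes 0.2 either to the target block's _freq, | |
// to an inner loop's entry frequency, or to this loop's _exits list, | |
// depending on where the target lives. | |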
1689 | |
1690 //------------------------------in_loop_nest----------------------------------- | |
1691 // Determine if block b is in the receiver's loop nest. | |
1692 bool CFGLoop::in_loop_nest(Block* b) { | |
1693 int depth = _depth; | |
1694 CFGLoop* b_loop = b->_loop; | |
1695 int b_depth = b_loop->_depth; | |
1696 if (depth == b_depth) { | |
1697 return true; | |
1698 } | |
1699 while (b_depth > depth) { | |
1700 b_loop = b_loop->_parent; | |
1701 b_depth = b_loop->_depth; | |
1702 } | |
1703 return b_loop == this; | |
1704 } | |
1705 | |
1706 //------------------------------scale_freq------------------------------------- | |
1707 // Scale frequency of loops and blocks by trip counts from outer loops | |
1708 // Do a top down traversal of loop tree (visit outer loops first.) | |
1709 void CFGLoop::scale_freq() { | |
1710 float loop_freq = _freq * trip_count(); | |
1711 for (int i = 0; i < _members.length(); i++) { | |
1712 CFGElement* s = _members.at(i); | |
1713 s->_freq *= loop_freq; | |
1714 } | |
1715 CFGLoop* ch = _child; | |
1716 while (ch != NULL) { | |
1717 ch->scale_freq(); | |
1718 ch = ch->_sibling; | |
1719 } | |
1720 } | |
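// Worked example (hypothetical numbers): the method pseudo-loop with | |
// _freq = 0.5 and trip_count() = 1 scales its members by 0.5; an inner | |
// loop whose relative _freq was 0.4 (now 0.2 after that scaling) with | |
// trip_count() = 10 then scales its own members by 0.2 * 10 = 2.0. | |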
1721 | |
1722 #ifndef PRODUCT | |
1723 //------------------------------dump_tree-------------------------------------- | |
1724 void CFGLoop::dump_tree() const { | |
1725 dump(); | |
1726 if (_child != NULL) _child->dump_tree(); | |
1727 if (_sibling != NULL) _sibling->dump_tree(); | |
1728 } | |
1729 | |
1730 //------------------------------dump------------------------------------------- | |
1731 void CFGLoop::dump() const { | |
1732 for (int i = 0; i < _depth; i++) tty->print(" "); | |
1733 tty->print("%s: %d trip_count: %6.0f freq: %6.0f\n", | |
1734 _depth == 0 ? "Method" : "Loop", _id, trip_count(), _freq); | |
1735 for (int i = 0; i < _depth; i++) tty->print(" "); | |
1736 tty->print(" members:"); | |
1737 int k = 0; | |
1738 for (int i = 0; i < _members.length(); i++) { | |
1739 if (k++ >= 6) { | |
1740 tty->print("\n "); | |
1741 for (int j = 0; j < _depth+1; j++) tty->print(" "); | |
1742 k = 0; | |
1743 } | |
1744 CFGElement *s = _members.at(i); | |
1745 if (s->is_block()) { | |
1746 Block *b = s->as_Block(); | |
1747 tty->print(" B%d(%6.3f)", b->_pre_order, b->_freq); | |
1748 } else { | |
1749 CFGLoop* lp = s->as_CFGLoop(); | |
1750 tty->print(" L%d(%6.3f)", lp->_id, lp->_freq); | |
1751 } | |
1752 } | |
1753 tty->print("\n"); | |
1754 for (int i = 0; i < _depth; i++) tty->print(" "); | |
1755 tty->print(" exits: "); | |
1756 k = 0; | |
1757 for (int i = 0; i < _exits.length(); i++) { | |
1758 if (k++ >= 7) { | |
1759 tty->print("\n "); | |
1760 for (int j = 0; j < _depth+1; j++) tty->print(" "); | |
1761 k = 0; | |
1762 } | |
1763 Block *blk = _exits.at(i).get_target(); | |
1764 float prob = _exits.at(i).get_prob(); | |
1765 tty->print(" ->%d@%d%%", blk->_pre_order, (int)(prob*100)); | |
1766 } | |
1767 tty->print("\n"); | |
1768 } | |
1769 #endif |