Mercurial > hg > truffle
annotate src/share/vm/opto/split_if.cpp @ 1994:6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
7002546: regression on SpecJbb2005 on 7b118 comparing to 7b117 on small heaps
Summary: Relaxed assertion checking related to incremental_collection_failed flag to allow for ExplicitGCInvokesConcurrent behaviour where we do not want a failing scavenge to bail to a stop-world collection. Parameterized incremental_collection_will_fail() so we can selectively use, or not use, as appropriate, the statistical prediction at specific use sites. This essentially reverts the scavenge bail-out logic to what it was prior to some recent changes that had inadvertently started using the statistical prediction which can be noisy in the presence of bursty loads. Added some associated verbose non-product debugging messages.
Reviewed-by: johnc, tonyp
author | ysr |
---|---|
date | Tue, 07 Dec 2010 21:55:53 -0800 |
parents | f95d63e2154a |
children | 08eb13460b3a |
rev | line source |
---|---|
0 | 1 /* |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1273
diff
changeset
|
2 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1273
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1273
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1273
diff
changeset
|
21 * questions. |
0 | 22 * |
23 */ | |
24 | |
1972 | 25 #include "precompiled.hpp" |
26 #include "memory/allocation.inline.hpp" | |
27 #include "opto/callnode.hpp" | |
28 #include "opto/connode.hpp" | |
29 #include "opto/loopnode.hpp" | |
0 | 30 |
31 | |
32 //------------------------------split_thru_region------------------------------ | |
33 // Split Node 'n' through merge point. | |
34 Node *PhaseIdealLoop::split_thru_region( Node *n, Node *region ) { | |
35 uint wins = 0; | |
36 assert( n->is_CFG(), "" ); | |
37 assert( region->is_Region(), "" ); | |
38 Node *r = new (C, region->req()) RegionNode( region->req() ); | |
39 IdealLoopTree *loop = get_loop( n ); | |
40 for( uint i = 1; i < region->req(); i++ ) { | |
41 Node *x = n->clone(); | |
42 Node *in0 = n->in(0); | |
43 if( in0->in(0) == region ) x->set_req( 0, in0->in(i) ); | |
44 for( uint j = 1; j < n->req(); j++ ) { | |
45 Node *in = n->in(j); | |
46 if( get_ctrl(in) == region ) | |
47 x->set_req( j, in->in(i) ); | |
48 } | |
49 _igvn.register_new_node_with_optimizer(x); | |
50 set_loop(x, loop); | |
51 set_idom(x, x->in(0), dom_depth(x->in(0))+1); | |
52 r->init_req(i, x); | |
53 } | |
54 | |
55 // Record region | |
56 r->set_req(0,region); // Not a TRUE RegionNode | |
57 _igvn.register_new_node_with_optimizer(r); | |
58 set_loop(r, loop); | |
59 if( !loop->_child ) | |
60 loop->_body.push(r); | |
61 return r; | |
62 } | |
63 | |
//------------------------------split_up---------------------------------------
// Split block-local op up through the phis to empty the current block.
// 'blk1' is the merge Region being emptied and 'blk2' is the If pinned at it
// (callers pass (region, iff) -- see do_split_if).  Returns true if 'n' was
// moved out of the block (cloned up through the merge or cloned down to its
// uses), false if 'n' needed no splitting (not block-local, a CFG node, or a
// local Phi which is expected here).
bool PhaseIdealLoop::split_up( Node *n, Node *blk1, Node *blk2 ) {
  if( n->is_CFG() ) {
    assert( n->in(0) != blk1, "Lousy candidate for split-if" );
    return false;
  }
  if( get_ctrl(n) != blk1 && get_ctrl(n) != blk2 )
    return false;               // Not block local
  if( n->is_Phi() ) return false; // Local PHIs are expected

  // Recursively split-up inputs
  for (uint i = 1; i < n->req(); i++) {
    if( split_up( n->in(i), blk1, blk2 ) ) {
      // Got split recursively and self went dead?
      if (n->outcnt() == 0)
        _igvn.remove_dead_node(n);
      return true;
    }
  }

  // Check for needing to clone-up a compare.  Can't do that, it forces
  // another (nested) split-if transform.  Instead, clone it "down".
  if( n->is_Cmp() ) {
    assert(get_ctrl(n) == blk2 || get_ctrl(n) == blk1, "must be in block with IF");
    // Check for simple Cmp/Bool/CMove which we can clone-up.  Cmp/Bool/CMove
    // sequence can have no other users and it must all reside in the split-if
    // block.  Non-simple Cmp/Bool/CMove sequences are 'cloned-down' below -
    // private, per-use versions of the Cmp and Bool are made.  These sink to
    // the CMove block.  If the CMove is in the split-if block, then in the
    // next iteration this will become a simple Cmp/Bool/CMove set to clone-up.
    Node *bol, *cmov;
    if( !(n->outcnt() == 1 && n->unique_out()->is_Bool() &&
          (bol = n->unique_out()->as_Bool()) &&
          (get_ctrl(bol) == blk1 ||
           get_ctrl(bol) == blk2) &&
          bol->outcnt() == 1 &&
          bol->unique_out()->is_CMove() &&
          (cmov = bol->unique_out()->as_CMove()) &&
          (get_ctrl(cmov) == blk1 ||
           get_ctrl(cmov) == blk2) ) ) {

      // Must clone down
#ifndef PRODUCT
      if( PrintOpto && VerifyLoopOptimizations ) {
        tty->print("Cloning down: ");
        n->dump();
      }
#endif
      // Clone down any block-local BoolNode uses of this CmpNode
      for (DUIterator i = n->outs(); n->has_out(i); i++) {
        Node* bol = n->out(i);
        assert( bol->is_Bool(), "" );
        if (bol->outcnt() == 1) {
          // A Bool with a single use outside the split-if block needs no
          // sinking; skip it.
          Node* use = bol->unique_out();
          Node *use_c = use->is_If() ? use->in(0) : get_ctrl(use);
          if (use_c == blk1 || use_c == blk2) {
            continue;
          }
        }
        if (get_ctrl(bol) == blk1 || get_ctrl(bol) == blk2) {
          // Recursively sink any BoolNode
#ifndef PRODUCT
          if( PrintOpto && VerifyLoopOptimizations ) {
            tty->print("Cloning down: ");
            bol->dump();
          }
#endif
          // Give every user of the Bool its own private copy, placed at
          // the user's control block.
          for (DUIterator_Last jmin, j = bol->last_outs(jmin); j >= jmin; --j) {
            // Uses are either IfNodes or CMoves
            Node* iff = bol->last_out(j);
            assert( iff->in(1) == bol, "" );
            // Get control block of either the CMove or the If input
            Node *iff_ctrl = iff->is_If() ? iff->in(0) : get_ctrl(iff);
            Node *x = bol->clone();
            register_new_node(x, iff_ctrl);
            _igvn.hash_delete(iff);
            iff->set_req(1, x);
            _igvn._worklist.push(iff);
          }
          _igvn.remove_dead_node( bol );
          --i;                  // Removed a use of 'n'; back up the iterator
        }
      }
      // Clone down this CmpNode: one private copy per remaining Bool use.
      for (DUIterator_Last jmin, j = n->last_outs(jmin); j >= jmin; --j) {
        Node* bol = n->last_out(j);
        assert( bol->in(1) == n, "" );
        Node *x = n->clone();
        register_new_node(x, get_ctrl(bol));
        _igvn.hash_delete(bol);
        bol->set_req(1, x);
        _igvn._worklist.push(bol);
      }
      _igvn.remove_dead_node( n );

      return true;
    }
  }

  // See if splitting-up a Store.  Any anti-dep loads must go up as
  // well.  An anti-dep load might be in the wrong block, because in
  // this particular layout/schedule we ignored anti-deps and allow
  // memory to be alive twice.  This only works if we do the same
  // operations on anti-dep loads as we do their killing stores.
  if( n->is_Store() && n->in(MemNode::Memory)->in(0) == n->in(0) ) {
    // Get store's memory slice
    int alias_idx = C->get_alias_index(_igvn.type(n->in(MemNode::Address))->is_ptr());

    // Get memory-phi anti-dep loads will be using
    Node *memphi = n->in(MemNode::Memory);
    assert( memphi->is_Phi(), "" );
    // Hoist any anti-dep load to the splitting block;
    // it will then "split-up".
    for (DUIterator_Fast imax,i = memphi->fast_outs(imax); i < imax; i++) {
      Node *load = memphi->fast_out(i);
      if( load->is_Load() && alias_idx == C->get_alias_index(_igvn.type(load->in(MemNode::Address))->is_ptr()) )
        set_ctrl(load,blk1);
    }
  }

  // Found some other Node; must clone it up
#ifndef PRODUCT
  if( PrintOpto && VerifyLoopOptimizations ) {
    tty->print("Cloning up: ");
    n->dump();
  }
#endif

  // ConvI2L may have type information on it which becomes invalid if
  // it moves up in the graph, so change any clones to widen the type
  // to TypeLong::INT when pushing it up.
  const Type* rtype = NULL;
  if (n->Opcode() == Op_ConvI2L && n->bottom_type() != TypeLong::INT) {
    rtype = TypeLong::INT;
  }

  // Now actually split-up this guy.  One copy per control path merging.
  Node *phi = PhiNode::make_blank(blk1, n);
  for( uint j = 1; j < blk1->req(); j++ ) {
    Node *x = n->clone();
    // Widen the type of the ConvI2L when pushing up.
    if (rtype != NULL) x->as_Type()->set_type(rtype);
    if( n->in(0) && n->in(0) == blk1 )
      x->set_req( 0, blk1->in(j) );
    // Redirect inputs pinned at blk1 (local Phis) to the value on path j.
    for( uint i = 1; i < n->req(); i++ ) {
      Node *m = n->in(i);
      if( get_ctrl(m) == blk1 ) {
        assert( m->in(0) == blk1, "" );
        x->set_req( i, m->in(j) );
      }
    }
    register_new_node( x, blk1->in(j) );
    phi->init_req( j, x );
  }
  // Announce phi to optimizer
  register_new_node(phi, blk1);

  // Remove cloned-up value from optimizer; use phi instead
  _igvn.replace_node( n, phi );

  // (There used to be a self-recursive call to split_up() here,
  // but it is not needed.  All necessary forward walking is done
  // by do_split_if() below.)

  return true;
}
231 | |
232 //------------------------------register_new_node------------------------------ | |
233 void PhaseIdealLoop::register_new_node( Node *n, Node *blk ) { | |
1172 | 234 assert(!n->is_CFG(), "must be data node"); |
0 | 235 _igvn.register_new_node_with_optimizer(n); |
236 set_ctrl(n, blk); | |
237 IdealLoopTree *loop = get_loop(blk); | |
238 if( !loop->_child ) | |
239 loop->_body.push(n); | |
240 } | |
241 | |
//------------------------------small_cache------------------------------------
// Thin wrapper around the generic pointer Dict, mapping a use-block to the
// replacement def already computed for that block.  Used by spinup() to
// path-compress repeated walks up the dominator tree.
struct small_cache : public Dict {

  small_cache() : Dict( cmpkey, hashptr ) {}
  // Return the cached def for 'use_blk', or NULL if none recorded.
  Node *probe( Node *use_blk ) { return (Node*)((*this)[use_blk]); }
  // Record (or overwrite) the def computed for 'use_blk'.
  void lru_insert( Node *use_blk, Node *new_def ) { Insert(use_blk,new_def); }
};
249 | |
//------------------------------spinup-----------------------------------------
// "Spin up" the dominator tree, starting at the use site and stopping when we
// find the post-dominating point.

// We must be at the merge point which post-dominates 'new_false' and
// 'new_true'.  Figure out which edges into the RegionNode eventually lead up
// to false and which to true.  Put in a PhiNode to merge values; plug in
// the appropriate false-arm or true-arm values.  If some path leads to the
// original IF, then insert a Phi recursively.
//
// 'iff_dom' is the dominator of the original merge point; 'def' is the value
// (Phi) or CFG node (Region) being replaced; 'cache' memoizes the result per
// visited block so repeated walks stop early.
Node *PhaseIdealLoop::spinup( Node *iff_dom, Node *new_false, Node *new_true, Node *use_blk, Node *def, small_cache *cache ) {
  if (use_blk->is_top())        // Handle dead uses
    return use_blk;
  // 'prior_n' trails one step behind 'n'; the poison value is safe because
  // the walk below always iterates at least once (use_blk != iff_dom).
  Node *prior_n = (Node*)0xdeadbeef;
  Node *n = use_blk;            // Get path input
  assert( use_blk != iff_dom, "" );
  // Here's the "spinup" the dominator tree loop.  Do a cache-check
  // along the way, in case we've come this way before.
  while( n != iff_dom ) {       // Found post-dominating point?
    prior_n = n;
    n = idom(n);                // Search higher
    Node *s = cache->probe( prior_n ); // Check cache
    if( s ) return s;           // Cache hit!
  }

  Node *phi_post;
  if( prior_n == new_false || prior_n == new_true ) {
    // Landed directly on one of the new projections: the replacement is a
    // private clone of 'def' pinned there.
    phi_post = def->clone();
    phi_post->set_req(0, prior_n );
    register_new_node(phi_post, prior_n);
  } else {
    // This method handles both control uses (looking for Regions) or data
    // uses (looking for Phis).  If looking for a control use, then we need
    // to insert a Region instead of a Phi; however Regions always exist
    // previously (the hash_find_insert below would always hit) so we can
    // return the existing Region.
    if( def->is_CFG() ) {
      phi_post = prior_n;       // If looking for CFG, return prior
    } else {
      assert( def->is_Phi(), "" );
      assert( prior_n->is_Region(), "must be a post-dominating merge point" );

      // Need a Phi here
      phi_post = PhiNode::make_blank(prior_n, def);
      // Search for both true and false on all paths till find one.
      for( uint i = 1; i < phi_post->req(); i++ ) // For all paths
        phi_post->init_req( i, spinup( iff_dom, new_false, new_true, prior_n->in(i), def, cache ) );
      Node *t = _igvn.hash_find_insert(phi_post);
      if( t ) {                 // See if we already have this one
        // phi_post will not be used, so kill it
        _igvn.remove_dead_node(phi_post);
        phi_post->destruct();
        phi_post = t;
      } else {
        register_new_node( phi_post, prior_n );
      }
    }
  }

  // Update cache everywhere
  prior_n = (Node*)0xdeadbeef;  // Reset IDOM walk
  n = use_blk;                  // Get path input
  // Spin-up the idom tree again, basically doing path-compression.
  // Insert cache entries along the way, so that if we ever hit this
  // point in the IDOM tree again we'll stop immediately on a cache hit.
  while( n != iff_dom ) {       // Found post-dominating point?
    prior_n = n;
    n = idom(n);                // Search higher
    cache->lru_insert( prior_n, phi_post ); // Fill cache
  } // End of while not gone high enough

  return phi_post;
}
322 | |
//------------------------------find_use_block---------------------------------
// Find the block a USE is in.  Normally USE's are in the same block as the
// using instruction.  For Phi-USE's, the USE is in the predecessor block
// along the corresponding path.
//
// May side-effect: re-pins uses attached to the dying old_false/old_true
// projections onto new_false/new_true, and replaces dead uses with top.
// Returns NULL for a dead use.
Node *PhaseIdealLoop::find_use_block( Node *use, Node *def, Node *old_false, Node *new_false, Node *old_true, Node *new_true ) {
  // CFG uses are their own block
  if( use->is_CFG() )
    return use;

  if( use->is_Phi() ) {         // Phi uses in prior block
    // Grab the first Phi use; there may be many.
    // Each will be handled as a separate iteration of
    // the "while( phi->outcnt() )" loop.
    uint j;
    for( j = 1; j < use->req(); j++ )
      if( use->in(j) == def )
        break;
    assert( j < use->req(), "def should be among use's inputs" );
    // The use's block is the Region predecessor feeding the matching edge.
    return use->in(0)->in(j);
  }
  // Normal (non-phi) use
  Node *use_blk = get_ctrl(use);
  // Some uses are directly attached to the old (and going away)
  // false and true branches.
  if( use_blk == old_false ) {
    use_blk = new_false;
    set_ctrl(use, new_false);
  }
  if( use_blk == old_true ) {
    use_blk = new_true;
    set_ctrl(use, new_true);
  }

  if (use_blk == NULL) {        // He's dead, Jim
    _igvn.replace_node(use, C->top());
  }

  return use_blk;
}
362 | |
363 //------------------------------handle_use------------------------------------- | |
364 // Handle uses of the merge point. Basically, split-if makes the merge point | |
365 // go away so all uses of the merge point must go away as well. Most block | |
366 // local uses have already been split-up, through the merge point. Uses from | |
367 // far below the merge point can't always be split up (e.g., phi-uses are | |
368 // pinned) and it makes too much stuff live. Instead we use a path-based | |
369 // solution to move uses down. | |
370 // | |
371 // If the use is along the pre-split-CFG true branch, then the new use will | |
372 // be from the post-split-CFG true merge point. Vice-versa for the false | |
373 // path. Some uses will be along both paths; then we sink the use to the | |
374 // post-dominating location; we may need to insert a Phi there. | |
375 void PhaseIdealLoop::handle_use( Node *use, Node *def, small_cache *cache, Node *region_dom, Node *new_false, Node *new_true, Node *old_false, Node *old_true ) { | |
376 | |
377 Node *use_blk = find_use_block(use,def,old_false,new_false,old_true,new_true); | |
378 if( !use_blk ) return; // He's dead, Jim | |
379 | |
380 // Walk up the dominator tree until I hit either the old IfFalse, the old | |
381 // IfTrue or the old If. Insert Phis where needed. | |
382 Node *new_def = spinup( region_dom, new_false, new_true, use_blk, def, cache ); | |
383 | |
384 // Found where this USE goes. Re-point him. | |
385 uint i; | |
386 for( i = 0; i < use->req(); i++ ) | |
387 if( use->in(i) == def ) | |
388 break; | |
389 assert( i < use->req(), "def should be among use's inputs" ); | |
390 _igvn.hash_delete(use); | |
391 use->set_req(i, new_def); | |
392 _igvn._worklist.push(use); | |
393 } | |
394 | |
395 //------------------------------do_split_if------------------------------------ | |
396 // Found an If getting its condition-code input from a Phi in the same block. | |
397 // Split thru the Region. | |
398 void PhaseIdealLoop::do_split_if( Node *iff ) { | |
399 #ifndef PRODUCT | |
400 if( PrintOpto && VerifyLoopOptimizations ) | |
401 tty->print_cr("Split-if"); | |
402 #endif | |
403 C->set_major_progress(); | |
404 Node *region = iff->in(0); | |
405 Node *region_dom = idom(region); | |
406 | |
407 // We are going to clone this test (and the control flow with it) up through | |
408 // the incoming merge point. We need to empty the current basic block. | |
409 // Clone any instructions which must be in this block up through the merge | |
410 // point. | |
411 DUIterator i, j; | |
412 bool progress = true; | |
413 while (progress) { | |
414 progress = false; | |
415 for (i = region->outs(); region->has_out(i); i++) { | |
416 Node* n = region->out(i); | |
417 if( n == region ) continue; | |
418 // The IF to be split is OK. | |
419 if( n == iff ) continue; | |
420 if( !n->is_Phi() ) { // Found pinned memory op or such | |
421 if (split_up(n, region, iff)) { | |
422 i = region->refresh_out_pos(i); | |
423 progress = true; | |
424 } | |
425 continue; | |
426 } | |
427 assert( n->in(0) == region, "" ); | |
428 | |
429 // Recursively split up all users of a Phi | |
430 for (j = n->outs(); n->has_out(j); j++) { | |
431 Node* m = n->out(j); | |
432 // If m is dead, throw it away, and declare progress | |
433 if (_nodes[m->_idx] == NULL) { | |
434 _igvn.remove_dead_node(m); | |
435 // fall through | |
436 } | |
437 else if (m != iff && split_up(m, region, iff)) { | |
438 // fall through | |
439 } else { | |
440 continue; | |
441 } | |
442 // Something unpredictable changed. | |
443 // Tell the iterators to refresh themselves, and rerun the loop. | |
444 i = region->refresh_out_pos(i); | |
445 j = region->refresh_out_pos(j); | |
446 progress = true; | |
447 } | |
448 } | |
449 } | |
450 | |
451 // Now we have no instructions in the block containing the IF. | |
452 // Split the IF. | |
453 Node *new_iff = split_thru_region( iff, region ); | |
454 | |
455 // Replace both uses of 'new_iff' with Regions merging True/False | |
456 // paths. This makes 'new_iff' go dead. | |
457 Node *old_false, *old_true; | |
458 Node *new_false, *new_true; | |
459 for (DUIterator_Last j2min, j2 = iff->last_outs(j2min); j2 >= j2min; --j2) { | |
460 Node *ifp = iff->last_out(j2); | |
461 assert( ifp->Opcode() == Op_IfFalse || ifp->Opcode() == Op_IfTrue, "" ); | |
462 ifp->set_req(0, new_iff); | |
463 Node *ifpx = split_thru_region( ifp, region ); | |
464 | |
465 // Replace 'If' projection of a Region with a Region of | |
466 // 'If' projections. | |
467 ifpx->set_req(0, ifpx); // A TRUE RegionNode | |
468 | |
469 // Setup dominator info | |
470 set_idom(ifpx, region_dom, dom_depth(region_dom) + 1); | |
471 | |
472 // Check for splitting loop tails | |
473 if( get_loop(iff)->tail() == ifp ) | |
474 get_loop(iff)->_tail = ifpx; | |
475 | |
476 // Replace in the graph with lazy-update mechanism | |
477 new_iff->set_req(0, new_iff); // hook self so it does not go dead | |
478 lazy_replace_proj( ifp, ifpx ); | |
479 new_iff->set_req(0, region); | |
480 | |
481 // Record bits for later xforms | |
482 if( ifp->Opcode() == Op_IfFalse ) { | |
483 old_false = ifp; | |
484 new_false = ifpx; | |
485 } else { | |
486 old_true = ifp; | |
487 new_true = ifpx; | |
488 } | |
489 } | |
490 _igvn.remove_dead_node(new_iff); | |
491 // Lazy replace IDOM info with the region's dominator | |
492 lazy_replace( iff, region_dom ); | |
493 | |
494 // Now make the original merge point go dead, by handling all its uses. | |
495 small_cache region_cache; | |
496 // Preload some control flow in region-cache | |
497 region_cache.lru_insert( new_false, new_false ); | |
498 region_cache.lru_insert( new_true , new_true ); | |
499 // Now handle all uses of the splitting block | |
500 for (DUIterator_Last kmin, k = region->last_outs(kmin); k >= kmin; --k) { | |
501 Node* phi = region->last_out(k); | |
502 if( !phi->in(0) ) { // Dead phi? Remove it | |
503 _igvn.remove_dead_node(phi); | |
504 continue; | |
505 } | |
506 assert( phi->in(0) == region, "" ); | |
507 if( phi == region ) { // Found the self-reference | |
508 phi->set_req(0, NULL); | |
509 continue; // Break the self-cycle | |
510 } | |
511 // Expected common case: Phi hanging off of Region | |
512 if( phi->is_Phi() ) { | |
513 // Need a per-def cache. Phi represents a def, so make a cache | |
514 small_cache phi_cache; | |
515 | |
516 // Inspect all Phi uses to make the Phi go dead | |
517 for (DUIterator_Last lmin, l = phi->last_outs(lmin); l >= lmin; --l) { | |
518 Node* use = phi->last_out(l); | |
519 // Compute the new DEF for this USE. New DEF depends on the path | |
520 // taken from the original DEF to the USE. The new DEF may be some | |
521 // collection of PHI's merging values from different paths. The Phis | |
522 // inserted depend only on the location of the USE. We use a | |
523 // 2-element cache to handle multiple uses from the same block. | |
524 handle_use( use, phi, &phi_cache, region_dom, new_false, new_true, old_false, old_true ); | |
525 } // End of while phi has uses | |
526 | |
527 // Because handle_use might relocate region->_out, | |
528 // we must refresh the iterator. | |
529 k = region->last_outs(kmin); | |
530 | |
531 // Remove the dead Phi | |
532 _igvn.remove_dead_node( phi ); | |
533 | |
534 } else { | |
535 // Random memory op guarded by Region. Compute new DEF for USE. | |
536 handle_use( phi, region, ®ion_cache, region_dom, new_false, new_true, old_false, old_true ); | |
537 } | |
538 | |
539 } // End of while merge point has phis | |
540 | |
541 // Any leftover bits in the splitting block must not have depended on local | |
542 // Phi inputs (these have already been split-up). Hence it's safe to hoist | |
543 // these guys to the dominating point. | |
544 lazy_replace( region, region_dom ); | |
545 #ifndef PRODUCT | |
546 if( VerifyLoopOptimizations ) verify(); | |
547 #endif | |
548 } |