comparison src/share/vm/opto/lcm.cpp @ 12023:d1034bd8cefc

8022284: Hide internal data structure in PhaseCFG
Summary: Hide private node to block mapping using public interface
Reviewed-by: kvn, roland
author adlertz
date Wed, 07 Aug 2013 17:56:19 +0200
parents 70120f47d403
children adb9a7d94cb5
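The whole changeset follows one pattern: every direct read of PhaseCFG's private node-to-block array (cfg->_bbs[n->_idx]) becomes cfg->get_block_for_node(n), and every direct write (cfg->_bbs.map(n->_idx, b)) becomes cfg->map_node_to_block(n, b). A minimal sketch of the accessors these call sites imply; the member name _node_to_block_mapping and the class fragment are assumptions read off the diff, not quoted from block.hpp:

    class PhaseCFG : public Phase {
     private:
      // Maps Node::_idx to the Block in which the node is scheduled.
      Block_Array _node_to_block_mapping;
     public:
      // Read side of the old cfg->_bbs[n->_idx].
      Block* get_block_for_node(const Node* node) const {
        return _node_to_block_mapping[node->_idx];
      }
      // Write side of the old cfg->_bbs.map(n->_idx, b).
      void map_node_to_block(Node* node, Block* block) {
        _node_to_block_mapping.map(node->_idx, block);
      }
    };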
235 continue; // Give up if reference is beyond 4K page size 235 continue; // Give up if reference is beyond 4K page size
236 } 236 }
237 } 237 }
238 238
239 // Check ctrl input to see if the null-check dominates the memory op 239 // Check ctrl input to see if the null-check dominates the memory op
240 Block *cb = cfg->_bbs[mach->_idx]; 240 Block *cb = cfg->get_block_for_node(mach);
241 cb = cb->_idom; // Always hoist at least 1 block 241 cb = cb->_idom; // Always hoist at least 1 block
242 if( !was_store ) { // Stores can be hoisted only one block 242 if( !was_store ) { // Stores can be hoisted only one block
243 while( cb->_dom_depth > (_dom_depth + 1)) 243 while( cb->_dom_depth > (_dom_depth + 1))
244 cb = cb->_idom; // Hoist loads as far as we want 244 cb = cb->_idom; // Hoist loads as far as we want
245 // The non-null-block should dominate the memory op, too. Live 245 // The non-null-block should dominate the memory op, too. Live
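The hunk above hoists the candidate up the dominator tree: stores may rise only one block, while loads climb until just below the null check's depth. The same walk-the-_idom-chain idiom reappears at old lines 266-271 to test whether each input's block dominates the check. Factored into a helper it would read roughly as follows (hypothetical name; _idom and _dom_depth are the Block fields the diff already uses):

    // True if 'dom' dominates 'b': climb b's immediate-dominator chain
    // until we reach dom's depth, then see whether we landed on dom.
    static bool block_dominates(Block* dom, Block* b) {
      while (b != dom && b->_dom_depth > dom->_dom_depth) {
        b = b->_idom;
      }
      return b == dom;
    }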
260 vidx = j; 260 vidx = j;
261 // Ignore DecodeN val which could be hoisted to where needed. 261 // Ignore DecodeN val which could be hoisted to where needed.
262 if( is_decoden ) continue; 262 if( is_decoden ) continue;
263 } 263 }
264 // Block of memory-op input 264 // Block of memory-op input
265 Block *inb = cfg->_bbs[mach->in(j)->_idx]; 265 Block *inb = cfg->get_block_for_node(mach->in(j));
266 Block *b = this; // Start from null check 266 Block *b = this; // Start from null check
267 while( b != inb && b->_dom_depth > inb->_dom_depth ) 267 while( b != inb && b->_dom_depth > inb->_dom_depth )
268 b = b->_idom; // search upwards for input 268 b = b->_idom; // search upwards for input
269 // See if input dominates null check 269 // See if input dominates null check
270 if( b != inb ) 270 if( b != inb )
271 break; 271 break;
272 } 272 }
273 if( j > 0 ) 273 if( j > 0 )
274 continue; 274 continue;
275 Block *mb = cfg->_bbs[mach->_idx]; 275 Block *mb = cfg->get_block_for_node(mach);
276 // Hoisting stores requires more checks for the anti-dependence case. 276 // Hoisting stores requires more checks for the anti-dependence case.
277 // Give up hoisting if we have to move the store past any load. 277 // Give up hoisting if we have to move the store past any load.
278 if( was_store ) { 278 if( was_store ) {
279 Block *b = mb; // Start searching here for a local load 279 Block *b = mb; // Start searching here for a local load
280 // mach use (faulting) trying to hoist 280 // mach use (faulting) trying to hoist
289 } 289 }
290 if( k < b->_nodes.size() ) 290 if( k < b->_nodes.size() )
291 break; // Found anti-dependent load 291 break; // Found anti-dependent load
292 // Make sure control does not do a merge (would have to check allpaths) 292 // Make sure control does not do a merge (would have to check allpaths)
293 if( b->num_preds() != 2 ) break; 293 if( b->num_preds() != 2 ) break;
294 b = cfg->_bbs[b->pred(1)->_idx]; // Move up to predecessor block 294 b = cfg->get_block_for_node(b->pred(1)); // Move up to predecessor block
295 } 295 }
296 if( b != this ) continue; 296 if( b != this ) continue;
297 } 297 }
298 298
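For stores, the loop at old lines 279-295 walks block-by-block from the store's block (mb) back toward the test block, rejecting the candidate if the store would have to move past any load; the per-node scan itself (old lines 281-288) is elided in this view. In outline, with block_contains_anti_dep_load as a hypothetical stand-in for that elided scan:

    Block* b = mb;                          // block holding the candidate store
    while (b != this) {                     // climb toward the null-check block
      if (block_contains_anti_dep_load(b))  // store would pass a load here?
        break;                              // then give up hoisting it
      if (b->num_preds() != 2)              // control merge: other paths unchecked
        break;
      b = cfg->get_block_for_node(b->pred(1));  // step to the lone predecessor
    }
    if (b != this) continue;                // walk failed: reject candidate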
299 // Make sure this memory op is not already being used for a NullCheck 299 // Make sure this memory op is not already being used for a NullCheck
301 if( e->is_MachNullCheck() && e->in(1) == mach ) 301 if( e->is_MachNullCheck() && e->in(1) == mach )
302 continue; // Already being used as a NULL check 302 continue; // Already being used as a NULL check
303 303
304 // Found a candidate! Pick one with least dom depth - the highest 304 // Found a candidate! Pick one with least dom depth - the highest
305 // in the dom tree should be closest to the null check. 305 // in the dom tree should be closest to the null check.
306 if( !best || 306 if (best == NULL || cfg->get_block_for_node(mach)->_dom_depth < cfg->get_block_for_node(best)->_dom_depth) {
307 cfg->_bbs[mach->_idx]->_dom_depth < cfg->_bbs[best->_idx]->_dom_depth ) {
308 best = mach; 307 best = mach;
309 bidx = vidx; 308 bidx = vidx;
310
311 } 309 }
312 } 310 }
313 // No candidate! 311 // No candidate!
314 if( !best ) return; 312 if (best == NULL) {
313 return;
314 }
315 315
316 // ---- Found an implicit null check 316 // ---- Found an implicit null check
317 extern int implicit_null_checks; 317 extern int implicit_null_checks;
318 implicit_null_checks++; 318 implicit_null_checks++;
319 319
320 if( is_decoden ) { 320 if( is_decoden ) {
321 // Check if we need to hoist decodeHeapOop_not_null first. 321 // Check if we need to hoist decodeHeapOop_not_null first.
322 Block *valb = cfg->_bbs[val->_idx]; 322 Block *valb = cfg->get_block_for_node(val);
323 if( this != valb && this->_dom_depth < valb->_dom_depth ) { 323 if( this != valb && this->_dom_depth < valb->_dom_depth ) {
324 // Hoist it up to the end of the test block. 324 // Hoist it up to the end of the test block.
325 valb->find_remove(val); 325 valb->find_remove(val);
326 this->add_inst(val); 326 this->add_inst(val);
327 cfg->_bbs.map(val->_idx,this); 327 cfg->map_node_to_block(val, this);
328 // DecodeN on x86 may kill flags. Check for flag-killing projections 328 // DecodeN on x86 may kill flags. Check for flag-killing projections
329 // that also need to be hoisted. 329 // that also need to be hoisted.
330 for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) { 330 for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) {
331 Node* n = val->fast_out(j); 331 Node* n = val->fast_out(j);
332 if( n->is_MachProj() ) { 332 if( n->is_MachProj() ) {
333 cfg->_bbs[n->_idx]->find_remove(n); 333 cfg->get_block_for_node(n)->find_remove(n);
334 this->add_inst(n); 334 this->add_inst(n);
335 cfg->_bbs.map(n->_idx,this); 335 cfg->map_node_to_block(n, this);
336 } 336 }
337 } 337 }
338 } 338 }
339 } 339 }
340 // Hoist the memory candidate up to the end of the test block. 340 // Hoist the memory candidate up to the end of the test block.
341 Block *old_block = cfg->_bbs[best->_idx]; 341 Block *old_block = cfg->get_block_for_node(best);
342 old_block->find_remove(best); 342 old_block->find_remove(best);
343 add_inst(best); 343 add_inst(best);
344 cfg->_bbs.map(best->_idx,this); 344 cfg->map_node_to_block(best, this);
345 345
346 // Move the control dependence 346 // Move the control dependence
347 if (best->in(0) && best->in(0) == old_block->_nodes[0]) 347 if (best->in(0) && best->in(0) == old_block->_nodes[0])
348 best->set_req(0, _nodes[0]); 348 best->set_req(0, _nodes[0]);
349 349
350 // Check for flag-killing projections that also need to be hoisted 350 // Check for flag-killing projections that also need to be hoisted
351 // Should be DU safe because no edge updates. 351 // Should be DU safe because no edge updates.
352 for (DUIterator_Fast jmax, j = best->fast_outs(jmax); j < jmax; j++) { 352 for (DUIterator_Fast jmax, j = best->fast_outs(jmax); j < jmax; j++) {
353 Node* n = best->fast_out(j); 353 Node* n = best->fast_out(j);
354 if( n->is_MachProj() ) { 354 if( n->is_MachProj() ) {
355 cfg->_bbs[n->_idx]->find_remove(n); 355 cfg->get_block_for_node(n)->find_remove(n);
356 add_inst(n); 356 add_inst(n);
357 cfg->_bbs.map(n->_idx,this); 357 cfg->map_node_to_block(n, this);
358 } 358 }
359 } 359 }
360 360
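Hoisting in this hunk repeats one three-call idiom four times (for val, its flag-killing MachProjs, best, and best's MachProjs): unlink the node from its old block, append it here, and update the node-to-block map. As a helper (hypothetical, not part of the changeset):

    // Move 'n' into 'dst', keeping PhaseCFG's node-to-block
    // mapping consistent with the blocks' instruction lists.
    static void hoist_to(PhaseCFG* cfg, Node* n, Block* dst) {
      cfg->get_block_for_node(n)->find_remove(n);  // unlink from old block
      dst->add_inst(n);                            // append to new block
      cfg->map_node_to_block(n, dst);              // fix the mapping
    }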
361 Compile *C = cfg->C; 361 Compile *C = cfg->C;
362 // proj==Op_True --> ne test; proj==Op_False --> eq test. 362 // proj==Op_True --> ne test; proj==Op_False --> eq test.
383 // Since schedule-local needs precise def-use info, we need to correct 383 // Since schedule-local needs precise def-use info, we need to correct
384 // it as well. 384 // it as well.
385 Node *old_tst = proj->in(0); 385 Node *old_tst = proj->in(0);
386 MachNode *nul_chk = new (C) MachNullCheckNode(old_tst->in(0),best,bidx); 386 MachNode *nul_chk = new (C) MachNullCheckNode(old_tst->in(0),best,bidx);
387 _nodes.map(end_idx(),nul_chk); 387 _nodes.map(end_idx(),nul_chk);
388 cfg->_bbs.map(nul_chk->_idx,this); 388 cfg->map_node_to_block(nul_chk, this);
389 // Redirect users of old_test to nul_chk 389 // Redirect users of old_test to nul_chk
390 for (DUIterator_Last i2min, i2 = old_tst->last_outs(i2min); i2 >= i2min; --i2) 390 for (DUIterator_Last i2min, i2 = old_tst->last_outs(i2min); i2 >= i2min; --i2)
391 old_tst->last_out(i2)->set_req(0, nul_chk); 391 old_tst->last_out(i2)->set_req(0, nul_chk);
392 // Clean-up any dead code 392 // Clean-up any dead code
393 for (uint i3 = 0; i3 < old_tst->req(); i3++) 393 for (uint i3 = 0; i3 < old_tst->req(); i3++)
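Redirecting old_tst's users (old lines 390-391) iterates the out-edges backwards with DUIterator_Last because each set_req(0, nul_chk) removes an edge from old_tst's out-array mid-walk; starting from the end keeps the not-yet-visited indices valid. The same loop, restated with braces and a comment:

    // Walk out-edges from the tail: set_req() deletes an edge from
    // old_tst's out-array, which would invalidate forward iteration.
    for (DUIterator_Last imin, i = old_tst->last_outs(imin); i >= imin; --i) {
      old_tst->last_out(i)->set_req(0, nul_chk);
    }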
466 466
467 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) { 467 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
468 Node* use = n->fast_out(j); 468 Node* use = n->fast_out(j);
469 469
470 // The use is a conditional branch, make them adjacent 470 // The use is a conditional branch, make them adjacent
471 if (use->is_MachIf() && cfg->_bbs[use->_idx]==this ) { 471 if (use->is_MachIf() && cfg->get_block_for_node(use) == this) {
472 found_machif = true; 472 found_machif = true;
473 break; 473 break;
474 } 474 }
475 475
476 // More than this instruction pending for successor to be ready, 476 // More than this instruction pending for successor to be ready,
527 return n; 527 return n;
528 } 528 }
529 529
530 530
531 //------------------------------set_next_call---------------------------------- 531 //------------------------------set_next_call----------------------------------
532 void Block::set_next_call( Node *n, VectorSet &next_call, Block_Array &bbs ) { 532 void Block::set_next_call( Node *n, VectorSet &next_call, PhaseCFG* cfg) {
533 if( next_call.test_set(n->_idx) ) return; 533 if( next_call.test_set(n->_idx) ) return;
534 for( uint i=0; i<n->len(); i++ ) { 534 for( uint i=0; i<n->len(); i++ ) {
535 Node *m = n->in(i); 535 Node *m = n->in(i);
536 if( !m ) continue; // must see all nodes in block that precede call 536 if( !m ) continue; // must see all nodes in block that precede call
537 if( bbs[m->_idx] == this ) 537 if (cfg->get_block_for_node(m) == this) {
538 set_next_call( m, next_call, bbs ); 538 set_next_call(m, next_call, cfg);
539 }
539 } 540 }
540 } 541 }
541 542
542 //------------------------------needed_for_next_call--------------------------- 543 //------------------------------needed_for_next_call---------------------------
543 // Set the flag 'next_call' for each Node that is needed for the next call to 544 // Set the flag 'next_call' for each Node that is needed for the next call to
544 // be scheduled. This flag lets me bias scheduling so Nodes needed for the 545 // be scheduled. This flag lets me bias scheduling so Nodes needed for the
545 // next subroutine call get priority - basically it moves things NOT needed 546 // next subroutine call get priority - basically it moves things NOT needed
546 // for the next call till after the call. This prevents me from trying to 547 // for the next call till after the call. This prevents me from trying to
547 // carry lots of stuff live across a call. 548 // carry lots of stuff live across a call.
548 void Block::needed_for_next_call(Node *this_call, VectorSet &next_call, Block_Array &bbs) { 549 void Block::needed_for_next_call(Node *this_call, VectorSet &next_call, PhaseCFG* cfg) {
549 // Find the next control-defining Node in this block 550 // Find the next control-defining Node in this block
550 Node* call = NULL; 551 Node* call = NULL;
551 for (DUIterator_Fast imax, i = this_call->fast_outs(imax); i < imax; i++) { 552 for (DUIterator_Fast imax, i = this_call->fast_outs(imax); i < imax; i++) {
552 Node* m = this_call->fast_out(i); 553 Node* m = this_call->fast_out(i);
553 if( bbs[m->_idx] == this && // Local-block user 554 if(cfg->get_block_for_node(m) == this && // Local-block user
554 m != this_call && // Not self-start node 555 m != this_call && // Not self-start node
555 m->is_MachCall() ) 556 m->is_MachCall() )
556 call = m; 557 call = m;
557 break; 558 break;
558 } 559 }
559 if (call == NULL) return; // No next call (e.g., block end is near) 560 if (call == NULL) return; // No next call (e.g., block end is near)
560 // Set next-call for all inputs to this call 561 // Set next-call for all inputs to this call
561 set_next_call(call, next_call, bbs); 562 set_next_call(call, next_call, cfg);
562 } 563 }
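Note the loop body at old lines 556-557: with no braces, the break executes on every iteration, so only the first out-edge of this_call is ever examined. A braced sketch of the apparent intent, scanning all outputs (a correction sketch, not what the changeset commits):

    for (DUIterator_Fast imax, i = this_call->fast_outs(imax); i < imax; i++) {
      Node* m = this_call->fast_out(i);
      if (cfg->get_block_for_node(m) == this &&  // local-block user
          m != this_call &&                      // not the call itself
          m->is_MachCall()) {
        call = m;  // found the next call in this block
        break;     // now the break fires only on a match
      }
    }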
563 564
564 //------------------------------add_call_kills------------------------------------- 565 //------------------------------add_call_kills-------------------------------------
565 void Block::add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe) { 566 void Block::add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe) {
566 // Fill in the kill mask for the call 567 // Fill in the kill mask for the call
576 } 577 }
577 } 578 }
578 579
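The body of add_call_kills (old lines 567-576) is elided in this view. From its uses below, it fills the fat_proj's kill mask: every machine register not already defined by the call is inserted if the save policy marks it as clobbered. A hedged reconstruction; treat the policy characters and field names as assumptions:

    // For each machine register not produced by the call, consult the
    // save policy: 'C' (caller-save) and 'A' (always-save) registers are
    // killed; 'E' (save-on-entry) only when the caller excludes them.
    for (OptoReg::Name r = OptoReg::Name(0); r < _last_Mach_Reg; r = OptoReg::add(r, 1)) {
      if (!regs.Member(r)) {
        if (save_policy[r] == 'C' || save_policy[r] == 'A' ||
            (save_policy[r] == 'E' && exclude_soe)) {
          proj->_rout.Insert(r);  // record the kill on the projection
        }
      }
    }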
579 580
580 //------------------------------sched_call------------------------------------- 581 //------------------------------sched_call-------------------------------------
581 uint Block::sched_call( Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call ) { 582 uint Block::sched_call( Matcher &matcher, PhaseCFG* cfg, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call ) {
582 RegMask regs; 583 RegMask regs;
583 584
584 // Schedule all the users of the call right now. All the users are 585 // Schedule all the users of the call right now. All the users are
585 // projection Nodes, so they must be scheduled next to the call. 586 // projection Nodes, so they must be scheduled next to the call.
586 // Collect all the defined registers. 587 // Collect all the defined registers.
595 // Collect defined registers 596 // Collect defined registers
596 regs.OR(n->out_RegMask()); 597 regs.OR(n->out_RegMask());
597 // Check for scheduling the next control-definer 598 // Check for scheduling the next control-definer
598 if( n->bottom_type() == Type::CONTROL ) 599 if( n->bottom_type() == Type::CONTROL )
599 // Warm up next pile of heuristic bits 600 // Warm up next pile of heuristic bits
600 needed_for_next_call(n, next_call, bbs); 601 needed_for_next_call(n, next_call, cfg);
601 602
602 // Children of projections are now all ready 603 // Children of projections are now all ready
603 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) { 604 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
604 Node* m = n->fast_out(j); // Get user 605 Node* m = n->fast_out(j); // Get user
605 if( bbs[m->_idx] != this ) continue; 606 if(cfg->get_block_for_node(m) != this) {
607 continue;
608 }
606 if( m->is_Phi() ) continue; 609 if( m->is_Phi() ) continue;
607 int m_cnt = ready_cnt.at(m->_idx)-1; 610 int m_cnt = ready_cnt.at(m->_idx)-1;
608 ready_cnt.at_put(m->_idx, m_cnt); 611 ready_cnt.at_put(m->_idx, m_cnt);
609 if( m_cnt == 0 ) 612 if( m_cnt == 0 )
610 worklist.push(m); 613 worklist.push(m);
618 621
619 // Set all registers killed and not already defined by the call. 622 // Set all registers killed and not already defined by the call.
620 uint r_cnt = mcall->tf()->range()->cnt(); 623 uint r_cnt = mcall->tf()->range()->cnt();
621 int op = mcall->ideal_Opcode(); 624 int op = mcall->ideal_Opcode();
622 MachProjNode *proj = new (matcher.C) MachProjNode( mcall, r_cnt+1, RegMask::Empty, MachProjNode::fat_proj ); 625 MachProjNode *proj = new (matcher.C) MachProjNode( mcall, r_cnt+1, RegMask::Empty, MachProjNode::fat_proj );
623 bbs.map(proj->_idx,this); 626 cfg->map_node_to_block(proj, this);
624 _nodes.insert(node_cnt++, proj); 627 _nodes.insert(node_cnt++, proj);
625 628
626 // Select the right register save policy. 629 // Select the right register save policy.
627 const char * save_policy; 630 const char * save_policy;
628 switch (op) { 631 switch (op) {
706 // Count block-local inputs to 'n' 709 // Count block-local inputs to 'n'
707 uint cnt = n->len(); // Input count 710 uint cnt = n->len(); // Input count
708 uint local = 0; 711 uint local = 0;
709 for( uint j=0; j<cnt; j++ ) { 712 for( uint j=0; j<cnt; j++ ) {
710 Node *m = n->in(j); 713 Node *m = n->in(j);
711 if( m && cfg->_bbs[m->_idx] == this && !m->is_top() ) 714 if( m && cfg->get_block_for_node(m) == this && !m->is_top() )
712 local++; // One more block-local input 715 local++; // One more block-local input
713 } 716 }
714 ready_cnt.at_put(n->_idx, local); // Count em up 717 ready_cnt.at_put(n->_idx, local); // Count em up
715 718
716 #ifdef ASSERT 719 #ifdef ASSERT
718 if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_StoreCM ) { 721 if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_StoreCM ) {
719 // Check the precedence edges 722 // Check the precedence edges
720 for (uint prec = n->req(); prec < n->len(); prec++) { 723 for (uint prec = n->req(); prec < n->len(); prec++) {
721 Node* oop_store = n->in(prec); 724 Node* oop_store = n->in(prec);
722 if (oop_store != NULL) { 725 if (oop_store != NULL) {
723 assert(cfg->_bbs[oop_store->_idx]->_dom_depth <= this->_dom_depth, "oop_store must dominate card-mark"); 726 assert(cfg->get_block_for_node(oop_store)->_dom_depth <= this->_dom_depth, "oop_store must dominate card-mark");
724 } 727 }
725 } 728 }
726 } 729 }
727 } 730 }
728 #endif 731 #endif
751 uint i3; 754 uint i3;
752 for(i3 = 0; i3<phi_cnt; i3++ ) { // For all pre-scheduled 755 for(i3 = 0; i3<phi_cnt; i3++ ) { // For all pre-scheduled
753 Node *n = _nodes[i3]; // Get pre-scheduled 756 Node *n = _nodes[i3]; // Get pre-scheduled
754 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) { 757 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
755 Node* m = n->fast_out(j); 758 Node* m = n->fast_out(j);
756 if( cfg->_bbs[m->_idx] ==this ) { // Local-block user 759 if (cfg->get_block_for_node(m) == this) { // Local-block user
757 int m_cnt = ready_cnt.at(m->_idx)-1; 760 int m_cnt = ready_cnt.at(m->_idx)-1;
758 ready_cnt.at_put(m->_idx, m_cnt); // Fix ready count 761 ready_cnt.at_put(m->_idx, m_cnt); // Fix ready count
759 } 762 }
760 } 763 }
761 } 764 }
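The hunks from old line 706 onward implement list scheduling for one block: ready_cnt.at(n->_idx) counts n's unscheduled block-local inputs, nodes whose count reaches zero join the worklist, and placing a node releases its local users. The core loop, condensed (select_next is a hypothetical stand-in for the heuristic chooser, which is elided here):

    while (worklist.size() > 0) {
      Node* n = select_next(worklist);            // heuristic pick (hypothetical)
      _nodes.map(phi_cnt++, n);                   // schedule n next in the block
      for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
        Node* m = n->fast_out(i);                 // a user of n
        if (cfg->get_block_for_node(m) != this) { // lives in another block
          continue;
        }
        int m_cnt = ready_cnt.at(m->_idx) - 1;    // one fewer pending input
        ready_cnt.at_put(m->_idx, m_cnt);
        if (m_cnt == 0) {
          worklist.push(m);                       // all inputs placed: ready
        }
      }
    }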
784 Node* d = delay.pop(); 787 Node* d = delay.pop();
785 worklist.push(d); 788 worklist.push(d);
786 } 789 }
787 790
788 // Warm up the 'next_call' heuristic bits 791 // Warm up the 'next_call' heuristic bits
789 needed_for_next_call(_nodes[0], next_call, cfg->_bbs); 792 needed_for_next_call(_nodes[0], next_call, cfg);
790 793
791 #ifndef PRODUCT 794 #ifndef PRODUCT
792 if (cfg->trace_opto_pipelining()) { 795 if (cfg->trace_opto_pipelining()) {
793 for (uint j=0; j<_nodes.size(); j++) { 796 for (uint j=0; j<_nodes.size(); j++) {
794 Node *n = _nodes[j]; 797 Node *n = _nodes[j];
835 } 838 }
836 839
837 #endif 840 #endif
838 if( n->is_MachCall() ) { 841 if( n->is_MachCall() ) {
839 MachCallNode *mcall = n->as_MachCall(); 842 MachCallNode *mcall = n->as_MachCall();
840 phi_cnt = sched_call(matcher, cfg->_bbs, phi_cnt, worklist, ready_cnt, mcall, next_call); 843 phi_cnt = sched_call(matcher, cfg, phi_cnt, worklist, ready_cnt, mcall, next_call);
841 continue; 844 continue;
842 } 845 }
843 846
844 if (n->is_Mach() && n->as_Mach()->has_call()) { 847 if (n->is_Mach() && n->as_Mach()->has_call()) {
845 RegMask regs; 848 RegMask regs;
846 regs.Insert(matcher.c_frame_pointer()); 849 regs.Insert(matcher.c_frame_pointer());
847 regs.OR(n->out_RegMask()); 850 regs.OR(n->out_RegMask());
848 851
849 MachProjNode *proj = new (matcher.C) MachProjNode( n, 1, RegMask::Empty, MachProjNode::fat_proj ); 852 MachProjNode *proj = new (matcher.C) MachProjNode( n, 1, RegMask::Empty, MachProjNode::fat_proj );
850 cfg->_bbs.map(proj->_idx,this); 853 cfg->map_node_to_block(proj, this);
851 _nodes.insert(phi_cnt++, proj); 854 _nodes.insert(phi_cnt++, proj);
852 855
853 add_call_kills(proj, regs, matcher._c_reg_save_policy, false); 856 add_call_kills(proj, regs, matcher._c_reg_save_policy, false);
854 } 857 }
855 858
856 // Children are now all ready 859 // Children are now all ready
857 for (DUIterator_Fast i5max, i5 = n->fast_outs(i5max); i5 < i5max; i5++) { 860 for (DUIterator_Fast i5max, i5 = n->fast_outs(i5max); i5 < i5max; i5++) {
858 Node* m = n->fast_out(i5); // Get user 861 Node* m = n->fast_out(i5); // Get user
859 if( cfg->_bbs[m->_idx] != this ) continue; 862 if (cfg->get_block_for_node(m) != this) {
863 continue;
864 }
860 if( m->is_Phi() ) continue; 865 if( m->is_Phi() ) continue;
861 if (m->_idx >= max_idx) { // new node, skip it 866 if (m->_idx >= max_idx) { // new node, skip it
862 assert(m->is_MachProj() && n->is_Mach() && n->as_Mach()->has_call(), "unexpected node types"); 867 assert(m->is_MachProj() && n->is_Mach() && n->as_Mach()->has_call(), "unexpected node types");
863 continue; 868 continue;
864 } 869 }
912 } 917 }
913 } 918 }
914 } 919 }
915 920
916 //------------------------------catch_cleanup_find_cloned_def------------------ 921 //------------------------------catch_cleanup_find_cloned_def------------------
917 static Node *catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def_blk, Block_Array &bbs, int n_clone_idx) { 922 static Node *catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def_blk, PhaseCFG* cfg, int n_clone_idx) {
918 assert( use_blk != def_blk, "Inter-block cleanup only"); 923 assert( use_blk != def_blk, "Inter-block cleanup only");
919 924
920 // The use is some block below the Catch. Find and return the clone of the def 925 // The use is some block below the Catch. Find and return the clone of the def
921 // that dominates the use. If there is no clone in a dominating block, then 926 // that dominates the use. If there is no clone in a dominating block, then
922 // create a phi for the def in a dominating block. 927 // create a phi for the def in a dominating block.
938 if( j == def_blk->_num_succs ) { 943 if( j == def_blk->_num_succs ) {
939 // Block at same level in dom-tree is not a successor. It needs a 944 // Block at same level in dom-tree is not a successor. It needs a
940 // PhiNode, the PhiNode uses from the def and IT's uses need fixup. 945 // PhiNode, the PhiNode uses from the def and IT's uses need fixup.
941 Node_Array inputs = new Node_List(Thread::current()->resource_area()); 946 Node_Array inputs = new Node_List(Thread::current()->resource_area());
942 for(uint k = 1; k < use_blk->num_preds(); k++) { 947 for(uint k = 1; k < use_blk->num_preds(); k++) {
943 inputs.map(k, catch_cleanup_find_cloned_def(bbs[use_blk->pred(k)->_idx], def, def_blk, bbs, n_clone_idx)); 948 Block* block = cfg->get_block_for_node(use_blk->pred(k));
949 inputs.map(k, catch_cleanup_find_cloned_def(block, def, def_blk, cfg, n_clone_idx));
944 } 950 }
945 951
946 // Check to see if the use_blk already has an identical phi inserted. 952 // Check to see if the use_blk already has an identical phi inserted.
947 // If it exists, it will be at the first position since all uses of a 953 // If it exists, it will be at the first position since all uses of a
948 // def are processed together. 954 // def are processed together.
960 966
961 // If an existing PhiNode was not found, make a new one. 967 // If an existing PhiNode was not found, make a new one.
962 if (fixup == NULL) { 968 if (fixup == NULL) {
963 Node *new_phi = PhiNode::make(use_blk->head(), def); 969 Node *new_phi = PhiNode::make(use_blk->head(), def);
964 use_blk->_nodes.insert(1, new_phi); 970 use_blk->_nodes.insert(1, new_phi);
965 bbs.map(new_phi->_idx, use_blk); 971 cfg->map_node_to_block(new_phi, use_blk);
966 for (uint k = 1; k < use_blk->num_preds(); k++) { 972 for (uint k = 1; k < use_blk->num_preds(); k++) {
967 new_phi->set_req(k, inputs[k]); 973 new_phi->set_req(k, inputs[k]);
968 } 974 }
969 fixup = new_phi; 975 fixup = new_phi;
970 } 976 }
1000 } 1006 }
1001 1007
1002 //------------------------------catch_cleanup_inter_block--------------------- 1008 //------------------------------catch_cleanup_inter_block---------------------
1003 // Fix all input edges in use that reference "def". The use is in a different 1009 // Fix all input edges in use that reference "def". The use is in a different
1004 // block than the def. 1010 // block than the def.
1005 static void catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, Block_Array &bbs, int n_clone_idx) { 1011 static void catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, PhaseCFG* cfg, int n_clone_idx) {
1006 if( !use_blk ) return; // Can happen if the use is a precedence edge 1012 if( !use_blk ) return; // Can happen if the use is a precedence edge
1007 1013
1008 Node *new_def = catch_cleanup_find_cloned_def(use_blk, def, def_blk, bbs, n_clone_idx); 1014 Node *new_def = catch_cleanup_find_cloned_def(use_blk, def, def_blk, cfg, n_clone_idx);
1009 catch_cleanup_fix_all_inputs(use, def, new_def); 1015 catch_cleanup_fix_all_inputs(use, def, new_def);
1010 } 1016 }
1011 1017
1012 //------------------------------call_catch_cleanup----------------------------- 1018 //------------------------------call_catch_cleanup-----------------------------
1013 // If we inserted any instructions between a Call and its CatchNode, 1019 // If we inserted any instructions between a Call and its CatchNode,
1014 // clone the instructions on all paths below the Catch. 1020 // clone the instructions on all paths below the Catch.
1015 void Block::call_catch_cleanup(Block_Array &bbs, Compile* C) { 1021 void Block::call_catch_cleanup(PhaseCFG* cfg, Compile* C) {
1016 1022
1017 // End of region to clone 1023 // End of region to clone
1018 uint end = end_idx(); 1024 uint end = end_idx();
1019 if( !_nodes[end]->is_Catch() ) return; 1025 if( !_nodes[end]->is_Catch() ) return;
1020 // Start of region to clone 1026 // Start of region to clone
1035 for( uint j = end; j > beg; j-- ) { 1041 for( uint j = end; j > beg; j-- ) {
1036 // It is safe here to clone a node with anti_dependence 1042 // It is safe here to clone a node with anti_dependence
1037 // since clones dominate on each path. 1043 // since clones dominate on each path.
1038 Node *clone = _nodes[j-1]->clone(); 1044 Node *clone = _nodes[j-1]->clone();
1039 sb->_nodes.insert( 1, clone ); 1045 sb->_nodes.insert( 1, clone );
1040 bbs.map(clone->_idx,sb); 1046 cfg->map_node_to_block(clone, sb);
1041 } 1047 }
1042 } 1048 }
1043 1049
1044 1050
1045 // Fixup edges. Check the def-use info per cloned Node 1051 // Fixup edges. Check the def-use info per cloned Node
1052 out->push(n->fast_out(j1)); 1058 out->push(n->fast_out(j1));
1053 } 1059 }
1054 uint max = out->size(); 1060 uint max = out->size();
1055 for (uint j = 0; j < max; j++) {// For all users 1061 for (uint j = 0; j < max; j++) {// For all users
1056 Node *use = out->pop(); 1062 Node *use = out->pop();
1057 Block *buse = bbs[use->_idx]; 1063 Block *buse = cfg->get_block_for_node(use);
1058 if( use->is_Phi() ) { 1064 if( use->is_Phi() ) {
1059 for( uint k = 1; k < use->req(); k++ ) 1065 for( uint k = 1; k < use->req(); k++ )
1060 if( use->in(k) == n ) { 1066 if( use->in(k) == n ) {
1061 Node *fixup = catch_cleanup_find_cloned_def(bbs[buse->pred(k)->_idx], n, this, bbs, n_clone_idx); 1067 Block* block = cfg->get_block_for_node(buse->pred(k));
1068 Node *fixup = catch_cleanup_find_cloned_def(block, n, this, cfg, n_clone_idx);
1062 use->set_req(k, fixup); 1069 use->set_req(k, fixup);
1063 } 1070 }
1064 } else { 1071 } else {
1065 if (this == buse) { 1072 if (this == buse) {
1066 catch_cleanup_intra_block(use, n, this, beg, n_clone_idx); 1073 catch_cleanup_intra_block(use, n, this, beg, n_clone_idx);
1067 } else { 1074 } else {
1068 catch_cleanup_inter_block(use, buse, n, this, bbs, n_clone_idx); 1075 catch_cleanup_inter_block(use, buse, n, this, cfg, n_clone_idx);
1069 } 1076 }
1070 } 1077 }
1071 } // End for all users 1078 } // End for all users
1072 1079
1073 } // End of for all Nodes in cloned area 1080 } // End of for all Nodes in cloned area
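Taken together, call_catch_cleanup proceeds in two passes: clone every instruction between the call and its Catch into each successor block, then rewire uses of the originals to the clone that dominates each use, inserting a Phi where no single clone dominates. A skeleton of the first pass, using the names from the diff:

    for (uint i = 0; i < _num_succs; i++) {   // each path below the Catch
      Block* sb = _succs[i];
      for (uint j = end; j > beg; j--) {      // region between call and Catch
        Node* clone = _nodes[j-1]->clone();   // copy the instruction
        sb->_nodes.insert(1, clone);          // place at the top of the successor
        cfg->map_node_to_block(clone, sb);    // keep the node-to-block map current
      }
    }
    // Second pass (shown above): for every original node in the cloned region,
    // redirect each use to the result of catch_cleanup_find_cloned_def.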