comparison src/share/vm/opto/reg_split.cpp @ 14422:2b8e28fdf503

Merge
author kvn
date Tue, 05 Nov 2013 17:38:04 -0800
parents 4c9115774c8e
children de6a9e811145
comparison of 14421:3068270ba476 (old) with 14422:2b8e28fdf503 (new)
@@ -49,19 +49,10 @@
 // As a side effect, unlink from (hence make dead) coalesced copies.
 //
 
 static const char out_of_nodes[] = "out of nodes during split";
 
-static bool contains_no_live_range_input(const Node* def) {
-  for (uint i = 1; i < def->req(); ++i) {
-    if (def->in(i) != NULL && def->in_RegMask(i).is_NotEmpty()) {
-      return false;
-    }
-  }
-  return true;
-}
-
 //------------------------------get_spillcopy_wide-----------------------------
 // Get a SpillCopy node with wide-enough masks. Use the 'wide-mask', the
 // wide ideal-register spill-mask if possible. If the 'wide-mask' does
 // not cover the input (or output), use the input (or output) mask instead.
 Node *PhaseChaitin::get_spillcopy_wide( Node *def, Node *use, uint uidx ) {
@@ -110,30 +101,30 @@
 // Phis. Skip over a CatchNode and projs, inserting in the fall-through block
 // instead. Update high-pressure indices. Create a new live range.
 void PhaseChaitin::insert_proj( Block *b, uint i, Node *spill, uint maxlrg ) {
   // Skip intervening ProjNodes. Do not insert between a ProjNode and
   // its definer.
-  while( i < b->_nodes.size() &&
-         (b->_nodes[i]->is_Proj() ||
-          b->_nodes[i]->is_Phi() ) )
+  while( i < b->number_of_nodes() &&
+         (b->get_node(i)->is_Proj() ||
+          b->get_node(i)->is_Phi() ) )
     i++;
 
   // Do not insert between a call and his Catch
-  if( b->_nodes[i]->is_Catch() ) {
+  if( b->get_node(i)->is_Catch() ) {
     // Put the instruction at the top of the fall-thru block.
     // Find the fall-thru projection
     while( 1 ) {
-      const CatchProjNode *cp = b->_nodes[++i]->as_CatchProj();
+      const CatchProjNode *cp = b->get_node(++i)->as_CatchProj();
       if( cp->_con == CatchProjNode::fall_through_index )
         break;
     }
     int sidx = i - b->end_idx()-1;
     b = b->_succs[sidx];   // Switch to successor block
     i = 1;                 // Right at start of block
   }
 
-  b->_nodes.insert(i,spill);        // Insert node in block
+  b->insert_node(spill, i);         // Insert node in block
   _cfg.map_node_to_block(spill, b); // Update node->block mapping to reflect
   // Adjust the point where we go hi-pressure
   if( i <= b->_ihrp_index ) b->_ihrp_index++;
   if( i <= b->_fhrp_index ) b->_fhrp_index++;
 
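Most of this changeset replaces direct pokes at Block::_nodes with accessor calls (number_of_nodes, get_node, insert_node, remove_node), as in the hunk above. The following is a minimal stand-alone sketch of the accessor shapes those call sites imply; the toy MockBlock class, its std::vector backing store, and the helper count_nodes are illustrative assumptions, not HotSpot's actual Block implementation.

#include <cstddef>
#include <vector>

struct Node;  // opaque stand-in for HotSpot's Node

// Illustrative only: a toy block exposing the same accessor shapes the diff
// switches to, backed here by a std::vector instead of the real Node_List.
class MockBlock {
  std::vector<Node*> _nodes;
public:
  unsigned number_of_nodes() const          { return (unsigned)_nodes.size(); }
  Node*    get_node(unsigned i) const       { return _nodes[i]; }
  void     insert_node(Node* n, unsigned i) { _nodes.insert(_nodes.begin() + i, n); }
  void     remove_node(unsigned i)          { _nodes.erase(_nodes.begin() + i); }
};

// Index-based walk in the style used throughout Split():
// for (insidx = ...; ...) { Node *n = b->get_node(insidx); ... }
static unsigned count_present_nodes(const MockBlock& b) {
  unsigned n = 0;
  for (unsigned i = 0; i < b.number_of_nodes(); i++) {
    if (b.get_node(i) != NULL) n++;   // mirrors b->get_node(insidx) in the diff
  }
  return n;
}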
@@ -158,13 +149,13 @@
   // null check location (ie - null check is in HRP block) we need to do
   // the null-check first, then spill-down in the following block.
   // (The implicit_null_check function ensures the use is also dominated
   // by the branch-not-taken block.)
   Node *be = b->end();
-  if( be->is_MachNullCheck() && be->in(1) == def && def == b->_nodes[loc] ) {
+  if( be->is_MachNullCheck() && be->in(1) == def && def == b->get_node(loc)) {
     // Spill goes in the branch-not-taken block
-    b = b->_succs[b->_nodes[b->end_idx()+1]->Opcode() == Op_IfTrue];
+    b = b->_succs[b->get_node(b->end_idx()+1)->Opcode() == Op_IfTrue];
     loc = 0;                      // Just past the Region
   }
   assert( loc >= 0, "must insert past block head" );
 
   // Get a def-side SpillCopy
@@ -324,16 +315,15 @@
   // same time - a definite no-no. Split out private copies of
   // the inputs.
   if( def->req() > 1 ) {
     for( uint i = 1; i < def->req(); i++ ) {
       Node *in = def->in(i);
-      // Check for single-def (LRG cannot redefined)
       uint lidx = _lrg_map.live_range_id(in);
-      if (lidx >= _lrg_map.max_lrg_id()) {
-        continue; // Value is a recent spill-copy
-      }
-      if (lrgs(lidx).is_singledef()) {
+      // We do not need this for live ranges that are only defined once.
+      // However, this is not true for spill copies that are added in this
+      // Split() pass, since they might get coalesced later on in this pass.
+      if (lidx < _lrg_map.max_lrg_id() && lrgs(lidx).is_singledef()) {
         continue;
       }
 
       Block *b_def = _cfg.get_block_for_node(def);
       int idx_def = b_def->find_node(def);
@@ -448,11 +438,11 @@
     return false;
   }
 
   // Scan block for 1st use.
   for( uint i = 1; i <= b->end_idx(); i++ ) {
-    Node *n = b->_nodes[i];
+    Node *n = b->get_node(i);
     // Ignore PHI use, these can be up or down
     if (n->is_Phi()) {
       continue;
     }
     for (uint j = 1; j < n->req(); j++) {
@@ -483,11 +473,10 @@
   // Free thread local resources used by this method on exit.
   ResourceMark rm(split_arena);
 
   uint bidx, pidx, slidx, insidx, inpidx, twoidx;
   uint non_phi = 1, spill_cnt = 0;
-  Node **Reachblock;
   Node *n1, *n2, *n3;
   Node_List *defs,*phis;
   bool *UPblock;
   bool u1, u2, u3;
   Block *b, *pred;
@@ -566,11 +555,11 @@
       return 0;
     }
 
     b = _cfg.get_block(bidx);
     // Reaches & UP arrays for this block
-    Reachblock = Reaches[b->_pre_order];
+    Node** Reachblock = Reaches[b->_pre_order];
     UPblock = UP[b->_pre_order];
     // Reset counter of start of non-Phi nodes in block
     non_phi = 1;
     //----------Block Entry Handling----------
     // Check for need to insert a new phi
@@ -645,11 +634,11 @@
       }
     } // End for all potential Phi inputs
 
     // check block for appropriate phinode & update edges
    for( insidx = 1; insidx <= b->end_idx(); insidx++ ) {
-      n1 = b->get_node(insidx);
+      n1 = b->get_node(insidx);
       // bail if this is not a phi
       phi = n1->is_Phi() ? n1->as_Phi() : NULL;
       if( phi == NULL ) {
         // Keep track of index of first non-PhiNode instruction in block
         non_phi = insidx;
@@ -745,11 +734,11 @@
     }
 
     //----------Walk Instructions in the Block and Split----------
     // For all non-phi instructions in the block
     for( insidx = 1; insidx <= b->end_idx(); insidx++ ) {
-      Node *n = b->_nodes[insidx];
+      Node *n = b->get_node(insidx);
       // Find the defining Node's live range index
       uint defidx = _lrg_map.find_id(n);
       uint cnt = n->req();
 
       if (n->is_Phi()) {
@@ -774,11 +763,11 @@
         assert( u, "at least 1 valid input expected" );
         if (i >= cnt) { // Found one unique input
           assert(_lrg_map.find_id(n) == _lrg_map.find_id(u), "should be the same lrg");
           n->replace_by(u); // Then replace with unique input
           n->disconnect_inputs(NULL, C);
-          b->_nodes.remove(insidx);
+          b->remove_node(insidx);
           insidx--;
           b->_ihrp_index--;
           b->_fhrp_index--;
         }
       }
@@ -787,16 +776,16 @@
       }
       assert( insidx > b->_ihrp_index ||
               (b->_reg_pressure < (uint)INTPRESSURE) ||
               b->_ihrp_index > 4000000 ||
               b->_ihrp_index >= b->end_idx() ||
-              !b->_nodes[b->_ihrp_index]->is_Proj(), "" );
+              !b->get_node(b->_ihrp_index)->is_Proj(), "" );
       assert( insidx > b->_fhrp_index ||
               (b->_freg_pressure < (uint)FLOATPRESSURE) ||
               b->_fhrp_index > 4000000 ||
               b->_fhrp_index >= b->end_idx() ||
-              !b->_nodes[b->_fhrp_index]->is_Proj(), "" );
+              !b->get_node(b->_fhrp_index)->is_Proj(), "" );
 
       // ********** Handle Crossing HRP Boundry **********
       if( (insidx == b->_ihrp_index) || (insidx == b->_fhrp_index) ) {
         for( slidx = 0; slidx < spill_cnt; slidx++ ) {
           // Check for need to split at HRP boundary - split if UP
@@ -817,11 +806,11 @@
           }
           else {
             // Insert point is just past last use or def in the block
             int insert_point = insidx-1;
             while( insert_point > 0 ) {
-              Node *n = b->_nodes[insert_point];
+              Node *n = b->get_node(insert_point);
               // Hit top of block? Quit going backwards
               if (n->is_Phi()) {
                 break;
               }
               // Found a def? Better split after it.
@@ -863,11 +852,11 @@
               }
 #endif
             }
           } // end if LRG is UP
         } // end for all spilling live ranges
-        assert( b->_nodes[insidx] == n, "got insidx set incorrectly" );
+        assert( b->get_node(insidx) == n, "got insidx set incorrectly" );
       } // end if crossing HRP Boundry
 
       // If the LRG index is oob, then this is a new spillcopy, skip it.
       if (defidx >= _lrg_map.max_lrg_id()) {
         continue;
@@ -876,11 +865,11 @@
       uint copyidx = n->is_Copy();
       // Remove coalesced copy from CFG
       if (copyidx && defidx == _lrg_map.live_range_id(n->in(copyidx))) {
         n->replace_by( n->in(copyidx) );
         n->set_req( copyidx, NULL );
-        b->_nodes.remove(insidx--);
+        b->remove_node(insidx--);
         b->_ihrp_index--; // Adjust the point where we go hi-pressure
         b->_fhrp_index--;
         continue;
       }
 
@@ -930,14 +919,14 @@
         }
 
         // Rematerializable? Then clone def at use site instead
         // of store/load
         if( def->rematerialize() ) {
-          int old_size = b->_nodes.size();
+          int old_size = b->number_of_nodes();
           def = split_Rematerialize( def, b, insidx, maxlrg, splits, slidx, lrg2reach, Reachblock, true );
           if( !def ) return 0; // Bail out
-          insidx += b->_nodes.size()-old_size;
+          insidx += b->number_of_nodes()-old_size;
         }
 
         MachNode *mach = n->is_Mach() ? n->as_Mach() : NULL;
         // Base pointers and oopmap references do not care where they live.
         if ((inpidx >= oopoff) ||
@@ -1322,20 +1311,21 @@
       // Get predecessor block pre-order number
       Block *pred = _cfg.get_block_for_node(b->pred(i));
       pidx = pred->_pre_order;
       // Grab reaching def
       Node *def = Reaches[pidx][slidx];
+      Node** Reachblock = Reaches[pidx];
       assert( def, "must have reaching def" );
       // If input up/down sense and reg-pressure DISagree
-      if (def->rematerialize() && contains_no_live_range_input(def)) {
+      if (def->rematerialize()) {
         // Place the rematerialized node above any MSCs created during
         // phi node splitting. end_idx points at the insertion point
         // so look at the node before it.
         int insert = pred->end_idx();
         while (insert >= 1 &&
-               pred->_nodes[insert - 1]->is_SpillCopy() &&
-               _lrg_map.find(pred->_nodes[insert - 1]) >= lrgs_before_phi_split) {
+               pred->get_node(insert - 1)->is_SpillCopy() &&
+               _lrg_map.find(pred->get_node(insert - 1)) >= lrgs_before_phi_split) {
           insert--;
         }
         def = split_Rematerialize(def, pred, insert, maxlrg, splits, slidx, lrg2reach, Reachblock, false);
         if (!def) {
           return 0; // Bail out
@@ -1400,11 +1390,11 @@
 #ifdef ASSERT
   // Validate all live range index assignments
   for (bidx = 0; bidx < _cfg.number_of_blocks(); bidx++) {
     b = _cfg.get_block(bidx);
     for (insidx = 0; insidx <= b->end_idx(); insidx++) {
-      Node *n = b->_nodes[insidx];
+      Node *n = b->get_node(insidx);
       uint defidx = _lrg_map.find(n);
       assert(defidx < _lrg_map.max_lrg_id(), "Bad live range index in Split");
       assert(defidx < maxlrg,"Bad live range index in Split");
     }
   }
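The other recurring change above narrows Reachblock from a function-scoped cursor to a block-local row pointer into the per-block reaching-definitions table (Node** Reachblock = Reaches[b->_pre_order], with lookups of the form Reaches[pidx][slidx]). A minimal sketch of that indexing pattern follows; the dimensions and names (num_blocks, spill_cnt, reaches_storage, reaching_def) are placeholders for illustration, not Split()'s actual bookkeeping.

#include <cstddef>

struct Node;  // opaque stand-in

// Hypothetical sizes, for illustration only.
static const unsigned num_blocks = 8;
static const unsigned spill_cnt  = 4;

// reaches_storage[pre_order][slidx] holds the def of spilling live range
// slidx that reaches the end of the block with that pre-order number.
static Node* reaches_storage[num_blocks][spill_cnt];

static Node* reaching_def(unsigned pre_order, unsigned slidx) {
  Node** Reachblock = reaches_storage[pre_order];  // block-local row pointer, as in the diff
  return Reachblock[slidx];                        // same value as Reaches[pidx][slidx]
}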