comparison src/share/vm/opto/chaitin.cpp @ 12023:d1034bd8cefc

8022284: Hide internal data structure in PhaseCFG
Summary: Hide private node to block mapping using public interface
Reviewed-by: kvn, roland
author adlertz
date Wed, 07 Aug 2013 17:56:19 +0200
parents 693e4d04fd09
children adb9a7d94cb5
comparing 12004:71526a36ebb4 with 12023:d1034bd8cefc
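The change is mechanical: every read of the formerly public _cfg._bbs node-to-block array now goes through PhaseCFG::get_block_for_node(), and every update through PhaseCFG::map_node_to_block(). The PhaseCFG side of the patch is not part of this file's comparison; the following is only a sketch of what the accessors plausibly look like, assuming the mapping remains a Block_Array (the private member name below is a guess, the two signatures are taken from the call sites in this file):

// Sketch only -- the real declarations live in block.hpp, which is not shown
// in this comparison.
class PhaseCFG : public Phase {
 private:
  Block_Array _node_to_block_mapping;   // formerly the public _bbs

 public:
  // Read side: which basic block holds this node?
  Block* get_block_for_node(const Node* node) const {
    return _node_to_block_mapping[node->_idx];
  }

  // Write side: record (or update) the block a node belongs to.
  void map_node_to_block(const Node* node, Block* block) {
    _node_to_block_mapping.map(node->_idx, block);
  }
};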
@@ old 293-314 / new 293-314 @@
   _lrg_map.uf_extend(lrg, lrg);
 }


 bool PhaseChaitin::clone_projs_shared(Block *b, uint idx, Node *con, Node *copy, uint max_lrg_id) {
-  Block *bcon = _cfg._bbs[con->_idx];
+  Block* bcon = _cfg.get_block_for_node(con);
   uint cindex = bcon->find_node(con);
   Node *con_next = bcon->_nodes[cindex+1];
   if (con_next->in(0) != con || !con_next->is_MachProj()) {
     return false; // No MachProj's follow
   }

   // Copy kills after the cloned constant
   Node *kills = con_next->clone();
   kills->set_req(0, copy);
   b->_nodes.insert(idx, kills);
-  _cfg._bbs.map(kills->_idx, b);
+  _cfg.map_node_to_block(kills, b);
   new_lrg(kills, max_lrg_id);
   return true;
 }

 //------------------------------compact----------------------------------------
@@ old 960-971 / new 960-970 @@
       // Limit result register mask to acceptable registers.
       // Do not limit registers from uncommon uses before
       // AggressiveCoalesce.  This effectively pre-virtual-splits
       // around uncommon uses of common defs.
       const RegMask &rm = n->in_RegMask(k);
-      if( !after_aggressive &&
-        _cfg._bbs[n->in(k)->_idx]->_freq > 1000*b->_freq ) {
+      if (!after_aggressive && _cfg.get_block_for_node(n->in(k))->_freq > 1000 * b->_freq) {
         // Since we are BEFORE aggressive coalesce, leave the register
         // mask untrimmed by the call.  This encourages more coalescing.
         // Later, AFTER aggressive, this live range will have to spill
         // but the spiller handles slow-path calls very nicely.
       } else {
@@ old 1707-1726 / new 1706-1724 @@
     if (base->in(0) == NULL) {
       // Initialize it once and make it shared:
       // set control to _root and place it into Start block
       // (where top() node is placed).
       base->init_req(0, _cfg._root);
-      Block *startb = _cfg._bbs[C->top()->_idx];
+      Block *startb = _cfg.get_block_for_node(C->top());
       startb->_nodes.insert(startb->find_node(C->top()), base );
-      _cfg._bbs.map( base->_idx, startb );
+      _cfg.map_node_to_block(base, startb);
       assert(_lrg_map.live_range_id(base) == 0, "should not have LRG yet");
     }
     if (_lrg_map.live_range_id(base) == 0) {
       new_lrg(base, maxlrg++);
     }
-    assert(base->in(0) == _cfg._root &&
-           _cfg._bbs[base->_idx] == _cfg._bbs[C->top()->_idx], "base NULL should be shared");
+    assert(base->in(0) == _cfg._root && _cfg.get_block_for_node(base) == _cfg.get_block_for_node(C->top()), "base NULL should be shared");
     derived_base_map[derived->_idx] = base;
     return base;
   }

   // Check for AddP-related opcodes
@@ old 1752-1767 / new 1750-1765 @@
     t = t->meet(base->in(i)->bottom_type());
   }
   base->as_Phi()->set_type(t);

   // Search the current block for an existing base-Phi
-  Block *b = _cfg._bbs[derived->_idx];
+  Block *b = _cfg.get_block_for_node(derived);
   for( i = 1; i <= b->end_idx(); i++ ) {// Search for matching Phi
     Node *phi = b->_nodes[i];
     if( !phi->is_Phi() ) {       // Found end of Phis with no match?
       b->_nodes.insert( i, base ); // Must insert created Phi here as base
-      _cfg._bbs.map( base->_idx, b );
+      _cfg.map_node_to_block(base, b);
       new_lrg(base,maxlrg++);
       break;
     }
     // See if Phi matches.
     uint j;
@@ old 1813-1824 / new 1811-1822 @@
       // one after.  Instead we split the input to the compare just after the
       // phi.
       if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_CmpI ) {
         Node *phi = n->in(1);
         if( phi->is_Phi() && phi->as_Phi()->region()->is_Loop() ) {
-          Block *phi_block = _cfg._bbs[phi->_idx];
-          if( _cfg._bbs[phi_block->pred(2)->_idx] == b ) {
+          Block *phi_block = _cfg.get_block_for_node(phi);
+          if (_cfg.get_block_for_node(phi_block->pred(2)) == b) {
             const RegMask *mask = C->matcher()->idealreg2spillmask[Op_RegI];
             Node *spill = new (C) MachSpillCopyNode( phi, *mask, *mask );
             insert_proj( phi_block, 1, spill, maxlrg++ );
             n->set_req(1,spill);
             must_recompute_live = true;
@@ old 1868-1878 / new 1866-1876 @@
           // reaching def's.  So if I find the base's live range then
           // I know the base's def reaches here.
           if ((_lrg_map.live_range_id(base) >= _lrg_map.max_lrg_id() || // (Brand new base (hence not live) or
                !liveout.member(_lrg_map.live_range_id(base))) && // not live) AND
                (_lrg_map.live_range_id(base) > 0) && // not a constant
-               _cfg._bbs[base->_idx] != b) { // base not def'd in blk)
+               _cfg.get_block_for_node(base) != b) { // base not def'd in blk)
             // Base pointer is not currently live.  Since I stretched
             // the base pointer to here and it crosses basic-block
             // boundaries, the global live info is now incorrect.
             // Recompute live.
             must_recompute_live = true;
@@ old 1991-2002 / new 1989-2000 @@
     tty->print(" Spill_2");
   }
   tty->print("\n");
 }

-void PhaseChaitin::dump( const Block * b ) const {
-  b->dump_head( &_cfg._bbs );
+void PhaseChaitin::dump(const Block *b) const {
+  b->dump_head(&_cfg);

   // For all instructions
   for( uint j = 0; j < b->_nodes.size(); j++ )
     dump(b->_nodes[j]);
   // Print live-out info at end of block
@@ old 2297-2307 / new 2295-2305 @@
     for( uint j = 0; j < b->_nodes.size(); j++ ) {
       Node *n = b->_nodes[j];
       if (_lrg_map.find_const(n) == lidx) {
         if (!dump_once++) {
           tty->cr();
-          b->dump_head( &_cfg._bbs );
+          b->dump_head(&_cfg);
         }
         dump(n);
         continue;
       }
       if (!defs_only) {
@@ old 2312-2322 / new 2310-2320 @@
            continue;  // be robust in the dumper
          }
          if (_lrg_map.find_const(m) == lidx) {
            if (!dump_once++) {
              tty->cr();
-              b->dump_head(&_cfg._bbs);
+              b->dump_head(&_cfg);
            }
            dump(n);
          }
        }
      }
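Both dump routines now hand the PhaseCFG itself to Block::dump_head() instead of the raw _bbs array, which implies a matching signature change on the Block side that is outside this comparison. A hedged sketch of that counterpart, assuming the head dump only needs the node-to-block query to name each predecessor's block (the body below is illustrative, not the actual one):

// Sketch only -- the real change to dump_head() lives in block.hpp/block.cpp,
// neither of which is part of this comparison.
void Block::dump_head(const PhaseCFG* cfg) const {
  tty->print("B%d: ", _pre_order);
  // For each predecessor edge, look up and print the predecessor's block.
  // This lookup is the only thing the old Block_Array* argument was used for.
  for (uint i = 1; i < num_preds(); i++) {
    Node* p = pred(i);
    if (cfg != NULL) {
      Block* pred_block = cfg->get_block_for_node(p);
      tty->print("B%d ", pred_block->_pre_order);
    }
  }
  tty->cr();
}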