src/share/vm/opto/output.cpp @ 12023:d1034bd8cefc

8022284: Hide internal data structure in PhaseCFG
Summary: Hide private node to block mapping using public interface
Reviewed-by: kvn, roland

author    adlertz
date      Wed, 07 Aug 2013 17:56:19 +0200
parents   70120f47d403
children  3cce976666d9 adb9a7d94cb5
comparison 12004:71526a36ebb4 -> 12023:d1034bd8cefc
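
Every hunk below makes the same kind of change: a direct read or write of the formerly exposed PhaseCFG::_bbs array (a Block_Array mapping node indices to blocks) becomes a call through PhaseCFG's public interface. The method names map_node_to_block, unmap_node_from_block and get_block_for_node are taken from the new side of the diff; the sketch below only illustrates the shape of such an interface, with stand-in types and assumed bodies, not HotSpot's actual declarations.

    #include <cstddef>

    // Minimal sketch, assuming a flat array keyed by Node::_idx; the real
    // PhaseCFG's storage and growth handling are not visible in this diff.
    class Node  { public: unsigned _idx; };
    class Block { };

    class PhaseCFG {
     private:
      // Formerly the public Block_Array _bbs that call sites indexed directly.
      Block** _node_to_block_mapping;

     public:
      void map_node_to_block(const Node* n, Block* b) {
        _node_to_block_mapping[n->_idx] = b;
      }
      void unmap_node_from_block(const Node* n) {
        _node_to_block_mapping[n->_idx] = NULL;  // node now belongs to no block
      }
      Block* get_block_for_node(const Node* n) const {
        return _node_to_block_mapping[n->_idx];
      }
    };

With the mapping private, its representation can change without touching clients; the diff itself is then almost entirely the mechanical rewrite of those call sites.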
@@ -66,21 +66,20 @@
                            C->inner_loops()*(OptoLoopAlignment-1)),
                           "out of nodes before code generation" ) ) {
     return;
   }
   // Make sure I can find the Start Node
-  Block_Array& bbs = _cfg->_bbs;
   Block *entry = _cfg->_blocks[1];
   Block *broot = _cfg->_broot;
 
   const StartNode *start = entry->_nodes[0]->as_Start();
 
   // Replace StartNode with prolog
   MachPrologNode *prolog = new (this) MachPrologNode();
   entry->_nodes.map( 0, prolog );
-  bbs.map( prolog->_idx, entry );
-  bbs.map( start->_idx, NULL ); // start is no longer in any block
+  _cfg->map_node_to_block(prolog, entry);
+  _cfg->unmap_node_from_block(start); // start is no longer in any block
 
   // Virtual methods need an unverified entry point
 
   if( is_osr_compilation() ) {
     if( PoisonOSREntry ) {
@@ -115,12 +114,11 @@
     if( !b->is_connector() && b->non_connector_successor(0) == _cfg->_broot ) { // Found a program exit point?
       Node *m = b->end();
       if( m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt ) {
         MachEpilogNode *epilog = new (this) MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
         b->add_inst( epilog );
-        bbs.map(epilog->_idx, b);
-        //_regalloc->set_bad(epilog->_idx); // Already initialized this way.
+        _cfg->map_node_to_block(epilog, b);
       }
     }
   }
 
 # ifdef ENABLE_ZAP_DEAD_LOCALS
@@ -250,11 +248,11 @@
         }
       }
       if (insert) {
         Node *zap = call_zap_node(n->as_MachSafePoint(), i);
         b->_nodes.insert( j, zap );
-        _cfg->_bbs.map( zap->_idx, b );
+        _cfg->map_node_to_block(zap, b);
         ++j;
       }
     }
   }
 }
@@ -1232,11 +1230,11 @@
     cb->flush_bundle(true);
 
 #ifdef ASSERT
     if (!b->is_connector()) {
       stringStream st;
-      b->dump_head(&_cfg->_bbs, &st);
+      b->dump_head(_cfg, &st);
       MacroAssembler(cb).block_comment(st.as_string());
     }
     jmp_target[i] = 0;
     jmp_offset[i] = 0;
     jmp_size[i]   = 0;
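
The dump_head change follows the same principle at the type level: instead of the raw &_cfg->_bbs array, the block dumper now takes the PhaseCFG and asks it where nodes live. A toy model of why a dumper wants that query (everything here is invented except the get_block_for_node name):

    #include <cstdio>
    #include <vector>

    // Toy stand-ins, not HotSpot types: a dumper that annotates a block
    // header with the blocks of its predecessor nodes via the CFG.
    struct ToyNode  { unsigned idx; };
    struct ToyBlock { unsigned pre_order; std::vector<ToyNode*> preds; };

    struct ToyCFG {
      std::vector<ToyBlock*> block_of;             // indexed by ToyNode::idx
      ToyBlock* get_block_for_node(const ToyNode* n) const {
        return block_of[n->idx];
      }
    };

    void dump_head(const ToyBlock* b, const ToyCFG* cfg) {
      printf("B%u:", b->pre_order);
      for (ToyNode* p : b->preds) {
        ToyBlock* pb = cfg->get_block_for_node(p); // the CFG answers, not a bare array
        if (pb != NULL) printf(" B%u", pb->pre_order);
      }
      printf("\n");
    }

    int main() {
      ToyNode n0 = {0};
      ToyBlock b1 = {1, {}};
      ToyBlock b2 = {2, {&n0}};
      ToyCFG cfg = {{&b1}};                        // node 0 lives in B1
      dump_head(&b2, &cfg);                        // prints "B2: B1"
      return 0;
    }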
@@ -1308,11 +1306,11 @@
       assert((padding % nop_size) == 0, "padding is not a multiple of NOP size");
       int nops_cnt = padding / nop_size;
       MachNode *nop = new (this) MachNopNode(nops_cnt);
       b->_nodes.insert(j++, nop);
       last_inst++;
-      _cfg->_bbs.map( nop->_idx, b );
+      _cfg->map_node_to_block(nop, b);
       nop->emit(*cb, _regalloc);
       cb->flush_bundle(true);
       current_offset = cb->insts_size();
     }
 
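The nop-insertion sites lean on one arithmetic invariant: the padding is always a whole number of NOPs, so the assert guards the divisibility and the division yields the emit count. A stand-alone illustration of that arithmetic with made-up numbers (in HotSpot both sizes are machine-dependent):

    #include <cassert>
    #include <cstdio>

    int main() {
      const int nop_size       = 4;   // assumed: one NOP encodes in 4 bytes
      const int alignment      = 32;  // assumed alignment the target needs
      int       current_offset = 24;  // assumed current code offset

      // Bytes needed to reach the next aligned boundary.
      int padding = (alignment - (current_offset % alignment)) % alignment; // 8
      assert((padding % nop_size) == 0 && "padding is not a multiple of NOP size");
      int nops_cnt = padding / nop_size;                                    // 2

      printf("emit %d NOPs (%d bytes), next offset %d\n",
             nops_cnt, padding, current_offset + padding);
      return 0;
    }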
@@ -1393,11 +1391,11 @@
         assert((br_size - new_size) >= (int)nop_size, "short_branch size should be smaller");
         // Insert padding between avoid_back_to_back branches.
         if (needs_padding && replacement->avoid_back_to_back()) {
           MachNode *nop = new (this) MachNopNode();
           b->_nodes.insert(j++, nop);
-          _cfg->_bbs.map(nop->_idx, b);
+          _cfg->map_node_to_block(nop, b);
           last_inst++;
           nop->emit(*cb, _regalloc);
           cb->flush_bundle(true);
           current_offset = cb->insts_size();
         }
@@ -1547,11 +1545,11 @@
       Block *nb = _cfg->_blocks[i+1];
       int padding = nb->alignment_padding(current_offset);
       if( padding > 0 ) {
         MachNode *nop = new (this) MachNopNode(padding / nop_size);
         b->_nodes.insert( b->_nodes.size(), nop );
-        _cfg->_bbs.map( nop->_idx, b );
+        _cfg->map_node_to_block(nop, b);
         nop->emit(*cb, _regalloc);
         current_offset = cb->insts_size();
       }
     }
     // Verify that the distance for generated before forward
@@ -1735,11 +1733,10 @@
 // Initializer for class Scheduling
 
 Scheduling::Scheduling(Arena *arena, Compile &compile)
   : _arena(arena),
     _cfg(compile.cfg()),
-    _bbs(compile.cfg()->_bbs),
     _regalloc(compile.regalloc()),
     _reg_node(arena),
     _bundle_instr_count(0),
     _bundle_cycle_number(0),
     _scheduled(arena),
@@ -2083,12 +2080,13 @@
     Node *def = n->in(i);
     if (!def) continue;
     if( def->is_Proj() )      // If this is a machine projection, then
       def = def->in(0);       // propagate usage thru to the base instruction
 
-    if( _bbs[def->_idx] != bb ) // Ignore if not block-local
+    if(_cfg->get_block_for_node(def) != bb) { // Ignore if not block-local
       continue;
+    }
 
     // Compute the latency
     uint l = _bundle_cycle_number + n->latency(i);
     if (_current_latency[def->_idx] < l)
       _current_latency[def->_idx] = l;
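
The latency update is a max-accumulation: when a use n is placed in the current bundle cycle, each block-local def feeding it must complete by _bundle_cycle_number + n->latency(i), and a def feeding several uses keeps the largest such demand. A toy run of that rule with invented nodes and latencies:

    #include <cstdio>

    int main() {
      unsigned current_latency[4]  = {0, 0, 0, 0}; // per-def running maximum
      unsigned bundle_cycle_number = 3;            // cycle the use issues in

      // Invented inputs of one use: (def node index, edge latency).
      struct { unsigned def_idx; unsigned lat; } inputs[] = {
        {1, 2},   // def 1 feeds the use with a 2-cycle latency
        {2, 5},   // def 2 with a 5-cycle latency
        {1, 4},   // def 1 again through a slower path
      };

      for (const auto& in : inputs) {
        unsigned l = bundle_cycle_number + in.lat;
        if (current_latency[in.def_idx] < l)       // keep the max over all uses
          current_latency[in.def_idx] = l;
      }
      printf("def1=%u def2=%u\n", current_latency[1], current_latency[2]); // 7 8
      return 0;
    }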
@@ -2356,13 +2354,14 @@
     // Account for all uses
     for ( uint k = 0; k < n->len(); k++ ) {
       Node *inp = n->in(k);
       if (!inp) continue;
       assert(inp != n, "no cycles allowed" );
-      if( _bbs[inp->_idx] == bb ) { // Block-local use?
-        if( inp->is_Proj() )      // Skip through Proj's
+      if (_cfg->get_block_for_node(inp) == bb) { // Block-local use?
+        if (inp->is_Proj()) {     // Skip through Proj's
           inp = inp->in(0);
+        }
         ++_uses[inp->_idx];     // Count 1 block-local use
       }
     }
 
     // If this instruction has a 0 use count, then it is available
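
This use counting is what seeds the scheduler: a node whose block-local use count is zero has no unscheduled in-block consumers and, per the comment above, becomes available for selection. A toy version of the bookkeeping over an invented three-node block:

    #include <cstdio>
    #include <vector>

    int main() {
      // edges[u] lists the block-local defs that node u consumes (invented).
      std::vector<std::vector<int> > edges = {{}, {0}, {0, 1}};
      std::vector<int> uses(edges.size(), 0);

      for (size_t u = 0; u < edges.size(); u++)
        for (int d : edges[u])
          ++uses[d];                     // count 1 block-local use, as above

      for (size_t n = 0; n < uses.size(); n++)
        if (uses[n] == 0)                // nothing in the block still needs it
          printf("node %zu starts available\n", n);  // node 2 here
      return 0;
    }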
@@ -2641,11 +2640,11 @@
 void Scheduling::anti_do_def( Block *b, Node *def, OptoReg::Name def_reg, int is_def ) {
   if( !OptoReg::is_valid(def_reg) ) // Ignore stores & control flow
     return;
 
   Node *pinch = _reg_node[def_reg]; // Get pinch point
-  if( !pinch || _bbs[pinch->_idx] != b || // No pinch-point yet?
+  if ((pinch == NULL) || _cfg->get_block_for_node(pinch) != b || // No pinch-point yet?
       is_def ) {    // Check for a true def (not a kill)
     _reg_node.map(def_reg,def); // Record def/kill as the optimistic pinch-point
     return;
   }
 
@@ -2667,11 +2666,11 @@
     }
     if (pinch->_idx >= _regalloc->node_regs_max_index()) {
       _cfg->C->record_method_not_compilable("too many D-U pinch points");
       return;
     }
-    _bbs.map(pinch->_idx,b);      // Pretend it's valid in this block (lazy init)
+    _cfg->map_node_to_block(pinch, b); // Pretend it's valid in this block (lazy init)
     _reg_node.map(def_reg,pinch); // Record pinch-point
     //_regalloc->set_bad(pinch->_idx); // Already initialized this way.
     if( later_def->outcnt() == 0 || later_def->ideal_reg() == MachProjNode::fat_proj ) { // Distinguish def from kill
       pinch->init_req(0, _cfg->C->top()); // set not NULL for the next call
       add_prec_edge_from_to(later_def,pinch); // Add edge from kill to pinch
@@ -2711,13 +2710,13 @@
 void Scheduling::anti_do_use( Block *b, Node *use, OptoReg::Name use_reg ) {
   if( !OptoReg::is_valid(use_reg) ) // Ignore stores & control flow
     return;
   Node *pinch = _reg_node[use_reg]; // Get pinch point
   // Check for no later def_reg/kill in block
-  if( pinch && _bbs[pinch->_idx] == b &&
+  if ((pinch != NULL) && _cfg->get_block_for_node(pinch) == b &&
       // Use has to be block-local as well
-      _bbs[use->_idx] == b ) {
+      _cfg->get_block_for_node(use) == b) {
     if( pinch->Opcode() == Op_Node && // Real pinch-point (not optimistic?)
         pinch->req() == 1 ) {   // pinch not yet in block?
       pinch->del_req(0);        // yank pointer to later-def, also set flag
       // Insert the pinch-point in the block just after the last use
       b->_nodes.insert(b->find_node(use)+1,pinch);
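
anti_do_def and anti_do_use together implement anti-dependences via per-register pinch points: rather than wiring a precedence edge from every use of a register to every later def that clobbers it (a quadratic fan), one placeholder Op_Node per register collects them, with the later def attached on one side (the diff's own comment: "Add edge from kill to pinch") and each use on the other. A toy model of the fan-in saving (structures invented; the exact edge direction in HotSpot is not shown here):

    #include <cstdio>
    #include <vector>

    struct ToyNode { std::vector<ToyNode*> prec; };

    void add_prec_edge(ToyNode* from, ToyNode* to) { from->prec.push_back(to); }

    int main() {
      ToyNode later_def, pinch, use1, use2, use3;

      add_prec_edge(&later_def, &pinch);  // kill/def hangs off the pinch
      ToyNode* uses[] = {&use1, &use2, &use3};
      for (ToyNode* u : uses)
        add_prec_edge(&pinch, u);         // each use attaches to the same pinch

      // 1 + 3 edges; with k later defs this stays k + 3 instead of 3*k.
      printf("edges: %zu\n", later_def.prec.size() + pinch.prec.size());
      return 0;
    }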
@@ -2893,11 +2892,11 @@
   if (_cfg->C->trace_opto_output()) tty->print("Reclaimed pinch nodes:");
 #endif
   int trace_cnt = 0;
   for (uint k = 0; k < _reg_node.Size(); k++) {
     Node* pinch = _reg_node[k];
-    if (pinch != NULL && pinch->Opcode() == Op_Node &&
+    if ((pinch != NULL) && pinch->Opcode() == Op_Node &&
         // no predecence input edges
         (pinch->req() == pinch->len() || pinch->in(pinch->req()) == NULL) ) {
       cleanup_pinch(pinch);
       _pinch_free_list.push(pinch);
       _reg_node.map(k, NULL);
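
The reclamation loop closes the lifecycle: pinch nodes that ended a block with no precedence inputs (req() == len(), or a NULL slot past the required inputs) are cleaned and pushed onto _pinch_free_list so the next block reuses them instead of allocating. A toy sketch of that free-list pattern (types invented):

    #include <cstdio>
    #include <vector>

    struct ToyPinch { int id; };

    struct ToyFreeList {
      std::vector<ToyPinch*> free_list;
      int next_id;
      ToyFreeList() : next_id(0) {}

      ToyPinch* acquire() {
        if (!free_list.empty()) {          // reuse a reclaimed pinch node
          ToyPinch* p = free_list.back();
          free_list.pop_back();
          return p;
        }
        return new ToyPinch{next_id++};    // otherwise allocate fresh
      }
      void reclaim(ToyPinch* p) { free_list.push_back(p); }
    };

    int main() {
      ToyFreeList fl;
      ToyPinch* a = fl.acquire();          // fresh node
      fl.reclaim(a);                       // block done, node unused
      ToyPinch* b = fl.acquire();          // same object comes back
      printf("recycled: %s\n", (a == b) ? "yes" : "no");
      delete b;
      return 0;
    }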