Mercurial > hg > truffle
comparison src/share/vm/opto/output.cpp @ 12039:3cce976666d9
Merge hs25-b46
| author | Gilles Duboscq <duboscq@ssw.jku.at> |
|---|---|
| date | Thu, 10 Oct 2013 14:20:04 +0200 |
| parents | 836a62f43af9 d1034bd8cefc |
| children | cefad50507d8 |
comparison
equal
deleted
inserted
replaced
| 11958:a0f5be106e67 | 12039:3cce976666d9 |
|---|---|
66 C->inner_loops()*(OptoLoopAlignment-1)), | 66 C->inner_loops()*(OptoLoopAlignment-1)), |
67 "out of nodes before code generation" ) ) { | 67 "out of nodes before code generation" ) ) { |
68 return; | 68 return; |
69 } | 69 } |
70 // Make sure I can find the Start Node | 70 // Make sure I can find the Start Node |
71 Block_Array& bbs = _cfg->_bbs; | |
72 Block *entry = _cfg->_blocks[1]; | 71 Block *entry = _cfg->_blocks[1]; |
73 Block *broot = _cfg->_broot; | 72 Block *broot = _cfg->_broot; |
74 | 73 |
75 const StartNode *start = entry->_nodes[0]->as_Start(); | 74 const StartNode *start = entry->_nodes[0]->as_Start(); |
76 | 75 |
77 // Replace StartNode with prolog | 76 // Replace StartNode with prolog |
78 MachPrologNode *prolog = new (this) MachPrologNode(); | 77 MachPrologNode *prolog = new (this) MachPrologNode(); |
79 entry->_nodes.map( 0, prolog ); | 78 entry->_nodes.map( 0, prolog ); |
80 bbs.map( prolog->_idx, entry ); | 79 _cfg->map_node_to_block(prolog, entry); |
81 bbs.map( start->_idx, NULL ); // start is no longer in any block | 80 _cfg->unmap_node_from_block(start); // start is no longer in any block |
82 | 81 |
83 // Virtual methods need an unverified entry point | 82 // Virtual methods need an unverified entry point |
84 | 83 |
85 if( is_osr_compilation() ) { | 84 if( is_osr_compilation() ) { |
86 if( PoisonOSREntry ) { | 85 if( PoisonOSREntry ) { |
115 if( !b->is_connector() && b->non_connector_successor(0) == _cfg->_broot ) { // Found a program exit point? | 114 if( !b->is_connector() && b->non_connector_successor(0) == _cfg->_broot ) { // Found a program exit point? |
116 Node *m = b->end(); | 115 Node *m = b->end(); |
117 if( m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt ) { | 116 if( m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt ) { |
118 MachEpilogNode *epilog = new (this) MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return); | 117 MachEpilogNode *epilog = new (this) MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return); |
119 b->add_inst( epilog ); | 118 b->add_inst( epilog ); |
120 bbs.map(epilog->_idx, b); | 119 _cfg->map_node_to_block(epilog, b); |
121 //_regalloc->set_bad(epilog->_idx); // Already initialized this way. | |
122 } | 120 } |
123 } | 121 } |
124 } | 122 } |
125 | 123 |
126 # ifdef ENABLE_ZAP_DEAD_LOCALS | 124 # ifdef ENABLE_ZAP_DEAD_LOCALS |
250 } | 248 } |
251 } | 249 } |
252 if (insert) { | 250 if (insert) { |
253 Node *zap = call_zap_node(n->as_MachSafePoint(), i); | 251 Node *zap = call_zap_node(n->as_MachSafePoint(), i); |
254 b->_nodes.insert( j, zap ); | 252 b->_nodes.insert( j, zap ); |
255 _cfg->_bbs.map( zap->_idx, b ); | 253 _cfg->map_node_to_block(zap, b); |
256 ++j; | 254 ++j; |
257 } | 255 } |
258 } | 256 } |
259 } | 257 } |
260 } | 258 } |
1235 cb->flush_bundle(true); | 1233 cb->flush_bundle(true); |
1236 | 1234 |
1237 #ifdef ASSERT | 1235 #ifdef ASSERT |
1238 if (!b->is_connector()) { | 1236 if (!b->is_connector()) { |
1239 stringStream st; | 1237 stringStream st; |
1240 b->dump_head(&_cfg->_bbs, &st); | 1238 b->dump_head(_cfg, &st); |
1241 MacroAssembler(cb).block_comment(st.as_string()); | 1239 MacroAssembler(cb).block_comment(st.as_string()); |
1242 } | 1240 } |
1243 jmp_target[i] = 0; | 1241 jmp_target[i] = 0; |
1244 jmp_offset[i] = 0; | 1242 jmp_offset[i] = 0; |
1245 jmp_size[i] = 0; | 1243 jmp_size[i] = 0; |
1311 assert((padding % nop_size) == 0, "padding is not a multiple of NOP size"); | 1309 assert((padding % nop_size) == 0, "padding is not a multiple of NOP size"); |
1312 int nops_cnt = padding / nop_size; | 1310 int nops_cnt = padding / nop_size; |
1313 MachNode *nop = new (this) MachNopNode(nops_cnt); | 1311 MachNode *nop = new (this) MachNopNode(nops_cnt); |
1314 b->_nodes.insert(j++, nop); | 1312 b->_nodes.insert(j++, nop); |
1315 last_inst++; | 1313 last_inst++; |
1316 _cfg->_bbs.map( nop->_idx, b ); | 1314 _cfg->map_node_to_block(nop, b); |
1317 nop->emit(*cb, _regalloc); | 1315 nop->emit(*cb, _regalloc); |
1318 cb->flush_bundle(true); | 1316 cb->flush_bundle(true); |
1319 current_offset = cb->insts_size(); | 1317 current_offset = cb->insts_size(); |
1320 } | 1318 } |
1321 | 1319 |
1396 assert((br_size - new_size) >= (int)nop_size, "short_branch size should be smaller"); | 1394 assert((br_size - new_size) >= (int)nop_size, "short_branch size should be smaller"); |
1397 // Insert padding between avoid_back_to_back branches. | 1395 // Insert padding between avoid_back_to_back branches. |
1398 if (needs_padding && replacement->avoid_back_to_back()) { | 1396 if (needs_padding && replacement->avoid_back_to_back()) { |
1399 MachNode *nop = new (this) MachNopNode(); | 1397 MachNode *nop = new (this) MachNopNode(); |
1400 b->_nodes.insert(j++, nop); | 1398 b->_nodes.insert(j++, nop); |
1401 _cfg->_bbs.map(nop->_idx, b); | 1399 _cfg->map_node_to_block(nop, b); |
1402 last_inst++; | 1400 last_inst++; |
1403 nop->emit(*cb, _regalloc); | 1401 nop->emit(*cb, _regalloc); |
1404 cb->flush_bundle(true); | 1402 cb->flush_bundle(true); |
1405 current_offset = cb->insts_size(); | 1403 current_offset = cb->insts_size(); |
1406 } | 1404 } |
1550 Block *nb = _cfg->_blocks[i+1]; | 1548 Block *nb = _cfg->_blocks[i+1]; |
1551 int padding = nb->alignment_padding(current_offset); | 1549 int padding = nb->alignment_padding(current_offset); |
1552 if( padding > 0 ) { | 1550 if( padding > 0 ) { |
1553 MachNode *nop = new (this) MachNopNode(padding / nop_size); | 1551 MachNode *nop = new (this) MachNopNode(padding / nop_size); |
1554 b->_nodes.insert( b->_nodes.size(), nop ); | 1552 b->_nodes.insert( b->_nodes.size(), nop ); |
1555 _cfg->_bbs.map( nop->_idx, b ); | 1553 _cfg->map_node_to_block(nop, b); |
1556 nop->emit(*cb, _regalloc); | 1554 nop->emit(*cb, _regalloc); |
1557 current_offset = cb->insts_size(); | 1555 current_offset = cb->insts_size(); |
1558 } | 1556 } |
1559 } | 1557 } |
1560 // Verify that the distance for generated before forward | 1558 // Verify that the distance for generated before forward |
1738 // Initializer for class Scheduling | 1736 // Initializer for class Scheduling |
1739 | 1737 |
1740 Scheduling::Scheduling(Arena *arena, Compile &compile) | 1738 Scheduling::Scheduling(Arena *arena, Compile &compile) |
1741 : _arena(arena), | 1739 : _arena(arena), |
1742 _cfg(compile.cfg()), | 1740 _cfg(compile.cfg()), |
1743 _bbs(compile.cfg()->_bbs), | |
1744 _regalloc(compile.regalloc()), | 1741 _regalloc(compile.regalloc()), |
1745 _reg_node(arena), | 1742 _reg_node(arena), |
1746 _bundle_instr_count(0), | 1743 _bundle_instr_count(0), |
1747 _bundle_cycle_number(0), | 1744 _bundle_cycle_number(0), |
1748 _scheduled(arena), | 1745 _scheduled(arena), |
2086 Node *def = n->in(i); | 2083 Node *def = n->in(i); |
2087 if (!def) continue; | 2084 if (!def) continue; |
2088 if( def->is_Proj() ) // If this is a machine projection, then | 2085 if( def->is_Proj() ) // If this is a machine projection, then |
2089 def = def->in(0); // propagate usage thru to the base instruction | 2086 def = def->in(0); // propagate usage thru to the base instruction |
2090 | 2087 |
2091 if( _bbs[def->_idx] != bb ) // Ignore if not block-local | 2088 if(_cfg->get_block_for_node(def) != bb) { // Ignore if not block-local |
2092 continue; | 2089 continue; |
2090 } | |
2093 | 2091 |
2094 // Compute the latency | 2092 // Compute the latency |
2095 uint l = _bundle_cycle_number + n->latency(i); | 2093 uint l = _bundle_cycle_number + n->latency(i); |
2096 if (_current_latency[def->_idx] < l) | 2094 if (_current_latency[def->_idx] < l) |
2097 _current_latency[def->_idx] = l; | 2095 _current_latency[def->_idx] = l; |
2359 // Account for all uses | 2357 // Account for all uses |
2360 for ( uint k = 0; k < n->len(); k++ ) { | 2358 for ( uint k = 0; k < n->len(); k++ ) { |
2361 Node *inp = n->in(k); | 2359 Node *inp = n->in(k); |
2362 if (!inp) continue; | 2360 if (!inp) continue; |
2363 assert(inp != n, "no cycles allowed" ); | 2361 assert(inp != n, "no cycles allowed" ); |
2364 if( _bbs[inp->_idx] == bb ) { // Block-local use? | 2362 if (_cfg->get_block_for_node(inp) == bb) { // Block-local use? |
2365 if( inp->is_Proj() ) // Skip through Proj's | 2363 if (inp->is_Proj()) { // Skip through Proj's |
2366 inp = inp->in(0); | 2364 inp = inp->in(0); |
2365 } | |
2367 ++_uses[inp->_idx]; // Count 1 block-local use | 2366 ++_uses[inp->_idx]; // Count 1 block-local use |
2368 } | 2367 } |
2369 } | 2368 } |
2370 | 2369 |
2371 // If this instruction has a 0 use count, then it is available | 2370 // If this instruction has a 0 use count, then it is available |
2644 void Scheduling::anti_do_def( Block *b, Node *def, OptoReg::Name def_reg, int is_def ) { | 2643 void Scheduling::anti_do_def( Block *b, Node *def, OptoReg::Name def_reg, int is_def ) { |
2645 if( !OptoReg::is_valid(def_reg) ) // Ignore stores & control flow | 2644 if( !OptoReg::is_valid(def_reg) ) // Ignore stores & control flow |
2646 return; | 2645 return; |
2647 | 2646 |
2648 Node *pinch = _reg_node[def_reg]; // Get pinch point | 2647 Node *pinch = _reg_node[def_reg]; // Get pinch point |
2649 if( !pinch || _bbs[pinch->_idx] != b || // No pinch-point yet? | 2648 if ((pinch == NULL) || _cfg->get_block_for_node(pinch) != b || // No pinch-point yet? |
2650 is_def ) { // Check for a true def (not a kill) | 2649 is_def ) { // Check for a true def (not a kill) |
2651 _reg_node.map(def_reg,def); // Record def/kill as the optimistic pinch-point | 2650 _reg_node.map(def_reg,def); // Record def/kill as the optimistic pinch-point |
2652 return; | 2651 return; |
2653 } | 2652 } |
2654 | 2653 |
2670 } | 2669 } |
2671 if (pinch->_idx >= _regalloc->node_regs_max_index()) { | 2670 if (pinch->_idx >= _regalloc->node_regs_max_index()) { |
2672 _cfg->C->record_method_not_compilable("too many D-U pinch points"); | 2671 _cfg->C->record_method_not_compilable("too many D-U pinch points"); |
2673 return; | 2672 return; |
2674 } | 2673 } |
2675 _bbs.map(pinch->_idx,b); // Pretend it's valid in this block (lazy init) | 2674 _cfg->map_node_to_block(pinch, b); // Pretend it's valid in this block (lazy init) |
2676 _reg_node.map(def_reg,pinch); // Record pinch-point | 2675 _reg_node.map(def_reg,pinch); // Record pinch-point |
2677 //_regalloc->set_bad(pinch->_idx); // Already initialized this way. | 2676 //_regalloc->set_bad(pinch->_idx); // Already initialized this way. |
2678 if( later_def->outcnt() == 0 || later_def->ideal_reg() == MachProjNode::fat_proj ) { // Distinguish def from kill | 2677 if( later_def->outcnt() == 0 || later_def->ideal_reg() == MachProjNode::fat_proj ) { // Distinguish def from kill |
2679 pinch->init_req(0, _cfg->C->top()); // set not NULL for the next call | 2678 pinch->init_req(0, _cfg->C->top()); // set not NULL for the next call |
2680 add_prec_edge_from_to(later_def,pinch); // Add edge from kill to pinch | 2679 add_prec_edge_from_to(later_def,pinch); // Add edge from kill to pinch |
2714 void Scheduling::anti_do_use( Block *b, Node *use, OptoReg::Name use_reg ) { | 2713 void Scheduling::anti_do_use( Block *b, Node *use, OptoReg::Name use_reg ) { |
2715 if( !OptoReg::is_valid(use_reg) ) // Ignore stores & control flow | 2714 if( !OptoReg::is_valid(use_reg) ) // Ignore stores & control flow |
2716 return; | 2715 return; |
2717 Node *pinch = _reg_node[use_reg]; // Get pinch point | 2716 Node *pinch = _reg_node[use_reg]; // Get pinch point |
2718 // Check for no later def_reg/kill in block | 2717 // Check for no later def_reg/kill in block |
2719 if( pinch && _bbs[pinch->_idx] == b && | 2718 if ((pinch != NULL) && _cfg->get_block_for_node(pinch) == b && |
2720 // Use has to be block-local as well | 2719 // Use has to be block-local as well |
2721 _bbs[use->_idx] == b ) { | 2720 _cfg->get_block_for_node(use) == b) { |
2722 if( pinch->Opcode() == Op_Node && // Real pinch-point (not optimistic?) | 2721 if( pinch->Opcode() == Op_Node && // Real pinch-point (not optimistic?) |
2723 pinch->req() == 1 ) { // pinch not yet in block? | 2722 pinch->req() == 1 ) { // pinch not yet in block? |
2724 pinch->del_req(0); // yank pointer to later-def, also set flag | 2723 pinch->del_req(0); // yank pointer to later-def, also set flag |
2725 // Insert the pinch-point in the block just after the last use | 2724 // Insert the pinch-point in the block just after the last use |
2726 b->_nodes.insert(b->find_node(use)+1,pinch); | 2725 b->_nodes.insert(b->find_node(use)+1,pinch); |
2896 if (_cfg->C->trace_opto_output()) tty->print("Reclaimed pinch nodes:"); | 2895 if (_cfg->C->trace_opto_output()) tty->print("Reclaimed pinch nodes:"); |
2897 #endif | 2896 #endif |
2898 int trace_cnt = 0; | 2897 int trace_cnt = 0; |
2899 for (uint k = 0; k < _reg_node.Size(); k++) { | 2898 for (uint k = 0; k < _reg_node.Size(); k++) { |
2900 Node* pinch = _reg_node[k]; | 2899 Node* pinch = _reg_node[k]; |
2901 if (pinch != NULL && pinch->Opcode() == Op_Node && | 2900 if ((pinch != NULL) && pinch->Opcode() == Op_Node && |
2902 // no predecence input edges | 2901 // no predecence input edges |
2903 (pinch->req() == pinch->len() || pinch->in(pinch->req()) == NULL) ) { | 2902 (pinch->req() == pinch->len() || pinch->in(pinch->req()) == NULL) ) { |
2904 cleanup_pinch(pinch); | 2903 cleanup_pinch(pinch); |
2905 _pinch_free_list.push(pinch); | 2904 _pinch_free_list.push(pinch); |
2906 _reg_node.map(k, NULL); | 2905 _reg_node.map(k, NULL); |