src/share/vm/opto/output.cpp @ 14422:2b8e28fdf503 (Merge)

author:    kvn
date:      Tue, 05 Nov 2013 17:38:04 -0800
parents:   e2722a66aba7 650868c062a9
children:  318d0622a6d7
comparing: 14421:3068270ba476 vs. 14422:2b8e28fdf503
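In substance, the merge brings two things into this file: (1) every direct
touch of the block's underlying Node_List (Block::_nodes) is rerouted through
Block's accessor methods, and (2) SafePointScalarObjectNode::first_index()
now takes the JVMState it is relative to, so debug-info emission resolves
scalar-replaced fields against the right frame. A minimal sketch of the
accessor shapes, inferred from the new call sites below rather than quoted
from block.hpp:

    // Sketch only: the Block node-list interface these hunks migrate to.
    // Inferred from call sites; the real class has many more members.
    class Block : public CFGElement {
      Node_List _nodes;                    // no longer poked at directly
     public:
      Node* head() const                  { return _nodes[0]; }
      uint  number_of_nodes() const       { return _nodes.size(); }
      Node* get_node(uint i) const        { return _nodes[i]; }
      void  map_node(Node* n, uint i)     { _nodes.map(i, n); }
      void  insert_node(Node* n, uint i)  { _nodes.insert(i, n); }
    };

Note the argument-order flip: the old Node_List calls put the index first
(_nodes.map(i, n), _nodes.insert(j, nop)), while the new Block methods take
the node first (map_node(n, i), insert_node(nop, j)).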
@@ -55,11 +55,11 @@
 extern int emit_deopt_handler(CodeBuffer &cbuf);
 
 // Convert Nodes to instruction bits and pass off to the VM
 void Compile::Output() {
   // RootNode goes
-  assert( _cfg->get_root_block()->_nodes.size() == 0, "" );
+  assert( _cfg->get_root_block()->number_of_nodes() == 0, "" );
 
   // The number of new nodes (mostly MachNop) is proportional to
   // the number of java calls and inner loops which are aligned.
   if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
                             C->inner_loops()*(OptoLoopAlignment-1)),
@@ -68,15 +68,15 @@
   }
   // Make sure I can find the Start Node
   Block *entry = _cfg->get_block(1);
   Block *broot = _cfg->get_root_block();
 
-  const StartNode *start = entry->_nodes[0]->as_Start();
+  const StartNode *start = entry->head()->as_Start();
 
   // Replace StartNode with prolog
   MachPrologNode *prolog = new (this) MachPrologNode();
-  entry->_nodes.map( 0, prolog );
+  entry->map_node(prolog, 0);
   _cfg->map_node_to_block(prolog, entry);
   _cfg->unmap_node_from_block(start); // start is no longer in any block
 
   // Virtual methods need an unverified entry point
 
@@ -142,12 +142,12 @@
   if (trace_opto_output()) {
     tty->print("\n---- After ScheduleAndBundle ----\n");
     for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
       tty->print("\nBB#%03d:\n", i);
       Block* block = _cfg->get_block(i);
-      for (uint j = 0; j < block->_nodes.size(); j++) {
-        Node* n = block->_nodes[j];
+      for (uint j = 0; j < block->number_of_nodes(); j++) {
+        Node* n = block->get_node(j);
         OptoReg::Name reg = _regalloc->get_reg_first(n);
         tty->print(" %-6s ", reg >= 0 && reg < REG_COUNT ? Matcher::regName[reg] : "");
         n->dump();
       }
     }
@@ -224,12 +224,12 @@
     return; // no safepoints/oopmaps emitted for calls in stubs,so we don't care
 
   // Insert call to zap runtime stub before every node with an oop map
   for( uint i=0; i<_cfg->number_of_blocks(); i++ ) {
     Block *b = _cfg->get_block(i);
-    for ( uint j = 0; j < b->_nodes.size(); ++j ) {
-      Node *n = b->_nodes[j];
+    for ( uint j = 0; j < b->number_of_nodes(); ++j ) {
+      Node *n = b->get_node(j);
 
       // Determining if we should insert a zap-a-lot node in output.
       // We do that for all nodes that has oopmap info, except for calls
       // to allocation. Calls to allocation passes in the old top-of-eden pointer
       // and expect the C code to reset it. Hence, there can be no safepoints between
@@ -254,11 +254,11 @@
           insert = false;
         }
       }
       if (insert) {
         Node *zap = call_zap_node(n->as_MachSafePoint(), i);
-        b->_nodes.insert( j, zap );
+        b->insert_node(zap, j);
         _cfg->map_node_to_block(zap, b);
         ++j;
       }
     }
   }
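A note on the zap hunk above: insert_node() places the zap call at index j,
which shifts the safepoint node to j+1, and the trailing ++j makes the scan
resume past the node it just inserted rather than revisiting it. A toy model
of that insert-then-advance pattern (std::vector standing in for the block's
node list; names are illustrative, not HotSpot's):

    #include <string>
    #include <vector>
    int main() {
      std::vector<std::string> nodes = {"load", "safepoint", "store"};
      for (unsigned j = 0; j < nodes.size(); ++j) {
        if (nodes[j] == "safepoint") {             // stand-in for "has an oop map"
          nodes.insert(nodes.begin() + j, "zap");  // zap at j, safepoint at j+1
          ++j;                                     // without this, j would land on
        }                                          // the same safepoint again
      }
      return 0;  // nodes: load, zap, safepoint, store
    }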
@@ -377,14 +377,14 @@
     jmp_nidx[i] = -1;
     DEBUG_ONLY( jmp_target[i] = 0; )
     DEBUG_ONLY( jmp_rule[i] = 0; )
 
     // Sum all instruction sizes to compute block size
-    uint last_inst = block->_nodes.size();
+    uint last_inst = block->number_of_nodes();
     uint blk_size = 0;
     for (uint j = 0; j < last_inst; j++) {
-      Node* nj = block->_nodes[j];
+      Node* nj = block->get_node(j);
       // Handle machine instruction nodes
       if (nj->is_Mach()) {
         MachNode *mach = nj->as_Mach();
         blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
         reloc_size += mach->reloc();
@@ -475,22 +475,22 @@
     has_short_branch_candidate = false;
     int adjust_block_start = 0;
     for (uint i = 0; i < nblocks; i++) {
       Block* block = _cfg->get_block(i);
       int idx = jmp_nidx[i];
-      MachNode* mach = (idx == -1) ? NULL: block->_nodes[idx]->as_Mach();
+      MachNode* mach = (idx == -1) ? NULL: block->get_node(idx)->as_Mach();
       if (mach != NULL && mach->may_be_short_branch()) {
 #ifdef ASSERT
         assert(jmp_size[i] > 0 && mach->is_MachBranch(), "sanity");
         int j;
         // Find the branch; ignore trailing NOPs.
-        for (j = block->_nodes.size()-1; j>=0; j--) {
-          Node* n = block->_nodes[j];
+        for (j = block->number_of_nodes()-1; j>=0; j--) {
+          Node* n = block->get_node(j);
           if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con)
             break;
         }
-        assert(j >= 0 && j == idx && block->_nodes[j] == (Node*)mach, "sanity");
+        assert(j >= 0 && j == idx && block->get_node(j) == (Node*)mach, "sanity");
 #endif
         int br_size = jmp_size[i];
         int br_offs = blk_starts[i] + jmp_offset[i];
 
         // This requires the TRUE branch target be in succs[0]
@@ -520,11 +520,11 @@
           if (needs_padding && replacement->avoid_back_to_back()) {
             jmp_offset[i] += nop_size;
             diff -= nop_size;
           }
           adjust_block_start += diff;
-          block->_nodes.map(idx, replacement);
+          block->map_node(replacement, idx);
           mach->subsume_by(replacement, C);
           mach = replacement;
           progress = true;
 
           jmp_size[i] = new_size;
@@ -637,11 +637,11 @@
              cik->is_array_klass(), "Not supported allocation.");
       sv = new ObjectValue(spobj->_idx,
                            new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()));
       Compile::set_sv_for_object_node(objs, sv);
 
-      uint first_ind = spobj->first_index();
+      uint first_ind = spobj->first_index(sfpt->jvms());
       for (uint i = 0; i < spobj->n_fields(); i++) {
         Node* fld_node = sfpt->in(first_ind+i);
         (void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs);
       }
     }
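The first_index() hunk above is the behavioral part of this merge: a
scalar-replaced object's field values travel as extra inputs of the
safepoint, and once frames are inlined the same SafePointScalarObjectNode
can be reached through a chain of nested JVMS, so a bare stored input index
is ambiguous. The companion callnode.hpp change (not part of this file's
diff) makes the stored index relative to the owning frame; a hedged
reconstruction of its shape:

    // Reconstruction, not quoted from the changeset: the stored _first_index
    // is now relative to the JVMState's scalar-object section, and each
    // caller rebases it against the frame it is actually walking.
    uint SafePointScalarObjectNode::first_index(JVMState* jvms) const {
      assert(jvms != NULL, "missed JVMS");
      return jvms->scloff() + _first_index;
    }

That is why the two call sites in this file rebase differently: the hunk
above passes sfpt->jvms(), while the monitor walk in the next hunk passes
youngest_jvms.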
@@ -892,38 +892,38 @@
 
   // Build the growable array of ScopeValues for exp stack
   GrowableArray<MonitorValue*> *monarray = new GrowableArray<MonitorValue*>(num_mon);
 
   // Loop over monitors and insert into array
-  for(idx = 0; idx < num_mon; idx++) {
+  for (idx = 0; idx < num_mon; idx++) {
     // Grab the node that defines this monitor
     Node* box_node = sfn->monitor_box(jvms, idx);
     Node* obj_node = sfn->monitor_obj(jvms, idx);
 
     // Create ScopeValue for object
     ScopeValue *scval = NULL;
 
-    if( obj_node->is_SafePointScalarObject() ) {
+    if (obj_node->is_SafePointScalarObject()) {
       SafePointScalarObjectNode* spobj = obj_node->as_SafePointScalarObject();
       scval = Compile::sv_for_node_id(objs, spobj->_idx);
       if (scval == NULL) {
-        const Type *t = obj_node->bottom_type();
+        const Type *t = spobj->bottom_type();
         ciKlass* cik = t->is_oopptr()->klass();
         assert(cik->is_instance_klass() ||
                cik->is_array_klass(), "Not supported allocation.");
         ObjectValue* sv = new ObjectValue(spobj->_idx,
                                           new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()));
         Compile::set_sv_for_object_node(objs, sv);
 
-        uint first_ind = spobj->first_index();
+        uint first_ind = spobj->first_index(youngest_jvms);
         for (uint i = 0; i < spobj->n_fields(); i++) {
           Node* fld_node = sfn->in(first_ind+i);
           (void)FillLocArray(sv->field_values()->length(), sfn, fld_node, sv->field_values(), objs);
         }
         scval = sv;
       }
-    } else if( !obj_node->is_Con() ) {
+    } else if (!obj_node->is_Con()) {
       OptoReg::Name obj_reg = _regalloc->get_reg_first(obj_node);
       if( obj_node->bottom_type()->base() == Type::NarrowOop ) {
         scval = new_loc_value( _regalloc, obj_reg, Location::narrowoop );
       } else {
         scval = new_loc_value( _regalloc, obj_reg, Location::oop );
@@ -1086,12 +1086,12 @@
   // Fill the constant table.
   // Note: This must happen before shorten_branches.
   for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
     Block* b = _cfg->get_block(i);
 
-    for (uint j = 0; j < b->_nodes.size(); j++) {
-      Node* n = b->_nodes[j];
+    for (uint j = 0; j < b->number_of_nodes(); j++) {
+      Node* n = b->get_node(j);
 
       // If the node is a MachConstantNode evaluate the constant
       // value section.
       if (n->is_MachConstant()) {
         MachConstantNode* machcon = n->as_MachConstant();
@@ -1245,18 +1245,18 @@
     int blk_offset = current_offset;
 
     // Define the label at the beginning of the basic block
     MacroAssembler(cb).bind(blk_labels[block->_pre_order]);
 
-    uint last_inst = block->_nodes.size();
+    uint last_inst = block->number_of_nodes();
 
     // Emit block normally, except for last instruction.
     // Emit means "dump code bits into code buffer".
     for (uint j = 0; j<last_inst; j++) {
 
       // Get the node
-      Node* n = block->_nodes[j];
+      Node* n = block->get_node(j);
 
       // See if delay slots are supported
       if (valid_bundle_info(n) &&
           node_bundling(n)->used_in_unconditional_delay()) {
         assert(delay_slot == NULL, "no use of delay slot node");
@@ -1306,11 +1306,11 @@
 
         if(padding > 0) {
           assert((padding % nop_size) == 0, "padding is not a multiple of NOP size");
           int nops_cnt = padding / nop_size;
           MachNode *nop = new (this) MachNopNode(nops_cnt);
-          block->_nodes.insert(j++, nop);
+          block->insert_node(nop, j++);
           last_inst++;
           _cfg->map_node_to_block(nop, block);
           nop->emit(*cb, _regalloc);
           cb->flush_bundle(true);
           current_offset = cb->insts_size();
@@ -1392,11 +1392,11 @@
             int new_size = replacement->size(_regalloc);
             assert((br_size - new_size) >= (int)nop_size, "short_branch size should be smaller");
             // Insert padding between avoid_back_to_back branches.
             if (needs_padding && replacement->avoid_back_to_back()) {
               MachNode *nop = new (this) MachNopNode();
-              block->_nodes.insert(j++, nop);
+              block->insert_node(nop, j++);
               _cfg->map_node_to_block(nop, block);
               last_inst++;
               nop->emit(*cb, _regalloc);
               cb->flush_bundle(true);
               current_offset = cb->insts_size();
@@ -1405,11 +1405,11 @@
             jmp_target[i] = block_num;
             jmp_offset[i] = current_offset - blk_offset;
             jmp_size[i] = new_size;
             jmp_rule[i] = mach->rule();
 #endif
-            block->_nodes.map(j, replacement);
+            block->map_node(replacement, j);
             mach->subsume_by(replacement, C);
             n = replacement;
             mach = replacement;
           }
         }
@@ -1436,11 +1436,11 @@
           Node *oop_store = mach->in(prec); // Precedence edge
           if (oop_store == NULL) continue;
           count++;
           uint i4;
           for (i4 = 0; i4 < last_inst; ++i4) {
-            if (block->_nodes[i4] == oop_store) {
+            if (block->get_node(i4) == oop_store) {
               break;
             }
           }
           // Note: This test can provide a false failure if other precedence
           // edges have been added to the storeCMNode.
@@ -1546,11 +1546,11 @@
     if (i < nblocks-1) {
       Block *nb = _cfg->get_block(i + 1);
       int padding = nb->alignment_padding(current_offset);
       if( padding > 0 ) {
         MachNode *nop = new (this) MachNopNode(padding / nop_size);
-        block->_nodes.insert(block->_nodes.size(), nop);
+        block->insert_node(nop, block->number_of_nodes());
         _cfg->map_node_to_block(nop, block);
         nop->emit(*cb, _regalloc);
         current_offset = cb->insts_size();
       }
     }
@@ -1653,12 +1653,12 @@
     Block* block = _cfg->get_block(i);
     Node *n = NULL;
     int j;
 
     // Find the branch; ignore trailing NOPs.
-    for (j = block->_nodes.size() - 1; j >= 0; j--) {
-      n = block->_nodes[j];
+    for (j = block->number_of_nodes() - 1; j >= 0; j--) {
+      n = block->get_node(j);
       if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con) {
         break;
       }
     }
 
@@ -1673,12 +1673,12 @@
 
     // Get the offset of the return from the call
     uint call_return = call_returns[block->_pre_order];
 #ifdef ASSERT
     assert( call_return > 0, "no call seen for this basic block" );
-    while (block->_nodes[--j]->is_MachProj()) ;
-    assert(block->_nodes[j]->is_MachCall(), "CatchProj must follow call");
+    while (block->get_node(--j)->is_MachProj()) ;
+    assert(block->get_node(j)->is_MachCall(), "CatchProj must follow call");
 #endif
     // last instruction is a CatchNode, find it's CatchProjNodes
     int nof_succs = block->_num_succs;
     // allocate space
     GrowableArray<intptr_t> handler_bcis(nof_succs);
@@ -1780,11 +1780,11 @@
   memcpy(_bundle_use_elements, Pipeline_Use::elaborated_elements, sizeof(Pipeline_Use::elaborated_elements));
 
   // Get the last node
   Block* block = _cfg->get_block(_cfg->number_of_blocks() - 1);
 
-  _next_node = block->_nodes[block->_nodes.size() - 1];
+  _next_node = block->get_node(block->number_of_nodes() - 1);
 }
 
 #ifndef PRODUCT
 // Scheduling destructor
 Scheduling::~Scheduling() {
@@ -1873,11 +1873,11 @@
 
     // This is a kludge, forcing all latency calculations to start at 1.
     // Used to allow latency 0 to force an instruction to the beginning
    // of the bb
     uint latency = 1;
-    Node *use = bb->_nodes[j];
+    Node *use = bb->get_node(j);
     uint nlen = use->len();
 
     // Walk over all the inputs
     for ( uint k=0; k < nlen; k++ ) {
       Node *def = use->in(k);
@@ -2284,11 +2284,11 @@
       (op != Op_Node &&         // Not an unused antidepedence node and
        // not an unallocated boxlock
        (OptoReg::is_valid(_regalloc->get_reg_first(n)) || op != Op_BoxLock)) ) {
 
     // Push any trailing projections
-    if( bb->_nodes[bb->_nodes.size()-1] != n ) {
+    if( bb->get_node(bb->number_of_nodes()-1) != n ) {
       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
         Node *foi = n->fast_out(i);
         if( foi->is_Proj() )
           _scheduled.push(foi);
       }
@@ -2327,25 +2327,25 @@
 
   // No delay slot specified
   _unconditional_delay_slot = NULL;
 
 #ifdef ASSERT
-  for( uint i=0; i < bb->_nodes.size(); i++ )
-    assert( _uses[bb->_nodes[i]->_idx] == 0, "_use array not clean" );
+  for( uint i=0; i < bb->number_of_nodes(); i++ )
+    assert( _uses[bb->get_node(i)->_idx] == 0, "_use array not clean" );
 #endif
 
   // Force the _uses count to never go to zero for unscheduable pieces
   // of the block
   for( uint k = 0; k < _bb_start; k++ )
-    _uses[bb->_nodes[k]->_idx] = 1;
-  for( uint l = _bb_end; l < bb->_nodes.size(); l++ )
-    _uses[bb->_nodes[l]->_idx] = 1;
+    _uses[bb->get_node(k)->_idx] = 1;
+  for( uint l = _bb_end; l < bb->number_of_nodes(); l++ )
+    _uses[bb->get_node(l)->_idx] = 1;
 
   // Iterate backwards over the instructions in the block. Don't count the
   // branch projections at end or the block header instructions.
   for( uint j = _bb_end-1; j >= _bb_start; j-- ) {
-    Node *n = bb->_nodes[j];
+    Node *n = bb->get_node(j);
     if( n->is_Proj() ) continue; // Projections handled another way
 
     // Account for all uses
     for ( uint k = 0; k < n->len(); k++ ) {
       Node *inp = n->in(k);
@@ -2396,12 +2396,12 @@
     bb = _cfg->get_block(i);
 
 #ifndef PRODUCT
     if (_cfg->C->trace_opto_output()) {
       tty->print("# Schedule BB#%03d (initial)\n", i);
-      for (uint j = 0; j < bb->_nodes.size(); j++) {
-        bb->_nodes[j]->dump();
+      for (uint j = 0; j < bb->number_of_nodes(); j++) {
+        bb->get_node(j)->dump();
       }
     }
 #endif
 
     // On the head node, skip processing
@@ -2424,14 +2424,14 @@
 #endif
       step_and_clear();
     }
 
     // Leave untouched the starting instruction, any Phis, a CreateEx node
-    // or Top. bb->_nodes[_bb_start] is the first schedulable instruction.
-    _bb_end = bb->_nodes.size()-1;
+    // or Top. bb->get_node(_bb_start) is the first schedulable instruction.
+    _bb_end = bb->number_of_nodes()-1;
     for( _bb_start=1; _bb_start <= _bb_end; _bb_start++ ) {
-      Node *n = bb->_nodes[_bb_start];
+      Node *n = bb->get_node(_bb_start);
       // Things not matched, like Phinodes and ProjNodes don't get scheduled.
       // Also, MachIdealNodes do not get scheduled
       if( !n->is_Mach() ) continue; // Skip non-machine nodes
       MachNode *mach = n->as_Mach();
       int iop = mach->ideal_Opcode();
@@ -2447,32 +2447,32 @@
     // might schedule. _bb_end points just after last schedulable inst. We
     // normally schedule conditional branches (despite them being forced last
     // in the block), because they have delay slots we can fill. Calls all
     // have their delay slots filled in the template expansions, so we don't
     // bother scheduling them.
-    Node *last = bb->_nodes[_bb_end];
+    Node *last = bb->get_node(_bb_end);
     // Ignore trailing NOPs.
     while (_bb_end > 0 && last->is_Mach() &&
            last->as_Mach()->ideal_Opcode() == Op_Con) {
-      last = bb->_nodes[--_bb_end];
+      last = bb->get_node(--_bb_end);
     }
     assert(!last->is_Mach() || last->as_Mach()->ideal_Opcode() != Op_Con, "");
     if( last->is_Catch() ||
        // Exclude unreachable path case when Halt node is in a separate block.
        (_bb_end > 1 && last->is_Mach() && last->as_Mach()->ideal_Opcode() == Op_Halt) ) {
       // There must be a prior call. Skip it.
-      while( !bb->_nodes[--_bb_end]->is_MachCall() ) {
-        assert( bb->_nodes[_bb_end]->is_MachProj(), "skipping projections after expected call" );
+      while( !bb->get_node(--_bb_end)->is_MachCall() ) {
+        assert( bb->get_node(_bb_end)->is_MachProj(), "skipping projections after expected call" );
       }
     } else if( last->is_MachNullCheck() ) {
       // Backup so the last null-checked memory instruction is
       // outside the schedulable range. Skip over the nullcheck,
       // projection, and the memory nodes.
       Node *mem = last->in(1);
       do {
         _bb_end--;
-      } while (mem != bb->_nodes[_bb_end]);
+      } while (mem != bb->get_node(_bb_end));
     } else {
       // Set _bb_end to point after last schedulable inst.
       _bb_end++;
     }
 
@@ -2497,29 +2497,29 @@
     }
 
     assert( _scheduled.size() == _bb_end - _bb_start, "wrong number of instructions" );
 #ifdef ASSERT
     for( uint l = _bb_start; l < _bb_end; l++ ) {
-      Node *n = bb->_nodes[l];
+      Node *n = bb->get_node(l);
       uint m;
       for( m = 0; m < _bb_end-_bb_start; m++ )
         if( _scheduled[m] == n )
           break;
       assert( m < _bb_end-_bb_start, "instruction missing in schedule" );
     }
 #endif
 
     // Now copy the instructions (in reverse order) back to the block
     for ( uint k = _bb_start; k < _bb_end; k++ )
-      bb->_nodes.map(k, _scheduled[_bb_end-k-1]);
+      bb->map_node(_scheduled[_bb_end-k-1], k);
 
 #ifndef PRODUCT
     if (_cfg->C->trace_opto_output()) {
       tty->print("# Schedule BB#%03d (final)\n", i);
       uint current = 0;
-      for (uint j = 0; j < bb->_nodes.size(); j++) {
-        Node *n = bb->_nodes[j];
+      for (uint j = 0; j < bb->number_of_nodes(); j++) {
+        Node *n = bb->get_node(j);
         if( valid_bundle_info(n) ) {
           Bundle *bundle = node_bundling(n);
           if (bundle->instr_count() > 0 || bundle->flags() > 0) {
             tty->print("*** Bundle: ");
             bundle->dump();
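One detail worth calling out in the hunk above: Scheduling fills _scheduled
in reverse issue order (the comment at 2511 says as much), so the copy-back
writes slot k from _scheduled[_bb_end-k-1]. A self-contained miniature of
just that index arithmetic:

    // Illustrative only: reverse copy-back of a latest-first schedule.
    #include <cstdio>
    int main() {
      const char* scheduled[] = {"C", "B", "A"};  // pushed latest-first
      unsigned bb_start = 1, bb_end = 4;          // schedulable slots 1..3
      for (unsigned k = bb_start; k < bb_end; k++)
        std::printf("bb[%u] = %s\n", k, scheduled[bb_end - k - 1]);
      return 0;  // prints bb[1] = A, bb[2] = B, bb[3] = C
    }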
@@ -2577,12 +2577,12 @@
   _reg_node.clear();
 
   // Walk over the block backwards. Check to make sure each DEF doesn't
   // kill a live value (other than the one it's supposed to). Add each
   // USE to the live set.
-  for( uint i = b->_nodes.size()-1; i >= _bb_start; i-- ) {
-    Node *n = b->_nodes[i];
+  for( uint i = b->number_of_nodes()-1; i >= _bb_start; i-- ) {
+    Node *n = b->get_node(i);
     int n_op = n->Opcode();
     if( n_op == Op_MachProj && n->ideal_reg() == MachProjNode::fat_proj ) {
       // Fat-proj kills a slew of registers
       RegMask rm = n->out_RegMask();// Make local copy
       while( rm.is_NotEmpty() ) {
@@ -2709,11 +2709,11 @@
       _cfg->get_block_for_node(use) == b) {
     if( pinch->Opcode() == Op_Node && // Real pinch-point (not optimistic?)
         pinch->req() == 1 ) {   // pinch not yet in block?
       pinch->del_req(0);        // yank pointer to later-def, also set flag
       // Insert the pinch-point in the block just after the last use
-      b->_nodes.insert(b->find_node(use)+1,pinch);
+      b->insert_node(pinch, b->find_node(use) + 1);
       _bb_end++;                // Increase size scheduled region in block
     }
 
     add_prec_edge_from_to(pinch,use);
   }
@@ -2761,14 +2761,14 @@
   // block. Leftover node from some prior block is treated like a NULL (no
   // prior def, so no anti-dependence needed). Valid def is distinguished by
   // it being in the current block.
   bool fat_proj_seen = false;
   uint last_safept = _bb_end-1;
-  Node* end_node = (_bb_end-1 >= _bb_start) ? b->_nodes[last_safept] : NULL;
+  Node* end_node = (_bb_end-1 >= _bb_start) ? b->get_node(last_safept) : NULL;
   Node* last_safept_node = end_node;
   for( uint i = _bb_end-1; i >= _bb_start; i-- ) {
-    Node *n = b->_nodes[i];
+    Node *n = b->get_node(i);
     int is_def = n->outcnt();   // def if some uses prior to adding precedence edges
     if( n->is_MachProj() && n->ideal_reg() == MachProjNode::fat_proj ) {
       // Fat-proj kills a slew of registers
       // This can add edges to 'n' and obscure whether or not it was a def,
       // hence the is_def flag.
@@ -2813,11 +2813,11 @@
       }
     }
     // Do not allow defs of new derived values to float above GC
    // points unless the base is definitely available at the GC point.
 
-    Node *m = b->_nodes[i];
+    Node *m = b->get_node(i);
 
     // Add precedence edge from following safepoint to use of derived pointer
     if( last_safept_node != end_node &&
         m != last_safept_node) {
       for (uint k = 1; k < m->req(); k++) {
@@ -2830,15 +2830,15 @@
         }
       }
 
     if( n->jvms() ) {           // Precedence edge from derived to safept
       // Check if last_safept_node was moved by pinch-point insertion in anti_do_use()
-      if( b->_nodes[last_safept] != last_safept_node ) {
+      if( b->get_node(last_safept) != last_safept_node ) {
         last_safept = b->find_node(last_safept_node);
       }
       for( uint j=last_safept; j > i; j-- ) {
-        Node *mach = b->_nodes[j];
+        Node *mach = b->get_node(j);
         if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP )
           mach->add_prec( n );
       }
       last_safept = i;
       last_safept_node = m;