Mercurial > hg > truffle
comparison src/share/vm/opto/matcher.cpp @ 6848:8e47bac5643a
7054512: Compress class pointers after perm gen removal
Summary: support of compress class pointers in the compilers.
Reviewed-by: kvn, twisti
author | roland |
---|---|
date | Tue, 09 Oct 2012 10:11:38 +0200 |
parents | e626685e9f6c |
children | 2aff40cb4703 |
comparison
equal
deleted
inserted
replaced
6847:65d07d9ee446 | 6848:8e47bac5643a |
---|---|
1056 // Monitor boxes are also represented directly. | 1056 // Monitor boxes are also represented directly. |
1057 for (i = cnt - 1; i >= debug_cnt; --i) { // For all debug inputs do | 1057 for (i = cnt - 1; i >= debug_cnt; --i) { // For all debug inputs do |
1058 Node *m = n->in(i); // Get input | 1058 Node *m = n->in(i); // Get input |
1059 int op = m->Opcode(); | 1059 int op = m->Opcode(); |
1060 assert((op == Op_BoxLock) == jvms->is_monitor_use(i), "boxes only at monitor sites"); | 1060 assert((op == Op_BoxLock) == jvms->is_monitor_use(i), "boxes only at monitor sites"); |
1061 if( op == Op_ConI || op == Op_ConP || op == Op_ConN || | 1061 if( op == Op_ConI || op == Op_ConP || op == Op_ConN || op == Op_ConNKlass || |
1062 op == Op_ConF || op == Op_ConD || op == Op_ConL | 1062 op == Op_ConF || op == Op_ConD || op == Op_ConL |
1063 // || op == Op_BoxLock // %%%% enable this and remove (+++) in chaitin.cpp | 1063 // || op == Op_BoxLock // %%%% enable this and remove (+++) in chaitin.cpp |
1064 ) { | 1064 ) { |
1065 m = m->clone(); | 1065 m = m->clone(); |
1066 #ifdef ASSERT | 1066 #ifdef ASSERT |
1448 break; // mem_control? If so, we can use it | 1448 break; // mem_control? If so, we can use it |
1449 } | 1449 } |
1450 if (j == max_scan) // No post-domination before scan end? | 1450 if (j == max_scan) // No post-domination before scan end? |
1451 return true; // Then break the match tree up | 1451 return true; // Then break the match tree up |
1452 } | 1452 } |
1453 if (m->is_DecodeN() && Matcher::narrow_oop_use_complex_address()) { | 1453 if ((m->is_DecodeN() && Matcher::narrow_oop_use_complex_address()) || |
1454 (m->is_DecodeNKlass() && Matcher::narrow_klass_use_complex_address())) { | |
1454 // These are commonly used in address expressions and can | 1455 // These are commonly used in address expressions and can |
1455 // efficiently fold into them on X64 in some cases. | 1456 // efficiently fold into them on X64 in some cases. |
1456 return false; | 1457 return false; |
1457 } | 1458 } |
1458 } | 1459 } |
1572 // Con nodes reduced using the same rule can share their MachNode | 1573 // Con nodes reduced using the same rule can share their MachNode |
1573 // which reduces the number of copies of a constant in the final | 1574 // which reduces the number of copies of a constant in the final |
1574 // program. The register allocator is free to split uses later to | 1575 // program. The register allocator is free to split uses later to |
1575 // split live ranges. | 1576 // split live ranges. |
1576 MachNode* Matcher::find_shared_node(Node* leaf, uint rule) { | 1577 MachNode* Matcher::find_shared_node(Node* leaf, uint rule) { |
1577 if (!leaf->is_Con() && !leaf->is_DecodeN()) return NULL; | 1578 if (!leaf->is_Con() && !leaf->is_DecodeNarrowPtr()) return NULL; |
1578 | 1579 |
1579 // See if this Con has already been reduced using this rule. | 1580 // See if this Con has already been reduced using this rule. |
1580 if (_shared_nodes.Size() <= leaf->_idx) return NULL; | 1581 if (_shared_nodes.Size() <= leaf->_idx) return NULL; |
1581 MachNode* last = (MachNode*)_shared_nodes.at(leaf->_idx); | 1582 MachNode* last = (MachNode*)_shared_nodes.at(leaf->_idx); |
1582 if (last != NULL && rule == last->rule()) { | 1583 if (last != NULL && rule == last->rule()) { |
1583 // Don't expect control change for DecodeN | 1584 // Don't expect control change for DecodeN |
1584 if (leaf->is_DecodeN()) | 1585 if (leaf->is_DecodeNarrowPtr()) |
1585 return last; | 1586 return last; |
1586 // Get the new space root. | 1587 // Get the new space root. |
1587 Node* xroot = new_node(C->root()); | 1588 Node* xroot = new_node(C->root()); |
1588 if (xroot == NULL) { | 1589 if (xroot == NULL) { |
1589 // This shouldn't happen given the order of matching. | 1590 // This shouldn't happen given the order of matching. |
1669 } | 1670 } |
1670 const Type* mach_at = mach->adr_type(); | 1671 const Type* mach_at = mach->adr_type(); |
1671 // DecodeN node consumed by an address may have different type | 1672 // DecodeN node consumed by an address may have different type |
1672 // than its input. Don't compare types for such case. | 1673 // than its input. Don't compare types for such case. |
1673 if (m->adr_type() != mach_at && | 1674 if (m->adr_type() != mach_at && |
1674 (m->in(MemNode::Address)->is_DecodeN() || | 1675 (m->in(MemNode::Address)->is_DecodeNarrowPtr() || |
1675 m->in(MemNode::Address)->is_AddP() && | 1676 m->in(MemNode::Address)->is_AddP() && |
1676 m->in(MemNode::Address)->in(AddPNode::Address)->is_DecodeN() || | 1677 m->in(MemNode::Address)->in(AddPNode::Address)->is_DecodeNarrowPtr() || |
1677 m->in(MemNode::Address)->is_AddP() && | 1678 m->in(MemNode::Address)->is_AddP() && |
1678 m->in(MemNode::Address)->in(AddPNode::Address)->is_AddP() && | 1679 m->in(MemNode::Address)->in(AddPNode::Address)->is_AddP() && |
1679 m->in(MemNode::Address)->in(AddPNode::Address)->in(AddPNode::Address)->is_DecodeN())) { | 1680 m->in(MemNode::Address)->in(AddPNode::Address)->in(AddPNode::Address)->is_DecodeNarrowPtr())) { |
1680 mach_at = m->adr_type(); | 1681 mach_at = m->adr_type(); |
1681 } | 1682 } |
1682 if (m->adr_type() != mach_at) { | 1683 if (m->adr_type() != mach_at) { |
1683 m->dump(); | 1684 m->dump(); |
1684 tty->print_cr("mach:"); | 1685 tty->print_cr("mach:"); |
1719 if (_allocation_started) { | 1720 if (_allocation_started) { |
1720 guarantee(ex == mach, "no expand rules during spill generation"); | 1721 guarantee(ex == mach, "no expand rules during spill generation"); |
1721 guarantee(_proj_list.size() == num_proj, "no allocation during spill generation"); | 1722 guarantee(_proj_list.size() == num_proj, "no allocation during spill generation"); |
1722 } | 1723 } |
1723 | 1724 |
1724 if (leaf->is_Con() || leaf->is_DecodeN()) { | 1725 if (leaf->is_Con() || leaf->is_DecodeNarrowPtr()) { |
1725 // Record the con for sharing | 1726 // Record the con for sharing |
1726 _shared_nodes.map(leaf->_idx, ex); | 1727 _shared_nodes.map(leaf->_idx, ex); |
1727 } | 1728 } |
1728 | 1729 |
1729 return ex; | 1730 return ex; |
2036 if( _must_clone[mop] ) { | 2037 if( _must_clone[mop] ) { |
2037 mstack.push(m, Visit); | 2038 mstack.push(m, Visit); |
2038 continue; // for(int i = ...) | 2039 continue; // for(int i = ...) |
2039 } | 2040 } |
2040 | 2041 |
2041 if( mop == Op_AddP && m->in(AddPNode::Base)->Opcode() == Op_DecodeN ) { | 2042 if( mop == Op_AddP && m->in(AddPNode::Base)->is_DecodeNarrowPtr()) { |
2042 // Bases used in addresses must be shared but since | 2043 // Bases used in addresses must be shared but since |
2043 // they are shared through a DecodeN they may appear | 2044 // they are shared through a DecodeN they may appear |
2044 // to have a single use so force sharing here. | 2045 // to have a single use so force sharing here. |
2045 set_shared(m->in(AddPNode::Base)->in(1)); | 2046 set_shared(m->in(AddPNode::Base)->in(1)); |
2046 } | 2047 } |
2275 bool is_decoden = ((intptr_t)val) & 1; | 2276 bool is_decoden = ((intptr_t)val) & 1; |
2276 val = (Node*)(((intptr_t)val) & ~1); | 2277 val = (Node*)(((intptr_t)val) & ~1); |
2277 if (has_new_node(val)) { | 2278 if (has_new_node(val)) { |
2278 Node* new_val = new_node(val); | 2279 Node* new_val = new_node(val); |
2279 if (is_decoden) { | 2280 if (is_decoden) { |
2280 assert(val->is_DecodeN() && val->in(0) == NULL, "sanity"); | 2281 assert(val->is_DecodeNarrowPtr() && val->in(0) == NULL, "sanity"); |
2281 // Note: new_val may have a control edge if | 2282 // Note: new_val may have a control edge if |
2282 // the original ideal node DecodeN was matched before | 2283 // the original ideal node DecodeN was matched before |
2283 // it was unpinned in Matcher::collect_null_checks(). | 2284 // it was unpinned in Matcher::collect_null_checks(). |
2284 // Unpin the mach node and mark it. | 2285 // Unpin the mach node and mark it. |
2285 new_val->set_req(0, NULL); | 2286 new_val->set_req(0, NULL); |