Mercurial > hg > truffle
comparison src/share/vm/opto/parse1.cpp @ 14909:4ca6dc0799b6
Backout jdk9 merge
author | Gilles Duboscq <duboscq@ssw.jku.at> |
---|---|
date | Tue, 01 Apr 2014 13:57:07 +0200 |
parents | 45467c53f178 |
children | 89152779163c |
comparison
equal
deleted
inserted
replaced
14908:8db6e76cb658 | 14909:4ca6dc0799b6 |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. | 2 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. |
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | 4 * |
5 * This code is free software; you can redistribute it and/or modify it | 5 * This code is free software; you can redistribute it and/or modify it |
6 * under the terms of the GNU General Public License version 2 only, as | 6 * under the terms of the GNU General Public License version 2 only, as |
7 * published by the Free Software Foundation. | 7 * published by the Free Software Foundation. |
104 Node *ctl = control(); | 104 Node *ctl = control(); |
105 | 105 |
106 // Very similar to LoadNode::make, except we handle un-aligned longs and | 106 // Very similar to LoadNode::make, except we handle un-aligned longs and |
107 // doubles on Sparc. Intel can handle them just fine directly. | 107 // doubles on Sparc. Intel can handle them just fine directly. |
108 Node *l; | 108 Node *l; |
109 switch (bt) { // Signature is flattened | 109 switch( bt ) { // Signature is flattened |
110 case T_INT: l = new (C) LoadINode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInt::INT, MemNode::unordered); break; | 110 case T_INT: l = new (C) LoadINode( ctl, mem, adr, TypeRawPtr::BOTTOM ); break; |
111 case T_FLOAT: l = new (C) LoadFNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::FLOAT, MemNode::unordered); break; | 111 case T_FLOAT: l = new (C) LoadFNode( ctl, mem, adr, TypeRawPtr::BOTTOM ); break; |
112 case T_ADDRESS: l = new (C) LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered); break; | 112 case T_ADDRESS: l = new (C) LoadPNode( ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM ); break; |
113 case T_OBJECT: l = new (C) LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM, MemNode::unordered); break; | 113 case T_OBJECT: l = new (C) LoadPNode( ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM ); break; |
114 case T_LONG: | 114 case T_LONG: |
115 case T_DOUBLE: { | 115 case T_DOUBLE: { |
116 // Since arguments are in reverse order, the argument address 'adr' | 116 // Since arguments are in reverse order, the argument address 'adr' |
117 // refers to the back half of the long/double. Recompute adr. | 117 // refers to the back half of the long/double. Recompute adr. |
118 adr = basic_plus_adr(local_addrs_base, local_addrs, -(index+1)*wordSize); | 118 adr = basic_plus_adr( local_addrs_base, local_addrs, -(index+1)*wordSize ); |
119 if (Matcher::misaligned_doubles_ok) { | 119 if( Matcher::misaligned_doubles_ok ) { |
120 l = (bt == T_DOUBLE) | 120 l = (bt == T_DOUBLE) |
121 ? (Node*)new (C) LoadDNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::DOUBLE, MemNode::unordered) | 121 ? (Node*)new (C) LoadDNode( ctl, mem, adr, TypeRawPtr::BOTTOM ) |
122 : (Node*)new (C) LoadLNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeLong::LONG, MemNode::unordered); | 122 : (Node*)new (C) LoadLNode( ctl, mem, adr, TypeRawPtr::BOTTOM ); |
123 } else { | 123 } else { |
124 l = (bt == T_DOUBLE) | 124 l = (bt == T_DOUBLE) |
125 ? (Node*)new (C) LoadD_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered) | 125 ? (Node*)new (C) LoadD_unalignedNode( ctl, mem, adr, TypeRawPtr::BOTTOM ) |
126 : (Node*)new (C) LoadL_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered); | 126 : (Node*)new (C) LoadL_unalignedNode( ctl, mem, adr, TypeRawPtr::BOTTOM ); |
127 } | 127 } |
128 break; | 128 break; |
129 } | 129 } |
130 default: ShouldNotReachHere(); | 130 default: ShouldNotReachHere(); |
131 } | 131 } |
227 Node *lock_object = fetch_interpreter_state(index*2, T_OBJECT, monitors_addr, osr_buf); | 227 Node *lock_object = fetch_interpreter_state(index*2, T_OBJECT, monitors_addr, osr_buf); |
228 // Try and copy the displaced header to the BoxNode | 228 // Try and copy the displaced header to the BoxNode |
229 Node *displaced_hdr = fetch_interpreter_state((index*2) + 1, T_ADDRESS, monitors_addr, osr_buf); | 229 Node *displaced_hdr = fetch_interpreter_state((index*2) + 1, T_ADDRESS, monitors_addr, osr_buf); |
230 | 230 |
231 | 231 |
232 store_to_memory(control(), box, displaced_hdr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered); | 232 store_to_memory(control(), box, displaced_hdr, T_ADDRESS, Compile::AliasIdxRaw); |
233 | 233 |
234 // Build a bogus FastLockNode (no code will be generated) and push the | 234 // Build a bogus FastLockNode (no code will be generated) and push the |
235 // monitor into our debug info. | 235 // monitor into our debug info. |
236 const FastLockNode *flock = _gvn.transform(new (C) FastLockNode( 0, lock_object, box ))->as_FastLock(); | 236 const FastLockNode *flock = _gvn.transform(new (C) FastLockNode( 0, lock_object, box ))->as_FastLock(); |
237 map()->push_monitor(flock); | 237 map()->push_monitor(flock); |
388 _caller = caller; | 388 _caller = caller; |
389 _method = parse_method; | 389 _method = parse_method; |
390 _expected_uses = expected_uses; | 390 _expected_uses = expected_uses; |
391 _depth = 1 + (caller->has_method() ? caller->depth() : 0); | 391 _depth = 1 + (caller->has_method() ? caller->depth() : 0); |
392 _wrote_final = false; | 392 _wrote_final = false; |
393 _wrote_volatile = false; | |
394 _alloc_with_final = NULL; | 393 _alloc_with_final = NULL; |
395 _entry_bci = InvocationEntryBci; | 394 _entry_bci = InvocationEntryBci; |
396 _tf = NULL; | 395 _tf = NULL; |
397 _block = NULL; | 396 _block = NULL; |
398 debug_only(_block_count = -1); | 397 debug_only(_block_count = -1); |
906 _exits.set_control(gvn().transform(region)); | 905 _exits.set_control(gvn().transform(region)); |
907 | 906 |
908 Node* iophi = _exits.i_o(); | 907 Node* iophi = _exits.i_o(); |
909 _exits.set_i_o(gvn().transform(iophi)); | 908 _exits.set_i_o(gvn().transform(iophi)); |
910 | 909 |
911 // On PPC64, also add MemBarRelease for constructors which write | 910 if (wrote_final()) { |
912 // volatile fields. As support_IRIW_for_not_multiple_copy_atomic_cpu | |
913 // is set on PPC64, no sync instruction is issued after volatile | |
914 // stores. We want to guarantee the same behaviour as on platforms | |
915 // with total store order, although this is not required by the Java | |
916 // memory model. So as with finals, we add a barrier here. | |
917 if (wrote_final() PPC64_ONLY(|| (wrote_volatile() && method()->is_initializer()))) { | |
918 // This method (which must be a constructor by the rules of Java) | 911 // This method (which must be a constructor by the rules of Java) |
919 // wrote a final. The effects of all initializations must be | 912 // wrote a final. The effects of all initializations must be |
920 // committed to memory before any code after the constructor | 913 // committed to memory before any code after the constructor |
921 // publishes the reference to the newly constructed object. | 914 // publishes the reference to the newly constructed object. |
922 // Rather than wait for the publication, we simply block the | 915 // Rather than wait for the publication, we simply block the |
1654 // Phis of pointers cannot lose the basic pointer type. | 1647 // Phis of pointers cannot lose the basic pointer type. |
1655 debug_only(const Type* bt1 = phi->bottom_type()); | 1648 debug_only(const Type* bt1 = phi->bottom_type()); |
1656 assert(bt1 != Type::BOTTOM, "should not be building conflict phis"); | 1649 assert(bt1 != Type::BOTTOM, "should not be building conflict phis"); |
1657 map()->set_req(j, _gvn.transform_no_reclaim(phi)); | 1650 map()->set_req(j, _gvn.transform_no_reclaim(phi)); |
1658 debug_only(const Type* bt2 = phi->bottom_type()); | 1651 debug_only(const Type* bt2 = phi->bottom_type()); |
1659 assert(bt2->higher_equal_speculative(bt1), "must be consistent with type-flow"); | 1652 assert(bt2->higher_equal(bt1), "must be consistent with type-flow"); |
1660 record_for_igvn(phi); | 1653 record_for_igvn(phi); |
1661 } | 1654 } |
1662 } | 1655 } |
1663 } // End of for all values to be merged | 1656 } // End of for all values to be merged |
1664 | 1657 |
1936 // class is often visible so the access flags are constant. | 1929 // class is often visible so the access flags are constant. |
1937 Node* klass_addr = basic_plus_adr( receiver, receiver, oopDesc::klass_offset_in_bytes() ); | 1930 Node* klass_addr = basic_plus_adr( receiver, receiver, oopDesc::klass_offset_in_bytes() ); |
1938 Node* klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), klass_addr, TypeInstPtr::KLASS) ); | 1931 Node* klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), klass_addr, TypeInstPtr::KLASS) ); |
1939 | 1932 |
1940 Node* access_flags_addr = basic_plus_adr(klass, klass, in_bytes(Klass::access_flags_offset())); | 1933 Node* access_flags_addr = basic_plus_adr(klass, klass, in_bytes(Klass::access_flags_offset())); |
1941 Node* access_flags = make_load(NULL, access_flags_addr, TypeInt::INT, T_INT, MemNode::unordered); | 1934 Node* access_flags = make_load(NULL, access_flags_addr, TypeInt::INT, T_INT); |
1942 | 1935 |
1943 Node* mask = _gvn.transform(new (C) AndINode(access_flags, intcon(JVM_ACC_HAS_FINALIZER))); | 1936 Node* mask = _gvn.transform(new (C) AndINode(access_flags, intcon(JVM_ACC_HAS_FINALIZER))); |
1944 Node* check = _gvn.transform(new (C) CmpINode(mask, intcon(0))); | 1937 Node* check = _gvn.transform(new (C) CmpINode(mask, intcon(0))); |
1945 Node* test = _gvn.transform(new (C) BoolNode(check, BoolTest::ne)); | 1938 Node* test = _gvn.transform(new (C) BoolNode(check, BoolTest::ne)); |
1946 | 1939 |
2027 const TypeInstPtr *tp = value->bottom_type()->isa_instptr(); | 2020 const TypeInstPtr *tp = value->bottom_type()->isa_instptr(); |
2028 if (tp && tp->klass()->is_loaded() && | 2021 if (tp && tp->klass()->is_loaded() && |
2029 !tp->klass()->is_interface()) { | 2022 !tp->klass()->is_interface()) { |
2030 // sharpen the type eagerly; this eases certain assert checking | 2023 // sharpen the type eagerly; this eases certain assert checking |
2031 if (tp->higher_equal(TypeInstPtr::NOTNULL)) | 2024 if (tp->higher_equal(TypeInstPtr::NOTNULL)) |
2032 tr = tr->join_speculative(TypeInstPtr::NOTNULL)->is_instptr(); | 2025 tr = tr->join(TypeInstPtr::NOTNULL)->is_instptr(); |
2033 value = _gvn.transform(new (C) CheckCastPPNode(0,value,tr)); | 2026 value = _gvn.transform(new (C) CheckCastPPNode(0,value,tr)); |
2034 } | 2027 } |
2035 } | 2028 } |
2036 phi->add_req(value); | 2029 phi->add_req(value); |
2037 } | 2030 } |