comparison src/share/vm/opto/parse1.cpp @ 6804:e626685e9f6c

7193318: C2: remove number of inputs requirement from Node's new operator
Summary: Deleted placement new operator of Node - node(size_t, Compile *, int).
Reviewed-by: kvn, twisti
Contributed-by: bharadwaj.yadavalli@oracle.com
author kvn
date Thu, 27 Sep 2012 09:38:42 -0700
parents da91efe96a93
children f13867c41f73
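
The change is mechanical at every call site in this file: new (C, N) SomeNode(...) becomes new (C) SomeNode(...), since the extra count N only repeated what each node's constructor already knows. Below is a minimal, self-contained sketch of the two operator forms, using hypothetical stand-in types for illustration only (the real declarations live in src/share/vm/opto/node.hpp and allocate from the compilation's node arena):

    #include <cstddef>
    #include <cstdlib>

    // Stand-ins for illustration only -- not the real HotSpot classes.
    struct Arena {
      void* Amalloc(size_t x) { return ::malloc(x); }  // real Arena is a bump allocator
    };

    struct Compile {
      Arena  _node_arena;
      Arena* node_arena() { return &_node_arena; }     // arena owning this compile's nodes
    };

    struct Node {
      // Removed by this change -- callers had to repeat the input count:
      //   void* operator new(size_t x, Compile* C, int node_size);
      //   e.g.  new (C, 3) CmpPNode(l, null());

      // Kept -- only the owning Compile is needed:
      //   e.g.  new (C) CmpPNode(l, null());
      void* operator new(size_t x, Compile* C) {
        return C->node_arena()->Amalloc(x);
      }
    };

With the two-argument form gone, a call site can no longer pass an edge count that disagrees with the constructor, and nodes whose input count is computed at runtime (e.g. new (C) RegionNode(edges+1) below) need no special handling.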
--- a/src/share/vm/opto/parse1.cpp	(6803:06f52c4d0e18)
+++ b/src/share/vm/opto/parse1.cpp	(6804:e626685e9f6c)
@@ -105,27 +105,27 @@
 
   // Very similar to LoadNode::make, except we handle un-aligned longs and
   // doubles on Sparc. Intel can handle them just fine directly.
   Node *l;
   switch( bt ) { // Signature is flattened
-  case T_INT: l = new (C, 3) LoadINode( ctl, mem, adr, TypeRawPtr::BOTTOM ); break;
-  case T_FLOAT: l = new (C, 3) LoadFNode( ctl, mem, adr, TypeRawPtr::BOTTOM ); break;
-  case T_ADDRESS: l = new (C, 3) LoadPNode( ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM ); break;
-  case T_OBJECT: l = new (C, 3) LoadPNode( ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM ); break;
+  case T_INT: l = new (C) LoadINode( ctl, mem, adr, TypeRawPtr::BOTTOM ); break;
+  case T_FLOAT: l = new (C) LoadFNode( ctl, mem, adr, TypeRawPtr::BOTTOM ); break;
+  case T_ADDRESS: l = new (C) LoadPNode( ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM ); break;
+  case T_OBJECT: l = new (C) LoadPNode( ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM ); break;
   case T_LONG:
   case T_DOUBLE: {
     // Since arguments are in reverse order, the argument address 'adr'
     // refers to the back half of the long/double. Recompute adr.
     adr = basic_plus_adr( local_addrs_base, local_addrs, -(index+1)*wordSize );
     if( Matcher::misaligned_doubles_ok ) {
       l = (bt == T_DOUBLE)
-        ? (Node*)new (C, 3) LoadDNode( ctl, mem, adr, TypeRawPtr::BOTTOM )
-        : (Node*)new (C, 3) LoadLNode( ctl, mem, adr, TypeRawPtr::BOTTOM );
+        ? (Node*)new (C) LoadDNode( ctl, mem, adr, TypeRawPtr::BOTTOM )
+        : (Node*)new (C) LoadLNode( ctl, mem, adr, TypeRawPtr::BOTTOM );
     } else {
       l = (bt == T_DOUBLE)
-        ? (Node*)new (C, 3) LoadD_unalignedNode( ctl, mem, adr, TypeRawPtr::BOTTOM )
-        : (Node*)new (C, 3) LoadL_unalignedNode( ctl, mem, adr, TypeRawPtr::BOTTOM );
+        ? (Node*)new (C) LoadD_unalignedNode( ctl, mem, adr, TypeRawPtr::BOTTOM )
+        : (Node*)new (C) LoadL_unalignedNode( ctl, mem, adr, TypeRawPtr::BOTTOM );
     }
     break;
   }
   default: ShouldNotReachHere();
   }
@@ -145,15 +145,15 @@
 
   // TypeFlow may assert null-ness if a type appears unloaded.
   if (type == TypePtr::NULL_PTR ||
       (tp != NULL && !tp->klass()->is_loaded())) {
     // Value must be null, not a real oop.
-    Node* chk = _gvn.transform( new (C, 3) CmpPNode(l, null()) );
-    Node* tst = _gvn.transform( new (C, 2) BoolNode(chk, BoolTest::eq) );
+    Node* chk = _gvn.transform( new (C) CmpPNode(l, null()) );
+    Node* tst = _gvn.transform( new (C) BoolNode(chk, BoolTest::eq) );
     IfNode* iff = create_and_map_if(control(), tst, PROB_MAX, COUNT_UNKNOWN);
-    set_control(_gvn.transform( new (C, 1) IfTrueNode(iff) ));
-    Node* bad_type = _gvn.transform( new (C, 1) IfFalseNode(iff) );
+    set_control(_gvn.transform( new (C) IfTrueNode(iff) ));
+    Node* bad_type = _gvn.transform( new (C) IfFalseNode(iff) );
     bad_type_exit->control()->add_req(bad_type);
     l = null();
   }
 
   // Typeflow can also cut off paths from the CFG, based on
@@ -216,11 +216,11 @@
   assert(jvms()->monitor_depth() == 0, "should be no active locks at beginning of osr");
   int mcnt = osr_block->flow()->monitor_count();
   Node *monitors_addr = basic_plus_adr(osr_buf, osr_buf, (max_locals+mcnt*2-1)*wordSize);
   for (index = 0; index < mcnt; index++) {
     // Make a BoxLockNode for the monitor.
-    Node *box = _gvn.transform(new (C, 1) BoxLockNode(next_monitor()));
+    Node *box = _gvn.transform(new (C) BoxLockNode(next_monitor()));
 
 
     // Displaced headers and locked objects are interleaved in the
     // temp OSR buffer. We only copy the locked objects out here.
     // Fetch the locked object from the OSR temp buffer and copy to our fastlock node.
@@ -231,11 +231,11 @@
 
     store_to_memory(control(), box, displaced_hdr, T_ADDRESS, Compile::AliasIdxRaw);
 
     // Build a bogus FastLockNode (no code will be generated) and push the
     // monitor into our debug info.
-    const FastLockNode *flock = _gvn.transform(new (C, 3) FastLockNode( 0, lock_object, box ))->as_FastLock();
+    const FastLockNode *flock = _gvn.transform(new (C) FastLockNode( 0, lock_object, box ))->as_FastLock();
     map()->push_monitor(flock);
 
     // If the lock is our method synchronization lock, tuck it away in
     // _sync_lock for return and rethrow exit paths.
     if (index == 0 && method()->is_synchronized()) {
@@ -321,11 +321,11 @@
                         osr_buf);
 
   // Now that the interpreter state is loaded, make sure it will match
   // at execution time what the compiler is expecting now:
   SafePointNode* bad_type_exit = clone_map();
-  bad_type_exit->set_control(new (C, 1) RegionNode(1));
+  bad_type_exit->set_control(new (C) RegionNode(1));
 
   assert(osr_block->flow()->jsrs()->size() == 0, "should be no jsrs live at osr point");
   for (index = 0; index < max_locals; index++) {
     if (stopped()) break;
     Node* l = local(index);
@@ -645,11 +645,11 @@
     // It is fine to set it here since do_one_block() will set it anyway.
     set_parse_bci(block->start());
     add_predicate();
     // Add new region for back branches.
     int edges = block->pred_count() - block->preds_parsed() + 1; // +1 for original region
-    RegionNode *r = new (C, edges+1) RegionNode(edges+1);
+    RegionNode *r = new (C) RegionNode(edges+1);
     _gvn.set_type(r, Type::CONTROL);
     record_for_igvn(r);
     r->init_req(edges, control());
     set_control(r);
     // Add new phis.
@@ -712,18 +712,18 @@
   // make a clone of caller to prevent sharing of side-effects
   _exits.set_map(_exits.clone_map());
   _exits.clean_stack(_exits.sp());
   _exits.sync_jvms();
 
-  RegionNode* region = new (C, 1) RegionNode(1);
+  RegionNode* region = new (C) RegionNode(1);
   record_for_igvn(region);
   gvn().set_type_bottom(region);
   _exits.set_control(region);
 
   // Note: iophi and memphi are not transformed until do_exits.
-  Node* iophi = new (C, region->req()) PhiNode(region, Type::ABIO);
-  Node* memphi = new (C, region->req()) PhiNode(region, Type::MEMORY, TypePtr::BOTTOM);
+  Node* iophi = new (C) PhiNode(region, Type::ABIO);
+  Node* memphi = new (C) PhiNode(region, Type::MEMORY, TypePtr::BOTTOM);
   _exits.set_i_o(iophi);
   _exits.set_all_memory(memphi);
 
   // Add a return value to the exit state. (Do not push it yet.)
   if (tf()->range()->cnt() > TypeFunc::Parms) {
@@ -734,11 +734,11 @@
     const TypeOopPtr* ret_oop_type = ret_type->isa_oopptr();
     if (ret_oop_type && !ret_oop_type->klass()->is_loaded()) {
       ret_type = TypeOopPtr::BOTTOM;
     }
     int ret_size = type2size[ret_type->basic_type()];
-    Node* ret_phi = new (C, region->req()) PhiNode(region, ret_type);
+    Node* ret_phi = new (C) PhiNode(region, ret_type);
     _exits.ensure_stack(ret_size);
     assert((int)(tf()->range()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
     assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
     _exits.set_argument(0, ret_phi); // here is where the parser finds it
     // Note: ret_phi is not yet pushed, until do_exits.
@@ -751,11 +751,11 @@
 // unknown caller. The method & bci will be NULL & InvocationEntryBci.
 JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
   int arg_size = tf->domain()->cnt();
   int max_size = MAX2(arg_size, (int)tf->range()->cnt());
   JVMState* jvms = new (this) JVMState(max_size - TypeFunc::Parms);
-  SafePointNode* map = new (this, max_size) SafePointNode(max_size, NULL);
+  SafePointNode* map = new (this) SafePointNode(max_size, NULL);
   record_for_igvn(map);
   assert(arg_size == TypeFunc::Parms + (is_osr_compilation() ? 1 : method()->arg_size()), "correct arg_size");
   Node_Notes* old_nn = default_node_notes();
   if (old_nn != NULL && has_method()) {
     Node_Notes* entry_nn = old_nn->clone(this);
@@ -765,11 +765,11 @@
     entry_nn->set_jvms(entry_jvms);
     set_default_node_notes(entry_nn);
   }
   uint i;
   for (i = 0; i < (uint)arg_size; i++) {
-    Node* parm = initial_gvn()->transform(new (this, 1) ParmNode(start, i));
+    Node* parm = initial_gvn()->transform(new (this) ParmNode(start, i));
     map->init_req(i, parm);
     // Record all these guys for later GVN.
     record_for_igvn(parm);
   }
   for (; i < map->req(); i++) {
@@ -796,11 +796,11 @@
 
 
 //--------------------------return_values--------------------------------------
 void Compile::return_values(JVMState* jvms) {
   GraphKit kit(jvms);
-  Node* ret = new (this, TypeFunc::Parms) ReturnNode(TypeFunc::Parms,
+  Node* ret = new (this) ReturnNode(TypeFunc::Parms,
                              kit.control(),
                              kit.i_o(),
                              kit.reset_memory(),
                              kit.frameptr(),
                              kit.returnadr());
@@ -824,11 +824,11 @@
   GraphKit kit(jvms);
   if (!kit.has_exceptions()) return; // nothing to generate
   // Load my combined exception state into the kit, with all phis transformed:
   SafePointNode* ex_map = kit.combine_and_pop_all_exception_states();
   Node* ex_oop = kit.use_exception_state(ex_map);
-  RethrowNode* exit = new (this, TypeFunc::Parms + 1) RethrowNode(kit.control(),
+  RethrowNode* exit = new (this) RethrowNode(kit.control(),
                                  kit.i_o(), kit.reset_memory(),
                                  kit.frameptr(), kit.returnadr(),
                                  // like a return but with exception input
                                  ex_oop);
   // bind to root
@@ -1018,11 +1018,11 @@
 
   assert(method() != NULL, "parser must have a method");
 
   // Create an initial safepoint to hold JVM state during parsing
   JVMState* jvms = new (C) JVMState(method(), _caller->has_method() ? _caller : NULL);
-  set_map(new (C, len) SafePointNode(len, jvms));
+  set_map(new (C) SafePointNode(len, jvms));
   jvms->set_map(map());
   record_for_igvn(map());
   assert(jvms->endoff() == len, "correct jvms sizing");
 
   SafePointNode* inmap = _caller->map();
@@ -1526,11 +1526,11 @@
   }
   // Add a Region to start the new basic block. Phis will be added
   // later lazily.
   int edges = target->pred_count();
   if (edges < pnum) edges = pnum; // might be a new path!
-  RegionNode *r = new (C, edges+1) RegionNode(edges+1);
+  RegionNode *r = new (C) RegionNode(edges+1);
   gvn().set_type(r, Type::CONTROL);
   record_for_igvn(r);
   // zap all inputs to NULL for debugging (done in Node(uint) constructor)
   // for (int j = 1; j < edges+1; j++) { r->init_req(j, NULL); }
   r->init_req(pnum, control());
@@ -1921,23 +1921,23 @@
   Node* klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), klass_addr, TypeInstPtr::KLASS) );
 
   Node* access_flags_addr = basic_plus_adr(klass, klass, in_bytes(Klass::access_flags_offset()));
   Node* access_flags = make_load(NULL, access_flags_addr, TypeInt::INT, T_INT);
 
-  Node* mask = _gvn.transform(new (C, 3) AndINode(access_flags, intcon(JVM_ACC_HAS_FINALIZER)));
-  Node* check = _gvn.transform(new (C, 3) CmpINode(mask, intcon(0)));
-  Node* test = _gvn.transform(new (C, 2) BoolNode(check, BoolTest::ne));
+  Node* mask = _gvn.transform(new (C) AndINode(access_flags, intcon(JVM_ACC_HAS_FINALIZER)));
+  Node* check = _gvn.transform(new (C) CmpINode(mask, intcon(0)));
+  Node* test = _gvn.transform(new (C) BoolNode(check, BoolTest::ne));
 
   IfNode* iff = create_and_map_if(control(), test, PROB_MAX, COUNT_UNKNOWN);
 
-  RegionNode* result_rgn = new (C, 3) RegionNode(3);
+  RegionNode* result_rgn = new (C) RegionNode(3);
   record_for_igvn(result_rgn);
 
-  Node *skip_register = _gvn.transform(new (C, 1) IfFalseNode(iff));
+  Node *skip_register = _gvn.transform(new (C) IfFalseNode(iff));
   result_rgn->init_req(1, skip_register);
 
-  Node *needs_register = _gvn.transform(new (C, 1) IfTrueNode(iff));
+  Node *needs_register = _gvn.transform(new (C) IfTrueNode(iff));
   set_control(needs_register);
   if (stopped()) {
     // There is no slow path.
     result_rgn->init_req(2, top());
   } else {
@@ -2011,11 +2011,11 @@
       if (tp && tp->klass()->is_loaded() &&
           !tp->klass()->is_interface()) {
         // sharpen the type eagerly; this eases certain assert checking
        if (tp->higher_equal(TypeInstPtr::NOTNULL))
           tr = tr->join(TypeInstPtr::NOTNULL)->is_instptr();
-        value = _gvn.transform(new (C, 2) CheckCastPPNode(0,value,tr));
+        value = _gvn.transform(new (C) CheckCastPPNode(0,value,tr));
       }
     }
     phi->add_req(value);
   }
 
@@ -2046,11 +2046,11 @@
 
   // Clear out dead values from the debug info.
   kill_dead_locals();
 
   // Clone the JVM State
-  SafePointNode *sfpnt = new (C, parms) SafePointNode(parms, NULL);
+  SafePointNode *sfpnt = new (C) SafePointNode(parms, NULL);
 
   // Capture memory state BEFORE a SafePoint. Since we can block at a
   // SafePoint we need our GC state to be safe; i.e. we need all our current
   // write barriers (card marks) to not float down after the SafePoint so we
   // must read raw memory. Likewise we need all oop stores to match the card