src/share/vm/opto/escape.cpp @ 65:99269dbf4ba8

6674588: (Escape Analysis) Improve Escape Analysis code
Summary: Current EA code has several problems which have to be fixed.
Reviewed-by: jrose, sgoldman
author:   kvn
date:     Fri, 14 Mar 2008 15:26:33 -0700
parents:  76256d272075
children: 36cd3cc4d27b a6cb86dd209b
58 "Field" 58 "Field"
59 }; 59 };
60 60
61 static char *esc_names[] = { 61 static char *esc_names[] = {
62 "UnknownEscape", 62 "UnknownEscape",
63 "NoEscape ", 63 "NoEscape",
64 "ArgEscape ", 64 "ArgEscape",
65 "GlobalEscape " 65 "GlobalEscape"
66 }; 66 };
67 67
68 static char *edge_type_suffix[] = { 68 static char *edge_type_suffix[] = {
69 "?", // UnknownEdge 69 "?", // UnknownEdge
70 "P", // PointsToEdge 70 "P", // PointsToEdge
73 }; 73 };
74 74
75 void PointsToNode::dump() const { 75 void PointsToNode::dump() const {
76 NodeType nt = node_type(); 76 NodeType nt = node_type();
77 EscapeState es = escape_state(); 77 EscapeState es = escape_state();
78 tty->print("%s %s [[", node_type_names[(int) nt], esc_names[(int) es]); 78 tty->print("%s %s %s [[", node_type_names[(int) nt], esc_names[(int) es], _scalar_replaceable ? "" : "NSR");
79 for (uint i = 0; i < edge_count(); i++) { 79 for (uint i = 0; i < edge_count(); i++) {
80 tty->print(" %d%s", edge_target(i), edge_type_suffix[(int) edge_type(i)]); 80 tty->print(" %d%s", edge_target(i), edge_type_suffix[(int) edge_type(i)]);
81 } 81 }
82 tty->print("]] "); 82 tty->print("]] ");
83 if (_node == NULL) 83 if (_node == NULL)
89 89
90 ConnectionGraph::ConnectionGraph(Compile * C) : _processed(C->comp_arena()), _node_map(C->comp_arena()) { 90 ConnectionGraph::ConnectionGraph(Compile * C) : _processed(C->comp_arena()), _node_map(C->comp_arena()) {
91 _collecting = true; 91 _collecting = true;
92 this->_compile = C; 92 this->_compile = C;
93 const PointsToNode &dummy = PointsToNode(); 93 const PointsToNode &dummy = PointsToNode();
94 _nodes = new(C->comp_arena()) GrowableArray<PointsToNode>(C->comp_arena(), (int) INITIAL_NODE_COUNT, 0, dummy); 94 int sz = C->unique();
95 _nodes = new(C->comp_arena()) GrowableArray<PointsToNode>(C->comp_arena(), sz, sz, dummy);
95 _phantom_object = C->top()->_idx; 96 _phantom_object = C->top()->_idx;
96 PointsToNode *phn = ptnode_adr(_phantom_object); 97 PointsToNode *phn = ptnode_adr(_phantom_object);
98 phn->_node = C->top();
97 phn->set_node_type(PointsToNode::JavaObject); 99 phn->set_node_type(PointsToNode::JavaObject);
98 phn->set_escape_state(PointsToNode::GlobalEscape); 100 phn->set_escape_state(PointsToNode::GlobalEscape);
99 } 101 }
@@ -100 +102 @@
 
 void ConnectionGraph::add_pointsto_edge(uint from_i, uint to_i) {
@@ -119 +121 @@
   // deferred edges
   if (from_i != to_i)
     f->add_edge(to_i, PointsToNode::DeferredEdge);
 }
 
-int ConnectionGraph::type_to_offset(const Type *t) {
-  const TypePtr *t_ptr = t->isa_ptr();
+int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) {
+  const Type *adr_type = phase->type(adr);
+  if (adr->is_AddP() && adr_type->isa_oopptr() == NULL &&
+      adr->in(AddPNode::Address)->is_Proj() &&
+      adr->in(AddPNode::Address)->in(0)->is_Allocate()) {
+    // We are computing a raw address for a store captured by an Initialize
+    // compute an appropriate address type. AddP cases #3 and #5 (see below).
+    int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
+    assert(offs != Type::OffsetBot ||
+           adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
+           "offset must be a constant or it is initialization of array");
+    return offs;
+  }
+  const TypePtr *t_ptr = adr_type->isa_ptr();
   assert(t_ptr != NULL, "must be a pointer type");
   return t_ptr->offset();
 }
 
 void ConnectionGraph::add_field_edge(uint from_i, uint to_i, int offset) {
@@ -145 +159 @@
   PointsToNode::EscapeState old_es = npt->escape_state();
   if (es > old_es)
     npt->set_escape_state(es);
 }
 
+void ConnectionGraph::add_node(Node *n, PointsToNode::NodeType nt,
+                               PointsToNode::EscapeState es, bool done) {
+  PointsToNode* ptadr = ptnode_adr(n->_idx);
+  ptadr->_node = n;
+  ptadr->set_node_type(nt);
+
+  // inline set_escape_state(idx, es);
+  PointsToNode::EscapeState old_es = ptadr->escape_state();
+  if (es > old_es)
+    ptadr->set_escape_state(es);
+
+  if (done)
+    _processed.set(n->_idx);
+}
+
 PointsToNode::EscapeState ConnectionGraph::escape_state(Node *n, PhaseTransform *phase) {
   uint idx = n->_idx;
   PointsToNode::EscapeState es;
 
-  // If we are still collecting we don't know the answer yet
-  if (_collecting)
+  // If we are still collecting or there were no non-escaping allocations
+  // we don't know the answer yet
+  if (_collecting || !_has_allocations)
     return PointsToNode::UnknownEscape;
 
   // if the node was created after the escape computation, return
   // UnknownEscape
   if (idx >= (uint)_nodes->length())
@@ -167 +197 @@
     return es;
 
   // compute max escape state of anything this node could point to
   VectorSet ptset(Thread::current()->resource_area());
   PointsTo(ptset, n, phase);
-  for( VectorSetI i(&ptset); i.test() && es != PointsToNode::GlobalEscape; ++i ) {
+  for(VectorSetI i(&ptset); i.test() && es != PointsToNode::GlobalEscape; ++i) {
     uint pt = i.elem;
-    PointsToNode::EscapeState pes = _nodes->at(pt).escape_state();
+    PointsToNode::EscapeState pes = _nodes->adr_at(pt)->escape_state();
     if (pes > es)
       es = pes;
   }
   // cache the computed escape state
   assert(es != PointsToNode::UnknownEscape, "should have computed an escape state");
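The fold above relies on EscapeState being an ordered lattice (UnknownEscape < NoEscape < ArgEscape < GlobalEscape), so a plain enum comparison computes the join. A minimal standalone sketch of the same idea, with illustrative names rather than HotSpot's types:

    #include <algorithm>
    #include <vector>

    // Ordered as in PointsToNode::EscapeState: larger value = escapes further.
    enum EscapeState { UnknownEscape = 0, NoEscape, ArgEscape, GlobalEscape };

    // Join the escape states of everything a node may point to.
    EscapeState max_escape(const std::vector<EscapeState>& points_to_set) {
      EscapeState es = NoEscape;
      for (EscapeState pes : points_to_set) {
        es = std::max(es, pes);         // same effect as: if (pes > es) es = pes;
        if (es == GlobalEscape) break;  // lattice top, nothing can raise it further
      }
      return es;
    }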
@@ -183 +213 @@
 
 void ConnectionGraph::PointsTo(VectorSet &ptset, Node * n, PhaseTransform *phase) {
   VectorSet visited(Thread::current()->resource_area());
   GrowableArray<uint> worklist;
 
-  n = skip_casts(n);
+  n = n->uncast();
   PointsToNode npt = _nodes->at_grow(n->_idx);
 
   // If we have a JavaObject, return just that object
   if (npt.node_type() == PointsToNode::JavaObject) {
     ptset.set(n->_idx);
     return;
   }
-  // we may have a Phi which has not been processed
-  if (npt._node == NULL) {
-    assert(n->is_Phi(), "unprocessed node must be a Phi");
-    record_for_escape_analysis(n);
-    npt = _nodes->at(n->_idx);
-  }
+  assert(npt._node != NULL, "unregistered node");
+
   worklist.push(n->_idx);
   while(worklist.length() > 0) {
     int ni = worklist.pop();
     PointsToNode pn = _nodes->at_grow(ni);
-    if (!visited.test(ni)) {
-      visited.set(ni);
-
+    if (!visited.test_set(ni)) {
       // ensure that all inputs of a Phi have been processed
-      if (_collecting && pn._node->is_Phi()) {
-        PhiNode *phi = pn._node->as_Phi();
-        process_phi_escape(phi, phase);
-      }
+      assert(!_collecting || !pn._node->is_Phi() || _processed.test(ni),"");
 
       int edges_processed = 0;
       for (uint e = 0; e < pn.edge_count(); e++) {
+        uint etgt = pn.edge_target(e);
         PointsToNode::EdgeType et = pn.edge_type(e);
         if (et == PointsToNode::PointsToEdge) {
-          ptset.set(pn.edge_target(e));
+          ptset.set(etgt);
           edges_processed++;
         } else if (et == PointsToNode::DeferredEdge) {
-          worklist.push(pn.edge_target(e));
+          worklist.push(etgt);
           edges_processed++;
+        } else {
+          assert(false,"neither PointsToEdge or DeferredEdge");
         }
       }
       if (edges_processed == 0) {
-        // no deferred or pointsto edges found. Assume the value was set outside
-        // this method. Add the phantom object to the pointsto set.
+        // no deferred or pointsto edges found. Assume the value was set
+        // outside this method. Add the phantom object to the pointsto set.
         ptset.set(_phantom_object);
       }
     }
   }
 }
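PointsTo() is a plain worklist walk: DeferredEdges are followed transitively, PointsToEdge targets are collected, and a node with no edges at all contributes the phantom object ("set outside this method"). A self-contained sketch over a toy graph, assuming simplified node and edge types:

    #include <set>
    #include <vector>

    enum EdgeType { PointsToEdge, DeferredEdge };
    struct Edge    { EdgeType type; int target; };
    struct ToyNode { std::vector<Edge> edges; };

    const int kPhantomObject = -1;  // stands in for _phantom_object

    std::set<int> points_to(const std::vector<ToyNode>& g, int start) {
      std::set<int> ptset, visited;
      std::vector<int> worklist;
      worklist.push_back(start);
      while (!worklist.empty()) {
        int ni = worklist.back(); worklist.pop_back();
        if (!visited.insert(ni).second) continue;  // like visited.test_set(ni)
        int edges_processed = 0;
        for (const Edge& e : g[ni].edges) {
          if (e.type == PointsToEdge) { ptset.insert(e.target);    edges_processed++; }
          else /* DeferredEdge */     { worklist.push_back(e.target); edges_processed++; }
        }
        if (edges_processed == 0)      // value was set outside this method
          ptset.insert(kPhantomObject);
      }
      return ptset;
    }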
@@ -237 +261 @@
 
   uint i = 0;
   PointsToNode *ptn = ptnode_adr(ni);
 
   while(i < ptn->edge_count()) {
+    uint t = ptn->edge_target(i);
+    PointsToNode *ptt = ptnode_adr(t);
     if (ptn->edge_type(i) != PointsToNode::DeferredEdge) {
       i++;
     } else {
-      uint t = ptn->edge_target(i);
-      PointsToNode *ptt = ptnode_adr(t);
       ptn->remove_edge(t, PointsToNode::DeferredEdge);
-      if(!visited.test(t)) {
-        visited.set(t);
+      if(!visited.test_set(t)) {
         for (uint j = 0; j < ptt->edge_count(); j++) {
           uint n1 = ptt->edge_target(j);
           PointsToNode *pt1 = ptnode_adr(n1);
           switch(ptt->edge_type(j)) {
             case PointsToNode::PointsToEdge:
               add_pointsto_edge(ni, n1);
+              if(n1 == _phantom_object) {
+                // Special case - field set outside (globally escaping).
+                ptn->set_escape_state(PointsToNode::GlobalEscape);
+              }
               break;
             case PointsToNode::DeferredEdge:
               add_deferred_edge(ni, n1);
               break;
             case PointsToNode::FieldEdge:
@@ -289 +316 @@
       add_pointsto_edge(fi, to_i);
     }
   }
 }
 
-// Add a deferred edge from node given by "from_i" to any field of adr_i whose offset
-// matches "offset"
+// Add a deferred edge from node given by "from_i" to any field of adr_i
+// whose offset matches "offset".
 void ConnectionGraph::add_deferred_edge_to_fields(uint from_i, uint adr_i, int offs) {
   PointsToNode an = _nodes->at_grow(adr_i);
   for (uint fe = 0; fe < an.edge_count(); fe++) {
     assert(an.edge_type(fe) == PointsToNode::FieldEdge, "expecting a field edge");
     int fi = an.edge_target(fe);
@@ -308 +335 @@
       add_deferred_edge(from_i, fi);
     }
   }
 }
 
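Both *_to_fields helpers scan a JavaObject's FieldEdges and link only the fields whose offset matches, where an unknown offset (Type::OffsetBot) matches everything. A hedged sketch of just that matching rule, with a toy field record in place of PointsToNode:

    #include <vector>

    const int kOffsetBot = -2;  // wildcard offset, like Type::OffsetBot
    struct Field { int node_id; int offset; };

    // Return the field nodes whose offset is compatible with "offs"; the
    // caller would add a points-to or deferred edge to each of them.
    std::vector<int> matching_fields(const std::vector<Field>& fields, int offs) {
      std::vector<int> out;
      for (const Field& f : fields)
        if (f.offset == offs || f.offset == kOffsetBot || offs == kOffsetBot)
          out.push_back(f.node_id);
      return out;
    }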
-//
-// Search memory chain of "mem" to find a MemNode whose address
-// is the specified alias index. Returns the MemNode found or the
-// first non-MemNode encountered.
-//
-Node *ConnectionGraph::find_mem(Node *mem, int alias_idx, PhaseGVN *igvn) {
-  if (mem == NULL)
-    return mem;
-  while (mem->is_Mem()) {
-    const Type *at = igvn->type(mem->in(MemNode::Address));
-    if (at != Type::TOP) {
-      assert (at->isa_ptr() != NULL, "pointer type required.");
-      int idx = _compile->get_alias_index(at->is_ptr());
-      if (idx == alias_idx)
-        break;
-    }
-    mem = mem->in(MemNode::Memory);
-  }
-  return mem;
+// Helper functions
+
+static Node* get_addp_base(Node *addp) {
+  assert(addp->is_AddP(), "must be AddP");
+  //
+  // AddP cases for Base and Address inputs:
+  // case #1. Direct object's field reference:
+  //     Allocate
+  //       |
+  //     Proj #5 ( oop result )
+  //       |
+  //     CheckCastPP (cast to instance type)
+  //      | |
+  //     AddP  ( base == address )
+  //
+  // case #2. Indirect object's field reference:
+  //      Phi
+  //       |
+  //     CastPP (cast to instance type)
+  //      | |
+  //     AddP  ( base == address )
+  //
+  // case #3. Raw object's field reference for Initialize node:
+  //      Allocate
+  //        |
+  //      Proj #5 ( oop result )
+  //  top   |
+  //     \  |
+  //     AddP  ( base == top )
+  //
+  // case #4. Array's element reference:
+  //   {CheckCastPP | CastPP}
+  //     |  | |
+  //     |  AddP ( array's element offset )
+  //     |  |
+  //     AddP ( array's offset )
+  //
+  // case #5. Raw object's field reference for arraycopy stub call:
+  //          The inline_native_clone() case when the arraycopy stub is called
+  //          after the allocation before Initialize and CheckCastPP nodes.
+  //      Allocate
+  //        |
+  //      Proj #5 ( oop result )
+  //       | |
+  //       AddP  ( base == address )
+  //
+  // case #6. Constant Pool or ThreadLocal or Raw object's field reference:
+  //        ConP # Object from Constant Pool.
+  //  top   |
+  //     \  |
+  //     AddP  ( base == top )
+  //
+  Node *base = addp->in(AddPNode::Base)->uncast();
+  if (base->is_top()) { // The AddP case #3 and #6.
+    base = addp->in(AddPNode::Address)->uncast();
+    assert(base->Opcode() == Op_ConP || base->Opcode() == Op_ThreadLocal ||
+           base->is_Mem() && base->bottom_type() == TypeRawPtr::NOTNULL ||
+           base->is_Proj() && base->in(0)->is_Allocate(), "sanity");
+  }
+  return base;
+}
+
+static Node* find_second_addp(Node* addp, Node* n) {
+  assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");
+
+  Node* addp2 = addp->raw_out(0);
+  if (addp->outcnt() == 1 && addp2->is_AddP() &&
+      addp2->in(AddPNode::Base) == n &&
+      addp2->in(AddPNode::Address) == addp) {
+
+    assert(addp->in(AddPNode::Base) == n, "expecting the same base");
+    //
+    // Find array's offset to push it on worklist first and
+    // as result process an array's element offset first (pushed second)
+    // to avoid CastPP for the array's offset.
+    // Otherwise the inserted CastPP (LocalVar) will point to what
+    // the AddP (Field) points to. Which would be wrong since
+    // the algorithm expects the CastPP has the same point as
+    // AddP's base CheckCastPP (LocalVar).
+    //
+    // ArrayAllocation
+    //   |
+    //   CheckCastPP
+    //   |
+    //  memProj (from ArrayAllocation CheckCastPP)
+    //   |  ||
+    //   |  ||   Int (element index)
+    //   |  ||    |   ConI (log(element size))
+    //   |  ||    |   /
+    //   |  ||   LShift
+    //   |  ||  /
+    //   |  AddP (array's element offset)
+    //   |  |
+    //   |  | ConI (array's offset: #12(32-bits) or #24(64-bits))
+    //   | / /
+    //   AddP (array's offset)
+    //    |
+    //   Load/Store (memory operation on array's element)
+    //
+    return addp2;
+  }
+  return NULL;
 }
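All six cases reduce to one rule: the object an AddP addresses is its Base input stripped of casts, except in the raw cases (#3 and #6) where Base is top and the Address input holds the object. A toy rendering of that rule, using a simplified node model rather than C2's:

    #include <cassert>

    struct ToyNode {
      ToyNode* base;     // AddPNode::Base input (may be the top sentinel)
      ToyNode* address;  // AddPNode::Address input
      ToyNode* skip;     // non-null if this node is a cast to look through (uncast())
      bool     is_top;
    };

    ToyNode* uncast(ToyNode* n) { while (n->skip != nullptr) n = n->skip; return n; }

    ToyNode* get_addp_base(ToyNode* addp) {
      ToyNode* base = uncast(addp->base);
      if (base->is_top)            // AddP cases #3 and #6 above
        base = uncast(addp->address);
      assert(!base->is_top);       // must end at a real object
      return base;
    }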
 
 //
 // Adjust the type and inputs of an AddP which computes the
 // address of a field of an instance
 //
 void ConnectionGraph::split_AddP(Node *addp, Node *base, PhaseGVN *igvn) {
+  const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
+  assert(base_t != NULL && base_t->is_instance(), "expecting instance oopptr");
   const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
-  const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
-  assert(t != NULL, "expecting oopptr");
-  assert(base_t != NULL && base_t->is_instance(), "expecting instance oopptr");
+  if (t == NULL) {
+    // We are computing a raw address for a store captured by an Initialize
+    // compute an appropriate address type.
+    assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
+    assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
+    int offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
+    assert(offs != Type::OffsetBot, "offset must be a constant");
+    t = base_t->add_offset(offs)->is_oopptr();
+  }
   uint inst_id = base_t->instance_id();
   assert(!t->is_instance() || t->instance_id() == inst_id,
          "old type must be non-instance or match new type");
   const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
-  // ensure an alias index is allocated for the instance type
+  // Do NOT remove the next call: ensure a new alias index is allocated
+  // for the instance type
   int alias_idx = _compile->get_alias_index(tinst);
   igvn->set_type(addp, tinst);
   // record the allocation in the node map
   set_map(addp->_idx, get_map(base->_idx));
-  // if the Address input is not the appropriate instance type (due to intervening
-  // casts,) insert a cast
+  // if the Address input is not the appropriate instance type
+  // (due to intervening casts,) insert a cast
   Node *adr = addp->in(AddPNode::Address);
   const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr();
-  if (atype->instance_id() != inst_id) {
+  if (atype != NULL && atype->instance_id() != inst_id) {
     assert(!atype->is_instance(), "no conflicting instances");
     const TypeOopPtr *new_atype = base_t->add_offset(atype->offset())->isa_oopptr();
     Node *acast = new (_compile, 2) CastPPNode(adr, new_atype);
     acast->set_req(0, adr->in(0));
     igvn->set_type(acast, new_atype);
@@ -370 +489 @@
     }
     igvn->hash_delete(addp);
     addp->set_req(AddPNode::Base, bcast);
     addp->set_req(AddPNode::Address, acast);
     igvn->hash_insert(addp);
-    record_for_optimizer(addp);
   }
+  // Put on IGVN worklist since at least addp's type was changed above.
+  record_for_optimizer(addp);
 }
 
 //
 // Create a new version of orig_phi if necessary. Returns either the newly
 // created phi or an existing phi.  Sets create_new to indicate whether a new
@@ -384 +504 @@
 PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, PhaseGVN *igvn, bool &new_created) {
   Compile *C = _compile;
   new_created = false;
   int phi_alias_idx = C->get_alias_index(orig_phi->adr_type());
   // nothing to do if orig_phi is bottom memory or matches alias_idx
-  if (phi_alias_idx == Compile::AliasIdxBot || phi_alias_idx == alias_idx) {
+  if (phi_alias_idx == alias_idx) {
     return orig_phi;
   }
   // have we already created a Phi for this alias index?
   PhiNode *result = get_map_phi(orig_phi->_idx);
-  const TypePtr *atype = C->get_adr_type(alias_idx);
   if (result != NULL && C->get_alias_index(result->adr_type()) == alias_idx) {
     return result;
   }
   if ((int)C->unique() + 2*NodeLimitFudgeFactor > MaxNodeLimit) {
     if (C->do_escape_analysis() == true && !C->failing()) {
@@ -402 +521 @@
       // to the Compile object, and the C2Compiler will see it and retry.
       C->record_failure(C2Compiler::retry_no_escape_analysis());
     }
     return NULL;
   }
-
   orig_phi_worklist.append_if_missing(orig_phi);
+  const TypePtr *atype = C->get_adr_type(alias_idx);
   result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype);
   set_map_phi(orig_phi->_idx, result);
   igvn->set_type(result, result->bottom_type());
   record_for_optimizer(result);
   new_created = true;
@@ -421 +540 @@
 PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, PhaseGVN *igvn) {
 
   assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory");
   Compile *C = _compile;
   bool new_phi_created;
   PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, igvn, new_phi_created);
   if (!new_phi_created) {
     return result;
   }
 
   GrowableArray<PhiNode *> phi_list;
@@ -434 +553 @@
   PhiNode *phi = orig_phi;
   uint idx = 1;
   bool finished = false;
   while(!finished) {
     while (idx < phi->req()) {
-      Node *mem = find_mem(phi->in(idx), alias_idx, igvn);
+      Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist, igvn);
       if (mem != NULL && mem->is_Phi()) {
-        PhiNode *nphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, igvn, new_phi_created);
+        PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, igvn, new_phi_created);
         if (new_phi_created) {
           // found a phi for which we created a new split, push current one on worklist and begin
           // processing new one
           phi_list.push(phi);
           cur_input.push(idx);
           phi = mem->as_Phi();
-          result = nphi;
+          result = newphi;
           idx = 1;
           continue;
         } else {
-          mem = nphi;
+          mem = newphi;
         }
       }
       if (C->failing()) {
         return NULL;
       }
@@ -459 +578 @@
     }
 #ifdef ASSERT
     // verify that the new Phi has an input for each input of the original
     assert( phi->req() == result->req(), "must have same number of inputs.");
     assert( result->in(0) != NULL && result->in(0) == phi->in(0), "regions must match");
+#endif
+    // Check if all new phi's inputs have specified alias index.
+    // Otherwise use old phi.
     for (uint i = 1; i < phi->req(); i++) {
-      assert((phi->in(i) == NULL) == (result->in(i) == NULL), "inputs must correspond.");
+      Node* in = result->in(i);
+      assert((phi->in(i) == NULL) == (in == NULL), "inputs must correspond.");
     }
-#endif
     // we have finished processing a Phi, see if there are any more to do
     finished = (phi_list.length() == 0 );
     if (!finished) {
       phi = phi_list.pop();
       idx = cur_input.pop();
-      PhiNode *prev_phi = get_map_phi(phi->_idx);
-      prev_phi->set_req(idx++, result);
-      result = prev_phi;
+      PhiNode *prev_result = get_map_phi(phi->_idx);
+      prev_result->set_req(idx++, result);
+      result = prev_result;
     }
   }
   return result;
 }
+
+
+//
+// The next methods are derived from methods in MemNode.
+//
+static Node *step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *tinst) {
+  Node *mem = mmem;
+  // TypeInstPtr::NOTNULL+any is an OOP with unknown offset - generally
+  // means an array I have not precisely typed yet.  Do not do any
+  // alias stuff with it any time soon.
+  if( tinst->base() != Type::AnyPtr &&
+      !(tinst->klass()->is_java_lang_Object() &&
+        tinst->offset() == Type::OffsetBot) ) {
+    mem = mmem->memory_at(alias_idx);
+    // Update input if it is progress over what we have now
+  }
+  return mem;
+}
+
+//
+// Search memory chain of "mem" to find a MemNode whose address
+// is the specified alias index.
+//
+Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *> &orig_phis, PhaseGVN *phase) {
+  if (orig_mem == NULL)
+    return orig_mem;
+  Compile* C = phase->C;
+  const TypeOopPtr *tinst = C->get_adr_type(alias_idx)->isa_oopptr();
+  bool is_instance = (tinst != NULL) && tinst->is_instance();
+  Node *prev = NULL;
+  Node *result = orig_mem;
+  while (prev != result) {
+    prev = result;
+    if (result->is_Mem()) {
+      MemNode *mem = result->as_Mem();
+      const Type *at = phase->type(mem->in(MemNode::Address));
+      if (at != Type::TOP) {
+        assert (at->isa_ptr() != NULL, "pointer type required.");
+        int idx = C->get_alias_index(at->is_ptr());
+        if (idx == alias_idx)
+          break;
+      }
+      result = mem->in(MemNode::Memory);
+    }
+    if (!is_instance)
+      continue;  // don't search further for non-instance types
+    // skip over a call which does not affect this memory slice
+    if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
+      Node *proj_in = result->in(0);
+      if (proj_in->is_Call()) {
+        CallNode *call = proj_in->as_Call();
+        if (!call->may_modify(tinst, phase)) {
+          result = call->in(TypeFunc::Memory);
+        }
+      } else if (proj_in->is_Initialize()) {
+        AllocateNode* alloc = proj_in->as_Initialize()->allocation();
+        // Stop if this is the initialization for the object instance
+        // which contains this memory slice, otherwise skip over it.
+        if (alloc == NULL || alloc->_idx != tinst->instance_id()) {
+          result = proj_in->in(TypeFunc::Memory);
+        }
+      } else if (proj_in->is_MemBar()) {
+        result = proj_in->in(TypeFunc::Memory);
+      }
+    } else if (result->is_MergeMem()) {
+      MergeMemNode *mmem = result->as_MergeMem();
+      result = step_through_mergemem(mmem, alias_idx, tinst);
+      if (result == mmem->base_memory()) {
+        // Didn't find instance memory, search through general slice recursively.
+        result = mmem->memory_at(C->get_general_index(alias_idx));
+        result = find_inst_mem(result, alias_idx, orig_phis, phase);
+        if (C->failing()) {
+          return NULL;
+        }
+        mmem->set_memory_at(alias_idx, result);
+      }
+    } else if (result->is_Phi() &&
+               C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) {
+      Node *un = result->as_Phi()->unique_input(phase);
+      if (un != NULL) {
+        result = un;
+      } else {
+        break;
+      }
+    }
+  }
+  if (is_instance && result->is_Phi()) {
+    PhiNode *mphi = result->as_Phi();
+    assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
+    const TypePtr *t = mphi->adr_type();
+    if (C->get_alias_index(t) != alias_idx) {
+      result = split_memory_phi(mphi, alias_idx, orig_phis, phase);
+    }
+  }
+  // the result is either MemNode, PhiNode, InitializeNode.
+  return result;
+}
+
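Stripped of the Call/Initialize/MergeMem special cases, find_inst_mem() is a walk up the memory chain that steps over every store belonging to a different alias class and stops at the first one in the slice of interest. A minimal sketch of that core loop over a toy memory chain:

    struct ToyMem {
      int     alias_idx;  // alias class this memory state belongs to
      ToyMem* input;      // prior memory state (MemNode::Memory input)
    };

    // Return the first node in the chain that is in "alias_idx", or the
    // chain's end (nullptr) if no store to that slice was found.
    ToyMem* find_mem_in_slice(ToyMem* mem, int alias_idx) {
      while (mem != nullptr && mem->alias_idx != alias_idx)
        mem = mem->input;  // unrelated store: skip over it
      return mem;
    }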
 
 //
 //  Convert the types of unescaped objects to instance types where possible,
 //  propagate the new type information through the graph, and update memory
 //  edges and MergeMem inputs to reflect the new type.
@@ -574 +794 @@
   PhaseGVN *igvn = _compile->initial_gvn();
   uint new_index_start = (uint) _compile->num_alias_types();
   VectorSet visited(Thread::current()->resource_area());
   VectorSet ptset(Thread::current()->resource_area());
 
-  // Phase 1:  Process possible allocations from alloc_worklist.  Create instance
-  //           types for the CheckCastPP for allocations where possible.
+
+  //  Phase 1:  Process possible allocations from alloc_worklist.
+  //  Create instance types for the CheckCastPP for allocations where possible.
   while (alloc_worklist.length() != 0) {
     Node *n = alloc_worklist.pop();
     uint ni = n->_idx;
+    const TypeOopPtr* tinst = NULL;
     if (n->is_Call()) {
       CallNode *alloc = n->as_Call();
       // copy escape information to call node
-      PointsToNode ptn = _nodes->at(alloc->_idx);
+      PointsToNode* ptn = _nodes->adr_at(alloc->_idx);
       PointsToNode::EscapeState es = escape_state(alloc, igvn);
-      alloc->_escape_state = es;
-      // find CheckCastPP of call return value
-      n = alloc->proj_out(TypeFunc::Parms);
-      if (n != NULL && n->outcnt() == 1) {
-        n = n->unique_out();
-        if (n->Opcode() != Op_CheckCastPP) {
-          continue;
-        }
-      } else {
-        continue;
-      }
-      // we have an allocation or call which returns a Java object, see if it is unescaped
-      if (es != PointsToNode::NoEscape || !ptn._unique_type) {
-        continue; // can't make a unique type
-      }
+      // We have an allocation or call which returns a Java object,
+      // see if it is unescaped.
+      if (es != PointsToNode::NoEscape || !ptn->_scalar_replaceable)
+        continue;
       if (alloc->is_Allocate()) {
         // Set the scalar_replaceable flag before the next check.
         alloc->as_Allocate()->_is_scalar_replaceable = true;
       }
-
+      // find CheckCastPP of call return value
+      n = alloc->result_cast();
+      if (n == NULL ||             // No uses except Initialize or
+          !n->is_CheckCastPP())    // not unique CheckCastPP.
+        continue;
+      // The inline code for Object.clone() casts the allocation result to
+      // java.lang.Object and then to the actual type of the allocated
+      // object. Detect this case and use the second cast.
+      if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL
+          && igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeKlassPtr::OBJECT) {
+        Node *cast2 = NULL;
+        for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
+          Node *use = n->fast_out(i);
+          if (use->is_CheckCastPP()) {
+            cast2 = use;
+            break;
+          }
+        }
+        if (cast2 != NULL) {
+          n = cast2;
+        } else {
+          continue;
+        }
+      }
+      set_escape_state(n->_idx, es);
+      // in order for an object to be stack-allocatable, it must be:
+      //   - a direct allocation (not a call returning an object)
+      //   - non-escaping
+      //   - eligible to be a unique type
+      //   - not determined to be ineligible by escape analysis
       set_map(alloc->_idx, n);
       set_map(n->_idx, alloc);
-      const TypeInstPtr *t = igvn->type(n)->isa_instptr();
-      // Unique types which are arrays are not currently supported.
-      // The check for AllocateArray is needed in case an array
-      // allocation is immediately cast to Object
-      if (t == NULL || alloc->is_AllocateArray())
-        continue;  // not a TypeInstPtr
-      const TypeOopPtr *tinst = t->cast_to_instance(ni);
+      const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
+      if (t == NULL)
+        continue;  // not a TypeOopPtr
+      tinst = t->cast_to_instance(ni);
       igvn->hash_delete(n);
       igvn->set_type(n, tinst);
       n->raise_bottom_type(tinst);
       igvn->hash_insert(n);
+      record_for_optimizer(n);
+      if (alloc->is_Allocate() && ptn->_scalar_replaceable &&
+          (t->isa_instptr() || t->isa_aryptr())) {
+        // An allocation may have an Initialize which has raw stores. Scan
+        // the users of the raw allocation result and push AddP users
+        // on alloc_worklist.
+        Node *raw_result = alloc->proj_out(TypeFunc::Parms);
+        assert (raw_result != NULL, "must have an allocation result");
+        for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) {
+          Node *use = raw_result->fast_out(i);
+          if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes
+            Node* addp2 = find_second_addp(use, raw_result);
+            if (addp2 != NULL) {
+              assert(alloc->is_AllocateArray(),"array allocation was expected");
+              alloc_worklist.append_if_missing(addp2);
+            }
+            alloc_worklist.append_if_missing(use);
+          } else if (use->is_Initialize()) {
+            memnode_worklist.append_if_missing(use);
+          }
+        }
+      }
     } else if (n->is_AddP()) {
       ptset.Clear();
-      PointsTo(ptset, n->in(AddPNode::Address), igvn);
+      PointsTo(ptset, get_addp_base(n), igvn);
       assert(ptset.Size() == 1, "AddP address is unique");
-      Node *base = get_map(ptset.getelem());
+      uint elem = ptset.getelem(); // Allocation node's index
+      if (elem == _phantom_object)
+        continue; // Assume the value was set outside this method.
+      Node *base = get_map(elem);  // CheckCastPP node
       split_AddP(n, base, igvn);
-    } else if (n->is_Phi() || n->Opcode() == Op_CastPP || n->Opcode() == Op_CheckCastPP) {
+      tinst = igvn->type(base)->isa_oopptr();
+    } else if (n->is_Phi() ||
+               n->is_CheckCastPP() ||
+               (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {
       if (visited.test_set(n->_idx)) {
         assert(n->is_Phi(), "loops only through Phi's");
         continue;  // already processed
       }
       ptset.Clear();
       PointsTo(ptset, n, igvn);
       if (ptset.Size() == 1) {
+        uint elem = ptset.getelem(); // Allocation node's index
+        if (elem == _phantom_object)
+          continue; // Assume the value was set outside this method.
+        Node *val = get_map(elem);   // CheckCastPP node
         TypeNode *tn = n->as_Type();
-        Node *val = get_map(ptset.getelem());
-        const TypeInstPtr *val_t = igvn->type(val)->isa_instptr();
-        assert(val_t != NULL && val_t->is_instance(), "instance type expected.");
-        const TypeInstPtr *tn_t = igvn->type(tn)->isa_instptr();
+        tinst = igvn->type(val)->isa_oopptr();
+        assert(tinst != NULL && tinst->is_instance() &&
+               tinst->instance_id() == elem , "instance type expected.");
+        const TypeOopPtr *tn_t = igvn->type(tn)->isa_oopptr();
 
-        if (tn_t != NULL && val_t->cast_to_instance(TypeOopPtr::UNKNOWN_INSTANCE)->higher_equal(tn_t)) {
+        if (tn_t != NULL &&
+            tinst->cast_to_instance(TypeOopPtr::UNKNOWN_INSTANCE)->higher_equal(tn_t)) {
           igvn->hash_delete(tn);
-          igvn->set_type(tn, val_t);
-          tn->set_type(val_t);
+          igvn->set_type(tn, tinst);
+          tn->set_type(tinst);
           igvn->hash_insert(tn);
+          record_for_optimizer(n);
         }
       }
     } else {
       continue;
     }
     // push users on appropriate worklist
     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
       Node *use = n->fast_out(i);
       if(use->is_Mem() && use->in(MemNode::Address) == n) {
-        memnode_worklist.push(use);
-      } else if (use->is_AddP() || use->is_Phi() || use->Opcode() == Op_CastPP || use->Opcode() == Op_CheckCastPP) {
-        alloc_worklist.push(use);
-      }
-    }
-
-  }
+        memnode_worklist.append_if_missing(use);
+      } else if (use->is_Initialize()) {
+        memnode_worklist.append_if_missing(use);
+      } else if (use->is_MergeMem()) {
+        mergemem_worklist.append_if_missing(use);
+      } else if (use->is_Call() && tinst != NULL) {
+        // Look for MergeMem nodes for calls which reference unique allocation
+        // (through CheckCastPP nodes) even for debug info.
+        Node* m = use->in(TypeFunc::Memory);
+        uint iid = tinst->instance_id();
+        while (m->is_Proj() && m->in(0)->is_Call() &&
+               m->in(0) != use && m->in(0)->_idx != iid) {
+          m = m->in(0)->in(TypeFunc::Memory);
+        }
+        if (m->is_MergeMem()) {
+          mergemem_worklist.append_if_missing(m);
+        }
+      } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
+        Node* addp2 = find_second_addp(use, n);
+        if (addp2 != NULL) {
+          alloc_worklist.append_if_missing(addp2);
+        }
+        alloc_worklist.append_if_missing(use);
+      } else if (use->is_Phi() ||
+                 use->is_CheckCastPP() ||
+                 (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
+        alloc_worklist.append_if_missing(use);
+      }
+    }
+
+  }
+  // New alias types were created in split_AddP().
   uint new_index_end = (uint) _compile->num_alias_types();
 
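The payoff of Phase 1 is that each non-escaping allocation gets its own instance id, so (instance id, field offset) pairs become disjoint alias classes and loads/stores on different instances stop interfering. A sketch of that bookkeeping, assuming a toy alias table rather than Compile's:

    #include <map>
    #include <utility>

    struct ToyAliasTable {
      std::map<std::pair<int,int>, int> index;  // (instance_id, offset) -> alias idx
      int next_idx = 1;

      // Like the role Compile::get_alias_index() plays here: the first request
      // for a new instance type allocates a fresh, disjoint memory slice.
      int get_alias_index(int instance_id, int offset) {
        auto key = std::make_pair(instance_id, offset);
        auto it = index.find(key);
        if (it != index.end()) return it->second;
        return index[key] = next_idx++;
      }
    };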
   // Phase 2:  Process MemNode's from memnode_worklist. compute new address type and
   //           compute new values for Memory inputs  (the Memory inputs are not
   //           actually updated until phase 4.)
   if (memnode_worklist.length() == 0)
     return;  // nothing to do
 
-
   while (memnode_worklist.length() != 0) {
     Node *n = memnode_worklist.pop();
+    if (visited.test_set(n->_idx))
+      continue;
     if (n->is_Phi()) {
       assert(n->as_Phi()->adr_type() != TypePtr::BOTTOM, "narrow memory slice required");
       // we don't need to do anything, but the users must be pushed if we haven't processed
       // this Phi before
-      if (visited.test_set(n->_idx))
-        continue;
+    } else if (n->is_Initialize()) {
+      // we don't need to do anything, but the users of the memory projection must be pushed
+      n = n->as_Initialize()->proj_out(TypeFunc::Memory);
+      if (n == NULL)
+        continue;
     } else {
       assert(n->is_Mem(), "memory node required.");
       Node *addr = n->in(MemNode::Address);
+      assert(addr->is_AddP(), "AddP required");
       const Type *addr_t = igvn->type(addr);
       if (addr_t == Type::TOP)
         continue;
       assert (addr_t->isa_ptr() != NULL, "pointer type required.");
       int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
-      Node *mem = find_mem(n->in(MemNode::Memory), alias_idx, igvn);
-      if (mem->is_Phi()) {
-        mem = split_memory_phi(mem->as_Phi(), alias_idx, orig_phis, igvn);
-      }
+      assert ((uint)alias_idx < new_index_end, "wrong alias index");
+      Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis, igvn);
       if (_compile->failing()) {
         return;
       }
-      if (mem != n->in(MemNode::Memory))
+      if (mem != n->in(MemNode::Memory)) {
         set_map(n->_idx, mem);
+        _nodes->adr_at(n->_idx)->_node = n;
+      }
       if (n->is_Load()) {
         continue;  // don't push users
       } else if (n->is_LoadStore()) {
         // get the memory projection
         for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
@@ -710 +1011 @@
     }
     // push user on appropriate worklist
     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
       Node *use = n->fast_out(i);
       if (use->is_Phi()) {
-        memnode_worklist.push(use);
+        memnode_worklist.append_if_missing(use);
       } else if(use->is_Mem() && use->in(MemNode::Memory) == n) {
-        memnode_worklist.push(use);
+        memnode_worklist.append_if_missing(use);
+      } else if (use->is_Initialize()) {
+        memnode_worklist.append_if_missing(use);
       } else if (use->is_MergeMem()) {
-        mergemem_worklist.push(use);
+        mergemem_worklist.append_if_missing(use);
       }
     }
   }
 
-  // Phase 3:  Process MergeMem nodes from mergemem_worklist.  Walk each memory slice
-  //           moving the first node encountered of each instance type to the
-  //           the input corresponding to its alias index.
+  // Phase 3:  Process MergeMem nodes from mergemem_worklist.
+  //           Walk each memory moving the first node encountered of each
+  //           instance type to the input corresponding to its alias index.
   while (mergemem_worklist.length() != 0) {
     Node *n = mergemem_worklist.pop();
     assert(n->is_MergeMem(), "MergeMem node required.");
+    if (visited.test_set(n->_idx))
+      continue;
     MergeMemNode *nmm = n->as_MergeMem();
     // Note: we don't want to use MergeMemStream here because we only want to
-    // scan inputs which exist at the start, not ones we add during processing
+    // scan inputs which exist at the start, not ones we add during processing.
     uint nslices = nmm->req();
     igvn->hash_delete(nmm);
     for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
-      Node * mem = nmm->in(i);
-      Node * cur = NULL;
+      Node* mem = nmm->in(i);
+      Node* cur = NULL;
       if (mem == NULL || mem->is_top())
         continue;
       while (mem->is_Mem()) {
         const Type *at = igvn->type(mem->in(MemNode::Address));
         if (at != Type::TOP) {
@@ -752 +1057 @@
           }
         }
         mem = mem->in(MemNode::Memory);
       }
       nmm->set_memory_at(i, (cur != NULL) ? cur : mem);
-      if (mem->is_Phi()) {
-        // We have encountered a Phi, we need to split the Phi for
-        // any instance of the current type if we haven't encountered
-        // a value of the instance along the chain.
-        for (uint ni = new_index_start; ni < new_index_end; ni++) {
-          if((uint)_compile->get_general_index(ni) == i) {
-            Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
-            if (nmm->is_empty_memory(m)) {
-              m = split_memory_phi(mem->as_Phi(), ni, orig_phis, igvn);
-              if (_compile->failing()) {
-                return;
-              }
-              nmm->set_memory_at(ni, m);
-            }
-          }
-        }
-      }
-    }
-    igvn->hash_insert(nmm);
-    record_for_optimizer(nmm);
-  }
+      // Find any instance of the current type if we haven't encountered
+      // a value of the instance along the chain.
+      for (uint ni = new_index_start; ni < new_index_end; ni++) {
+        if((uint)_compile->get_general_index(ni) == i) {
+          Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
+          if (nmm->is_empty_memory(m)) {
+            Node* result = find_inst_mem(mem, ni, orig_phis, igvn);
+            if (_compile->failing()) {
+              return;
+            }
+            nmm->set_memory_at(ni, result);
+          }
+        }
+      }
+    }
+    // Find the rest of instances values
+    for (uint ni = new_index_start; ni < new_index_end; ni++) {
+      const TypeOopPtr *tinst = igvn->C->get_adr_type(ni)->isa_oopptr();
+      Node* result = step_through_mergemem(nmm, ni, tinst);
+      if (result == nmm->base_memory()) {
+        // Didn't find instance memory, search through general slice recursively.
+        result = nmm->memory_at(igvn->C->get_general_index(ni));
+        result = find_inst_mem(result, ni, orig_phis, igvn);
+        if (_compile->failing()) {
+          return;
+        }
+        nmm->set_memory_at(ni, result);
+      }
+    }
+    igvn->hash_insert(nmm);
+    record_for_optimizer(nmm);
+
+    // Propagate new memory slices to following MergeMem nodes.
+    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
+      Node *use = n->fast_out(i);
+      if (use->is_Call()) {
+        CallNode* in = use->as_Call();
+        if (in->proj_out(TypeFunc::Memory) != NULL) {
+          Node* m = in->proj_out(TypeFunc::Memory);
+          for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) {
+            Node* mm = m->fast_out(j);
+            if (mm->is_MergeMem()) {
+              mergemem_worklist.append_if_missing(mm);
+            }
+          }
+        }
+        if (use->is_Allocate()) {
+          use = use->as_Allocate()->initialization();
+          if (use == NULL) {
+            continue;
+          }
+        }
+      }
+      if (use->is_Initialize()) {
+        InitializeNode* in = use->as_Initialize();
+        if (in->proj_out(TypeFunc::Memory) != NULL) {
+          Node* m = in->proj_out(TypeFunc::Memory);
+          for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) {
+            Node* mm = m->fast_out(j);
+            if (mm->is_MergeMem()) {
+              mergemem_worklist.append_if_missing(mm);
+            }
+          }
+        }
+      }
+    }
+  }
 
-  // Phase 4:  Update the inputs of non-instance memory Phis and the Memory input of memnodes
-  //
+  // Phase 4:  Update the inputs of non-instance memory Phis and
+  //           the Memory input of memnodes
   // First update the inputs of any non-instance Phi's from
   // which we split out an instance Phi.  Note we don't have
   // to recursively process Phi's encountered on the input memory
   // chains as is done in split_memory_phi() since they will
   // also be processed here.
@@ -787 +1138 @@
     PhiNode *phi = orig_phis.pop();
     int alias_idx = _compile->get_alias_index(phi->adr_type());
     igvn->hash_delete(phi);
     for (uint i = 1; i < phi->req(); i++) {
       Node *mem = phi->in(i);
-      Node *new_mem = find_mem(mem, alias_idx, igvn);
+      Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis, igvn);
+      if (_compile->failing()) {
+        return;
+      }
       if (mem != new_mem) {
         phi->set_req(i, new_mem);
       }
     }
     igvn->hash_insert(phi);
@@ -801 +1155 @@
   // Update the memory inputs of MemNodes with the value we computed
   // in Phase 2.
   for (int i = 0; i < _nodes->length(); i++) {
     Node *nmem = get_map(i);
     if (nmem != NULL) {
-      Node *n = _nodes->at(i)._node;
+      Node *n = _nodes->adr_at(i)->_node;
       if (n != NULL && n->is_Mem()) {
         igvn->hash_delete(n);
         n->set_req(MemNode::Memory, nmem);
         igvn->hash_insert(n);
         record_for_optimizer(n);
@@ -813 +1167 @@
       }
     }
   }
 
 void ConnectionGraph::compute_escape() {
+
+  // 1. Populate Connection Graph with Ideal nodes.
+
+  Unique_Node_List worklist_init;
+  worklist_init.map(_compile->unique(), NULL);  // preallocate space
+
+  // Initialize worklist
+  if (_compile->root() != NULL) {
+    worklist_init.push(_compile->root());
+  }
+
+  GrowableArray<int> cg_worklist;
+  PhaseGVN* igvn = _compile->initial_gvn();
+  bool has_allocations = false;
+
+  // Push all useful nodes onto CG list and set their type.
+  for( uint next = 0; next < worklist_init.size(); ++next ) {
+    Node* n = worklist_init.at(next);
+    record_for_escape_analysis(n, igvn);
+    if (n->is_Call() &&
+        _nodes->adr_at(n->_idx)->node_type() == PointsToNode::JavaObject) {
+      has_allocations = true;
+    }
+    if(n->is_AddP())
+      cg_worklist.append(n->_idx);
+    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
+      Node* m = n->fast_out(i);   // Get user
+      worklist_init.push(m);
+    }
+  }
+
+  if (has_allocations) {
+    _has_allocations = true;
+  } else {
+    _has_allocations = false;
+    _collecting = false;
+    return; // Nothing to do.
+  }
+
+  // 2. First pass to create simple CG edges (doesn't require walking the CG).
+  for( uint next = 0; next < _delayed_worklist.size(); ++next ) {
+    Node* n = _delayed_worklist.at(next);
+    build_connection_graph(n, igvn);
+  }
+
+  // 3. Pass to create fields edges (Allocate -F-> AddP).
+  for( int next = 0; next < cg_worklist.length(); ++next ) {
+    int ni = cg_worklist.at(next);
+    build_connection_graph(_nodes->adr_at(ni)->_node, igvn);
+  }
+
+  cg_worklist.clear();
+  cg_worklist.append(_phantom_object);
+
+  // 4. Build Connection Graph edges which need
+  //    a walk of the connection graph.
+  for (uint ni = 0; ni < (uint)_nodes->length(); ni++) {
+    PointsToNode* ptn = _nodes->adr_at(ni);
+    Node *n = ptn->_node;
+    if (n != NULL) { // Call, AddP, LoadP, StoreP
+      build_connection_graph(n, igvn);
+      if (ptn->node_type() != PointsToNode::UnknownType)
+        cg_worklist.append(n->_idx); // Collect CG nodes
+    }
+  }
+
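Step 1 is the usual C2 idiom for visiting every live node exactly once: a deduplicating worklist seeded with the root, scanned by index while users are appended. The same shape as a standalone routine, with a toy node type in place of C2's:

    #include <unordered_set>
    #include <vector>

    struct ToyNode { std::vector<ToyNode*> outs; };  // users, like fast_outs()

    void visit_all(ToyNode* root) {
      std::vector<ToyNode*> worklist;
      std::unordered_set<ToyNode*> seen;   // Unique_Node_List dedups like this
      worklist.push_back(root);
      seen.insert(root);
      for (size_t next = 0; next < worklist.size(); ++next) {
        ToyNode* n = worklist[next];
        // record_for_escape_analysis(n, igvn) would run here
        for (ToyNode* m : n->outs)
          if (seen.insert(m).second)
            worklist.push_back(m);
      }
    }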
1238 VectorSet ptset(Thread::current()->resource_area());
1239 GrowableArray<Node*> alloc_worklist;
818 GrowableArray<int> worklist; 1240 GrowableArray<int> worklist;
819 GrowableArray<Node *> alloc_worklist;
820 VectorSet visited(Thread::current()->resource_area());
821 PhaseGVN *igvn = _compile->initial_gvn();
822
823 // process Phi nodes from the deferred list, they may not have
824 while(_deferred.size() > 0) {
825 Node * n = _deferred.pop();
826 PhiNode * phi = n->as_Phi();
827
828 process_phi_escape(phi, igvn);
829 }
830
831 VectorSet ptset(Thread::current()->resource_area());
832 1241
833 // remove deferred edges from the graph and collect 1242 // remove deferred edges from the graph and collect
834 // information we will need for type splitting 1243 // information we will need for type splitting
835 for (uint ni = 0; ni < (uint)_nodes->length(); ni++) { 1244 for( int next = 0; next < cg_worklist.length(); ++next ) {
836 PointsToNode * ptn = _nodes->adr_at(ni); 1245 int ni = cg_worklist.at(next);
1246 PointsToNode* ptn = _nodes->adr_at(ni);
837 PointsToNode::NodeType nt = ptn->node_type(); 1247 PointsToNode::NodeType nt = ptn->node_type();
838
839 if (nt == PointsToNode::UnknownType) {
840 continue; // not a node we are interested in
841 }
842 Node *n = ptn->_node; 1248 Node *n = ptn->_node;
843 if (nt == PointsToNode::LocalVar || nt == PointsToNode::Field) { 1249 if (nt == PointsToNode::LocalVar || nt == PointsToNode::Field) {
844 remove_deferred(ni); 1250 remove_deferred(ni);
845 if (n->is_AddP()) { 1251 if (n->is_AddP()) {
846 // if this AddP computes an address which may point to more that one 1252 // If this AddP computes an address which may point to more that one
847 // object, nothing the address points to can be a unique type. 1253 // object, nothing the address points to can be scalar replaceable.
848 Node *base = n->in(AddPNode::Base); 1254 Node *base = get_addp_base(n);
849 ptset.Clear(); 1255 ptset.Clear();
850 PointsTo(ptset, base, igvn); 1256 PointsTo(ptset, base, igvn);
851 if (ptset.Size() > 1) { 1257 if (ptset.Size() > 1) {
852 for( VectorSetI j(&ptset); j.test(); ++j ) { 1258 for( VectorSetI j(&ptset); j.test(); ++j ) {
853 PointsToNode *ptaddr = _nodes->adr_at(j.elem); 1259 uint pt = j.elem;
854 ptaddr->_unique_type = false; 1260 ptnode_adr(pt)->_scalar_replaceable = false;
855 } 1261 }
856 } 1262 }
857 } 1263 }
858 } else if (n->is_Call()) { 1264 } else if (nt == PointsToNode::JavaObject && n->is_Call()) {
859 // initialize _escape_state of calls to GlobalEscape 1265 // Push call on alloc_worklist (allocations are calls)
860 n->as_Call()->_escape_state = PointsToNode::GlobalEscape; 1266 // for processing by split_unique_types().
861 // push call on alloc_worklist (allocations are calls) 1267 alloc_worklist.append(n);
862 // for processing by split_unique_types() 1268 }
863 alloc_worklist.push(n); 1269 }
864 } 1270
865 }
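(Editorial sketch.) The remove_deferred(ni) calls above collapse chains of DeferredEdges (LocalVar indirections) into direct edges before type splitting. A rough sketch of that collapse over the toy CGNode/Edge types from the earlier sketch; the visited set guards against cycles through Phis, and all names are illustrative:

// Sketch of deferred-edge elimination: follow DeferredEdge chains with a
// worklist, keep the node's own direct edges, and lift only the
// PointsToEdges found behind the chains.
#include <set>

void remove_deferred_sketch(std::vector<CGNode>& g, uint32_t ni) {
  std::set<uint32_t> visited;            // Phis can form cycles
  std::vector<uint32_t> work(1, ni);
  std::vector<Edge> direct;
  while (!work.empty()) {
    uint32_t cur = work.back(); work.pop_back();
    if (!visited.insert(cur).second) continue;
    for (const Edge& e : g[cur].edges) {
      if (e.type == DeferredEdge)        work.push_back(e.target); // keep chasing
      else if (cur == ni)                direct.push_back(e);      // own direct edge
      else if (e.type == PointsToEdge)   direct.push_back(e);      // lifted target
    }
  }
  g[ni].edges = direct;
}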
866 // push all GlobalEscape nodes on the worklist 1271 // push all GlobalEscape nodes on the worklist
867 for (uint nj = 0; nj < (uint)_nodes->length(); nj++) { 1272 for( int next = 0; next < cg_worklist.length(); ++next ) {
868 if (_nodes->at(nj).escape_state() == PointsToNode::GlobalEscape) { 1273 int nk = cg_worklist.at(next);
869 worklist.append(nj); 1274 if (_nodes->adr_at(nk)->escape_state() == PointsToNode::GlobalEscape)
870 } 1275 worklist.append(nk);
871 } 1276 }
872 // mark all nodes reachable from GlobalEscape nodes 1277 // mark all nodes reachable from GlobalEscape nodes
873 while(worklist.length() > 0) { 1278 while(worklist.length() > 0) {
874 PointsToNode n = _nodes->at(worklist.pop()); 1279 PointsToNode n = _nodes->at(worklist.pop());
875 for (uint ei = 0; ei < n.edge_count(); ei++) { 1280 for (uint ei = 0; ei < n.edge_count(); ei++) {
876 uint npi = n.edge_target(ei); 1281 uint npi = n.edge_target(ei);
877 PointsToNode *np = ptnode_adr(npi); 1282 PointsToNode *np = ptnode_adr(npi);
878 if (np->escape_state() != PointsToNode::GlobalEscape) { 1283 if (np->escape_state() < PointsToNode::GlobalEscape) {
879 np->set_escape_state(PointsToNode::GlobalEscape); 1284 np->set_escape_state(PointsToNode::GlobalEscape);
880 worklist.append_if_missing(npi); 1285 worklist.append_if_missing(npi);
881 } 1286 }
882 } 1287 }
883 } 1288 }
884 1289
885 // push all ArgEscape nodes on the worklist 1290 // push all ArgEscape nodes on the worklist
886 for (uint nk = 0; nk < (uint)_nodes->length(); nk++) { 1291 for( int next = 0; next < cg_worklist.length(); ++next ) {
887 if (_nodes->at(nk).escape_state() == PointsToNode::ArgEscape) 1292 int nk = cg_worklist.at(next);
1293 if (_nodes->adr_at(nk)->escape_state() == PointsToNode::ArgEscape)
888 worklist.push(nk); 1294 worklist.push(nk);
889 } 1295 }
890 // mark all nodes reachable from ArgEscape nodes 1296 // mark all nodes reachable from ArgEscape nodes
891 while(worklist.length() > 0) { 1297 while(worklist.length() > 0) {
892 PointsToNode n = _nodes->at(worklist.pop()); 1298 PointsToNode n = _nodes->at(worklist.pop());
893
894 for (uint ei = 0; ei < n.edge_count(); ei++) { 1299 for (uint ei = 0; ei < n.edge_count(); ei++) {
895 uint npi = n.edge_target(ei); 1300 uint npi = n.edge_target(ei);
896 PointsToNode *np = ptnode_adr(npi); 1301 PointsToNode *np = ptnode_adr(npi);
897 if (np->escape_state() != PointsToNode::ArgEscape) { 1302 if (np->escape_state() < PointsToNode::ArgEscape) {
898 np->set_escape_state(PointsToNode::ArgEscape); 1303 np->set_escape_state(PointsToNode::ArgEscape);
899 worklist.append_if_missing(npi); 1304 worklist.append_if_missing(npi);
900 } 1305 }
901 } 1306 }
902 } 1307 }
1308
1309 // push all NoEscape nodes on the worklist
1310 for( int next = 0; next < cg_worklist.length(); ++next ) {
1311 int nk = cg_worklist.at(next);
1312 if (_nodes->adr_at(nk)->escape_state() == PointsToNode::NoEscape)
1313 worklist.push(nk);
1314 }
1315 // mark all nodes reachable from NoEscape nodes
1316 while(worklist.length() > 0) {
1317 PointsToNode n = _nodes->at(worklist.pop());
1318 for (uint ei = 0; ei < n.edge_count(); ei++) {
1319 uint npi = n.edge_target(ei);
1320 PointsToNode *np = ptnode_adr(npi);
1321 if (np->escape_state() < PointsToNode::NoEscape) {
1322 np->set_escape_state(PointsToNode::NoEscape);
1323 worklist.append_if_missing(npi);
1324 }
1325 }
1326 }
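(Editorial sketch.) The three worklist loops above are one monotone propagation run at three lattice levels; the `<` tests rely on the EscapeState ordering NoEscape < ArgEscape < GlobalEscape. A compact sketch of the shared pattern over the toy graph from the first sketch:

// Sketch of the propagation used three times above: seed the worklist with
// every node already at state 's', then raise any reachable node sitting
// below 's'. States only ever go up, so each pass terminates.
#include <deque>

void propagate_sketch(std::vector<CGNode>& g, EscapeState s) {
  std::deque<uint32_t> work;
  for (uint32_t i = 0; i < (uint32_t)g.size(); i++)
    if (g[i].escape == s) work.push_back(i);
  while (!work.empty()) {
    uint32_t ni = work.front(); work.pop_front();
    for (const Edge& e : g[ni].edges) {
      if (g[e.target].escape < s) {      // raise, never lower
        g[e.target].escape = s;
        work.push_back(e.target);
      }
    }
  }
}
// Run for GlobalEscape, then ArgEscape, then NoEscape, mirroring the loops
// above; strongest-first means a later, weaker pass never needs to revisit.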
1327
903 _collecting = false; 1328 _collecting = false;
904 1329
905 // Now use the escape information to create unique types for 1330 has_allocations = false; // Are there scalar replaceable allocations?
906 // unescaped objects 1331
907 split_unique_types(alloc_worklist); 1332 for( int next = 0; next < alloc_worklist.length(); ++next ) {
908 if (_compile->failing()) return; 1333 Node* n = alloc_worklist.at(next);
909 1334 uint ni = n->_idx;
910 // Clean up after split unique types. 1335 PointsToNode* ptn = _nodes->adr_at(ni);
911 ResourceMark rm; 1336 PointsToNode::EscapeState es = ptn->escape_state();
912 PhaseRemoveUseless pru(_compile->initial_gvn(), _compile->for_igvn()); 1337 if (es == PointsToNode::NoEscape &&
913 } 1338 ptn->_scalar_replaceable) {
914 1339 has_allocations = true;
915 Node * ConnectionGraph::skip_casts(Node *n) { 1340 break;
916 while(n->Opcode() == Op_CastPP || n->Opcode() == Op_CheckCastPP) { 1341 }
917 n = n->in(1); 1342 }
918 } 1343 if (!has_allocations) {
919 return n; 1344 return; // Nothing to do.
920 } 1345 }
921 1346
922 void ConnectionGraph::process_phi_escape(PhiNode *phi, PhaseTransform *phase) { 1347 if(_compile->AliasLevel() >= 3 && EliminateAllocations) {
923 1348 // Now use the escape information to create unique types for
924 if (phi->type()->isa_oopptr() == NULL) 1349 // unescaped objects
925 return; // nothing to do if not an oop 1350 split_unique_types(alloc_worklist);
926 1351 if (_compile->failing()) return;
927 PointsToNode *ptadr = ptnode_adr(phi->_idx); 1352
928 int incount = phi->req(); 1353 // Clean up after split unique types.
929 int non_null_inputs = 0; 1354 ResourceMark rm;
930 1355 PhaseRemoveUseless pru(_compile->initial_gvn(), _compile->for_igvn());
931 for (int i = 1; i < incount ; i++) { 1356
932 if (phi->in(i) != NULL) 1357 #ifdef ASSERT
933 non_null_inputs++; 1358 } else if (PrintEscapeAnalysis || PrintEliminateAllocations) {
934 } 1359 tty->print("=== No allocations eliminated for ");
935 if (non_null_inputs == ptadr->_inputs_processed) 1360 C()->method()->print_short_name();
936 return; // no new inputs since the last time this node was processed, 1361 if(!EliminateAllocations) {
937 // the current information is valid 1362 tty->print(" since EliminateAllocations is off ===");
938 1363 } else if(_compile->AliasLevel() < 3) {
939 ptadr->_inputs_processed = non_null_inputs; // prevent recursive processing of this node 1364 tty->print(" since AliasLevel < 3 ===");
940 for (int j = 1; j < incount ; j++) { 1365 }
941 Node * n = phi->in(j); 1366 tty->cr();
942 if (n == NULL) 1367 #endif
943 continue; // ignore NULL
944 n = skip_casts(n);
945 if (n->is_top() || n == phi)
946 continue; // ignore top or inputs which go back to this node
947 int nopc = n->Opcode();
948 PointsToNode npt = _nodes->at(n->_idx);
949 if (_nodes->at(n->_idx).node_type() == PointsToNode::JavaObject) {
950 add_pointsto_edge(phi->_idx, n->_idx);
951 } else {
952 add_deferred_edge(phi->_idx, n->_idx);
953 }
954 } 1368 }
955 } 1369 }
956 1370
957 void ConnectionGraph::process_call_arguments(CallNode *call, PhaseTransform *phase) { 1371 void ConnectionGraph::process_call_arguments(CallNode *call, PhaseTransform *phase) {
958 1372
959 _processed.set(call->_idx);
960 switch (call->Opcode()) { 1373 switch (call->Opcode()) {
961 1374 #ifdef ASSERT
962 // arguments to allocation and locking don't escape
963 case Op_Allocate: 1375 case Op_Allocate:
964 case Op_AllocateArray: 1376 case Op_AllocateArray:
965 case Op_Lock: 1377 case Op_Lock:
966 case Op_Unlock: 1378 case Op_Unlock:
967 break; 1379 assert(false, "should be done already");
1380 break;
1381 #endif
1382 case Op_CallLeafNoFP:
1383 {
1384 // Stub calls: objects do not escape but they are not scalar replaceable.
1385 // Adjust escape state for outgoing arguments.
1386 const TypeTuple * d = call->tf()->domain();
1387 VectorSet ptset(Thread::current()->resource_area());
1388 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1389 const Type* at = d->field_at(i);
1390 Node *arg = call->in(i)->uncast();
1391 const Type *aat = phase->type(arg);
1392 if (!arg->is_top() && at->isa_ptr() && aat->isa_ptr()) {
1393 assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
1394 aat->isa_ptr() != NULL, "expecting a Ptr");
1395 set_escape_state(arg->_idx, PointsToNode::ArgEscape);
1396 if (arg->is_AddP()) {
1397 //
1398 // The inline_native_clone() case when the arraycopy stub is called
1399 // after the allocation before Initialize and CheckCastPP nodes.
1400 //
1401 // Set AddP's base (Allocate) as not scalar replaceable since
1402 // pointer to the base (with offset) is passed as argument.
1403 //
1404 arg = get_addp_base(arg);
1405 }
1406 ptset.Clear();
1407 PointsTo(ptset, arg, phase);
1408 for( VectorSetI j(&ptset); j.test(); ++j ) {
1409 uint pt = j.elem;
1410 set_escape_state(pt, PointsToNode::ArgEscape);
1411 }
1412 }
1413 }
1414 break;
1415 }
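(Editorial sketch.) On the Op_CallLeafNoFP path above, a pointer handed to a runtime stub escapes no further than the callee, and an interior pointer (AddP) redirects the analysis to the allocation it is based on. A small sketch of the resulting state change over the toy graph, where 'points_to' stands in for the PointsTo(ptset, arg, phase) walk; note that ArgEscape alone already keeps the allocation out of scalar replacement, which is what the AddP comment above is after:

// Sketch of the stub-argument handling above: raise the argument and
// everything it may point to to ArgEscape (states only ever go up).
void stub_arg_sketch(std::vector<CGNode>& g, uint32_t arg,
                     const std::vector<uint32_t>& points_to) {
  if (g[arg].escape < ArgEscape) g[arg].escape = ArgEscape;
  for (uint32_t pt : points_to)
    if (g[pt].escape < ArgEscape) g[pt].escape = ArgEscape;
}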
968 1416
969 case Op_CallStaticJava: 1417 case Op_CallStaticJava:
970 // For a static call, we know exactly what method is being called. 1418 // For a static call, we know exactly what method is being called.
971 // Use bytecode estimator to record the call's escape effects 1419 // Use bytecode estimator to record the call's escape effects
972 { 1420 {
973 ciMethod *meth = call->as_CallJava()->method(); 1421 ciMethod *meth = call->as_CallJava()->method();
974 if (meth != NULL) { 1422 BCEscapeAnalyzer *call_analyzer = (meth !=NULL) ? meth->get_bcea() : NULL;
1423 // fall-through if not a Java method or no analyzer information
1424 if (call_analyzer != NULL) {
975 const TypeTuple * d = call->tf()->domain(); 1425 const TypeTuple * d = call->tf()->domain();
976 BCEscapeAnalyzer call_analyzer(meth);
977 VectorSet ptset(Thread::current()->resource_area()); 1426 VectorSet ptset(Thread::current()->resource_area());
1427 bool copy_dependencies = false;
978 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { 1428 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
979 const Type* at = d->field_at(i); 1429 const Type* at = d->field_at(i);
980 int k = i - TypeFunc::Parms; 1430 int k = i - TypeFunc::Parms;
981 1431
982 if (at->isa_oopptr() != NULL) { 1432 if (at->isa_oopptr() != NULL) {
983 Node *arg = skip_casts(call->in(i)); 1433 Node *arg = call->in(i)->uncast();
984 1434
985 if (!call_analyzer.is_arg_stack(k)) { 1435 bool global_escapes = false;
1436 bool fields_escapes = false;
1437 if (!call_analyzer->is_arg_stack(k)) {
986 // The argument globally escapes, mark everything it could point to 1438 set_escape_state(arg->_idx, PointsToNode::GlobalEscape);
987 ptset.Clear(); 1439 set_escape_state(arg->_idx, PointsToNode::GlobalEscape);
988 PointsTo(ptset, arg, phase); 1440 global_escapes = true;
989 for( VectorSetI j(&ptset); j.test(); ++j ) { 1441 } else {
990 uint pt = j.elem; 1442 if (!call_analyzer->is_arg_local(k)) {
991 1443 // The argument itself doesn't escape, but any fields might
1444 fields_escapes = true;
1445 }
1446 set_escape_state(arg->_idx, PointsToNode::ArgEscape);
1447 copy_dependencies = true;
1448 }
1449
1450 ptset.Clear();
1451 PointsTo(ptset, arg, phase);
1452 for( VectorSetI j(&ptset); j.test(); ++j ) {
1453 uint pt = j.elem;
1454 if (global_escapes) {
1455 // The argument globally escapes; mark everything it could point to
992 set_escape_state(pt, PointsToNode::GlobalEscape); 1456 set_escape_state(pt, PointsToNode::GlobalEscape);
993 } 1457 } else {
994 } else if (!call_analyzer.is_arg_local(k)) { 1458 if (fields_escapes) {
995 // The argument itself doesn't escape, but any fields might 1459 // The argument itself doesn't escape, but any fields might
996 ptset.Clear(); 1460 add_edge_from_fields(pt, _phantom_object, Type::OffsetBot);
997 PointsTo(ptset, arg, phase); 1461 }
998 for( VectorSetI j(&ptset); j.test(); ++j ) { 1462 set_escape_state(pt, PointsToNode::ArgEscape);
999 uint pt = j.elem;
1000 add_edge_from_fields(pt, _phantom_object, Type::OffsetBot);
1001 } 1463 }
1002 } 1464 }
1003 } 1465 }
1004 } 1466 }
1005 call_analyzer.copy_dependencies(C()->dependencies()); 1467 if (copy_dependencies)
1468 call_analyzer->copy_dependencies(C()->dependencies());
1006 break; 1469 break;
1007 } 1470 }
1008 // fall-through if not a Java method
1009 } 1471 }
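(Editorial sketch.) For static Java calls, the two BCEscapeAnalyzer predicates above drive a three-way outcome per argument: not stack-allocatable means GlobalEscape; on the stack but not local means ArgEscape with the fields of everything it points to leaking; fully local means plain ArgEscape. A sketch of that decision over the toy graph, with stays_on_stack/stays_local mirroring is_arg_stack/is_arg_local; the real code routes the field leak through add_edge_from_fields(pt, _phantom_object, OffsetBot), for which a single FieldEdge to 'phantom' stands in here:

// Sketch of the per-argument classification above (illustrative names).
struct ArgSummary { bool stays_on_stack; bool stays_local; };

void java_arg_sketch(std::vector<CGNode>& g, uint32_t arg,
                     const std::vector<uint32_t>& points_to,
                     ArgSummary a, uint32_t phantom) {
  if (!a.stays_on_stack) {               // callee may publish the reference
    g[arg].escape = GlobalEscape;
    for (uint32_t pt : points_to) g[pt].escape = GlobalEscape;
    return;
  }
  if (g[arg].escape < ArgEscape) g[arg].escape = ArgEscape;
  for (uint32_t pt : points_to) {
    if (!a.stays_local)                  // fields may leak even if arg doesn't
      g[pt].edges.push_back(Edge{phantom, FieldEdge});
    if (g[pt].escape < ArgEscape) g[pt].escape = ArgEscape;
  }
}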
1010 1472
1011 default: 1473 default:
1012 // Some other type of call, assume the worst case: all arguments 1474 // Fall-through here if not a Java method or no analyzer information
1475 // or some other type of call, assume the worst case: all arguments
1013 // globally escape. 1476 // globally escape.
1014 { 1477 {
1015 // adjust escape state for outgoing arguments 1478 // adjust escape state for outgoing arguments
1016 const TypeTuple * d = call->tf()->domain(); 1479 const TypeTuple * d = call->tf()->domain();
1017 VectorSet ptset(Thread::current()->resource_area()); 1480 VectorSet ptset(Thread::current()->resource_area());
1018 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { 1481 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1019 const Type* at = d->field_at(i); 1482 const Type* at = d->field_at(i);
1020
1021 if (at->isa_oopptr() != NULL) { 1483 if (at->isa_oopptr() != NULL) {
1022 Node *arg = skip_casts(call->in(i)); 1484 Node *arg = call->in(i)->uncast();
1485 set_escape_state(arg->_idx, PointsToNode::GlobalEscape);
1023 ptset.Clear(); 1486 ptset.Clear();
1024 PointsTo(ptset, arg, phase); 1487 PointsTo(ptset, arg, phase);
1025 for( VectorSetI j(&ptset); j.test(); ++j ) { 1488 for( VectorSetI j(&ptset); j.test(); ++j ) {
1026 uint pt = j.elem; 1489 uint pt = j.elem;
1027
1028 set_escape_state(pt, PointsToNode::GlobalEscape); 1490 set_escape_state(pt, PointsToNode::GlobalEscape);
1491 PointsToNode *ptadr = ptnode_adr(pt);
1029 } 1492 }
1030 } 1493 }
1031 } 1494 }
1032 } 1495 }
1033 } 1496 }
1034 } 1497 }
1035 void ConnectionGraph::process_call_result(ProjNode *resproj, PhaseTransform *phase) { 1498 void ConnectionGraph::process_call_result(ProjNode *resproj, PhaseTransform *phase) {
1499 PointsToNode *ptadr = ptnode_adr(resproj->_idx);
1500
1036 CallNode *call = resproj->in(0)->as_Call(); 1501 CallNode *call = resproj->in(0)->as_Call();
1037
1038 PointsToNode *ptadr = ptnode_adr(resproj->_idx);
1039
1040 ptadr->_node = resproj;
1041 ptadr->set_node_type(PointsToNode::LocalVar);
1042 set_escape_state(resproj->_idx, PointsToNode::UnknownEscape);
1043 _processed.set(resproj->_idx);
1044
1045 switch (call->Opcode()) { 1502 switch (call->Opcode()) {
1046 case Op_Allocate: 1503 case Op_Allocate:
1047 { 1504 {
1048 Node *k = call->in(AllocateNode::KlassNode); 1505 Node *k = call->in(AllocateNode::KlassNode);
1049 const TypeKlassPtr *kt; 1506 const TypeKlassPtr *kt;
1055 assert(kt != NULL, "TypeKlassPtr required."); 1512 assert(kt != NULL, "TypeKlassPtr required.");
1056 ciKlass* cik = kt->klass(); 1513 ciKlass* cik = kt->klass();
1057 ciInstanceKlass* ciik = cik->as_instance_klass(); 1514 ciInstanceKlass* ciik = cik->as_instance_klass();
1058 1515
1059 PointsToNode *ptadr = ptnode_adr(call->_idx); 1516 PointsToNode *ptadr = ptnode_adr(call->_idx);
1060 ptadr->set_node_type(PointsToNode::JavaObject); 1517 PointsToNode::EscapeState es;
1518 uint edge_to;
1061 if (cik->is_subclass_of(_compile->env()->Thread_klass()) || ciik->has_finalizer()) { 1519 if (cik->is_subclass_of(_compile->env()->Thread_klass()) || ciik->has_finalizer()) {
1062 set_escape_state(call->_idx, PointsToNode::GlobalEscape); 1520 es = PointsToNode::GlobalEscape;
1063 add_pointsto_edge(resproj->_idx, _phantom_object); 1521 edge_to = _phantom_object; // Could not be worse
1064 } else { 1522 } else {
1065 set_escape_state(call->_idx, PointsToNode::NoEscape); 1523 es = PointsToNode::NoEscape;
1066 add_pointsto_edge(resproj->_idx, call->_idx); 1524 edge_to = call->_idx;
1067 } 1525 }
1068 _processed.set(call->_idx); 1526 set_escape_state(call->_idx, es);
1527 add_pointsto_edge(resproj->_idx, edge_to);
1528 _processed.set(resproj->_idx);
1069 break; 1529 break;
1070 } 1530 }
1071 1531
1072 case Op_AllocateArray: 1532 case Op_AllocateArray:
1073 { 1533 {
1074 PointsToNode *ptadr = ptnode_adr(call->_idx); 1534 PointsToNode *ptadr = ptnode_adr(call->_idx);
1075 ptadr->set_node_type(PointsToNode::JavaObject); 1535 int length = call->in(AllocateNode::ALength)->find_int_con(-1);
1536 if (length < 0 || length > EliminateAllocationArraySizeLimit) {
1537 // Not scalar replaceable if the length is not constant or too big.
1538 ptadr->_scalar_replaceable = false;
1539 }
1076 set_escape_state(call->_idx, PointsToNode::NoEscape); 1540 set_escape_state(call->_idx, PointsToNode::NoEscape);
1077 _processed.set(call->_idx);
1078 add_pointsto_edge(resproj->_idx, call->_idx); 1541 add_pointsto_edge(resproj->_idx, call->_idx);
1079 break; 1542 _processed.set(resproj->_idx);
1080 } 1543 break;
1081 1544 }
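(Editorial sketch.) The Op_AllocateArray case above gates scalar replacement on the array length being a compile-time constant that is not too large, since replacement turns each element into a separate value. A one-line sketch of the gate, as a hypothetical free function rather than HotSpot API; 'limit' stands in for EliminateAllocationArraySizeLimit, and -1 means "length is not a constant", matching find_int_con(-1):

// Sketch of the array gate above: constant, small lengths only.
bool array_scalar_replaceable_sketch(int const_length, int limit) {
  return const_length >= 0 && const_length <= limit;
}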
1082 case Op_Lock:
1083 case Op_Unlock:
1084 break;
1085 1545
1086 case Op_CallStaticJava: 1546 case Op_CallStaticJava:
1087 // For a static call, we know exactly what method is being called. 1547 // For a static call, we know exactly what method is being called.
1088 // Use bytecode estimator to record whether the call's return value escapes 1548 // Use bytecode estimator to record whether the call's return value escapes
1089 { 1549 {
1550 bool done = true;
1090 const TypeTuple *r = call->tf()->range(); 1551 const TypeTuple *r = call->tf()->range();
1091 const Type* ret_type = NULL; 1552 const Type* ret_type = NULL;
1092 1553
1093 if (r->cnt() > TypeFunc::Parms) 1554 if (r->cnt() > TypeFunc::Parms)
1094 ret_type = r->field_at(TypeFunc::Parms); 1555 ret_type = r->field_at(TypeFunc::Parms);
1095 1556
1096 // Note: we use isa_ptr() instead of isa_oopptr() here because the 1557 // Note: we use isa_ptr() instead of isa_oopptr() here because the
1097 // _multianewarray functions return a TypeRawPtr. 1558 // _multianewarray functions return a TypeRawPtr.
1098 if (ret_type == NULL || ret_type->isa_ptr() == NULL) 1559 if (ret_type == NULL || ret_type->isa_ptr() == NULL) {
1560 _processed.set(resproj->_idx);
1099 break; // doesn't return a pointer type 1561 break; // doesn't return a pointer type
1100 1562 }
1101 ciMethod *meth = call->as_CallJava()->method(); 1563 ciMethod *meth = call->as_CallJava()->method();
1564 const TypeTuple * d = call->tf()->domain();
1102 if (meth == NULL) { 1565 if (meth == NULL) {
1103 // not a Java method, assume global escape 1566 // not a Java method, assume global escape
1104 set_escape_state(call->_idx, PointsToNode::GlobalEscape); 1567 set_escape_state(call->_idx, PointsToNode::GlobalEscape);
1105 if (resproj != NULL) 1568 if (resproj != NULL)
1106 add_pointsto_edge(resproj->_idx, _phantom_object); 1569 add_pointsto_edge(resproj->_idx, _phantom_object);
1107 } else { 1570 } else {
1108 BCEscapeAnalyzer call_analyzer(meth); 1571 BCEscapeAnalyzer *call_analyzer = meth->get_bcea();
1109 VectorSet ptset(Thread::current()->resource_area()); 1572 VectorSet ptset(Thread::current()->resource_area());
1110 1573 bool copy_dependencies = false;
1111 if (call_analyzer.is_return_local() && resproj != NULL) { 1574
1575 if (call_analyzer->is_return_allocated()) {
1576 // Returns a newly allocated unescaped object; simply
1577 // update dependency information.
1578 // Mark it as NoEscape so that objects referenced by
1579 // its fields will be marked as NoEscape at least.
1580 set_escape_state(call->_idx, PointsToNode::NoEscape);
1581 if (resproj != NULL)
1582 add_pointsto_edge(resproj->_idx, call->_idx);
1583 copy_dependencies = true;
1584 } else if (call_analyzer->is_return_local() && resproj != NULL) {
1112 // determine whether any arguments are returned 1585 // determine whether any arguments are returned
1113 const TypeTuple * d = call->tf()->domain();
1114 set_escape_state(call->_idx, PointsToNode::NoEscape); 1586 set_escape_state(call->_idx, PointsToNode::NoEscape);
1115 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { 1587 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1116 const Type* at = d->field_at(i); 1588 const Type* at = d->field_at(i);
1117 1589
1118 if (at->isa_oopptr() != NULL) { 1590 if (at->isa_oopptr() != NULL) {
1119 Node *arg = skip_casts(call->in(i)); 1591 Node *arg = call->in(i)->uncast();
1120 1592
1121 if (call_analyzer.is_arg_returned(i - TypeFunc::Parms)) { 1593 if (call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
1122 PointsToNode *arg_esp = _nodes->adr_at(arg->_idx); 1594 PointsToNode *arg_esp = _nodes->adr_at(arg->_idx);
1123 if (arg_esp->node_type() == PointsToNode::JavaObject) 1595 if (arg_esp->node_type() == PointsToNode::UnknownType)
1596 done = false;
1597 else if (arg_esp->node_type() == PointsToNode::JavaObject)
1124 add_pointsto_edge(resproj->_idx, arg->_idx); 1598 add_pointsto_edge(resproj->_idx, arg->_idx);
1125 else 1599 else
1126 add_deferred_edge(resproj->_idx, arg->_idx); 1600 add_deferred_edge(resproj->_idx, arg->_idx);
1127 arg_esp->_hidden_alias = true; 1601 arg_esp->_hidden_alias = true;
1128 } 1602 }
1129 } 1603 }
1130 } 1604 }
1605 copy_dependencies = true;
1131 } else { 1606 } else {
1132 set_escape_state(call->_idx, PointsToNode::GlobalEscape); 1607 set_escape_state(call->_idx, PointsToNode::GlobalEscape);
1133 if (resproj != NULL) 1608 if (resproj != NULL)
1134 add_pointsto_edge(resproj->_idx, _phantom_object); 1609 add_pointsto_edge(resproj->_idx, _phantom_object);
1135 } 1610 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1136 call_analyzer.copy_dependencies(C()->dependencies()); 1611 const Type* at = d->field_at(i);
1137 } 1612 if (at->isa_oopptr() != NULL) {
1613 Node *arg = call->in(i)->uncast();
1614 PointsToNode *arg_esp = _nodes->adr_at(arg->_idx);
1615 arg_esp->_hidden_alias = true;
1616 }
1617 }
1618 }
1619 if (copy_dependencies)
1620 call_analyzer->copy_dependencies(C()->dependencies());
1621 }
1622 if (done)
1623 _processed.set(resproj->_idx);
1138 break; 1624 break;
1139 } 1625 }
1140 1626
1141 default: 1627 default:
1142 // Some other type of call, assume the worst case that the 1628 // Some other type of call, assume the worst case that the
1143 // returned value, if any, globally escapes. 1629 // returned value, if any, globally escapes.
1144 { 1630 {
1145 const TypeTuple *r = call->tf()->range(); 1631 const TypeTuple *r = call->tf()->range();
1146
1147 if (r->cnt() > TypeFunc::Parms) { 1632 if (r->cnt() > TypeFunc::Parms) {
1148 const Type* ret_type = r->field_at(TypeFunc::Parms); 1633 const Type* ret_type = r->field_at(TypeFunc::Parms);
1149 1634
1150 // Note: we use isa_ptr() instead of isa_oopptr() here because the 1635 // Note: we use isa_ptr() instead of isa_oopptr() here because the
1151 // _multianewarray functions return a TypeRawPtr. 1636 // _multianewarray functions return a TypeRawPtr.
1152 if (ret_type->isa_ptr() != NULL) { 1637 if (ret_type->isa_ptr() != NULL) {
1153 PointsToNode *ptadr = ptnode_adr(call->_idx); 1638 PointsToNode *ptadr = ptnode_adr(call->_idx);
1154 ptadr->set_node_type(PointsToNode::JavaObject);
1155 set_escape_state(call->_idx, PointsToNode::GlobalEscape); 1639 set_escape_state(call->_idx, PointsToNode::GlobalEscape);
1156 if (resproj != NULL) 1640 if (resproj != NULL)
1157 add_pointsto_edge(resproj->_idx, _phantom_object); 1641 add_pointsto_edge(resproj->_idx, _phantom_object);
1158 } 1642 }
1159 } 1643 }
1160 } 1644 _processed.set(resproj->_idx);
1161 } 1645 }
1162 } 1646 }
1163 1647 }
1164 void ConnectionGraph::record_for_escape_analysis(Node *n) { 1648
1165 if (_collecting) { 1649 // Populate Connection Graph with Ideal nodes and create simple
1166 if (n->is_Phi()) { 1650 // connection graph edges (no need to check the node_type of inputs
1167 PhiNode *phi = n->as_Phi(); 1651 // or to call PointsTo() to walk the connection graph).
1168 const Type *pt = phi->type(); 1652 void ConnectionGraph::record_for_escape_analysis(Node *n, PhaseTransform *phase) {
1169 if ((pt->isa_oopptr() != NULL) || pt == TypePtr::NULL_PTR) { 1653 if (_processed.test(n->_idx))
1170 PointsToNode *ptn = ptnode_adr(phi->_idx); 1654 return; // No need to redefine node's state.
1171 ptn->set_node_type(PointsToNode::LocalVar); 1655
1172 ptn->_node = n; 1656 if (n->is_Call()) {
1173 _deferred.push(n); 1657 // Arguments to allocation and locking don't escape.
1174 } 1658 if (n->is_Allocate()) {
1175 } 1659 add_node(n, PointsToNode::JavaObject, PointsToNode::UnknownEscape, true);
1176 } 1660 record_for_optimizer(n);
1177 } 1661 } else if (n->is_Lock() || n->is_Unlock()) {
1178 1662 // Put Lock and Unlock nodes on IGVN worklist to process them during
1179 void ConnectionGraph::record_escape_work(Node *n, PhaseTransform *phase) { 1663 // the first IGVN optimization when escape information is still available.
1180 1664 record_for_optimizer(n);
1181 int opc = n->Opcode(); 1665 _processed.set(n->_idx);
1666 } else {
1667 // Have to process call's arguments first.
1668 PointsToNode::NodeType nt = PointsToNode::UnknownType;
1669
1670 // Check if a call returns an object.
1671 const TypeTuple *r = n->as_Call()->tf()->range();
1672 if (r->cnt() > TypeFunc::Parms &&
1673 n->as_Call()->proj_out(TypeFunc::Parms) != NULL) {
1674 // Note: use isa_ptr() instead of isa_oopptr() here because
1675 // the _multianewarray functions return a TypeRawPtr.
1676 if (r->field_at(TypeFunc::Parms)->isa_ptr() != NULL) {
1677 nt = PointsToNode::JavaObject;
1678 }
1679 }
1680 add_node(n, nt, PointsToNode::UnknownEscape, false);
1681 }
1682 return;
1683 }
1684
1685 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1686 // ThreadLocal has RawPtr type.
1687 switch (n->Opcode()) {
1688 case Op_AddP:
1689 {
1690 add_node(n, PointsToNode::Field, PointsToNode::UnknownEscape, false);
1691 break;
1692 }
1693 case Op_CastX2P:
1694 { // "Unsafe" memory access.
1695 add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true);
1696 break;
1697 }
1698 case Op_CastPP:
1699 case Op_CheckCastPP:
1700 {
1701 add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
1702 int ti = n->in(1)->_idx;
1703 PointsToNode::NodeType nt = _nodes->adr_at(ti)->node_type();
1704 if (nt == PointsToNode::UnknownType) {
1705 _delayed_worklist.push(n); // Process it later.
1706 break;
1707 } else if (nt == PointsToNode::JavaObject) {
1708 add_pointsto_edge(n->_idx, ti);
1709 } else {
1710 add_deferred_edge(n->_idx, ti);
1711 }
1712 _processed.set(n->_idx);
1713 break;
1714 }
1715 case Op_ConP:
1716 {
1717 // assume all pointer constants globally escape except for null
1718 PointsToNode::EscapeState es;
1719 if (phase->type(n) == TypePtr::NULL_PTR)
1720 es = PointsToNode::NoEscape;
1721 else
1722 es = PointsToNode::GlobalEscape;
1723
1724 add_node(n, PointsToNode::JavaObject, es, true);
1725 break;
1726 }
1727 case Op_CreateEx:
1728 {
1729 // assume that all exception objects globally escape
1730 add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true);
1731 break;
1732 }
1733 case Op_LoadKlass:
1734 {
1735 add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, true);
1736 break;
1737 }
1738 case Op_LoadP:
1739 {
1740 const Type *t = phase->type(n);
1741 if (t->isa_ptr() == NULL) {
1742 _processed.set(n->_idx);
1743 return;
1744 }
1745 add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
1746 break;
1747 }
1748 case Op_Parm:
1749 {
1750 _processed.set(n->_idx); // No need to redefine its state.
1751 uint con = n->as_Proj()->_con;
1752 if (con < TypeFunc::Parms)
1753 return;
1754 const Type *t = n->in(0)->as_Start()->_domain->field_at(con);
1755 if (t->isa_ptr() == NULL)
1756 return;
1757 // We have to assume all input parameters globally escape
1758 // (Note: passing 'false' since _processed is already set).
1759 add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, false);
1760 break;
1761 }
1762 case Op_Phi:
1763 {
1764 if (n->as_Phi()->type()->isa_ptr() == NULL) {
1765 // nothing to do if not an oop
1766 _processed.set(n->_idx);
1767 return;
1768 }
1769 add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
1770 uint i;
1771 for (i = 1; i < n->req() ; i++) {
1772 Node* in = n->in(i);
1773 if (in == NULL)
1774 continue; // ignore NULL
1775 in = in->uncast();
1776 if (in->is_top() || in == n)
1777 continue; // ignore top or inputs which go back to this node
1778 int ti = in->_idx;
1779 PointsToNode::NodeType nt = _nodes->adr_at(ti)->node_type();
1780 if (nt == PointsToNode::UnknownType) {
1781 break;
1782 } else if (nt == PointsToNode::JavaObject) {
1783 add_pointsto_edge(n->_idx, ti);
1784 } else {
1785 add_deferred_edge(n->_idx, ti);
1786 }
1787 }
1788 if (i >= n->req())
1789 _processed.set(n->_idx);
1790 else
1791 _delayed_worklist.push(n);
1792 break;
1793 }
1794 case Op_Proj:
1795 {
1796 // we are only interested in the result projection from a call
1797 if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() ) {
1798 add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
1799 process_call_result(n->as_Proj(), phase);
1800 if (!_processed.test(n->_idx)) {
1801 // The call's result may need to be processed later if the call
1802 // returns its argument and the argument is not processed yet.
1803 _delayed_worklist.push(n);
1804 }
1805 } else {
1806 _processed.set(n->_idx);
1807 }
1808 break;
1809 }
1810 case Op_Return:
1811 {
1812 if( n->req() > TypeFunc::Parms &&
1813 phase->type(n->in(TypeFunc::Parms))->isa_oopptr() ) {
1814 // Treat Return value as LocalVar with GlobalEscape escape state.
1815 add_node(n, PointsToNode::LocalVar, PointsToNode::GlobalEscape, false);
1816 int ti = n->in(TypeFunc::Parms)->_idx;
1817 PointsToNode::NodeType nt = _nodes->adr_at(ti)->node_type();
1818 if (nt == PointsToNode::UnknownType) {
1819 _delayed_worklist.push(n); // Process it later.
1820 break;
1821 } else if (nt == PointsToNode::JavaObject) {
1822 add_pointsto_edge(n->_idx, ti);
1823 } else {
1824 add_deferred_edge(n->_idx, ti);
1825 }
1826 }
1827 _processed.set(n->_idx);
1828 break;
1829 }
1830 case Op_StoreP:
1831 {
1832 const Type *adr_type = phase->type(n->in(MemNode::Address));
1833 if (adr_type->isa_oopptr()) {
1834 add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
1835 } else {
1836 Node* adr = n->in(MemNode::Address);
1837 if (adr->is_AddP() && phase->type(adr) == TypeRawPtr::NOTNULL &&
1838 adr->in(AddPNode::Address)->is_Proj() &&
1839 adr->in(AddPNode::Address)->in(0)->is_Allocate()) {
1840 add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
1841 // We are computing a raw address for a store captured
1842 // by an Initialize; compute an appropriate address type.
1843 int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
1844 assert(offs != Type::OffsetBot, "offset must be a constant");
1845 } else {
1846 _processed.set(n->_idx);
1847 return;
1848 }
1849 }
1850 break;
1851 }
1852 case Op_StorePConditional:
1853 case Op_CompareAndSwapP:
1854 {
1855 const Type *adr_type = phase->type(n->in(MemNode::Address));
1856 if (adr_type->isa_oopptr()) {
1857 add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
1858 } else {
1859 _processed.set(n->_idx);
1860 return;
1861 }
1862 break;
1863 }
1864 case Op_ThreadLocal:
1865 {
1866 add_node(n, PointsToNode::JavaObject, PointsToNode::ArgEscape, true);
1867 break;
1868 }
1869 default:
1870 ;
1871 // nothing to do
1872 }
1873 return;
1874 }
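(Editorial sketch.) Several cases above (CastPP/CheckCastPP, Phi, Return) share one delay pattern: the kind of edge to add depends on the input's node type, and an input still at UnknownType parks the node on _delayed_worklist for the second pass. A sketch of that dispatch over the toy graph from the first sketch:

// Sketch of the edge dispatch above: JavaObject inputs get a PointsToEdge,
// LocalVar/Field inputs get a DeferredEdge, and an unclassified input defers
// the whole node to a later pass. Returns whether the edge could be added.
bool add_input_edge_sketch(std::vector<CGNode>& g, uint32_t n, uint32_t in,
                           std::vector<uint32_t>& delayed) {
  switch (g[in].node_type) {
    case UnknownType:
      delayed.push_back(n);              // revisit once 'in' is classified
      return false;
    case JavaObject:
      g[n].edges.push_back(Edge{in, PointsToEdge});
      return true;
    default:                             // LocalVar or Field
      g[n].edges.push_back(Edge{in, DeferredEdge});
      return true;
  }
}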
1875
1876 void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) {
1877 // Don't set processed bit for AddP, LoadP, StoreP since
1878 // they may need more than one pass to process.
1879 if (_processed.test(n->_idx))
1880 return; // No need to redefine node's state.
1881
1182 PointsToNode *ptadr = ptnode_adr(n->_idx); 1882 PointsToNode *ptadr = ptnode_adr(n->_idx);
1183 1883
1184 if (_processed.test(n->_idx))
1185 return;
1186
1187 ptadr->_node = n;
1188 if (n->is_Call()) { 1884 if (n->is_Call()) {
1189 CallNode *call = n->as_Call(); 1885 CallNode *call = n->as_Call();
1190 process_call_arguments(call, phase); 1886 process_call_arguments(call, phase);
1887 _processed.set(n->_idx);
1191 return; 1888 return;
1192 } 1889 }
1193 1890
1194 switch (opc) { 1891 switch (n->Opcode()) {
1195 case Op_AddP: 1892 case Op_AddP:
1196 { 1893 {
1197 Node *base = skip_casts(n->in(AddPNode::Base)); 1894 Node *base = get_addp_base(n);
1198 ptadr->set_node_type(PointsToNode::Field); 1895 // Create a field edge to this node from everything base could point to.
1199
1200 // create a field edge to this node from everything adr could point to
1201 VectorSet ptset(Thread::current()->resource_area()); 1896 VectorSet ptset(Thread::current()->resource_area());
1202 PointsTo(ptset, base, phase); 1897 PointsTo(ptset, base, phase);
1203 for( VectorSetI i(&ptset); i.test(); ++i ) { 1898 for( VectorSetI i(&ptset); i.test(); ++i ) {
1204 uint pt = i.elem; 1899 uint pt = i.elem;
1205 add_field_edge(pt, n->_idx, type_to_offset(phase->type(n))); 1900 add_field_edge(pt, n->_idx, address_offset(n, phase));
1206 } 1901 }
1207 break; 1902 break;
1208 } 1903 }
1209 case Op_Parm: 1904 case Op_CastX2P:
1210 { 1905 {
1211 ProjNode *nproj = n->as_Proj(); 1906 assert(false, "Op_CastX2P");
1212 uint con = nproj->_con; 1907 break;
1213 if (con < TypeFunc::Parms) 1908 }
1214 return; 1909 case Op_CastPP:
1215 const Type *t = nproj->in(0)->as_Start()->_domain->field_at(con); 1910 case Op_CheckCastPP:
1911 {
1912 int ti = n->in(1)->_idx;
1913 if (_nodes->adr_at(ti)->node_type() == PointsToNode::JavaObject) {
1914 add_pointsto_edge(n->_idx, ti);
1915 } else {
1916 add_deferred_edge(n->_idx, ti);
1917 }
1918 _processed.set(n->_idx);
1919 break;
1920 }
1921 case Op_ConP:
1922 {
1923 assert(false, "Op_ConP");
1924 break;
1925 }
1926 case Op_CreateEx:
1927 {
1928 assert(false, "Op_CreateEx");
1929 break;
1930 }
1931 case Op_LoadKlass:
1932 {
1933 assert(false, "Op_LoadKlass");
1934 break;
1935 }
1936 case Op_LoadP:
1937 {
1938 const Type *t = phase->type(n);
1939 #ifdef ASSERT
1216 if (t->isa_ptr() == NULL) 1940 if (t->isa_ptr() == NULL)
1217 return; 1941 assert(false, "Op_LoadP");
1218 ptadr->set_node_type(PointsToNode::JavaObject); 1942 #endif
1219 if (t->isa_oopptr() != NULL) { 1943
1220 set_escape_state(n->_idx, PointsToNode::ArgEscape); 1944 Node* adr = n->in(MemNode::Address)->uncast();
1945 const Type *adr_type = phase->type(adr);
1946 Node* adr_base;
1947 if (adr->is_AddP()) {
1948 adr_base = get_addp_base(adr);
1221 } else { 1949 } else {
1222 // this must be the incoming state of an OSR compile, we have to assume anything 1950 adr_base = adr;
1223 // passed in globally escapes 1951 }
1224 assert(_compile->is_osr_compilation(), "bad argument type for non-osr compilation"); 1952
1225 set_escape_state(n->_idx, PointsToNode::GlobalEscape); 1953 // For everything "adr_base" could point to, create a deferred edge from
1226 } 1954 // this node to each field with the same offset.
1227 _processed.set(n->_idx);
1228 break;
1229 }
1230 case Op_Phi:
1231 {
1232 PhiNode *phi = n->as_Phi();
1233 if (phi->type()->isa_oopptr() == NULL)
1234 return; // nothing to do if not an oop
1235 ptadr->set_node_type(PointsToNode::LocalVar);
1236 process_phi_escape(phi, phase);
1237 break;
1238 }
1239 case Op_CreateEx:
1240 {
1241 // assume that all exception objects globally escape
1242 ptadr->set_node_type(PointsToNode::JavaObject);
1243 set_escape_state(n->_idx, PointsToNode::GlobalEscape);
1244 _processed.set(n->_idx);
1245 break;
1246 }
1247 case Op_ConP:
1248 {
1249 const Type *t = phase->type(n);
1250 ptadr->set_node_type(PointsToNode::JavaObject);
1251 // assume all pointer constants globally escape except for null
1252 if (t == TypePtr::NULL_PTR)
1253 set_escape_state(n->_idx, PointsToNode::NoEscape);
1254 else
1255 set_escape_state(n->_idx, PointsToNode::GlobalEscape);
1256 _processed.set(n->_idx);
1257 break;
1258 }
1259 case Op_LoadKlass:
1260 {
1261 ptadr->set_node_type(PointsToNode::JavaObject);
1262 set_escape_state(n->_idx, PointsToNode::GlobalEscape);
1263 _processed.set(n->_idx);
1264 break;
1265 }
1266 case Op_LoadP:
1267 {
1268 const Type *t = phase->type(n);
1269 if (!t->isa_oopptr())
1270 return;
1271 ptadr->set_node_type(PointsToNode::LocalVar);
1272 set_escape_state(n->_idx, PointsToNode::UnknownEscape);
1273
1274 Node *adr = skip_casts(n->in(MemNode::Address));
1275 const Type *adr_type = phase->type(adr);
1276 Node *adr_base = skip_casts((adr->Opcode() == Op_AddP) ? adr->in(AddPNode::Base) : adr);
1277
1278 // For everything "adr" could point to, create a deferred edge from
1279 // this node to each field with the same offset as "adr_type"
1280 VectorSet ptset(Thread::current()->resource_area()); 1955 VectorSet ptset(Thread::current()->resource_area());
1281 PointsTo(ptset, adr_base, phase); 1956 PointsTo(ptset, adr_base, phase);
1282 // If ptset is empty, then this value must have been set outside 1957 int offset = address_offset(adr, phase);
1283 // this method, so we add the phantom node
1284 if (ptset.Size() == 0)
1285 ptset.set(_phantom_object);
1286 for( VectorSetI i(&ptset); i.test(); ++i ) { 1958 for( VectorSetI i(&ptset); i.test(); ++i ) {
1287 uint pt = i.elem; 1959 uint pt = i.elem;
1288 add_deferred_edge_to_fields(n->_idx, pt, type_to_offset(adr_type)); 1960 add_deferred_edge_to_fields(n->_idx, pt, offset);
1289 } 1961 }
1962 break;
1963 }
1964 case Op_Parm:
1965 {
1966 assert(false, "Op_Parm");
1967 break;
1968 }
1969 case Op_Phi:
1970 {
1971 #ifdef ASSERT
1972 if (n->as_Phi()->type()->isa_ptr() == NULL)
1973 assert(false, "Op_Phi");
1974 #endif
1975 for (uint i = 1; i < n->req() ; i++) {
1976 Node* in = n->in(i);
1977 if (in == NULL)
1978 continue; // ignore NULL
1979 in = in->uncast();
1980 if (in->is_top() || in == n)
1981 continue; // ignore top or inputs which go back to this node
1982 int ti = in->_idx;
1983 if (_nodes->adr_at(in->_idx)->node_type() == PointsToNode::JavaObject) {
1984 add_pointsto_edge(n->_idx, ti);
1985 } else {
1986 add_deferred_edge(n->_idx, ti);
1987 }
1988 }
1989 _processed.set(n->_idx);
1990 break;
1991 }
1992 case Op_Proj:
1993 {
1994 // we are only interested in the result projection from a call
1995 if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() ) {
1996 process_call_result(n->as_Proj(), phase);
1997 assert(_processed.test(n->_idx), "all call results should be processed");
1998 } else {
1999 assert(false, "Op_Proj");
2000 }
2001 break;
2002 }
2003 case Op_Return:
2004 {
2005 #ifdef ASSERT
2006 if( n->req() <= TypeFunc::Parms ||
2007 !phase->type(n->in(TypeFunc::Parms))->isa_oopptr() ) {
2008 assert(false, "Op_Return");
2009 }
2010 #endif
2011 int ti = n->in(TypeFunc::Parms)->_idx;
2012 if (_nodes->adr_at(ti)->node_type() == PointsToNode::JavaObject) {
2013 add_pointsto_edge(n->_idx, ti);
2014 } else {
2015 add_deferred_edge(n->_idx, ti);
2016 }
2017 _processed.set(n->_idx);
1290 break; 2018 break;
1291 } 2019 }
1292 case Op_StoreP: 2020 case Op_StoreP:
1293 case Op_StorePConditional: 2021 case Op_StorePConditional:
1294 case Op_CompareAndSwapP: 2022 case Op_CompareAndSwapP:
1295 { 2023 {
1296 Node *adr = n->in(MemNode::Address); 2024 Node *adr = n->in(MemNode::Address);
1297 Node *val = skip_casts(n->in(MemNode::ValueIn));
1298 const Type *adr_type = phase->type(adr); 2025 const Type *adr_type = phase->type(adr);
2026 #ifdef ASSERT
1299 if (!adr_type->isa_oopptr()) 2027 if (!adr_type->isa_oopptr())
1300 return; 2028 assert(phase->type(adr) == TypeRawPtr::NOTNULL, "Op_StoreP");
1301 2029 #endif
1302 assert(adr->Opcode() == Op_AddP, "expecting an AddP"); 2030
1303 Node *adr_base = adr->in(AddPNode::Base); 2031 assert(adr->is_AddP(), "expecting an AddP");
1304 2032 Node *adr_base = get_addp_base(adr);
1305 // For everything "adr_base" could point to, create a deferred edge to "val" from each field 2033 Node *val = n->in(MemNode::ValueIn)->uncast();
1306 // with the same offset as "adr_type" 2034 // For everything "adr_base" could point to, create a deferred edge
2035 // to "val" from each field with the same offset.
1307 VectorSet ptset(Thread::current()->resource_area()); 2036 VectorSet ptset(Thread::current()->resource_area());
1308 PointsTo(ptset, adr_base, phase); 2037 PointsTo(ptset, adr_base, phase);
1309 for( VectorSetI i(&ptset); i.test(); ++i ) { 2038 for( VectorSetI i(&ptset); i.test(); ++i ) {
1310 uint pt = i.elem; 2039 uint pt = i.elem;
1311 add_edge_from_fields(pt, val->_idx, type_to_offset(adr_type)); 2040 add_edge_from_fields(pt, val->_idx, address_offset(adr, phase));
1312 } 2041 }
1313 break; 2042 break;
1314 } 2043 }
1315 case Op_Proj: 2044 case Op_ThreadLocal:
1316 { 2045 {
1317 ProjNode *nproj = n->as_Proj(); 2046 assert(false, "Op_ThreadLocal");
1318 Node *n0 = nproj->in(0);
1319 // we are only interested in the result projection from a call
1320 if (nproj->_con == TypeFunc::Parms && n0->is_Call() ) {
1321 process_call_result(nproj, phase);
1322 }
1323
1324 break;
1325 }
1326 case Op_CastPP:
1327 case Op_CheckCastPP:
1328 {
1329 ptadr->set_node_type(PointsToNode::LocalVar);
1330 int ti = n->in(1)->_idx;
1331 if (_nodes->at(ti).node_type() == PointsToNode::JavaObject) {
1332 add_pointsto_edge(n->_idx, ti);
1333 } else {
1334 add_deferred_edge(n->_idx, ti);
1335 }
1336 break; 2047 break;
1337 } 2048 }
1338 default: 2049 default:
1339 ; 2050 ;
1340 // nothing to do 2051 // nothing to do
1341 } 2052 }
1342 }
1343
1344 void ConnectionGraph::record_escape(Node *n, PhaseTransform *phase) {
1345 if (_collecting)
1346 record_escape_work(n, phase);
1347 } 2053 }
1348 2054
1349 #ifndef PRODUCT 2055 #ifndef PRODUCT
1350 void ConnectionGraph::dump() { 2056 void ConnectionGraph::dump() {
1351 PhaseGVN *igvn = _compile->initial_gvn(); 2057 PhaseGVN *igvn = _compile->initial_gvn();
1352 bool first = true; 2058 bool first = true;
1353 2059
1354 for (uint ni = 0; ni < (uint)_nodes->length(); ni++) { 2060 uint size = (uint)_nodes->length();
1355 PointsToNode *esp = _nodes->adr_at(ni); 2061 for (uint ni = 0; ni < size; ni++) {
1356 if (esp->node_type() == PointsToNode::UnknownType || esp->_node == NULL) 2062 PointsToNode *ptn = _nodes->adr_at(ni);
2063 PointsToNode::NodeType ptn_type = ptn->node_type();
2064
2065 if (ptn_type != PointsToNode::JavaObject || ptn->_node == NULL)
1357 continue; 2066 continue;
1358 PointsToNode::EscapeState es = escape_state(esp->_node, igvn); 2067 PointsToNode::EscapeState es = escape_state(ptn->_node, igvn);
1359 if (es == PointsToNode::NoEscape || (Verbose && 2068 if (ptn->_node->is_Allocate() && (es == PointsToNode::NoEscape || Verbose)) {
1360 (es != PointsToNode::UnknownEscape || esp->edge_count() != 0))) { 2069 if (first) {
1361 // don't print null pointer node which almost every method has 2070 tty->cr();
1362 if (esp->_node->Opcode() != Op_ConP || igvn->type(esp->_node) != TypePtr::NULL_PTR) { 2071 tty->print("======== Connection graph for ");
1363 if (first) { 2072 C()->method()->print_short_name();
1364 tty->print("======== Connection graph for "); 2073 tty->cr();
1365 C()->method()->print_short_name(); 2074 first = false;
1366 tty->cr(); 2075 }
1367 first = false; 2076 tty->print("%6d ", ni);
1368 } 2077 ptn->dump();
1369 tty->print("%4d ", ni); 2078 // Print all locals which reference this allocation
1370 esp->dump(); 2079 for (uint li = ni; li < size; li++) {
1371 } 2080 PointsToNode *ptn_loc = _nodes->adr_at(li);
2081 PointsToNode::NodeType ptn_loc_type = ptn_loc->node_type();
2082 if ( ptn_loc_type == PointsToNode::LocalVar && ptn_loc->_node != NULL &&
2083 ptn_loc->edge_count() == 1 && ptn_loc->edge_target(0) == ni ) {
2084 tty->print("%6d LocalVar [[%d]]", li, ni);
2085 _nodes->adr_at(li)->_node->dump();
2086 }
2087 }
2088 if (Verbose) {
2089 // Print all fields which reference this allocation
2090 for (uint i = 0; i < ptn->edge_count(); i++) {
2091 uint ei = ptn->edge_target(i);
2092 tty->print("%6d Field [[%d]]", ei, ni);
2093 _nodes->adr_at(ei)->_node->dump();
2094 }
2095 }
2096 tty->cr();
1372 } 2097 }
1373 } 2098 }
1374 } 2099 }
1375 #endif 2100 #endif