comparison src/share/vm/opto/escape.cpp @ 1101:7fee0a6cc6d4

6896727: nsk/logging/LoggingPermission/LoggingPermission/logperm002 fails with G1, EscapeAnalysis Summary: Move instance store's memory users to corresponding memory slices when updating its memory edge. Reviewed-by: never
author kvn
date Wed, 09 Dec 2009 19:50:14 -0800
parents f96a1a986f7b
children 4b84186a8248
comparison
equal deleted inserted replaced
1100:f96a1a986f7b 1101:7fee0a6cc6d4
541 // for the instance type. Note: C++ will not remove it since the call 541 // for the instance type. Note: C++ will not remove it since the call
542 // has side effect. 542 // has side effect.
543 int alias_idx = _compile->get_alias_index(tinst); 543 int alias_idx = _compile->get_alias_index(tinst);
544 igvn->set_type(addp, tinst); 544 igvn->set_type(addp, tinst);
545 // record the allocation in the node map 545 // record the allocation in the node map
546 assert(ptnode_adr(addp->_idx)->_node != NULL, "should be registered");
546 set_map(addp->_idx, get_map(base->_idx)); 547 set_map(addp->_idx, get_map(base->_idx));
547 548
548 // Set addp's Base and Address to 'base'. 549 // Set addp's Base and Address to 'base'.
549 Node *abase = addp->in(AddPNode::Base); 550 Node *abase = addp->in(AddPNode::Base);
550 Node *adr = addp->in(AddPNode::Address); 551 Node *adr = addp->in(AddPNode::Address);
616 } 617 }
617 orig_phi_worklist.append_if_missing(orig_phi); 618 orig_phi_worklist.append_if_missing(orig_phi);
618 const TypePtr *atype = C->get_adr_type(alias_idx); 619 const TypePtr *atype = C->get_adr_type(alias_idx);
619 result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype); 620 result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype);
620 C->copy_node_notes_to(result, orig_phi); 621 C->copy_node_notes_to(result, orig_phi);
621 set_map_phi(orig_phi->_idx, result);
622 igvn->set_type(result, result->bottom_type()); 622 igvn->set_type(result, result->bottom_type());
623 record_for_optimizer(result); 623 record_for_optimizer(result);
624
625 debug_only(Node* pn = ptnode_adr(orig_phi->_idx)->_node;)
626 assert(pn == NULL || pn == orig_phi, "wrong node");
627 set_map(orig_phi->_idx, result);
628 ptnode_adr(orig_phi->_idx)->_node = orig_phi;
629
624 new_created = true; 630 new_created = true;
625 return result; 631 return result;
626 } 632 }
627 633
628 // 634 //
706 tinst->offset() == Type::OffsetBot) ) { 712 tinst->offset() == Type::OffsetBot) ) {
707 mem = mmem->memory_at(alias_idx); 713 mem = mmem->memory_at(alias_idx);
708 // Update input if it is progress over what we have now 714 // Update input if it is progress over what we have now
709 } 715 }
710 return mem; 716 return mem;
717 }
718
719 //
720 // Move memory users to their memory slices.
721 //
722 void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *> &orig_phis, PhaseGVN *igvn) {
723 Compile* C = _compile;
724
725 const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr();
726 assert(tp != NULL, "ptr type");
727 int alias_idx = C->get_alias_index(tp);
728 int general_idx = C->get_general_index(alias_idx);
729
730 // Move users first
731 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
732 Node* use = n->fast_out(i);
733 if (use->is_MergeMem()) {
734 MergeMemNode* mmem = use->as_MergeMem();
735 assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice");
736 if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) {
737 continue; // Nothing to do
738 }
739 // Replace previous general reference to mem node.
740 uint orig_uniq = C->unique();
741 Node* m = find_inst_mem(n, general_idx, orig_phis, igvn);
742 assert(orig_uniq == C->unique(), "no new nodes");
743 mmem->set_memory_at(general_idx, m);
744 --imax;
745 --i;
746 } else if (use->is_MemBar()) {
747 assert(!use->is_Initialize(), "initializing stores should not be moved");
748 if (use->req() > MemBarNode::Precedent &&
749 use->in(MemBarNode::Precedent) == n) {
750 // Don't move related membars.
751 record_for_optimizer(use);
752 continue;
753 }
754 tp = use->as_MemBar()->adr_type()->isa_ptr();
755 if (tp != NULL && C->get_alias_index(tp) == alias_idx ||
756 alias_idx == general_idx) {
757 continue; // Nothing to do
758 }
759 // Move to general memory slice.
760 uint orig_uniq = C->unique();
761 Node* m = find_inst_mem(n, general_idx, orig_phis, igvn);
762 assert(orig_uniq == C->unique(), "no new nodes");
763 igvn->hash_delete(use);
764 imax -= use->replace_edge(n, m);
765 igvn->hash_insert(use);
766 record_for_optimizer(use);
767 --i;
768 #ifdef ASSERT
769 } else if (use->is_Mem()) {
770 if (use->Opcode() == Op_StoreCM && use->in(MemNode::OopStore) == n) {
771 // Don't move related cardmark.
772 continue;
773 }
774 // Memory nodes should have new memory input.
775 tp = igvn->type(use->in(MemNode::Address))->isa_ptr();
776 assert(tp != NULL, "ptr type");
777 int idx = C->get_alias_index(tp);
778 assert(get_map(use->_idx) != NULL || idx == alias_idx,
779 "Following memory nodes should have new memory input or be on the same memory slice");
780 } else if (use->is_Phi()) {
781 // Phi nodes should be split and moved already.
782 tp = use->as_Phi()->adr_type()->isa_ptr();
783 assert(tp != NULL, "ptr type");
784 int idx = C->get_alias_index(tp);
785 assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice");
786 } else {
787 use->dump();
788 assert(false, "should not be here");
789 #endif
790 }
791 }
711 } 792 }
712 793
713 // 794 //
714 // Search memory chain of "mem" to find a MemNode whose address 795 // Search memory chain of "mem" to find a MemNode whose address
715 // is the specified alias index. 796 // is the specified alias index.
773 } 854 }
774 } else if (result->is_Phi() && 855 } else if (result->is_Phi() &&
775 C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) { 856 C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) {
776 Node *un = result->as_Phi()->unique_input(phase); 857 Node *un = result->as_Phi()->unique_input(phase);
777 if (un != NULL) { 858 if (un != NULL) {
859 orig_phis.append_if_missing(result->as_Phi());
778 result = un; 860 result = un;
779 } else { 861 } else {
780 break; 862 break;
781 } 863 }
782 } else if (result->is_ClearArray()) { 864 } else if (result->is_ClearArray()) {
905 // 100 LoadP _ 80 20 ... alias_index=4 987 // 100 LoadP _ 80 20 ... alias_index=4
906 // 988 //
907 void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist) { 989 void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist) {
908 GrowableArray<Node *> memnode_worklist; 990 GrowableArray<Node *> memnode_worklist;
909 GrowableArray<PhiNode *> orig_phis; 991 GrowableArray<PhiNode *> orig_phis;
992
910 PhaseGVN *igvn = _compile->initial_gvn(); 993 PhaseGVN *igvn = _compile->initial_gvn();
911 uint new_index_start = (uint) _compile->num_alias_types(); 994 uint new_index_start = (uint) _compile->num_alias_types();
912 VectorSet visited(Thread::current()->resource_area()); 995 Arena* arena = Thread::current()->resource_area();
913 VectorSet ptset(Thread::current()->resource_area()); 996 VectorSet visited(arena);
997 VectorSet ptset(arena);
914 998
915 999
916 // Phase 1: Process possible allocations from alloc_worklist. 1000 // Phase 1: Process possible allocations from alloc_worklist.
917 // Create instance types for the CheckCastPP for allocations where possible. 1001 // Create instance types for the CheckCastPP for allocations where possible.
918 // 1002 //
984 // in order for an object to be scalar-replaceable, it must be: 1068 // in order for an object to be scalar-replaceable, it must be:
985 // - a direct allocation (not a call returning an object) 1069 // - a direct allocation (not a call returning an object)
986 // - non-escaping 1070 // - non-escaping
987 // - eligible to be a unique type 1071 // - eligible to be a unique type
988 // - not determined to be ineligible by escape analysis 1072 // - not determined to be ineligible by escape analysis
1073 assert(ptnode_adr(alloc->_idx)->_node != NULL &&
1074 ptnode_adr(n->_idx)->_node != NULL, "should be registered");
989 set_map(alloc->_idx, n); 1075 set_map(alloc->_idx, n);
990 set_map(n->_idx, alloc); 1076 set_map(n->_idx, alloc);
991 const TypeOopPtr *t = igvn->type(n)->isa_oopptr(); 1077 const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
992 if (t == NULL) 1078 if (t == NULL)
993 continue; // not a TypeInstPtr 1079 continue; // not a TypeInstPtr
1180 Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis, igvn); 1266 Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis, igvn);
1181 if (_compile->failing()) { 1267 if (_compile->failing()) {
1182 return; 1268 return;
1183 } 1269 }
1184 if (mem != n->in(MemNode::Memory)) { 1270 if (mem != n->in(MemNode::Memory)) {
1271 // We delay the memory edge update since we need old one in
1272 // MergeMem code below when instances memory slices are separated.
1273 debug_only(Node* pn = ptnode_adr(n->_idx)->_node;)
1274 assert(pn == NULL || pn == n, "wrong node");
1185 set_map(n->_idx, mem); 1275 set_map(n->_idx, mem);
1186 ptnode_adr(n->_idx)->_node = n; 1276 ptnode_adr(n->_idx)->_node = n;
1187 } 1277 }
1188 if (n->is_Load()) { 1278 if (n->is_Load()) {
1189 continue; // don't push users 1279 continue; // don't push users
1247 for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) { 1337 for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
1248 Node* mem = nmm->in(i); 1338 Node* mem = nmm->in(i);
1249 Node* cur = NULL; 1339 Node* cur = NULL;
1250 if (mem == NULL || mem->is_top()) 1340 if (mem == NULL || mem->is_top())
1251 continue; 1341 continue;
1342 // First, update mergemem by moving memory nodes to corresponding slices
1343 // if their type became more precise since this mergemem was created.
1252 while (mem->is_Mem()) { 1344 while (mem->is_Mem()) {
1253 const Type *at = igvn->type(mem->in(MemNode::Address)); 1345 const Type *at = igvn->type(mem->in(MemNode::Address));
1254 if (at != Type::TOP) { 1346 if (at != Type::TOP) {
1255 assert (at->isa_ptr() != NULL, "pointer type required."); 1347 assert (at->isa_ptr() != NULL, "pointer type required.");
1256 uint idx = (uint)_compile->get_alias_index(at->is_ptr()); 1348 uint idx = (uint)_compile->get_alias_index(at->is_ptr());
1265 } 1357 }
1266 mem = mem->in(MemNode::Memory); 1358 mem = mem->in(MemNode::Memory);
1267 } 1359 }
1268 nmm->set_memory_at(i, (cur != NULL) ? cur : mem); 1360 nmm->set_memory_at(i, (cur != NULL) ? cur : mem);
1269 // Find any instance of the current type if we haven't encountered 1361 // Find any instance of the current type if we haven't encountered
1270 // a value of the instance along the chain. 1362 // already a memory slice of the instance along the memory chain.
1271 for (uint ni = new_index_start; ni < new_index_end; ni++) { 1363 for (uint ni = new_index_start; ni < new_index_end; ni++) {
1272 if((uint)_compile->get_general_index(ni) == i) { 1364 if((uint)_compile->get_general_index(ni) == i) {
1273 Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni); 1365 Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
1274 if (nmm->is_empty_memory(m)) { 1366 if (nmm->is_empty_memory(m)) {
1275 Node* result = find_inst_mem(mem, ni, orig_phis, igvn); 1367 Node* result = find_inst_mem(mem, ni, orig_phis, igvn);
1281 } 1373 }
1282 } 1374 }
1283 } 1375 }
1284 // Find the rest of instances values 1376 // Find the rest of instances values
1285 for (uint ni = new_index_start; ni < new_index_end; ni++) { 1377 for (uint ni = new_index_start; ni < new_index_end; ni++) {
1286 const TypeOopPtr *tinst = igvn->C->get_adr_type(ni)->isa_oopptr(); 1378 const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr();
1287 Node* result = step_through_mergemem(nmm, ni, tinst); 1379 Node* result = step_through_mergemem(nmm, ni, tinst);
1288 if (result == nmm->base_memory()) { 1380 if (result == nmm->base_memory()) {
1289 // Didn't find instance memory, search through general slice recursively. 1381 // Didn't find instance memory, search through general slice recursively.
1290 result = nmm->memory_at(igvn->C->get_general_index(ni)); 1382 result = nmm->memory_at(_compile->get_general_index(ni));
1291 result = find_inst_mem(result, ni, orig_phis, igvn); 1383 result = find_inst_mem(result, ni, orig_phis, igvn);
1292 if (_compile->failing()) { 1384 if (_compile->failing()) {
1293 return; 1385 return;
1294 } 1386 }
1295 nmm->set_memory_at(ni, result); 1387 nmm->set_memory_at(ni, result);
1323 igvn->hash_insert(phi); 1415 igvn->hash_insert(phi);
1324 record_for_optimizer(phi); 1416 record_for_optimizer(phi);
1325 } 1417 }
1326 1418
1327 // Update the memory inputs of MemNodes with the value we computed 1419 // Update the memory inputs of MemNodes with the value we computed
1328 // in Phase 2. 1420 // in Phase 2 and move stores memory users to corresponding memory slices.
1421 #ifdef ASSERT
1422 visited.Clear();
1423 Node_Stack old_mems(arena, _compile->unique() >> 2);
1424 #endif
1329 for (uint i = 0; i < nodes_size(); i++) { 1425 for (uint i = 0; i < nodes_size(); i++) {
1330 Node *nmem = get_map(i); 1426 Node *nmem = get_map(i);
1331 if (nmem != NULL) { 1427 if (nmem != NULL) {
1332 Node *n = ptnode_adr(i)->_node; 1428 Node *n = ptnode_adr(i)->_node;
1333 if (n != NULL && n->is_Mem()) { 1429 assert(n != NULL, "sanity");
1430 if (n->is_Mem()) {
1431 #ifdef ASSERT
1432 Node* old_mem = n->in(MemNode::Memory);
1433 if (!visited.test_set(old_mem->_idx)) {
1434 old_mems.push(old_mem, old_mem->outcnt());
1435 }
1436 #endif
1437 assert(n->in(MemNode::Memory) != nmem, "sanity");
1438 if (!n->is_Load()) {
1439 // Move memory users of a store first.
1440 move_inst_mem(n, orig_phis, igvn);
1441 }
1442 // Now update memory input
1334 igvn->hash_delete(n); 1443 igvn->hash_delete(n);
1335 n->set_req(MemNode::Memory, nmem); 1444 n->set_req(MemNode::Memory, nmem);
1336 igvn->hash_insert(n); 1445 igvn->hash_insert(n);
1337 record_for_optimizer(n); 1446 record_for_optimizer(n);
1338 } 1447 } else {
1339 } 1448 assert(n->is_Allocate() || n->is_CheckCastPP() ||
1340 } 1449 n->is_AddP() || n->is_Phi(), "unknown node used for set_map()");
1450 }
1451 }
1452 }
1453 #ifdef ASSERT
1454 // Verify that memory was split correctly
1455 while (old_mems.is_nonempty()) {
1456 Node* old_mem = old_mems.node();
1457 uint old_cnt = old_mems.index();
1458 old_mems.pop();
1459 assert(old_cnt == old_mem->outcnt(), "old mem could be lost");
1460 }
1461 #endif
1341 } 1462 }
1342 1463
1343 bool ConnectionGraph::has_candidates(Compile *C) { 1464 bool ConnectionGraph::has_candidates(Compile *C) {
1344 // EA brings benefits only when the code has allocations and/or locks which 1465 // EA brings benefits only when the code has allocations and/or locks which
1345 // are represented by ideal Macro nodes. 1466 // are represented by ideal Macro nodes.