comparison src/share/vm/opto/escape.cpp @ 1100:f96a1a986f7b

6895383: JCK test throws NPE for method compiled with Escape Analysis
Summary: Add missing checks for MemBar nodes in EA.
Reviewed-by: never
author kvn
date Wed, 09 Dec 2009 16:40:45 -0800
parents 7ef1d2e14917
children 7fee0a6cc6d4
comparison
1099:c5d3d979ae27 1100:f96a1a986f7b
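
The failure being fixed is, roughly, a non-escaping object whose field access is reached through a MemBar node that escape analysis did not step through, so a rewired load could observe the wrong memory state. A hypothetical Java reduction of that shape (not the actual JCK test; class and field names are invented):

class MemBarNpe {
    static class Box { Object ref; }
    static volatile int fence;            // volatile write emits a memory barrier in compiled code

    static int run(Object payload) {
        Box b = new Box();                // non-escaping, candidate for scalar replacement
        b.ref = payload;                  // initializing store
        fence = 1;                        // MemBar between the store and the load
        // If the load below were rewired to a memory state that bypasses the
        // store above, it would observe the default null and throw NPE.
        return b.ref.hashCode();
    }

    public static void main(String[] args) {
        int sum = 0;
        for (int i = 0; i < 100_000; i++) {   // warm up so the method is JIT-compiled
            sum += run("x");
        }
        System.out.println(sum);
    }
}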
777 if (un != NULL) { 777 if (un != NULL) {
778 result = un; 778 result = un;
779 } else { 779 } else {
780 break; 780 break;
781 } 781 }
782 } else if (result->is_ClearArray()) {
783 if (!ClearArrayNode::step_through(&result, (uint)tinst->instance_id(), phase)) {
784 // Can not bypass initialization of the instance
785 // we are looking for.
786 break;
787 }
788 // Otherwise skip it (the call updated 'result' value).
782 } else if (result->Opcode() == Op_SCMemProj) { 789 } else if (result->Opcode() == Op_SCMemProj) {
783 assert(result->in(0)->is_LoadStore(), "sanity"); 790 assert(result->in(0)->is_LoadStore(), "sanity");
784 const Type *at = phase->type(result->in(0)->in(MemNode::Address)); 791 const Type *at = phase->type(result->in(0)->in(MemNode::Address));
785 if (at != Type::TOP) { 792 if (at != Type::TOP) {
786 assert (at->isa_ptr() != NULL, "pointer type required."); 793 assert (at->isa_ptr() != NULL, "pointer type required.");
805 } 812 }
806 } 813 }
807 // the result is either MemNode, PhiNode, InitializeNode. 814 // the result is either MemNode, PhiNode, InitializeNode.
808 return result; 815 return result;
809 } 816 }
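
The new is_ClearArray() arm above lets find_inst_mem() step over the zeroing of arrays that belong to other instances while refusing to bypass the zeroing of the instance being tracked. A rough Java shape that exercises it (illustrative only; names are made up):

class ClearArrayWalk {
    static int sum(int n) {
        int[] a = new int[8];     // zeroed by a ClearArray node; non-escaping
        int[] b = new int[8];     // a second, unrelated zeroed array
        a[0] = n;
        // The load of a[0] must not bypass a's own initialization, but the
        // memory walk may step through b's zeroing, which belongs to a
        // different instance.
        return a[0] + b[0];
    }

    public static void main(String[] args) {
        long total = 0;
        for (int i = 0; i < 200_000; i++) {
            total += sum(i);
        }
        System.out.println(total);
    }
}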
810
811 817
812 // 818 //
813 // Convert the types of unescaped objects to instance types where possible, 819 // Convert the types of unescaped objects to instance types where possible,
814 // propagate the new type information through the graph, and update memory 820 // propagate the new type information through the graph, and update memory
815 // edges and MergeMem inputs to reflect the new type. 821 // edges and MergeMem inputs to reflect the new type.
898 // 90 LoadP _ 120 30 ... alias_index=6 904 // 90 LoadP _ 120 30 ... alias_index=6
899 // 100 LoadP _ 80 20 ... alias_index=4 905 // 100 LoadP _ 80 20 ... alias_index=4
900 // 906 //
901 void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist) { 907 void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist) {
902 GrowableArray<Node *> memnode_worklist; 908 GrowableArray<Node *> memnode_worklist;
903 GrowableArray<Node *> mergemem_worklist;
904 GrowableArray<PhiNode *> orig_phis; 909 GrowableArray<PhiNode *> orig_phis;
905 PhaseGVN *igvn = _compile->initial_gvn(); 910 PhaseGVN *igvn = _compile->initial_gvn();
906 uint new_index_start = (uint) _compile->num_alias_types(); 911 uint new_index_start = (uint) _compile->num_alias_types();
907 VectorSet visited(Thread::current()->resource_area()); 912 VectorSet visited(Thread::current()->resource_area());
908 VectorSet ptset(Thread::current()->resource_area()); 913 VectorSet ptset(Thread::current()->resource_area());
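
For the alias_index example in the comment above: after split_unique_types() each non-escaping allocation gets its own instance type and memory slice, so loads from two objects of the same class stop sharing one alias class. A small sketch of code that benefits (assumed, not taken from the bug report):

class SplitTypes {
    static class Point { int x; }

    static int f(int a, int b) {
        Point p = new Point();    // instance id i -> its own alias index
        Point q = new Point();    // instance id j -> a different alias index
        p.x = a;
        q.x = b;
        // After splitting, each load is wired to the memory slice of its own
        // instance, enabling scalar replacement of both objects.
        return p.x + q.x;
    }

    public static void main(String[] args) {
        int s = 0;
        for (int i = 0; i < 100_000; i++) {
            s += f(i, i + 1);
        }
        System.out.println(s);
    }
}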
1023 if (addp2 != NULL) { 1028 if (addp2 != NULL) {
1024 assert(alloc->is_AllocateArray(),"array allocation was expected"); 1029 assert(alloc->is_AllocateArray(),"array allocation was expected");
1025 alloc_worklist.append_if_missing(addp2); 1030 alloc_worklist.append_if_missing(addp2);
1026 } 1031 }
1027 alloc_worklist.append_if_missing(use); 1032 alloc_worklist.append_if_missing(use);
1028 } else if (use->is_Initialize()) { 1033 } else if (use->is_MemBar()) {
1029 memnode_worklist.append_if_missing(use); 1034 memnode_worklist.append_if_missing(use);
1030 } 1035 }
1031 } 1036 }
1032 } 1037 }
1033 } else if (n->is_AddP()) { 1038 } else if (n->is_AddP()) {
1034 ptset.Clear(); 1039 ptset.Clear();
1035 PointsTo(ptset, get_addp_base(n), igvn); 1040 PointsTo(ptset, get_addp_base(n), igvn);
1036 assert(ptset.Size() == 1, "AddP address is unique"); 1041 assert(ptset.Size() == 1, "AddP address is unique");
1037 uint elem = ptset.getelem(); // Allocation node's index 1042 uint elem = ptset.getelem(); // Allocation node's index
1038 if (elem == _phantom_object) 1043 if (elem == _phantom_object) {
1044 assert(false, "escaped allocation");
1039 continue; // Assume the value was set outside this method. 1045 continue; // Assume the value was set outside this method.
1046 }
1040 Node *base = get_map(elem); // CheckCastPP node 1047 Node *base = get_map(elem); // CheckCastPP node
1041 if (!split_AddP(n, base, igvn)) continue; // wrong type 1048 if (!split_AddP(n, base, igvn)) continue; // wrong type from dead path
1042 tinst = igvn->type(base)->isa_oopptr(); 1049 tinst = igvn->type(base)->isa_oopptr();
1043 } else if (n->is_Phi() || 1050 } else if (n->is_Phi() ||
1044 n->is_CheckCastPP() || 1051 n->is_CheckCastPP() ||
1045 n->is_EncodeP() || 1052 n->is_EncodeP() ||
1046 n->is_DecodeN() || 1053 n->is_DecodeN() ||
1051 } 1058 }
1052 ptset.Clear(); 1059 ptset.Clear();
1053 PointsTo(ptset, n, igvn); 1060 PointsTo(ptset, n, igvn);
1054 if (ptset.Size() == 1) { 1061 if (ptset.Size() == 1) {
1055 uint elem = ptset.getelem(); // Allocation node's index 1062 uint elem = ptset.getelem(); // Allocation node's index
1056 if (elem == _phantom_object) 1063 if (elem == _phantom_object) {
1064 assert(false, "escaped allocation");
1057 continue; // Assume the value was set outside this method. 1065 continue; // Assume the value was set outside this method.
1066 }
1058 Node *val = get_map(elem); // CheckCastPP node 1067 Node *val = get_map(elem); // CheckCastPP node
1059 TypeNode *tn = n->as_Type(); 1068 TypeNode *tn = n->as_Type();
1060 tinst = igvn->type(val)->isa_oopptr(); 1069 tinst = igvn->type(val)->isa_oopptr();
1061 assert(tinst != NULL && tinst->is_known_instance() && 1070 assert(tinst != NULL && tinst->is_known_instance() &&
1062 (uint)tinst->instance_id() == elem , "instance type expected."); 1071 (uint)tinst->instance_id() == elem , "instance type expected.");
1067 tn_t = tn_type->make_ptr()->isa_oopptr(); 1076 tn_t = tn_type->make_ptr()->isa_oopptr();
1068 } else { 1077 } else {
1069 tn_t = tn_type->isa_oopptr(); 1078 tn_t = tn_type->isa_oopptr();
1070 } 1079 }
1071 1080
1072 if (tn_t != NULL && 1081 if (tn_t != NULL && tinst->klass()->is_subtype_of(tn_t->klass())) {
1073 tinst->cast_to_instance_id(TypeOopPtr::InstanceBot)->higher_equal(tn_t)) {
1074 if (tn_type->isa_narrowoop()) { 1082 if (tn_type->isa_narrowoop()) {
1075 tn_type = tinst->make_narrowoop(); 1083 tn_type = tinst->make_narrowoop();
1076 } else { 1084 } else {
1077 tn_type = tinst; 1085 tn_type = tinst;
1078 } 1086 }
1080 igvn->set_type(tn, tn_type); 1088 igvn->set_type(tn, tn_type);
1081 tn->set_type(tn_type); 1089 tn->set_type(tn_type);
1082 igvn->hash_insert(tn); 1090 igvn->hash_insert(tn);
1083 record_for_optimizer(n); 1091 record_for_optimizer(n);
1084 } else { 1092 } else {
1085 continue; // wrong type 1093 assert(tn_type == TypePtr::NULL_PTR ||
1094 tn_t != NULL && !tinst->klass()->is_subtype_of(tn_t->klass()),
1095 "unexpected type");
1096 continue; // Skip dead path with different type
1086 } 1097 }
1087 } 1098 }
1088 } else { 1099 } else {
1100 debug_only(n->dump();)
1101 assert(false, "EA: unexpected node");
1089 continue; 1102 continue;
1090 } 1103 }
1091 // push users on appropriate worklist 1104 // push allocation's users on appropriate worklist
1092 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 1105 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1093 Node *use = n->fast_out(i); 1106 Node *use = n->fast_out(i);
1094 if(use->is_Mem() && use->in(MemNode::Address) == n) { 1107 if(use->is_Mem() && use->in(MemNode::Address) == n) {
1108 // Load/store to instance's field
1095 memnode_worklist.append_if_missing(use); 1109 memnode_worklist.append_if_missing(use);
1096 } else if (use->is_Initialize()) { 1110 } else if (use->is_MemBar()) {
1097 memnode_worklist.append_if_missing(use); 1111 memnode_worklist.append_if_missing(use);
1098 } else if (use->is_MergeMem()) {
1099 mergemem_worklist.append_if_missing(use);
1100 } else if (use->is_SafePoint() && tinst != NULL) {
1101 // Look for MergeMem nodes for calls which reference unique allocation
1102 // (through CheckCastPP nodes) even for debug info.
1103 Node* m = use->in(TypeFunc::Memory);
1104 uint iid = tinst->instance_id();
1105 while (m->is_Proj() && m->in(0)->is_SafePoint() &&
1106 m->in(0) != use && !m->in(0)->_idx != iid) {
1107 m = m->in(0)->in(TypeFunc::Memory);
1108 }
1109 if (m->is_MergeMem()) {
1110 mergemem_worklist.append_if_missing(m);
1111 }
1112 } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes 1112 } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
1113 Node* addp2 = find_second_addp(use, n); 1113 Node* addp2 = find_second_addp(use, n);
1114 if (addp2 != NULL) { 1114 if (addp2 != NULL) {
1115 alloc_worklist.append_if_missing(addp2); 1115 alloc_worklist.append_if_missing(addp2);
1116 } 1116 }
1119 use->is_CheckCastPP() || 1119 use->is_CheckCastPP() ||
1120 use->is_EncodeP() || 1120 use->is_EncodeP() ||
1121 use->is_DecodeN() || 1121 use->is_DecodeN() ||
1122 (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) { 1122 (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
1123 alloc_worklist.append_if_missing(use); 1123 alloc_worklist.append_if_missing(use);
1124 #ifdef ASSERT
1125 } else if (use->is_Mem()) {
1126 assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
1127 } else if (use->is_MergeMem()) {
1128 assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
1129 } else if (use->is_SafePoint()) {
1130 // Look for MergeMem nodes for calls which reference unique allocation
1131 // (through CheckCastPP nodes) even for debug info.
1132 Node* m = use->in(TypeFunc::Memory);
1133 if (m->is_MergeMem()) {
1134 assert(_mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
1135 }
1136 } else {
1137 uint op = use->Opcode();
1138 if (!(op == Op_CmpP || op == Op_Conv2B ||
1139 op == Op_CastP2X || op == Op_StoreCM ||
1140 op == Op_FastLock || op == Op_AryEq || op == Op_StrComp ||
1141 op == Op_StrEquals || op == Op_StrIndexOf)) {
1142 n->dump();
1143 use->dump();
1144 assert(false, "EA: missing allocation reference path");
1145 }
1146 #endif
1124 } 1147 }
1125 } 1148 }
1126 1149
1127 } 1150 }
1128 // New alias types were created in split_AddP(). 1151 // New alias types were created in split_AddP().
1136 1159
1137 while (memnode_worklist.length() != 0) { 1160 while (memnode_worklist.length() != 0) {
1138 Node *n = memnode_worklist.pop(); 1161 Node *n = memnode_worklist.pop();
1139 if (visited.test_set(n->_idx)) 1162 if (visited.test_set(n->_idx))
1140 continue; 1163 continue;
1141 if (n->is_Phi()) { 1164 if (n->is_Phi() || n->is_ClearArray()) {
1142 assert(n->as_Phi()->adr_type() != TypePtr::BOTTOM, "narrow memory slice required"); 1165 // we don't need to do anything, but the users must be pushed
1143 // we don't need to do anything, but the users must be pushed if we haven't processed 1166 } else if (n->is_MemBar()) { // Initialize, MemBar nodes
1144 // this Phi before 1167 // we don't need to do anything, but the users must be pushed
1145 } else if (n->is_Initialize()) { 1168 n = n->as_MemBar()->proj_out(TypeFunc::Memory);
1146 // we don't need to do anything, but the users of the memory projection must be pushed
1147 n = n->as_Initialize()->proj_out(TypeFunc::Memory);
1148 if (n == NULL) 1169 if (n == NULL)
1149 continue; 1170 continue;
1150 } else { 1171 } else {
1151 assert(n->is_Mem(), "memory node required."); 1172 assert(n->is_Mem(), "memory node required.");
1152 Node *addr = n->in(MemNode::Address); 1173 Node *addr = n->in(MemNode::Address);
1179 } 1200 }
1180 } 1201 }
1181 // push user on appropriate worklist 1202 // push user on appropriate worklist
1182 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 1203 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1183 Node *use = n->fast_out(i); 1204 Node *use = n->fast_out(i);
1184 if (use->is_Phi()) { 1205 if (use->is_Phi() || use->is_ClearArray()) {
1185 memnode_worklist.append_if_missing(use); 1206 memnode_worklist.append_if_missing(use);
1186 } else if(use->is_Mem() && use->in(MemNode::Memory) == n) { 1207 } else if(use->is_Mem() && use->in(MemNode::Memory) == n) {
1208 if (use->Opcode() == Op_StoreCM) // Ignore cardmark stores
1209 continue;
1187 memnode_worklist.append_if_missing(use); 1210 memnode_worklist.append_if_missing(use);
1188 } else if (use->is_Initialize()) { 1211 } else if (use->is_MemBar()) {
1189 memnode_worklist.append_if_missing(use); 1212 memnode_worklist.append_if_missing(use);
1213 #ifdef ASSERT
1214 } else if(use->is_Mem()) {
1215 assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
1190 } else if (use->is_MergeMem()) { 1216 } else if (use->is_MergeMem()) {
1191 mergemem_worklist.append_if_missing(use); 1217 assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
1218 } else {
1219 uint op = use->Opcode();
1220 if (!(op == Op_StoreCM ||
1221 (op == Op_CallLeaf && use->as_CallLeaf()->_name != NULL &&
1222 strcmp(use->as_CallLeaf()->_name, "g1_wb_pre") == 0) ||
1223 op == Op_AryEq || op == Op_StrComp ||
1224 op == Op_StrEquals || op == Op_StrIndexOf)) {
1225 n->dump();
1226 use->dump();
1227 assert(false, "EA: missing memory path");
1228 }
1229 #endif
1192 } 1230 }
1193 } 1231 }
1194 } 1232 }
1195 1233
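
The Op_StoreCM and g1_wb_pre cases above account for memory users created by card marking and G1 pre-barriers on oop stores; they are tolerated instead of being reported as a missing memory path. A hedged illustration of code that produces such users (the exact barrier nodes depend on the collector in use):

class OopStoreBarriers {
    static class Cell { Object val; }

    static Object f(Object o) {
        Cell c = new Cell();
        // An oop store is typically followed in the IR by a card-mark store
        // (and a G1 pre-barrier call when G1 is enabled).
        c.val = o;
        return c.val;
    }

    public static void main(String[] args) {
        Object r = null;
        for (int i = 0; i < 100_000; i++) {
            r = f("x");
        }
        System.out.println(r);
    }
}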
1196 // Phase 3: Process MergeMem nodes from mergemem_worklist. 1234 // Phase 3: Process MergeMem nodes from mergemem_worklist.
1197 // Walk each memory moving the first node encountered of each 1235 // Walk each memory slice moving the first node encountered of each
1198 // instance type to the input corresponding to its alias index. 1236 // instance type to the input corresponding to its alias index.
1199 while (mergemem_worklist.length() != 0) { 1237 uint length = _mergemem_worklist.length();
1200 Node *n = mergemem_worklist.pop(); 1238 for( uint next = 0; next < length; ++next ) {
1201 assert(n->is_MergeMem(), "MergeMem node required."); 1239 MergeMemNode* nmm = _mergemem_worklist.at(next);
1202 if (visited.test_set(n->_idx)) 1240 assert(!visited.test_set(nmm->_idx), "should not be visited before");
1203 continue;
1204 MergeMemNode *nmm = n->as_MergeMem();
1205 // Note: we don't want to use MergeMemStream here because we only want to 1241 // Note: we don't want to use MergeMemStream here because we only want to
1206 // scan inputs which exist at the start, not ones we add during processing. 1242 // scan inputs which exist at the start, not ones we add during processing.
1243 // Note 2: MergeMem may already contain instance memory slices added
1244 // during find_inst_mem() call when memory nodes were processed above.
1245 igvn->hash_delete(nmm);
1207 uint nslices = nmm->req(); 1246 uint nslices = nmm->req();
1208 igvn->hash_delete(nmm);
1209 for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) { 1247 for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
1210 Node* mem = nmm->in(i); 1248 Node* mem = nmm->in(i);
1211 Node* cur = NULL; 1249 Node* cur = NULL;
1212 if (mem == NULL || mem->is_top()) 1250 if (mem == NULL || mem->is_top())
1213 continue; 1251 continue;
1257 nmm->set_memory_at(ni, result); 1295 nmm->set_memory_at(ni, result);
1258 } 1296 }
1259 } 1297 }
1260 igvn->hash_insert(nmm); 1298 igvn->hash_insert(nmm);
1261 record_for_optimizer(nmm); 1299 record_for_optimizer(nmm);
1262
1263 // Propagate new memory slices to following MergeMem nodes.
1264 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1265 Node *use = n->fast_out(i);
1266 if (use->is_Call()) {
1267 CallNode* in = use->as_Call();
1268 if (in->proj_out(TypeFunc::Memory) != NULL) {
1269 Node* m = in->proj_out(TypeFunc::Memory);
1270 for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) {
1271 Node* mm = m->fast_out(j);
1272 if (mm->is_MergeMem()) {
1273 mergemem_worklist.append_if_missing(mm);
1274 }
1275 }
1276 }
1277 if (use->is_Allocate()) {
1278 use = use->as_Allocate()->initialization();
1279 if (use == NULL) {
1280 continue;
1281 }
1282 }
1283 }
1284 if (use->is_Initialize()) {
1285 InitializeNode* in = use->as_Initialize();
1286 if (in->proj_out(TypeFunc::Memory) != NULL) {
1287 Node* m = in->proj_out(TypeFunc::Memory);
1288 for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) {
1289 Node* mm = m->fast_out(j);
1290 if (mm->is_MergeMem()) {
1291 mergemem_worklist.append_if_missing(mm);
1292 }
1293 }
1294 }
1295 }
1296 }
1297 } 1300 }
1298 1301
1299 // Phase 4: Update the inputs of non-instance memory Phis and 1302 // Phase 4: Update the inputs of non-instance memory Phis and
1300 // the Memory input of memnodes 1303 // the Memory input of memnodes
1301 // First update the inputs of any non-instance Phi's from 1304 // First update the inputs of any non-instance Phi's from
1379 // for an escape status. See process_call_result() below. 1382 // for an escape status. See process_call_result() below.
1380 if (n->is_Allocate() || n->is_CallStaticJava() && 1383 if (n->is_Allocate() || n->is_CallStaticJava() &&
1381 ptnode_adr(n->_idx)->node_type() == PointsToNode::JavaObject) { 1384 ptnode_adr(n->_idx)->node_type() == PointsToNode::JavaObject) {
1382 has_allocations = true; 1385 has_allocations = true;
1383 } 1386 }
1384 if(n->is_AddP()) 1387 if(n->is_AddP()) {
1385 cg_worklist.append(n->_idx); 1388 // Collect address nodes which directly reference an allocation.
1389 // Use them during stage 3 below to build initial connection graph
1390 // field edges. Other field edges could be added after StoreP/LoadP
1391 // nodes are processed during stage 4 below.
1392 Node* base = get_addp_base(n);
1393 if(base->is_Proj() && base->in(0)->is_Allocate()) {
1394 cg_worklist.append(n->_idx);
1395 }
1396 } else if (n->is_MergeMem()) {
1397 // Collect all MergeMem nodes to add memory slices for
1398 // scalar replaceable objects in split_unique_types().
1399 _mergemem_worklist.append(n->as_MergeMem());
1400 }
1386 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 1401 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1387 Node* m = n->fast_out(i); // Get user 1402 Node* m = n->fast_out(i); // Get user
1388 worklist_init.push(m); 1403 worklist_init.push(m);
1389 } 1404 }
1390 } 1405 }
1421 if (ptn->node_type() != PointsToNode::UnknownType) 1436 if (ptn->node_type() != PointsToNode::UnknownType)
1422 cg_worklist.append(n->_idx); // Collect CG nodes 1437 cg_worklist.append(n->_idx); // Collect CG nodes
1423 } 1438 }
1424 } 1439 }
1425 1440
1426 VectorSet ptset(Thread::current()->resource_area()); 1441 Arena* arena = Thread::current()->resource_area();
1442 VectorSet ptset(arena);
1427 GrowableArray<uint> deferred_edges; 1443 GrowableArray<uint> deferred_edges;
1428 VectorSet visited(Thread::current()->resource_area()); 1444 VectorSet visited(arena);
1429 1445
1430 // 5. Remove deferred edges from the graph and collect 1446 // 5. Remove deferred edges from the graph and adjust
1431 // information needed for type splitting. 1447 // escape state of nonescaping objects.
1432 cg_length = cg_worklist.length(); 1448 cg_length = cg_worklist.length();
1433 for( uint next = 0; next < cg_length; ++next ) { 1449 for( uint next = 0; next < cg_length; ++next ) {
1434 int ni = cg_worklist.at(next); 1450 int ni = cg_worklist.at(next);
1435 PointsToNode* ptn = ptnode_adr(ni); 1451 PointsToNode* ptn = ptnode_adr(ni);
1436 PointsToNode::NodeType nt = ptn->node_type(); 1452 PointsToNode::NodeType nt = ptn->node_type();
1437 if (nt == PointsToNode::LocalVar || nt == PointsToNode::Field) { 1453 if (nt == PointsToNode::LocalVar || nt == PointsToNode::Field) {
1438 remove_deferred(ni, &deferred_edges, &visited); 1454 remove_deferred(ni, &deferred_edges, &visited);
1439 Node *n = ptn->_node; 1455 Node *n = ptn->_node;
1440 if (n->is_AddP()) { 1456 if (n->is_AddP()) {
1441 // Search for objects which are not scalar replaceable. 1457 // Search for objects which are not scalar replaceable
1442 // Mark their escape state as ArgEscape to propagate the state 1458 // and adjust their escape state.
1443 // to referenced objects. 1459 verify_escape_state(ni, ptset, igvn);
1444 // Note: currently there are no difference in compiler optimizations
1445 // for ArgEscape objects and NoEscape objects which are not
1446 // scalar replaceable.
1447
1448 int offset = ptn->offset();
1449 Node *base = get_addp_base(n);
1450 ptset.Clear();
1451 PointsTo(ptset, base, igvn);
1452 int ptset_size = ptset.Size();
1453
1454 // Check if a field's initializing value is recorded and add
1455 // a corresponding NULL field's value if it is not recorded.
1456 // Connection Graph does not record a default initialization by NULL
1457 // captured by Initialize node.
1458 //
1459 // Note: it will disable scalar replacement in some cases:
1460 //
1461 // Point p[] = new Point[1];
1462 // p[0] = new Point(); // Will be not scalar replaced
1463 //
1464 // but it will save us from incorrect optimizations in next cases:
1465 //
1466 // Point p[] = new Point[1];
1467 // if ( x ) p[0] = new Point(); // Will be not scalar replaced
1468 //
1469 // Without a control flow analysis we can't distinguish above cases.
1470 //
1471 if (offset != Type::OffsetBot && ptset_size == 1) {
1472 uint elem = ptset.getelem(); // Allocation node's index
1473 // It does not matter if it is not Allocation node since
1474 // only non-escaping allocations are scalar replaced.
1475 if (ptnode_adr(elem)->_node->is_Allocate() &&
1476 ptnode_adr(elem)->escape_state() == PointsToNode::NoEscape) {
1477 AllocateNode* alloc = ptnode_adr(elem)->_node->as_Allocate();
1478 InitializeNode* ini = alloc->initialization();
1479 Node* value = NULL;
1480 if (ini != NULL) {
1481 BasicType ft = UseCompressedOops ? T_NARROWOOP : T_OBJECT;
1482 Node* store = ini->find_captured_store(offset, type2aelembytes(ft), igvn);
1483 if (store != NULL && store->is_Store())
1484 value = store->in(MemNode::ValueIn);
1485 }
1486 if (value == NULL || value != ptnode_adr(value->_idx)->_node) {
1487 // A field's initializing value was not recorded. Add NULL.
1488 uint null_idx = UseCompressedOops ? _noop_null : _oop_null;
1489 add_pointsto_edge(ni, null_idx);
1490 }
1491 }
1492 }
1493
1494 // An object is not scalar replaceable if the field which may point
1495 // to it has unknown offset (unknown element of an array of objects).
1496 //
1497 if (offset == Type::OffsetBot) {
1498 uint e_cnt = ptn->edge_count();
1499 for (uint ei = 0; ei < e_cnt; ei++) {
1500 uint npi = ptn->edge_target(ei);
1501 set_escape_state(npi, PointsToNode::ArgEscape);
1502 ptnode_adr(npi)->_scalar_replaceable = false;
1503 }
1504 }
1505
1506 // Currently an object is not scalar replaceable if a LoadStore node
1507 // access its field since the field value is unknown after it.
1508 //
1509 bool has_LoadStore = false;
1510 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1511 Node *use = n->fast_out(i);
1512 if (use->is_LoadStore()) {
1513 has_LoadStore = true;
1514 break;
1515 }
1516 }
1517 // An object is not scalar replaceable if the address points
1518 // to unknown field (unknown element for arrays, offset is OffsetBot).
1519 //
1520 // Or the address may point to more then one object. This may produce
1521 // the false positive result (set scalar_replaceable to false)
1522 // since the flow-insensitive escape analysis can't separate
1523 // the case when stores overwrite the field's value from the case
1524 // when stores happened on different control branches.
1525 //
1526 if (ptset_size > 1 || ptset_size != 0 &&
1527 (has_LoadStore || offset == Type::OffsetBot)) {
1528 for( VectorSetI j(&ptset); j.test(); ++j ) {
1529 set_escape_state(j.elem, PointsToNode::ArgEscape);
1530 ptnode_adr(j.elem)->_scalar_replaceable = false;
1531 }
1532 }
1533 } 1460 }
1534 } 1461 }
1535 } 1462 }
1536 1463
1537 // 6. Propagate escape states. 1464 // 6. Propagate escape states.
1644 #endif 1571 #endif
1645 } 1572 }
1646 return has_non_escaping_obj; 1573 return has_non_escaping_obj;
1647 } 1574 }
1648 1575
1576 // Search for objects which are not scalar replaceable.
1577 void ConnectionGraph::verify_escape_state(int nidx, VectorSet& ptset, PhaseTransform* phase) {
1578 PointsToNode* ptn = ptnode_adr(nidx);
1579 Node* n = ptn->_node;
1580 assert(n->is_AddP(), "Should be called for AddP nodes only");
1581 // Search for objects which are not scalar replaceable.
1582 // Mark their escape state as ArgEscape to propagate the state
1583 // to referenced objects.
1584 // Note: currently there is no difference in compiler optimizations
1585 // for ArgEscape objects and NoEscape objects which are not
1586 // scalar replaceable.
1587
1588 Compile* C = _compile;
1589
1590 int offset = ptn->offset();
1591 Node* base = get_addp_base(n);
1592 ptset.Clear();
1593 PointsTo(ptset, base, phase);
1594 int ptset_size = ptset.Size();
1595
1596 // Check if an oop field's initializing value is recorded and add
1597 // a corresponding NULL field's value if it is not recorded.
1598 // Connection Graph does not record a default initialization by NULL
1599 // captured by Initialize node.
1600 //
1601 // Note: it will disable scalar replacement in some cases:
1602 //
1603 // Point p[] = new Point[1];
1604 // p[0] = new Point(); // Will be not scalar replaced
1605 //
1606 // but it will save us from incorrect optimizations in next cases:
1607 //
1608 // Point p[] = new Point[1];
1609 // if ( x ) p[0] = new Point(); // Will be not scalar replaced
1610 //
1611 // Do a simple control flow analysis to distinguish above cases.
1612 //
1613 if (offset != Type::OffsetBot && ptset_size == 1) {
1614 uint elem = ptset.getelem(); // Allocation node's index
1615 // It does not matter if it is not Allocation node since
1616 // only non-escaping allocations are scalar replaced.
1617 if (ptnode_adr(elem)->_node->is_Allocate() &&
1618 ptnode_adr(elem)->escape_state() == PointsToNode::NoEscape) {
1619 AllocateNode* alloc = ptnode_adr(elem)->_node->as_Allocate();
1620 InitializeNode* ini = alloc->initialization();
1621
1622 // Check only oop fields.
1623 const Type* adr_type = n->as_AddP()->bottom_type();
1624 BasicType basic_field_type = T_INT;
1625 if (adr_type->isa_instptr()) {
1626 ciField* field = C->alias_type(adr_type->isa_instptr())->field();
1627 if (field != NULL) {
1628 basic_field_type = field->layout_type();
1629 } else {
1630 // Ignore non field load (for example, klass load)
1631 }
1632 } else if (adr_type->isa_aryptr()) {
1633 const Type* elemtype = adr_type->isa_aryptr()->elem();
1634 basic_field_type = elemtype->array_element_basic_type();
1635 } else {
1636 // Raw pointers are used for initializing stores so skip it.
1637 assert(adr_type->isa_rawptr() && base->is_Proj() &&
1638 (base->in(0) == alloc),"unexpected pointer type");
1639 }
1640 if (basic_field_type == T_OBJECT ||
1641 basic_field_type == T_NARROWOOP ||
1642 basic_field_type == T_ARRAY) {
1643 Node* value = NULL;
1644 if (ini != NULL) {
1645 BasicType ft = UseCompressedOops ? T_NARROWOOP : T_OBJECT;
1646 Node* store = ini->find_captured_store(offset, type2aelembytes(ft), phase);
1647 if (store != NULL && store->is_Store()) {
1648 value = store->in(MemNode::ValueIn);
1649 } else if (ptn->edge_count() > 0) { // Are there oop stores?
1650 // Check for a store which follows allocation without branches.
1651 // For example, a volatile field store is not collected
1652 // by Initialize node. TODO: it would be nice to use idom() here.
1653 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1654 store = n->fast_out(i);
1655 if (store->is_Store() && store->in(0) != NULL) {
1656 Node* ctrl = store->in(0);
1657 while(!(ctrl == ini || ctrl == alloc || ctrl == NULL ||
1658 ctrl == C->root() || ctrl == C->top() || ctrl->is_Region() ||
1659 ctrl->is_IfTrue() || ctrl->is_IfFalse())) {
1660 ctrl = ctrl->in(0);
1661 }
1662 if (ctrl == ini || ctrl == alloc) {
1663 value = store->in(MemNode::ValueIn);
1664 break;
1665 }
1666 }
1667 }
1668 }
1669 }
1670 if (value == NULL || value != ptnode_adr(value->_idx)->_node) {
1671 // A field's initializing value was not recorded. Add NULL.
1672 uint null_idx = UseCompressedOops ? _noop_null : _oop_null;
1673 add_pointsto_edge(nidx, null_idx);
1674 }
1675 }
1676 }
1677 }
1678
1679 // An object is not scalar replaceable if the field which may point
1680 // to it has unknown offset (unknown element of an array of objects).
1681 //
1682 if (offset == Type::OffsetBot) {
1683 uint e_cnt = ptn->edge_count();
1684 for (uint ei = 0; ei < e_cnt; ei++) {
1685 uint npi = ptn->edge_target(ei);
1686 set_escape_state(npi, PointsToNode::ArgEscape);
1687 ptnode_adr(npi)->_scalar_replaceable = false;
1688 }
1689 }
1690
1691 // Currently an object is not scalar replaceable if a LoadStore node
1692 // access its field since the field value is unknown after it.
1693 //
1694 bool has_LoadStore = false;
1695 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1696 Node *use = n->fast_out(i);
1697 if (use->is_LoadStore()) {
1698 has_LoadStore = true;
1699 break;
1700 }
1701 }
1702 // An object is not scalar replaceable if the address points
1703 // to unknown field (unknown element for arrays, offset is OffsetBot).
1704 //
1705 // Or the address may point to more than one object. This may produce
1706 // the false positive result (set scalar_replaceable to false)
1707 // since the flow-insensitive escape analysis can't separate
1708 // the case when stores overwrite the field's value from the case
1709 // when stores happened on different control branches.
1710 //
1711 if (ptset_size > 1 || ptset_size != 0 &&
1712 (has_LoadStore || offset == Type::OffsetBot)) {
1713 for( VectorSetI j(&ptset); j.test(); ++j ) {
1714 set_escape_state(j.elem, PointsToNode::ArgEscape);
1715 ptnode_adr(j.elem)->_scalar_replaceable = false;
1716 }
1717 }
1718 }
1719
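
The "store which follows allocation without branches" scan above handles oop stores that the Initialize node does not capture, the comment's example being a volatile field. A hypothetical Java shape (class and field names are invented):

class VolatileInit {
    static class Holder { volatile Object ref; }

    static int f(Object o) {
        Holder h = new Holder();   // non-escaping
        h.ref = o;                 // volatile store: not a captured initializing store
        // Without scanning the allocation's users for this store, the
        // connection graph would only record the default null value.
        return (h.ref == null) ? 0 : 1;
    }

    public static void main(String[] args) {
        int s = 0;
        for (int i = 0; i < 100_000; i++) {
            s += f("x");
        }
        System.out.println(s);
    }
}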
1649 void ConnectionGraph::process_call_arguments(CallNode *call, PhaseTransform *phase) { 1720 void ConnectionGraph::process_call_arguments(CallNode *call, PhaseTransform *phase) {
1650 1721
1651 switch (call->Opcode()) { 1722 switch (call->Opcode()) {
1652 #ifdef ASSERT 1723 #ifdef ASSERT
1653 case Op_Allocate: 1724 case Op_Allocate:
1655 case Op_Lock: 1726 case Op_Lock:
1656 case Op_Unlock: 1727 case Op_Unlock:
1657 assert(false, "should be done already"); 1728 assert(false, "should be done already");
1658 break; 1729 break;
1659 #endif 1730 #endif
1731 case Op_CallLeaf:
1660 case Op_CallLeafNoFP: 1732 case Op_CallLeafNoFP:
1661 { 1733 {
1662 // Stub calls, objects do not escape but they are not scalar replaceable. 1734
1663 // Adjust escape state for outgoing arguments. 1735 // Adjust escape state for outgoing arguments.
1664 const TypeTuple * d = call->tf()->domain(); 1736 const TypeTuple * d = call->tf()->domain();
1665 VectorSet ptset(Thread::current()->resource_area()); 1737 VectorSet ptset(Thread::current()->resource_area());
1666 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { 1738 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1667 const Type* at = d->field_at(i); 1739 const Type* at = d->field_at(i);
1668 Node *arg = call->in(i)->uncast(); 1740 Node *arg = call->in(i)->uncast();
1669 const Type *aat = phase->type(arg); 1741 const Type *aat = phase->type(arg);
1670 if (!arg->is_top() && at->isa_ptr() && aat->isa_ptr()) { 1742 if (!arg->is_top() && at->isa_ptr() && aat->isa_ptr() &&
1743 ptnode_adr(arg->_idx)->escape_state() < PointsToNode::ArgEscape) {
1744
1671 assert(aat == Type::TOP || aat == TypePtr::NULL_PTR || 1745 assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
1672 aat->isa_ptr() != NULL, "expecting an Ptr"); 1746 aat->isa_ptr() != NULL, "expecting an Ptr");
1747 #ifdef ASSERT
1748 if (!(call->Opcode() == Op_CallLeafNoFP &&
1749 call->as_CallLeaf()->_name != NULL &&
1750 (strstr(call->as_CallLeaf()->_name, "arraycopy") != 0) ||
1751 call->as_CallLeaf()->_name != NULL &&
1752 (strcmp(call->as_CallLeaf()->_name, "g1_wb_pre") == 0 ||
1753 strcmp(call->as_CallLeaf()->_name, "g1_wb_post") == 0 ))
1754 ) {
1755 call->dump();
1756 assert(false, "EA: unexpected CallLeaf");
1757 }
1758 #endif
1673 set_escape_state(arg->_idx, PointsToNode::ArgEscape); 1759 set_escape_state(arg->_idx, PointsToNode::ArgEscape);
1674 if (arg->is_AddP()) { 1760 if (arg->is_AddP()) {
1675 // 1761 //
1676 // The inline_native_clone() case when the arraycopy stub is called 1762 // The inline_native_clone() case when the arraycopy stub is called
1677 // after the allocation before Initialize and CheckCastPP nodes. 1763 // after the allocation before Initialize and CheckCastPP nodes.
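
The inline_native_clone() comment above refers to the intrinsified clone() of an array: the arraycopy stub (a leaf call) runs on the raw address of the fresh allocation before its Initialize and CheckCastPP nodes, so the stub's arguments are marked ArgEscape. A sketch of the Java side (illustrative only):

class CloneStub {
    static int f(int n) {
        int[] src = new int[4];
        src[0] = n;
        int[] dst = src.clone();   // intrinsified clone: arraycopy stub call
        return dst[0];
    }

    public static void main(String[] args) {
        int s = 0;
        for (int i = 0; i < 100_000; i++) {
            s += f(i);
        }
        System.out.println(s);
    }
}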
1704 VectorSet ptset(Thread::current()->resource_area()); 1790 VectorSet ptset(Thread::current()->resource_area());
1705 bool copy_dependencies = false; 1791 bool copy_dependencies = false;
1706 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { 1792 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1707 const Type* at = d->field_at(i); 1793 const Type* at = d->field_at(i);
1708 int k = i - TypeFunc::Parms; 1794 int k = i - TypeFunc::Parms;
1709 1795 Node *arg = call->in(i)->uncast();
1710 if (at->isa_oopptr() != NULL) { 1796
1711 Node *arg = call->in(i)->uncast(); 1797 if (at->isa_oopptr() != NULL &&
1798 ptnode_adr(arg->_idx)->escape_state() < PointsToNode::ArgEscape) {
1712 1799
1713 bool global_escapes = false; 1800 bool global_escapes = false;
1714 bool fields_escapes = false; 1801 bool fields_escapes = false;
1715 if (!call_analyzer->is_arg_stack(k)) { 1802 if (!call_analyzer->is_arg_stack(k)) {
1716 // The argument global escapes, mark everything it could point to 1803 // The argument global escapes, mark everything it could point to
1940 // Put Lock and Unlock nodes on IGVN worklist to process them during 2027 // Put Lock and Unlock nodes on IGVN worklist to process them during
1941 // the first IGVN optimization when escape information is still available. 2028 // the first IGVN optimization when escape information is still available.
1942 record_for_optimizer(n); 2029 record_for_optimizer(n);
1943 _processed.set(n->_idx); 2030 _processed.set(n->_idx);
1944 } else { 2031 } else {
1945 // Have to process call's arguments first. 2032 // Don't mark as processed since call's arguments have to be processed.
1946 PointsToNode::NodeType nt = PointsToNode::UnknownType; 2033 PointsToNode::NodeType nt = PointsToNode::UnknownType;
2034 PointsToNode::EscapeState es = PointsToNode::UnknownEscape;
1947 2035
1948 // Check if a call returns an object. 2036 // Check if a call returns an object.
1949 const TypeTuple *r = n->as_Call()->tf()->range(); 2037 const TypeTuple *r = n->as_Call()->tf()->range();
1950 if (n->is_CallStaticJava() && r->cnt() > TypeFunc::Parms && 2038 if (r->cnt() > TypeFunc::Parms &&
2039 r->field_at(TypeFunc::Parms)->isa_ptr() &&
1951 n->as_Call()->proj_out(TypeFunc::Parms) != NULL) { 2040 n->as_Call()->proj_out(TypeFunc::Parms) != NULL) {
1952 // Note: use isa_ptr() instead of isa_oopptr() here because 2041 nt = PointsToNode::JavaObject;
1953 // the _multianewarray functions return a TypeRawPtr. 2042 if (!n->is_CallStaticJava()) {
1954 if (r->field_at(TypeFunc::Parms)->isa_ptr() != NULL) { 2043 // Since the called method is statically unknown, assume
1955 nt = PointsToNode::JavaObject; 2044 // the worst case that the returned value globally escapes.
1956 } 2045 es = PointsToNode::GlobalEscape;
1957 } 2046 }
1958 add_node(n, nt, PointsToNode::UnknownEscape, false); 2047 }
2048 add_node(n, nt, es, false);
1959 } 2049 }
1960 return; 2050 return;
1961 } 2051 }
1962 2052
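
With the change above, a call whose callee is statically unknown (anything other than a CallStaticJava) and which returns a pointer records its result as GlobalEscape up front. Roughly, in Java terms (names are invented):

class UnknownCallee {
    interface Factory { Object make(); }

    static int f(Factory factory) {
        // The callee behind make() is not known at compile time, so the
        // returned object is conservatively assumed to escape globally.
        Object o = factory.make();
        return (o != null) ? 1 : 0;
    }

    public static void main(String[] args) {
        Factory factory = new Factory() {
            public Object make() { return new Object(); }
        };
        int s = 0;
        for (int i = 0; i < 100_000; i++) {
            s += f(factory);
        }
        System.out.println(s);
    }
}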
1963 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because 2053 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
2086 _delayed_worklist.push(n); 2176 _delayed_worklist.push(n);
2087 break; 2177 break;
2088 } 2178 }
2089 case Op_Proj: 2179 case Op_Proj:
2090 { 2180 {
2091 // we are only interested in the result projection from a call 2181 // we are only interested in the oop result projection from a call
2092 if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() ) { 2182 if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() ) {
2093 add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false); 2183 const TypeTuple *r = n->in(0)->as_Call()->tf()->range();
2094 process_call_result(n->as_Proj(), phase); 2184 assert(r->cnt() > TypeFunc::Parms, "sanity");
2095 if (!_processed.test(n->_idx)) { 2185 if (r->field_at(TypeFunc::Parms)->isa_ptr() != NULL) {
2096 // The call's result may need to be processed later if the call 2186 add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
2097 // returns it's argument and the argument is not processed yet. 2187 int ti = n->in(0)->_idx;
2098 _delayed_worklist.push(n); 2188 // The call may not be registered yet (since not all its inputs are registered)
2099 } 2189 // if this is the projection from backbranch edge of Phi.
2100 } else { 2190 if (ptnode_adr(ti)->node_type() != PointsToNode::UnknownType) {
2101 _processed.set(n->_idx); 2191 process_call_result(n->as_Proj(), phase);
2102 } 2192 }
2193 if (!_processed.test(n->_idx)) {
2194 // The call's result may need to be processed later if the call
2195 // returns it's argument and the argument is not processed yet.
2196 _delayed_worklist.push(n);
2197 }
2198 break;
2199 }
2200 }
2201 _processed.set(n->_idx);
2103 break; 2202 break;
2104 } 2203 }
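
The "projection from backbranch edge of Phi" note above describes the call's result projection being reached through a loop Phi before the call itself has been registered. A hedged Java shape that produces such a graph:

class BackEdgeProj {
    static Object step(Object o) {
        return new Object();
    }

    static Object loop(int n) {
        Object cur = new Object();
        for (int i = 0; i < n; i++) {
            cur = step(cur);   // the call result feeds the loop Phi's back edge
        }
        return cur;
    }

    public static void main(String[] args) {
        System.out.println(loop(100_000) != null);
    }
}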
2105 case Op_Return: 2204 case Op_Return:
2106 { 2205 {
2107 if( n->req() > TypeFunc::Parms && 2206 if( n->req() > TypeFunc::Parms &&
2158 _processed.set(n->_idx); 2257 _processed.set(n->_idx);
2159 return; 2258 return;
2160 } 2259 }
2161 break; 2260 break;
2162 } 2261 }
2262 case Op_AryEq:
2263 case Op_StrComp:
2264 case Op_StrEquals:
2265 case Op_StrIndexOf:
2266 {
2267 // char[] arrays passed to string intrinsics are not scalar replaceable.
2268 add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
2269 break;
2270 }
2163 case Op_ThreadLocal: 2271 case Op_ThreadLocal:
2164 { 2272 {
2165 add_node(n, PointsToNode::JavaObject, PointsToNode::ArgEscape, true); 2273 add_node(n, PointsToNode::JavaObject, PointsToNode::ArgEscape, true);
2166 break; 2274 break;
2167 } 2275 }
2172 return; 2280 return;
2173 } 2281 }
2174 2282
2175 void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) { 2283 void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) {
2176 uint n_idx = n->_idx; 2284 uint n_idx = n->_idx;
2285 assert(ptnode_adr(n_idx)->_node != NULL, "node should be registered");
2177 2286
2178 // Don't set processed bit for AddP, LoadP, StoreP since 2287 // Don't set processed bit for AddP, LoadP, StoreP since
2179 // they may need more then one pass to process. 2288 // they may need more then one pass to process.
2180 if (_processed.test(n_idx)) 2289 if (_processed.test(n_idx))
2181 return; // No need to redefine node's state. 2290 return; // No need to redefine node's state.
2209 case Op_CheckCastPP: 2318 case Op_CheckCastPP:
2210 case Op_EncodeP: 2319 case Op_EncodeP:
2211 case Op_DecodeN: 2320 case Op_DecodeN:
2212 { 2321 {
2213 int ti = n->in(1)->_idx; 2322 int ti = n->in(1)->_idx;
2323 assert(ptnode_adr(ti)->node_type() != PointsToNode::UnknownType, "all nodes should be registered");
2214 if (ptnode_adr(ti)->node_type() == PointsToNode::JavaObject) { 2324 if (ptnode_adr(ti)->node_type() == PointsToNode::JavaObject) {
2215 add_pointsto_edge(n_idx, ti); 2325 add_pointsto_edge(n_idx, ti);
2216 } else { 2326 } else {
2217 add_deferred_edge(n_idx, ti); 2327 add_deferred_edge(n_idx, ti);
2218 } 2328 }
2248 if (t->make_ptr() == NULL) 2358 if (t->make_ptr() == NULL)
2249 assert(false, "Op_LoadP"); 2359 assert(false, "Op_LoadP");
2250 #endif 2360 #endif
2251 2361
2252 Node* adr = n->in(MemNode::Address)->uncast(); 2362 Node* adr = n->in(MemNode::Address)->uncast();
2253 const Type *adr_type = phase->type(adr);
2254 Node* adr_base; 2363 Node* adr_base;
2255 if (adr->is_AddP()) { 2364 if (adr->is_AddP()) {
2256 adr_base = get_addp_base(adr); 2365 adr_base = get_addp_base(adr);
2257 } else { 2366 } else {
2258 adr_base = adr; 2367 adr_base = adr;
2300 _processed.set(n_idx); 2409 _processed.set(n_idx);
2301 break; 2410 break;
2302 } 2411 }
2303 case Op_Proj: 2412 case Op_Proj:
2304 { 2413 {
2305 // we are only interested in the result projection from a call 2414 // we are only interested in the oop result projection from a call
2306 if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() ) { 2415 if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() ) {
2307 process_call_result(n->as_Proj(), phase); 2416 assert(ptnode_adr(n->in(0)->_idx)->node_type() != PointsToNode::UnknownType,
2308 assert(_processed.test(n_idx), "all call results should be processed"); 2417 "all nodes should be registered");
2309 } else { 2418 const TypeTuple *r = n->in(0)->as_Call()->tf()->range();
2310 assert(false, "Op_Proj"); 2419 assert(r->cnt() > TypeFunc::Parms, "sanity");
2311 } 2420 if (r->field_at(TypeFunc::Parms)->isa_ptr() != NULL) {
2421 process_call_result(n->as_Proj(), phase);
2422 assert(_processed.test(n_idx), "all call results should be processed");
2423 break;
2424 }
2425 }
2426 assert(false, "Op_Proj");
2312 break; 2427 break;
2313 } 2428 }
2314 case Op_Return: 2429 case Op_Return:
2315 { 2430 {
2316 #ifdef ASSERT 2431 #ifdef ASSERT
2318 !phase->type(n->in(TypeFunc::Parms))->isa_oopptr() ) { 2433 !phase->type(n->in(TypeFunc::Parms))->isa_oopptr() ) {
2319 assert(false, "Op_Return"); 2434 assert(false, "Op_Return");
2320 } 2435 }
2321 #endif 2436 #endif
2322 int ti = n->in(TypeFunc::Parms)->_idx; 2437 int ti = n->in(TypeFunc::Parms)->_idx;
2438 assert(ptnode_adr(ti)->node_type() != PointsToNode::UnknownType, "node should be registered");
2323 if (ptnode_adr(ti)->node_type() == PointsToNode::JavaObject) { 2439 if (ptnode_adr(ti)->node_type() == PointsToNode::JavaObject) {
2324 add_pointsto_edge(n_idx, ti); 2440 add_pointsto_edge(n_idx, ti);
2325 } else { 2441 } else {
2326 add_deferred_edge(n_idx, ti); 2442 add_deferred_edge(n_idx, ti);
2327 } 2443 }
2352 uint pt = i.elem; 2468 uint pt = i.elem;
2353 add_edge_from_fields(pt, val->_idx, address_offset(adr, phase)); 2469 add_edge_from_fields(pt, val->_idx, address_offset(adr, phase));
2354 } 2470 }
2355 break; 2471 break;
2356 } 2472 }
2473 case Op_AryEq:
2474 case Op_StrComp:
2475 case Op_StrEquals:
2476 case Op_StrIndexOf:
2477 {
2478 // char[] arrays passed to string intrinsics do not escape but
2479 // they are not scalar replaceable. Adjust escape state for them.
2480 // Start from in(2) edge since in(1) is memory edge.
2481 for (uint i = 2; i < n->req(); i++) {
2482 Node* adr = n->in(i)->uncast();
2483 const Type *at = phase->type(adr);
2484 if (!adr->is_top() && at->isa_ptr()) {
2485 assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
2486 at->isa_ptr() != NULL, "expecting an Ptr");
2487 if (adr->is_AddP()) {
2488 adr = get_addp_base(adr);
2489 }
2490 // Mark as ArgEscape everything "adr" could point to.
2491 set_escape_state(adr->_idx, PointsToNode::ArgEscape);
2492 }
2493 }
2494 _processed.set(n_idx);
2495 break;
2496 }
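
The string-intrinsic cases above (AryEq, StrComp, StrEquals, StrIndexOf) mark every char[] reaching the intrinsic as ArgEscape, so it is never scalar replaced even though it does not escape the method. A rough illustration, assuming equals() is intrinsified here (names are invented):

class StrIntrinsicArg {
    static boolean same(String s) {
        StringBuilder sb = new StringBuilder();
        sb.append('a').append('b').append('c');
        // The character buffer behind the built string reaches the StrEquals
        // intrinsic via equals(), so it must stay a real array rather than
        // being scalar replaced.
        return sb.toString().equals(s);
    }

    public static void main(String[] args) {
        int hits = 0;
        for (int i = 0; i < 100_000; i++) {
            if (same("abc")) {
                hits++;
            }
        }
        System.out.println(hits);
    }
}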
2357 case Op_ThreadLocal: 2497 case Op_ThreadLocal:
2358 { 2498 {
2359 assert(false, "Op_ThreadLocal"); 2499 assert(false, "Op_ThreadLocal");
2360 break; 2500 break;
2361 } 2501 }
2362 default: 2502 default:
2363 ; 2503 // This method should be called only for EA specific nodes.
2364 // nothing to do 2504 ShouldNotReachHere();
2365 } 2505 }
2366 } 2506 }
2367 2507
2368 #ifndef PRODUCT 2508 #ifndef PRODUCT
2369 void ConnectionGraph::dump() { 2509 void ConnectionGraph::dump() {