comparison src/share/vm/opto/graphKit.cpp @ 18041:52b4284cb496

Merge with jdk8u20-b26
author Gilles Duboscq <duboscq@ssw.jku.at>
date Wed, 15 Oct 2014 16:02:50 +0200
parents 89152779163c 00c8a1255912
children 7848fc12602b
comparison of 17606:45d7b2c7029d vs. 18041:52b4284cb496
@@ -418,11 +418,11 @@
   } else {
     while (dst->req() < region->req())  add_one_req(dst, src);
   }
   const Type* srctype = _gvn.type(src);
   if (phi->type() != srctype) {
-    const Type* dsttype = phi->type()->meet(srctype);
+    const Type* dsttype = phi->type()->meet_speculative(srctype);
     if (phi->type() != dsttype) {
       phi->set_type(dsttype);
       _gvn.set_type(phi, dsttype);
     }
   }
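
Note: the merge widens Phi types with meet_speculative() instead of meet(), keeping the profile-derived speculative component of the type alive across merges. A minimal sketch of the contrast, assuming the 8u Type API in which remove_speculative() strips that component:

    // Both operators widen the type; only meet_speculative() preserves
    // the speculative part so later guards can still use it.
    const Type* widened = phi->type()->meet_speculative(srctype);
    const Type* plain   = widened->remove_speculative(); // roughly what meet() yields
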
@@ -492,11 +492,11 @@
   // take the uncommon_trap in the BuildCutout below.

   // first must access the should_post_on_exceptions_flag in this thread's JavaThread
   Node* jthread = _gvn.transform(new (C) ThreadLocalNode());
   Node* adr = basic_plus_adr(top(), jthread, in_bytes(JavaThread::should_post_on_exceptions_flag_offset()));
-  Node* should_post_flag = make_load(control(), adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw, false);
+  Node* should_post_flag = make_load(control(), adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw, MemNode::unordered);

   // Test the should_post_on_exceptions_flag vs. 0
   Node* chk = _gvn.transform( new (C) CmpINode(should_post_flag, intcon(0)) );
   Node* tst = _gvn.transform( new (C) BoolNode(chk, BoolTest::eq) );
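
In plain C terms (a sketch, not HotSpot code), the IR built here computes:

    // Read the thread-local flag and branch on it being zero.
    char* jthread = /* ThreadLocalNode: the current JavaThread */;
    int should_post = *(int*)(jthread +
        in_bytes(JavaThread::should_post_on_exceptions_flag_offset()));
    bool no_posting = (should_post == 0);  // CmpI against intcon(0), BoolTest::eq
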
@@ -594,11 +594,12 @@
     int offset = java_lang_Throwable::get_detailMessage_offset();
     const TypePtr* adr_typ = ex_con->add_offset(offset);

     Node *adr = basic_plus_adr(ex_node, ex_node, offset);
     const TypeOopPtr* val_type = TypeOopPtr::make_from_klass(env()->String_klass());
-    Node *store = store_oop_to_object(control(), ex_node, adr, adr_typ, null(), val_type, T_OBJECT);
+    // Conservatively release stores of object references.
+    Node *store = store_oop_to_object(control(), ex_node, adr, adr_typ, null(), val_type, T_OBJECT, MemNode::release);

     add_exception_state(make_exception_state(ex_node));
     return;
   }
 }
@@ -609,13 +610,14 @@
   // create the stack trace.

   // Usual case:  Bail to interpreter.
   // Reserve the right to recompile if we haven't seen anything yet.

+  assert(!Deoptimization::reason_is_speculate(reason), "unsupported");
   Deoptimization::DeoptAction action = Deoptimization::Action_maybe_recompile;
   if (treat_throw_as_hot
-      && (method()->method_data()->trap_recompiled_at(bci())
+      && (method()->method_data()->trap_recompiled_at(bci(), NULL)
           || C->too_many_traps(reason))) {
     // We cannot afford to take more traps here.  Suffer in the interpreter.
     if (C->log() != NULL)
       C->log()->elem("hot_throw preallocated='0' reason='%s' mcount='%d'",
                      Deoptimization::trap_reason_name(reason),
@@ -1121,10 +1123,21 @@
   if (offset_con != Type::OffsetBot) {
     return longcon((jlong) offset_con);
   }
   return _gvn.transform( new (C) ConvI2LNode(offset));
 }
+
+Node* GraphKit::ConvI2UL(Node* offset) {
+  juint offset_con = (juint) find_int_con(offset, Type::OffsetBot);
+  if (offset_con != (juint) Type::OffsetBot) {
+    return longcon((julong) offset_con);
+  }
+  Node* conv = _gvn.transform( new (C) ConvI2LNode(offset));
+  Node* mask = _gvn.transform( ConLNode::make(C, (julong) max_juint) );
+  return _gvn.transform( new (C) AndLNode(conv, mask) );
+}
+
 Node* GraphKit::ConvL2I(Node* offset) {
   // short-circuit a common case
   jlong offset_con = find_long_con(offset, (jlong)Type::OffsetBot);
   if (offset_con != (jlong)Type::OffsetBot) {
     return intcon((int) offset_con);
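
The new ConvI2UL() zero-extends an int offset to a long: sign-extend with ConvI2L, then mask off the high 32 bits with AndL. A self-contained C++ sketch of the identity the IR relies on (not HotSpot code):

    #include <stdint.h>
    // (uint64_t)(uint32_t)x == ((uint64_t)(int64_t)x) & 0xFFFFFFFF
    uint64_t conv_i2ul(int32_t x) {
      uint64_t conv = (uint64_t)(int64_t)x;  // ConvI2L: sign-extend to 64 bits
      uint64_t mask = 0xFFFFFFFFull;         // ConL constant: max_juint
      return conv & mask;                    // AndL: discard the sign extension
    }
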
@@ -1221,11 +1234,11 @@
     }
   } else {
     // See if mixing in the NULL pointer changes type.
     // If so, then the NULL pointer was not allowed in the original
     // type.  In other words, "value" was not-null.
-    if (t->meet(TypePtr::NULL_PTR) != t) {
+    if (t->meet(TypePtr::NULL_PTR) != t->remove_speculative()) {
       // same as: if (!TypePtr::NULL_PTR->higher_equal(t)) ...
       explicit_null_checks_elided++;
       return value;           // Elided null check quickly!
     }
   }
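
Why this elides the check: if folding NULL into t changes the type, then t excluded NULL, so the value is provably non-null. A hedged lattice example, assuming the usual C2 pointer types:

    // t = TypeInstPtr::NOTNULL:  t->meet(TypePtr::NULL_PTR) widens to a
    //   maybe-null pointer != t  => NULL was excluded, elide the check.
    // t = TypeInstPtr::BOTTOM:   t->meet(TypePtr::NULL_PTR) == t
    //   => NULL is possible, keep the check.
    // The comparison is now against t->remove_speculative() so that a
    // speculative component alone does not defeat the equality test.
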
@@ -1354,11 +1367,11 @@

 //------------------------------cast_not_null----------------------------------
 // Cast obj to not-null on this path
 Node* GraphKit::cast_not_null(Node* obj, bool do_replace_in_map) {
   const Type *t = _gvn.type(obj);
-  const Type *t_not_null = t->join(TypePtr::NOTNULL);
+  const Type *t_not_null = t->join_speculative(TypePtr::NOTNULL);
   // Object is already not-null?
   if( t == t_not_null ) return obj;

   Node *cast = new (C) CastPPNode(obj,t_not_null);
   cast->init_req(0, control());
@@ -1481,23 +1494,23 @@
 //

 // factory methods in "int adr_idx"
 Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
                           int adr_idx,
-                          bool require_atomic_access) {
+                          MemNode::MemOrd mo, bool require_atomic_access) {
   assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
   const TypePtr* adr_type = NULL; // debug-mode-only argument
   debug_only(adr_type = C->get_adr_type(adr_idx));
   Node* mem = memory(adr_idx);
   Node* ld;
   if (require_atomic_access && bt == T_LONG) {
-    ld = LoadLNode::make_atomic(C, ctl, mem, adr, adr_type, t);
+    ld = LoadLNode::make_atomic(C, ctl, mem, adr, adr_type, t, mo);
   } else {
-    ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt);
+    ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo);
   }
   ld = _gvn.transform(ld);
   if ((bt == T_OBJECT) && C->do_escape_analysis() || C->eliminate_boxing()) {
     // Improve graph before escape analysis and boxing elimination.
     record_for_igvn(ld);
   }
   return ld;
 }
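
Every make_load() call site now states its memory ordering explicitly. A hedged usage sketch matching the call sites updated elsewhere in this change:

    // A raw-offset int load that needs no ordering guarantees:
    Node* cnt = make_load(control(), counter_addr, TypeInt::INT, T_INT,
                          Compile::AliasIdxRaw, MemNode::unordered);
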
@@ -1504,18 +1517,19 @@

 Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
                                 int adr_idx,
+                                MemNode::MemOrd mo,
                                 bool require_atomic_access) {
   assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
   const TypePtr* adr_type = NULL;
   debug_only(adr_type = C->get_adr_type(adr_idx));
   Node *mem = memory(adr_idx);
   Node* st;
   if (require_atomic_access && bt == T_LONG) {
-    st = StoreLNode::make_atomic(C, ctl, mem, adr, adr_type, val);
+    st = StoreLNode::make_atomic(C, ctl, mem, adr, adr_type, val, mo);
   } else {
-    st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt);
+    st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo);
   }
   st = _gvn.transform(st);
   set_memory(st, adr_idx);
   // Back-to-back stores can only remove intermediate store with DU info
   // so push on worklist for optimizer.
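
The store factory gains the same parameter. A sketch of the matching unordered store (as in increment_counter() below); MemNode::release would instead order the store after all earlier memory accesses:

    store_to_memory(control(), counter_addr, incr, T_INT,
                    Compile::AliasIdxRaw, MemNode::unordered);
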
@@ -1611,11 +1625,12 @@
                                Node* adr,
                                const TypePtr* adr_type,
                                Node* val,
                                const TypeOopPtr* val_type,
                                BasicType bt,
-                               bool use_precise) {
+                               bool use_precise,
+                               MemNode::MemOrd mo) {
   // Transformation of a value which could be NULL pointer (CastPP #NULL)
   // could be delayed during Parse (for example, in adjust_map_after_if()).
   // Execute transformation here to avoid barrier generation in such case.
   if (_gvn.type(val) == TypePtr::NULL_PTR)
     val = _gvn.makecon(TypePtr::NULL_PTR);
@@ -1631,9 +1646,9 @@
   pre_barrier(true /* do_load */,
               control(), obj, adr, adr_idx, val, val_type,
               NULL /* pre_val */,
               bt);

-  Node* store = store_to_memory(control(), adr, val, bt, adr_idx);
+  Node* store = store_to_memory(control(), adr, val, bt, adr_idx, mo);
   post_barrier(control(), store, obj, adr, adr_idx, val, bt, use_precise);
   return store;
 }
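
Callers of the oop-store helpers now pick the ordering per store. A hedged sketch of a barriered oop store with release semantics, mirroring the Throwable detailMessage store earlier in this change:

    // GC pre/post barriers are emitted around the store itself:
    Node* st = store_oop_to_object(control(), ex_node, adr, adr_typ,
                                   null(), val_type, T_OBJECT, MemNode::release);
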
@@ -1640,13 +1655,14 @@

 // Could be an array or object we don't know at compile time (unsafe ref.)
 Node* GraphKit::store_oop_to_unknown(Node* ctl,
                                      Node* obj,   // containing obj
                                      Node* adr,   // actual adress to store val at
                                      const TypePtr* adr_type,
                                      Node* val,
-                                     BasicType bt) {
+                                     BasicType bt,
+                                     MemNode::MemOrd mo) {
   Compile::AliasType* at = C->alias_type(adr_type);
   const TypeOopPtr* val_type = NULL;
   if (adr_type->isa_instptr()) {
     if (at->field() != NULL) {
       // known field.  This code is a copy of the do_put_xxx logic.
@@ -1661,7 +1677,7 @@
     val_type = adr_type->is_aryptr()->elem()->make_oopptr();
   }
   if (val_type == NULL) {
     val_type = TypeInstPtr::BOTTOM;
   }
-  return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true);
+  return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true, mo);
 }
@@ -1668,4 +1684,4 @@


 //-------------------------array_element_address-------------------------
 Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
@@ -1705,11 +1721,11 @@
 //-------------------------load_array_element-------------------------
 Node* GraphKit::load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype) {
   const Type* elemtype = arytype->elem();
   BasicType elembt = elemtype->array_element_basic_type();
   Node* adr = array_element_address(ary, idx, elembt, arytype->size());
-  Node* ld = make_load(ctl, adr, elemtype, elembt, arytype);
+  Node* ld = make_load(ctl, adr, elemtype, elembt, arytype, MemNode::unordered);
   return ld;
 }

 //-------------------------set_arguments_for_java_call-------------------------
 // Arguments (pre-popped from the stack) are taken from the JVMS.
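
load_array_element() computes the element address and issues an unordered load. A plain-C sketch of the address arithmetic behind array_element_address() (helper names as in HotSpot's arrayOop.hpp and globalDefinitions.hpp, cited from memory):

    // addr = ary + array_header_size(elembt) + idx * element_size(elembt)
    char* element_addr(char* ary, intptr_t idx, BasicType elembt) {
      intptr_t base  = arrayOopDesc::base_offset_in_bytes(elembt); // header + padding
      intptr_t scale = type2aelembytes(elembt);                    // element size in bytes
      return ary + base + idx * scale;
    }
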
@@ -1940,13 +1956,13 @@
 }

 void GraphKit::increment_counter(Node* counter_addr) {
   int adr_type = Compile::AliasIdxRaw;
   Node* ctrl = control();
-  Node* cnt  = make_load(ctrl, counter_addr, TypeInt::INT, T_INT, adr_type);
+  Node* cnt  = make_load(ctrl, counter_addr, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
   Node* incr = _gvn.transform(new (C) AddINode(cnt, _gvn.intcon(1)));
-  store_to_memory( ctrl, counter_addr, incr, T_INT, adr_type );
+  store_to_memory(ctrl, counter_addr, incr, T_INT, adr_type, MemNode::unordered);
 }


 //------------------------------uncommon_trap----------------------------------
 // Bail out to the interpreter in mid-method.  Implemented by calling the
@@ -2106,31 +2122,34 @@
  * @param exact_kls type from profiling
  *
  * @return           node with improved type
  */
 Node* GraphKit::record_profile_for_speculation(Node* n, ciKlass* exact_kls) {
-  const TypeOopPtr* current_type = _gvn.type(n)->isa_oopptr();
+  const Type* current_type = _gvn.type(n);
   assert(UseTypeSpeculation, "type speculation must be on");
-  if (exact_kls != NULL &&
-      // nothing to improve if type is already exact
-      (current_type == NULL ||
-       (!current_type->klass_is_exact() &&
-        (current_type->speculative() == NULL ||
-         !current_type->speculative()->klass_is_exact())))) {
+
+  const TypeOopPtr* speculative = current_type->speculative();
+
+  if (current_type->would_improve_type(exact_kls, jvms()->depth())) {
     const TypeKlassPtr* tklass = TypeKlassPtr::make(exact_kls);
     const TypeOopPtr* xtype = tklass->as_instance_type();
     assert(xtype->klass_is_exact(), "Should be exact");
-
+    // record the new speculative type's depth
+    speculative = xtype->with_inline_depth(jvms()->depth());
+  }
+
+  if (speculative != current_type->speculative()) {
     // Build a type with a speculative type (what we think we know
     // about the type but will need a guard when we use it)
-    const TypeOopPtr* spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::OffsetBot, TypeOopPtr::InstanceBot, xtype);
-    // We're changing the type, we need a new cast node to carry the
-    // new type. The new type depends on the control: what profiling
-    // tells us is only valid from here as far as we can tell.
-    Node* cast = new(C) CastPPNode(n, spec_type);
-    cast->init_req(0, control());
+    const TypeOopPtr* spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::OffsetBot, TypeOopPtr::InstanceBot, speculative);
+    // We're changing the type, we need a new CheckCast node to carry
+    // the new type. The new type depends on the control: what
+    // profiling tells us is only valid from here as far as we can
+    // tell.
+    Node* cast = new(C) CheckCastPPNode(control(), n, current_type->remove_speculative()->join_speculative(spec_type));
     cast = _gvn.transform(cast);
     replace_in_map(n, cast);
     n = cast;
   }
+
   return n;
 }
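
The eligibility test moved into Type::would_improve_type(). A hedged sketch of the predicate the old inline condition expressed; the new helper additionally weighs the inline depth at which the profile was taken:

    // Only speculate when profiling can actually narrow the type:
    bool would_improve(const TypeOopPtr* cur, ciKlass* exact_kls) {
      return exact_kls != NULL &&
             (cur == NULL ||                    // nothing known yet, or
              (!cur->klass_is_exact() &&        // type not already exact, and
               (cur->speculative() == NULL ||   // no exact speculation recorded
                !cur->speculative()->klass_is_exact())));
    }
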
@@ -2137,13 +2156,13 @@

 /**
  * Record profiling data from receiver profiling at an invoke with the
  * type system so that it can propagate it (speculation)
  *
  * @param n  receiver node
  *
  * @return   node with improved type
  */
 Node* GraphKit::record_profiled_receiver_for_speculation(Node* n) {
   if (!UseTypeSpeculation) {
     return n;
   }
@@ -2437,11 +2456,11 @@
   }
 }

 //------------------------------make_slow_call_ex------------------------------
 // Make the exception handler hookups for the slow call
-void GraphKit::make_slow_call_ex(Node* call, ciInstanceKlass* ex_klass, bool separate_io_proj) {
+void GraphKit::make_slow_call_ex(Node* call, ciInstanceKlass* ex_klass, bool separate_io_proj, bool deoptimize) {
   if (stopped())  return;

   // Make a catch node with just two handlers:  fall-through and catch-all
   Node* i_o  = _gvn.transform( new (C) ProjNode(call, TypeFunc::I_O, separate_io_proj) );
   Node* catc = _gvn.transform( new (C) CatchNode(control(), i_o, 2) );
@@ -2451,15 +2470,21 @@
   { PreserveJVMState pjvms(this);
     set_control(excp);
     set_i_o(i_o);

     if (excp != top()) {
-      // Create an exception state also.
-      // Use an exact type if the caller has specified a specific exception.
-      const Type* ex_type = TypeOopPtr::make_from_klass_unique(ex_klass)->cast_to_ptr_type(TypePtr::NotNull);
-      Node*       ex_oop  = new (C) CreateExNode(ex_type, control(), i_o);
-      add_exception_state(make_exception_state(_gvn.transform(ex_oop)));
+      if (deoptimize) {
+        // Deoptimize if an exception is caught. Don't construct exception state in this case.
+        uncommon_trap(Deoptimization::Reason_unhandled,
+                      Deoptimization::Action_none);
+      } else {
+        // Create an exception state also.
+        // Use an exact type if the caller has specified a specific exception.
+        const Type* ex_type = TypeOopPtr::make_from_klass_unique(ex_klass)->cast_to_ptr_type(TypePtr::NotNull);
+        Node*       ex_oop  = new (C) CreateExNode(ex_type, control(), i_o);
+        add_exception_state(make_exception_state(_gvn.transform(ex_oop)));
+      }
     }
   }

   // Get the no-exception control from the CatchNode.
   set_control(norm);
@@ -2523,11 +2548,12 @@
   // if the subklass is the unique subtype of the superklass, the check
   // will always succeed.  We could leave a dependency behind to ensure this.

   // First load the super-klass's check-offset
   Node *p1 = basic_plus_adr( superklass, superklass, in_bytes(Klass::super_check_offset_offset()) );
-  Node *chk_off = _gvn.transform( new (C) LoadINode( NULL, memory(p1), p1, _gvn.type(p1)->is_ptr() ) );
+  Node *chk_off = _gvn.transform(new (C) LoadINode(NULL, memory(p1), p1, _gvn.type(p1)->is_ptr(),
+                                                   TypeInt::INT, MemNode::unordered));
   int cacheoff_con = in_bytes(Klass::secondary_super_cache_offset());
   bool might_be_cache = (find_int_con(chk_off, cacheoff_con) == cacheoff_con);

   // Load from the sub-klass's super-class display list, or a 1-word cache of
   // the secondary superclass list, or a failing value with a sentinel offset
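
A C-like sketch of the fast path this code emits, under the assumption that Klass lays out super_check_offset and the secondary-super cache as the comments describe:

    int off = superklass->super_check_offset();       // loaded above (unordered)
    bool might_be_cache =
        (off == in_bytes(Klass::secondary_super_cache_offset()));
    Klass* probe = *(Klass**)((char*)subklass + off); // display slot or cache slot
    if (probe == superklass)  { /* subtype: hit */ }
    else if (!might_be_cache) { /* definite miss */ }
    else                      { /* scan the secondary supers list */ }
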
@@ -2732,16 +2758,18 @@
 //------------------------maybe_cast_profiled_receiver-------------------------
 // If the profile has seen exactly one type, narrow to exactly that type.
 // Subsequent type checks will always fold up.
 Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj,
                                              ciKlass* require_klass,
                                              ciKlass* spec_klass,
                                              bool safe_for_replace) {
   if (!UseTypeProfile || !TypeProfileCasts) return NULL;

+  Deoptimization::DeoptReason reason = spec_klass == NULL ? Deoptimization::Reason_class_check : Deoptimization::Reason_speculate_class_check;
+
   // Make sure we haven't already deoptimized from this tactic.
-  if (too_many_traps(Deoptimization::Reason_class_check))
+  if (too_many_traps(reason))
     return NULL;

   // (No, this isn't a call, but it's enough like a virtual call
   // to use the same ciMethod accessor to get the profile info...)
   // If we have a speculative type use it instead of profiling (which
@@ -2759,11 +2787,11 @@
   Node* exact_obj = not_null_obj; // will get updated in place...
   Node* slow_ctl  = type_check_receiver(exact_obj, exact_kls, 1.0,
                                         &exact_obj);
   { PreserveJVMState pjvms(this);
     set_control(slow_ctl);
-    uncommon_trap(Deoptimization::Reason_class_check,
+    uncommon_trap(reason,
                   Deoptimization::Action_maybe_recompile);
   }
   if (safe_for_replace) {
     replace_in_map(not_null_obj, exact_obj);
   }
@@ -2786,12 +2814,14 @@
 Node* GraphKit::maybe_cast_profiled_obj(Node* obj,
                                         ciKlass* type,
                                         bool not_null) {
   // type == NULL if profiling tells us this object is always null
   if (type != NULL) {
-    if (!too_many_traps(Deoptimization::Reason_null_check) &&
-        !too_many_traps(Deoptimization::Reason_class_check)) {
+    Deoptimization::DeoptReason class_reason = Deoptimization::Reason_speculate_class_check;
+    Deoptimization::DeoptReason null_reason  = Deoptimization::Reason_null_check;
+    if (!too_many_traps(null_reason) &&
+        !too_many_traps(class_reason)) {
       Node* not_null_obj = NULL;
       // not_null is true if we know the object is not null and
       // there's no need for a null check
       if (!not_null) {
         Node* null_ctl = top();
@@ -2806,11 +2836,11 @@
       Node* slow_ctl  = type_check_receiver(exact_obj, exact_kls, 1.0,
                                             &exact_obj);
       {
         PreserveJVMState pjvms(this);
         set_control(slow_ctl);
-        uncommon_trap(Deoptimization::Reason_class_check,
+        uncommon_trap(class_reason,
                       Deoptimization::Action_maybe_recompile);
       }
       replace_in_map(not_null_obj, exact_obj);
       obj = exact_obj;
     }
@@ -2875,11 +2905,11 @@
       known_statically = (static_res == SSC_always_true || static_res == SSC_always_false);
     }
   }

   if (known_statically && UseTypeSpeculation) {
-    // If we know the type check always succeed then we don't use the
+    // If we know the type check always succeeds then we don't use the
     // profiling data at this bytecode. Don't lose it, feed it to the
     // type system as a speculative type.
     not_null_obj = record_profiled_receiver_for_speculation(not_null_obj);
   } else {
     const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
@@ -2992,26 +3022,32 @@
     region->del_req(_null_path);
     phi   ->del_req(_null_path);
   }

   Node* cast_obj = NULL;
-  const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
-  // We may not have profiling here or it may not help us. If we have
-  // a speculative type use it to perform an exact cast.
-  ciKlass* spec_obj_type = obj_type->speculative_type();
-  if (spec_obj_type != NULL ||
-      (data != NULL &&
-       // Counter has never been decremented (due to cast failure).
-       // ...This is a reasonable thing to expect.  It is true of
-       // all casts inserted by javac to implement generic types.
-       data->as_CounterData()->count() >= 0)) {
-    cast_obj = maybe_cast_profiled_receiver(not_null_obj, tk->klass(), spec_obj_type, safe_for_replace);
-    if (cast_obj != NULL) {
-      if (failure_control != NULL) // failure is now impossible
-        (*failure_control) = top();
-      // adjust the type of the phi to the exact klass:
-      phi->raise_bottom_type(_gvn.type(cast_obj)->meet(TypePtr::NULL_PTR));
+  if (tk->klass_is_exact()) {
+    // The following optimization tries to statically cast the speculative type of the object
+    // (for example obtained during profiling) to the type of the superklass and then do a
+    // dynamic check that the type of the object is what we expect. To work correctly
+    // for checkcast and aastore the type of superklass should be exact.
+    const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
+    // We may not have profiling here or it may not help us. If we have
+    // a speculative type use it to perform an exact cast.
+    ciKlass* spec_obj_type = obj_type->speculative_type();
+    if (spec_obj_type != NULL ||
+        (data != NULL &&
+         // Counter has never been decremented (due to cast failure).
+         // ...This is a reasonable thing to expect.  It is true of
+         // all casts inserted by javac to implement generic types.
+         data->as_CounterData()->count() >= 0)) {
+      cast_obj = maybe_cast_profiled_receiver(not_null_obj, tk->klass(), spec_obj_type, safe_for_replace);
+      if (cast_obj != NULL) {
+        if (failure_control != NULL) // failure is now impossible
+          (*failure_control) = top();
+        // adjust the type of the phi to the exact klass:
+        phi->raise_bottom_type(_gvn.type(cast_obj)->meet_speculative(TypePtr::NULL_PTR));
+      }
     }
   }

   if (cast_obj == NULL) {
     // Load the object's klass
@@ -3130,14 +3166,18 @@
   // Box the stack location
   Node* box = _gvn.transform(new (C) BoxLockNode(next_monitor()));
   Node* mem = reset_memory();

   FastLockNode * flock = _gvn.transform(new (C) FastLockNode(0, obj, box) )->as_FastLock();
-  if (PrintPreciseBiasedLockingStatistics) {
+  if (UseBiasedLocking && PrintPreciseBiasedLockingStatistics) {
     // Create the counters for this fast lock.
     flock->create_lock_counter(sync_jvms()); // sync_jvms used to get current bci
   }
+
+  // Create the rtm counters for this fast lock if needed.
+  flock->create_rtm_lock_counter(sync_jvms()); // sync_jvms used to get current bci
+
   // Add monitor to debug info for the slow path.  If we block inside the
   // slow path and de-opt, we need the monitor hanging around
   map()->push_monitor( flock );

   const TypeFunc *tf = LockNode::lock_type();
@@ -3236,11 +3276,11 @@
       }
     }
   }
   constant_value = Klass::_lh_neutral_value;  // put in a known value
   Node* lhp = basic_plus_adr(klass_node, klass_node, in_bytes(Klass::layout_helper_offset()));
-  return make_load(NULL, lhp, TypeInt::INT, T_INT);
+  return make_load(NULL, lhp, TypeInt::INT, T_INT, MemNode::unordered);
 }

 // We just put in an allocate/initialize with a big raw-memory effect.
 // Hook selected additional alias categories on the initialization.
 static void hook_memory_on_init(GraphKit& kit, int alias_idx,
@@ -3254,2 +3294,2 @@
   kit.set_memory(init_out_raw, alias_idx);
 }
@@ -3256,17 +3296,18 @@

 //---------------------------set_output_for_allocation-------------------------
 Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
-                                          const TypeOopPtr* oop_type) {
+                                          const TypeOopPtr* oop_type,
+                                          bool deoptimize_on_exception) {
   int rawidx = Compile::AliasIdxRaw;
   alloc->set_req( TypeFunc::FramePtr, frameptr() );
   add_safepoint_edges(alloc);
   Node* allocx = _gvn.transform(alloc);
   set_control( _gvn.transform(new (C) ProjNode(allocx, TypeFunc::Control) ) );
   // create memory projection for i_o
   set_memory ( _gvn.transform( new (C) ProjNode(allocx, TypeFunc::Memory, true) ), rawidx );
-  make_slow_call_ex(allocx, env()->Throwable_klass(), true);
+  make_slow_call_ex(allocx, env()->Throwable_klass(), true, deoptimize_on_exception);

   // create a memory projection as for the normal control path
   Node* malloc = _gvn.transform(new (C) ProjNode(allocx, TypeFunc::Memory));
   set_memory(malloc, rawidx);

@@ -3340,13 +3381,15 @@
 // for either, and the graph will fold nicely if the optimizer later reduces
 // the type to a constant.
 // The optional arguments are for specialized use by intrinsics:
 //  - If 'extra_slow_test' if not null is an extra condition for the slow-path.
 //  - If 'return_size_val', report the the total object size to the caller.
+//  - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
 Node* GraphKit::new_instance(Node* klass_node,
                              Node* extra_slow_test,
-                             Node* *return_size_val) {
+                             Node* *return_size_val,
+                             bool deoptimize_on_exception) {
   // Compute size in doublewords
   // The size is always an integral number of doublewords, represented
   // as a positive bytewise size stored in the klass's layout_helper.
   // The layout_helper also encodes (in a low bit) the need for a slow path.
   jint  layout_con = Klass::_lh_neutral_value;
@@ -3411,7 +3454,7 @@
     = new (C) AllocateNode(C, AllocateNode::alloc_type(Type::TOP),
                            control(), mem, i_o(),
                            size, klass_node,
                            initial_slow_test);

-  return set_output_for_allocation(alloc, oop_type);
+  return set_output_for_allocation(alloc, oop_type, deoptimize_on_exception);
 }
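
A hedged sketch of how an intrinsic might request deoptimization instead of an exception state on the allocation's slow path (argument names as declared above):

    // If the runtime slow path throws, deoptimize rather than build an
    // exception state (see the deoptimize flag in make_slow_call_ex).
    Node* obj = new_instance(klass_node,
                             NULL,   // extra_slow_test: none
                             NULL,   // return_size_val: not needed
                             true);  // deoptimize_on_exception
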
@@ -3418,14 +3461,15 @@

 //-------------------------------new_array-------------------------------------
 // helper for both newarray and anewarray
 // The 'length' parameter is (obviously) the length of the array.
 // See comments on new_instance for the meaning of the other arguments.
 Node* GraphKit::new_array(Node* klass_node,     // array klass (maybe variable)
                           Node* length,         // number of array elements
                           int   nargs,          // number of arguments to push back for uncommon trap
-                          Node* *return_size_val) {
+                          Node* *return_size_val,
+                          bool deoptimize_on_exception) {
   jint  layout_con = Klass::_lh_neutral_value;
   Node* layout_val = get_layout_helper(klass_node, layout_con);
   int   layout_is_con = (layout_val == NULL);

   if (!layout_is_con && !StressReflectiveCode &&
@@ -3564,11 +3608,11 @@
   if (ary_type->isa_aryptr() && length_type != NULL) {
     // Try to get a better type than POS for the size
     ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
   }

-  Node* javaoop = set_output_for_allocation(alloc, ary_type);
+  Node* javaoop = set_output_for_allocation(alloc, ary_type, deoptimize_on_exception);

   // Cast length on remaining path to be as narrow as possible
   if (map()->find_edge(length) >= 0) {
     Node* ccast = alloc->make_ideal_length(ary_type, &_gvn);
     if (ccast != length) {
@@ -3771,11 +3815,11 @@
     __ if_then(card_val, BoolTest::ne, zero);
   }

   // Smash zero into card
   if( !UseConcMarkSweepGC ) {
-    __ store(__ ctrl(), card_adr, zero, bt, adr_type);
+    __ store(__ ctrl(), card_adr, zero, bt, adr_type, MemNode::release);
   } else {
     // Specialized path for CM store barrier
     __ storeCM(__ ctrl(), card_adr, zero, oop_store, adr_idx, bt, adr_type);
   }

@@ -3868,13 +3912,13 @@
     // decrement the index
     Node* next_index = _gvn.transform(new (C) SubXNode(index, __ ConX(sizeof(intptr_t))));

     // Now get the buffer location we will log the previous value into and store it
     Node *log_addr = __ AddP(no_base, buffer, next_index);
-    __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw);
+    __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw, MemNode::unordered);
     // update the index
-    __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw);
+    __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw, MemNode::unordered);

   } __ else_(); {

     // logging buffer is full, call the runtime
     const TypeFunc *tf = OptoRuntime::g1_wb_pre_Type();
@@ -3910,12 +3954,13 @@
   __ if_then(index, BoolTest::ne, zeroX); {

     Node* next_index = _gvn.transform(new (C) SubXNode(index, __ ConX(sizeof(intptr_t))));
     Node* log_addr = __ AddP(no_base, buffer, next_index);

-    __ store(__ ctrl(), log_addr, card_adr, T_ADDRESS, Compile::AliasIdxRaw);
-    __ store(__ ctrl(), index_adr, next_index, TypeX_X->basic_type(), Compile::AliasIdxRaw);
+    // Order, see storeCM.
+    __ store(__ ctrl(), log_addr, card_adr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
+    __ store(__ ctrl(), index_adr, next_index, TypeX_X->basic_type(), Compile::AliasIdxRaw, MemNode::unordered);

   } __ else_(); {
     __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), "g1_wb_post", card_adr, __ thread());
   } __ end_if();

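
A plain-C sketch of the dirty-card queue push this kit code encodes (a simplification; buffer and index live in the thread's dirty card queue, variable names hypothetical):

    if (index != 0) {                        // room left in the thread-local buffer?
      index -= sizeof(intptr_t);             // move the cursor down one slot
      *(void**)(buffer + index) = card_adr;  // log the dirty card (unordered)
      /* store the new index back, also unordered */
    } else {
      g1_wb_post(card_adr, thread);          // buffer full: leaf call to the runtime
    }
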
@@ -4041,10 +4086,10 @@
                                                        false, NULL, 0);
     const TypePtr* offset_field_type = string_type->add_offset(offset_offset);
     int offset_field_idx = C->get_alias_index(offset_field_type);
     return make_load(ctrl,
                      basic_plus_adr(str, str, offset_offset),
-                     TypeInt::INT, T_INT, offset_field_idx);
+                     TypeInt::INT, T_INT, offset_field_idx, MemNode::unordered);
   } else {
     return intcon(0);
   }
 }

@@ -4056,10 +4101,10 @@
                                                        false, NULL, 0);
     const TypePtr* count_field_type = string_type->add_offset(count_offset);
     int count_field_idx = C->get_alias_index(count_field_type);
     return make_load(ctrl,
                      basic_plus_adr(str, str, count_offset),
-                     TypeInt::INT, T_INT, count_field_idx);
+                     TypeInt::INT, T_INT, count_field_idx, MemNode::unordered);
   } else {
     return load_array_length(load_String_value(ctrl, str));
   }
 }

@@ -4072,11 +4117,11 @@
     const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull,
                                                     TypeAry::make(TypeInt::CHAR,TypeInt::POS),
                                                     ciTypeArrayKlass::make(T_CHAR), true, 0);
     int value_field_idx = C->get_alias_index(value_field_type);
     Node* load = make_load(ctrl, basic_plus_adr(str, str, value_offset),
-                           value_type, T_OBJECT, value_field_idx);
+                           value_type, T_OBJECT, value_field_idx, MemNode::unordered);
     // String.value field is known to be @Stable.
     if (UseImplicitStableValues) {
       load = cast_array_to_stable(load, value_type);
     }
     return load;
@@ -4087,31 +4132,31 @@
   const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                      false, NULL, 0);
   const TypePtr* offset_field_type = string_type->add_offset(offset_offset);
   int offset_field_idx = C->get_alias_index(offset_field_type);
   store_to_memory(ctrl, basic_plus_adr(str, offset_offset),
-                  value, T_INT, offset_field_idx);
+                  value, T_INT, offset_field_idx, MemNode::unordered);
 }

 void GraphKit::store_String_value(Node* ctrl, Node* str, Node* value) {
   int value_offset = java_lang_String::value_offset_in_bytes();
   const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                      false, NULL, 0);
   const TypePtr* value_field_type = string_type->add_offset(value_offset);

   store_oop_to_object(ctrl, str, basic_plus_adr(str, value_offset), value_field_type,
-                      value, TypeAryPtr::CHARS, T_OBJECT);
+                      value, TypeAryPtr::CHARS, T_OBJECT, MemNode::unordered);
 }

 void GraphKit::store_String_length(Node* ctrl, Node* str, Node* value) {
   int count_offset = java_lang_String::count_offset_in_bytes();
   const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                      false, NULL, 0);
   const TypePtr* count_field_type = string_type->add_offset(count_offset);
   int count_field_idx = C->get_alias_index(count_field_type);
   store_to_memory(ctrl, basic_plus_adr(str, count_offset),
-                  value, T_INT, count_field_idx);
+                  value, T_INT, count_field_idx, MemNode::unordered);
 }

 Node* GraphKit::cast_array_to_stable(Node* ary, const TypeAryPtr* ary_type) {
   // Reify the property as a CastPP node in Ideal graph to comply with monotonicity
   // assumption of CCP analysis.