Mercurial > hg > truffle
comparison src/share/vm/opto/graphKit.cpp @ 14518:d8041d695d19
Merged with jdk9/dev/hotspot changeset 3812c088b945
author | twisti |
---|---|
date | Tue, 11 Mar 2014 18:45:59 -0700 |
parents | 096c224171c4 16c705d792be |
children | b51e29501f30 4ca6dc0799b6 |
comparison
equal
deleted
inserted
replaced
14141:f97c5ec83832 | 14518:d8041d695d19 |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. | 2 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. |
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | 4 * |
5 * This code is free software; you can redistribute it and/or modify it | 5 * This code is free software; you can redistribute it and/or modify it |
6 * under the terms of the GNU General Public License version 2 only, as | 6 * under the terms of the GNU General Public License version 2 only, as |
7 * published by the Free Software Foundation. | 7 * published by the Free Software Foundation. |
418 } else { | 418 } else { |
419 while (dst->req() < region->req()) add_one_req(dst, src); | 419 while (dst->req() < region->req()) add_one_req(dst, src); |
420 } | 420 } |
421 const Type* srctype = _gvn.type(src); | 421 const Type* srctype = _gvn.type(src); |
422 if (phi->type() != srctype) { | 422 if (phi->type() != srctype) { |
423 const Type* dsttype = phi->type()->meet(srctype); | 423 const Type* dsttype = phi->type()->meet_speculative(srctype); |
424 if (phi->type() != dsttype) { | 424 if (phi->type() != dsttype) { |
425 phi->set_type(dsttype); | 425 phi->set_type(dsttype); |
426 _gvn.set_type(phi, dsttype); | 426 _gvn.set_type(phi, dsttype); |
427 } | 427 } |
428 } | 428 } |
492 // take the uncommon_trap in the BuildCutout below. | 492 // take the uncommon_trap in the BuildCutout below. |
493 | 493 |
494 // first must access the should_post_on_exceptions_flag in this thread's JavaThread | 494 // first must access the should_post_on_exceptions_flag in this thread's JavaThread |
495 Node* jthread = _gvn.transform(new (C) ThreadLocalNode()); | 495 Node* jthread = _gvn.transform(new (C) ThreadLocalNode()); |
496 Node* adr = basic_plus_adr(top(), jthread, in_bytes(JavaThread::should_post_on_exceptions_flag_offset())); | 496 Node* adr = basic_plus_adr(top(), jthread, in_bytes(JavaThread::should_post_on_exceptions_flag_offset())); |
497 Node* should_post_flag = make_load(control(), adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw, false); | 497 Node* should_post_flag = make_load(control(), adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw, MemNode::unordered); |
498 | 498 |
499 // Test the should_post_on_exceptions_flag vs. 0 | 499 // Test the should_post_on_exceptions_flag vs. 0 |
500 Node* chk = _gvn.transform( new (C) CmpINode(should_post_flag, intcon(0)) ); | 500 Node* chk = _gvn.transform( new (C) CmpINode(should_post_flag, intcon(0)) ); |
501 Node* tst = _gvn.transform( new (C) BoolNode(chk, BoolTest::eq) ); | 501 Node* tst = _gvn.transform( new (C) BoolNode(chk, BoolTest::eq) ); |
502 | 502 |
594 int offset = java_lang_Throwable::get_detailMessage_offset(); | 594 int offset = java_lang_Throwable::get_detailMessage_offset(); |
595 const TypePtr* adr_typ = ex_con->add_offset(offset); | 595 const TypePtr* adr_typ = ex_con->add_offset(offset); |
596 | 596 |
597 Node *adr = basic_plus_adr(ex_node, ex_node, offset); | 597 Node *adr = basic_plus_adr(ex_node, ex_node, offset); |
598 const TypeOopPtr* val_type = TypeOopPtr::make_from_klass(env()->String_klass()); | 598 const TypeOopPtr* val_type = TypeOopPtr::make_from_klass(env()->String_klass()); |
599 Node *store = store_oop_to_object(control(), ex_node, adr, adr_typ, null(), val_type, T_OBJECT); | 599 // Conservatively release stores of object references. |
600 Node *store = store_oop_to_object(control(), ex_node, adr, adr_typ, null(), val_type, T_OBJECT, MemNode::release); | |
600 | 601 |
601 add_exception_state(make_exception_state(ex_node)); | 602 add_exception_state(make_exception_state(ex_node)); |
602 return; | 603 return; |
603 } | 604 } |
604 } | 605 } |
609 // create the stack trace. | 610 // create the stack trace. |
610 | 611 |
611 // Usual case: Bail to interpreter. | 612 // Usual case: Bail to interpreter. |
612 // Reserve the right to recompile if we haven't seen anything yet. | 613 // Reserve the right to recompile if we haven't seen anything yet. |
613 | 614 |
615 assert(!Deoptimization::reason_is_speculate(reason), "unsupported"); | |
614 Deoptimization::DeoptAction action = Deoptimization::Action_maybe_recompile; | 616 Deoptimization::DeoptAction action = Deoptimization::Action_maybe_recompile; |
615 if (treat_throw_as_hot | 617 if (treat_throw_as_hot |
616 && (method()->method_data()->trap_recompiled_at(bci()) | 618 && (method()->method_data()->trap_recompiled_at(bci(), NULL) |
617 || C->too_many_traps(reason))) { | 619 || C->too_many_traps(reason))) { |
618 // We cannot afford to take more traps here. Suffer in the interpreter. | 620 // We cannot afford to take more traps here. Suffer in the interpreter. |
619 if (C->log() != NULL) | 621 if (C->log() != NULL) |
620 C->log()->elem("hot_throw preallocated='0' reason='%s' mcount='%d'", | 622 C->log()->elem("hot_throw preallocated='0' reason='%s' mcount='%d'", |
621 Deoptimization::trap_reason_name(reason), | 623 Deoptimization::trap_reason_name(reason), |
861 can_prune_locals = true; | 863 can_prune_locals = true; |
862 stack_slots_not_pruned = inputs; | 864 stack_slots_not_pruned = inputs; |
863 } | 865 } |
864 } | 866 } |
865 | 867 |
866 if (env()->jvmti_can_access_local_variables()) { | 868 if (env()->should_retain_local_variables()) { |
867 // At any safepoint, this method can get breakpointed, which would | 869 // At any safepoint, this method can get breakpointed, which would |
868 // then require an immediate deoptimization. | 870 // then require an immediate deoptimization. |
869 can_prune_locals = false; // do not prune locals | 871 can_prune_locals = false; // do not prune locals |
870 stack_slots_not_pruned = 0; | 872 stack_slots_not_pruned = 0; |
871 } | 873 } |
1221 } | 1223 } |
1222 } else { | 1224 } else { |
1223 // See if mixing in the NULL pointer changes type. | 1225 // See if mixing in the NULL pointer changes type. |
1224 // If so, then the NULL pointer was not allowed in the original | 1226 // If so, then the NULL pointer was not allowed in the original |
1225 // type. In other words, "value" was not-null. | 1227 // type. In other words, "value" was not-null. |
1226 if (t->meet(TypePtr::NULL_PTR) != t) { | 1228 if (t->meet(TypePtr::NULL_PTR) != t->remove_speculative()) { |
1227 // same as: if (!TypePtr::NULL_PTR->higher_equal(t)) ... | 1229 // same as: if (!TypePtr::NULL_PTR->higher_equal(t)) ... |
1228 explicit_null_checks_elided++; | 1230 explicit_null_checks_elided++; |
1229 return value; // Elided null check quickly! | 1231 return value; // Elided null check quickly! |
1230 } | 1232 } |
1231 } | 1233 } |
1354 | 1356 |
1355 //------------------------------cast_not_null---------------------------------- | 1357 //------------------------------cast_not_null---------------------------------- |
1356 // Cast obj to not-null on this path | 1358 // Cast obj to not-null on this path |
1357 Node* GraphKit::cast_not_null(Node* obj, bool do_replace_in_map) { | 1359 Node* GraphKit::cast_not_null(Node* obj, bool do_replace_in_map) { |
1358 const Type *t = _gvn.type(obj); | 1360 const Type *t = _gvn.type(obj); |
1359 const Type *t_not_null = t->join(TypePtr::NOTNULL); | 1361 const Type *t_not_null = t->join_speculative(TypePtr::NOTNULL); |
1360 // Object is already not-null? | 1362 // Object is already not-null? |
1361 if( t == t_not_null ) return obj; | 1363 if( t == t_not_null ) return obj; |
1362 | 1364 |
1363 Node *cast = new (C) CastPPNode(obj,t_not_null); | 1365 Node *cast = new (C) CastPPNode(obj,t_not_null); |
1364 cast->init_req(0, control()); | 1366 cast->init_req(0, control()); |
1481 // | 1483 // |
1482 | 1484 |
1483 // factory methods in "int adr_idx" | 1485 // factory methods in "int adr_idx" |
1484 Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, | 1486 Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, |
1485 int adr_idx, | 1487 int adr_idx, |
1486 bool require_atomic_access) { | 1488 MemNode::MemOrd mo, bool require_atomic_access) { |
1487 assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" ); | 1489 assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" ); |
1488 const TypePtr* adr_type = NULL; // debug-mode-only argument | 1490 const TypePtr* adr_type = NULL; // debug-mode-only argument |
1489 debug_only(adr_type = C->get_adr_type(adr_idx)); | 1491 debug_only(adr_type = C->get_adr_type(adr_idx)); |
1490 Node* mem = memory(adr_idx); | 1492 Node* mem = memory(adr_idx); |
1491 Node* ld; | 1493 Node* ld; |
1492 if (require_atomic_access && bt == T_LONG) { | 1494 if (require_atomic_access && bt == T_LONG) { |
1493 ld = LoadLNode::make_atomic(C, ctl, mem, adr, adr_type, t); | 1495 ld = LoadLNode::make_atomic(C, ctl, mem, adr, adr_type, t, mo); |
1494 } else { | 1496 } else { |
1495 ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt); | 1497 ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo); |
1496 } | 1498 } |
1497 ld = _gvn.transform(ld); | 1499 ld = _gvn.transform(ld); |
1498 if ((bt == T_OBJECT) && C->do_escape_analysis() || C->eliminate_boxing()) { | 1500 if ((bt == T_OBJECT) && C->do_escape_analysis() || C->eliminate_boxing()) { |
1499 // Improve graph before escape analysis and boxing elimination. | 1501 // Improve graph before escape analysis and boxing elimination. |
1500 record_for_igvn(ld); | 1502 record_for_igvn(ld); |
1502 return ld; | 1504 return ld; |
1503 } | 1505 } |
1504 | 1506 |
1505 Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt, | 1507 Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt, |
1506 int adr_idx, | 1508 int adr_idx, |
1509 MemNode::MemOrd mo, | |
1507 bool require_atomic_access) { | 1510 bool require_atomic_access) { |
1508 assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" ); | 1511 assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" ); |
1509 const TypePtr* adr_type = NULL; | 1512 const TypePtr* adr_type = NULL; |
1510 debug_only(adr_type = C->get_adr_type(adr_idx)); | 1513 debug_only(adr_type = C->get_adr_type(adr_idx)); |
1511 Node *mem = memory(adr_idx); | 1514 Node *mem = memory(adr_idx); |
1512 Node* st; | 1515 Node* st; |
1513 if (require_atomic_access && bt == T_LONG) { | 1516 if (require_atomic_access && bt == T_LONG) { |
1514 st = StoreLNode::make_atomic(C, ctl, mem, adr, adr_type, val); | 1517 st = StoreLNode::make_atomic(C, ctl, mem, adr, adr_type, val, mo); |
1515 } else { | 1518 } else { |
1516 st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt); | 1519 st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo); |
1517 } | 1520 } |
1518 st = _gvn.transform(st); | 1521 st = _gvn.transform(st); |
1519 set_memory(st, adr_idx); | 1522 set_memory(st, adr_idx); |
1520 // Back-to-back stores can only remove intermediate store with DU info | 1523 // Back-to-back stores can only remove intermediate store with DU info |
1521 // so push on worklist for optimizer. | 1524 // so push on worklist for optimizer. |
1611 Node* adr, | 1614 Node* adr, |
1612 const TypePtr* adr_type, | 1615 const TypePtr* adr_type, |
1613 Node* val, | 1616 Node* val, |
1614 const TypeOopPtr* val_type, | 1617 const TypeOopPtr* val_type, |
1615 BasicType bt, | 1618 BasicType bt, |
1616 bool use_precise) { | 1619 bool use_precise, |
1620 MemNode::MemOrd mo) { | |
1617 // Transformation of a value which could be NULL pointer (CastPP #NULL) | 1621 // Transformation of a value which could be NULL pointer (CastPP #NULL) |
1618 // could be delayed during Parse (for example, in adjust_map_after_if()). | 1622 // could be delayed during Parse (for example, in adjust_map_after_if()). |
1619 // Execute transformation here to avoid barrier generation in such case. | 1623 // Execute transformation here to avoid barrier generation in such case. |
1620 if (_gvn.type(val) == TypePtr::NULL_PTR) | 1624 if (_gvn.type(val) == TypePtr::NULL_PTR) |
1621 val = _gvn.makecon(TypePtr::NULL_PTR); | 1625 val = _gvn.makecon(TypePtr::NULL_PTR); |
1631 pre_barrier(true /* do_load */, | 1635 pre_barrier(true /* do_load */, |
1632 control(), obj, adr, adr_idx, val, val_type, | 1636 control(), obj, adr, adr_idx, val, val_type, |
1633 NULL /* pre_val */, | 1637 NULL /* pre_val */, |
1634 bt); | 1638 bt); |
1635 | 1639 |
1636 Node* store = store_to_memory(control(), adr, val, bt, adr_idx); | 1640 Node* store = store_to_memory(control(), adr, val, bt, adr_idx, mo); |
1637 post_barrier(control(), store, obj, adr, adr_idx, val, bt, use_precise); | 1641 post_barrier(control(), store, obj, adr, adr_idx, val, bt, use_precise); |
1638 return store; | 1642 return store; |
1639 } | 1643 } |
1640 | 1644 |
1641 // Could be an array or object we don't know at compile time (unsafe ref.) | 1645 // Could be an array or object we don't know at compile time (unsafe ref.) |
1642 Node* GraphKit::store_oop_to_unknown(Node* ctl, | 1646 Node* GraphKit::store_oop_to_unknown(Node* ctl, |
1643 Node* obj, // containing obj | 1647 Node* obj, // containing obj |
1644 Node* adr, // actual address to store val at | 1648 Node* adr, // actual address to store val at |
1645 const TypePtr* adr_type, | 1649 const TypePtr* adr_type, |
1646 Node* val, | 1650 Node* val, |
1647 BasicType bt) { | 1651 BasicType bt, |
1652 MemNode::MemOrd mo) { | |
1648 Compile::AliasType* at = C->alias_type(adr_type); | 1653 Compile::AliasType* at = C->alias_type(adr_type); |
1649 const TypeOopPtr* val_type = NULL; | 1654 const TypeOopPtr* val_type = NULL; |
1650 if (adr_type->isa_instptr()) { | 1655 if (adr_type->isa_instptr()) { |
1651 if (at->field() != NULL) { | 1656 if (at->field() != NULL) { |
1652 // known field. This code is a copy of the do_put_xxx logic. | 1657 // known field. This code is a copy of the do_put_xxx logic. |
1661 val_type = adr_type->is_aryptr()->elem()->make_oopptr(); | 1666 val_type = adr_type->is_aryptr()->elem()->make_oopptr(); |
1662 } | 1667 } |
1663 if (val_type == NULL) { | 1668 if (val_type == NULL) { |
1664 val_type = TypeInstPtr::BOTTOM; | 1669 val_type = TypeInstPtr::BOTTOM; |
1665 } | 1670 } |
1666 return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true); | 1671 return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true, mo); |
1667 } | 1672 } |
1668 | 1673 |
1669 | 1674 |
1670 //-------------------------array_element_address------------------------- | 1675 //-------------------------array_element_address------------------------- |
1671 Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt, | 1676 Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt, |
1705 //-------------------------load_array_element------------------------- | 1710 //-------------------------load_array_element------------------------- |
1706 Node* GraphKit::load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype) { | 1711 Node* GraphKit::load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype) { |
1707 const Type* elemtype = arytype->elem(); | 1712 const Type* elemtype = arytype->elem(); |
1708 BasicType elembt = elemtype->array_element_basic_type(); | 1713 BasicType elembt = elemtype->array_element_basic_type(); |
1709 Node* adr = array_element_address(ary, idx, elembt, arytype->size()); | 1714 Node* adr = array_element_address(ary, idx, elembt, arytype->size()); |
1710 Node* ld = make_load(ctl, adr, elemtype, elembt, arytype); | 1715 Node* ld = make_load(ctl, adr, elemtype, elembt, arytype, MemNode::unordered); |
1711 return ld; | 1716 return ld; |
1712 } | 1717 } |
1713 | 1718 |
1714 //-------------------------set_arguments_for_java_call------------------------- | 1719 //-------------------------set_arguments_for_java_call------------------------- |
1715 // Arguments (pre-popped from the stack) are taken from the JVMS. | 1720 // Arguments (pre-popped from the stack) are taken from the JVMS. |
1940 } | 1945 } |
1941 | 1946 |
1942 void GraphKit::increment_counter(Node* counter_addr) { | 1947 void GraphKit::increment_counter(Node* counter_addr) { |
1943 int adr_type = Compile::AliasIdxRaw; | 1948 int adr_type = Compile::AliasIdxRaw; |
1944 Node* ctrl = control(); | 1949 Node* ctrl = control(); |
1945 Node* cnt = make_load(ctrl, counter_addr, TypeInt::INT, T_INT, adr_type); | 1950 Node* cnt = make_load(ctrl, counter_addr, TypeInt::INT, T_INT, adr_type, MemNode::unordered); |
1946 Node* incr = _gvn.transform(new (C) AddINode(cnt, _gvn.intcon(1))); | 1951 Node* incr = _gvn.transform(new (C) AddINode(cnt, _gvn.intcon(1))); |
1947 store_to_memory( ctrl, counter_addr, incr, T_INT, adr_type ); | 1952 store_to_memory(ctrl, counter_addr, incr, T_INT, adr_type, MemNode::unordered); |
1948 } | 1953 } |
1949 | 1954 |
1950 | 1955 |
1951 //------------------------------uncommon_trap---------------------------------- | 1956 //------------------------------uncommon_trap---------------------------------- |
1952 // Bail out to the interpreter in mid-method. Implemented by calling the | 1957 // Bail out to the interpreter in mid-method. Implemented by calling the |
2106 * @param exact_kls type from profiling | 2111 * @param exact_kls type from profiling |
2107 * | 2112 * |
2108 * @return node with improved type | 2113 * @return node with improved type |
2109 */ | 2114 */ |
2110 Node* GraphKit::record_profile_for_speculation(Node* n, ciKlass* exact_kls) { | 2115 Node* GraphKit::record_profile_for_speculation(Node* n, ciKlass* exact_kls) { |
2111 const TypeOopPtr* current_type = _gvn.type(n)->isa_oopptr(); | 2116 const Type* current_type = _gvn.type(n); |
2112 assert(UseTypeSpeculation, "type speculation must be on"); | 2117 assert(UseTypeSpeculation, "type speculation must be on"); |
2113 if (exact_kls != NULL && | 2118 |
2114 // nothing to improve if type is already exact | 2119 const TypeOopPtr* speculative = current_type->speculative(); |
2115 (current_type == NULL || | 2120 |
2116 (!current_type->klass_is_exact() && | 2121 if (current_type->would_improve_type(exact_kls, jvms()->depth())) { |
2117 (current_type->speculative() == NULL || | |
2118 !current_type->speculative()->klass_is_exact())))) { | |
2119 const TypeKlassPtr* tklass = TypeKlassPtr::make(exact_kls); | 2122 const TypeKlassPtr* tklass = TypeKlassPtr::make(exact_kls); |
2120 const TypeOopPtr* xtype = tklass->as_instance_type(); | 2123 const TypeOopPtr* xtype = tklass->as_instance_type(); |
2121 assert(xtype->klass_is_exact(), "Should be exact"); | 2124 assert(xtype->klass_is_exact(), "Should be exact"); |
2122 | 2125 // record the new speculative type's depth |
2126 speculative = xtype->with_inline_depth(jvms()->depth()); | |
2127 } | |
2128 | |
2129 if (speculative != current_type->speculative()) { | |
2123 // Build a type with a speculative type (what we think we know | 2130 // Build a type with a speculative type (what we think we know |
2124 // about the type but will need a guard when we use it) | 2131 // about the type but will need a guard when we use it) |
2125 const TypeOopPtr* spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::OffsetBot, TypeOopPtr::InstanceBot, xtype); | 2132 const TypeOopPtr* spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::OffsetBot, TypeOopPtr::InstanceBot, speculative); |
2126 // We're changing the type, we need a new cast node to carry the | 2133 // We're changing the type, we need a new CheckCast node to carry |
2127 // new type. The new type depends on the control: what profiling | 2134 // the new type. The new type depends on the control: what |
2128 // tells us is only valid from here as far as we can tell. | 2135 // profiling tells us is only valid from here as far as we can |
2129 Node* cast = new(C) CastPPNode(n, spec_type); | 2136 // tell. |
2130 cast->init_req(0, control()); | 2137 Node* cast = new(C) CheckCastPPNode(control(), n, current_type->remove_speculative()->join_speculative(spec_type)); |
2131 cast = _gvn.transform(cast); | 2138 cast = _gvn.transform(cast); |
2132 replace_in_map(n, cast); | 2139 replace_in_map(n, cast); |
2133 n = cast; | 2140 n = cast; |
2134 } | 2141 } |
2142 | |
2135 return n; | 2143 return n; |
2136 } | 2144 } |
2137 | 2145 |
2138 /** | 2146 /** |
2139 * Record profiling data from receiver profiling at an invoke with the | 2147 * Record profiling data from receiver profiling at an invoke with the |
2140 * type system so that it can propagate it (speculation) | 2148 * type system so that it can propagate it (speculation) |
2141 * | 2149 * |
2142 * @param n receiver node | 2150 * @param n receiver node |
2143 * | 2151 * |
2144 * @return node with improved type | 2152 * @return node with improved type |
2145 */ | 2153 */ |
2146 Node* GraphKit::record_profiled_receiver_for_speculation(Node* n) { | 2154 Node* GraphKit::record_profiled_receiver_for_speculation(Node* n) { |
2147 if (!UseTypeSpeculation) { | 2155 if (!UseTypeSpeculation) { |
2148 return n; | 2156 return n; |
2149 } | 2157 } |
2523 // if the subklass is the unique subtype of the superklass, the check | 2531 // if the subklass is the unique subtype of the superklass, the check |
2524 // will always succeed. We could leave a dependency behind to ensure this. | 2532 // will always succeed. We could leave a dependency behind to ensure this. |
2525 | 2533 |
2526 // First load the super-klass's check-offset | 2534 // First load the super-klass's check-offset |
2527 Node *p1 = basic_plus_adr( superklass, superklass, in_bytes(Klass::super_check_offset_offset()) ); | 2535 Node *p1 = basic_plus_adr( superklass, superklass, in_bytes(Klass::super_check_offset_offset()) ); |
2528 Node *chk_off = _gvn.transform( new (C) LoadINode( NULL, memory(p1), p1, _gvn.type(p1)->is_ptr() ) ); | 2536 Node *chk_off = _gvn.transform(new (C) LoadINode(NULL, memory(p1), p1, _gvn.type(p1)->is_ptr(), |
2537 TypeInt::INT, MemNode::unordered)); | |
2529 int cacheoff_con = in_bytes(Klass::secondary_super_cache_offset()); | 2538 int cacheoff_con = in_bytes(Klass::secondary_super_cache_offset()); |
2530 bool might_be_cache = (find_int_con(chk_off, cacheoff_con) == cacheoff_con); | 2539 bool might_be_cache = (find_int_con(chk_off, cacheoff_con) == cacheoff_con); |
2531 | 2540 |
2532 // Load from the sub-klass's super-class display list, or a 1-word cache of | 2541 // Load from the sub-klass's super-class display list, or a 1-word cache of |
2533 // the secondary superclass list, or a failing value with a sentinel offset | 2542 // the secondary superclass list, or a failing value with a sentinel offset |
2732 //------------------------maybe_cast_profiled_receiver------------------------- | 2741 //------------------------maybe_cast_profiled_receiver------------------------- |
2733 // If the profile has seen exactly one type, narrow to exactly that type. | 2742 // If the profile has seen exactly one type, narrow to exactly that type. |
2734 // Subsequent type checks will always fold up. | 2743 // Subsequent type checks will always fold up. |
2735 Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj, | 2744 Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj, |
2736 ciKlass* require_klass, | 2745 ciKlass* require_klass, |
2737 ciKlass* spec_klass, | 2746 ciKlass* spec_klass, |
2738 bool safe_for_replace) { | 2747 bool safe_for_replace) { |
2739 if (!UseTypeProfile || !TypeProfileCasts) return NULL; | 2748 if (!UseTypeProfile || !TypeProfileCasts) return NULL; |
2740 | 2749 |
2750 Deoptimization::DeoptReason reason = spec_klass == NULL ? Deoptimization::Reason_class_check : Deoptimization::Reason_speculate_class_check; | |
2751 | |
2741 // Make sure we haven't already deoptimized from this tactic. | 2752 // Make sure we haven't already deoptimized from this tactic. |
2742 if (too_many_traps(Deoptimization::Reason_class_check)) | 2753 if (too_many_traps(reason)) |
2743 return NULL; | 2754 return NULL; |
2744 | 2755 |
2745 // (No, this isn't a call, but it's enough like a virtual call | 2756 // (No, this isn't a call, but it's enough like a virtual call |
2746 // to use the same ciMethod accessor to get the profile info...) | 2757 // to use the same ciMethod accessor to get the profile info...) |
2747 // If we have a speculative type use it instead of profiling (which | 2758 // If we have a speculative type use it instead of profiling (which |
2759 Node* exact_obj = not_null_obj; // will get updated in place... | 2770 Node* exact_obj = not_null_obj; // will get updated in place... |
2760 Node* slow_ctl = type_check_receiver(exact_obj, exact_kls, 1.0, | 2771 Node* slow_ctl = type_check_receiver(exact_obj, exact_kls, 1.0, |
2761 &exact_obj); | 2772 &exact_obj); |
2762 { PreserveJVMState pjvms(this); | 2773 { PreserveJVMState pjvms(this); |
2763 set_control(slow_ctl); | 2774 set_control(slow_ctl); |
2764 uncommon_trap(Deoptimization::Reason_class_check, | 2775 uncommon_trap(reason, |
2765 Deoptimization::Action_maybe_recompile); | 2776 Deoptimization::Action_maybe_recompile); |
2766 } | 2777 } |
2767 if (safe_for_replace) { | 2778 if (safe_for_replace) { |
2768 replace_in_map(not_null_obj, exact_obj); | 2779 replace_in_map(not_null_obj, exact_obj); |
2769 } | 2780 } |
2786 Node* GraphKit::maybe_cast_profiled_obj(Node* obj, | 2797 Node* GraphKit::maybe_cast_profiled_obj(Node* obj, |
2787 ciKlass* type, | 2798 ciKlass* type, |
2788 bool not_null) { | 2799 bool not_null) { |
2789 // type == NULL if profiling tells us this object is always null | 2800 // type == NULL if profiling tells us this object is always null |
2790 if (type != NULL) { | 2801 if (type != NULL) { |
2791 if (!too_many_traps(Deoptimization::Reason_null_check) && | 2802 Deoptimization::DeoptReason class_reason = Deoptimization::Reason_speculate_class_check; |
2792 !too_many_traps(Deoptimization::Reason_class_check)) { | 2803 Deoptimization::DeoptReason null_reason = Deoptimization::Reason_null_check; |
2804 if (!too_many_traps(null_reason) && | |
2805 !too_many_traps(class_reason)) { | |
2793 Node* not_null_obj = NULL; | 2806 Node* not_null_obj = NULL; |
2794 // not_null is true if we know the object is not null and | 2807 // not_null is true if we know the object is not null and |
2795 // there's no need for a null check | 2808 // there's no need for a null check |
2796 if (!not_null) { | 2809 if (!not_null) { |
2797 Node* null_ctl = top(); | 2810 Node* null_ctl = top(); |
2806 Node* slow_ctl = type_check_receiver(exact_obj, exact_kls, 1.0, | 2819 Node* slow_ctl = type_check_receiver(exact_obj, exact_kls, 1.0, |
2807 &exact_obj); | 2820 &exact_obj); |
2808 { | 2821 { |
2809 PreserveJVMState pjvms(this); | 2822 PreserveJVMState pjvms(this); |
2810 set_control(slow_ctl); | 2823 set_control(slow_ctl); |
2811 uncommon_trap(Deoptimization::Reason_class_check, | 2824 uncommon_trap(class_reason, |
2812 Deoptimization::Action_maybe_recompile); | 2825 Deoptimization::Action_maybe_recompile); |
2813 } | 2826 } |
2814 replace_in_map(not_null_obj, exact_obj); | 2827 replace_in_map(not_null_obj, exact_obj); |
2815 obj = exact_obj; | 2828 obj = exact_obj; |
2816 } | 2829 } |
2875 known_statically = (static_res == SSC_always_true || static_res == SSC_always_false); | 2888 known_statically = (static_res == SSC_always_true || static_res == SSC_always_false); |
2876 } | 2889 } |
2877 } | 2890 } |
2878 | 2891 |
2879 if (known_statically && UseTypeSpeculation) { | 2892 if (known_statically && UseTypeSpeculation) { |
2880 // If we know the type check always succeed then we don't use the | 2893 // If we know the type check always succeeds then we don't use the |
2881 // profiling data at this bytecode. Don't lose it, feed it to the | 2894 // profiling data at this bytecode. Don't lose it, feed it to the |
2882 // type system as a speculative type. | 2895 // type system as a speculative type. |
2883 not_null_obj = record_profiled_receiver_for_speculation(not_null_obj); | 2896 not_null_obj = record_profiled_receiver_for_speculation(not_null_obj); |
2884 } else { | 2897 } else { |
2885 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr(); | 2898 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr(); |
3007 cast_obj = maybe_cast_profiled_receiver(not_null_obj, tk->klass(), spec_obj_type, safe_for_replace); | 3020 cast_obj = maybe_cast_profiled_receiver(not_null_obj, tk->klass(), spec_obj_type, safe_for_replace); |
3008 if (cast_obj != NULL) { | 3021 if (cast_obj != NULL) { |
3009 if (failure_control != NULL) // failure is now impossible | 3022 if (failure_control != NULL) // failure is now impossible |
3010 (*failure_control) = top(); | 3023 (*failure_control) = top(); |
3011 // adjust the type of the phi to the exact klass: | 3024 // adjust the type of the phi to the exact klass: |
3012 phi->raise_bottom_type(_gvn.type(cast_obj)->meet(TypePtr::NULL_PTR)); | 3025 phi->raise_bottom_type(_gvn.type(cast_obj)->meet_speculative(TypePtr::NULL_PTR)); |
3013 } | 3026 } |
3014 } | 3027 } |
3015 | 3028 |
3016 if (cast_obj == NULL) { | 3029 if (cast_obj == NULL) { |
3017 // Load the object's klass | 3030 // Load the object's klass |
3236 } | 3249 } |
3237 } | 3250 } |
3238 } | 3251 } |
3239 constant_value = Klass::_lh_neutral_value; // put in a known value | 3252 constant_value = Klass::_lh_neutral_value; // put in a known value |
3240 Node* lhp = basic_plus_adr(klass_node, klass_node, in_bytes(Klass::layout_helper_offset())); | 3253 Node* lhp = basic_plus_adr(klass_node, klass_node, in_bytes(Klass::layout_helper_offset())); |
3241 return make_load(NULL, lhp, TypeInt::INT, T_INT); | 3254 return make_load(NULL, lhp, TypeInt::INT, T_INT, MemNode::unordered); |
3242 } | 3255 } |
3243 | 3256 |
3244 // We just put in an allocate/initialize with a big raw-memory effect. | 3257 // We just put in an allocate/initialize with a big raw-memory effect. |
3245 // Hook selected additional alias categories on the initialization. | 3258 // Hook selected additional alias categories on the initialization. |
3246 static void hook_memory_on_init(GraphKit& kit, int alias_idx, | 3259 static void hook_memory_on_init(GraphKit& kit, int alias_idx, |
3771 __ if_then(card_val, BoolTest::ne, zero); | 3784 __ if_then(card_val, BoolTest::ne, zero); |
3772 } | 3785 } |
3773 | 3786 |
3774 // Smash zero into card | 3787 // Smash zero into card |
3775 if( !UseConcMarkSweepGC ) { | 3788 if( !UseConcMarkSweepGC ) { |
3776 __ store(__ ctrl(), card_adr, zero, bt, adr_type); | 3789 __ store(__ ctrl(), card_adr, zero, bt, adr_type, MemNode::release); |
3777 } else { | 3790 } else { |
3778 // Specialized path for CM store barrier | 3791 // Specialized path for CM store barrier |
3779 __ storeCM(__ ctrl(), card_adr, zero, oop_store, adr_idx, bt, adr_type); | 3792 __ storeCM(__ ctrl(), card_adr, zero, oop_store, adr_idx, bt, adr_type); |
3780 } | 3793 } |
3781 | 3794 |
3868 // decrement the index | 3881 // decrement the index |
3869 Node* next_index = _gvn.transform(new (C) SubXNode(index, __ ConX(sizeof(intptr_t)))); | 3882 Node* next_index = _gvn.transform(new (C) SubXNode(index, __ ConX(sizeof(intptr_t)))); |
3870 | 3883 |
3871 // Now get the buffer location we will log the previous value into and store it | 3884 // Now get the buffer location we will log the previous value into and store it |
3872 Node *log_addr = __ AddP(no_base, buffer, next_index); | 3885 Node *log_addr = __ AddP(no_base, buffer, next_index); |
3873 __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw); | 3886 __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw, MemNode::unordered); |
3874 // update the index | 3887 // update the index |
3875 __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw); | 3888 __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw, MemNode::unordered); |
3876 | 3889 |
3877 } __ else_(); { | 3890 } __ else_(); { |
3878 | 3891 |
3879 // logging buffer is full, call the runtime | 3892 // logging buffer is full, call the runtime |
3880 const TypeFunc *tf = OptoRuntime::g1_wb_pre_Type(); | 3893 const TypeFunc *tf = OptoRuntime::g1_wb_pre_Type(); |
3910 __ if_then(index, BoolTest::ne, zeroX); { | 3923 __ if_then(index, BoolTest::ne, zeroX); { |
3911 | 3924 |
3912 Node* next_index = _gvn.transform(new (C) SubXNode(index, __ ConX(sizeof(intptr_t)))); | 3925 Node* next_index = _gvn.transform(new (C) SubXNode(index, __ ConX(sizeof(intptr_t)))); |
3913 Node* log_addr = __ AddP(no_base, buffer, next_index); | 3926 Node* log_addr = __ AddP(no_base, buffer, next_index); |
3914 | 3927 |
3915 __ store(__ ctrl(), log_addr, card_adr, T_ADDRESS, Compile::AliasIdxRaw); | 3928 // Order, see storeCM. |
3916 __ store(__ ctrl(), index_adr, next_index, TypeX_X->basic_type(), Compile::AliasIdxRaw); | 3929 __ store(__ ctrl(), log_addr, card_adr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered); |
3930 __ store(__ ctrl(), index_adr, next_index, TypeX_X->basic_type(), Compile::AliasIdxRaw, MemNode::unordered); | |
3917 | 3931 |
3918 } __ else_(); { | 3932 } __ else_(); { |
3919 __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), "g1_wb_post", card_adr, __ thread()); | 3933 __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), "g1_wb_post", card_adr, __ thread()); |
3920 } __ end_if(); | 3934 } __ end_if(); |
3921 | 3935 |
4041 false, NULL, 0); | 4055 false, NULL, 0); |
4042 const TypePtr* offset_field_type = string_type->add_offset(offset_offset); | 4056 const TypePtr* offset_field_type = string_type->add_offset(offset_offset); |
4043 int offset_field_idx = C->get_alias_index(offset_field_type); | 4057 int offset_field_idx = C->get_alias_index(offset_field_type); |
4044 return make_load(ctrl, | 4058 return make_load(ctrl, |
4045 basic_plus_adr(str, str, offset_offset), | 4059 basic_plus_adr(str, str, offset_offset), |
4046 TypeInt::INT, T_INT, offset_field_idx); | 4060 TypeInt::INT, T_INT, offset_field_idx, MemNode::unordered); |
4047 } else { | 4061 } else { |
4048 return intcon(0); | 4062 return intcon(0); |
4049 } | 4063 } |
4050 } | 4064 } |
4051 | 4065 |
4056 false, NULL, 0); | 4070 false, NULL, 0); |
4057 const TypePtr* count_field_type = string_type->add_offset(count_offset); | 4071 const TypePtr* count_field_type = string_type->add_offset(count_offset); |
4058 int count_field_idx = C->get_alias_index(count_field_type); | 4072 int count_field_idx = C->get_alias_index(count_field_type); |
4059 return make_load(ctrl, | 4073 return make_load(ctrl, |
4060 basic_plus_adr(str, str, count_offset), | 4074 basic_plus_adr(str, str, count_offset), |
4061 TypeInt::INT, T_INT, count_field_idx); | 4075 TypeInt::INT, T_INT, count_field_idx, MemNode::unordered); |
4062 } else { | 4076 } else { |
4063 return load_array_length(load_String_value(ctrl, str)); | 4077 return load_array_length(load_String_value(ctrl, str)); |
4064 } | 4078 } |
4065 } | 4079 } |
4066 | 4080 |
4072 const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull, | 4086 const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull, |
4073 TypeAry::make(TypeInt::CHAR,TypeInt::POS), | 4087 TypeAry::make(TypeInt::CHAR,TypeInt::POS), |
4074 ciTypeArrayKlass::make(T_CHAR), true, 0); | 4088 ciTypeArrayKlass::make(T_CHAR), true, 0); |
4075 int value_field_idx = C->get_alias_index(value_field_type); | 4089 int value_field_idx = C->get_alias_index(value_field_type); |
4076 Node* load = make_load(ctrl, basic_plus_adr(str, str, value_offset), | 4090 Node* load = make_load(ctrl, basic_plus_adr(str, str, value_offset), |
4077 value_type, T_OBJECT, value_field_idx); | 4091 value_type, T_OBJECT, value_field_idx, MemNode::unordered); |
4078 // String.value field is known to be @Stable. | 4092 // String.value field is known to be @Stable. |
4079 if (UseImplicitStableValues) { | 4093 if (UseImplicitStableValues) { |
4080 load = cast_array_to_stable(load, value_type); | 4094 load = cast_array_to_stable(load, value_type); |
4081 } | 4095 } |
4082 return load; | 4096 return load; |
4087 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(), | 4101 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(), |
4088 false, NULL, 0); | 4102 false, NULL, 0); |
4089 const TypePtr* offset_field_type = string_type->add_offset(offset_offset); | 4103 const TypePtr* offset_field_type = string_type->add_offset(offset_offset); |
4090 int offset_field_idx = C->get_alias_index(offset_field_type); | 4104 int offset_field_idx = C->get_alias_index(offset_field_type); |
4091 store_to_memory(ctrl, basic_plus_adr(str, offset_offset), | 4105 store_to_memory(ctrl, basic_plus_adr(str, offset_offset), |
4092 value, T_INT, offset_field_idx); | 4106 value, T_INT, offset_field_idx, MemNode::unordered); |
4093 } | 4107 } |
4094 | 4108 |
4095 void GraphKit::store_String_value(Node* ctrl, Node* str, Node* value) { | 4109 void GraphKit::store_String_value(Node* ctrl, Node* str, Node* value) { |
4096 int value_offset = java_lang_String::value_offset_in_bytes(); | 4110 int value_offset = java_lang_String::value_offset_in_bytes(); |
4097 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(), | 4111 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(), |
4098 false, NULL, 0); | 4112 false, NULL, 0); |
4099 const TypePtr* value_field_type = string_type->add_offset(value_offset); | 4113 const TypePtr* value_field_type = string_type->add_offset(value_offset); |
4100 | 4114 |
4101 store_oop_to_object(ctrl, str, basic_plus_adr(str, value_offset), value_field_type, | 4115 store_oop_to_object(ctrl, str, basic_plus_adr(str, value_offset), value_field_type, |
4102 value, TypeAryPtr::CHARS, T_OBJECT); | 4116 value, TypeAryPtr::CHARS, T_OBJECT, MemNode::unordered); |
4103 } | 4117 } |
4104 | 4118 |
4105 void GraphKit::store_String_length(Node* ctrl, Node* str, Node* value) { | 4119 void GraphKit::store_String_length(Node* ctrl, Node* str, Node* value) { |
4106 int count_offset = java_lang_String::count_offset_in_bytes(); | 4120 int count_offset = java_lang_String::count_offset_in_bytes(); |
4107 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(), | 4121 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(), |
4108 false, NULL, 0); | 4122 false, NULL, 0); |
4109 const TypePtr* count_field_type = string_type->add_offset(count_offset); | 4123 const TypePtr* count_field_type = string_type->add_offset(count_offset); |
4110 int count_field_idx = C->get_alias_index(count_field_type); | 4124 int count_field_idx = C->get_alias_index(count_field_type); |
4111 store_to_memory(ctrl, basic_plus_adr(str, count_offset), | 4125 store_to_memory(ctrl, basic_plus_adr(str, count_offset), |
4112 value, T_INT, count_field_idx); | 4126 value, T_INT, count_field_idx, MemNode::unordered); |
4113 } | 4127 } |
4114 | 4128 |
4115 Node* GraphKit::cast_array_to_stable(Node* ary, const TypeAryPtr* ary_type) { | 4129 Node* GraphKit::cast_array_to_stable(Node* ary, const TypeAryPtr* ary_type) { |
4116 // Reify the property as a CastPP node in Ideal graph to comply with monotonicity | 4130 // Reify the property as a CastPP node in Ideal graph to comply with monotonicity |
4117 // assumption of CCP analysis. | 4131 // assumption of CCP analysis. |