comparison src/share/vm/opto/graphKit.cpp @ 14909:4ca6dc0799b6

Backout jdk9 merge
author Gilles Duboscq <duboscq@ssw.jku.at>
date Tue, 01 Apr 2014 13:57:07 +0200
parents d8041d695d19
children 89152779163c
comparing 14908:8db6e76cb658 (before) with 14909:4ca6dc0799b6 (after)
@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.
@@ -418,11 +418,11 @@
   } else {
     while (dst->req() < region->req())  add_one_req(dst, src);
   }
   const Type* srctype = _gvn.type(src);
   if (phi->type() != srctype) {
-    const Type* dsttype = phi->type()->meet_speculative(srctype);
+    const Type* dsttype = phi->type()->meet(srctype);
     if (phi->type() != dsttype) {
       phi->set_type(dsttype);
       _gvn.set_type(phi, dsttype);
     }
   }
@@ -492,11 +492,11 @@
   // take the uncommon_trap in the BuildCutout below.

   // first must access the should_post_on_exceptions_flag in this thread's JavaThread
   Node* jthread = _gvn.transform(new (C) ThreadLocalNode());
   Node* adr = basic_plus_adr(top(), jthread, in_bytes(JavaThread::should_post_on_exceptions_flag_offset()));
-  Node* should_post_flag = make_load(control(), adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw, MemNode::unordered);
+  Node* should_post_flag = make_load(control(), adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw, false);

   // Test the should_post_on_exceptions_flag vs. 0
   Node* chk = _gvn.transform( new (C) CmpINode(should_post_flag, intcon(0)) );
   Node* tst = _gvn.transform( new (C) BoolNode(chk, BoolTest::eq) );
@@ -594,12 +594,11 @@
     int offset = java_lang_Throwable::get_detailMessage_offset();
     const TypePtr* adr_typ = ex_con->add_offset(offset);

     Node *adr = basic_plus_adr(ex_node, ex_node, offset);
     const TypeOopPtr* val_type = TypeOopPtr::make_from_klass(env()->String_klass());
-    // Conservatively release stores of object references.
-    Node *store = store_oop_to_object(control(), ex_node, adr, adr_typ, null(), val_type, T_OBJECT, MemNode::release);
+    Node *store = store_oop_to_object(control(), ex_node, adr, adr_typ, null(), val_type, T_OBJECT);

     add_exception_state(make_exception_state(ex_node));
     return;
   }
 }
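The MemNode::release argument on the left (dropped by the backout) requests release semantics for the oop store, per the left-hand comment. As a standalone illustration of what a release store buys, in standard C++ rather than HotSpot IR:

#include <atomic>

// Sketch only: a release store makes all earlier writes in this thread
// visible to another thread that acquire-loads the same location.
std::atomic<int*> detail_message{nullptr};
int payload;

void writer() {
  payload = 42;                                               // ordinary store
  detail_message.store(&payload, std::memory_order_release);  // release store
}

void reader() {
  int* p = detail_message.load(std::memory_order_acquire);
  if (p != nullptr) {
    int v = *p;  // guaranteed to observe payload == 42
    (void)v;
  }
}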
@@ -610,14 +609,13 @@
   // create the stack trace.

   // Usual case:  Bail to interpreter.
   // Reserve the right to recompile if we haven't seen anything yet.

-  assert(!Deoptimization::reason_is_speculate(reason), "unsupported");
   Deoptimization::DeoptAction action = Deoptimization::Action_maybe_recompile;
   if (treat_throw_as_hot
-      && (method()->method_data()->trap_recompiled_at(bci(), NULL)
+      && (method()->method_data()->trap_recompiled_at(bci())
          || C->too_many_traps(reason))) {
     // We cannot afford to take more traps here.  Suffer in the interpreter.
     if (C->log() != NULL)
       C->log()->elem("hot_throw preallocated='0' reason='%s' mcount='%d'",
                      Deoptimization::trap_reason_name(reason),
@@ -863,11 +861,11 @@
       can_prune_locals = true;
       stack_slots_not_pruned = inputs;
     }
   }

-  if (env()->should_retain_local_variables()) {
+  if (env()->jvmti_can_access_local_variables()) {
     // At any safepoint, this method can get breakpointed, which would
     // then require an immediate deoptimization.
     can_prune_locals = false;  // do not prune locals
     stack_slots_not_pruned = 0;
   }
@@ -1223,11 +1221,11 @@
       }
     } else {
       // See if mixing in the NULL pointer changes type.
       // If so, then the NULL pointer was not allowed in the original
       // type.  In other words, "value" was not-null.
-      if (t->meet(TypePtr::NULL_PTR) != t->remove_speculative()) {
+      if (t->meet(TypePtr::NULL_PTR) != t) {
         // same as: if (!TypePtr::NULL_PTR->higher_equal(t)) ...
         explicit_null_checks_elided++;
         return value;           // Elided null check quickly!
       }
     }
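The test reads: meet t with the NULL pointer type; if the result differs from t, then NULL was never a possible value of t, so the explicit null check can be elided (the jdk9 side additionally strips the speculative part before comparing). A toy-lattice sketch of that reasoning, not HotSpot's real type system:

#include <cassert>

// Toy pointer lattice: NOTNULL and NULLPTR are unrelated; ANYPTR contains
// both. meet() returns the smallest type containing both inputs.
enum PtrType { NOTNULL, NULLPTR, ANYPTR };

PtrType meet(PtrType a, PtrType b) { return (a == b) ? a : ANYPTR; }

// Mirrors the elision test above: if mixing in NULL changes the type,
// NULL was never in it, so "value" is provably not-null.
bool provably_not_null(PtrType t) { return meet(t, NULLPTR) != t; }

int main() {
  assert(provably_not_null(NOTNULL));  // meet(NOTNULL, NULLPTR) == ANYPTR != NOTNULL
  assert(!provably_not_null(ANYPTR));  // NULL already included: keep the check
}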
@@ -1356,11 +1354,11 @@

 //------------------------------cast_not_null----------------------------------
 // Cast obj to not-null on this path
 Node* GraphKit::cast_not_null(Node* obj, bool do_replace_in_map) {
   const Type *t = _gvn.type(obj);
-  const Type *t_not_null = t->join_speculative(TypePtr::NOTNULL);
+  const Type *t_not_null = t->join(TypePtr::NOTNULL);
   // Object is already not-null?
   if( t == t_not_null ) return obj;

   Node *cast = new (C) CastPPNode(obj,t_not_null);
   cast->init_req(0, control());
@@ -1483,20 +1481,20 @@
 //

 // factory methods in "int adr_idx"
 Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
                           int adr_idx,
-                          MemNode::MemOrd mo, bool require_atomic_access) {
+                          bool require_atomic_access) {
   assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
   const TypePtr* adr_type = NULL; // debug-mode-only argument
   debug_only(adr_type = C->get_adr_type(adr_idx));
   Node* mem = memory(adr_idx);
   Node* ld;
   if (require_atomic_access && bt == T_LONG) {
-    ld = LoadLNode::make_atomic(C, ctl, mem, adr, adr_type, t, mo);
+    ld = LoadLNode::make_atomic(C, ctl, mem, adr, adr_type, t);
   } else {
-    ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo);
+    ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt);
   }
   ld = _gvn.transform(ld);
   if ((bt == T_OBJECT) && C->do_escape_analysis() || C->eliminate_boxing()) {
     // Improve graph before escape analysis and boxing elimination.
     record_for_igvn(ld);
@@ -1504,21 +1502,20 @@
   return ld;
 }

 Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
                                 int adr_idx,
-                                MemNode::MemOrd mo,
                                 bool require_atomic_access) {
   assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
   const TypePtr* adr_type = NULL;
   debug_only(adr_type = C->get_adr_type(adr_idx));
   Node *mem = memory(adr_idx);
   Node* st;
   if (require_atomic_access && bt == T_LONG) {
-    st = StoreLNode::make_atomic(C, ctl, mem, adr, adr_type, val, mo);
+    st = StoreLNode::make_atomic(C, ctl, mem, adr, adr_type, val);
   } else {
-    st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo);
+    st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt);
   }
   st = _gvn.transform(st);
   set_memory(st, adr_idx);
   // Back-to-back stores can only remove intermediate store with DU info
   // so push on worklist for optimizer.
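This signature change ripples through every caller in the file: the jdk9-merged API threads an explicit MemNode::MemOrd through each load/store factory, while the backed-out API bakes the ordering in. The same API-style difference, as a runnable analogy in standard C++ (not HotSpot code):

#include <atomic>

std::atomic<long> g_counter{0};

// jdk9-style: the caller states the required ordering at every access.
long load_explicit(std::memory_order mo) { return g_counter.load(mo); }

// Backed-out style: the ordering is a property of the accessor itself.
long load_plain() { return g_counter.load(std::memory_order_relaxed); }

int main() {
  return (int)(load_explicit(std::memory_order_acquire) + load_plain());
}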
@@ -1614,12 +1611,11 @@
                          Node* adr,
                          const TypePtr* adr_type,
                          Node* val,
                          const TypeOopPtr* val_type,
                          BasicType bt,
-                         bool use_precise,
-                         MemNode::MemOrd mo) {
+                         bool use_precise) {
   // Transformation of a value which could be NULL pointer (CastPP #NULL)
   // could be delayed during Parse (for example, in adjust_map_after_if()).
   // Execute transformation here to avoid barrier generation in such case.
   if (_gvn.type(val) == TypePtr::NULL_PTR)
     val = _gvn.makecon(TypePtr::NULL_PTR);
@@ -1635,9 +1631,9 @@
   pre_barrier(true /* do_load */,
               control(), obj, adr, adr_idx, val, val_type,
               NULL /* pre_val */,
               bt);

-  Node* store = store_to_memory(control(), adr, val, bt, adr_idx, mo);
+  Node* store = store_to_memory(control(), adr, val, bt, adr_idx);
   post_barrier(control(), store, obj, adr, adr_idx, val, bt, use_precise);
   return store;
 }
@@ -1644,14 +1640,13 @@

 // Could be an array or object we don't know at compile time (unsafe ref.)
 Node* GraphKit::store_oop_to_unknown(Node* ctl,
                                      Node* obj,   // containing obj
                                      Node* adr,   // actual adress to store val at
                                      const TypePtr* adr_type,
                                      Node* val,
-                                     BasicType bt,
-                                     MemNode::MemOrd mo) {
+                                     BasicType bt) {
   Compile::AliasType* at = C->alias_type(adr_type);
   const TypeOopPtr* val_type = NULL;
   if (adr_type->isa_instptr()) {
     if (at->field() != NULL) {
       // known field.  This code is a copy of the do_put_xxx logic.
@@ -1666,11 +1661,11 @@
     val_type = adr_type->is_aryptr()->elem()->make_oopptr();
   }
   if (val_type == NULL) {
     val_type = TypeInstPtr::BOTTOM;
   }
-  return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true, mo);
+  return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true);
 }


 //-------------------------array_element_address-------------------------
 Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
@@ -1710,11 +1705,11 @@
 //-------------------------load_array_element-------------------------
 Node* GraphKit::load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype) {
   const Type* elemtype = arytype->elem();
   BasicType elembt = elemtype->array_element_basic_type();
   Node* adr = array_element_address(ary, idx, elembt, arytype->size());
-  Node* ld = make_load(ctl, adr, elemtype, elembt, arytype, MemNode::unordered);
+  Node* ld = make_load(ctl, adr, elemtype, elembt, arytype);
   return ld;
 }

 //-------------------------set_arguments_for_java_call-------------------------
 // Arguments (pre-popped from the stack) are taken from the JVMS.
@@ -1945,13 +1940,13 @@
 }

 void GraphKit::increment_counter(Node* counter_addr) {
   int adr_type = Compile::AliasIdxRaw;
   Node* ctrl = control();
-  Node* cnt  = make_load(ctrl, counter_addr, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
+  Node* cnt  = make_load(ctrl, counter_addr, TypeInt::INT, T_INT, adr_type);
   Node* incr = _gvn.transform(new (C) AddINode(cnt, _gvn.intcon(1)));
-  store_to_memory(ctrl, counter_addr, incr, T_INT, adr_type, MemNode::unordered);
+  store_to_memory( ctrl, counter_addr, incr, T_INT, adr_type );
 }


 //------------------------------uncommon_trap----------------------------------
 // Bail out to the interpreter in mid-method.  Implemented by calling the
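Note that increment_counter emits a plain load/add/store in both revisions, with no atomic update; concurrent increments can be lost, which is tolerable for profiling counters. A C++ sketch of the emitted pattern:

// Sketch of the load/add/store above (not HotSpot code). Two threads
// running this concurrently may both read the same old value, so one
// increment can be lost; profile counters accept that race for speed.
void increment_counter_sketch(int* counter_addr) {
  int cnt = *counter_addr;   // make_load
  *counter_addr = cnt + 1;   // AddI + store_to_memory
}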
@@ -2111,47 +2106,44 @@
  * @param exact_kls type from profiling
  *
  * @return node with improved type
  */
 Node* GraphKit::record_profile_for_speculation(Node* n, ciKlass* exact_kls) {
-  const Type* current_type = _gvn.type(n);
+  const TypeOopPtr* current_type = _gvn.type(n)->isa_oopptr();
   assert(UseTypeSpeculation, "type speculation must be on");
-
-  const TypeOopPtr* speculative = current_type->speculative();
-
-  if (current_type->would_improve_type(exact_kls, jvms()->depth())) {
+  if (exact_kls != NULL &&
+      // nothing to improve if type is already exact
+      (current_type == NULL ||
+       (!current_type->klass_is_exact() &&
+        (current_type->speculative() == NULL ||
+         !current_type->speculative()->klass_is_exact())))) {
     const TypeKlassPtr* tklass = TypeKlassPtr::make(exact_kls);
     const TypeOopPtr* xtype = tklass->as_instance_type();
     assert(xtype->klass_is_exact(), "Should be exact");
-    // record the new speculative type's depth
-    speculative = xtype->with_inline_depth(jvms()->depth());
-  }
-
-  if (speculative != current_type->speculative()) {
+
     // Build a type with a speculative type (what we think we know
     // about the type but will need a guard when we use it)
-    const TypeOopPtr* spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::OffsetBot, TypeOopPtr::InstanceBot, speculative);
-    // We're changing the type, we need a new CheckCast node to carry
-    // the new type. The new type depends on the control: what
-    // profiling tells us is only valid from here as far as we can
-    // tell.
-    Node* cast = new(C) CheckCastPPNode(control(), n, current_type->remove_speculative()->join_speculative(spec_type));
+    const TypeOopPtr* spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::OffsetBot, TypeOopPtr::InstanceBot, xtype);
+    // We're changing the type, we need a new cast node to carry the
+    // new type. The new type depends on the control: what profiling
+    // tells us is only valid from here as far as we can tell.
+    Node* cast = new(C) CastPPNode(n, spec_type);
+    cast->init_req(0, control());
     cast = _gvn.transform(cast);
     replace_in_map(n, cast);
     n = cast;
   }
-
   return n;
 }

 /**
  * Record profiling data from receiver profiling at an invoke with the
  * type system so that it can propagate it (speculation)
  *
  * @param n receiver node
  *
  * @return node with improved type
  */
 Node* GraphKit::record_profiled_receiver_for_speculation(Node* n) {
   if (!UseTypeSpeculation) {
     return n;
   }
@@ -2531,12 +2523,11 @@
   // if the subklass is the unique subtype of the superklass, the check
   // will always succeed.  We could leave a dependency behind to ensure this.

   // First load the super-klass's check-offset
   Node *p1 = basic_plus_adr( superklass, superklass, in_bytes(Klass::super_check_offset_offset()) );
-  Node *chk_off = _gvn.transform(new (C) LoadINode(NULL, memory(p1), p1, _gvn.type(p1)->is_ptr(),
-                                                   TypeInt::INT, MemNode::unordered));
+  Node *chk_off = _gvn.transform( new (C) LoadINode( NULL, memory(p1), p1, _gvn.type(p1)->is_ptr() ) );
   int cacheoff_con = in_bytes(Klass::secondary_super_cache_offset());
   bool might_be_cache = (find_int_con(chk_off, cacheoff_con) == cacheoff_con);

   // Load from the sub-klass's super-class display list, or a 1-word cache of
   // the secondary superclass list, or a failing value with a sentinel offset
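For context, the IR built here implements HotSpot's fast subtype check: super_check_offset says where in the subklass a successful check must find the superklass, and the secondary-super-cache offset is the sentinel that forces the slow path. A simplified standalone sketch of the runtime logic (the struct layout is illustrative, not VM-accurate):

#include <cstddef>

// Illustrative klass layout; the real Klass layout differs.
struct KlassSketch {
  size_t        super_check_offset;   // where a subtype must point at us
  KlassSketch*  primary_supers[8];    // fixed-depth display
  KlassSketch** secondary_supers;     // NULL-terminated list (interfaces etc.)
  KlassSketch*  secondary_super_cache;
};

const size_t kCacheOff = offsetof(KlassSketch, secondary_super_cache);

bool is_subtype_of(KlassSketch* sub, KlassSketch* super) {
  // Fast path: one load and one compare against the display/cache slot.
  size_t off = super->super_check_offset;
  KlassSketch* probe = *(KlassSketch**)((char*)sub + off);
  if (probe == super) return true;
  // A display miss is final; only a cache miss needs the linear scan.
  if (off != kCacheOff) return false;
  for (KlassSketch** s = sub->secondary_supers; *s != NULL; ++s) {
    if (*s == super) { sub->secondary_super_cache = super; return true; }
  }
  return false;
}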
@@ -2741,18 +2732,16 @@
 //------------------------maybe_cast_profiled_receiver-------------------------
 // If the profile has seen exactly one type, narrow to exactly that type.
 // Subsequent type checks will always fold up.
 Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj,
                                              ciKlass* require_klass,
                                              ciKlass* spec_klass,
                                              bool safe_for_replace) {
   if (!UseTypeProfile || !TypeProfileCasts) return NULL;

-  Deoptimization::DeoptReason reason = spec_klass == NULL ? Deoptimization::Reason_class_check : Deoptimization::Reason_speculate_class_check;
-
   // Make sure we haven't already deoptimized from this tactic.
-  if (too_many_traps(reason))
+  if (too_many_traps(Deoptimization::Reason_class_check))
     return NULL;

   // (No, this isn't a call, but it's enough like a virtual call
   // to use the same ciMethod accessor to get the profile info...)
   // If we have a speculative type use it instead of profiling (which
@@ -2770,11 +2759,11 @@
   Node* exact_obj = not_null_obj; // will get updated in place...
   Node* slow_ctl = type_check_receiver(exact_obj, exact_kls, 1.0,
                                        &exact_obj);
   { PreserveJVMState pjvms(this);
     set_control(slow_ctl);
-    uncommon_trap(reason,
+    uncommon_trap(Deoptimization::Reason_class_check,
                   Deoptimization::Action_maybe_recompile);
   }
   if (safe_for_replace) {
     replace_in_map(not_null_obj, exact_obj);
   }
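The shape generated here: a runtime klass test whose failing arm does not branch to slower code but deoptimizes via an uncommon trap, so the continuing path may assume the exact profiled type. A self-contained C++ sketch of that control flow (names illustrative, not real HotSpot code):

struct Klass; struct Object { Klass* klass; };
Klass* profiled_klass;  // the single receiver class seen by the profile

enum DeoptReason { Reason_class_check };
enum DeoptAction { Action_maybe_recompile };
void uncommon_trap(DeoptReason, DeoptAction);  // deoptimize: resume in interpreter

void speculate_exact_type(Object* obj) {
  if (obj->klass != profiled_klass) {       // the type_check_receiver guard
    uncommon_trap(Reason_class_check,       // failing arm traps instead of
                  Action_maybe_recompile);  // branching to slower code
    return;
  }
  // Here the compiler may treat obj as exactly profiled_klass, so later
  // checkcast/instanceof tests against it fold up at compile time.
}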
@@ -2797,14 +2786,12 @@
 Node* GraphKit::maybe_cast_profiled_obj(Node* obj,
                                         ciKlass* type,
                                         bool not_null) {
   // type == NULL if profiling tells us this object is always null
   if (type != NULL) {
-    Deoptimization::DeoptReason class_reason = Deoptimization::Reason_speculate_class_check;
-    Deoptimization::DeoptReason null_reason = Deoptimization::Reason_null_check;
-    if (!too_many_traps(null_reason) &&
-        !too_many_traps(class_reason)) {
+    if (!too_many_traps(Deoptimization::Reason_null_check) &&
+        !too_many_traps(Deoptimization::Reason_class_check)) {
       Node* not_null_obj = NULL;
       // not_null is true if we know the object is not null and
       // there's no need for a null check
       if (!not_null) {
         Node* null_ctl = top();
@@ -2819,11 +2806,11 @@
       Node* slow_ctl = type_check_receiver(exact_obj, exact_kls, 1.0,
                                            &exact_obj);
       {
         PreserveJVMState pjvms(this);
         set_control(slow_ctl);
-        uncommon_trap(class_reason,
+        uncommon_trap(Deoptimization::Reason_class_check,
                       Deoptimization::Action_maybe_recompile);
       }
       replace_in_map(not_null_obj, exact_obj);
       obj = exact_obj;
     }
@@ -2888,11 +2875,11 @@
       known_statically = (static_res == SSC_always_true || static_res == SSC_always_false);
     }
   }

   if (known_statically && UseTypeSpeculation) {
-    // If we know the type check always succeeds then we don't use the
+    // If we know the type check always succeed then we don't use the
     // profiling data at this bytecode. Don't lose it, feed it to the
     // type system as a speculative type.
     not_null_obj = record_profiled_receiver_for_speculation(not_null_obj);
   } else {
     const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
@@ -3020,11 +3007,11 @@
     cast_obj = maybe_cast_profiled_receiver(not_null_obj, tk->klass(), spec_obj_type, safe_for_replace);
     if (cast_obj != NULL) {
       if (failure_control != NULL) // failure is now impossible
         (*failure_control) = top();
       // adjust the type of the phi to the exact klass:
-      phi->raise_bottom_type(_gvn.type(cast_obj)->meet_speculative(TypePtr::NULL_PTR));
+      phi->raise_bottom_type(_gvn.type(cast_obj)->meet(TypePtr::NULL_PTR));
     }
   }

   if (cast_obj == NULL) {
     // Load the object's klass
@@ -3249,11 +3236,11 @@
       }
     }
   }
   constant_value = Klass::_lh_neutral_value;  // put in a known value
   Node* lhp = basic_plus_adr(klass_node, klass_node, in_bytes(Klass::layout_helper_offset()));
-  return make_load(NULL, lhp, TypeInt::INT, T_INT, MemNode::unordered);
+  return make_load(NULL, lhp, TypeInt::INT, T_INT);
 }

 // We just put in an allocate/initialize with a big raw-memory effect.
 // Hook selected additional alias categories on the initialization.
 static void hook_memory_on_init(GraphKit& kit, int alias_idx,
@@ -3784,11 +3771,11 @@
     __ if_then(card_val, BoolTest::ne, zero);
   }

   // Smash zero into card
   if( !UseConcMarkSweepGC ) {
-    __ store(__ ctrl(), card_adr, zero, bt, adr_type, MemNode::release);
+    __ store(__ ctrl(), card_adr, zero, bt, adr_type);
   } else {
     // Specialized path for CM store barrier
     __ storeCM(__ ctrl(), card_adr, zero, oop_store, adr_idx, bt, adr_type);
   }
@@ -3881,13 +3868,13 @@
       // decrement the index
       Node* next_index = _gvn.transform(new (C) SubXNode(index, __ ConX(sizeof(intptr_t))));

       // Now get the buffer location we will log the previous value into and store it
       Node *log_addr = __ AddP(no_base, buffer, next_index);
-      __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw, MemNode::unordered);
+      __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw);
       // update the index
-      __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw, MemNode::unordered);
+      __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw);

     } __ else_(); {

       // logging buffer is full, call the runtime
       const TypeFunc *tf = OptoRuntime::g1_wb_pre_Type();
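Both revisions emit the same buffer-logging shape for the G1 pre-barrier (only the stores' ordering annotation differs on the left): decrement the per-thread index, write the previous value at buffer + index, and publish the new index; a zero index means the buffer is full and the runtime must drain it. A C++ sketch of the pattern (field names illustrative, not HotSpot's):

#include <stddef.h>

// Illustrative SATB log-buffer push (not HotSpot code).
struct SatbQueueSketch {
  char*  buffer;  // raw log storage
  size_t index;   // byte offset of the next free slot, counting down to 0
};

void g1_wb_pre_runtime(void* pre_val);  // slow path: hand off to the VM

void pre_barrier_sketch(SatbQueueSketch* q, void* pre_val) {
  if (q->index != 0) {
    size_t next_index = q->index - sizeof(void*);        // SubX
    *(void**)(q->buffer + next_index) = pre_val;         // store into the log
    q->index = next_index;                               // update the index
  } else {
    g1_wb_pre_runtime(pre_val);  // logging buffer is full, call the runtime
  }
}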
@@ -3923,13 +3910,12 @@
   __ if_then(index, BoolTest::ne, zeroX); {

     Node* next_index = _gvn.transform(new (C) SubXNode(index, __ ConX(sizeof(intptr_t))));
     Node* log_addr = __ AddP(no_base, buffer, next_index);

-    // Order, see storeCM.
-    __ store(__ ctrl(), log_addr, card_adr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
-    __ store(__ ctrl(), index_adr, next_index, TypeX_X->basic_type(), Compile::AliasIdxRaw, MemNode::unordered);
+    __ store(__ ctrl(), log_addr, card_adr, T_ADDRESS, Compile::AliasIdxRaw);
+    __ store(__ ctrl(), index_adr, next_index, TypeX_X->basic_type(), Compile::AliasIdxRaw);

   } __ else_(); {
     __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), "g1_wb_post", card_adr, __ thread());
   } __ end_if();
@@ -4055,11 +4041,11 @@
                                                        false, NULL, 0);
     const TypePtr* offset_field_type = string_type->add_offset(offset_offset);
     int offset_field_idx = C->get_alias_index(offset_field_type);
     return make_load(ctrl,
                      basic_plus_adr(str, str, offset_offset),
-                     TypeInt::INT, T_INT, offset_field_idx, MemNode::unordered);
+                     TypeInt::INT, T_INT, offset_field_idx);
   } else {
     return intcon(0);
   }
 }

@@ -4070,11 +4056,11 @@
                                                        false, NULL, 0);
     const TypePtr* count_field_type = string_type->add_offset(count_offset);
     int count_field_idx = C->get_alias_index(count_field_type);
     return make_load(ctrl,
                      basic_plus_adr(str, str, count_offset),
-                     TypeInt::INT, T_INT, count_field_idx, MemNode::unordered);
+                     TypeInt::INT, T_INT, count_field_idx);
   } else {
     return load_array_length(load_String_value(ctrl, str));
   }
 }

@@ -4086,11 +4072,11 @@
     const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull,
                                                     TypeAry::make(TypeInt::CHAR,TypeInt::POS),
                                                     ciTypeArrayKlass::make(T_CHAR), true, 0);
     int value_field_idx = C->get_alias_index(value_field_type);
     Node* load = make_load(ctrl, basic_plus_adr(str, str, value_offset),
-                           value_type, T_OBJECT, value_field_idx, MemNode::unordered);
+                           value_type, T_OBJECT, value_field_idx);
     // String.value field is known to be @Stable.
     if (UseImplicitStableValues) {
       load = cast_array_to_stable(load, value_type);
     }
     return load;
@@ -4101,31 +4087,31 @@
   const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                      false, NULL, 0);
   const TypePtr* offset_field_type = string_type->add_offset(offset_offset);
   int offset_field_idx = C->get_alias_index(offset_field_type);
   store_to_memory(ctrl, basic_plus_adr(str, offset_offset),
-                  value, T_INT, offset_field_idx, MemNode::unordered);
+                  value, T_INT, offset_field_idx);
 }

 void GraphKit::store_String_value(Node* ctrl, Node* str, Node* value) {
   int value_offset = java_lang_String::value_offset_in_bytes();
   const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                      false, NULL, 0);
   const TypePtr* value_field_type = string_type->add_offset(value_offset);

   store_oop_to_object(ctrl, str, basic_plus_adr(str, value_offset), value_field_type,
-                      value, TypeAryPtr::CHARS, T_OBJECT, MemNode::unordered);
+                      value, TypeAryPtr::CHARS, T_OBJECT);
 }

 void GraphKit::store_String_length(Node* ctrl, Node* str, Node* value) {
   int count_offset = java_lang_String::count_offset_in_bytes();
   const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                      false, NULL, 0);
   const TypePtr* count_field_type = string_type->add_offset(count_offset);
   int count_field_idx = C->get_alias_index(count_field_type);
   store_to_memory(ctrl, basic_plus_adr(str, count_offset),
-                  value, T_INT, count_field_idx, MemNode::unordered);
+                  value, T_INT, count_field_idx);
 }

 Node* GraphKit::cast_array_to_stable(Node* ary, const TypeAryPtr* ary_type) {
   // Reify the property as a CastPP node in Ideal graph to comply with monotonicity
   // assumption of CCP analysis.