diff src/share/vm/opto/parse1.cpp @ 18041:52b4284cb496
Merge with jdk8u20-b26
author:   Gilles Duboscq <duboscq@ssw.jku.at>
date:     Wed, 15 Oct 2014 16:02:50 +0200
parents:  89152779163c 78bbf4d43a14
children: 7848fc12602b
--- a/src/share/vm/opto/parse1.cpp	Thu Oct 16 10:21:29 2014 +0200
+++ b/src/share/vm/opto/parse1.cpp	Wed Oct 15 16:02:50 2014 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -106,24 +106,24 @@
   // Very similar to LoadNode::make, except we handle un-aligned longs and
   // doubles on Sparc.  Intel can handle them just fine directly.
   Node *l;
-  switch( bt ) {                // Signature is flattened
-  case T_INT:     l = new (C) LoadINode( ctl, mem, adr, TypeRawPtr::BOTTOM ); break;
-  case T_FLOAT:   l = new (C) LoadFNode( ctl, mem, adr, TypeRawPtr::BOTTOM ); break;
-  case T_ADDRESS: l = new (C) LoadPNode( ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM ); break;
-  case T_OBJECT:  l = new (C) LoadPNode( ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM ); break;
+  switch (bt) {                // Signature is flattened
+  case T_INT:     l = new (C) LoadINode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInt::INT, MemNode::unordered); break;
+  case T_FLOAT:   l = new (C) LoadFNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::FLOAT, MemNode::unordered); break;
+  case T_ADDRESS: l = new (C) LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered); break;
+  case T_OBJECT:  l = new (C) LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM, MemNode::unordered); break;
   case T_LONG:
   case T_DOUBLE: {
     // Since arguments are in reverse order, the argument address 'adr'
     // refers to the back half of the long/double.  Recompute adr.
-    adr = basic_plus_adr( local_addrs_base, local_addrs, -(index+1)*wordSize );
-    if( Matcher::misaligned_doubles_ok ) {
+    adr = basic_plus_adr(local_addrs_base, local_addrs, -(index+1)*wordSize);
+    if (Matcher::misaligned_doubles_ok) {
       l = (bt == T_DOUBLE)
-        ? (Node*)new (C) LoadDNode( ctl, mem, adr, TypeRawPtr::BOTTOM )
-        : (Node*)new (C) LoadLNode( ctl, mem, adr, TypeRawPtr::BOTTOM );
+        ? (Node*)new (C) LoadDNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::DOUBLE, MemNode::unordered)
+        : (Node*)new (C) LoadLNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeLong::LONG, MemNode::unordered);
     } else {
       l = (bt == T_DOUBLE)
-        ? (Node*)new (C) LoadD_unalignedNode( ctl, mem, adr, TypeRawPtr::BOTTOM )
-        : (Node*)new (C) LoadL_unalignedNode( ctl, mem, adr, TypeRawPtr::BOTTOM );
+        ? (Node*)new (C) LoadD_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered)
+        : (Node*)new (C) LoadL_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered);
     }
     break;
   }
@@ -229,7 +229,7 @@
     Node *displaced_hdr = fetch_interpreter_state((index*2) + 1,
                                                   T_ADDRESS,
                                                   monitors_addr,
                                                   osr_buf);
-    store_to_memory(control(), box, displaced_hdr, T_ADDRESS, Compile::AliasIdxRaw);
+    store_to_memory(control(), box, displaced_hdr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
     // Build a bogus FastLockNode (no code will be generated) and push the
     // monitor into our debug info.
@@ -390,6 +390,7 @@
   _expected_uses = expected_uses;
   _depth = 1 + (caller->has_method() ? caller->depth() : 0);
   _wrote_final = false;
+  _wrote_volatile = false;
   _alloc_with_final = NULL;
   _entry_bci = InvocationEntryBci;
   _tf = NULL;
@@ -564,6 +565,10 @@
     set_map(entry_map);
     do_method_entry();
   }
+  if (depth() == 1) {
+    // Add check to deoptimize the nmethod if RTM state was changed
+    rtm_deopt();
+  }

   // Check for bailouts during method entry.
   if (failing()) {
@@ -602,7 +607,7 @@
   set_map(entry_map);
   do_exits();

-  if (log)  log->done("parse nodes='%d' live='%d' memory='%d'",
+  if (log)  log->done("parse nodes='%d' live='%d' memory='" SIZE_FORMAT "'",
                       C->unique(), C->live_nodes(), C->node_arena()->used());
 }
@@ -907,7 +912,13 @@
   Node* iophi = _exits.i_o();
   _exits.set_i_o(gvn().transform(iophi));

-  if (wrote_final()) {
+  // On PPC64, also add MemBarRelease for constructors which write
+  // volatile fields. As support_IRIW_for_not_multiple_copy_atomic_cpu
+  // is set on PPC64, no sync instruction is issued after volatile
+  // stores. We want to guarantee the same behaviour as on platforms
+  // with total store order, although this is not required by the Java
+  // memory model. So as with finals, we add a barrier here.
+  if (wrote_final() PPC64_ONLY(|| (wrote_volatile() && method()->is_initializer()))) {
     // This method (which must be a constructor by the rules of Java)
     // wrote a final.  The effects of all initializations must be
     // committed to memory before any code after the constructor
@@ -1358,7 +1369,7 @@
       tty->print((( i < ns) ? " %d" : " %d(e)"), b->successor_at(i)->rpo());
     }
     if (b->is_loop_head()) tty->print("  lphd");
-    tty->print_cr("");
+    tty->cr();
   }

   assert(block()->is_merged(), "must be merged before being parsed");
@@ -1649,7 +1660,7 @@
       assert(bt1 != Type::BOTTOM, "should not be building conflict phis");
       map()->set_req(j, _gvn.transform_no_reclaim(phi));
       debug_only(const Type* bt2 = phi->bottom_type());
-      assert(bt2->higher_equal(bt1), "must be consistent with type-flow");
+      assert(bt2->higher_equal_speculative(bt1), "must be consistent with type-flow");
       record_for_igvn(phi);
     }
   }
@@ -1931,7 +1942,7 @@
   Node* klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), klass_addr, TypeInstPtr::KLASS) );

   Node* access_flags_addr = basic_plus_adr(klass, klass, in_bytes(Klass::access_flags_offset()));
-  Node* access_flags = make_load(NULL, access_flags_addr, TypeInt::INT, T_INT);
+  Node* access_flags = make_load(NULL, access_flags_addr, TypeInt::INT, T_INT, MemNode::unordered);

   Node* mask  = _gvn.transform(new (C) AndINode(access_flags, intcon(JVM_ACC_HAS_FINALIZER)));
   Node* check = _gvn.transform(new (C) CmpINode(mask, intcon(0)));
@@ -1975,6 +1986,42 @@
   set_control( _gvn.transform(result_rgn) );
 }

+// Add check to deoptimize if RTM state is not ProfileRTM
+void Parse::rtm_deopt() {
+#if INCLUDE_RTM_OPT
+  if (C->profile_rtm()) {
+    assert(C->method() != NULL, "only for normal compilations");
+    assert(!C->method()->method_data()->is_empty(), "MDO is needed to record RTM state");
+    assert(depth() == 1, "generate check only for main compiled method");
+
+    // Set starting bci for uncommon trap.
+    set_parse_bci(is_osr_parse() ? osr_bci() : 0);
+
+    // Load the rtm_state from the MethodData.
+    const TypePtr* adr_type = TypeMetadataPtr::make(C->method()->method_data());
+    Node* mdo = makecon(adr_type);
+    int offset = MethodData::rtm_state_offset_in_bytes();
+    Node* adr_node = basic_plus_adr(mdo, mdo, offset);
+    Node* rtm_state = make_load(control(), adr_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
+
+    // Separate Load from Cmp by Opaque.
+    // In expand_macro_nodes() it will be replaced either
+    // with this load when there are locks in the code
+    // or with ProfileRTM (cmp->in(2)) otherwise so that
+    // the check will fold.
+    Node* profile_state = makecon(TypeInt::make(ProfileRTM));
+    Node* opq = _gvn.transform( new (C) Opaque3Node(C, rtm_state, Opaque3Node::RTM_OPT) );
+    Node* chk = _gvn.transform( new (C) CmpINode(opq, profile_state) );
+    Node* tst = _gvn.transform( new (C) BoolNode(chk, BoolTest::eq) );
+    // Branch to failure if state was changed
+    { BuildCutout unless(this, tst, PROB_ALWAYS);
+      uncommon_trap(Deoptimization::Reason_rtm_state_change,
+                    Deoptimization::Action_make_not_entrant);
+    }
+  }
+#endif
+}
+
 //------------------------------return_current---------------------------------
 // Append current _map to _exit_return
 void Parse::return_current(Node* value) {
@@ -2022,7 +2069,7 @@
         !tp->klass()->is_interface()) {
       // sharpen the type eagerly; this eases certain assert checking
       if (tp->higher_equal(TypeInstPtr::NOTNULL))
-        tr = tr->join(TypeInstPtr::NOTNULL)->is_instptr();
+        tr = tr->join_speculative(TypeInstPtr::NOTNULL)->is_instptr();
       value = _gvn.transform(new (C) CheckCastPPNode(0,value,tr));
     }
   }
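A recurring theme in the hunks above is that every C2 load and store factory now takes an explicit memory-ordering argument; the call sites touched here are all plain accesses, so they pass MemNode::unordered. As a rough standalone analogy in standard C++ (not HotSpot code; the Example type and its fields are invented for illustration), an unordered access corresponds to an ordinary relaxed-style access, while a Java volatile needs release/acquire pairing:

    #include <atomic>
    #include <cstdio>

    struct Example {
      int plain_field = 0;                 // ordinary field: no ordering required
      std::atomic<int> volatile_field{0};  // stands in for a Java volatile field

      void writer() {
        plain_field = 42;                                    // "unordered" store
        volatile_field.store(1, std::memory_order_release);  // releasing store
      }

      void reader() {
        if (volatile_field.load(std::memory_order_acquire) == 1) {  // acquiring load
          // The release/acquire pair guarantees we observe plain_field == 42.
          std::printf("plain_field = %d\n", plain_field);
        }
      }
    };

    int main() {
      Example e;
      e.writer();
      e.reader();
      return 0;
    }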
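The log->done() change in the @@ -602 hunk is a portability fix rather than a behavioural one: Arena::used() returns a size_t, which '%d' truncates on LP64 platforms, so the format string switches to HotSpot's SIZE_FORMAT macro. A minimal standalone illustration of the same pitfall, using the standard '%zu' conversion in place of SIZE_FORMAT (the variable name is made up):

    #include <cstddef>
    #include <cstdio>

    int main() {
      std::size_t used = 5000000000ULL;       // larger than UINT_MAX on a 64-bit build
      std::printf("memory='%zu'\n", used);    // portable: %zu matches size_t
      // std::printf("memory='%d'\n", used);  // mismatched: undefined behaviour on LP64
      return 0;
    }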
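The do_exits() hunk extends the existing end-of-constructor barrier for final fields to constructors that wrote volatile fields, but only on PPC64, where support_IRIW_for_not_multiple_copy_atomic_cpu means no sync is issued after a volatile store. A sketch of the guarantee in standard C++ terms (an analogy only, not the VM's actual MemBarRelease machinery; the Holder class is invented):

    #include <atomic>
    #include <cstdio>

    class Holder {
     public:
      explicit Holder(int v) : value_(v) {
        // Stand-in for the MemBarRelease that C2 now emits at constructor exit
        // when a volatile (or final) field was written: nothing after this
        // fence can be reordered before the field initialization above, so a
        // reference published later always refers to a fully built object.
        std::atomic_thread_fence(std::memory_order_release);
      }
      int value() const { return value_; }
     private:
      int value_;  // stands in for the volatile field the constructor wrote
    };

    int main() {
      Holder h(42);
      std::printf("value = %d\n", h.value());
      return 0;
    }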
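Finally, the new rtm_deopt() plants a guard at the root method's entry: it loads rtm_state from the MethodData, compares it against ProfileRTM behind an Opaque3Node (so expand_macro_nodes() can later keep the load or fold the check away), and takes an uncommon trap that makes the nmethod not entrant if the state changed after compilation. A much-simplified standalone sketch of that guard shape (the enum values and helper functions below are invented stand-ins, not HotSpot's):

    #include <cstdio>
    #include <cstdlib>

    enum RTMState { ProfileRTM, UseRTM, NoRTM };  // simplified stand-in values

    static RTMState g_rtm_state = ProfileRTM;  // stands in for MethodData::rtm_state

    static void uncommon_trap() {
      // In the VM this would deoptimize to the interpreter and mark the
      // nmethod not entrant; here we just report and stop.
      std::printf("deoptimize: RTM state changed since compilation\n");
      std::exit(0);
    }

    static void compiled_method_entry() {
      // The generated guard: fall through while the state is still
      // ProfileRTM, otherwise branch to the uncommon trap.
      if (g_rtm_state != ProfileRTM) {
        uncommon_trap();
      }
      std::printf("RTM state unchanged: continue in compiled code\n");
    }

    int main() {
      compiled_method_entry();   // state unchanged: falls through
      g_rtm_state = UseRTM;
      compiled_method_entry();   // state changed: deoptimizes
      return 0;
    }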