# HG changeset patch
# User ysr
# Date 1219861246 25200
# Node ID 5d254928c888f71352ce409c541a310587235af3
# Parent  d60e4e6d7f7229492116df3cda3856593335caf3
# Parent  d515536da1890eb795ac32b423399f60e84407a0
Merge

diff -r d515536da189 -r 5d254928c888 .hgtags
--- a/.hgtags	Tue Aug 26 00:46:03 2008 -0400
+++ b/.hgtags	Wed Aug 27 11:20:46 2008 -0700
@@ -7,3 +7,4 @@
 d1605aabd0a15ecf93787c47de63073c33fba52d jdk7-b30
 9c2ecc2ffb125f14fab3857fe7689598956348a0 jdk7-b31
 b727c32788a906c04839516ae7443a085185a300 jdk7-b32
+585535ec8a14adafa6bfea65d6975e29094c8cec jdk7-b33
diff -r d515536da189 -r 5d254928c888 make/hotspot_distro
--- a/make/hotspot_distro	Tue Aug 26 00:46:03 2008 -0400
+++ b/make/hotspot_distro	Wed Aug 27 11:20:46 2008 -0700
@@ -1,6 +1,24 @@
-#
-# Copyright 2006-2008 Sun Microsystems, Inc. All rights reserved.
-# SUN PROPRIETARY/CONFIDENTIAL. Use is subject to license terms.
+#
+# Copyright 2006-2008 Sun Microsystems, Inc. All Rights Reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+# CA 95054 USA or visit www.sun.com if you need additional information or
+# have any questions.
 #
 #
 
diff -r d515536da189 -r 5d254928c888 make/linux/makefiles/vm.make
--- a/make/linux/makefiles/vm.make	Tue Aug 26 00:46:03 2008 -0400
+++ b/make/linux/makefiles/vm.make	Wed Aug 27 11:20:46 2008 -0700
@@ -195,7 +195,7 @@
 	    if [ $$? = 0 ] ; then                       \
 	      /usr/bin/chcon -t textrel_shlib_t $@;     \
 	      if [ $$? != 0 ]; then                     \
-	        echo "ERROR: Cannot chcon $@"; exit 1;  \
+	        echo "ERROR: Cannot chcon $@";          \
 	      fi                                        \
 	    fi                                          \
 	  fi                                            \
diff -r d515536da189 -r 5d254928c888 src/share/vm/adlc/output_h.cpp
--- a/src/share/vm/adlc/output_h.cpp	Tue Aug 26 00:46:03 2008 -0400
+++ b/src/share/vm/adlc/output_h.cpp	Wed Aug 27 11:20:46 2008 -0700
@@ -1848,6 +1848,19 @@
     fprintf(fp,"  const Type *bottom_type() const { const Type *t = in(oper_input_base()+%d)->bottom_type(); return (req() <= oper_input_base()+%d) ? t : t->meet(in(oper_input_base()+%d)->bottom_type()); } // CMoveP\n",
       offset, offset+1, offset+1);
   }
+  else if( instr->_matrule && instr->_matrule->_rChild && !strcmp(instr->_matrule->_rChild->_opType,"CMoveN") ) {
+    int offset = 1;
+    // Special special hack to see if the Cmp? has been incorporated in the conditional move
+    MatchNode *rl = instr->_matrule->_rChild->_lChild;
+    if( rl && !strcmp(rl->_opType, "Binary") ) {
+      MatchNode *rlr = rl->_rChild;
+      if (rlr && strncmp(rlr->_opType, "Cmp", 3) == 0)
+        offset = 2;
+    }
+    // Special hack for ideal CMoveN; ideal type depends on inputs
+    fprintf(fp,"  const Type *bottom_type() const { const Type *t = in(oper_input_base()+%d)->bottom_type(); return (req() <= oper_input_base()+%d) ? t : t->meet(in(oper_input_base()+%d)->bottom_type()); } // CMoveN\n",
+      offset, offset+1, offset+1);
+  }
   else if( instr->needs_base_oop_edge(_globalNames) ) {
     // Special hack for ideal AddP.  Bottom type is an oop IFF it has a
     // legal base-pointer input.  Otherwise it is NOT an oop.
diff -r d515536da189 -r 5d254928c888 src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Tue Aug 26 00:46:03 2008 -0400
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Wed Aug 27 11:20:46 2008 -0700
@@ -2790,10 +2790,11 @@
   assert(n_threads > 0, "Unexpected n_threads argument");
   const size_t task_size = rescan_task_size();
   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
-  assert((used_region().start() + (n_tasks - 1)*task_size <
-          used_region().end()) &&
-         (used_region().start() + n_tasks*task_size >=
-          used_region().end()), "n_task calculation incorrect");
+  assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
+  assert(n_tasks == 0 ||
+         ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
+          (used_region().start() + n_tasks*task_size >= used_region().end())),
+         "n_tasks calculation incorrect");
   SequentialSubTasksDone* pst = conc_par_seq_tasks();
   assert(!pst->valid(), "Clobbering existing data?");
   pst->set_par_threads(n_threads);
@@ -2833,7 +2834,7 @@
   assert(n_tasks == 0 ||
          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
           (span.start() + n_tasks*task_size >= span.end())),
-         "n_task calculation incorrect");
+         "n_tasks calculation incorrect");
   SequentialSubTasksDone* pst = conc_par_seq_tasks();
   assert(!pst->valid(), "Clobbering existing data?");
   pst->set_par_threads(n_threads);
diff -r d515536da189 -r 5d254928c888 src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Tue Aug 26 00:46:03 2008 -0400
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Aug 27 11:20:46 2008 -0700
@@ -3651,6 +3651,7 @@
   CompactibleFreeListSpace* _cms_space;
   CompactibleFreeListSpace* _perm_space;
   HeapWord*     _global_finger;
+  HeapWord*     _restart_addr;
 
   //  Exposed here for yielding support
   Mutex* const _bit_map_lock;
@@ -3681,7 +3682,7 @@
     _term.set_task(this);
     assert(_cms_space->bottom() < _perm_space->bottom(),
            "Finger incorrectly initialized below");
-    _global_finger = _cms_space->bottom();
+    _restart_addr = _global_finger = _cms_space->bottom();
   }
 
 
@@ -3699,6 +3700,10 @@
 
   bool result() { return _result; }
 
   void reset(HeapWord* ra) {
+    assert(_global_finger >= _cms_space->end(),  "Postcondition of ::work(i)");
+    assert(_global_finger >= _perm_space->end(), "Postcondition of ::work(i)");
+    assert(ra             <  _perm_space->end(), "ra too large");
+    _restart_addr = _global_finger = ra;
     _term.reset_for_reuse();
   }
@@ -3843,16 +3848,24 @@
   int n_tasks = pst->n_tasks();
   // We allow that there may be no tasks to do here because
   // we are restarting after a stack overflow.
-  assert(pst->valid() || n_tasks == 0, "Uninitializd use?");
+  assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
   int nth_task = 0;
 
-  HeapWord* start = sp->bottom();
+  HeapWord* aligned_start = sp->bottom();
+  if (sp->used_region().contains(_restart_addr)) {
+    // Align down to a card boundary for the start of 0th task
+    // for this space.
+    aligned_start =
+      (HeapWord*)align_size_down((uintptr_t)_restart_addr,
+                                 CardTableModRefBS::card_size);
+  }
+
   size_t chunk_size = sp->marking_task_size();
   while (!pst->is_task_claimed(/* reference */ nth_task)) {
     // Having claimed the nth task in this space,
     // compute the chunk that it corresponds to:
-    MemRegion span = MemRegion(start + nth_task*chunk_size,
-                               start + (nth_task+1)*chunk_size);
+    MemRegion span = MemRegion(aligned_start + nth_task*chunk_size,
+                               aligned_start + (nth_task+1)*chunk_size);
     // Try and bump the global finger via a CAS;
     // note that we need to do the global finger bump
     // _before_ taking the intersection below, because
@@ -3867,26 +3880,40 @@
     // beyond the "top" address of the space.
     span = span.intersection(sp->used_region());
     if (!span.is_empty()) {  // Non-null task
-      // We want to skip the first object because
-      // the protocol is to scan any object in its entirety
-      // that _starts_ in this span; a fortiori, any
-      // object starting in an earlier span is scanned
-      // as part of an earlier claimed task.
-      // Below we use the "careful" version of block_start
-      // so we do not try to navigate uninitialized objects.
-      HeapWord* prev_obj = sp->block_start_careful(span.start());
-      // Below we use a variant of block_size that uses the
-      // Printezis bits to avoid waiting for allocated
-      // objects to become initialized/parsable.
-      while (prev_obj < span.start()) {
-        size_t sz = sp->block_size_no_stall(prev_obj, _collector);
-        if (sz > 0) {
-          prev_obj += sz;
+      HeapWord* prev_obj;
+      assert(!span.contains(_restart_addr) || nth_task == 0,
+             "Inconsistency");
+      if (nth_task == 0) {
+        // For the 0th task, we'll not need to compute a block_start.
+        if (span.contains(_restart_addr)) {
+          // In the case of a restart because of stack overflow,
+          // we might additionally skip a chunk prefix.
+          prev_obj = _restart_addr;
         } else {
-          // In this case we may end up doing a bit of redundant
-          // scanning, but that appears unavoidable, short of
-          // locking the free list locks; see bug 6324141.
-          break;
+          prev_obj = span.start();
+        }
+      } else {
+        // We want to skip the first object because
+        // the protocol is to scan any object in its entirety
+        // that _starts_ in this span; a fortiori, any
+        // object starting in an earlier span is scanned
+        // as part of an earlier claimed task.
+        // Below we use the "careful" version of block_start
+        // so we do not try to navigate uninitialized objects.
+        prev_obj = sp->block_start_careful(span.start());
+        // Below we use a variant of block_size that uses the
+        // Printezis bits to avoid waiting for allocated
+        // objects to become initialized/parsable.
+        while (prev_obj < span.start()) {
+          size_t sz = sp->block_size_no_stall(prev_obj, _collector);
+          if (sz > 0) {
+            prev_obj += sz;
+          } else {
+            // In this case we may end up doing a bit of redundant
+            // scanning, but that appears unavoidable, short of
+            // locking the free list locks; see bug 6324141.
+            break;
+          }
         }
       }
       if (prev_obj < span.end()) {
@@ -3939,12 +3966,14 @@
   void handle_stack_overflow(HeapWord* lost);
 };
 
-// Grey object rescan during work stealing phase --
-// the salient assumption here is that stolen oops must
-// always be initialized, so we do not need to check for
-// uninitialized objects before scanning here.
+// Grey object scanning during work stealing phase --
+// the salient assumption here is that any references
+// that are in these stolen objects being scanned must
+// already have been initialized (else they would not have
+// been published), so we do not need to check for
+// uninitialized objects before pushing here.
 void Par_ConcMarkingClosure::do_oop(oop obj) {
-  assert(obj->is_oop_or_null(), "expected an oop or NULL");
+  assert(obj->is_oop_or_null(true), "expected an oop or NULL");
   HeapWord* addr = (HeapWord*)obj;
   // Check if oop points into the CMS generation
   // and is not marked
@@ -4002,7 +4031,7 @@
 // in CMSCollector's _restart_address.
 void Par_ConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
   // We need to do this under a mutex to prevent other
-  // workers from interfering with the expansion below.
+  // workers from interfering with the work done below.
   MutexLockerEx ml(_overflow_stack->par_lock(),
                    Mutex::_no_safepoint_check_flag);
   // Remember the least grey address discarded
@@ -6558,7 +6587,7 @@
   if (obj != NULL) {
     // Ignore mark word because this could be an already marked oop
     // that may be chained at the end of the overflow list.
-    assert(obj->is_oop(), "expected an oop");
+    assert(obj->is_oop(true), "expected an oop");
     HeapWord* addr = (HeapWord*)obj;
     if (_span.contains(addr) &&
         !_bit_map->isMarked(addr)) {
@@ -7296,6 +7325,8 @@
   _should_remember_klasses(collector->should_unload_classes())
 { }
 
+// Assumes thread-safe access by callers, who are
+// responsible for mutual exclusion.
 void CMSCollector::lower_restart_addr(HeapWord* low) {
   assert(_span.contains(low), "Out of bounds addr");
   if (_restart_addr == NULL) {
@@ -7321,7 +7352,7 @@
 // in CMSCollector's _restart_address.
 void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
   // We need to do this under a mutex to prevent other
-  // workers from interfering with the expansion below.
+  // workers from interfering with the work done below.
   MutexLockerEx ml(_overflow_stack->par_lock(),
                    Mutex::_no_safepoint_check_flag);
   // Remember the least grey address discarded
diff -r d515536da189 -r 5d254928c888 src/share/vm/memory/dump.cpp
--- a/src/share/vm/memory/dump.cpp	Tue Aug 26 00:46:03 2008 -0400
+++ b/src/share/vm/memory/dump.cpp	Wed Aug 27 11:20:46 2008 -0700
@@ -1200,10 +1200,12 @@
   mapinfo->write_space(CompactingPermGenGen::rw, _rw_space, false);
   _rw_space->set_saved_mark();
   mapinfo->write_region(CompactingPermGenGen::md, _md_vs->low(),
-                        md_top - _md_vs->low(), SharedMiscDataSize,
+                        pointer_delta(md_top, _md_vs->low(), sizeof(char)),
+                        SharedMiscDataSize,
                         false, false);
   mapinfo->write_region(CompactingPermGenGen::mc, _mc_vs->low(),
-                        mc_top - _mc_vs->low(), SharedMiscCodeSize,
+                        pointer_delta(mc_top, _mc_vs->low(), sizeof(char)),
+                        SharedMiscCodeSize,
                         true, true);
 
   // Pass 2 - write data.
@@ -1212,10 +1214,12 @@
   mapinfo->write_space(CompactingPermGenGen::ro, _ro_space, true);
   mapinfo->write_space(CompactingPermGenGen::rw, _rw_space, false);
   mapinfo->write_region(CompactingPermGenGen::md, _md_vs->low(),
-                        md_top - _md_vs->low(), SharedMiscDataSize,
+                        pointer_delta(md_top, _md_vs->low(), sizeof(char)),
+                        SharedMiscDataSize,
                         false, false);
   mapinfo->write_region(CompactingPermGenGen::mc, _mc_vs->low(),
-                        mc_top - _mc_vs->low(), SharedMiscCodeSize,
+                        pointer_delta(mc_top, _mc_vs->low(), sizeof(char)),
+                        SharedMiscCodeSize,
                         true, true);
 
   mapinfo->close();
diff -r d515536da189 -r 5d254928c888 src/share/vm/opto/addnode.cpp
--- a/src/share/vm/opto/addnode.cpp	Tue Aug 26 00:46:03 2008 -0400
+++ b/src/share/vm/opto/addnode.cpp	Wed Aug 27 11:20:46 2008 -0700
@@ -695,6 +695,8 @@
       guarantee(tptr == NULL, "must be only one pointer operand");
       tptr = et->isa_oopptr();
       guarantee(tptr != NULL, "non-int operand must be pointer");
+      if (tptr->higher_equal(tp->add_offset(tptr->offset())))
+        tp = tptr; // Set more precise type for bailout
       continue;
     }
     if ( eti->_hi != eti->_lo )  goto bottom_out;
diff -r d515536da189 -r 5d254928c888 src/share/vm/opto/callGenerator.cpp
--- a/src/share/vm/opto/callGenerator.cpp	Tue Aug 26 00:46:03 2008 -0400
+++ b/src/share/vm/opto/callGenerator.cpp	Wed Aug 27 11:20:46 2008 -0700
@@ -464,6 +464,12 @@
     }
   }
 
+  if (kit.stopped()) {
+    // Instance exactly does not matches the desired type.
+    kit.set_jvms(slow_jvms);
+    return kit.transfer_exceptions_into_jvms();
+  }
+
   // fall through if the instance exactly matches the desired type
   kit.replace_in_map(receiver, exact_receiver);
 
diff -r d515536da189 -r 5d254928c888 src/share/vm/opto/chaitin.cpp
--- a/src/share/vm/opto/chaitin.cpp	Tue Aug 26 00:46:03 2008 -0400
+++ b/src/share/vm/opto/chaitin.cpp	Wed Aug 27 11:20:46 2008 -0700
@@ -43,7 +43,7 @@
   if( _degree_valid ) tty->print( "%d ", _eff_degree );
   else tty->print("? ");
 
-  if( _def == NodeSentinel ) {
+  if( is_multidef() ) {
     tty->print("MultiDef ");
     if (_defs != NULL) {
       tty->print("(");
@@ -765,7 +765,7 @@
         // if the LRG is an unaligned pair, we will have to spill
         // so clear the LRG's register mask if it is not already spilled
         if ( !n->is_SpillCopy() &&
-             (lrg._def == NULL || lrg._def == NodeSentinel || !lrg._def->is_SpillCopy()) &&
+             (lrg._def == NULL || lrg.is_multidef() || !lrg._def->is_SpillCopy()) &&
              lrgmask.is_misaligned_Pair()) {
           lrg.Clear();
         }
@@ -1282,7 +1282,7 @@
     // Live range is live and no colors available
     else {
       assert( lrg->alive(), "" );
-      assert( !lrg->_fat_proj || lrg->_def == NodeSentinel ||
+      assert( !lrg->_fat_proj || lrg->is_multidef() ||
              lrg->_def->outcnt() > 0, "fat_proj cannot spill");
       assert( !orig_mask.is_AllStack(), "All Stack does not spill" );
 
diff -r d515536da189 -r 5d254928c888 src/share/vm/opto/chaitin.hpp
--- a/src/share/vm/opto/chaitin.hpp	Tue Aug 26 00:46:03 2008 -0400
+++ b/src/share/vm/opto/chaitin.hpp	Wed Aug 27 11:20:46 2008 -0700
@@ -156,6 +156,8 @@
 
   // Alive if non-zero, dead if zero
   bool alive() const { return _def != NULL; }
+  bool is_multidef() const { return _def == NodeSentinel; }
+  bool is_singledef() const { return _def != NodeSentinel; }
 
 #ifndef PRODUCT
   void dump( ) const;
@@ -320,7 +322,8 @@
   uint split_DEF( Node *def, Block *b, int loc, uint max, Node **Reachblock, Node **debug_defs, GrowableArray<uint> splits, int slidx );
   uint split_USE( Node *def, Block *b, Node *use, uint useidx, uint max, bool def_down, bool cisc_sp, GrowableArray<uint> splits, int slidx );
   int clone_projs( Block *b, uint idx, Node *con, Node *copy, uint &maxlrg );
-  Node *split_Rematerialize( Node *def, Block *b, uint insidx, uint &maxlrg, GrowableArray<uint> splits, int slidx, uint *lrg2reach, Node **Reachblock, bool walkThru );
+  Node *split_Rematerialize(Node *def, Block *b, uint insidx, uint &maxlrg, GrowableArray<uint> splits,
+                            int slidx, uint *lrg2reach, Node **Reachblock, bool walkThru);
   // True if lidx is used before any real register is def'd in the block
   bool prompt_use( Block *b, uint lidx );
   Node *get_spillcopy_wide( Node *def, Node *use, uint uidx );
diff -r d515536da189 -r 5d254928c888 src/share/vm/opto/coalesce.cpp
--- a/src/share/vm/opto/coalesce.cpp	Tue Aug 26 00:46:03 2008 -0400
+++ b/src/share/vm/opto/coalesce.cpp	Wed Aug 27 11:20:46 2008 -0700
@@ -604,8 +604,8 @@
       // If both are single def, then src_def powers one live range
       // and def_copy powers the other.  After merging, src_def powers
       // the combined live range.
-      lrgs(lr1)._def = (lrgs(lr1)._def == NodeSentinel ||
-                        lrgs(lr2)._def == NodeSentinel )
+      lrgs(lr1)._def = (lrgs(lr1).is_multidef() ||
+                        lrgs(lr2).is_multidef() )
                          ? NodeSentinel : src_def;
       lrgs(lr2)._def = NULL;    // No def for lrg 2
       lrgs(lr2).Clear();        // Force empty mask for LRG 2
diff -r d515536da189 -r 5d254928c888 src/share/vm/opto/compile.cpp
--- a/src/share/vm/opto/compile.cpp	Tue Aug 26 00:46:03 2008 -0400
+++ b/src/share/vm/opto/compile.cpp	Wed Aug 27 11:20:46 2008 -0700
@@ -2111,6 +2111,7 @@
         n->subsume_by( cmpN );
       }
     }
+    break;
 #endif
 
   case Op_ModI:
diff -r d515536da189 -r 5d254928c888 src/share/vm/opto/divnode.cpp
--- a/src/share/vm/opto/divnode.cpp	Tue Aug 26 00:46:03 2008 -0400
+++ b/src/share/vm/opto/divnode.cpp	Wed Aug 27 11:20:46 2008 -0700
@@ -264,8 +264,14 @@
 
   Node *t1 = phase->transform(new (phase->C, 3) URShiftLNode(lolo_product, phase->intcon(N / 2)));
   Node *t2 = phase->transform(new (phase->C, 3) AddLNode(hilo_product, t1));
-  Node *t3 = phase->transform(new (phase->C, 3) RShiftLNode(t2, phase->intcon(N / 2)));
-  Node *t4 = phase->transform(new (phase->C, 3) AndLNode(t2, phase->longcon(0xFFFFFFFF)));
+
+  // Construct both t3 and t4 before transforming so t2 doesn't go dead
+  // prematurely.
+  Node *t3 = new (phase->C, 3) RShiftLNode(t2, phase->intcon(N / 2));
+  Node *t4 = new (phase->C, 3) AndLNode(t2, phase->longcon(0xFFFFFFFF));
+  t3 = phase->transform(t3);
+  t4 = phase->transform(t4);
+
   Node *t5 = phase->transform(new (phase->C, 3) AddLNode(t4, lohi_product));
   Node *t6 = phase->transform(new (phase->C, 3) RShiftLNode(t5, phase->intcon(N / 2)));
   Node *t7 = phase->transform(new (phase->C, 3) AddLNode(t3, hihi_product));
diff -r d515536da189 -r 5d254928c888 src/share/vm/opto/escape.cpp
--- a/src/share/vm/opto/escape.cpp	Tue Aug 26 00:46:03 2008 -0400
+++ b/src/share/vm/opto/escape.cpp	Wed Aug 27 11:20:46 2008 -0700
@@ -492,13 +492,13 @@
 // Adjust the type and inputs of an AddP which computes the
 // address of a field of an instance
 //
-void ConnectionGraph::split_AddP(Node *addp, Node *base, PhaseGVN *igvn) {
+bool ConnectionGraph::split_AddP(Node *addp, Node *base, PhaseGVN *igvn) {
   const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
   assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr");
   const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
   if (t == NULL) {
     // We are computing a raw address for a store captured by an Initialize
-    // compute an appropriate address type.
+    // compute an appropriate address type (cases #3 and #5).
     assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
     assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
     int offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
@@ -508,6 +508,25 @@
   int inst_id = base_t->instance_id();
   assert(!t->is_known_instance() || t->instance_id() == inst_id,
          "old type must be non-instance or match new type");
+
+  // The type 't' could be subclass of 'base_t'.
+  // As result t->offset() could be large then base_t's size and it will
+  // cause the failure in add_offset() with narrow oops since TypeOopPtr()
+  // constructor verifies correctness of the offset.
+  //
+  // It could happend on subclass's branch (from the type profiling
+  // inlining) which was not eliminated during parsing since the exactness
+  // of the allocation type was not propagated to the subclass type check.
+  //
+  // Do nothing for such AddP node and don't process its users since
+  // this code branch will go away.
+  //
+  if (!t->is_known_instance() &&
+      !t->klass()->equals(base_t->klass()) &&
+      t->klass()->is_subtype_of(base_t->klass())) {
+     return false; // bail out
+  }
+
   const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
   // Do NOT remove the next call: ensure an new alias index is allocated
   // for the instance type
@@ -542,6 +561,7 @@
   }
   // Put on IGVN worklist since at least addp's type was changed above.
   record_for_optimizer(addp);
+  return true;
 }
 
 //
@@ -969,7 +989,7 @@
         if (elem == _phantom_object)
           continue; // Assume the value was set outside this method.
         Node *base = get_map(elem);  // CheckCastPP node
-        split_AddP(n, base, igvn);
+        if (!split_AddP(n, base, igvn)) continue; // wrong type
         tinst = igvn->type(base)->isa_oopptr();
       } else if (n->is_Phi() ||
                  n->is_CheckCastPP() ||
@@ -1012,6 +1032,8 @@
           tn->set_type(tn_type);
           igvn->hash_insert(tn);
           record_for_optimizer(n);
+        } else {
+          continue; // wrong type
         }
       }
     } else {
diff -r d515536da189 -r 5d254928c888 src/share/vm/opto/escape.hpp
--- a/src/share/vm/opto/escape.hpp	Tue Aug 26 00:46:03 2008 -0400
+++ b/src/share/vm/opto/escape.hpp	Wed Aug 27 11:20:46 2008 -0700
@@ -286,7 +286,7 @@
   // MemNode       - new memory input for this node
   // ChecCastPP    - allocation that this is a cast of
   // allocation    - CheckCastPP of the allocation
-  void split_AddP(Node *addp, Node *base, PhaseGVN *igvn);
+  bool split_AddP(Node *addp, Node *base, PhaseGVN *igvn);
   PhiNode *create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, PhaseGVN *igvn, bool &new_created);
   PhiNode *split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, PhaseGVN *igvn);
   Node *find_mem(Node *mem, int alias_idx, PhaseGVN *igvn);
diff -r d515536da189 -r 5d254928c888 src/share/vm/opto/ifg.cpp
--- a/src/share/vm/opto/ifg.cpp	Tue Aug 26 00:46:03 2008 -0400
+++ b/src/share/vm/opto/ifg.cpp	Wed Aug 27 11:20:46 2008 -0700
@@ -594,7 +594,7 @@
 
       // Insure high score for immediate-use spill copies so they get a color
       if( n->is_SpillCopy()
-          && lrgs(r)._def != NodeSentinel // MultiDef live range can still split
+          && lrgs(r).is_singledef()       // MultiDef live range can still split
           && n->outcnt() == 1             // and use must be in this block
           && _cfg._bbs[n->unique_out()->_idx] == b ) {
         // All single-use MachSpillCopy(s) that immediately precede their
diff -r d515536da189 -r 5d254928c888 src/share/vm/opto/loopnode.cpp
--- a/src/share/vm/opto/loopnode.cpp	Tue Aug 26 00:46:03 2008 -0400
+++ b/src/share/vm/opto/loopnode.cpp	Wed Aug 27 11:20:46 2008 -0700
@@ -2625,9 +2625,11 @@
   case Op_LoadF:
   case Op_LoadI:
   case Op_LoadKlass:
+  case Op_LoadNKlass:
   case Op_LoadL:
   case Op_LoadS:
   case Op_LoadP:
+  case Op_LoadN:
   case Op_LoadRange:
   case Op_LoadD_unaligned:
   case Op_LoadL_unaligned:
diff -r d515536da189 -r 5d254928c888 src/share/vm/opto/loopopts.cpp
--- a/src/share/vm/opto/loopopts.cpp	Tue Aug 26 00:46:03 2008 -0400
+++ b/src/share/vm/opto/loopopts.cpp	Wed Aug 27 11:20:46 2008 -0700
@@ -96,6 +96,10 @@
       // our new node, even though we may throw the node away.
       // (Note: This tweaking with igvn only works because x is a new node.)
       _igvn.set_type(x, t);
+      // If x is a TypeNode, capture any more-precise type permanently into Node
+      // othewise it will be not updated during igvn->transform since
+      // igvn->type(x) is set to x->Value() already.
+      x->raise_bottom_type(t);
       Node *y = x->Identity(&_igvn);
       if( y != x ) {
         wins++;
@@ -464,11 +468,11 @@
   case T_FLOAT:
   case T_DOUBLE:
   case T_ADDRESS:               // (RawPtr)
-  case T_NARROWOOP: cost++; break;
+  case T_NARROWOOP: // Fall through
   case T_OBJECT: {              // Base oops are OK, but not derived oops
-    const TypeOopPtr *tp = phi->type()->isa_oopptr();
+    const TypeOopPtr *tp = phi->type()->make_ptr()->isa_oopptr();
     // Derived pointers are Bad (tm): what's the Base (for GC purposes) of a
     // CMOVE'd derived pointer?  It's a CMOVE'd derived base.  Thus
     // CMOVE'ing a derived pointer requires we also CMOVE the base.  If we
@@ -499,11 +503,11 @@
         return NULL;  // Too much speculative goo
       }
     }
-    // See if the Phi is used by a Cmp.  This will likely Split-If, a
-    // higher-payoff operation.
+    // See if the Phi is used by a Cmp or Narrow oop Decode/Encode.
+    // This will likely Split-If, a higher-payoff operation.
     for (DUIterator_Fast kmax, k = phi->fast_outs(kmax); k < kmax; k++) {
       Node* use = phi->fast_out(k);
-      if( use->is_Cmp() )
+      if( use->is_Cmp() || use->is_DecodeN() || use->is_EncodeP() )
         return NULL;
     }
   }
diff -r d515536da189 -r 5d254928c888 src/share/vm/opto/memnode.cpp
--- a/src/share/vm/opto/memnode.cpp	Tue Aug 26 00:46:03 2008 -0400
+++ b/src/share/vm/opto/memnode.cpp	Wed Aug 27 11:20:46 2008 -0700
@@ -1231,6 +1231,10 @@
       // our new node, even though we may throw the node away.
       // (This tweaking with igvn only works because x is a new node.)
       igvn->set_type(x, t);
+      // If x is a TypeNode, capture any more-precise type permanently into Node
+      // othewise it will be not updated during igvn->transform since
+      // igvn->type(x) is set to x->Value() already.
+      x->raise_bottom_type(t);
       Node *y = x->Identity(igvn);
       if( y != x ) {
         wins++;
@@ -1409,7 +1413,7 @@
   // had an original form like p1:(AddP x x (LShiftL quux 3)), where the
   // expression (LShiftL quux 3) independently optimized to the constant 8.
   if ((t->isa_int() == NULL) && (t->isa_long() == NULL)
-      && Opcode() != Op_LoadKlass) {
+      && Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) {
     // t might actually be lower than _type, if _type is a unique
     // concrete subclass of abstract class t.
     // Make sure the reference is not into the header, by comparing
diff -r d515536da189 -r 5d254928c888 src/share/vm/opto/reg_split.cpp
--- a/src/share/vm/opto/reg_split.cpp	Tue Aug 26 00:46:03 2008 -0400
+++ b/src/share/vm/opto/reg_split.cpp	Wed Aug 27 11:20:46 2008 -0700
@@ -284,7 +284,7 @@
       // Check for single-def (LRG cannot redefined)
       uint lidx = n2lidx(in);
       if( lidx >= _maxlrg ) continue; // Value is a recent spill-copy
-      if( lrgs(lidx)._def != NodeSentinel ) continue;
+      if (lrgs(lidx).is_singledef()) continue;
 
       Block *b_def = _cfg._bbs[def->_idx];
       int idx_def = b_def->find_node(def);
@@ -311,12 +311,20 @@
           uint lidx = Find_id(in);
 
           // Walk backwards thru spill copy node intermediates
-          if( walkThru )
+          if (walkThru) {
             while ( in->is_SpillCopy() && lidx >= _maxlrg ) {
               in = in->in(1);
               lidx = Find_id(in);
             }
 
+            if (lidx < _maxlrg && lrgs(lidx).is_multidef()) {
+              // walkThru found a multidef LRG, which is unsafe to use, so
+              // just keep the original def used in the clone.
+              in = spill->in(i);
+              lidx = Find_id(in);
+            }
+          }
+
           if( lidx < _maxlrg && lrgs(lidx).reg() >= LRG::SPILL_REG ) {
             Node *rdef = Reachblock[lrg2reach[lidx]];
             if( rdef ) spill->set_req(i,rdef);
@@ -505,7 +513,7 @@
       // Do not bother splitting or putting in Phis for single-def
       // rematerialized live ranges.  This happens alot to constants
       // with long live ranges.
-      if( lrgs(lidx)._def != NodeSentinel &&
+      if( lrgs(lidx).is_singledef() &&
           lrgs(lidx)._def->rematerialize() ) {
         // reset the Reaches & UP entries
         Reachblock[slidx] = lrgs(lidx)._def;
diff -r d515536da189 -r 5d254928c888 src/share/vm/opto/subnode.cpp
--- a/src/share/vm/opto/subnode.cpp	Tue Aug 26 00:46:03 2008 -0400
+++ b/src/share/vm/opto/subnode.cpp	Wed Aug 27 11:20:46 2008 -0700
@@ -633,20 +633,31 @@
         kps != 1 &&             // both or neither are klass pointers
         !klass0->is_interface() && // do not trust interfaces
         !klass1->is_interface()) {
+      bool unrelated_classes = false;
       // See if neither subclasses the other, or if the class on top
-      // is precise.  In either of these cases, the compare must fail.
+      // is precise.  In either of these cases, the compare is known
+      // to fail if at least one of the pointers is provably not null.
       if (klass0->equals(klass1)   ||   // if types are unequal but klasses are
           !klass0->is_java_klass() ||   // types not part of Java language?
          !klass1->is_java_klass()) {   // types not part of Java language?
        // Do nothing; we know nothing for imprecise types
      } else if (klass0->is_subtype_of(klass1)) {
-        // If klass1's type is PRECISE, then we can fail.
-        if (xklass1)  return TypeInt::CC_GT;
+        // If klass1's type is PRECISE, then classes are unrelated.
+        unrelated_classes = xklass1;
      } else if (klass1->is_subtype_of(klass0)) {
-        // If klass0's type is PRECISE, then we can fail.
-        if (xklass0)  return TypeInt::CC_GT;
+        // If klass0's type is PRECISE, then classes are unrelated.
+        unrelated_classes = xklass0;
      } else {                  // Neither subtypes the other
-        return TypeInt::CC_GT;  // ...so always fail
+        unrelated_classes = true;
+      }
+      if (unrelated_classes) {
+        // The oops classes are known to be unrelated. If the joined PTRs of
+        // two oops is not Null and not Bottom, then we are sure that one
+        // of the two oops is non-null, and the comparison will always fail.
+        TypePtr::PTR jp = r0->join_ptr(r1->_ptr);
+        if (jp != TypePtr::Null && jp != TypePtr::BotPTR) {
+          return TypeInt::CC_GT;
+        }
      }
    }
  }
@@ -681,7 +692,11 @@
 
   // Now check for LoadKlass on left.
   Node* ldk1 = in(1);
-  if (ldk1->Opcode() != Op_LoadKlass)
+  if (ldk1->is_DecodeN()) {
+    ldk1 = ldk1->in(1);
+    if (ldk1->Opcode() != Op_LoadNKlass )
+      return NULL;
+  } else if (ldk1->Opcode() != Op_LoadKlass )
     return NULL;
   // Take apart the address of the LoadKlass:
   Node* adr1 = ldk1->in(MemNode::Address);
@@ -702,7 +717,11 @@
 
   // Check for a LoadKlass from primary supertype array.
   // Any nested loadklass from loadklass+con must be from the p.s. array.
-  if (ldk2->Opcode() != Op_LoadKlass)
+  if (ldk2->is_DecodeN()) {
+    // Keep ldk2 as DecodeN since it could be used in CmpP below.
+    if (ldk2->in(1)->Opcode() != Op_LoadNKlass )
+      return NULL;
+  } else if (ldk2->Opcode() != Op_LoadKlass)
     return NULL;
 
   // Verify that we understand the situation
@@ -769,20 +788,31 @@
         kps != 1 &&             // both or neither are klass pointers
         !klass0->is_interface() && // do not trust interfaces
         !klass1->is_interface()) {
+      bool unrelated_classes = false;
       // See if neither subclasses the other, or if the class on top
-      // is precise.  In either of these cases, the compare must fail.
+      // is precise.  In either of these cases, the compare is known
+      // to fail if at least one of the pointers is provably not null.
       if (klass0->equals(klass1)   ||   // if types are unequal but klasses are
          !klass0->is_java_klass() ||   // types not part of Java language?
         !klass1->is_java_klass()) {   // types not part of Java language?
        // Do nothing; we know nothing for imprecise types
      } else if (klass0->is_subtype_of(klass1)) {
-        // If klass1's type is PRECISE, then we can fail.
-        if (xklass1)  return TypeInt::CC_GT;
+        // If klass1's type is PRECISE, then classes are unrelated.
+        unrelated_classes = xklass1;
      } else if (klass1->is_subtype_of(klass0)) {
-        // If klass0's type is PRECISE, then we can fail.
-        if (xklass0)  return TypeInt::CC_GT;
+        // If klass0's type is PRECISE, then classes are unrelated.
+        unrelated_classes = xklass0;
      } else {                  // Neither subtypes the other
-        return TypeInt::CC_GT;  // ...so always fail
+        unrelated_classes = true;
+      }
+      if (unrelated_classes) {
+        // The oops classes are known to be unrelated. If the joined PTRs of
+        // two oops is not Null and not Bottom, then we are sure that one
+        // of the two oops is non-null, and the comparison will always fail.
+        TypePtr::PTR jp = r0->join_ptr(r1->_ptr);
+        if (jp != TypePtr::Null && jp != TypePtr::BotPTR) {
+          return TypeInt::CC_GT;
+        }
      }
    }
  }
diff -r d515536da189 -r 5d254928c888 src/share/vm/opto/type.cpp
--- a/src/share/vm/opto/type.cpp	Tue Aug 26 00:46:03 2008 -0400
+++ b/src/share/vm/opto/type.cpp	Wed Aug 27 11:20:46 2008 -0700
@@ -804,6 +804,7 @@
   case InstPtr:
   case KlassPtr:
   case AryPtr:
+  case NarrowOop:
   case Int:
   case Long:
   case DoubleTop:
@@ -2263,6 +2264,7 @@
   case DoubleTop:
   case DoubleCon:
   case DoubleBot:
+  case NarrowOop:
   case Bottom:                  // Ye Olde Default
     return Type::BOTTOM;
   case Top:
@@ -3465,7 +3467,7 @@
   return _ooptype->empty();
 }
 
-//------------------------------meet-------------------------------------------
+//------------------------------xmeet------------------------------------------
 // Compute the MEET of two types.  It returns a new Type object.
 const Type *TypeNarrowOop::xmeet( const Type *t ) const {
   // Perform a fast test for common case; meeting the same types together.
@@ -3483,6 +3485,13 @@
   case DoubleTop:
   case DoubleCon:
   case DoubleBot:
+  case AnyPtr:
+  case RawPtr:
+  case OopPtr:
+  case InstPtr:
+  case KlassPtr:
+  case AryPtr:
+
   case Bottom:                  // Ye Olde Default
     return Type::BOTTOM;
   case Top:
@@ -3499,16 +3508,9 @@
   default:                      // All else is a mistake
     typerr(t);
 
-  case RawPtr:
-  case AnyPtr:
-  case OopPtr:
-  case InstPtr:
-  case KlassPtr:
-  case AryPtr:
-    typerr(t);
-    return Type::BOTTOM;
-
   } // End of switch
+
+  return this;
 }
 
 const Type *TypeNarrowOop::xdual() const {    // Compute dual right now.
@@ -3702,6 +3704,7 @@
   case DoubleTop:
   case DoubleCon:
   case DoubleBot:
+  case NarrowOop:
   case Bottom:                  // Ye Olde Default
     return Type::BOTTOM;
   case Top:
diff -r d515536da189 -r 5d254928c888 src/share/vm/runtime/thread.cpp
--- a/src/share/vm/runtime/thread.cpp	Tue Aug 26 00:46:03 2008 -0400
+++ b/src/share/vm/runtime/thread.cpp	Wed Aug 27 11:20:46 2008 -0700
@@ -2777,7 +2777,13 @@
   // For now, just manually iterate through them.
   tc->do_thread(VMThread::vm_thread());
   Universe::heap()->gc_threads_do(tc);
-  tc->do_thread(WatcherThread::watcher_thread());
+  {
+    // Grab the Terminator_lock to prevent watcher_thread from being terminated.
+    MutexLockerEx mu(Terminator_lock, Mutex::_no_safepoint_check_flag);
+    WatcherThread *wt = WatcherThread::watcher_thread();
+    if (wt != NULL)
+      tc->do_thread(wt);
+  }
   // If CompilerThreads ever become non-JavaThreads, add them here
 }
 
diff -r d515536da189 -r 5d254928c888 test/compiler/6646019/Test.java
diff -r d515536da189 -r 5d254928c888 test/compiler/6689060/Test.java
diff -r d515536da189 -r 5d254928c888 test/compiler/6695810/Test.java