# HG changeset patch # User ysr # Date 1249862631 25200 # Node ID b1773b9a2ca1027cc44b4a73cca3959361577fe3 # Parent 9c65a08a31a383f893538360cc133e59103e6660# Parent 3ee342e25e573c2d016d6803e7d666ef3a4ee2f2 Merge diff -r 3ee342e25e57 -r b1773b9a2ca1 agent/make/saenv.sh --- a/agent/make/saenv.sh Wed Aug 05 12:33:29 2009 -0700 +++ b/agent/make/saenv.sh Sun Aug 09 17:03:51 2009 -0700 @@ -48,6 +48,8 @@ CPU=i386 fi else + LD_AUDIT_32=$STARTDIR/../src/os/solaris/proc/`uname -p`/libsaproc_audit.so + export LD_AUDIT_32 SA_LIBPATH=$STARTDIR/../src/os/solaris/proc/`uname -p`:$STARTDIR/solaris/`uname -p` OPTIONS="-Dsa.library.path=$SA_LIBPATH -Dsun.jvm.hotspot.debugger.useProcDebugger" CPU=sparc diff -r 3ee342e25e57 -r b1773b9a2ca1 agent/make/saenv64.sh --- a/agent/make/saenv64.sh Wed Aug 05 12:33:29 2009 -0700 +++ b/agent/make/saenv64.sh Sun Aug 09 17:03:51 2009 -0700 @@ -43,6 +43,8 @@ fi fi +LD_AUDIT_64=$STARTDIR/../src/os/solaris/proc/$CPU/libsaproc_audit.so +export LD_AUDIT_64 SA_LIBPATH=$STARTDIR/../src/os/solaris/proc/$CPU:$STARTDIR/solaris/$CPU OPTIONS="-Dsa.library.path=$SA_LIBPATH -Dsun.jvm.hotspot.debugger.useProcDebugger" diff -r 3ee342e25e57 -r b1773b9a2ca1 agent/src/os/solaris/proc/Makefile --- a/agent/src/os/solaris/proc/Makefile Wed Aug 05 12:33:29 2009 -0700 +++ b/agent/src/os/solaris/proc/Makefile Sun Aug 09 17:03:51 2009 -0700 @@ -56,24 +56,28 @@ @javah -classpath $(CLASSES_DIR) -jni sun.jvm.hotspot.debugger.proc.ProcDebuggerLocal CC -G -KPIC -I${JAVA_HOME}/include -I${JAVA_HOME}/include/solaris saproc.cpp \ -M mapfile -o $@/libsaproc.so -ldemangle + CC -o $@/libsaproc_audit.so -G -Kpic -z defs saproc_audit.cpp -lmapmalloc -ldl -lc amd64:: javahomecheck $(MKDIRS) $@ @javah -classpath $(CLASSES_DIR) -jni sun.jvm.hotspot.debugger.proc.ProcDebuggerLocal CC -G -KPIC -xarch=amd64 -I${JAVA_HOME}/include -I${JAVA_HOME}/include/solaris saproc.cpp \ -M mapfile -o $@/libsaproc.so -ldemangle + CC -xarch=amd64 -o $@/libsaproc_audit.so -G -Kpic -z defs saproc_audit.cpp -lmapmalloc -ldl -lc sparc:: javahomecheck $(MKDIRS) $@ @javah -classpath $(CLASSES_DIR) -jni sun.jvm.hotspot.debugger.proc.ProcDebuggerLocal CC -G -KPIC -xarch=v8 -I${JAVA_HOME}/include -I${JAVA_HOME}/include/solaris saproc.cpp \ -M mapfile -o $@/libsaproc.so -ldemangle + CC -xarch=v8 -o $@/libsaproc_audit.so -G -Kpic -z defs saproc_audit.cpp -lmapmalloc -ldl -lc sparcv9:: javahomecheck $(MKDIRS) $@ @javah -classpath $(CLASSES_DIR) -jni sun.jvm.hotspot.debugger.proc.ProcDebuggerLocal CC -G -KPIC -xarch=v9 -I${JAVA_HOME}/include -I${JAVA_HOME}/include/solaris saproc.cpp \ -M mapfile -o $@/libsaproc.so -ldemangle + CC -xarch=v9 -o $@/libsaproc_audit.so -G -Kpic -z defs saproc_audit.cpp -lmapmalloc -ldl -lc clean:: $(RM) -rf sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal.h diff -r 3ee342e25e57 -r b1773b9a2ca1 agent/src/os/solaris/proc/mapfile --- a/agent/src/os/solaris/proc/mapfile Wed Aug 05 12:33:29 2009 -0700 +++ b/agent/src/os/solaris/proc/mapfile Sun Aug 09 17:03:51 2009 -0700 @@ -45,6 +45,8 @@ Java_sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal_resume0; Java_sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal_suspend0; Java_sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal_writeBytesToProcess0; + # this is needed by saproc_audit.c to redirect opens in libproc.so + libsaproc_open; local: *; }; diff -r 3ee342e25e57 -r b1773b9a2ca1 agent/src/os/solaris/proc/saproc.cpp --- a/agent/src/os/solaris/proc/saproc.cpp Wed Aug 05 12:33:29 2009 -0700 +++ b/agent/src/os/solaris/proc/saproc.cpp Sun Aug 09 17:03:51 2009 -0700 @@ -214,49 +214,58 @@ 
} } -static int find_file_hook(const char * name, int elf_checksum) { - init_alt_root(); - - if (_libsaproc_debug) { - printf("libsaproc DEBUG: find_file_hook %s 0x%x\n", name, elf_checksum); - } +// This function is a complete substitute for the open system call +// since it's also used to override open calls from libproc to +// implement as a pathmap style facility for the SA. If libproc +// starts using other interfaces then this might have to extended to +// cover other calls. +extern "C" int libsaproc_open(const char * name, int oflag, ...) { + if (oflag == O_RDONLY) { + init_alt_root(); - if (alt_root_len > 0) { - int fd = -1; - char alt_path[PATH_MAX+1]; - - strcpy(alt_path, alt_root); - strcat(alt_path, name); - fd = open(alt_path, O_RDONLY); - if (fd >= 0) { - if (_libsaproc_debug) { - printf("libsaproc DEBUG: find_file_hook substituted %s\n", alt_path); - } - return fd; + if (_libsaproc_debug) { + printf("libsaproc DEBUG: libsaproc_open %s\n", name); } - if (strrchr(name, '/')) { + if (alt_root_len > 0) { + int fd = -1; + char alt_path[PATH_MAX+1]; + strcpy(alt_path, alt_root); - strcat(alt_path, strrchr(name, '/')); + strcat(alt_path, name); fd = open(alt_path, O_RDONLY); if (fd >= 0) { if (_libsaproc_debug) { - printf("libsaproc DEBUG: find_file_hook substituted %s\n", alt_path); + printf("libsaproc DEBUG: libsaproc_open substituted %s\n", alt_path); } return fd; } + + if (strrchr(name, '/')) { + strcpy(alt_path, alt_root); + strcat(alt_path, strrchr(name, '/')); + fd = open(alt_path, O_RDONLY); + if (fd >= 0) { + if (_libsaproc_debug) { + printf("libsaproc DEBUG: libsaproc_open substituted %s\n", alt_path); + } + return fd; + } + } } } - return -1; + + { + mode_t mode; + va_list ap; + va_start(ap, oflag); + mode = va_arg(ap, mode_t); + va_end(ap); + + return open(name, oflag, mode); + } } -static int pathmap_open(const char* name) { - int fd = open(name, O_RDONLY); - if (fd < 0) { - fd = find_file_hook(name, 0); - } - return fd; -} static void * pathmap_dlopen(const char * name, int mode) { init_alt_root(); @@ -608,7 +617,7 @@ print_debug("looking for %s\n", classes_jsa); // open the classes[_g].jsa - int fd = pathmap_open(classes_jsa); + int fd = libsaproc_open(classes_jsa, O_RDONLY); if (fd < 0) { char errMsg[ERR_MSG_SIZE]; sprintf(errMsg, "can't open shared archive file %s", classes_jsa); @@ -1209,8 +1218,6 @@ return res; } -typedef int (*find_file_hook_t)(const char *, int elf_checksum); - /* * Class: sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal * Method: initIDs @@ -1230,16 +1237,6 @@ if (libproc_handle == 0) THROW_NEW_DEBUGGER_EXCEPTION("can't load libproc.so, if you are using Solaris 5.7 or below, copy libproc.so from 5.8!"); - // If possible, set shared object find file hook. - void (*set_hook)(find_file_hook_t) = (void(*)(find_file_hook_t))dlsym(libproc_handle, "Pset_find_file_hook"); - if (set_hook) { - // we found find file hook symbol, set up our hook function. - set_hook(find_file_hook); - } else if (getenv(SA_ALTROOT)) { - printf("libsaproc WARNING: %s set, but can't set file hook. " \ - "Did you use right version of libproc.so?\n", SA_ALTROOT); - } - p_ps_prochandle_ID = env->GetFieldID(clazz, "p_ps_prochandle", "J"); CHECK_EXCEPTION; diff -r 3ee342e25e57 -r b1773b9a2ca1 agent/src/os/solaris/proc/saproc_audit.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/agent/src/os/solaris/proc/saproc_audit.cpp Sun Aug 09 17:03:51 2009 -0700 @@ -0,0 +1,98 @@ +/* + * Copyright 2009 Sun Microsystems, Inc. All Rights Reserved. 
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// This class sets up an interposer on open calls from libproc.so to +// support a pathmap facility in the SA. + +static uintptr_t* libproc_cookie; +static uintptr_t* libc_cookie; +static uintptr_t* libsaproc_cookie; + + +uint_t +la_version(uint_t version) +{ + return (LAV_CURRENT); +} + + +uint_t +la_objopen(Link_map * lmp, Lmid_t lmid, uintptr_t * cookie) +{ + if (strstr(lmp->l_name, "/libproc.so") != NULL) { + libproc_cookie = cookie; + return LA_FLG_BINDFROM; + } + if (strstr(lmp->l_name, "/libc.so") != NULL) { + libc_cookie = cookie; + return LA_FLG_BINDTO; + } + if (strstr(lmp->l_name, "/libsaproc.so") != NULL) { + libsaproc_cookie = cookie; + return LA_FLG_BINDTO | LA_FLG_BINDFROM; + } + return 0; +} + + +#if defined(_LP64) +uintptr_t +la_symbind64(Elf64_Sym *symp, uint_t symndx, uintptr_t *refcook, + uintptr_t *defcook, uint_t *sb_flags, const char *sym_name) +#else +uintptr_t +la_symbind32(Elf32_Sym *symp, uint_t symndx, uintptr_t *refcook, + uintptr_t *defcook, uint_t *sb_flags) +#endif +{ +#if !defined(_LP64) + const char *sym_name = (const char *)symp->st_name; +#endif + if (strcmp(sym_name, "open") == 0 && refcook == libproc_cookie) { + // redirect all open calls from libproc.so through libsaproc_open which will + // try the alternate library locations first. 
+ void* handle = dlmopen(LM_ID_BASE, "libsaproc.so", RTLD_NOLOAD); + if (handle == NULL) { + fprintf(stderr, "libsaproc_audit.so: didn't find libsaproc.so during linking\n"); + } else { + uintptr_t libsaproc_open = (uintptr_t)dlsym(handle, "libsaproc_open"); + if (libsaproc_open == 0) { + fprintf(stderr, "libsaproc_audit.so: didn't find libsaproc_open during linking\n"); + } else { + return libsaproc_open; + } + } + } + return symp->st_value; +} diff -r 3ee342e25e57 -r b1773b9a2ca1 agent/src/share/classes/sun/jvm/hotspot/code/DebugInfoReadStream.java --- a/agent/src/share/classes/sun/jvm/hotspot/code/DebugInfoReadStream.java Wed Aug 05 12:33:29 2009 -0700 +++ b/agent/src/share/classes/sun/jvm/hotspot/code/DebugInfoReadStream.java Sun Aug 09 17:03:51 2009 -0700 @@ -81,8 +81,4 @@ Assert.that(false, "should not reach here"); return null; } - - public int readBCI() { - return readInt() + InvocationEntryBCI; - } } diff -r 3ee342e25e57 -r b1773b9a2ca1 agent/src/share/classes/sun/jvm/hotspot/code/PCDesc.java --- a/agent/src/share/classes/sun/jvm/hotspot/code/PCDesc.java Wed Aug 05 12:33:29 2009 -0700 +++ b/agent/src/share/classes/sun/jvm/hotspot/code/PCDesc.java Sun Aug 09 17:03:51 2009 -0700 @@ -82,6 +82,7 @@ tty.print(" "); sd.getMethod().printValueOn(tty); tty.print(" @" + sd.getBCI()); + tty.print(" reexecute=" + sd.getReexecute()); tty.println(); } } diff -r 3ee342e25e57 -r b1773b9a2ca1 agent/src/share/classes/sun/jvm/hotspot/code/ScopeDesc.java --- a/agent/src/share/classes/sun/jvm/hotspot/code/ScopeDesc.java Wed Aug 05 12:33:29 2009 -0700 +++ b/agent/src/share/classes/sun/jvm/hotspot/code/ScopeDesc.java Sun Aug 09 17:03:51 2009 -0700 @@ -41,6 +41,7 @@ private NMethod code; private Method method; private int bci; + private boolean reexecute; /** Decoding offsets */ private int decodeOffset; private int senderDecodeOffset; @@ -61,7 +62,7 @@ senderDecodeOffset = stream.readInt(); method = (Method) VM.getVM().getObjectHeap().newOop(stream.readOopHandle()); - bci = stream.readBCI(); + setBCIAndReexecute(stream.readInt()); // Decode offsets for body and sender localsDecodeOffset = stream.readInt(); expressionsDecodeOffset = stream.readInt(); @@ -78,7 +79,7 @@ senderDecodeOffset = stream.readInt(); method = (Method) VM.getVM().getObjectHeap().newOop(stream.readOopHandle()); - bci = stream.readBCI(); + setBCIAndReexecute(stream.readInt()); // Decode offsets for body and sender localsDecodeOffset = stream.readInt(); expressionsDecodeOffset = stream.readInt(); @@ -88,6 +89,7 @@ public NMethod getNMethod() { return code; } public Method getMethod() { return method; } public int getBCI() { return bci; } + public boolean getReexecute() {return reexecute;} /** Returns a List<ScopeValue> */ public List getLocals() { @@ -150,6 +152,7 @@ tty.print("ScopeDesc for "); method.printValueOn(tty); tty.println(" @bci " + bci); + tty.println(" reexecute: " + reexecute); } // FIXME: add more accessors @@ -157,6 +160,11 @@ //-------------------------------------------------------------------------------- // Internals only below this point // + private void setBCIAndReexecute(int combination) { + int InvocationEntryBci = VM.getVM().getInvocationEntryBCI(); + bci = (combination >> 1) + InvocationEntryBci; + reexecute = (combination & 1)==1 ? 
true : false; + } private DebugInfoReadStream streamAt(int decodeOffset) { return new DebugInfoReadStream(code, decodeOffset, objects); diff -r 3ee342e25e57 -r b1773b9a2ca1 agent/src/share/classes/sun/jvm/hotspot/memory/CompactibleFreeListSpace.java --- a/agent/src/share/classes/sun/jvm/hotspot/memory/CompactibleFreeListSpace.java Wed Aug 05 12:33:29 2009 -0700 +++ b/agent/src/share/classes/sun/jvm/hotspot/memory/CompactibleFreeListSpace.java Sun Aug 09 17:03:51 2009 -0700 @@ -176,19 +176,6 @@ for (; cur.lessThan(limit);) { Address klassOop = cur.getAddressAt(addressSize); - // FIXME: need to do a better job here. - // can I use bitMap here? - if (klassOop == null) { - //Find the object size using Printezis bits and skip over - System.err.println("Finding object size using Printezis bits and skipping over..."); - long size = collector().blockSizeUsingPrintezisBits(cur); - if (size == -1) { - System.err.println("Printezis bits not set..."); - break; - } - cur = cur.addOffsetTo(adjustObjectSizeInBytes(size)); - } - if (FreeChunk.indicatesFreeChunk(cur)) { if (! cur.equals(regionStart)) { res.add(new MemRegion(regionStart, cur)); @@ -200,12 +187,21 @@ } // note that fc.size() gives chunk size in heap words cur = cur.addOffsetTo(chunkSize * addressSize); - System.err.println("Free chunk in CMS heap, size="+chunkSize * addressSize); regionStart = cur; } else if (klassOop != null) { Oop obj = heap.newOop(cur.addOffsetToAsOopHandle(0)); long objectSize = obj.getObjectSize(); cur = cur.addOffsetTo(adjustObjectSizeInBytes(objectSize)); + } else { + // FIXME: need to do a better job here. + // can I use bitMap here? + //Find the object size using Printezis bits and skip over + long size = collector().blockSizeUsingPrintezisBits(cur); + if (size == -1) { + System.err.println("Printezis bits not set..."); + break; + } + cur = cur.addOffsetTo(adjustObjectSizeInBytes(size)); } } return res; diff -r 3ee342e25e57 -r b1773b9a2ca1 agent/src/share/classes/sun/jvm/hotspot/memory/FreeChunk.java --- a/agent/src/share/classes/sun/jvm/hotspot/memory/FreeChunk.java Wed Aug 05 12:33:29 2009 -0700 +++ b/agent/src/share/classes/sun/jvm/hotspot/memory/FreeChunk.java Sun Aug 09 17:03:51 2009 -0700 @@ -63,7 +63,7 @@ public long size() { if (VM.getVM().isCompressedOopsEnabled()) { - Mark mark = new Mark(sizeField.getValue(addr)); + Mark mark = new Mark(addr.addOffsetTo(sizeField.getOffset())); return mark.getSize(); } else { Address size = sizeField.getValue(addr); @@ -83,7 +83,7 @@ public boolean isFree() { if (VM.getVM().isCompressedOopsEnabled()) { - Mark mark = new Mark(sizeField.getValue(addr)); + Mark mark = new Mark(addr.addOffsetTo(sizeField.getOffset())); return mark.isCmsFreeChunk(); } else { Address prev = prevField.getValue(addr); diff -r 3ee342e25e57 -r b1773b9a2ca1 src/cpu/x86/vm/assembler_x86.cpp --- a/src/cpu/x86/vm/assembler_x86.cpp Wed Aug 05 12:33:29 2009 -0700 +++ b/src/cpu/x86/vm/assembler_x86.cpp Sun Aug 09 17:03:51 2009 -0700 @@ -8335,15 +8335,13 @@ // Cannot assert, unverified entry point counts instructions (see .ad file) // vtableStubs also counts instructions in pd_code_size_limit. // Also do not verify_oop as this is called by verify_oop. 
- if (Universe::narrow_oop_base() == NULL) { - if (Universe::narrow_oop_shift() != 0) { - assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); - shlq(r, LogMinObjAlignmentInBytes); - } + if (Universe::narrow_oop_shift() != 0) { + assert (Address::times_8 == LogMinObjAlignmentInBytes && + Address::times_8 == Universe::narrow_oop_shift(), "decode alg wrong"); + // Don't use Shift since it modifies flags. + leaq(r, Address(r12_heapbase, r, Address::times_8, 0)); } else { - assert (Address::times_8 == LogMinObjAlignmentInBytes && - Address::times_8 == Universe::narrow_oop_shift(), "decode alg wrong"); - leaq(r, Address(r12_heapbase, r, Address::times_8, 0)); + assert (Universe::narrow_oop_base() == NULL, "sanity"); } } @@ -8358,6 +8356,7 @@ Address::times_8 == Universe::narrow_oop_shift(), "decode alg wrong"); leaq(dst, Address(r12_heapbase, src, Address::times_8, 0)); } else if (dst != src) { + assert (Universe::narrow_oop_base() == NULL, "sanity"); movq(dst, src); } } diff -r 3ee342e25e57 -r b1773b9a2ca1 src/os/solaris/vm/os_solaris.cpp --- a/src/os/solaris/vm/os_solaris.cpp Wed Aug 05 12:33:29 2009 -0700 +++ b/src/os/solaris/vm/os_solaris.cpp Sun Aug 09 17:03:51 2009 -0700 @@ -1643,7 +1643,8 @@ inline hrtime_t getTimeNanos() { if (VM_Version::supports_cx8()) { const hrtime_t now = gethrtime(); - const hrtime_t prev = max_hrtime; + // Use atomic long load since 32-bit x86 uses 2 registers to keep long. + const hrtime_t prev = Atomic::load((volatile jlong*)&max_hrtime); if (now <= prev) return prev; // same or retrograde time; const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev); assert(obsv >= prev, "invariant"); // Monotonicity diff -r 3ee342e25e57 -r b1773b9a2ca1 src/os_cpu/solaris_sparc/vm/atomic_solaris_sparc.inline.hpp --- a/src/os_cpu/solaris_sparc/vm/atomic_solaris_sparc.inline.hpp Wed Aug 05 12:33:29 2009 -0700 +++ b/src/os_cpu/solaris_sparc/vm/atomic_solaris_sparc.inline.hpp Sun Aug 09 17:03:51 2009 -0700 @@ -46,6 +46,8 @@ inline void Atomic::dec_ptr(volatile intptr_t* dest) { (void)add_ptr(-1, dest); } inline void Atomic::dec_ptr(volatile void* dest) { (void)add_ptr(-1, dest); } +inline jlong Atomic::load(volatile jlong* src) { return *src; } + #ifdef _GNU_SOURCE inline jint Atomic::add (jint add_value, volatile jint* dest) { diff -r 3ee342e25e57 -r b1773b9a2ca1 src/os_cpu/solaris_x86/vm/atomic_solaris_x86.inline.hpp --- a/src/os_cpu/solaris_x86/vm/atomic_solaris_x86.inline.hpp Wed Aug 05 12:33:29 2009 -0700 +++ b/src/os_cpu/solaris_x86/vm/atomic_solaris_x86.inline.hpp Sun Aug 09 17:03:51 2009 -0700 @@ -99,6 +99,8 @@ return (void*)_Atomic_cmpxchg_long((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, (int) os::is_MP()); } +inline jlong Atomic::load(volatile jlong* src) { return *src; } + #else // !AMD64 inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) { @@ -131,6 +133,15 @@ inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value) { return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value); } + +extern "C" void _Atomic_load_long(volatile jlong* src, volatile jlong* dst); + +inline jlong Atomic::load(volatile jlong* src) { + volatile jlong dest; + _Atomic_load_long(src, &dest); + return dest; +} + #endif // AMD64 #ifdef _GNU_SOURCE diff -r 3ee342e25e57 -r b1773b9a2ca1 src/os_cpu/solaris_x86/vm/solaris_x86_32.il --- a/src/os_cpu/solaris_x86/vm/solaris_x86_32.il Wed Aug 05 12:33:29 2009 -0700 +++ 
b/src/os_cpu/solaris_x86/vm/solaris_x86_32.il Sun Aug 09 17:03:51 2009 -0700 @@ -97,6 +97,15 @@ popl %ebx .end + // Support for void Atomic::load(volatile jlong* src, volatile jlong* dest). + .inline _Atomic_load_long,2 + movl 0(%esp), %eax // src + fildll (%eax) + movl 4(%esp), %eax // dest + fistpll (%eax) + .end + + // Support for OrderAccess::acquire() .inline _OrderAccess_acquire,0 movl 0(%esp), %eax diff -r 3ee342e25e57 -r b1773b9a2ca1 src/share/vm/c1/c1_IR.cpp --- a/src/share/vm/c1/c1_IR.cpp Wed Aug 05 12:33:29 2009 -0700 +++ b/src/share/vm/c1/c1_IR.cpp Sun Aug 09 17:03:51 2009 -0700 @@ -208,6 +208,15 @@ return scope->caller_bci(); } +bool IRScopeDebugInfo::should_reexecute() { + ciMethod* cur_method = scope()->method(); + int cur_bci = bci(); + if (cur_method != NULL && cur_bci != SynchronizationEntryBCI) { + Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci); + return Interpreter::bytecode_should_reexecute(code); + } else + return false; +} // Implementation of CodeEmitInfo @@ -253,7 +262,7 @@ void CodeEmitInfo::record_debug_info(DebugInformationRecorder* recorder, int pc_offset) { // record the safepoint before recording the debug info for enclosing scopes recorder->add_safepoint(pc_offset, _oop_map->deep_copy()); - _scope_debug_info->record_debug_info(recorder, pc_offset); + _scope_debug_info->record_debug_info(recorder, pc_offset, true/*topmost*/); recorder->end_safepoint(pc_offset); } diff -r 3ee342e25e57 -r b1773b9a2ca1 src/share/vm/c1/c1_IR.hpp --- a/src/share/vm/c1/c1_IR.hpp Wed Aug 05 12:33:29 2009 -0700 +++ b/src/share/vm/c1/c1_IR.hpp Sun Aug 09 17:03:51 2009 -0700 @@ -239,15 +239,20 @@ GrowableArray* monitors() { return _monitors; } IRScopeDebugInfo* caller() { return _caller; } - void record_debug_info(DebugInformationRecorder* recorder, int pc_offset) { + //Whether we should reexecute this bytecode for deopt + bool should_reexecute(); + + void record_debug_info(DebugInformationRecorder* recorder, int pc_offset, bool topmost) { if (caller() != NULL) { // Order is significant: Must record caller first. - caller()->record_debug_info(recorder, pc_offset); + caller()->record_debug_info(recorder, pc_offset, false/*topmost*/); } DebugToken* locvals = recorder->create_scope_values(locals()); DebugToken* expvals = recorder->create_scope_values(expressions()); DebugToken* monvals = recorder->create_monitor_values(monitors()); - recorder->describe_scope(pc_offset, scope()->method(), bci(), locvals, expvals, monvals); + // reexecute allowed only for the topmost frame + bool reexecute = topmost ? 
should_reexecute() : false; + recorder->describe_scope(pc_offset, scope()->method(), bci(), reexecute, locvals, expvals, monvals); } }; diff -r 3ee342e25e57 -r b1773b9a2ca1 src/share/vm/c1/c1_LIRAssembler.cpp --- a/src/share/vm/c1/c1_LIRAssembler.cpp Wed Aug 05 12:33:29 2009 -0700 +++ b/src/share/vm/c1/c1_LIRAssembler.cpp Sun Aug 09 17:03:51 2009 -0700 @@ -379,7 +379,8 @@ ValueStack* s = nth_oldest(vstack, n, s_bci); if (s == NULL) break; IRScope* scope = s->scope(); - debug_info->describe_scope(pc_offset, scope->method(), s_bci); + //Always pass false for reexecute since these ScopeDescs are never used for deopt + debug_info->describe_scope(pc_offset, scope->method(), s_bci, false/*reexecute*/); } debug_info->end_non_safepoint(pc_offset); diff -r 3ee342e25e57 -r b1773b9a2ca1 src/share/vm/classfile/javaClasses.cpp --- a/src/share/vm/classfile/javaClasses.cpp Wed Aug 05 12:33:29 2009 -0700 +++ b/src/share/vm/classfile/javaClasses.cpp Sun Aug 09 17:03:51 2009 -0700 @@ -1229,10 +1229,13 @@ // Compiled java method case. if (decode_offset != 0) { + bool dummy_reexecute = false; DebugInfoReadStream stream(nm, decode_offset); decode_offset = stream.read_int(); method = (methodOop)nm->oop_at(stream.read_int()); - bci = stream.read_bci(); + //fill_in_stack_trace does not need the reexecute information which is designed + //for the deopt to reexecute + bci = stream.read_bci_and_reexecute(dummy_reexecute); } else { if (fr.is_first_frame()) break; address pc = fr.pc(); diff -r 3ee342e25e57 -r b1773b9a2ca1 src/share/vm/code/debugInfo.hpp --- a/src/share/vm/code/debugInfo.hpp Wed Aug 05 12:33:29 2009 -0700 +++ b/src/share/vm/code/debugInfo.hpp Sun Aug 09 17:03:51 2009 -0700 @@ -255,7 +255,8 @@ ScopeValue* read_object_value(); ScopeValue* get_cached_object(); // BCI encoding is mostly unsigned, but -1 is a distinguished value - int read_bci() { return read_int() + InvocationEntryBci; } + // Decoding based on encoding: bci = InvocationEntryBci + read_int()/2; reexecute = read_int()%2 == 1 ? true : false; + int read_bci_and_reexecute(bool& reexecute) { int i = read_int(); reexecute = (i & 1) ? true : false; return (i >> 1) + InvocationEntryBci; } }; // DebugInfoWriteStream specializes CompressedWriteStream for @@ -268,5 +269,6 @@ public: DebugInfoWriteStream(DebugInformationRecorder* recorder, int initial_size); void write_handle(jobject h); - void write_bci(int bci) { write_int(bci - InvocationEntryBci); } + //Encoding bci and reexecute into one word as (bci - InvocationEntryBci)*2 + reexecute + void write_bci_and_reexecute(int bci, bool reexecute) { write_int(((bci - InvocationEntryBci) << 1) + (reexecute ? 1 : 0)); } }; diff -r 3ee342e25e57 -r b1773b9a2ca1 src/share/vm/code/debugInfoRec.cpp --- a/src/share/vm/code/debugInfoRec.cpp Wed Aug 05 12:33:29 2009 -0700 +++ b/src/share/vm/code/debugInfoRec.cpp Sun Aug 09 17:03:51 2009 -0700 @@ -280,6 +280,7 @@ void DebugInformationRecorder::describe_scope(int pc_offset, ciMethod* method, int bci, + bool reexecute, DebugToken* locals, DebugToken* expressions, DebugToken* monitors) { @@ -297,7 +298,7 @@ // serialize scope jobject method_enc = (method == NULL)? 
NULL: method->encoding(); stream()->write_int(oop_recorder()->find_index(method_enc)); - stream()->write_bci(bci); + stream()->write_bci_and_reexecute(bci, reexecute); assert(method == NULL || (method->is_native() && bci == 0) || (!method->is_native() && 0 <= bci && bci < method->code_size()) || diff -r 3ee342e25e57 -r b1773b9a2ca1 src/share/vm/code/debugInfoRec.hpp --- a/src/share/vm/code/debugInfoRec.hpp Wed Aug 05 12:33:29 2009 -0700 +++ b/src/share/vm/code/debugInfoRec.hpp Sun Aug 09 17:03:51 2009 -0700 @@ -87,6 +87,7 @@ void describe_scope(int pc_offset, ciMethod* method, int bci, + bool reexecute, DebugToken* locals = NULL, DebugToken* expressions = NULL, DebugToken* monitors = NULL); diff -r 3ee342e25e57 -r b1773b9a2ca1 src/share/vm/code/scopeDesc.cpp --- a/src/share/vm/code/scopeDesc.cpp Wed Aug 05 12:33:29 2009 -0700 +++ b/src/share/vm/code/scopeDesc.cpp Sun Aug 09 17:03:51 2009 -0700 @@ -46,6 +46,7 @@ _decode_offset = parent->_sender_decode_offset; _objects = parent->_objects; decode_body(); + assert(_reexecute == false, "reexecute not allowed"); } @@ -56,6 +57,7 @@ _sender_decode_offset = DebugInformationRecorder::serialized_null; _method = methodHandle(_code->method()); _bci = InvocationEntryBci; + _reexecute = false; _locals_decode_offset = DebugInformationRecorder::serialized_null; _expressions_decode_offset = DebugInformationRecorder::serialized_null; _monitors_decode_offset = DebugInformationRecorder::serialized_null; @@ -65,7 +67,8 @@ _sender_decode_offset = stream->read_int(); _method = methodHandle((methodOop) stream->read_oop()); - _bci = stream->read_bci(); + _bci = stream->read_bci_and_reexecute(_reexecute); + // decode offsets for body and sender _locals_decode_offset = stream->read_int(); _expressions_decode_offset = stream->read_int(); @@ -170,6 +173,7 @@ st->print("ScopeDesc[%d]@" PTR_FORMAT " ", _decode_offset, _code->instructions_begin()); st->print_cr(" offset: %d", _decode_offset); st->print_cr(" bci: %d", bci()); + st->print_cr(" reexecute: %s", should_reexecute() ? "true" : "false"); st->print_cr(" locals: %d", _locals_decode_offset); st->print_cr(" stack: %d", _expressions_decode_offset); st->print_cr(" monitor: %d", _monitors_decode_offset); diff -r 3ee342e25e57 -r b1773b9a2ca1 src/share/vm/code/scopeDesc.hpp --- a/src/share/vm/code/scopeDesc.hpp Wed Aug 05 12:33:29 2009 -0700 +++ b/src/share/vm/code/scopeDesc.hpp Sun Aug 09 17:03:51 2009 -0700 @@ -39,7 +39,8 @@ DebugInfoReadStream buffer(code, pc_desc->scope_decode_offset()); int ignore_sender = buffer.read_int(); _method = methodOop(buffer.read_oop()); - _bci = buffer.read_bci(); + bool dummy_reexecute; //only methodOop and bci are needed! 
+ _bci = buffer.read_bci_and_reexecute(dummy_reexecute); } methodOop method() { return _method; } @@ -60,8 +61,9 @@ ScopeDesc(const nmethod* code, int decode_offset); // JVM state - methodHandle method() const { return _method; } - int bci() const { return _bci; } + methodHandle method() const { return _method; } + int bci() const { return _bci; } + bool should_reexecute() const { return _reexecute; } GrowableArray* locals(); GrowableArray* expressions(); @@ -86,6 +88,7 @@ // JVM state methodHandle _method; int _bci; + bool _reexecute; // Decoding offsets int _decode_offset; diff -r 3ee342e25e57 -r b1773b9a2ca1 src/share/vm/interpreter/abstractInterpreter.hpp --- a/src/share/vm/interpreter/abstractInterpreter.hpp Wed Aug 05 12:33:29 2009 -0700 +++ b/src/share/vm/interpreter/abstractInterpreter.hpp Sun Aug 09 17:03:51 2009 -0700 @@ -122,11 +122,15 @@ static int size_top_interpreter_activation(methodOop method); // Deoptimization support - static address continuation_for(methodOop method, - address bcp, - int callee_parameters, - bool is_top_frame, - bool& use_next_mdp); + // Compute the entry address for continuation after + static address deopt_continue_after_entry(methodOop method, + address bcp, + int callee_parameters, + bool is_top_frame); + // Compute the entry address for reexecution + static address deopt_reexecute_entry(methodOop method, address bcp); + // Deoptimization should reexecute this bytecode + static bool bytecode_should_reexecute(Bytecodes::Code code); // share implementation of size_activation and layout_activation: static int size_activation(methodOop method, diff -r 3ee342e25e57 -r b1773b9a2ca1 src/share/vm/interpreter/interpreter.cpp --- a/src/share/vm/interpreter/interpreter.cpp Wed Aug 05 12:33:29 2009 -0700 +++ b/src/share/vm/interpreter/interpreter.cpp Sun Aug 09 17:03:51 2009 -0700 @@ -284,76 +284,19 @@ //------------------------------------------------------------------------------------------------------------------------ // Deoptimization support -// If deoptimization happens, this method returns the point where to continue in -// interpreter. For calls (invokexxxx, newxxxx) the continuation is at next -// bci and the top of stack is in eax/edx/FPU tos. -// For putfield/getfield, put/getstatic, the continuation is at the same -// bci and the TOS is on stack. 
- -// Note: deopt_entry(type, 0) means reexecute bytecode -// deopt_entry(type, length) means continue at next bytecode - -address AbstractInterpreter::continuation_for(methodOop method, address bcp, int callee_parameters, bool is_top_frame, bool& use_next_mdp) { +// If deoptimization happens, this function returns the point of next bytecode to continue execution +address AbstractInterpreter::deopt_continue_after_entry(methodOop method, address bcp, int callee_parameters, bool is_top_frame) { assert(method->contains(bcp), "just checkin'"); Bytecodes::Code code = Bytecodes::java_code_at(bcp); + assert(!Interpreter::bytecode_should_reexecute(code), "should not reexecute"); int bci = method->bci_from(bcp); int length = -1; // initial value for debugging // compute continuation length length = Bytecodes::length_at(bcp); // compute result type BasicType type = T_ILLEGAL; - // when continuing after a compiler safepoint, re-execute the bytecode - // (an invoke is continued after the safepoint) - use_next_mdp = true; + switch (code) { - case Bytecodes::_lookupswitch: - case Bytecodes::_tableswitch: - case Bytecodes::_fast_binaryswitch: - case Bytecodes::_fast_linearswitch: - // recompute condtional expression folded into _if - case Bytecodes::_lcmp : - case Bytecodes::_fcmpl : - case Bytecodes::_fcmpg : - case Bytecodes::_dcmpl : - case Bytecodes::_dcmpg : - case Bytecodes::_ifnull : - case Bytecodes::_ifnonnull : - case Bytecodes::_goto : - case Bytecodes::_goto_w : - case Bytecodes::_ifeq : - case Bytecodes::_ifne : - case Bytecodes::_iflt : - case Bytecodes::_ifge : - case Bytecodes::_ifgt : - case Bytecodes::_ifle : - case Bytecodes::_if_icmpeq : - case Bytecodes::_if_icmpne : - case Bytecodes::_if_icmplt : - case Bytecodes::_if_icmpge : - case Bytecodes::_if_icmpgt : - case Bytecodes::_if_icmple : - case Bytecodes::_if_acmpeq : - case Bytecodes::_if_acmpne : - // special cases - case Bytecodes::_getfield : - case Bytecodes::_putfield : - case Bytecodes::_getstatic : - case Bytecodes::_putstatic : - case Bytecodes::_aastore : - // reexecute the operation and TOS value is on stack - assert(is_top_frame, "must be top frame"); - use_next_mdp = false; - return Interpreter::deopt_entry(vtos, 0); - break; - -#ifdef COMPILER1 - case Bytecodes::_athrow : - assert(is_top_frame, "must be top frame"); - use_next_mdp = false; - return Interpreter::rethrow_exception_entry(); - break; -#endif /* COMPILER1 */ - case Bytecodes::_invokevirtual : case Bytecodes::_invokespecial : case Bytecodes::_invokestatic : @@ -392,6 +335,70 @@ : Interpreter::return_entry(as_TosState(type), length); } +// If deoptimization happens, this function returns the point where the interpreter reexecutes +// the bytecode. +// Note: Bytecodes::_athrow is a special case in that it does not return +// Interpreter::deopt_entry(vtos, 0) like others +address AbstractInterpreter::deopt_reexecute_entry(methodOop method, address bcp) { + assert(method->contains(bcp), "just checkin'"); + Bytecodes::Code code = Bytecodes::java_code_at(bcp); +#ifdef COMPILER1 + if(code == Bytecodes::_athrow ) { + return Interpreter::rethrow_exception_entry(); + } +#endif /* COMPILER1 */ + return Interpreter::deopt_entry(vtos, 0); +} + +// If deoptimization happens, the interpreter should reexecute these bytecodes. +// This function mainly helps the compilers to set up the reexecute bit. 
+bool AbstractInterpreter::bytecode_should_reexecute(Bytecodes::Code code) { + switch (code) { + case Bytecodes::_lookupswitch: + case Bytecodes::_tableswitch: + case Bytecodes::_fast_binaryswitch: + case Bytecodes::_fast_linearswitch: + // recompute condtional expression folded into _if + case Bytecodes::_lcmp : + case Bytecodes::_fcmpl : + case Bytecodes::_fcmpg : + case Bytecodes::_dcmpl : + case Bytecodes::_dcmpg : + case Bytecodes::_ifnull : + case Bytecodes::_ifnonnull : + case Bytecodes::_goto : + case Bytecodes::_goto_w : + case Bytecodes::_ifeq : + case Bytecodes::_ifne : + case Bytecodes::_iflt : + case Bytecodes::_ifge : + case Bytecodes::_ifgt : + case Bytecodes::_ifle : + case Bytecodes::_if_icmpeq : + case Bytecodes::_if_icmpne : + case Bytecodes::_if_icmplt : + case Bytecodes::_if_icmpge : + case Bytecodes::_if_icmpgt : + case Bytecodes::_if_icmple : + case Bytecodes::_if_acmpeq : + case Bytecodes::_if_acmpne : + // special cases + case Bytecodes::_getfield : + case Bytecodes::_putfield : + case Bytecodes::_getstatic : + case Bytecodes::_putstatic : + case Bytecodes::_aastore : +#ifdef COMPILER1 + //special case of reexecution + case Bytecodes::_athrow : +#endif + return true; + + default: + return false; + } +} + void AbstractInterpreterGenerator::bang_stack_shadow_pages(bool native_call) { // Quick & dirty stack overflow checking: bang the stack & handle trap. // Note that we do the banging after the frame is setup, since the exception diff -r 3ee342e25e57 -r b1773b9a2ca1 src/share/vm/interpreter/templateInterpreter.cpp --- a/src/share/vm/interpreter/templateInterpreter.cpp Wed Aug 05 12:33:29 2009 -0700 +++ b/src/share/vm/interpreter/templateInterpreter.cpp Sun Aug 09 17:03:51 2009 -0700 @@ -605,28 +605,41 @@ } } -// If deoptimization happens, this method returns the point where to continue in -// interpreter. For calls (invokexxxx, newxxxx) the continuation is at next -// bci and the top of stack is in eax/edx/FPU tos. -// For putfield/getfield, put/getstatic, the continuation is at the same -// bci and the TOS is on stack. +//------------------------------------------------------------------------------------------------------------------------ +// Deoptimization support -// Note: deopt_entry(type, 0) means reexecute bytecode -// deopt_entry(type, length) means continue at next bytecode +// If deoptimization happens, this function returns the point of next bytecode to continue execution +address TemplateInterpreter::deopt_continue_after_entry(methodOop method, address bcp, int callee_parameters, bool is_top_frame) { + return AbstractInterpreter::deopt_continue_after_entry(method, bcp, callee_parameters, is_top_frame); +} -address TemplateInterpreter::continuation_for(methodOop method, address bcp, int callee_parameters, bool is_top_frame, bool& use_next_mdp) { +// If deoptimization happens, this function returns the point where the interpreter reexecutes +// the bytecode. +// Note: Bytecodes::_athrow (C1 only) and Bytecodes::_return are the special cases +// that do not return "Interpreter::deopt_entry(vtos, 0)" +address TemplateInterpreter::deopt_reexecute_entry(methodOop method, address bcp) { assert(method->contains(bcp), "just checkin'"); Bytecodes::Code code = Bytecodes::java_code_at(bcp); if (code == Bytecodes::_return) { - // This is used for deopt during registration of finalizers - // during Object.. We simply need to resume execution at - // the standard return vtos bytecode to pop the frame normally. 
- // reexecuting the real bytecode would cause double registration - // of the finalizable object. - assert(is_top_frame, "must be on top"); - return _normal_table.entry(Bytecodes::_return).entry(vtos); + // This is used for deopt during registration of finalizers + // during Object.. We simply need to resume execution at + // the standard return vtos bytecode to pop the frame normally. + // reexecuting the real bytecode would cause double registration + // of the finalizable object. + return _normal_table.entry(Bytecodes::_return).entry(vtos); } else { - return AbstractInterpreter::continuation_for(method, bcp, callee_parameters, is_top_frame, use_next_mdp); + return AbstractInterpreter::deopt_reexecute_entry(method, bcp); + } +} + +// If deoptimization happens, the interpreter should reexecute this bytecode. +// This function mainly helps the compilers to set up the reexecute bit. +bool TemplateInterpreter::bytecode_should_reexecute(Bytecodes::Code code) { + if (code == Bytecodes::_return) { + //Yes, we consider Bytecodes::_return as a special case of reexecution + return true; + } else { + return AbstractInterpreter::bytecode_should_reexecute(code); } } diff -r 3ee342e25e57 -r b1773b9a2ca1 src/share/vm/interpreter/templateInterpreter.hpp --- a/src/share/vm/interpreter/templateInterpreter.hpp Wed Aug 05 12:33:29 2009 -0700 +++ b/src/share/vm/interpreter/templateInterpreter.hpp Sun Aug 09 17:03:51 2009 -0700 @@ -171,11 +171,15 @@ static void ignore_safepoints(); // ignores safepoints // Deoptimization support - static address continuation_for(methodOop method, - address bcp, - int callee_parameters, - bool is_top_frame, - bool& use_next_mdp); + // Compute the entry address for continuation after + static address deopt_continue_after_entry(methodOop method, + address bcp, + int callee_parameters, + bool is_top_frame); + // Deoptimization should reexecute this bytecode + static bool bytecode_should_reexecute(Bytecodes::Code code); + // Compute the address for reexecution + static address deopt_reexecute_entry(methodOop method, address bcp); #include "incls/_templateInterpreter_pd.hpp.incl" diff -r 3ee342e25e57 -r b1773b9a2ca1 src/share/vm/memory/serialize.cpp --- a/src/share/vm/memory/serialize.cpp Wed Aug 05 12:33:29 2009 -0700 +++ b/src/share/vm/memory/serialize.cpp Sun Aug 09 17:03:51 2009 -0700 @@ -51,7 +51,7 @@ soc->do_tag(arrayOopDesc::base_offset_in_bytes(T_BYTE)); soc->do_tag(sizeof(constantPoolOopDesc)); soc->do_tag(sizeof(constantPoolCacheOopDesc)); - soc->do_tag(objArrayOopDesc::base_offset_in_bytes(T_BYTE)); + soc->do_tag(objArrayOopDesc::base_offset_in_bytes()); soc->do_tag(typeArrayOopDesc::base_offset_in_bytes(T_BYTE)); soc->do_tag(sizeof(symbolOopDesc)); soc->do_tag(sizeof(klassOopDesc)); diff -r 3ee342e25e57 -r b1773b9a2ca1 src/share/vm/oops/objArrayOop.hpp --- a/src/share/vm/oops/objArrayOop.hpp Wed Aug 05 12:33:29 2009 -0700 +++ b/src/share/vm/oops/objArrayOop.hpp Sun Aug 09 17:03:51 2009 -0700 @@ -38,6 +38,11 @@ } public: + // Returns the offset of the first element. + static int base_offset_in_bytes() { + return arrayOopDesc::base_offset_in_bytes(T_OBJECT); + } + // base is the address following the header. 
HeapWord* base() const { return (HeapWord*) arrayOopDesc::base(T_OBJECT); } diff -r 3ee342e25e57 -r b1773b9a2ca1 src/share/vm/opto/block.cpp --- a/src/share/vm/opto/block.cpp Wed Aug 05 12:33:29 2009 -0700 +++ b/src/share/vm/opto/block.cpp Sun Aug 09 17:03:51 2009 -0700 @@ -910,7 +910,16 @@ !(b->head()->is_Loop() && n->is_Phi()) && // See (+++) comment in reg_split.cpp !(n->jvms() != NULL && n->jvms()->is_monitor_use(k)) ) { - assert( b->find_node(def) < j, "uses must follow definitions" ); + bool is_loop = false; + if (n->is_Phi()) { + for( uint l = 1; l < def->req(); l++ ) { + if (n == def->in(l)) { + is_loop = true; + break; // Some kind of loop + } + } + } + assert( is_loop || b->find_node(def) < j, "uses must follow definitions" ); } if( def->is_SafePointScalarObject() ) { assert(_bbs[def->_idx] == b, "SafePointScalarObject Node should be at the same block as its SafePoint node"); diff -r 3ee342e25e57 -r b1773b9a2ca1 src/share/vm/opto/bytecodeInfo.cpp --- a/src/share/vm/opto/bytecodeInfo.cpp Wed Aug 05 12:33:29 2009 -0700 +++ b/src/share/vm/opto/bytecodeInfo.cpp Sun Aug 09 17:03:51 2009 -0700 @@ -37,6 +37,7 @@ // Keep a private copy of the caller_jvms: _caller_jvms = new (C) JVMState(caller_jvms->method(), caller_tree->caller_jvms()); _caller_jvms->set_bci(caller_jvms->bci()); + assert(!caller_jvms->should_reexecute(), "there should be no reexecute bytecode with inlining"); } assert(_caller_jvms->same_calls_as(caller_jvms), "consistent JVMS"); assert((caller_tree == NULL ? 0 : caller_tree->inline_depth() + 1) == inline_depth(), "correct (redundant) depth parameter"); diff -r 3ee342e25e57 -r b1773b9a2ca1 src/share/vm/opto/callnode.cpp --- a/src/share/vm/opto/callnode.cpp Wed Aug 05 12:33:29 2009 -0700 +++ b/src/share/vm/opto/callnode.cpp Sun Aug 09 17:03:51 2009 -0700 @@ -223,6 +223,7 @@ JVMState::JVMState(ciMethod* method, JVMState* caller) { assert(method != NULL, "must be valid call site"); _method = method; + _reexecute = Reexecute_Undefined; debug_only(_bci = -99); // random garbage value debug_only(_map = (SafePointNode*)-1); _caller = caller; @@ -237,6 +238,7 @@ JVMState::JVMState(int stack_size) { _method = NULL; _bci = InvocationEntryBci; + _reexecute = Reexecute_Undefined; debug_only(_map = (SafePointNode*)-1); _caller = NULL; _depth = 1; @@ -269,6 +271,7 @@ if (p->_method != q->_method) return false; if (p->_method == NULL) return true; // bci is irrelevant if (p->_bci != q->_bci) return false; + if (p->_reexecute != q->_reexecute) return false; p = p->caller(); q = q->caller(); if (p == q) return true; @@ -490,6 +493,7 @@ if (!printed) _method->print_short_name(st); st->print(" @ bci:%d",_bci); + st->print(" reexecute:%s", _reexecute==Reexecute_True?"true":"false"); } else { st->print(" runtime stub"); } @@ -509,8 +513,8 @@ } _map->dump(2); } - st->print("JVMS depth=%d loc=%d stk=%d mon=%d scalar=%d end=%d mondepth=%d sp=%d bci=%d method=", - depth(), locoff(), stkoff(), monoff(), scloff(), endoff(), monitor_depth(), sp(), bci()); + st->print("JVMS depth=%d loc=%d stk=%d mon=%d scalar=%d end=%d mondepth=%d sp=%d bci=%d reexecute=%s method=", + depth(), locoff(), stkoff(), monoff(), scloff(), endoff(), monitor_depth(), sp(), bci(), should_reexecute()?"true":"false"); if (_method == NULL) { st->print_cr("(none)"); } else { @@ -537,6 +541,7 @@ JVMState* JVMState::clone_shallow(Compile* C) const { JVMState* n = has_method() ? 
new (C) JVMState(_method, _caller) : new (C) JVMState(0); n->set_bci(_bci); + n->_reexecute = _reexecute; n->set_locoff(_locoff); n->set_stkoff(_stkoff); n->set_monoff(_monoff); diff -r 3ee342e25e57 -r b1773b9a2ca1 src/share/vm/opto/callnode.hpp --- a/src/share/vm/opto/callnode.hpp Wed Aug 05 12:33:29 2009 -0700 +++ b/src/share/vm/opto/callnode.hpp Sun Aug 09 17:03:51 2009 -0700 @@ -178,6 +178,13 @@ // This provides a way to map the optimized program back into the interpreter, // or to let the GC mark the stack. class JVMState : public ResourceObj { +public: + typedef enum { + Reexecute_Undefined = -1, // not defined -- will be translated into false later + Reexecute_False = 0, // false -- do not reexecute + Reexecute_True = 1 // true -- reexecute the bytecode + } ReexecuteState; //Reexecute State + private: JVMState* _caller; // List pointer for forming scope chains uint _depth; // One mroe than caller depth, or one. @@ -188,10 +195,12 @@ uint _endoff; // Offset to end of input edge mapping uint _sp; // Jave Expression Stack Pointer for this state int _bci; // Byte Code Index of this JVM point + ReexecuteState _reexecute; // Whether this bytecode need to be re-executed ciMethod* _method; // Method Pointer SafePointNode* _map; // Map node associated with this scope public: friend class Compile; + friend class PreserveReexecuteState; // Because JVMState objects live over the entire lifetime of the // Compile object, they are allocated into the comp_arena, which @@ -222,16 +231,18 @@ bool is_mon(uint i) const { return i >= _monoff && i < _scloff; } bool is_scl(uint i) const { return i >= _scloff && i < _endoff; } - uint sp() const { return _sp; } - int bci() const { return _bci; } - bool has_method() const { return _method != NULL; } - ciMethod* method() const { assert(has_method(), ""); return _method; } - JVMState* caller() const { return _caller; } - SafePointNode* map() const { return _map; } - uint depth() const { return _depth; } - uint debug_start() const; // returns locoff of root caller - uint debug_end() const; // returns endoff of self - uint debug_size() const { + uint sp() const { return _sp; } + int bci() const { return _bci; } + bool should_reexecute() const { return _reexecute==Reexecute_True; } + bool is_reexecute_undefined() const { return _reexecute==Reexecute_Undefined; } + bool has_method() const { return _method != NULL; } + ciMethod* method() const { assert(has_method(), ""); return _method; } + JVMState* caller() const { return _caller; } + SafePointNode* map() const { return _map; } + uint depth() const { return _depth; } + uint debug_start() const; // returns locoff of root caller + uint debug_end() const; // returns endoff of self + uint debug_size() const { return loc_size() + sp() + mon_size() + scl_size(); } uint debug_depth() const; // returns sum of debug_size values at all depths @@ -267,7 +278,9 @@ } void set_map(SafePointNode *map) { _map = map; } void set_sp(uint sp) { _sp = sp; } - void set_bci(int bci) { _bci = bci; } + // _reexecute is initialized to "undefined" for a new bci + void set_bci(int bci) {if(_bci != bci)_reexecute=Reexecute_Undefined; _bci = bci; } + void set_should_reexecute(bool reexec) {_reexecute = reexec ? 
Reexecute_True : Reexecute_False;} // Miscellaneous utility functions JVMState* clone_deep(Compile* C) const; // recursively clones caller chain diff -r 3ee342e25e57 -r b1773b9a2ca1 src/share/vm/opto/cfgnode.cpp --- a/src/share/vm/opto/cfgnode.cpp Wed Aug 05 12:33:29 2009 -0700 +++ b/src/share/vm/opto/cfgnode.cpp Sun Aug 09 17:03:51 2009 -0700 @@ -1792,15 +1792,12 @@ if (UseCompressedOops && can_reshape && progress == NULL) { bool may_push = true; bool has_decodeN = false; - Node* in_decodeN = NULL; for (uint i=1; iis_DecodeN() && ii->bottom_type() == bottom_type()) { - // Note: in_decodeN is used only to define the type of new phi. - // Find a non dead path otherwise phi type will be wrong. + // Do optimization if a non dead path exist. if (ii->in(1)->bottom_type() != Type::TOP) { has_decodeN = true; - in_decodeN = ii->in(1); } } else if (!ii->is_Phi()) { may_push = false; @@ -1809,7 +1806,9 @@ if (has_decodeN && may_push) { PhaseIterGVN *igvn = phase->is_IterGVN(); - PhiNode *new_phi = PhiNode::make_blank(in(0), in_decodeN); + // Make narrow type for new phi. + const Type* narrow_t = TypeNarrowOop::make(this->bottom_type()->is_ptr()); + PhiNode* new_phi = new (phase->C, r->req()) PhiNode(r, narrow_t); uint orig_cnt = req(); for (uint i=1; ias_Phi() == this) { new_ii = new_phi; } else { - new_ii = new (phase->C, 2) EncodePNode(ii, in_decodeN->bottom_type()); + new_ii = new (phase->C, 2) EncodePNode(ii, narrow_t); igvn->register_new_node_with_optimizer(new_ii); } } diff -r 3ee342e25e57 -r b1773b9a2ca1 src/share/vm/opto/graphKit.cpp --- a/src/share/vm/opto/graphKit.cpp Wed Aug 05 12:33:29 2009 -0700 +++ b/src/share/vm/opto/graphKit.cpp Sun Aug 09 17:03:51 2009 -0700 @@ -620,6 +620,16 @@ assert(kit->stopped(), "cutout code must stop, throw, return, etc."); } +//---------------------------PreserveReexecuteState---------------------------- +PreserveReexecuteState::PreserveReexecuteState(GraphKit* kit) { + _kit = kit; + _sp = kit->sp(); + _reexecute = kit->jvms()->_reexecute; +} +PreserveReexecuteState::~PreserveReexecuteState() { + _kit->jvms()->_reexecute = _reexecute; + _kit->set_sp(_sp); +} //------------------------------clone_map-------------------------------------- // Implementation of PreserveJVMState @@ -738,6 +748,18 @@ #endif //ASSERT +// Helper function for enforcing certain bytecodes to reexecute if +// deoptimization happens +static bool should_reexecute_implied_by_bytecode(JVMState *jvms) { + ciMethod* cur_method = jvms->method(); + int cur_bci = jvms->bci(); + if (cur_method != NULL && cur_bci != InvocationEntryBci) { + Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci); + return Interpreter::bytecode_should_reexecute(code); + } else + return false; +} + // Helper function for adding JVMState and debug information to node void GraphKit::add_safepoint_edges(SafePointNode* call, bool must_throw) { // Add the safepoint edges to the call (or other safepoint). @@ -781,6 +803,13 @@ JVMState* out_jvms = youngest_jvms->clone_deep(C); call->set_jvms(out_jvms); // Start jvms list for call node + // For a known set of bytecodes, the interpreter should reexecute them if + // deoptimization happens. 
We set the reexecute state for them here + if (out_jvms->is_reexecute_undefined() && //don't change if already specified + should_reexecute_implied_by_bytecode(out_jvms)) { + out_jvms->set_should_reexecute(true); //NOTE: youngest_jvms not changed + } + // Presize the call: debug_only(uint non_debug_edges = call->req()); call->add_req_batch(top(), youngest_jvms->debug_depth()); diff -r 3ee342e25e57 -r b1773b9a2ca1 src/share/vm/opto/graphKit.hpp --- a/src/share/vm/opto/graphKit.hpp Wed Aug 05 12:33:29 2009 -0700 +++ b/src/share/vm/opto/graphKit.hpp Sun Aug 09 17:03:51 2009 -0700 @@ -763,3 +763,16 @@ BuildCutout(GraphKit* kit, Node* p, float prob, float cnt = COUNT_UNKNOWN); ~BuildCutout(); }; + +// Helper class to preserve the original _reexecute bit and _sp and restore +// them back +class PreserveReexecuteState: public StackObj { + protected: + GraphKit* _kit; + uint _sp; + JVMState::ReexecuteState _reexecute; + + public: + PreserveReexecuteState(GraphKit* kit); + ~PreserveReexecuteState(); +}; diff -r 3ee342e25e57 -r b1773b9a2ca1 src/share/vm/opto/library_call.cpp --- a/src/share/vm/opto/library_call.cpp Wed Aug 05 12:33:29 2009 -0700 +++ b/src/share/vm/opto/library_call.cpp Sun Aug 09 17:03:51 2009 -0700 @@ -2064,7 +2064,7 @@ // See if it is a narrow oop array. if (adr_type->isa_aryptr()) { - if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes(type)) { + if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) { const TypeOopPtr *elem_type = adr_type->is_aryptr()->elem()->isa_oopptr(); if (elem_type != NULL) { sharpened_klass = elem_type->klass(); @@ -3169,78 +3169,85 @@ Node* end = is_copyOfRange? argument(2): argument(1); Node* array_type_mirror = is_copyOfRange? argument(3): argument(2); - _sp += nargs; // set original stack for use by uncommon_trap - array_type_mirror = do_null_check(array_type_mirror, T_OBJECT); - original = do_null_check(original, T_OBJECT); - _sp -= nargs; - - // Check if a null path was taken unconditionally. - if (stopped()) return true; - - Node* orig_length = load_array_length(original); - - Node* klass_node = load_klass_from_mirror(array_type_mirror, false, nargs, - NULL, 0); - _sp += nargs; // set original stack for use by uncommon_trap - klass_node = do_null_check(klass_node, T_OBJECT); - _sp -= nargs; - - RegionNode* bailout = new (C, 1) RegionNode(1); - record_for_igvn(bailout); - - // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc. - // Bail out if that is so. - Node* not_objArray = generate_non_objArray_guard(klass_node, bailout); - if (not_objArray != NULL) { - // Improve the klass node's type from the new optimistic assumption: - ciKlass* ak = ciArrayKlass::make(env()->Object_klass()); - const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/); - Node* cast = new (C, 2) CastPPNode(klass_node, akls); - cast->init_req(0, control()); - klass_node = _gvn.transform(cast); - } - - // Bail out if either start or end is negative. - generate_negative_guard(start, bailout, &start); - generate_negative_guard(end, bailout, &end); - - Node* length = end; - if (_gvn.type(start) != TypeInt::ZERO) { - length = _gvn.transform( new (C, 3) SubINode(end, start) ); - } - - // Bail out if length is negative. - // ...Not needed, since the new_array will throw the right exception. 
- //generate_negative_guard(length, bailout, &length); - - if (bailout->req() > 1) { - PreserveJVMState pjvms(this); - set_control( _gvn.transform(bailout) ); - _sp += nargs; // push the arguments back on the stack - uncommon_trap(Deoptimization::Reason_intrinsic, - Deoptimization::Action_maybe_recompile); - } - - if (!stopped()) { - // How many elements will we copy from the original? - // The answer is MinI(orig_length - start, length). - Node* orig_tail = _gvn.transform( new(C, 3) SubINode(orig_length, start) ); - Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length); - - const bool raw_mem_only = true; - Node* newcopy = new_array(klass_node, length, nargs, raw_mem_only); - - // Generate a direct call to the right arraycopy function(s). - // We know the copy is disjoint but we might not know if the - // oop stores need checking. - // Extreme case: Arrays.copyOf((Integer[])x, 10, String[].class). - // This will fail a store-check if x contains any non-nulls. - bool disjoint_bases = true; - bool length_never_negative = true; - generate_arraycopy(TypeAryPtr::OOPS, T_OBJECT, - original, start, newcopy, intcon(0), moved, - disjoint_bases, length_never_negative); - + Node* newcopy; + + //set the original stack and the reexecute bit for the interpreter to reexecute + //the bytecode that invokes Arrays.copyOf if deoptimization happens + { PreserveReexecuteState preexecs(this); + _sp += nargs; + jvms()->set_should_reexecute(true); + + array_type_mirror = do_null_check(array_type_mirror, T_OBJECT); + original = do_null_check(original, T_OBJECT); + + // Check if a null path was taken unconditionally. + if (stopped()) return true; + + Node* orig_length = load_array_length(original); + + Node* klass_node = load_klass_from_mirror(array_type_mirror, false, 0, + NULL, 0); + klass_node = do_null_check(klass_node, T_OBJECT); + + RegionNode* bailout = new (C, 1) RegionNode(1); + record_for_igvn(bailout); + + // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc. + // Bail out if that is so. + Node* not_objArray = generate_non_objArray_guard(klass_node, bailout); + if (not_objArray != NULL) { + // Improve the klass node's type from the new optimistic assumption: + ciKlass* ak = ciArrayKlass::make(env()->Object_klass()); + const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/); + Node* cast = new (C, 2) CastPPNode(klass_node, akls); + cast->init_req(0, control()); + klass_node = _gvn.transform(cast); + } + + // Bail out if either start or end is negative. + generate_negative_guard(start, bailout, &start); + generate_negative_guard(end, bailout, &end); + + Node* length = end; + if (_gvn.type(start) != TypeInt::ZERO) { + length = _gvn.transform( new (C, 3) SubINode(end, start) ); + } + + // Bail out if length is negative. + // ...Not needed, since the new_array will throw the right exception. + //generate_negative_guard(length, bailout, &length); + + if (bailout->req() > 1) { + PreserveJVMState pjvms(this); + set_control( _gvn.transform(bailout) ); + uncommon_trap(Deoptimization::Reason_intrinsic, + Deoptimization::Action_maybe_recompile); + } + + if (!stopped()) { + + // How many elements will we copy from the original? + // The answer is MinI(orig_length - start, length). 
+ Node* orig_tail = _gvn.transform( new(C, 3) SubINode(orig_length, start) ); + Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length); + + const bool raw_mem_only = true; + newcopy = new_array(klass_node, length, 0, raw_mem_only); + + // Generate a direct call to the right arraycopy function(s). + // We know the copy is disjoint but we might not know if the + // oop stores need checking. + // Extreme case: Arrays.copyOf((Integer[])x, 10, String[].class). + // This will fail a store-check if x contains any non-nulls. + bool disjoint_bases = true; + bool length_never_negative = true; + generate_arraycopy(TypeAryPtr::OOPS, T_OBJECT, + original, start, newcopy, intcon(0), moved, + disjoint_bases, length_never_negative); + } + } //original reexecute and sp are set back here + + if(!stopped()) { push(newcopy); } @@ -3992,146 +3999,159 @@ // bool LibraryCallKit::inline_native_clone(bool is_virtual) { int nargs = 1; - Node* obj = null_check_receiver(callee()); - if (stopped()) return true; - Node* obj_klass = load_object_klass(obj); - const TypeKlassPtr* tklass = _gvn.type(obj_klass)->isa_klassptr(); - const TypeOopPtr* toop = ((tklass != NULL) + PhiNode* result_val; + + //set the original stack and the reexecute bit for the interpreter to reexecute + //the bytecode that invokes Object.clone if deoptimization happens + { PreserveReexecuteState preexecs(this); + jvms()->set_should_reexecute(true); + + //null_check_receiver will adjust _sp (push and pop) + Node* obj = null_check_receiver(callee()); + if (stopped()) return true; + + _sp += nargs; + + Node* obj_klass = load_object_klass(obj); + const TypeKlassPtr* tklass = _gvn.type(obj_klass)->isa_klassptr(); + const TypeOopPtr* toop = ((tklass != NULL) ? tklass->as_instance_type() : TypeInstPtr::NOTNULL); - // Conservatively insert a memory barrier on all memory slices. - // Do not let writes into the original float below the clone. - insert_mem_bar(Op_MemBarCPUOrder); - - // paths into result_reg: - enum { - _slow_path = 1, // out-of-line call to clone method (virtual or not) - _objArray_path, // plain array allocation, plus arrayof_oop_arraycopy - _array_path, // plain array allocation, plus arrayof_long_arraycopy - _instance_path, // plain instance allocation, plus arrayof_long_arraycopy - PATH_LIMIT - }; - RegionNode* result_reg = new(C, PATH_LIMIT) RegionNode(PATH_LIMIT); - PhiNode* result_val = new(C, PATH_LIMIT) PhiNode(result_reg, - TypeInstPtr::NOTNULL); - PhiNode* result_i_o = new(C, PATH_LIMIT) PhiNode(result_reg, Type::ABIO); - PhiNode* result_mem = new(C, PATH_LIMIT) PhiNode(result_reg, Type::MEMORY, - TypePtr::BOTTOM); - record_for_igvn(result_reg); - - const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM; - int raw_adr_idx = Compile::AliasIdxRaw; - const bool raw_mem_only = true; - - Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL); - if (array_ctl != NULL) { - // It's an array. - PreserveJVMState pjvms(this); - set_control(array_ctl); - Node* obj_length = load_array_length(obj); - Node* obj_size = NULL; - Node* alloc_obj = new_array(obj_klass, obj_length, nargs, - raw_mem_only, &obj_size); - - if (!use_ReduceInitialCardMarks()) { - // If it is an oop array, it requires very special treatment, - // because card marking is required on each card of the array. - Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)NULL); - if (is_obja != NULL) { - PreserveJVMState pjvms2(this); - set_control(is_obja); - // Generate a direct call to the right arraycopy function(s). 
- bool disjoint_bases = true; - bool length_never_negative = true; - generate_arraycopy(TypeAryPtr::OOPS, T_OBJECT, - obj, intcon(0), alloc_obj, intcon(0), - obj_length, - disjoint_bases, length_never_negative); - result_reg->init_req(_objArray_path, control()); - result_val->init_req(_objArray_path, alloc_obj); - result_i_o ->set_req(_objArray_path, i_o()); - result_mem ->set_req(_objArray_path, reset_memory()); + // Conservatively insert a memory barrier on all memory slices. + // Do not let writes into the original float below the clone. + insert_mem_bar(Op_MemBarCPUOrder); + + // paths into result_reg: + enum { + _slow_path = 1, // out-of-line call to clone method (virtual or not) + _objArray_path, // plain array allocation, plus arrayof_oop_arraycopy + _array_path, // plain array allocation, plus arrayof_long_arraycopy + _instance_path, // plain instance allocation, plus arrayof_long_arraycopy + PATH_LIMIT + }; + RegionNode* result_reg = new(C, PATH_LIMIT) RegionNode(PATH_LIMIT); + result_val = new(C, PATH_LIMIT) PhiNode(result_reg, + TypeInstPtr::NOTNULL); + PhiNode* result_i_o = new(C, PATH_LIMIT) PhiNode(result_reg, Type::ABIO); + PhiNode* result_mem = new(C, PATH_LIMIT) PhiNode(result_reg, Type::MEMORY, + TypePtr::BOTTOM); + record_for_igvn(result_reg); + + const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM; + int raw_adr_idx = Compile::AliasIdxRaw; + const bool raw_mem_only = true; + + + Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL); + if (array_ctl != NULL) { + // It's an array. + PreserveJVMState pjvms(this); + set_control(array_ctl); + Node* obj_length = load_array_length(obj); + Node* obj_size = NULL; + Node* alloc_obj = new_array(obj_klass, obj_length, 0, + raw_mem_only, &obj_size); + + if (!use_ReduceInitialCardMarks()) { + // If it is an oop array, it requires very special treatment, + // because card marking is required on each card of the array. + Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)NULL); + if (is_obja != NULL) { + PreserveJVMState pjvms2(this); + set_control(is_obja); + // Generate a direct call to the right arraycopy function(s). + bool disjoint_bases = true; + bool length_never_negative = true; + generate_arraycopy(TypeAryPtr::OOPS, T_OBJECT, + obj, intcon(0), alloc_obj, intcon(0), + obj_length, + disjoint_bases, length_never_negative); + result_reg->init_req(_objArray_path, control()); + result_val->init_req(_objArray_path, alloc_obj); + result_i_o ->set_req(_objArray_path, i_o()); + result_mem ->set_req(_objArray_path, reset_memory()); + } + } + // We can dispense with card marks if we know the allocation + // comes out of eden (TLAB)... In fact, ReduceInitialCardMarks + // causes the non-eden paths to simulate a fresh allocation, + // insofar that no further card marks are required to initialize + // the object. + + // Otherwise, there are no card marks to worry about. + + if (!stopped()) { + copy_to_clone(obj, alloc_obj, obj_size, true, false); + + // Present the results of the copy. + result_reg->init_req(_array_path, control()); + result_val->init_req(_array_path, alloc_obj); + result_i_o ->set_req(_array_path, i_o()); + result_mem ->set_req(_array_path, reset_memory()); } } - // We can dispense with card marks if we know the allocation - // comes out of eden (TLAB)... In fact, ReduceInitialCardMarks - // causes the non-eden paths to simulate a fresh allocation, - // insofar that no further card marks are required to initialize - // the object. - - // Otherwise, there are no card marks to worry about. 
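Context note: the separate object-array path above exists because Object.clone of an array is shallow; every element reference stored into the fresh array is an oop store, and without ReduceInitialCardMarks those stores need card marks for the generational collector. A small user-level Java reminder of the shallow semantics (CloneDemo is an illustrative name, not part of the patch):

    // clone() on an array yields a new array object whose elements are the
    // same references as the original; those reference stores are the ones
    // the card-marking arraycopy path has to cover.
    public class CloneDemo {
        public static void main(String[] args) {
            String[] a = { "x", "y" };
            String[] b = a.clone();
            System.out.println(b != a);       // true: distinct array objects
            System.out.println(b[0] == a[0]); // true: shared element references
        }
    }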
+ + // We only go to the instance fast case code if we pass a number of guards. + // The paths which do not pass are accumulated in the slow_region. + RegionNode* slow_region = new (C, 1) RegionNode(1); + record_for_igvn(slow_region); + if (!stopped()) { + // It's an instance (we did array above). Make the slow-path tests. + // If this is a virtual call, we generate a funny guard. We grab + // the vtable entry corresponding to clone() from the target object. + // If the target method which we are calling happens to be the + // Object clone() method, we pass the guard. We do not need this + // guard for non-virtual calls; the caller is known to be the native + // Object clone(). + if (is_virtual) { + generate_virtual_guard(obj_klass, slow_region); + } + + // The object must be cloneable and must not have a finalizer. + // Both of these conditions may be checked in a single test. + // We could optimize the cloneable test further, but we don't care. + generate_access_flags_guard(obj_klass, + // Test both conditions: + JVM_ACC_IS_CLONEABLE | JVM_ACC_HAS_FINALIZER, + // Must be cloneable but not finalizer: + JVM_ACC_IS_CLONEABLE, + slow_region); + } if (!stopped()) { - copy_to_clone(obj, alloc_obj, obj_size, true, false); - - // Present the results of the copy. - result_reg->init_req(_array_path, control()); - result_val->init_req(_array_path, alloc_obj); - result_i_o ->set_req(_array_path, i_o()); - result_mem ->set_req(_array_path, reset_memory()); - } - } - - // We only go to the instance fast case code if we pass a number of guards. - // The paths which do not pass are accumulated in the slow_region. - RegionNode* slow_region = new (C, 1) RegionNode(1); - record_for_igvn(slow_region); - if (!stopped()) { - // It's an instance (we did array above). Make the slow-path tests. - // If this is a virtual call, we generate a funny guard. We grab - // the vtable entry corresponding to clone() from the target object. - // If the target method which we are calling happens to be the - // Object clone() method, we pass the guard. We do not need this - // guard for non-virtual calls; the caller is known to be the native - // Object clone(). - if (is_virtual) { - generate_virtual_guard(obj_klass, slow_region); + // It's an instance, and it passed the slow-path tests. + PreserveJVMState pjvms(this); + Node* obj_size = NULL; + Node* alloc_obj = new_instance(obj_klass, NULL, raw_mem_only, &obj_size); + + copy_to_clone(obj, alloc_obj, obj_size, false, !use_ReduceInitialCardMarks()); + + // Present the results of the slow call. + result_reg->init_req(_instance_path, control()); + result_val->init_req(_instance_path, alloc_obj); + result_i_o ->set_req(_instance_path, i_o()); + result_mem ->set_req(_instance_path, reset_memory()); } - // The object must be cloneable and must not have a finalizer. - // Both of these conditions may be checked in a single test. - // We could optimize the cloneable test further, but we don't care. - generate_access_flags_guard(obj_klass, - // Test both conditions: - JVM_ACC_IS_CLONEABLE | JVM_ACC_HAS_FINALIZER, - // Must be cloneable but not finalizer: - JVM_ACC_IS_CLONEABLE, - slow_region); - } - - if (!stopped()) { - // It's an instance, and it passed the slow-path tests. - PreserveJVMState pjvms(this); - Node* obj_size = NULL; - Node* alloc_obj = new_instance(obj_klass, NULL, raw_mem_only, &obj_size); - - copy_to_clone(obj, alloc_obj, obj_size, false, !use_ReduceInitialCardMarks()); - - // Present the results of the slow call. 
- result_reg->init_req(_instance_path, control()); - result_val->init_req(_instance_path, alloc_obj); - result_i_o ->set_req(_instance_path, i_o()); - result_mem ->set_req(_instance_path, reset_memory()); - } - - // Generate code for the slow case. We make a call to clone(). - set_control(_gvn.transform(slow_region)); - if (!stopped()) { - PreserveJVMState pjvms(this); - CallJavaNode* slow_call = generate_method_call(vmIntrinsics::_clone, is_virtual); - Node* slow_result = set_results_for_java_call(slow_call); - // this->control() comes from set_results_for_java_call - result_reg->init_req(_slow_path, control()); - result_val->init_req(_slow_path, slow_result); - result_i_o ->set_req(_slow_path, i_o()); - result_mem ->set_req(_slow_path, reset_memory()); - } - - // Return the combined state. - set_control( _gvn.transform(result_reg) ); - set_i_o( _gvn.transform(result_i_o) ); - set_all_memory( _gvn.transform(result_mem) ); + // Generate code for the slow case. We make a call to clone(). + set_control(_gvn.transform(slow_region)); + if (!stopped()) { + PreserveJVMState pjvms(this); + CallJavaNode* slow_call = generate_method_call(vmIntrinsics::_clone, is_virtual); + Node* slow_result = set_results_for_java_call(slow_call); + // this->control() comes from set_results_for_java_call + result_reg->init_req(_slow_path, control()); + result_val->init_req(_slow_path, slow_result); + result_i_o ->set_req(_slow_path, i_o()); + result_mem ->set_req(_slow_path, reset_memory()); + } + + // Return the combined state. + set_control( _gvn.transform(result_reg) ); + set_i_o( _gvn.transform(result_i_o) ); + set_all_memory( _gvn.transform(result_mem) ); + } //original reexecute and sp are set back here push(_gvn.transform(result_val)); diff -r 3ee342e25e57 -r b1773b9a2ca1 src/share/vm/opto/mulnode.cpp --- a/src/share/vm/opto/mulnode.cpp Wed Aug 05 12:33:29 2009 -0700 +++ b/src/share/vm/opto/mulnode.cpp Sun Aug 09 17:03:51 2009 -0700 @@ -608,16 +608,14 @@ } // Are we masking a long that was converted from an int with a mask - // that fits in 32-bits? Commute them and use an AndINode. - if (op == Op_ConvI2L && (mask & CONST64(0xFFFFFFFF00000000)) == 0) { - // If we are doing an UI2L conversion (i.e. the mask is - // 0x00000000FFFFFFFF) we cannot convert the AndL to an AndI - // because the AndI would be optimized away later in Identity. - if (mask != CONST64(0x00000000FFFFFFFF)) { - Node* andi = new (phase->C, 3) AndINode(in1->in(1), phase->intcon(mask)); - andi = phase->transform(andi); - return new (phase->C, 2) ConvI2LNode(andi); - } + // that fits in 32-bits? Commute them and use an AndINode. Don't + // convert masks which would cause a sign extension of the integer + // value. This check includes UI2L masks (0x00000000FFFFFFFF) which + // would be optimized away later in Identity. + if (op == Op_ConvI2L && (mask & CONST64(0xFFFFFFFF80000000)) == 0) { + Node* andi = new (phase->C, 3) AndINode(in1->in(1), phase->intcon(mask)); + andi = phase->transform(andi); + return new (phase->C, 2) ConvI2LNode(andi); } // Masking off sign bits? Dont make them! diff -r 3ee342e25e57 -r b1773b9a2ca1 src/share/vm/opto/output.cpp --- a/src/share/vm/opto/output.cpp Wed Aug 05 12:33:29 2009 -0700 +++ b/src/share/vm/opto/output.cpp Sun Aug 09 17:03:51 2009 -0700 @@ -911,8 +911,9 @@ ciMethod* scope_method = method ? 
method : _method; // Describe the scope here assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI"); + assert(!jvms->should_reexecute() || depth==max_depth, "reexecute allowed only for the youngest"); // Now we can describe the scope. - debug_info()->describe_scope(safepoint_pc_offset,scope_method,jvms->bci(),locvals,expvals,monvals); + debug_info()->describe_scope(safepoint_pc_offset,scope_method,jvms->bci(),jvms->should_reexecute(),locvals,expvals,monvals); } // End jvms loop // Mark the end of the scope set. @@ -994,7 +995,8 @@ for (int depth = 1; depth <= max_depth; depth++) { JVMState* jvms = youngest_jvms->of_depth(depth); ciMethod* method = jvms->has_method() ? jvms->method() : NULL; - debug_info->describe_scope(pc_offset, method, jvms->bci()); + assert(!jvms->should_reexecute() || depth==max_depth, "reexecute allowed only for the youngest"); + debug_info->describe_scope(pc_offset, method, jvms->bci(), jvms->should_reexecute()); } // Mark the end of the scope set. diff -r 3ee342e25e57 -r b1773b9a2ca1 src/share/vm/runtime/atomic.hpp --- a/src/share/vm/runtime/atomic.hpp Wed Aug 05 12:33:29 2009 -0700 +++ b/src/share/vm/runtime/atomic.hpp Sun Aug 09 17:03:51 2009 -0700 @@ -39,6 +39,8 @@ static void store_ptr(intptr_t store_value, volatile intptr_t* dest); static void store_ptr(void* store_value, volatile void* dest); + static jlong load(volatile jlong* src); + // Atomically add to a location, return updated value static jint add (jint add_value, volatile jint* dest); static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest); diff -r 3ee342e25e57 -r b1773b9a2ca1 src/share/vm/runtime/globals.hpp --- a/src/share/vm/runtime/globals.hpp Wed Aug 05 12:33:29 2009 -0700 +++ b/src/share/vm/runtime/globals.hpp Sun Aug 09 17:03:51 2009 -0700 @@ -3309,7 +3309,7 @@ product(bool, AnonymousClasses, false, \ "support sun.misc.Unsafe.defineAnonymousClass") \ \ - product(bool, EnableMethodHandles, false, \ + experimental(bool, EnableMethodHandles, false, \ "support method handles (true by default under JSR 292)") \ \ diagnostic(intx, MethodHandlePushLimit, 3, \ @@ -3324,7 +3324,7 @@ diagnostic(bool, OptimizeMethodHandles, true, \ "when constructing method handles, try to improve them") \ \ - product(bool, EnableInvokeDynamic, false, \ + experimental(bool, EnableInvokeDynamic, false, \ "recognize the invokedynamic instruction") \ \ develop(bool, TraceInvokeDynamic, false, \ diff -r 3ee342e25e57 -r b1773b9a2ca1 src/share/vm/runtime/vframe.hpp --- a/src/share/vm/runtime/vframe.hpp Wed Aug 05 12:33:29 2009 -0700 +++ b/src/share/vm/runtime/vframe.hpp Sun Aug 09 17:03:51 2009 -0700 @@ -402,7 +402,12 @@ DebugInfoReadStream buffer(nm(), decode_offset); _sender_decode_offset = buffer.read_int(); _method = methodOop(buffer.read_oop()); - _bci = buffer.read_bci(); + // Deoptimization needs reexecute bit to determine whether to reexecute the bytecode + // only at the time when it "unpack_frames", and the reexecute bit info could always + // be obtained from the scopeDesc in the compiledVFrame. As a result, we don't keep + // the reexecute bit here. 
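Aside on the mulnode.cpp hunk earlier in this change: the exclusion mask was widened from 0xFFFFFFFF00000000 to 0xFFFFFFFF80000000 so that the AndL(ConvI2L(x), mask) to ConvI2L(AndI(x, mask)) rewrite never fires for a mask that covers bit 31, because ConvI2L sign-extends. A stand-alone Java check of the invariant being protected (MaskDemo is an illustrative name, not part of the patch):

    // With bit 31 clear the commuted form is equal; with bit 31 set,
    // sign extension of the 32-bit AND result changes the long value.
    public class MaskDemo {
        public static void main(String[] args) {
            int x = -1;
            long safe = 0x7FFFFFFFL;    // bit 31 clear: rewrite is sound
            long unsafe = 0x80000000L;  // bit 31 set: rewrite would be wrong
            System.out.println(((long) x & safe) == (long) (x & (int) safe));     // true
            System.out.println(((long) x & unsafe) == (long) (x & (int) unsafe)); // false
        }
    }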
+ bool dummy_reexecute;
+ _bci = buffer.read_bci_and_reexecute(dummy_reexecute);
assert(_method->is_method(), "checking type of decoded method");
}
diff -r 3ee342e25e57 -r b1773b9a2ca1 src/share/vm/runtime/vframeArray.cpp
--- a/src/share/vm/runtime/vframeArray.cpp Wed Aug 05 12:33:29 2009 -0700
+++ b/src/share/vm/runtime/vframeArray.cpp Sun Aug 09 17:03:51 2009 -0700
@@ -44,6 +44,7 @@
_method = vf->method();
_bci = vf->raw_bci();
+ _reexecute = vf->should_reexecute();
int index;
@@ -148,16 +149,20 @@
// C++ interpreter doesn't need a pc since it will figure out what to do when it
// begins execution
address pc;
- bool use_next_mdp; // true if we should use the mdp associated with the next bci
- // rather than the one associated with bcp
+ bool use_next_mdp = false; // true if we should use the mdp associated with the next bci
+ // rather than the one associated with bcp
if (raw_bci() == SynchronizationEntryBCI) {
// We are deoptimizing while hanging in prologue code for synchronized method
bcp = method()->bcp_from(0); // first byte code
pc = Interpreter::deopt_entry(vtos, 0); // step = 0 since we don't skip current bytecode
- use_next_mdp = false;
+ } else if (should_reexecute()) { //reexecute this bytecode
+ assert(is_top_frame, "reexecute allowed only for the top frame");
+ bcp = method()->bcp_from(bci());
+ pc = Interpreter::deopt_reexecute_entry(method(), bcp);
} else {
bcp = method()->bcp_from(bci());
- pc = Interpreter::continuation_for(method(), bcp, callee_parameters, is_top_frame, use_next_mdp);
+ pc = Interpreter::deopt_continue_after_entry(method(), bcp, callee_parameters, is_top_frame);
+ use_next_mdp = true;
}
assert(Bytecodes::is_defined(*bcp), "must be a valid bytecode");
diff -r 3ee342e25e57 -r b1773b9a2ca1 src/share/vm/runtime/vframeArray.hpp
--- a/src/share/vm/runtime/vframeArray.hpp Wed Aug 05 12:33:29 2009 -0700
+++ b/src/share/vm/runtime/vframeArray.hpp Sun Aug 09 17:03:51 2009 -0700
@@ -41,7 +41,8 @@
private:
frame _frame; // the interpreter frame we will unpack into
- int _bci; // raw bci for this vframe
+ int _bci; // raw bci for this vframe
+ bool _reexecute; // whether we should reexecute this bytecode
methodOop _method; // the method for this vframe
MonitorChunk* _monitors; // active monitors for this vframe
StackValueCollection* _locals;
@@ -54,6 +55,7 @@
int bci(void) const;
int raw_bci(void) const { return _bci; }
+ bool should_reexecute(void) const { return _reexecute; }
methodOop method(void) const { return _method; }
diff -r 3ee342e25e57 -r b1773b9a2ca1 src/share/vm/runtime/vframe_hp.cpp
--- a/src/share/vm/runtime/vframe_hp.cpp Wed Aug 05 12:33:29 2009 -0700
+++ b/src/share/vm/runtime/vframe_hp.cpp Sun Aug 09 17:03:51 2009 -0700
@@ -276,6 +276,15 @@
return scope()->bci();
}
+bool compiledVFrame::should_reexecute() const {
+ if (scope() == NULL) {
+ // native nmethods have no scope; the method/bci is implied
+ nmethod* nm = code();
+ assert(nm->is_native_method(), "must be native");
+ return false;
+ }
+ return scope()->should_reexecute();
+}
vframe* compiledVFrame::sender() const {
const frame f = fr();
diff -r 3ee342e25e57 -r b1773b9a2ca1 src/share/vm/runtime/vframe_hp.hpp
--- a/src/share/vm/runtime/vframe_hp.hpp Wed Aug 05 12:33:29 2009 -0700
+++ b/src/share/vm/runtime/vframe_hp.hpp Sun Aug 09 17:03:51 2009 -0700
@@ -25,11 +25,12 @@
class compiledVFrame: public javaVFrame {
public:
// JVM state
- methodOop method() const;
- int bci() const;
- StackValueCollection* locals() const;
- StackValueCollection* expressions() const;
- GrowableArray* monitors()
const; + methodOop method() const; + int bci() const; + bool should_reexecute() const; + StackValueCollection* locals() const; + StackValueCollection* expressions() const; + GrowableArray* monitors() const; void set_locals(StackValueCollection* values) const; diff -r 3ee342e25e57 -r b1773b9a2ca1 test/compiler/6826736/Test.java --- a/test/compiler/6826736/Test.java Wed Aug 05 12:33:29 2009 -0700 +++ b/test/compiler/6826736/Test.java Sun Aug 09 17:03:51 2009 -0700 @@ -27,7 +27,7 @@ * @bug 6826736 * @summary CMS: core dump with -XX:+UseCompressedOops * - * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -Xbatch -XX:+ScavengeALot -XX:+UseCompressedOops -XX:HeapBaseMinAddress=32g -XX:CompileThreshold=100 -XX:CompileOnly=Test.test -XX:-BlockLayoutRotateLoops -XX:LoopUnrollLimit=0 Test + * @run main/othervm/timeout=600 -XX:+IgnoreUnrecognizedVMOptions -Xbatch -XX:+ScavengeALot -XX:+UseCompressedOops -XX:HeapBaseMinAddress=32g -XX:CompileThreshold=100 -XX:CompileOnly=Test.test -XX:-BlockLayoutRotateLoops -XX:LoopUnrollLimit=0 Test */ public class Test { diff -r 3ee342e25e57 -r b1773b9a2ca1 test/compiler/6833129/Test.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/6833129/Test.java Sun Aug 09 17:03:51 2009 -0700 @@ -0,0 +1,62 @@ +/* + * Copyright 2009 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + */ + +/** + * @test + * @bug 6833129 + * @summary Object.clone() and Arrays.copyOf ignore coping with -XX:+DeoptimizeALot + * @run main/othervm -Xbatch -XX:+DeoptimizeALot Test + */ + +public class Test{ + public static void init(int src[]) { + for (int i =0; i { + protected static final int DEFAULT_INITIAL_CAPACITY = 16; + private static final int MAXIMUM_CAPACITY = 1 << 30; + private static final float DEFAULT_LOAD_FACTOR = 0.75f; + + protected Entry[] table; + + private int size; + protected int threshold; + private final float loadFactor; + private final ReferenceQueue queue = new ReferenceQueue(); + + public WeakPool() + { + this.loadFactor = DEFAULT_LOAD_FACTOR; + threshold = DEFAULT_INITIAL_CAPACITY; + table = new Entry[DEFAULT_INITIAL_CAPACITY]; + } + + /** + * Check for equality of non-null reference x and possibly-null y. By + * default uses Object.equals. + */ + private boolean eq(Object x, Object y) + { + return x == y || x.equals(y); + } + + /** + * Return index for hash code h. + */ + private int indexFor(int h, int length) + { + return h & length - 1; + } + + /** + * Expunge stale entries from the table. 
+ */ + private void expungeStaleEntries() + { + Object r; + while ((r = queue.poll()) != null) + { + Entry e = (Entry) r; + int h = e.hash; + int i = indexFor(h, table.length); + + // System.out.println("EXPUNGING " + h); + Entry prev = table[i]; + Entry p = prev; + while (p != null) + { + Entry next = p.next; + if (p == e) + { + if (prev == e) + { + table[i] = next; + } + else + { + prev.next = next; + } + e.next = null; // Help GC + size--; + break; + } + prev = p; + p = next; + } + } + } + + /** + * Return the table after first expunging stale entries + */ + private Entry[] getTable() + { + expungeStaleEntries(); + return table; + } + + /** + * Returns the number of key-value mappings in this map. + * This result is a snapshot, and may not reflect unprocessed + * entries that will be removed before next attempted access + * because they are no longer referenced. + */ + public int size() + { + if (size == 0) + { + return 0; + } + expungeStaleEntries(); + return size; + } + + /** + * Returns true if this map contains no key-value mappings. + * This result is a snapshot, and may not reflect unprocessed + * entries that will be removed before next attempted access + * because they are no longer referenced. + */ + public boolean isEmpty() + { + return size() == 0; + } + + /** + * Returns the value stored in the pool that equals the requested key + * or null if the map contains no mapping for + * this key (or the key is null) + * + * @param key the key whose equals value is to be returned. + * @return the object that is equal the specified key, or + * null if key is null or no object in the pool equals the key. + */ + public V get(V key) + { + if (key == null) + { + return null; + } + int h = key.hashCode(); + Entry[] tab = getTable(); + int index = indexFor(h, tab.length); + Entry e = tab[index]; + while (e != null) + { + V candidate = e.get(); + if (e.hash == h && eq(key, candidate)) + { + return candidate; + } + e = e.next; + } + return null; + } + + /** + * Returns the entry associated with the specified key in the HashMap. + * Returns null if the HashMap contains no mapping for this key. + */ + Entry getEntry(Object key) + { + int h = key.hashCode(); + Entry[] tab = getTable(); + int index = indexFor(h, tab.length); + Entry e = tab[index]; + while (e != null && !(e.hash == h && eq(key, e.get()))) + { + e = e.next; + } + return e; + } + + /** + * Places the object into the pool. If the object is null, nothing happens. + * If an equal object already exists, it is not replaced. + * + * @param key the object to put into the pool. key may be null. + * @return the object in the pool that is equal to the key, or the newly placed key if no such object existed when put was called + */ + public V put(V key) + { + if (key == null) + { + return null; + } + int h = key.hashCode(); + Entry[] tab = getTable(); + int i = indexFor(h, tab.length); + + for (Entry e = tab[i]; e != null; e = e.next) + { + V candidate = e.get(); + if (h == e.hash && eq(key, candidate)) + { + return candidate; + } + } + + tab[i] = new Entry(key, queue, h, tab[i]); + + if (++size >= threshold) + { + resize(tab.length * 2); + } + + // System.out.println("Added " + key + " to pool"); + return key; + } + + /** + * Rehashes the contents of this map into a new array with a + * larger capacity. This method is called automatically when the + * number of keys in this map reaches its threshold. + *
+ * If current capacity is MAXIMUM_CAPACITY, this method does not + * resize the map, but but sets threshold to Integer.MAX_VALUE. + * This has the effect of preventing future calls. + * + * @param newCapacity the new capacity, MUST be a power of two; + * must be greater than current capacity unless current + * capacity is MAXIMUM_CAPACITY (in which case value + * is irrelevant). + */ + void resize(int newCapacity) + { + Entry[] oldTable = getTable(); + int oldCapacity = oldTable.length; + if (oldCapacity == MAXIMUM_CAPACITY) + { + threshold = Integer.MAX_VALUE; + return; + } + + Entry[] newTable = new Entry[newCapacity]; + transfer(oldTable, newTable); + table = newTable; + + /* + * If ignoring null elements and processing ref queue caused massive + * shrinkage, then restore old table. This should be rare, but avoids + * unbounded expansion of garbage-filled tables. + */ + if (size >= threshold / 2) + { + threshold = (int) (newCapacity * loadFactor); + } + else + { + expungeStaleEntries(); + transfer(newTable, oldTable); + table = oldTable; + } + } + + /** + * Transfer all entries from src to dest tables + */ + private void transfer(Entry[] src, Entry[] dest) + { + for (int j = 0; j < src.length; ++j) + { + Entry e = src[j]; + src[j] = null; + while (e != null) + { + Entry next = e.next; + Object key = e.get(); + if (key == null) + { + e.next = null; // Help GC + size--; + } + else + { + int i = indexFor(e.hash, dest.length); + e.next = dest[i]; + dest[i] = e; + } + e = next; + } + } + } + + /** + * Removes the object in the pool that equals the key. + * + * @param key + * @return previous value associated with specified key, or null + * if there was no mapping for key or the key is null. + */ + public V removeFromPool(V key) + { + if (key == null) + { + return null; + } + int h = key.hashCode(); + Entry[] tab = getTable(); + int i = indexFor(h, tab.length); + Entry prev = tab[i]; + Entry e = prev; + + while (e != null) + { + Entry next = e.next; + V candidate = e.get(); + if (h == e.hash && eq(key, candidate)) + { + size--; + if (prev == e) + { + tab[i] = next; + } + else + { + prev.next = next; + } + return candidate; + } + prev = e; + e = next; + } + + return null; + } + + /** + * Removes all mappings from this map. + */ + public void clear() + { + // clear out ref queue. We don't need to expunge entries + // since table is getting cleared. + while (queue.poll() != null) + { + // nop + } + + table = new Entry[DEFAULT_INITIAL_CAPACITY]; + threshold = DEFAULT_INITIAL_CAPACITY; + size = 0; + + // Allocation of array may have caused GC, which may have caused + // additional entries to go stale. Removing these entries from the + // reference queue will make them eligible for reclamation. + while (queue.poll() != null) + { + // nop + } + } + + /** + * The entries in this hash table extend WeakReference, using its main ref + * field as the key. + */ + protected static class Entry + extends WeakReference + { + private final int hash; + private Entry next; + + /** + * Create new entry. 
+ */ + Entry(final V key, final ReferenceQueue queue, final int hash, final Entry next) + { + super(key, queue); + this.hash = hash; + this.next = next; + } + + public V getKey() + { + return super.get(); + } + + public boolean equals(Object o) + { + if (!(o instanceof WeakPool.Entry)) + { + return false; + } + WeakPool.Entry that = (WeakPool.Entry) o; + V k1 = this.getKey(); + V k2 = that.getKey(); + return (k1==k2 || k1.equals(k2)); + } + + public int hashCode() + { + return this.hash; + } + + public String toString() + { + return String.valueOf(this.getKey()); + } + } +} + +final class MultiSynonymKey { + private List keys; + + public MultiSynonymKey() { + keys = new ArrayList(); + } + + public MultiSynonymKey(MyList... arg) { + keys = Arrays.asList(arg); + } + + public List getKeys() { + return keys; + } + + public int hashCode() { + return this.getKeys().hashCode(); + } + + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (!(obj instanceof MultiSynonymKey)) { + return false; + } + + MultiSynonymKey that = (MultiSynonymKey) obj; + return this.getKeys().equals(that.getKeys()); + } + + public String toString() { + return this.getClass().getName() + this.getKeys().toString(); + } +} + +public class Test extends Thread { + static public Test test; + static private byte[] arg1; + static private byte[] arg2; + static public WeakPool wp; + public volatile MultiSynonymKey ml1; + public volatile MultiSynonymKey ml2; + private volatile MultiSynonymKey ml3; + + public void run() { + int count=0; + while (true) { + try { + Thread.sleep(10); + } catch (Exception e) {} + synchronized (wp) { + ml2 = new MultiSynonymKey(new DoubletonList(new String(arg1), new String(arg2))); + wp.put(ml2); + ml3 = new MultiSynonymKey(new DoubletonList(new String(arg1), new String(arg2))); + } + try { + Thread.sleep(10); + } catch (Exception e) {} + synchronized (wp) { + ml1 = new MultiSynonymKey(new SingletonList(new String(arg1))); + wp.put(ml1); + ml3 = new MultiSynonymKey(new SingletonList(new String(arg1))); + } + if (count++==100) + System.exit(95); + } + } + + public static void main(String[] args) throws Exception { + wp = new WeakPool(); + test = new Test(); + + test.arg1 = args[0].getBytes(); + test.arg2 = args[1].getBytes(); + + test.ml1 = new MultiSynonymKey(new SingletonList(new String(test.arg1))); + test.ml2 = new MultiSynonymKey(new DoubletonList(new String(test.arg1), new String(test.arg2))); + test.ml3 = new MultiSynonymKey(new DoubletonList(new String(test.arg1), new String(test.arg2))); + + wp.put(test.ml1); + wp.put(test.ml2); + + test.setDaemon(true); + test.start(); + + int counter = 0; + while (true) { + synchronized (wp) { + MultiSynonymKey foo = test.ml3; + + if (wp.put(foo) == foo) { + // System.out.println("foo " + counter); + // System.out.println(foo); + } + } + counter++; + } + } + + private boolean eq(Object x, Object y) { + return x == y || x.equals(y); + } +}
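Usage note on the pool class exercised by this test: WeakPool behaves as a canonicalizing intern pool over weak references, so put returns the already-pooled equal object while one is strongly reachable, and only admits its argument as the new canonical instance otherwise. A minimal sketch of that contract, assuming it compiles alongside the test's WeakPool class (WeakPoolDemo is an illustrative name, not part of the patch):

    // put() canonicalizes: the first equal object stored stays the pooled
    // instance until it is garbage collected and expunged.
    public class WeakPoolDemo {
        public static void main(String[] args) {
            WeakPool pool = new WeakPool();
            String a = new String("key");
            String b = new String("key");         // equal to a, distinct identity
            System.out.println(pool.put(a) == a); // true: pool was empty
            System.out.println(pool.put(b) == a); // true: canonicalized to a
            System.out.println(pool.get(b) == a); // true: lookup by an equal key
        }
    }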