# HG changeset patch
# User dcubed
# Date 1205443068 25200
# Node ID 75b0f3cb19431f73f3f84c3020ec6908030e0524
# Parent 8b6e4918764062b4e92c050a58fa2d32d3c00eb7
# Parent 1ffa5cdd0b7eeb0f2f2849219c0712d8e4141f6d
Merge

diff -r 8b6e49187640 -r 75b0f3cb1943 agent/src/os/linux/ps_core.c
--- a/agent/src/os/linux/ps_core.c Thu Mar 13 05:40:44 2008 -0700
+++ b/agent/src/os/linux/ps_core.c Thu Mar 13 14:17:48 2008 -0700
@@ -518,10 +518,10 @@
 }
 
 static ps_prochandle_ops core_ops = {
-   release: core_release,
-   p_pread: core_read_data,
-   p_pwrite: core_write_data,
-   get_lwp_regs: core_get_lwp_regs
+   .release= core_release,
+   .p_pread= core_read_data,
+   .p_pwrite= core_write_data,
+   .get_lwp_regs= core_get_lwp_regs
 };
 
 // read regs and create thread from NT_PRSTATUS entries from core file
diff -r 8b6e49187640 -r 75b0f3cb1943 agent/src/os/linux/ps_proc.c
--- a/agent/src/os/linux/ps_proc.c Thu Mar 13 05:40:44 2008 -0700
+++ b/agent/src/os/linux/ps_proc.c Thu Mar 13 14:17:48 2008 -0700
@@ -291,10 +291,10 @@
 }
 
 static ps_prochandle_ops process_ops = {
-   release: process_cleanup,
-   p_pread: process_read_data,
-   p_pwrite: process_write_data,
-   get_lwp_regs: process_get_lwp_regs
+   .release= process_cleanup,
+   .p_pread= process_read_data,
+   .p_pwrite= process_write_data,
+   .get_lwp_regs= process_get_lwp_regs
 };
 
 // attach to the process. One and only one exposed stuff
diff -r 8b6e49187640 -r 75b0f3cb1943 build/linux/Makefile
--- a/build/linux/Makefile Thu Mar 13 05:40:44 2008 -0700
+++ b/build/linux/Makefile Thu Mar 13 14:17:48 2008 -0700
@@ -80,6 +80,11 @@
   MFLAGS += " LP64=1 "
 endif
 
+# pass USE_SUNCC further, through MFLAGS
+ifdef USE_SUNCC
+  MFLAGS += " USE_SUNCC=1 "
+endif
+
 # The following renders pathnames in generated Makefiles valid on
 # machines other than the machine containing the build tree.
 #
diff -r 8b6e49187640 -r 75b0f3cb1943 build/linux/makefiles/amd64.make
--- a/build/linux/makefiles/amd64.make Thu Mar 13 05:40:44 2008 -0700
+++ b/build/linux/makefiles/amd64.make Thu Mar 13 14:17:48 2008 -0700
@@ -35,6 +35,8 @@
 CFLAGS += -D_LP64=1
 
 # The serviceability agent relies on frame pointer (%rbp) to walk thread stack
-CFLAGS += -fno-omit-frame-pointer
+ifndef USE_SUNCC
+  CFLAGS += -fno-omit-frame-pointer
+endif
 
 OPT_CFLAGS/compactingPermGenGen.o = -O1
diff -r 8b6e49187640 -r 75b0f3cb1943 build/linux/makefiles/buildtree.make
--- a/build/linux/makefiles/buildtree.make Thu Mar 13 05:40:44 2008 -0700
+++ b/build/linux/makefiles/buildtree.make Thu Mar 13 14:17:48 2008 -0700
@@ -63,7 +63,11 @@
 # For now, until the compiler is less wobbly:
 TESTFLAGS = -Xbatch -showversion
 
-PLATFORM_FILE = $(GAMMADIR)/build/$(OS_FAMILY)/platform_$(BUILDARCH)
+ifdef USE_SUNCC
+PLATFORM_FILE = $(GAMMADIR)/build/$(OS_FAMILY)/platform_$(BUILDARCH).suncc
+else
+PLATFORM_FILE = $(GAMMADIR)/build/$(OS_FAMILY)/platform_$(BUILDARCH)
+endif
 
 ifdef FORCE_TIERED
 ifeq ($(VARIANT),tiered)
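The ps_core.c and ps_proc.c hunks above move the ops tables from the old GNU-only "field: value" initializer syntax to C99 designated initializers (".field = value"), which gcc and the Sun Studio C compiler both accept; the makefile hunks then plumb USE_SUNCC through MFLAGS, pick the .suncc platform files, and drop the gcc-only -fno-omit-frame-pointer flag. A minimal, self-contained C sketch of the two initializer spellings, using a hypothetical ops struct rather than the real ps_prochandle_ops:

    #include <stddef.h>

    struct demo_ops {
      void (*release)(void);
      int  (*p_pread)(void *buf, size_t size);
    };

    static void demo_release(void) {}
    static int  demo_pread(void *buf, size_t size) { (void)buf; (void)size; return 0; }

    /* Old spelling, a GNU extension understood only by gcc:
     *   static struct demo_ops ops = { release: demo_release, p_pread: demo_pread };
     */

    /* C99 designated initializers, portable across gcc and SunCC: */
    static struct demo_ops ops = { .release = demo_release, .p_pread = demo_pread };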
diff -r 8b6e49187640 -r 75b0f3cb1943 build/linux/makefiles/sparcWorks.make
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/build/linux/makefiles/sparcWorks.make Thu Mar 13 14:17:48 2008 -0700
@@ -0,0 +1,93 @@
+#
+# Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+# CA 95054 USA or visit www.sun.com if you need additional information or
+# have any questions.
+#
+#
+
+#------------------------------------------------------------------------
+# CC, CPP & AS
+
+CPP = CC
+CC  = cc
+AS  = $(CC) -c
+
+ARCHFLAG = $(ARCHFLAG/$(BUILDARCH))
+ARCHFLAG/i486  = -m32
+ARCHFLAG/amd64 = -m64
+
+CFLAGS     += $(ARCHFLAG)
+AOUT_FLAGS += $(ARCHFLAG)
+LFLAGS     += $(ARCHFLAG)
+ASFLAGS    += $(ARCHFLAG)
+
+#------------------------------------------------------------------------
+# Compiler flags
+
+# position-independent code
+PICFLAG = -KPIC
+
+CFLAGS += $(PICFLAG)
+# no more exceptions
+CFLAGS += -features=no%except
+# Reduce code bloat by reverting back to 5.0 behavior for static initializers
+CFLAGS += -features=no%split_init
+# allow zero sized arrays
+CFLAGS += -features=zla
+
+# Use C++ Interpreter
+ifdef CC_INTERP
+  CFLAGS += -DCC_INTERP
+endif
+
+# We don't need libCstd.so and librwtools7.so, only libCrun.so
+CFLAGS += -library=Crun
+LIBS   += -lCrun
+
+CFLAGS += -mt
+LFLAGS += -mt
+
+# Compiler warnings are treated as errors
+#WARNINGS_ARE_ERRORS = -errwarn=%all
+CFLAGS_WARN/DEFAULT = $(WARNINGS_ARE_ERRORS)
+# Special cases
+CFLAGS_WARN/BYFILE = $(CFLAGS_WARN/$@)$(CFLAGS_WARN/DEFAULT$(CFLAGS_WARN/$@))
+
+# The flags to use for an Optimized build
+OPT_CFLAGS+=-xO4
+OPT_CFLAGS/NOOPT=-xO0
+
+#------------------------------------------------------------------------
+# Linker flags
+
+# Use $(MAPFLAG:FILENAME=real_file_name) to specify a map file.
+MAPFLAG = -Wl,--version-script=FILENAME
+
+# Use $(SONAMEFLAG:SONAME=soname) to specify the intrinsic name of a shared obj
+SONAMEFLAG = -h SONAME
+
+# Build shared library
+SHARED_FLAG = -G
+
+#------------------------------------------------------------------------
+# Debug flags
+DEBUG_CFLAGS += -g
+FASTDEBUG_CFLAGS = -g0
+
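Among the flags written into sparcWorks.make above, -features=zla tells the Sun Studio C++ compiler to accept zero-length arrays, which it rejects by default and which the build evidently needs. A tiny illustration of the construct the flag enables (hypothetical type, not taken from the HotSpot sources):

    // A fixed header followed by a variable payload; the zero-sized trailing
    // array member is the construct that -features=zla lets SunCC compile.
    struct PacketHeader {
      int  length;       // number of payload bytes that follow the header
      char payload[0];   // payload starts immediately after the header
    };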
diff -r 8b6e49187640 -r 75b0f3cb1943 build/linux/platform_amd64.suncc
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/build/linux/platform_amd64.suncc Thu Mar 13 14:17:48 2008 -0700
@@ -0,0 +1,17 @@
+os_family = linux
+
+arch = x86
+
+arch_model = x86_64
+
+os_arch = linux_x86
+
+os_arch_model = linux_x86_64
+
+lib_arch = amd64
+
+compiler = sparcWorks
+
+gnu_dis_arch = amd64
+
+sysdefs = -DLINUX -DSPARC_WORKS -D_GNU_SOURCE -DAMD64
diff -r 8b6e49187640 -r 75b0f3cb1943 build/linux/platform_i486.suncc
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/build/linux/platform_i486.suncc Thu Mar 13 14:17:48 2008 -0700
@@ -0,0 +1,17 @@
+os_family = linux
+
+arch = x86
+
+arch_model = x86_32
+
+os_arch = linux_x86
+
+os_arch_model = linux_x86_32
+
+lib_arch = i386
+
+compiler = sparcWorks
+
+gnu_dis_arch = i386
+
+sysdefs = -DLINUX -DSPARC_WORKS -D_GNU_SOURCE -DIA32
diff -r 8b6e49187640 -r 75b0f3cb1943 src/cpu/x86/vm/assembler_x86_64.cpp
--- a/src/cpu/x86/vm/assembler_x86_64.cpp Thu Mar 13 05:40:44 2008 -0700
+++ b/src/cpu/x86/vm/assembler_x86_64.cpp Thu Mar 13 14:17:48 2008 -0700
@@ -1304,7 +1304,7 @@
   emit_operand(src, dst);
 }
 
-void Assembler::mov64(Register dst, int64_t imm64) {
+void Assembler::mov64(Register dst, intptr_t imm64) {
   InstructionMark im(this);
   int encode = prefixq_and_encode(dst->encoding());
   emit_byte(0xB8 | encode);
@@ -1331,7 +1331,7 @@
   emit_operand(dst, src);
 }
 
-void Assembler::mov64(Address dst, int64_t imm32) {
+void Assembler::mov64(Address dst, intptr_t imm32) {
   assert(is_simm32(imm32), "lost bits");
   InstructionMark im(this);
   prefixq(dst);
diff -r 8b6e49187640 -r 75b0f3cb1943 src/os/linux/vm/attachListener_linux.cpp
--- a/src/os/linux/vm/attachListener_linux.cpp Thu Mar 13 05:40:44 2008 -0700
+++ b/src/os/linux/vm/attachListener_linux.cpp Thu Mar 13 14:17:48 2008 -0700
@@ -232,7 +232,7 @@
   // where is the protocol version (1), is the command
   // name ("load", "datadump", ...), and is an argument
   int expected_str_count = 2 + AttachOperation::arg_count_max;
-  int max_len = (strlen(ver_str) + 1) + (AttachOperation::name_length_max + 1) +
+  const int max_len = (sizeof(ver_str) + 1) + (AttachOperation::name_length_max + 1) +
     AttachOperation::arg_count_max*(AttachOperation::arg_length_max + 1);
 
   char buf[max_len];
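The attachListener_linux.cpp hunk above replaces a runtime strlen() with sizeof and turns max_len into a const int, so that char buf[max_len] has a compile-time-constant bound; gcc accepts variable-length arrays as an extension, while Sun Studio's C++ compiler does not. A small stand-alone sketch of the same pattern, with made-up constants in place of the AttachOperation limits:

    #include <cstdio>

    static const char version_str[] = "1";   // an array, so sizeof() is a compile-time constant
    static const int  name_max    = 16;
    static const int  arg_count   = 3;
    static const int  arg_len_max = 1024;

    int main() {
      // Every operand is a constant expression, so buf is a plain fixed-size
      // array rather than a gcc-style variable-length array.
      const int max_len = (sizeof(version_str) + 1) + (name_max + 1) +
                          arg_count * (arg_len_max + 1);
      char buf[max_len];
      buf[0] = '\0';
      std::printf("request buffer holds %d bytes\n", max_len);
      return 0;
    }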
diff -r 8b6e49187640 -r 75b0f3cb1943 src/os_cpu/linux_x86/vm/bytes_linux_x86.inline.hpp
--- a/src/os_cpu/linux_x86/vm/bytes_linux_x86.inline.hpp Thu Mar 13 05:40:44 2008 -0700
+++ b/src/os_cpu/linux_x86/vm/bytes_linux_x86.inline.hpp Thu Mar 13 14:17:48 2008 -0700
@@ -60,7 +60,18 @@
 
 #ifdef AMD64
 inline u8 Bytes::swap_u8(u8 x) {
+#ifdef SPARC_WORKS
+  // workaround for SunStudio12 CR6615391
+  __asm__ __volatile__ (
+    "bswapq %0"
+    :"=r" (x)    // output : register 0 => x
+    :"0"  (x)    // input  : x => register 0
+    :"0"         // clobbered register
+  );
+  return x;
+#else
   return bswap_64(x);
+#endif
 }
 #else
 // Helper function for swap_u8
diff -r 8b6e49187640 -r 75b0f3cb1943 src/os_cpu/linux_x86/vm/os_linux_x86.cpp
--- a/src/os_cpu/linux_x86/vm/os_linux_x86.cpp Thu Mar 13 05:40:44 2008 -0700
+++ b/src/os_cpu/linux_x86/vm/os_linux_x86.cpp Thu Mar 13 14:17:48 2008 -0700
@@ -62,8 +62,14 @@
 #endif // AMD64
 
 address os::current_stack_pointer() {
+#ifdef SPARC_WORKS
+  register void *esp;
+  __asm__("mov %%"SPELL_REG_SP", %0":"=r"(esp));
+  return (address) ((char*)esp + sizeof(long)*2);
+#else
   register void *esp __asm__ (SPELL_REG_SP);
   return (address) esp;
+#endif
 }
 
 char* os::non_memory_address_word() {
@@ -139,7 +145,12 @@
 }
 
 intptr_t* _get_previous_fp() {
+#ifdef SPARC_WORKS
+  register intptr_t **ebp;
+  __asm__("mov %%"SPELL_REG_FP", %0":"=r"(ebp));
+#else
   register intptr_t **ebp __asm__ (SPELL_REG_FP);
+#endif
   return (intptr_t*) *ebp;   // we want what it points to.
 }
 
@@ -560,7 +571,9 @@
 #else
 size_t os::Linux::min_stack_allowed = (48 DEBUG_ONLY(+4))*K;
 
+#ifdef __GNUC__
 #define GET_GS() ({int gs; __asm__ volatile("movw %%gs, %w0":"=q"(gs)); gs&0xffff;})
+#endif
 
 // Test if pthread library can support variable thread stack size. LinuxThreads
 // in fixed stack mode allocates 2M fixed slot for each thread. LinuxThreads
@@ -591,7 +604,11 @@
     // return true and skip _thread_safety_check(), so we may not be able to
     // detect stack-heap collisions. But otherwise it's harmless.
     //
+#ifdef __GNUC__
     return (GET_GS() != 0);
+#else
+    return false;
+#endif
   }
 }
 #endif // AMD64
diff -r 8b6e49187640 -r 75b0f3cb1943 src/share/vm/classfile/dictionary.cpp
--- a/src/share/vm/classfile/dictionary.cpp Thu Mar 13 05:40:44 2008 -0700
+++ b/src/share/vm/classfile/dictionary.cpp Thu Mar 13 14:17:48 2008 -0700
@@ -155,8 +155,8 @@
     for (int i = ik->previous_versions()->length() - 1; i >= 0; i--) {
       // check the previous versions array for GC'ed weak refs
       PreviousVersionNode * pv_node = ik->previous_versions()->at(i);
-      jweak cp_ref = pv_node->prev_constant_pool();
-      assert(cp_ref != NULL, "weak cp ref was unexpectedly cleared");
+      jobject cp_ref = pv_node->prev_constant_pool();
+      assert(cp_ref != NULL, "cp ref was unexpectedly cleared");
       if (cp_ref == NULL) {
         delete pv_node;
         ik->previous_versions()->remove_at(i);
diff -r 8b6e49187640 -r 75b0f3cb1943 src/share/vm/includeDB_core
--- a/src/share/vm/includeDB_core Thu Mar 13 05:40:44 2008 -0700
+++ b/src/share/vm/includeDB_core Thu Mar 13 14:17:48 2008 -0700
@@ -3067,6 +3067,7 @@
 oopMapCache.cpp    allocation.inline.hpp
 oopMapCache.cpp    handles.inline.hpp
+oopMapCache.cpp    jvmtiRedefineClassesTrace.hpp
 oopMapCache.cpp    oop.inline.hpp
 oopMapCache.cpp    oopMapCache.hpp
 oopMapCache.cpp    resourceArea.hpp
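The swap_u8() hunk in bytes_linux_x86.inline.hpp above sidesteps a Sun Studio 12 problem with bswap_64 (CR6615391) by emitting the bswapq instruction directly. For comparison only, a compiler-neutral byte swap that needs neither <byteswap.h> nor inline assembly can be built from shifts and masks; this is an illustration, not what the patch uses:

    #include <stdint.h>

    // Portable 64-bit byte swap: swap 32-bit halves, then 16-bit pairs, then bytes.
    static inline uint64_t swap_u8_portable(uint64_t x) {
      x = ((x & 0x00000000FFFFFFFFULL) << 32) | ((x & 0xFFFFFFFF00000000ULL) >> 32);
      x = ((x & 0x0000FFFF0000FFFFULL) << 16) | ((x & 0xFFFF0000FFFF0000ULL) >> 16);
      x = ((x & 0x00FF00FF00FF00FFULL) <<  8) | ((x & 0xFF00FF00FF00FF00ULL) >>  8);
      return x;
    }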
diff -r 8b6e49187640 -r 75b0f3cb1943 src/share/vm/interpreter/oopMapCache.cpp
--- a/src/share/vm/interpreter/oopMapCache.cpp Thu Mar 13 05:40:44 2008 -0700
+++ b/src/share/vm/interpreter/oopMapCache.cpp Thu Mar 13 14:17:48 2008 -0700
@@ -532,6 +532,10 @@
     if (!_array[i].is_empty() && _array[i].method()->is_old()) {
       // Cache entry is occupied by an old redefined method and we don't want
       // to pin it down so flush the entry.
+      RC_TRACE(0x08000000, ("flush: %s(%s): cached entry @%d",
+        _array[i].method()->name()->as_C_string(),
+        _array[i].method()->signature()->as_C_string(), i));
+
       _array[i].flush();
     }
   }
@@ -577,6 +581,15 @@
   // Entry is not in hashtable.
   // Compute entry and return it
 
+  if (method->should_not_be_cached()) {
+    // It is either not safe or not a good idea to cache this methodOop
+    // at this time. We give the caller of lookup() a copy of the
+    // interesting info via parameter entry_for, but we don't add it to
+    // the cache. See the gory details in methodOop.cpp.
+    compute_one_oop_map(method, bci, entry_for);
+    return;
+  }
+
   // First search for an empty slot
   for(i = 0; i < _probe_depth; i++) {
     entry = entry_at(probe + i);
@@ -584,12 +597,6 @@
       entry->fill(method, bci);
       entry_for->resource_copy(entry);
       assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
-      if (method->is_old()) {
-        // The caller of lookup() will receive a copy of the interesting
-        // info via entry_for, but we don't keep an old redefined method in
-        // the cache to avoid pinning down the method.
-        entry->flush();
-      }
       return;
     }
   }
@@ -623,13 +630,6 @@
   }
   assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
 
-  if (method->is_old()) {
-    // The caller of lookup() will receive a copy of the interesting
-    // info via entry_for, but we don't keep an old redefined method in
-    // the cache to avoid pinning down the method.
-    entry->flush();
-  }
-
   return;
 }
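The lookup() changes above replace the old flush-after-hit handling of redefined methods with an up-front guard: when a method should not be cached, its oop map is computed straight into the caller's entry_for and the table is never touched. Reduced to a generic shape with hypothetical types (the real code calls compute_one_oop_map() on OopMapCacheEntry objects), the flow looks roughly like this:

    #include <string>
    #include <unordered_map>

    // Hypothetical stand-ins for methodOop / OopMapCacheEntry.
    struct MethodKey {
      std::string name;
      bool redefined;   // plays the role of should_not_be_cached()
      bool should_not_be_cached() const { return redefined; }
    };

    static std::string compute_oop_map(const MethodKey& m) {
      return "oopmap-for-" + m.name;   // placeholder for the expensive computation
    }

    // Compute-but-don't-cache guard, mirroring the new lookup() flow.
    std::string lookup(std::unordered_map<std::string, std::string>& cache,
                       const MethodKey& m) {
      if (m.should_not_be_cached()) {
        return compute_oop_map(m);     // caller gets a private copy; cache untouched
      }
      auto it = cache.find(m.name);
      if (it != cache.end()) return it->second;
      std::string v = compute_oop_map(m);
      cache.emplace(m.name, v);        // normal path: keep the result
      return v;
    }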
diff -r 8b6e49187640 -r 75b0f3cb1943 src/share/vm/memory/compactingPermGenGen.cpp
--- a/src/share/vm/memory/compactingPermGenGen.cpp Thu Mar 13 05:40:44 2008 -0700
+++ b/src/share/vm/memory/compactingPermGenGen.cpp Thu Mar 13 14:17:48 2008 -0700
@@ -26,9 +26,27 @@
 #include "incls/_compactingPermGenGen.cpp.incl"
 
-// Recursively adjust all pointers in an object and all objects by
-// referenced it. Clear marks on objects in order to prevent visiting
-// any object twice.
+// An ObjectClosure helper: Recursively adjust all pointers in an object
+// and all objects by referenced it. Clear marks on objects in order to
+// prevent visiting any object twice. This helper is used when the
+// RedefineClasses() API has been called.
+
+class AdjustSharedObjectClosure : public ObjectClosure {
+public:
+  void do_object(oop obj) {
+    if (obj->is_shared_readwrite()) {
+      if (obj->mark()->is_marked()) {
+        obj->init_mark();         // Don't revisit this object.
+        obj->adjust_pointers();   // Adjust this object's references.
+      }
+    }
+  }
+};
+
+
+// An OopClosure helper: Recursively adjust all pointers in an object
+// and all objects by referenced it. Clear marks on objects in order
+// to prevent visiting any object twice.
 
 class RecursiveAdjustSharedObjectClosure : public OopClosure {
 public:
@@ -274,15 +292,34 @@
 // objects in the space will page in more objects than we need.
 // Instead, use the system dictionary as strong roots into the read
 // write space.
+//
+// If a RedefineClasses() call has been made, then we have to iterate
+// over the entire shared read-write space in order to find all the
+// objects that need to be forwarded. For example, it is possible for
+// an nmethod to be found and marked in GC phase-1 only for the nmethod
+// to be freed by the time we reach GC phase-3. The underlying method
+// is still marked, but we can't (easily) find it in GC phase-3 so we
+// blow up in GC phase-4. With RedefineClasses() we want replaced code
+// (EMCP or obsolete) to go away (i.e., be collectible) once it is no
+// longer being executed by any thread so we keep minimal attachments
+// to the replaced code. However, we can't guarantee when those EMCP
+// or obsolete methods will be collected so they may still be out there
+// even after we've severed our minimal attachments.
 
 void CompactingPermGenGen::pre_adjust_pointers() {
   if (spec()->enable_shared_spaces()) {
-    RecursiveAdjustSharedObjectClosure blk;
-    Universe::oops_do(&blk);
-    StringTable::oops_do(&blk);
-    SystemDictionary::always_strong_classes_do(&blk);
-    TraversePlaceholdersClosure tpc;
-    SystemDictionary::placeholders_do(&tpc);
+    if (JvmtiExport::has_redefined_a_class()) {
+      // RedefineClasses() requires a brute force approach
+      AdjustSharedObjectClosure blk;
+      rw_space()->object_iterate(&blk);
+    } else {
+      RecursiveAdjustSharedObjectClosure blk;
+      Universe::oops_do(&blk);
+      StringTable::oops_do(&blk);
+      SystemDictionary::always_strong_classes_do(&blk);
+      TraversePlaceholdersClosure tpc;
+      SystemDictionary::placeholders_do(&tpc);
+    }
   }
 }
diff -r 8b6e49187640 -r 75b0f3cb1943 src/share/vm/oops/instanceKlass.cpp
--- a/src/share/vm/oops/instanceKlass.cpp Thu Mar 13 05:40:44 2008 -0700
+++ b/src/share/vm/oops/instanceKlass.cpp Thu Mar 13 14:17:48 2008 -0700
@@ -972,7 +972,6 @@
   // These allocations will have to be freed if they are unused.
 
   // Allocate a new array of methods.
-  jmethodID* to_dealloc_jmeths = NULL;
   jmethodID* new_jmeths = NULL;
   if (length <= idnum) {
     // A new array will be needed (unless some other thread beats us to it)
@@ -983,7 +982,6 @@
   }
 
   // Allocate a new method ID.
-  jmethodID to_dealloc_id = NULL;
   jmethodID new_id = NULL;
   if (method_h->is_old() && !method_h->is_obsolete()) {
     // The method passed in is old (but not obsolete), we need to use the current version
@@ -997,40 +995,51 @@
     new_id = JNIHandles::make_jmethod_id(method_h);
   }
 
-  {
+  if (Threads::number_of_threads() == 0 || SafepointSynchronize::is_at_safepoint()) {
+    // No need and unsafe to lock the JmethodIdCreation_lock at safepoint.
+    id = get_jmethod_id(ik_h, idnum, new_id, new_jmeths);
+  } else {
     MutexLocker ml(JmethodIdCreation_lock);
+    id = get_jmethod_id(ik_h, idnum, new_id, new_jmeths);
+  }
+  }
+  return id;
+}
-    // We must not go to a safepoint while holding this lock.
-    debug_only(No_Safepoint_Verifier nosafepoints;)
+
+jmethodID instanceKlass::get_jmethod_id(instanceKlassHandle ik_h, size_t idnum,
+                                        jmethodID new_id, jmethodID* new_jmeths) {
+  // Retry lookup after we got the lock or ensured we are at safepoint
+  jmethodID* jmeths = ik_h->methods_jmethod_ids_acquire();
+  jmethodID  id = NULL;
+  jmethodID  to_dealloc_id = NULL;
+  jmethodID* to_dealloc_jmeths = NULL;
+  size_t length;
-    // Retry lookup after we got the lock
-    jmeths = ik_h->methods_jmethod_ids_acquire();
-    if (jmeths == NULL || (length = (size_t)jmeths[0]) <= idnum) {
-      if (jmeths != NULL) {
-        // We have grown the array: copy the existing entries, and delete the old array
-        for (size_t index = 0; index < length; index++) {
-          new_jmeths[index+1] = jmeths[index+1];
-        }
-        to_dealloc_jmeths = jmeths;  // using the new jmeths, deallocate the old one
-      }
-      ik_h->release_set_methods_jmethod_ids(jmeths = new_jmeths);
-    } else {
-      id = jmeths[idnum+1];
-      to_dealloc_jmeths = new_jmeths;  // using the old jmeths, deallocate the new one
+  if (jmeths == NULL || (length = (size_t)jmeths[0]) <= idnum) {
+    if (jmeths != NULL) {
+      // We have grown the array: copy the existing entries, and delete the old array
+      for (size_t index = 0; index < length; index++) {
+        new_jmeths[index+1] = jmeths[index+1];
       }
-    if (id == NULL) {
-      id = new_id;
-      jmeths[idnum+1] = id;  // install the new method ID
-    } else {
-      to_dealloc_id = new_id;  // the new id wasn't used, mark it for deallocation
-    }
+      to_dealloc_jmeths = jmeths;  // using the new jmeths, deallocate the old one
     }
+    ik_h->release_set_methods_jmethod_ids(jmeths = new_jmeths);
+  } else {
+    id = jmeths[idnum+1];
+    to_dealloc_jmeths = new_jmeths;  // using the old jmeths, deallocate the new one
+  }
+  if (id == NULL) {
+    id = new_id;
+    jmeths[idnum+1] = id;  // install the new method ID
+  } else {
+    to_dealloc_id = new_id;  // the new id wasn't used, mark it for deallocation
+  }
-  // Free up unneeded or no longer needed resources
-  FreeHeap(to_dealloc_jmeths);
-  if (to_dealloc_id != NULL) {
-    JNIHandles::destroy_jmethod_id(to_dealloc_id);
-  }
+  // Free up unneeded or no longer needed resources
+  FreeHeap(to_dealloc_jmeths);
+  if (to_dealloc_id != NULL) {
+    JNIHandles::destroy_jmethod_id(to_dealloc_id);
   }
   return id;
 }
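In the hunk above, jmethod_id_for_impl() now takes JmethodIdCreation_lock only while the VM is running normally; at a safepoint, or before any Java threads exist, it calls get_jmethod_id() directly, since blocking on the mutex there is both unnecessary and unsafe. A rough stand-alone sketch of that shape, using a standard mutex and a hypothetical at_safepoint() predicate rather than the HotSpot ones:

    #include <mutex>

    static std::mutex creation_lock;                     // stands in for JmethodIdCreation_lock
    static bool at_safepoint()        { return false; }  // hypothetical "world is stopped" predicate
    static int  do_create_or_lookup() { return 42; }     // placeholder for get_jmethod_id()'s work

    int create_or_lookup_id() {
      if (at_safepoint()) {
        // Nothing else runs concurrently at a safepoint, so taking the lock
        // gains nothing and could block; call the worker directly.
        return do_create_or_lookup();
      }
      std::lock_guard<std::mutex> guard(creation_lock);
      return do_create_or_lookup();
    }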
@@ -2187,12 +2196,20 @@
     RC_TRACE(0x00000100, ("adding previous version ref for %s @%d, EMCP_cnt=%d",
       ikh->external_name(), _previous_versions->length(), emcp_method_count));
     constantPoolHandle cp_h(ikh->constants());
-    jweak cp_ref = JNIHandles::make_weak_global(cp_h);
+    jobject cp_ref;
+    if (cp_h->is_shared()) {
+      // a shared ConstantPool requires a regular reference; a weak
+      // reference would be collectible
+      cp_ref = JNIHandles::make_global(cp_h);
+    } else {
+      cp_ref = JNIHandles::make_weak_global(cp_h);
+    }
     PreviousVersionNode * pv_node = NULL;
     objArrayOop old_methods = ikh->methods();
 
     if (emcp_method_count == 0) {
-      pv_node = new PreviousVersionNode(cp_ref, NULL);
+      // non-shared ConstantPool gets a weak reference
+      pv_node = new PreviousVersionNode(cp_ref, !cp_h->is_shared(), NULL);
       RC_TRACE(0x00000400, ("add: all methods are obsolete; flushing any EMCP weak refs"));
     } else {
@@ -2212,7 +2229,8 @@
         }
       }
     }
-    pv_node = new PreviousVersionNode(cp_ref, method_refs);
+    // non-shared ConstantPool gets a weak reference
+    pv_node = new PreviousVersionNode(cp_ref, !cp_h->is_shared(), method_refs);
   }
 
   _previous_versions->append(pv_node);
@@ -2230,7 +2248,7 @@
     // check the previous versions array for a GC'ed weak refs
     pv_node = _previous_versions->at(i);
     cp_ref = pv_node->prev_constant_pool();
-    assert(cp_ref != NULL, "weak cp ref was unexpectedly cleared");
+    assert(cp_ref != NULL, "cp ref was unexpectedly cleared");
     if (cp_ref == NULL) {
       delete pv_node;
       _previous_versions->remove_at(i);
@@ -2303,7 +2321,7 @@
     // check the previous versions array for a GC'ed weak refs
     pv_node = _previous_versions->at(j);
     cp_ref = pv_node->prev_constant_pool();
-    assert(cp_ref != NULL, "weak cp ref was unexpectedly cleared");
+    assert(cp_ref != NULL, "cp ref was unexpectedly cleared");
     if (cp_ref == NULL) {
       delete pv_node;
       _previous_versions->remove_at(j);
@@ -2401,8 +2419,8 @@
     // been GC'ed
     PreviousVersionNode * pv_node = _previous_versions->at(i);
 
-    jweak cp_ref = pv_node->prev_constant_pool();
-    assert(cp_ref != NULL, "weak reference was unexpectedly cleared");
+    jobject cp_ref = pv_node->prev_constant_pool();
+    assert(cp_ref != NULL, "cp reference was unexpectedly cleared");
     if (cp_ref == NULL) {
       continue;  // robustness
     }
@@ -2462,10 +2480,11 @@
 
 // Construct a PreviousVersionNode entry for the array hung off
 // the instanceKlass.
-PreviousVersionNode::PreviousVersionNode(jweak prev_constant_pool,
-  GrowableArray* prev_EMCP_methods) {
+PreviousVersionNode::PreviousVersionNode(jobject prev_constant_pool,
+  bool prev_cp_is_weak, GrowableArray* prev_EMCP_methods) {
 
   _prev_constant_pool = prev_constant_pool;
+  _prev_cp_is_weak = prev_cp_is_weak;
   _prev_EMCP_methods = prev_EMCP_methods;
 }
@@ -2473,7 +2492,11 @@
 // Destroy a PreviousVersionNode
 PreviousVersionNode::~PreviousVersionNode() {
   if (_prev_constant_pool != NULL) {
-    JNIHandles::destroy_weak_global(_prev_constant_pool);
+    if (_prev_cp_is_weak) {
+      JNIHandles::destroy_weak_global(_prev_constant_pool);
+    } else {
+      JNIHandles::destroy_global(_prev_constant_pool);
+    }
   }
 
   if (_prev_EMCP_methods != NULL) {
@@ -2493,8 +2516,8 @@
   _prev_constant_pool_handle = constantPoolHandle();  // NULL handle
   _prev_EMCP_method_handles = NULL;
 
-  jweak cp_ref = pv_node->prev_constant_pool();
-  assert(cp_ref != NULL, "weak constant pool ref was unexpectedly cleared");
+  jobject cp_ref = pv_node->prev_constant_pool();
+  assert(cp_ref != NULL, "constant pool ref was unexpectedly cleared");
   if (cp_ref == NULL) {
     return;  // robustness
   }
diff -r 8b6e49187640 -r 75b0f3cb1943 src/share/vm/oops/instanceKlass.hpp
--- a/src/share/vm/oops/instanceKlass.hpp Thu Mar 13 05:40:44 2008 -0700
+++ b/src/share/vm/oops/instanceKlass.hpp Thu Mar 13 14:17:48 2008 -0700
@@ -432,6 +432,8 @@
     _enclosing_method_method_index = method_index; }
 
   // jmethodID support
+  static jmethodID get_jmethod_id(instanceKlassHandle ik_h, size_t idnum,
+                                  jmethodID new_id, jmethodID* new_jmeths);
   static jmethodID jmethod_id_for_impl(instanceKlassHandle ik_h, methodHandle method_h);
   jmethodID jmethod_id_or_null(methodOop method);
 
@@ -838,11 +840,20 @@
 // A collection point for interesting information about the previous
 // version(s) of an instanceKlass. This class uses weak references to
 // the information so that the information may be collected as needed
-// by the system. A GrowableArray of PreviousVersionNodes is attached
+// by the system. If the information is shared, then a regular
+// reference must be used because a weak reference would be seen as
+// collectible. A GrowableArray of PreviousVersionNodes is attached
 // to the instanceKlass as needed. See PreviousVersionWalker below.
 class PreviousVersionNode : public CHeapObj {
  private:
-  jweak _prev_constant_pool;
+  // A shared ConstantPool is never collected so we'll always have
+  // a reference to it so we can update items in the cache. We'll
+  // have a weak reference to a non-shared ConstantPool until all
+  // of the methods (EMCP or obsolete) have been collected; the
+  // non-shared ConstantPool becomes collectible at that point.
+  jobject _prev_constant_pool;  // regular or weak reference
+  bool    _prev_cp_is_weak;     // true if not a shared ConstantPool
+
   // If the previous version of the instanceKlass doesn't have any
   // EMCP methods, then _prev_EMCP_methods will be NULL. If all the
   // EMCP methods have been collected, then _prev_EMCP_methods can
@@ -850,10 +861,10 @@
   GrowableArray* _prev_EMCP_methods;
 
  public:
-  PreviousVersionNode(jweak prev_constant_pool,
+  PreviousVersionNode(jobject prev_constant_pool, bool prev_cp_is_weak,
     GrowableArray* prev_EMCP_methods);
   ~PreviousVersionNode();
-  jweak prev_constant_pool() const {
+  jobject prev_constant_pool() const {
     return _prev_constant_pool;
   }
   GrowableArray* prev_EMCP_methods() const {
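PreviousVersionNode above now records which flavor of JNI handle it owns (_prev_cp_is_weak) so its destructor can release the handle with the matching destroy call: a global reference for a shared ConstantPool, a weak one otherwise. The general create/destroy pairing looks roughly like this sketch; the handle functions here are stubs standing in for JNIHandles::make_global / make_weak_global and their destroy_ counterparts:

    typedef void* handle_t;

    static handle_t make_strong_ref(void* obj) { return obj; }   // stub for make_global()
    static handle_t make_weak_ref(void* obj)   { return obj; }   // stub for make_weak_global()
    static void     destroy_strong_ref(handle_t) {}              // stub for destroy_global()
    static void     destroy_weak_ref(handle_t)   {}              // stub for destroy_weak_global()

    class RefHolder {
      handle_t _ref;
      bool     _is_weak;   // remember which make_* created _ref
     public:
      RefHolder(void* obj, bool never_collected) : _is_weak(!never_collected) {
        // Objects that are never collected (e.g. a shared ConstantPool) get a
        // strong ref; everything else stays collectible behind a weak ref.
        _ref = _is_weak ? make_weak_ref(obj) : make_strong_ref(obj);
      }
      ~RefHolder() {
        // Release with the call that matches how the ref was created.
        if (_is_weak) destroy_weak_ref(_ref); else destroy_strong_ref(_ref);
      }
    };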
diff -r 8b6e49187640 -r 75b0f3cb1943 src/share/vm/oops/markOop.cpp
--- a/src/share/vm/oops/markOop.cpp Thu Mar 13 05:40:44 2008 -0700
+++ b/src/share/vm/oops/markOop.cpp Thu Mar 13 14:17:48 2008 -0700
@@ -37,3 +37,32 @@
     st->print("age %d)", age());
   }
 }
+
+
+// Give advice about whether the oop that contains this markOop
+// should be cached or not.
+bool markOopDesc::should_not_be_cached() const {
+  // the cast is because decode_pointer() isn't marked const
+  if (is_marked() && ((markOopDesc *)this)->decode_pointer() != NULL) {
+    // If the oop containing this markOop is being forwarded, then
+    // we are in the middle of GC and we do not want the containing
+    // oop to be added to a cache. We have no way of knowing whether
+    // the cache has already been visited by the current GC phase so
+    // we don't know whether the forwarded oop will be properly
+    // processed in this phase. If the forwarded oop is not properly
+    // processed, then we'll see strange crashes or asserts during
+    // the next GC run because the markOop will contain an unexpected
+    // value.
+    //
+    // This situation has been seen when we are GC'ing a methodOop
+    // because we use the methodOop while we're GC'ing it. Scary
+    // stuff. Some of the uses the methodOop cause the methodOop to
+    // be added to the OopMapCache in the instanceKlass as a side
+    // effect. This check lets the cache maintainer know when a
+    // cache addition would not be safe.
+    return true;
+  }
+
+  // caching the containing oop should be just fine
+  return false;
+}
diff -r 8b6e49187640 -r 75b0f3cb1943 src/share/vm/oops/markOop.hpp
--- a/src/share/vm/oops/markOop.hpp Thu Mar 13 05:40:44 2008 -0700
+++ b/src/share/vm/oops/markOop.hpp Thu Mar 13 14:17:48 2008 -0700
@@ -357,4 +357,7 @@
 
   // Recover address of oop from encoded form used in mark
   inline void* decode_pointer() { if (UseBiasedLocking && has_bias_pattern()) return NULL; return clear_lock_bits(); }
+
+  // see the definition in markOop.cpp for the gory details
+  bool should_not_be_cached() const;
 };
diff -r 8b6e49187640 -r 75b0f3cb1943 src/share/vm/oops/methodOop.cpp
--- a/src/share/vm/oops/methodOop.cpp Thu Mar 13 05:40:44 2008 -0700
+++ b/src/share/vm/oops/methodOop.cpp Thu Mar 13 14:17:48 2008 -0700
@@ -765,6 +765,28 @@
 }
 
 
+// give advice about whether this methodOop should be cached or not
+bool methodOopDesc::should_not_be_cached() const {
+  if (is_old()) {
+    // This method has been redefined. It is either EMCP or obsolete
+    // and we don't want to cache it because that would pin the method
+    // down and prevent it from being collectible if and when it
+    // finishes executing.
+    return true;
+  }
+
+  if (mark()->should_not_be_cached()) {
+    // It is either not safe or not a good idea to cache this
+    // method at this time because of the state of the embedded
+    // markOop. See markOop.cpp for the gory details.
+    return true;
+  }
+
+  // caching this method should be just fine
+  return false;
+}
+
+
 methodHandle methodOopDesc:: clone_with_new_data(methodHandle m, u_char* new_code, int new_code_length,
   u_char* new_compressed_linenumber_table, int new_compressed_linenumber_size, TRAPS) {
   // Code below does not work for native methods - they should never get rewritten anyway
diff -r 8b6e49187640 -r 75b0f3cb1943 src/share/vm/oops/methodOop.hpp
--- a/src/share/vm/oops/methodOop.hpp Thu Mar 13 05:40:44 2008 -0700
+++ b/src/share/vm/oops/methodOop.hpp Thu Mar 13 14:17:48 2008 -0700
@@ -524,6 +524,8 @@
   void set_is_old() { _access_flags.set_is_old(); }
   bool is_obsolete() const { return access_flags().is_obsolete(); }
   void set_is_obsolete() { _access_flags.set_is_obsolete(); }
+  // see the definition in methodOop.cpp for the gory details
+  bool should_not_be_cached() const;
 
   // JVMTI Native method prefixing support:
   bool is_prefixed_native() const { return access_flags().is_prefixed_native(); }
diff -r 8b6e49187640 -r 75b0f3cb1943 src/share/vm/prims/jvmtiRedefineClassesTrace.hpp
--- a/src/share/vm/prims/jvmtiRedefineClassesTrace.hpp Thu Mar 13 05:40:44 2008 -0700
+++ b/src/share/vm/prims/jvmtiRedefineClassesTrace.hpp Thu Mar 13 14:17:48 2008 -0700
@@ -64,7 +64,7 @@
 //    0x01000000 |   16777216 - impl details: nmethod evolution info
 //    0x02000000 |   33554432 - impl details: annotation updates
 //    0x04000000 |   67108864 - impl details: StackMapTable updates
-//    0x08000000 |  134217728 - unused
+//    0x08000000 |  134217728 - impl details: OopMapCache updates
 //    0x10000000 |  268435456 - unused
 //    0x20000000 |  536870912 - unused
 //    0x40000000 | 1073741824 - unused
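The table above assigns the previously unused 0x08000000 bit to OopMapCache tracing, which is the level passed by the new RC_TRACE(0x08000000, ...) call in oopMapCache.cpp. Presumably RC_TRACE masks its level against the RedefineClasses tracing flag before printing; a stand-alone sketch of that style of bit-gated tracing, with hypothetical flag and function names rather than the HotSpot ones:

    #include <cstdarg>
    #include <cstdio>

    // Hypothetical global playing the role of the RedefineClasses trace flag.
    static unsigned long trace_flags = 0x08000000;   // e.g. enable OopMapCache updates

    // Print only when the caller's level bit is set in trace_flags.
    static void trace_if_enabled(unsigned long level, const char* fmt, ...) {
      if ((trace_flags & level) == 0) return;
      va_list ap;
      va_start(ap, fmt);
      std::vfprintf(stderr, fmt, ap);
      std::fputc('\n', stderr);
      va_end(ap);
    }

    int main() {
      trace_if_enabled(0x08000000, "flush: %s(%s): cached entry @%d", "foo", "()V", 3);
      return 0;
    }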
diff -r 8b6e49187640 -r 75b0f3cb1943 src/share/vm/runtime/reflection.cpp
--- a/src/share/vm/runtime/reflection.cpp Thu Mar 13 05:40:44 2008 -0700
+++ b/src/share/vm/runtime/reflection.cpp Thu Mar 13 14:17:48 2008 -0700
@@ -1548,10 +1548,11 @@
   }
 
   instanceKlassHandle klass(THREAD, java_lang_Class::as_klassOop(mirror));
-  if (!klass->methods()->is_within_bounds(slot)) {
+  methodOop m = klass->method_with_idnum(slot);
+  if (m == NULL) {
     THROW_MSG_0(vmSymbols::java_lang_InternalError(), "invoke");
   }
-  methodHandle method(THREAD, methodOop(klass->methods()->obj_at(slot)));
+  methodHandle method(THREAD, m);
 
   return invoke(klass, method, receiver, override, ptypes, rtype, args, true, THREAD);
 }
@@ -1564,10 +1565,11 @@
   objArrayHandle ptypes(THREAD, objArrayOop(java_lang_reflect_Constructor::parameter_types(constructor_mirror)));
 
   instanceKlassHandle klass(THREAD, java_lang_Class::as_klassOop(mirror));
-  if (!klass->methods()->is_within_bounds(slot)) {
+  methodOop m = klass->method_with_idnum(slot);
+  if (m == NULL) {
     THROW_MSG_0(vmSymbols::java_lang_InternalError(), "invoke");
   }
-  methodHandle method(THREAD, methodOop(klass->methods()->obj_at(slot)));
+  methodHandle method(THREAD, m);
   assert(method->name() == vmSymbols::object_initializer_name(), "invalid constructor");
 
   // Make sure klass gets initialize
diff -r 8b6e49187640 -r 75b0f3cb1943 src/share/vm/runtime/thread.cpp
--- a/src/share/vm/runtime/thread.cpp Thu Mar 13 05:40:44 2008 -0700
+++ b/src/share/vm/runtime/thread.cpp Thu Mar 13 14:17:48 2008 -0700
@@ -1317,10 +1317,6 @@
   ThreadSafepointState::destroy(this);
   if (_thread_profiler != NULL) delete _thread_profiler;
   if (_thread_stat != NULL) delete _thread_stat;
-
-  if (jvmti_thread_state() != NULL) {
-    JvmtiExport::cleanup_thread(this);
-  }
 }
 
@@ -1571,6 +1567,10 @@
     tlab().make_parsable(true);  // retire TLAB
   }
 
+  if (jvmti_thread_state() != NULL) {
+    JvmtiExport::cleanup_thread(this);
+  }
+
   // Remove from list of active threads list, and notify VM thread if we are the last non-daemon thread
   Threads::remove(this);
 }
diff -r 8b6e49187640 -r 75b0f3cb1943 src/share/vm/utilities/globalDefinitions_sparcWorks.hpp
--- a/src/share/vm/utilities/globalDefinitions_sparcWorks.hpp Thu Mar 13 05:40:44 2008 -0700
+++ b/src/share/vm/utilities/globalDefinitions_sparcWorks.hpp Thu Mar 13 14:17:48 2008 -0700
@@ -37,23 +37,45 @@
 # include
 # include
 # include
+#ifdef SOLARIS
 # include
+#endif
 # include
+#ifdef LINUX
+#ifndef FP_PZERO
+  // Linux doesn't have positive/negative zero
+  #define FP_PZERO FP_ZERO
+#endif
+#ifndef fpclass
+  #define fpclass fpclassify
+#endif
+#endif
 # include
 # include
 # include
 # include
+#ifdef SOLARIS
 # include
+#endif
 # include
 # include
+#ifdef SOLARIS
 # include
 # include
 # include
 # include
 # include
+#endif
 # ifdef SOLARIS_MUTATOR_LIBTHREAD
 # include
 # endif
+#ifdef LINUX
+# include
+# include
+# include
+# include
+#endif
+
 
 // 4810578: varargs unsafe on 32-bit integer/64-bit pointer architectures
 // When __cplusplus is defined, NULL is defined as 0 (32-bit constant) in
@@ -68,6 +90,11 @@
 // pointer when it extracts the argument, then we have a problem.
 //
 // Solution: For 64-bit architectures, redefine NULL as 64-bit constant 0.
+//
+// Note: this fix doesn't work well on Linux because NULL will be overwritten
+// whenever a system header file is included. Linux handles NULL correctly
+// through a special type '__null'.
+#ifdef SOLARIS
 #ifdef _LP64
 #undef NULL
 #define NULL 0L
@@ -76,13 +103,25 @@
 #define NULL 0
 #endif
 #endif
+#endif
 
 // NULL vs NULL_WORD:
 // On Linux NULL is defined as a special type '__null'. Assigning __null to
 // integer variable will cause gcc warning. Use NULL_WORD in places where a
-// pointer is stored as integer value.
-#define NULL_WORD NULL
+// pointer is stored as integer value. On some platforms, sizeof(intptr_t) >
+// sizeof(void*), so here we want something which is integer type, but has the
+// same size as a pointer.
+#ifdef LINUX
+  #ifdef _LP64
+    #define NULL_WORD  0L
+  #else
+    #define NULL_WORD  0
+  #endif
+#else
+  #define NULL_WORD  NULL
+#endif
 
+#ifndef LINUX
 // Compiler-specific primitive types
 typedef unsigned short uint16_t;
 #ifndef _UINT32_T
@@ -100,6 +139,7 @@
 // If this gets an error, figure out a symbol XXX that implies the
 // prior definition of intptr_t, and add "&& !defined(XXX)" above.
 #endif
+#endif
 
 // Additional Java basic types
 
@@ -128,7 +168,7 @@
 const jlong min_jlong = CONST64(0x8000000000000000);
 const jlong max_jlong = CONST64(0x7fffffffffffffff);
 
-
+#ifdef SOLARIS
 //----------------------------------------------------------------------------------------------------
 // ANSI C++ fixes
 // NOTE:In the ANSI committee's continuing attempt to make each version
@@ -162,7 +202,7 @@
 typedef int (*int_fnP_cond_tP_i_vP)(cond_t *cv, int scope, void *arg);
 typedef int (*int_fnP_cond_tP)(cond_t *cv);
 };
-
+#endif
 
 //----------------------------------------------------------------------------------------------------
 // Debugging
@@ -173,7 +213,7 @@
 #define BREAKPOINT ::breakpoint()
 
 // checking for nanness
-
+#ifdef SOLARIS
 #ifdef SPARC
 inline int g_isnan(float f) { return isnanf(f); }
 #else
@@ -182,6 +222,12 @@
 #endif
 inline int g_isnan(double f) { return isnand(f); }
+#elif LINUX
+inline int g_isnan(float f) { return isnanf(f); }
+inline int g_isnan(double f) { return isnan(f); }
+#else
+#error "missing platform-specific definition here"
+#endif
 
 // Checking for finiteness
 
@@ -195,9 +241,11 @@
 
 // Misc
 
+// NOTE: This one leads to an infinite recursion on Linux
+#ifndef LINUX
 int local_vsnprintf(char* buf, size_t count, const char* fmt, va_list argptr);
 #define vsnprintf local_vsnprintf
-
+#endif
 
 // Portability macros
 #define PRAGMA_INTERFACE
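The NULL_WORD block in globalDefinitions_sparcWorks.hpp above gives Linux an integer-typed zero with the same width as a pointer, per the surrounding comments: gcc's NULL is the special type __null, which warns when assigned to an integer variable, and a plain 0 is only 32 bits wide on LP64. A small self-contained illustration of the intended use, storing a pointer value in an integer slot; generic C++, not HotSpot code:

    #include <cstdint>
    #include <cstdio>

    // Mirrors the hunk above: an integer zero with the same width as a pointer.
    #ifdef _LP64
      #define NULL_WORD  0L
    #else
      #define NULL_WORD  0
    #endif

    int main() {
      // "Use NULL_WORD in places where a pointer is stored as integer value":
      // assigning NULL (__null under gcc) here would warn; an integer zero does not.
      intptr_t stored_pointer = NULL_WORD;
      std::printf("%ld\n", (long)stored_pointer);
      return 0;
    }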