# HG changeset patch
# User never
# Date 1274818729 25200
# Node ID f9a202dd8899ef5eaeb13d2eb344403dec79ca7b
# Parent  1747f04ad0c400ad4b577bca2c70a33ce9e542a5
# Parent  110501f54a99ee2c365d0af0e4ddfd604e13d000
Merge

diff -r 1747f04ad0c4 -r f9a202dd8899 make/solaris/makefiles/amd64.make
--- a/make/solaris/makefiles/amd64.make	Mon May 24 13:53:38 2010 -0700
+++ b/make/solaris/makefiles/amd64.make	Tue May 25 13:18:49 2010 -0700
@@ -33,14 +33,8 @@
 #
 ifeq ("${Platform_compiler}", "sparcWorks")
 
-# Temporary until C++ compiler is fixed
-
-# _lwp_create_interpose must have a frame
-OPT_CFLAGS/os_solaris_x86_64.o = -xO1
-
 # Temporary until SS10 C++ compiler is fixed
 OPT_CFLAGS/generateOptoStub.o = -xO2
-OPT_CFLAGS/thread.o = -xO2
 
 else
diff -r 1747f04ad0c4 -r f9a202dd8899 make/solaris/makefiles/fastdebug.make
--- a/make/solaris/makefiles/fastdebug.make	Mon May 24 13:53:38 2010 -0700
+++ b/make/solaris/makefiles/fastdebug.make	Tue May 25 13:18:49 2010 -0700
@@ -36,15 +36,15 @@
 ifeq ("${Platform_compiler}", "sparcWorks")
 OPT_CFLAGS/SLOWER = -xO2
 
-# Problem with SS12 compiler, dtrace doesn't like the .o files (bug 6693876)
 ifeq ($(COMPILER_REV_NUMERIC), 509)
-  # To avoid jvm98 crash
-  OPT_CFLAGS/instanceKlass.o = $(OPT_CFLAGS/SLOWER)
-  # Not clear this workaround could be skipped in some cases.
-  OPT_CFLAGS/vmGCOperations.o = $(OPT_CFLAGS/SLOWER)
-  OPT_CFLAGS/java.o = $(OPT_CFLAGS/SLOWER)
-  OPT_CFLAGS/jni.o = $(OPT_CFLAGS/SLOWER)
-endif
+# To avoid jvm98 crash
+OPT_CFLAGS/instanceKlass.o = $(OPT_CFLAGS/SLOWER)
+endif # COMPILER_NUMERIC_REV == 509
+
+ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 509), 1)
+# dtrace cannot handle tail call optimization (6672627, 6693876)
+OPT_CFLAGS/jni.o = $(OPT_CFLAGS/DEFAULT) $(OPT_CCFLAGS/NO_TAIL_CALL_OPT)
+endif # COMPILER_NUMERIC_REV >= 509
 
 ifeq ($(COMPILER_REV_NUMERIC), 505)
 # CC 5.5 has bug 4908364 with -xO4 (Fixed in 5.6)
diff -r 1747f04ad0c4 -r f9a202dd8899 make/solaris/makefiles/i486.make
--- a/make/solaris/makefiles/i486.make	Mon May 24 13:53:38 2010 -0700
+++ b/make/solaris/makefiles/i486.make	Tue May 25 13:18:49 2010 -0700
@@ -33,25 +33,6 @@
 # Special case flags for compilers and compiler versions on i486.
 #
 ifeq ("${Platform_compiler}", "sparcWorks")
-
-# _lwp_create_interpose must have a frame
-OPT_CFLAGS/os_solaris_x86.o = -xO1
-else
-
-ifeq ("${Platform_compiler}", "gcc")
-# gcc
-# _lwp_create_interpose must have a frame
-OPT_CFLAGS/os_solaris_x86.o = -fno-omit-frame-pointer
-#
-else
-# error
-_JUNK2_ := $(shell echo >&2 \
-  "*** ERROR: this compiler is not yet supported by this code base!")
-  @exit 1
-endif
-endif
-
-ifeq ("${Platform_compiler}", "sparcWorks")
 # ILD is gone as of SS11 (5.8), not supported in SS10 (5.7)
 ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \< 507), 1)
 #
diff -r 1747f04ad0c4 -r f9a202dd8899 make/solaris/makefiles/launcher.make
--- a/make/solaris/makefiles/launcher.make	Mon May 24 13:53:38 2010 -0700
+++ b/make/solaris/makefiles/launcher.make	Tue May 25 13:18:49 2010 -0700
@@ -80,15 +80,12 @@
 	} > $@
 
 $(LAUNCHER): $(LAUNCHER.o) $(LIBJVM) $(LAUNCHER_MAPFILE)
+ifeq ($(filter -sbfast -xsbfast, $(CFLAGS_BROWSE)),)
+	@echo Linking launcher...
+	$(QUIETLY) $(LINK_LAUNCHER/PRE_HOOK)
 	$(QUIETLY) \
-	case "$(CFLAGS_BROWSE)" in \
-	-sbfast|-xsbfast) \
-	;; \
-	*) \
-	echo Linking launcher...; \
-	$(LINK_LAUNCHER/PRE_HOOK) \
-	$(LINK_LAUNCHER) $(LFLAGS_LAUNCHER) -o $@ $(LAUNCHER.o) $(LIBS_LAUNCHER); \
-	$(LINK_LAUNCHER/POST_HOOK) \
-	[ -f $(LAUNCHER_G) ] || { ln -s $@ $(LAUNCHER_G); }; \
-	;; \
-	esac
+	$(LINK_LAUNCHER) $(LFLAGS_LAUNCHER) -o $@ $(LAUNCHER.o) $(LIBS_LAUNCHER)
+	$(QUIETLY) $(LINK_LAUNCHER/POST_HOOK)
+	[ -f $(LAUNCHER_G) ] || ln -s $@ $(LAUNCHER_G)
+endif # filter -sbfast -xsbfast
+
diff -r 1747f04ad0c4 -r f9a202dd8899 make/solaris/makefiles/optimized.make
--- a/make/solaris/makefiles/optimized.make	Mon May 24 13:53:38 2010 -0700
+++ b/make/solaris/makefiles/optimized.make	Tue May 25 13:18:49 2010 -0700
@@ -32,13 +32,10 @@
 # (OPT_CFLAGS/SLOWER is also available, to alter compilation of buggy files)
 ifeq ("${Platform_compiler}", "sparcWorks")
 
-# Problem with SS12 compiler, dtrace doesn't like the .o files (bug 6693876)
-ifeq ($(COMPILER_REV_NUMERIC),509)
-  # Not clear this workaround could be skipped in some cases.
-  OPT_CFLAGS/vmGCOperations.o = $(OPT_CFLAGS/SLOWER) -g
-  OPT_CFLAGS/java.o = $(OPT_CFLAGS/SLOWER) -g
-  OPT_CFLAGS/jni.o = $(OPT_CFLAGS/SLOWER) -g
-endif
+ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 509), 1)
+# dtrace cannot handle tail call optimization (6672627, 6693876)
+OPT_CFLAGS/jni.o = $(OPT_CFLAGS/DEFAULT) $(OPT_CCFLAGS/NO_TAIL_CALL_OPT)
+endif # COMPILER_NUMERIC_REV >= 509
 
 # Workaround SS11 bug 6345274 (all platforms) (Fixed in SS11 patch and SS12)
 ifeq ($(COMPILER_REV_NUMERIC),508)
diff -r 1747f04ad0c4 -r f9a202dd8899 make/solaris/makefiles/product.make
--- a/make/solaris/makefiles/product.make	Mon May 24 13:53:38 2010 -0700
+++ b/make/solaris/makefiles/product.make	Tue May 25 13:18:49 2010 -0700
@@ -40,13 +40,10 @@
 # (OPT_CFLAGS/SLOWER is also available, to alter compilation of buggy files)
 ifeq ("${Platform_compiler}", "sparcWorks")
 
-# Problem with SS12 compiler, dtrace doesn't like the .o files (bug 6693876)
-ifeq ($(COMPILER_REV_NUMERIC),509)
-  # Not clear this workaround could be skipped in some cases.
-  OPT_CFLAGS/vmGCOperations.o = $(OPT_CFLAGS/SLOWER) -g
-  OPT_CFLAGS/java.o = $(OPT_CFLAGS/SLOWER) -g
-  OPT_CFLAGS/jni.o = $(OPT_CFLAGS/SLOWER) -g
-endif
+ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 509), 1)
+# dtrace cannot handle tail call optimization (6672627, 6693876)
+OPT_CFLAGS/jni.o = $(OPT_CFLAGS/DEFAULT) $(OPT_CCFLAGS/NO_TAIL_CALL_OPT)
+endif # COMPILER_NUMERIC_REV >= 509
 
 # Workaround SS11 bug 6345274 (all platforms) (Fixed in SS11 patch and SS12)
 ifeq ($(COMPILER_REV_NUMERIC),508)
diff -r 1747f04ad0c4 -r f9a202dd8899 make/solaris/makefiles/sparcWorks.make
--- a/make/solaris/makefiles/sparcWorks.make	Mon May 24 13:53:38 2010 -0700
+++ b/make/solaris/makefiles/sparcWorks.make	Tue May 25 13:18:49 2010 -0700
@@ -48,27 +48,33 @@
 # Pick which compiler is validated
 ifeq ($(JRE_RELEASE_VER),1.6.0)
   # Validated compiler for JDK6 is SS11 (5.8)
-  VALIDATED_COMPILER_REV := 5.8
-  VALIDATED_C_COMPILER_REV := 5.8
+  VALIDATED_COMPILER_REVS := 5.8
+  VALIDATED_C_COMPILER_REVS := 5.8
 else
-  # Validated compiler for JDK7 is SS12 (5.9)
-  VALIDATED_COMPILER_REV := 5.9
-  VALIDATED_C_COMPILER_REV := 5.9
+  # Validated compilers for JDK7 are SS12 (5.9) or SS12 update 1 (5.10)
+  VALIDATED_COMPILER_REVS := 5.9 5.10
+  VALIDATED_C_COMPILER_REVS := 5.9 5.10
 endif
 
-# Warning messages about not using the above validated version
-ENFORCE_COMPILER_REV${ENFORCE_COMPILER_REV} := ${VALIDATED_COMPILER_REV}
-ifneq (${COMPILER_REV},${ENFORCE_COMPILER_REV})
-dummy_target_to_enforce_compiler_rev:=\
-$(shell echo >&2 WARNING: You are using CC version ${COMPILER_REV} \
-and should be using version ${ENFORCE_COMPILER_REV}. Set ENFORCE_COMPILER_REV=${COMPILER_REV} to avoid this warning.)
+# Warning messages about not using the above validated versions
+ENFORCE_COMPILER_REV${ENFORCE_COMPILER_REV} := $(strip ${VALIDATED_COMPILER_REVS})
+ifeq ($(filter ${ENFORCE_COMPILER_REV},${COMPILER_REV}),)
+PRINTABLE_CC_REVS := $(subst $(shell echo ' '), or ,${ENFORCE_COMPILER_REV})
+dummy_var_to_enforce_compiler_rev := $(shell \
+	echo >&2 WARNING: You are using CC version ${COMPILER_REV} and \
+	should be using version ${PRINTABLE_CC_REVS}.; \
+	echo >&2 Set ENFORCE_COMPILER_REV=${COMPILER_REV} to avoid this \
+	warning.)
 endif
 
-ENFORCE_C_COMPILER_REV${ENFORCE_C_COMPILER_REV} := ${VALIDATED_C_COMPILER_REV}
-ifneq (${C_COMPILER_REV},${ENFORCE_C_COMPILER_REV})
-dummy_target_to_enforce_c_compiler_rev:=\
-$(shell echo >&2 WARNING: You are using cc version ${C_COMPILER_REV} \
-and should be using version ${ENFORCE_C_COMPILER_REV}. Set ENFORCE_C_COMPILER_REV=${C_COMPILER_REV} to avoid this warning.)
+ENFORCE_C_COMPILER_REV${ENFORCE_C_COMPILER_REV} := $(strip ${VALIDATED_C_COMPILER_REVS})
+ifeq ($(filter ${ENFORCE_C_COMPILER_REV},${C_COMPILER_REV}),)
+PRINTABLE_C_REVS := $(subst $(shell echo ' '), or ,${ENFORCE_C_COMPILER_REV})
+dummy_var_to_enforce_c_compiler_rev := $(shell \
+	echo >&2 WARNING: You are using cc version ${C_COMPILER_REV} and \
+	should be using version ${PRINTABLE_C_REVS}.; \
+	echo >&2 Set ENFORCE_C_COMPILER_REV=${C_COMPILER_REV} to avoid this \
+	warning.)
 endif
 
 COMPILER_REV_NUMERIC := $(shell echo $(COMPILER_REV) | awk -F. '{ print $$1 * 100 + $$2 }')
@@ -139,6 +145,13 @@
 OPT_CFLAGS/O2=-xO2
 OPT_CFLAGS/NOOPT=-xO1
 
+ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 509), 1)
+ifeq ($(Platform_arch), x86)
+OPT_CFLAGS/NO_TAIL_CALL_OPT = -Wu,-O~yz
+OPT_CCFLAGS/NO_TAIL_CALL_OPT = -Qoption ube -O~yz
+endif # Platform_arch == x86
+endif # COMPILER_REV_NUMERIC >= 509
+
 #################################################
 # Begin current (>=5.6) Forte compiler options #
 #################################################
@@ -181,10 +194,7 @@
 ifeq ("${Platform_arch_model}", "x86_32")
 
-OPT_CFLAGS=-xtarget=pentium $(EXTRA_OPT_CFLAGS)
-
-# UBE (CC 5.5) has bug 4923569 with -xO4
-OPT_CFLAGS+=-xO3
+OPT_CFLAGS=-xtarget=pentium -xO4 $(EXTRA_OPT_CFLAGS)
 
 endif # 32bit x86
 
@@ -461,7 +471,7 @@
 # The -g0 setting allows the C++ frontend to inline, which is a big win.
 
 # Special global options for SS12
-ifeq ($(COMPILER_REV_NUMERIC),509)
+ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 509), 1)
 # There appears to be multiple issues with the new Dwarf2 debug format, so
 # we tell the compiler to use the older 'stabs' debug format all the time.
 # Note that this needs to be used in optimized compiles too to be 100%.
diff -r 1747f04ad0c4 -r f9a202dd8899 make/solaris/makefiles/vm.make
--- a/make/solaris/makefiles/vm.make	Mon May 24 13:53:38 2010 -0700
+++ b/make/solaris/makefiles/vm.make	Tue May 25 13:18:49 2010 -0700
@@ -174,19 +174,16 @@
 endif
 
 # making the library:
 $(LIBJVM): $(LIBJVM.o) $(LIBJVM_MAPFILE)
-	$(QUIETLY) \
-	case "$(CFLAGS_BROWSE)" in \
-	-sbfast|-xsbfast) \
-	;; \
-	*) \
-	echo Linking vm...; \
-	$(LINK_LIB.CC/PRE_HOOK) \
-	$(LINK_VM) $(LFLAGS_VM) -o $@ $(LIBJVM.o) $(LIBS_VM); \
-	$(LINK_LIB.CC/POST_HOOK) \
-	rm -f $@.1; ln -s $@ $@.1; \
-	[ -f $(LIBJVM_G) ] || { ln -s $@ $(LIBJVM_G); ln -s $@.1 $(LIBJVM_G).1; }; \
-	;; \
-	esac
+ifeq ($(filter -sbfast -xsbfast, $(CFLAGS_BROWSE)),)
+	@echo Linking vm...
+	$(QUIETLY) $(LINK_LIB.CC/PRE_HOOK)
+	$(QUIETLY) $(LINK_VM) $(LFLAGS_VM) -o $@ $(LIBJVM.o) $(LIBS_VM)
+	$(QUIETLY) $(LINK_LIB.CC/POST_HOOK)
+	$(QUIETLY) rm -f $@.1 && ln -s $@ $@.1
+	$(QUIETLY) [ -f $(LIBJVM_G) ] || ln -s $@ $(LIBJVM_G)
+	$(QUIETLY) [ -f $(LIBJVM_G).1 ] || ln -s $@.1 $(LIBJVM_G).1
+endif # filter -sbfast -xsbfast
+
 
 DEST_JVM = $(JDK_LIBDIR)/$(VM_SUBDIR)/$(LIBJVM)
diff -r 1747f04ad0c4 -r f9a202dd8899 src/cpu/sparc/vm/assembler_sparc.hpp
--- a/src/cpu/sparc/vm/assembler_sparc.hpp	Mon May 24 13:53:38 2010 -0700
+++ b/src/cpu/sparc/vm/assembler_sparc.hpp	Tue May 25 13:18:49 2010 -0700
@@ -2234,7 +2234,7 @@
   AddressLiteral constant_oop_address(jobject obj);                          // find_index
   inline void    set_oop             (jobject obj, Register d);              // uses allocate_oop_address
   inline void    set_oop_constant    (jobject obj, Register d);              // uses constant_oop_address
-  inline void    set_oop             (AddressLiteral& obj_addr, Register d); // same as load_address
+  inline void    set_oop             (const AddressLiteral& obj_addr, Register d); // same as load_address
 
   void set_narrow_oop( jobject obj, Register d );
 
diff -r 1747f04ad0c4 -r f9a202dd8899 src/cpu/sparc/vm/assembler_sparc.inline.hpp
--- a/src/cpu/sparc/vm/assembler_sparc.inline.hpp	Mon May 24 13:53:38 2010 -0700
+++ b/src/cpu/sparc/vm/assembler_sparc.inline.hpp	Tue May 25 13:18:49 2010 -0700
@@ -712,7 +712,7 @@
 }
 
 
-inline void MacroAssembler::set_oop(AddressLiteral& obj_addr, Register d) {
+inline void MacroAssembler::set_oop(const AddressLiteral& obj_addr, Register d) {
   assert(obj_addr.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
   set(obj_addr, d);
 }
diff -r 1747f04ad0c4 -r f9a202dd8899 src/cpu/sparc/vm/runtime_sparc.cpp
--- a/src/cpu/sparc/vm/runtime_sparc.cpp	Mon May 24 13:53:38 2010 -0700
+++ b/src/cpu/sparc/vm/runtime_sparc.cpp	Tue May 25 13:18:49 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1998-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -116,6 +116,11 @@
   __ mov(O0, G3_scratch);     // Move handler address to temp
   __ restore();
 
+  // Restore SP from L7 if the exception PC is a MethodHandle call site.
+  __ lduw(Address(G2_thread, JavaThread::is_method_handle_return_offset()), O7);
+  __ tst(O7);
+  __ movcc(Assembler::notZero, false, Assembler::icc, L7_mh_SP_save, SP);
+
   // G3_scratch contains handler address
   // Since this may be the deopt blob we must set O7 to look like we returned
   // from the original pc that threw the exception
diff -r 1747f04ad0c4 -r f9a202dd8899 src/cpu/sparc/vm/sparc.ad
--- a/src/cpu/sparc/vm/sparc.ad	Mon May 24 13:53:38 2010 -0700
+++ b/src/cpu/sparc/vm/sparc.ad	Tue May 25 13:18:49 2010 -0700
@@ -534,7 +534,10 @@
 
 // The "return address" is the address of the call instruction, plus 8.
 int MachCallStaticJavaNode::ret_addr_offset() {
-  return NativeCall::instruction_size;  // call; delay slot
+  int offset = NativeCall::instruction_size;  // call; delay slot
+  if (_method_handle_invoke)
+    offset += 4;  // restore SP
+  return offset;
 }
 
 int MachCallDynamicJavaNode::ret_addr_offset() {
@@ -1858,7 +1861,7 @@
 }
 
 const RegMask Matcher::method_handle_invoke_SP_save_mask() {
-  return RegMask();
+  return L7_REGP_mask;
 }
 
 %}
@@ -2441,6 +2444,16 @@
                      /*preserve_g2=*/true, /*force far call*/true);
   %}
 
+  enc_class preserve_SP %{
+    MacroAssembler _masm(&cbuf);
+    __ mov(SP, L7_mh_SP_save);
+  %}
+
+  enc_class restore_SP %{
+    MacroAssembler _masm(&cbuf);
+    __ mov(L7_mh_SP_save, SP);
+  %}
+
   enc_class Java_Static_Call (method meth) %{    // JAVA STATIC CALL
     // CALL to fixup routine.  Fixup routine uses ScopeDesc info to determine
     // who we intended to call.
@@ -9213,6 +9226,7 @@
 // Call Java Static Instruction
 instruct CallStaticJavaDirect( method meth ) %{
   match(CallStaticJava);
+  predicate(! ((CallStaticJavaNode*)n)->is_method_handle_invoke());
   effect(USE meth);
 
   size(8);
@@ -9223,6 +9237,20 @@
   ins_pipe(simple_call);
 %}
 
+// Call Java Static Instruction (method handle version)
+instruct CallStaticJavaHandle(method meth, l7RegP l7_mh_SP_save) %{
+  match(CallStaticJava);
+  predicate(((CallStaticJavaNode*)n)->is_method_handle_invoke());
+  effect(USE meth, KILL l7_mh_SP_save);
+
+  size(8);
+  ins_cost(CALL_COST);
+  format %{ "CALL,static/MethodHandle" %}
+  ins_encode(preserve_SP, Java_Static_Call(meth), restore_SP, call_epilog);
+  ins_pc_relative(1);
+  ins_pipe(simple_call);
+%}
+
 // Call Java Dynamic Instruction
 instruct CallDynamicJavaDirect( method meth ) %{
   match(CallDynamicJava);
diff -r 1747f04ad0c4 -r f9a202dd8899 src/cpu/x86/vm/runtime_x86_32.cpp
--- a/src/cpu/x86/vm/runtime_x86_32.cpp	Mon May 24 13:53:38 2010 -0700
+++ b/src/cpu/x86/vm/runtime_x86_32.cpp	Tue May 25 13:18:49 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1998-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -117,7 +117,7 @@
 
   // Restore SP from BP if the exception PC is a MethodHandle call site.
   __ cmpl(Address(rcx, JavaThread::is_method_handle_return_offset()), 0);
-  __ cmovptr(Assembler::notEqual, rsp, rbp);
+  __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
 
   // We have a handler in rax, (could be deopt blob)
   // rdx - throwing pc, deopt blob will need it.
diff -r 1747f04ad0c4 -r f9a202dd8899 src/cpu/x86/vm/sharedRuntime_x86_64.cpp
--- a/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Mon May 24 13:53:38 2010 -0700
+++ b/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Tue May 25 13:18:49 2010 -0700
@@ -3305,7 +3305,7 @@
 
   // Restore SP from BP if the exception PC is a MethodHandle call site.
   __ cmpl(Address(r15_thread, JavaThread::is_method_handle_return_offset()), 0);
-  __ cmovptr(Assembler::notEqual, rsp, rbp);
+  __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
 
   // We have a handler in rax (could be deopt blob).
   __ mov(r8, rax);
diff -r 1747f04ad0c4 -r f9a202dd8899 src/cpu/x86/vm/x86_32.ad
--- a/src/cpu/x86/vm/x86_32.ad	Mon May 24 13:53:38 2010 -0700
+++ b/src/cpu/x86/vm/x86_32.ad	Tue May 25 13:18:49 2010 -0700
@@ -1841,14 +1841,14 @@
     MacroAssembler _masm(&cbuf);
     // RBP is preserved across all calls, even compiled calls.
     // Use it to preserve RSP in places where the callee might change the SP.
-    __ movptr(rbp, rsp);
+    __ movptr(rbp_mh_SP_save, rsp);
     debug_only(int off1 = cbuf.code_size());
     assert(off1 - off0 == preserve_SP_size(), "correct size prediction");
   %}
 
   enc_class restore_SP %{
     MacroAssembler _masm(&cbuf);
-    __ movptr(rsp, rbp);
+    __ movptr(rsp, rbp_mh_SP_save);
   %}
 
   enc_class Java_Static_Call (method meth) %{    // JAVA STATIC CALL
@@ -13570,7 +13570,7 @@
 // Call Java Static Instruction (method handle version)
 // Note: If this code changes, the corresponding ret_addr_offset() and
 //       compute_padding() functions will have to be adjusted.
-instruct CallStaticJavaHandle(method meth, eBPRegP ebp) %{
+instruct CallStaticJavaHandle(method meth, eBPRegP ebp_mh_SP_save) %{
   match(CallStaticJava);
   predicate(((CallStaticJavaNode*)n)->is_method_handle_invoke());
   effect(USE meth);
diff -r 1747f04ad0c4 -r f9a202dd8899 src/cpu/x86/vm/x86_64.ad
--- a/src/cpu/x86/vm/x86_64.ad	Mon May 24 13:53:38 2010 -0700
+++ b/src/cpu/x86/vm/x86_64.ad	Tue May 25 13:18:49 2010 -0700
@@ -2635,14 +2635,14 @@
     MacroAssembler _masm(&cbuf);
     // RBP is preserved across all calls, even compiled calls.
     // Use it to preserve RSP in places where the callee might change the SP.
-    __ movptr(rbp, rsp);
+    __ movptr(rbp_mh_SP_save, rsp);
     debug_only(int off1 = cbuf.code_size());
     assert(off1 - off0 == preserve_SP_size(), "correct size prediction");
   %}
 
   enc_class restore_SP %{
     MacroAssembler _masm(&cbuf);
-    __ movptr(rsp, rbp);
+    __ movptr(rsp, rbp_mh_SP_save);
   %}
 
   enc_class Java_Static_Call(method meth)
@@ -12604,7 +12604,7 @@
 // Call Java Static Instruction (method handle version)
 // Note: If this code changes, the corresponding ret_addr_offset() and
 //       compute_padding() functions will have to be adjusted.
-instruct CallStaticJavaHandle(method meth, rbp_RegP rbp) %{
+instruct CallStaticJavaHandle(method meth, rbp_RegP rbp_mh_SP_save) %{
   match(CallStaticJava);
   predicate(((CallStaticJavaNode*) n)->is_method_handle_invoke());
   effect(USE meth);
diff -r 1747f04ad0c4 -r f9a202dd8899 src/os_cpu/solaris_x86/vm/atomic_solaris_x86.inline.hpp
--- a/src/os_cpu/solaris_x86/vm/atomic_solaris_x86.inline.hpp	Mon May 24 13:53:38 2010 -0700
+++ b/src/os_cpu/solaris_x86/vm/atomic_solaris_x86.inline.hpp	Tue May 25 13:18:49 2010 -0700
@@ -47,40 +47,56 @@
 // For Sun Studio - implementation is in solaris_x86_[32/64].il.
 // For gcc - implementation is just below.
 
-extern "C" jint _Atomic_add(jint add_value, volatile jint* dest, int mp);
-extern "C" jint _Atomic_xchg(jint exchange_value, volatile jint* dest);
-extern "C" jint _Atomic_cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value, int mp);
-extern "C" jlong _Atomic_cmpxchg_long(jlong exchange_value, volatile jlong* dest, jlong compare_value, int mp);
+// The lock prefix can be omitted for certain instructions on uniprocessors; to
+// facilitate this, os::is_MP() is passed as an additional argument. 64-bit
+// processors are assumed to be multi-threaded and/or multi-core, so the extra
+// argument is unnecessary.
+#ifndef _LP64
+#define IS_MP_DECL() , int is_mp
+#define IS_MP_ARG()  , (int) os::is_MP()
+#else
+#define IS_MP_DECL()
+#define IS_MP_ARG()
+#endif // _LP64
+
+extern "C" {
+  jint _Atomic_add(jint add_value, volatile jint* dest IS_MP_DECL());
+  jint _Atomic_xchg(jint exchange_value, volatile jint* dest);
+  jint _Atomic_cmpxchg(jint exchange_value, volatile jint* dest,
+                       jint compare_value IS_MP_DECL());
+  jlong _Atomic_cmpxchg_long(jlong exchange_value, volatile jlong* dest,
+                             jlong compare_value IS_MP_DECL());
+}
 
 inline jint Atomic::add (jint add_value, volatile jint* dest) {
-  return _Atomic_add(add_value, dest, (int) os::is_MP());
+  return _Atomic_add(add_value, dest IS_MP_ARG());
+}
+
+inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) {
+  return _Atomic_xchg(exchange_value, dest);
 }
 
 inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value) {
-  return _Atomic_cmpxchg(exchange_value, dest, compare_value, (int) os::is_MP());
+  return _Atomic_cmpxchg(exchange_value, dest, compare_value IS_MP_ARG());
 }
 
 inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value) {
-  return _Atomic_cmpxchg_long(exchange_value, dest, compare_value, (int) os::is_MP());
+  return _Atomic_cmpxchg_long(exchange_value, dest, compare_value IS_MP_ARG());
 }
 
 #ifdef AMD64
 inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
 inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }
-extern "C" jlong _Atomic_add_long(jlong add_value, volatile jlong* dest, int mp);
+extern "C" jlong _Atomic_add_long(jlong add_value, volatile jlong* dest);
 extern "C" jlong _Atomic_xchg_long(jlong exchange_value, volatile jlong* dest);
 
 inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  return (intptr_t)_Atomic_add_long((jlong)add_value, (volatile jlong*)dest, (int) os::is_MP());
+  return (intptr_t)_Atomic_add_long((jlong)add_value, (volatile jlong*)dest);
 }
 
 inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
-  return (void*)_Atomic_add_long((jlong)add_value, (volatile jlong*)dest, (int) os::is_MP());
-}
-
-inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) {
-  return _Atomic_xchg(exchange_value, dest);
+  return (void*)_Atomic_add_long((jlong)add_value, (volatile jlong*)dest);
 }
 
 inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
@@ -92,11 +108,11 @@
 }
 
 inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value) {
-  return (intptr_t)_Atomic_cmpxchg_long((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, (int) os::is_MP());
+  return (intptr_t)_Atomic_cmpxchg_long((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
 }
 
 inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value) {
-  return (void*)_Atomic_cmpxchg_long((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, (int) os::is_MP());
+  return (void*)_Atomic_cmpxchg_long((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
 }
 
 inline jlong Atomic::load(volatile jlong* src) { return *src; }
@@ -111,13 +127,6 @@
   return (void*)add((jint)add_value, (volatile jint*)dest);
 }
 
-inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) {
-  // We noticed a CC5.5 bug (4894807), so keep calling the stub just to be safe.
-  // Will use the inline template version after 4894807 is fixed.
-  // return _Atomic_xchg(exchange_value, dest);
-  return (*os::atomic_xchg_func)(exchange_value, dest);
-}
-
 inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
   return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
 }
@@ -179,9 +188,6 @@
 #endif // AMD64
 
 inline jint _Atomic_xchg(jint exchange_value, volatile jint* dest) {
-
-  // 32bit version originally did nothing!!
-
   __asm__ __volatile__ ("xchgl (%2),%0"
                         : "=r" (exchange_value)
                         : "0" (exchange_value), "r" (dest)
diff -r 1747f04ad0c4 -r f9a202dd8899 src/os_cpu/solaris_x86/vm/solaris_x86_32.il
--- a/src/os_cpu/solaris_x86/vm/solaris_x86_32.il	Mon May 24 13:53:38 2010 -0700
+++ b/src/os_cpu/solaris_x86/vm/solaris_x86_32.il	Tue May 25 13:18:49 2010 -0700
@@ -50,10 +50,12 @@
       movl     4(%esp), %edx   // dest
       movl     %eax, %ecx
       cmpl     $0, 8(%esp)     // MP test
-      je       1f
-      lock
-1:    xaddl    %eax, (%edx)
-      addl     %ecx, %eax
+      jne      1f
+      xaddl    %eax, (%edx)
+      jmp      2f
+1:    lock
+      xaddl    %eax, (%edx)
+2:    addl     %ecx, %eax
      .end
 
   // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest).
@@ -72,9 +74,12 @@
       movl     0(%esp), %ecx   // exchange_value
       movl     4(%esp), %edx   // dest
       cmp      $0, 12(%esp)    // MP test
-      je       1f
-      lock
-1:    cmpxchgl %ecx, (%edx)
+      jne      1f
+      cmpxchgl %ecx, (%edx)
+      jmp      2f
+1:    lock
+      cmpxchgl %ecx, (%edx)
+2:
      .end
 
   // Support for jlong Atomic::cmpxchg(jlong exchange_value,
@@ -90,10 +95,12 @@
       movl     8(%esp), %ebx   // exchange_value (low)
       movl     12(%esp), %ecx  // exchange_high (high)
       cmp      $0, 28(%esp)    // MP test
-      je       1f
-      lock
-1:    cmpxchg8b (%edi)
-      popl     %edi
+      jne      1f
+      cmpxchg8b (%edi)
+      jmp      2f
+1:    lock
+      cmpxchg8b (%edi)
+2:    popl     %edi
       popl     %ebx
      .end
 
diff -r 1747f04ad0c4 -r f9a202dd8899 src/os_cpu/solaris_x86/vm/solaris_x86_64.il
--- a/src/os_cpu/solaris_x86/vm/solaris_x86_64.il	Mon May 24 13:53:38 2010 -0700
+++ b/src/os_cpu/solaris_x86/vm/solaris_x86_64.il	Tue May 25 13:18:49 2010 -0700
@@ -37,24 +37,18 @@
      .end
 
   // Support for jint Atomic::add(jint add_value, volatile jint* dest)
-  // An additional bool (os::is_MP()) is passed as the last argument.
-      .inline _Atomic_add,3
+      .inline _Atomic_add,2
       movl     %edi, %eax      // save add_value for return
-      testl    %edx, %edx      // MP test
-      je       1f
       lock
-1:    xaddl    %edi, (%rsi)
+      xaddl    %edi, (%rsi)
       addl     %edi, %eax
      .end
 
   // Support for jlong Atomic::add(jlong add_value, volatile jlong* dest)
-  // An additional bool (os::is_MP()) is passed as the last argument.
-      .inline _Atomic_add_long,3
+      .inline _Atomic_add_long,2
       movq     %rdi, %rax      // save add_value for return
-      testq    %rdx, %rdx      // MP test
-      je       1f
       lock
-1:    xaddq    %rdi, (%rsi)
+      xaddq    %rdi, (%rsi)
       addq     %rdi, %rax
      .end
 
@@ -73,25 +67,19 @@
   // Support for jint Atomic::cmpxchg(jint exchange_value,
   //                                  volatile jint *dest,
   //                                  jint compare_value)
-  // An additional bool (os::is_MP()) is passed as the last argument.
-      .inline _Atomic_cmpxchg,4
+      .inline _Atomic_cmpxchg,3
       movl     %edx, %eax      // compare_value
-      testl    %ecx, %ecx      // MP test
-      je       1f
       lock
-1:    cmpxchgl %edi, (%rsi)
+      cmpxchgl %edi, (%rsi)
      .end
 
   // Support for jlong Atomic::cmpxchg(jlong exchange_value,
   //                                   volatile jlong* dest,
   //                                   jlong compare_value)
-  // An additional bool (os::is_MP()) is passed as the last argument.
-      .inline _Atomic_cmpxchg_long,6
+      .inline _Atomic_cmpxchg_long,3
       movq     %rdx, %rax      // compare_value
-      testq    %rcx, %rcx      // MP test
-      je       1f
       lock
-1:    cmpxchgq %rdi, (%rsi)
+      cmpxchgq %rdi, (%rsi)
      .end
 
   // Support for OrderAccess::acquire()
diff -r 1747f04ad0c4 -r f9a202dd8899 src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.cpp
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.cpp	Mon May 24 13:53:38 2010 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.cpp	Tue May 25 13:18:49 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
diff -r 1747f04ad0c4 -r f9a202dd8899 src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.hpp
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.hpp	Mon May 24 13:53:38 2010 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.hpp	Tue May 25 13:18:49 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,30 +32,75 @@
     displaced_mark = nth_bit(2),        // i.e. 0x4
     next_mask = ~(right_n_bits(3))      // i.e. ~(0x7)
   };
-  intptr_t _next;
+
+  // Below, we want _narrow_next in the "higher" 32 bit slot,
+  // whose position will depend on endian-ness of the platform.
+  // This is so that there is no interference with the
+  // cms_free_bit occupying bit position 7 (lsb == 0)
+  // when we are using compressed oops; see FreeChunk::isFree().
+  // We cannot move the cms_free_bit down because currently
+  // biased locking code assumes that age bits are contiguous
+  // with the lock bits. Even if that assumption were relaxed,
+  // the least position we could move this bit to would be
+  // to bit position 3, which would require 16 byte alignment.
+  typedef struct {
+#ifdef VM_LITTLE_ENDIAN
+    LP64_ONLY(narrowOop _pad;)
+    narrowOop _narrow_next;
+#else
+    narrowOop _narrow_next;
+    LP64_ONLY(narrowOop _pad;)
+#endif
+  } Data;
+
+  union {
+    intptr_t _next;
+    Data     _data;
+  };
  public:
   inline PromotedObject* next() const {
-    return (PromotedObject*)(_next & next_mask);
+    assert(!((FreeChunk*)this)->isFree(), "Error");
+    PromotedObject* res;
+    if (UseCompressedOops) {
+      // The next pointer is a compressed oop stored in the top 32 bits
+      res = (PromotedObject*)oopDesc::decode_heap_oop(_data._narrow_next);
+    } else {
+      res = (PromotedObject*)(_next & next_mask);
+    }
+    assert(oop(res)->is_oop_or_null(true /* ignore mark word */), "Not an oop?");
+    return res;
   }
   inline void setNext(PromotedObject* x) {
-    assert(((intptr_t)x & ~next_mask) == 0,
-           "Conflict in bit usage, "
-           " or insufficient alignment of objects");
-    _next |= (intptr_t)x;
+    assert(((intptr_t)x & ~next_mask) == 0, "Conflict in bit usage, "
+           "or insufficient alignment of objects");
+    if (UseCompressedOops) {
+      assert(_data._narrow_next == 0, "Overwrite?");
+      _data._narrow_next = oopDesc::encode_heap_oop(oop(x));
+    } else {
+      _next |= (intptr_t)x;
+    }
+    assert(!((FreeChunk*)this)->isFree(), "Error");
   }
   inline void setPromotedMark() {
     _next |= promoted_mask;
+    assert(!((FreeChunk*)this)->isFree(), "Error");
   }
   inline bool hasPromotedMark() const {
+    assert(!((FreeChunk*)this)->isFree(), "Error");
     return (_next & promoted_mask) == promoted_mask;
   }
   inline void setDisplacedMark() {
     _next |= displaced_mark;
+    assert(!((FreeChunk*)this)->isFree(), "Error");
   }
   inline bool hasDisplacedMark() const {
+    assert(!((FreeChunk*)this)->isFree(), "Error");
     return (_next & displaced_mark) != 0;
   }
-  inline void clearNext() { _next = 0; }
+  inline void clearNext() {
+    _next = 0;
+    assert(!((FreeChunk*)this)->isFree(), "Error");
+  }
   debug_only(void *next_addr() { return (void *) &_next; })
 };
diff -r 1747f04ad0c4 -r f9a202dd8899 src/share/vm/gc_implementation/g1/concurrentMark.cpp
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Mon May 24 13:53:38 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Tue May 25 13:18:49 2010 -0700
@@ -766,10 +766,12 @@
 
   _has_aborted = false;
 
+#ifndef PRODUCT
   if (G1PrintReachableAtInitialMark) {
     print_reachable("at-cycle-start",
                     true /* use_prev_marking */, true /* all */);
   }
+#endif
 
   // Initialise marking structures. This has to be done in a STW phase.
   reset();
diff -r 1747f04ad0c4 -r f9a202dd8899 src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Mon May 24 13:53:38 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue May 25 13:18:49 2010 -0700
@@ -471,21 +471,23 @@
             res->zero_fill_state() == HeapRegion::Allocated)),
            "Alloc Regions must be zero filled (and non-H)");
   }
-  if (res != NULL && res->is_empty()) _free_regions--;
-  assert(res == NULL ||
-         (!res->isHumongous() &&
-          (!zero_filled ||
-           res->zero_fill_state() == HeapRegion::Allocated)),
-         "Non-young alloc Regions must be zero filled (and non-H)");
-
-  if (G1PrintHeapRegions) {
-    if (res != NULL) {
+  if (res != NULL) {
+    if (res->is_empty()) {
+      _free_regions--;
+    }
+    assert(!res->isHumongous() &&
+           (!zero_filled || res->zero_fill_state() == HeapRegion::Allocated),
+           err_msg("Non-young alloc Regions must be zero filled (and non-H):"
+                   " res->isHumongous()=%d, zero_filled=%d, res->zero_fill_state()=%d",
+                   res->isHumongous(), zero_filled, res->zero_fill_state()));
+    assert(!res->is_on_unclean_list(),
+           "Alloc Regions must not be on the unclean list");
+    if (G1PrintHeapRegions) {
       gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], "
                              "top "PTR_FORMAT,
                              res->hrs_index(), res->bottom(), res->end(), res->top());
     }
   }
-
   return res;
 }
@@ -2338,10 +2340,12 @@
     gclog_or_tty->print_cr("Heap:");
     print_on(gclog_or_tty, true /* extended */);
     gclog_or_tty->print_cr("");
+#ifndef PRODUCT
     if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) {
       concurrent_mark()->print_reachable("at-verification-failure",
                                          use_prev_marking, false /* all */);
     }
+#endif
     gclog_or_tty->flush();
   }
   guarantee(!failures, "there should not have been any failures");
@@ -4600,6 +4604,15 @@
 
 void G1CollectedHeap::put_region_on_unclean_list_locked(HeapRegion* r) {
   assert(ZF_mon->owned_by_self(), "precondition.");
+#ifdef ASSERT
+  if (r->is_gc_alloc_region()) {
+    ResourceMark rm;
+    stringStream region_str;
+    print_on(&region_str);
+    assert(!r->is_gc_alloc_region(), err_msg("Unexpected GC allocation region: %s",
+                                             region_str.as_string()));
+  }
+#endif
   _unclean_region_list.insert_before_head(r);
 }
diff -r 1747f04ad0c4 -r f9a202dd8899 src/share/vm/gc_implementation/g1/heapRegion.cpp
--- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Mon May 24 13:53:38 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Tue May 25 13:18:49 2010 -0700
@@ -554,11 +554,19 @@
 #endif
 
 void HeapRegion::set_zero_fill_state_work(ZeroFillState zfs) {
-  assert(top() == bottom() || zfs == Allocated,
-         "Region must be empty, or we must be setting it to allocated.");
   assert(ZF_mon->owned_by_self() ||
          Universe::heap()->is_gc_active(),
          "Must hold the lock or be a full GC to modify.");
+#ifdef ASSERT
+  if (top() != bottom() && zfs != Allocated) {
+    ResourceMark rm;
+    stringStream region_str;
+    print_on(&region_str);
+    assert(top() == bottom() || zfs == Allocated,
+           err_msg("Region must be empty, or we must be setting it to allocated. "
+                   "_zfs=%d, zfs=%d, region: %s", _zfs, zfs, region_str.as_string()));
+  }
+#endif
   _zfs = zfs;
 }
diff -r 1747f04ad0c4 -r f9a202dd8899 src/share/vm/gc_implementation/shared/spaceDecorator.hpp
--- a/src/share/vm/gc_implementation/shared/spaceDecorator.hpp	Mon May 24 13:53:38 2010 -0700
+++ b/src/share/vm/gc_implementation/shared/spaceDecorator.hpp	Tue May 25 13:18:49 2010 -0700
@@ -109,7 +109,7 @@
   // is fully constructed.  Also is used when a generation is expanded
   // and possibly before the spaces have been reshaped to to the new
   // size of the generation.
-  static void mangle_region(MemRegion mr);
+  static void mangle_region(MemRegion mr) PRODUCT_RETURN;
 };
 
 class ContiguousSpace;
diff -r 1747f04ad0c4 -r f9a202dd8899 src/share/vm/gc_implementation/shared/vmGCOperations.cpp
--- a/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Mon May 24 13:53:38 2010 -0700
+++ b/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Tue May 25 13:18:49 2010 -0700
@@ -32,10 +32,12 @@
 // for the other file anymore.   The dtrace probes have to remain stable.
 void VM_GC_Operation::notify_gc_begin(bool full) {
   HS_DTRACE_PROBE1(hotspot, gc__begin, full);
+  HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
 }
 
 void VM_GC_Operation::notify_gc_end() {
   HS_DTRACE_PROBE(hotspot, gc__end);
+  HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
 }
 
 void VM_GC_Operation::acquire_pending_list_lock() {
diff -r 1747f04ad0c4 -r f9a202dd8899 src/share/vm/gc_interface/collectedHeap.cpp
--- a/src/share/vm/gc_interface/collectedHeap.cpp	Mon May 24 13:53:38 2010 -0700
+++ b/src/share/vm/gc_interface/collectedHeap.cpp	Tue May 25 13:18:49 2010 -0700
@@ -65,7 +65,7 @@
 void CollectedHeap::pre_initialize() {
   // Used for ReduceInitialCardMarks (when COMPILER2 is used);
   // otherwise remains unused.
-#ifdef COMPLER2
+#ifdef COMPILER2
   _defer_initial_card_mark =    ReduceInitialCardMarks && can_elide_tlab_store_barriers()
                              && (DeferInitialCardMark || card_mark_must_follow_store());
 #else
@@ -309,7 +309,7 @@
   DEBUG_ONLY(fill_args_check(start, words);)
   HandleMark hm;  // Free handles before leaving.
 
-#ifdef LP64
+#ifdef _LP64
   // A single array can fill ~8G, so multiple objects are needed only in 64-bit.
   // First fill with arrays, ensuring that any remaining space is big enough to
   // fill.  The remainder is filled with a single object.
diff -r 1747f04ad0c4 -r f9a202dd8899 src/share/vm/oops/markOop.hpp
--- a/src/share/vm/oops/markOop.hpp	Mon May 24 13:53:38 2010 -0700
+++ b/src/share/vm/oops/markOop.hpp	Tue May 25 13:18:49 2010 -0700
@@ -27,12 +27,26 @@
 // Note that the mark is not a real oop but just a word.
 // It is placed in the oop hierarchy for historical reasons.
 //
-// Bit-format of an object header (most significant first):
+// Bit-format of an object header (most significant first, big endian layout below):
+//
+//  32 bits:
+//  --------
+//             hash:25 ------------>| age:4    biased_lock:1 lock:2 (normal object)
+//             JavaThread*:23 epoch:2 age:4    biased_lock:1 lock:2 (biased object)
+//             size:32 ------------------------------------------>| (CMS free block)
+//             PromotedObject*:29 ---------->| promo_bits:3 ----->| (CMS promoted object)
 //
-//  32 bits: unused:0  hash:25 age:4 biased_lock:1 lock:2
-//  64 bits: unused:24 hash:31 cms:2 age:4 biased_lock:1 lock:2
-//           unused:20 size:35 cms:2 age:4 biased_lock:1 lock:2 (if cms
-//           free chunk)
+//  64 bits:
+//  --------
+//  unused:25 hash:31 -->| unused:1   age:4    biased_lock:1 lock:2 (normal object)
+//  JavaThread*:54 epoch:2 unused:1   age:4    biased_lock:1 lock:2 (biased object)
+//  PromotedObject*:61 --------------------->| promo_bits:3 ----->| (CMS promoted object)
+//  size:64 ----------------------------------------------------->| (CMS free block)
+//
+//  unused:25 hash:31 -->| cms_free:1 age:4    biased_lock:1 lock:2 (COOPs && normal object)
+//  JavaThread*:54 epoch:2 cms_free:1 age:4    biased_lock:1 lock:2 (COOPs && biased object)
+//  narrowOop:32 unused:24 cms_free:1 unused:4 promo_bits:3 ----->| (COOPs && CMS promoted object)
+//  unused:21 size:35 -->| cms_free:1 unused:7 ------------------>| (COOPs && CMS free block)
 //
 // - hash contains the identity hash value: largest value is
 //   31 bits, see os::random().  Also, 64-bit vm's require
@@ -61,8 +75,9 @@
 //    significant fraction of the eden semispaces and were not
 //    promoted promptly, causing an increase in the amount of copying
 //    performed. The runtime system aligns all JavaThread* pointers to
-//    a very large value (currently 128 bytes) to make room for the
-//    age bits when biased locking is enabled.
+//    a very large value (currently 128 bytes (32bVM) or 256 bytes (64bVM))
+//    to make room for the age bits & the epoch bits (used in support of
+//    biased locking), and for the CMS "freeness" bit in the 64bVM (+COOPs).
 //
 //    [JavaThread* | epoch | age | 1 | 01]       lock is biased toward given thread
 //    [0           | epoch | age | 1 | 01]       lock is anonymously biased
diff -r 1747f04ad0c4 -r f9a202dd8899 src/share/vm/runtime/java.cpp
--- a/src/share/vm/runtime/java.cpp	Mon May 24 13:53:38 2010 -0700
+++ b/src/share/vm/runtime/java.cpp	Tue May 25 13:18:49 2010 -0700
@@ -470,6 +470,7 @@
 void notify_vm_shutdown() {
   // For now, just a dtrace probe.
   HS_DTRACE_PROBE(hotspot, vm__shutdown);
+  HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
 }
 
 void vm_direct_exit(int code) {
diff -r 1747f04ad0c4 -r f9a202dd8899 src/share/vm/runtime/vframe.cpp
--- a/src/share/vm/runtime/vframe.cpp	Mon May 24 13:53:38 2010 -0700
+++ b/src/share/vm/runtime/vframe.cpp	Tue May 25 13:18:49 2010 -0700
@@ -101,8 +101,8 @@
     bool found_first_monitor = false;
     ObjectMonitor *pending_monitor = thread()->current_pending_monitor();
     ObjectMonitor *waiting_monitor = thread()->current_waiting_monitor();
-    oop pending_obj = (pending_monitor != NULL ? (oop) pending_monitor->object() : NULL);
-    oop waiting_obj = (waiting_monitor != NULL ? (oop) waiting_monitor->object() : NULL);
+    oop pending_obj = (pending_monitor != NULL ? (oop) pending_monitor->object() : (oop) NULL);
+    oop waiting_obj = (waiting_monitor != NULL ? (oop) waiting_monitor->object() : (oop) NULL);
 
     for (int index = (mons->length()-1); index >= 0; index--) {
       MonitorInfo* monitor = mons->at(index);
diff -r 1747f04ad0c4 -r f9a202dd8899 src/share/vm/runtime/vm_version.cpp
--- a/src/share/vm/runtime/vm_version.cpp	Mon May 24 13:53:38 2010 -0700
+++ b/src/share/vm/runtime/vm_version.cpp	Tue May 25 13:18:49 2010 -0700
@@ -190,6 +190,8 @@
       #define HOTSPOT_BUILD_COMPILER "Workshop 5.8"
     #elif __SUNPRO_CC == 0x590
       #define HOTSPOT_BUILD_COMPILER "Workshop 5.9"
+    #elif __SUNPRO_CC == 0x5100
+      #define HOTSPOT_BUILD_COMPILER "Sun Studio 12u1"
     #else
       #define HOTSPOT_BUILD_COMPILER "unknown Workshop:" XSTR(__SUNPRO_CC)
     #endif
diff -r 1747f04ad0c4 -r f9a202dd8899 src/share/vm/utilities/dtrace.hpp
--- a/src/share/vm/utilities/dtrace.hpp	Mon May 24 13:53:38 2010 -0700
+++ b/src/share/vm/utilities/dtrace.hpp	Tue May 25 13:18:49 2010 -0700
@@ -29,6 +29,10 @@
 #define DTRACE_ONLY(x) x
 #define NOT_DTRACE(x)
 
+// Work around dtrace tail call bug 6672627 until it is fixed in solaris 10.
+#define HS_DTRACE_WORKAROUND_TAIL_CALL_BUG() \
+  do { volatile size_t dtrace_workaround_tail_call_bug = 1; } while (0)
+
 #else // ndef SOLARIS || ndef DTRACE_ENABLED
 
 #define DTRACE_ONLY(x)
@@ -41,6 +45,8 @@
 #define DTRACE_PROBE4(a,b,c,d,e,f) {;}
 #define DTRACE_PROBE5(a,b,c,d,e,f,g) {;}
 
+#define HS_DTRACE_WORKAROUND_TAIL_CALL_BUG()
+
 #endif
 
 #define HS_DTRACE_PROBE_FN(provider,name)\