# HG changeset patch # User kvn # Date 1266017256 28800 # Node ID 7b4415a18c8afb70569c8188db8a85f6154a8948 # Parent 95d21201c29ac4943b77287cd335294221304e97# Parent c09ee209b65c742568e86da87f2335701423a445 Merge diff -r c09ee209b65c -r 7b4415a18c8a .hgtags --- a/.hgtags Fri Feb 12 10:34:11 2010 -0800 +++ b/.hgtags Fri Feb 12 15:27:36 2010 -0800 @@ -54,3 +54,5 @@ 455105fc81d941482f8f8056afaa7aa0949c9300 jdk7-b77 e703499b4b51e3af756ae77c3d5e8b3058a14e4e jdk7-b78 a5a6adfca6ecefb5894a848debabfe442ff50e25 jdk7-b79 +3003ddd1d4330b06cb4691ae74d600d3685899eb jdk7-b80 +1f9b07674480c224828852ffe137beea36b3cab5 jdk7-b81 diff -r c09ee209b65c -r 7b4415a18c8a make/hotspot_version --- a/make/hotspot_version Fri Feb 12 10:34:11 2010 -0800 +++ b/make/hotspot_version Fri Feb 12 15:27:36 2010 -0800 @@ -35,7 +35,7 @@ HS_MAJOR_VER=17 HS_MINOR_VER=0 -HS_BUILD_NUMBER=07 +HS_BUILD_NUMBER=09 JDK_MAJOR_VER=1 JDK_MINOR_VER=7 diff -r c09ee209b65c -r 7b4415a18c8a src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp --- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp Fri Feb 12 10:34:11 2010 -0800 +++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp Fri Feb 12 15:27:36 2010 -0800 @@ -378,7 +378,7 @@ int offset = code_offset(); - if (compilation()->has_exception_handlers() || compilation()->env()->jvmti_can_post_exceptions()) { + if (compilation()->has_exception_handlers() || compilation()->env()->jvmti_can_post_on_exceptions()) { __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type); __ delayed()->nop(); } diff -r c09ee209b65c -r 7b4415a18c8a src/cpu/x86/vm/c1_LIRAssembler_x86.cpp --- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp Fri Feb 12 10:34:11 2010 -0800 +++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp Fri Feb 12 15:27:36 2010 -0800 @@ -438,7 +438,7 @@ // if the method does not have an exception handler, then there is // no reason to search for one - if (compilation()->has_exception_handlers() || compilation()->env()->jvmti_can_post_exceptions()) { + if (compilation()->has_exception_handlers() || compilation()->env()->jvmti_can_post_on_exceptions()) { // the exception oop and pc are in rax, and rdx // no other registers need to be preserved, so invalidate them __ invalidate_registers(false, true, true, false, true, true); diff -r c09ee209b65c -r 7b4415a18c8a src/cpu/x86/vm/templateTable_x86_64.cpp --- a/src/cpu/x86/vm/templateTable_x86_64.cpp Fri Feb 12 10:34:11 2010 -0800 +++ b/src/cpu/x86/vm/templateTable_x86_64.cpp Fri Feb 12 15:27:36 2010 -0800 @@ -3238,17 +3238,19 @@ __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code) __ store_klass_gap(rax, rcx); // zero klass gap for compressed oops __ store_klass(rax, rsi); // store klass last + + { + SkipIfEqual skip(_masm, &DTraceAllocProbes, false); + // Trigger dtrace event for fastpath + __ push(atos); // save the return value + __ call_VM_leaf( + CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax); + __ pop(atos); // restore the return value + + } __ jmp(done); } - { - SkipIfEqual skip(_masm, &DTraceAllocProbes, false); - // Trigger dtrace event for fastpath - __ push(atos); // save the return value - __ call_VM_leaf( - CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax); - __ pop(atos); // restore the return value - } // slow case __ bind(slow_case); diff -r c09ee209b65c -r 7b4415a18c8a src/os/windows/vm/os_windows.cpp --- a/src/os/windows/vm/os_windows.cpp Fri Feb 12 10:34:11 2010 -0800 +++ b/src/os/windows/vm/os_windows.cpp Fri Feb 12 15:27:36 2010 -0800 @@ -1,5 +1,5 @@ /* - * Copyright 1997-2009 Sun 
Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -142,6 +142,9 @@ } #ifndef _WIN64 +// previous UnhandledExceptionFilter, if there is one +static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL; + LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo); #endif void os::init_system_properties_values() { @@ -260,7 +263,8 @@ } #ifndef _WIN64 - SetUnhandledExceptionFilter(Handle_FLT_Exception); + // set our UnhandledExceptionFilter and save any previous one + prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception); #endif // Done @@ -1969,7 +1973,7 @@ #ifndef _WIN64 //----------------------------------------------------------------------------- LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) { - // handle exception caused by native mothod modifying control word + // handle exception caused by native method modifying control word PCONTEXT ctx = exceptionInfo->ContextRecord; DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; @@ -1990,6 +1994,13 @@ return EXCEPTION_CONTINUE_EXECUTION; } } + + if (prev_uef_handler != NULL) { + // We didn't handle this exception so pass it to the previous + // UnhandledExceptionFilter. + return (prev_uef_handler)(exceptionInfo); + } + return EXCEPTION_CONTINUE_SEARCH; } #else //_WIN64 diff -r c09ee209b65c -r 7b4415a18c8a src/share/vm/c1/c1_LIRGenerator.cpp --- a/src/share/vm/c1/c1_LIRGenerator.cpp Fri Feb 12 10:34:11 2010 -0800 +++ b/src/share/vm/c1/c1_LIRGenerator.cpp Fri Feb 12 15:27:36 2010 -0800 @@ -1,5 +1,5 @@ /* - * Copyright 2005-2009 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2005-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1765,7 +1765,7 @@ __ null_check(exception_opr, new CodeEmitInfo(info, true)); } - if (compilation()->env()->jvmti_can_post_exceptions() && + if (compilation()->env()->jvmti_can_post_on_exceptions() && !block()->is_set(BlockBegin::default_exception_handler_flag)) { // we need to go through the exception lookup path to get JVMTI // notification done diff -r c09ee209b65c -r 7b4415a18c8a src/share/vm/c1/c1_Runtime1.cpp --- a/src/share/vm/c1/c1_Runtime1.cpp Fri Feb 12 10:34:11 2010 -0800 +++ b/src/share/vm/c1/c1_Runtime1.cpp Fri Feb 12 15:27:36 2010 -0800 @@ -1,5 +1,5 @@ /* - * Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -110,8 +110,8 @@ RegisterMap reg_map(thread, false); frame runtime_frame = thread->last_frame(); frame caller_frame = runtime_frame.sender(®_map); - VM_DeoptimizeFrame deopt(thread, caller_frame.id()); - VMThread::execute(&deopt); + // bypass VM_DeoptimizeFrame and deoptimize the frame directly + Deoptimization::deoptimize_frame(thread, caller_frame.id()); assert(caller_is_deopted(), "Must be deoptimized"); } } @@ -354,7 +354,7 @@ JRT_ENTRY(void, Runtime1::post_jvmti_exception_throw(JavaThread* thread)) - if (JvmtiExport::can_post_exceptions()) { + if (JvmtiExport::can_post_on_exceptions()) { vframeStream vfst(thread, true); address bcp = vfst.method()->bcp_from(vfst.bci()); JvmtiExport::post_exception_throw(thread, vfst.method(), bcp, thread->exception_oop()); @@ -437,7 +437,7 @@ bool guard_pages_enabled = thread->stack_yellow_zone_enabled(); if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack(); - if (JvmtiExport::can_post_exceptions()) { + if (JvmtiExport::can_post_on_exceptions()) { // To ensure correct notification of exception catches and throws // we have to deoptimize here. If we attempted to notify the // catches and throws during this exception lookup it's possible @@ -1075,6 +1075,7 @@ }; +// Below length is the # elements copied. template int obj_arraycopy_work(oopDesc* src, T* src_addr, oopDesc* dst, T* dst_addr, int length) { @@ -1083,22 +1084,22 @@ // barrier. The assert will fail if this is not the case. // Note that we use the non-virtual inlineable variant of write_ref_array. BarrierSet* bs = Universe::heap()->barrier_set(); - assert(bs->has_write_ref_array_opt(), - "Barrier set must have ref array opt"); + assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt"); + assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well."); if (src == dst) { // same object, no check + bs->write_ref_array_pre(dst_addr, length); Copy::conjoint_oops_atomic(src_addr, dst_addr, length); - bs->write_ref_array(MemRegion((HeapWord*)dst_addr, - (HeapWord*)(dst_addr + length))); + bs->write_ref_array((HeapWord*)dst_addr, length); return ac_ok; } else { klassOop bound = objArrayKlass::cast(dst->klass())->element_klass(); klassOop stype = objArrayKlass::cast(src->klass())->element_klass(); if (stype == bound || Klass::cast(stype)->is_subtype_of(bound)) { // Elements are guaranteed to be subtypes, so no check necessary + bs->write_ref_array_pre(dst_addr, length); Copy::conjoint_oops_atomic(src_addr, dst_addr, length); - bs->write_ref_array(MemRegion((HeapWord*)dst_addr, - (HeapWord*)(dst_addr + length))); + bs->write_ref_array((HeapWord*)dst_addr, length); return ac_ok; } } @@ -1162,9 +1163,16 @@ #endif if (num == 0) return; + BarrierSet* bs = Universe::heap()->barrier_set(); + assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt"); + assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well."); + if (UseCompressedOops) { + bs->write_ref_array_pre((narrowOop*)dst, num); + } else { + bs->write_ref_array_pre((oop*)dst, num); + } Copy::conjoint_oops_atomic((oop*) src, (oop*) dst, num); - BarrierSet* bs = Universe::heap()->barrier_set(); - bs->write_ref_array(MemRegion(dst, dst + num)); + bs->write_ref_array(dst, num); JRT_END diff -r c09ee209b65c -r 7b4415a18c8a src/share/vm/ci/ciEnv.cpp --- a/src/share/vm/ci/ciEnv.cpp Fri Feb 12 10:34:11 2010 -0800 +++ b/src/share/vm/ci/ciEnv.cpp Fri Feb 12 15:27:36 2010 -0800 @@ -1,5 +1,5 @@ /* - * Copyright 
1999-2009 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -178,7 +178,7 @@ _jvmti_can_hotswap_or_post_breakpoint = JvmtiExport::can_hotswap_or_post_breakpoint(); _jvmti_can_examine_or_deopt_anywhere = JvmtiExport::can_examine_or_deopt_anywhere(); _jvmti_can_access_local_variables = JvmtiExport::can_access_local_variables(); - _jvmti_can_post_exceptions = JvmtiExport::can_post_exceptions(); + _jvmti_can_post_on_exceptions = JvmtiExport::can_post_on_exceptions(); } // ------------------------------------------------------------------ @@ -891,8 +891,8 @@ JvmtiExport::can_examine_or_deopt_anywhere()) || (!jvmti_can_access_local_variables() && JvmtiExport::can_access_local_variables()) || - (!jvmti_can_post_exceptions() && - JvmtiExport::can_post_exceptions()) )) { + (!jvmti_can_post_on_exceptions() && + JvmtiExport::can_post_on_exceptions()) )) { record_failure("Jvmti state change invalidated dependencies"); } diff -r c09ee209b65c -r 7b4415a18c8a src/share/vm/ci/ciEnv.hpp --- a/src/share/vm/ci/ciEnv.hpp Fri Feb 12 10:34:11 2010 -0800 +++ b/src/share/vm/ci/ciEnv.hpp Fri Feb 12 15:27:36 2010 -0800 @@ -1,5 +1,5 @@ /* - * Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -57,7 +57,7 @@ bool _jvmti_can_hotswap_or_post_breakpoint; bool _jvmti_can_examine_or_deopt_anywhere; bool _jvmti_can_access_local_variables; - bool _jvmti_can_post_exceptions; + bool _jvmti_can_post_on_exceptions; // Cache DTrace flags bool _dtrace_extended_probes; @@ -259,7 +259,7 @@ bool jvmti_can_hotswap_or_post_breakpoint() const { return _jvmti_can_hotswap_or_post_breakpoint; } bool jvmti_can_examine_or_deopt_anywhere() const { return _jvmti_can_examine_or_deopt_anywhere; } bool jvmti_can_access_local_variables() const { return _jvmti_can_access_local_variables; } - bool jvmti_can_post_exceptions() const { return _jvmti_can_post_exceptions; } + bool jvmti_can_post_on_exceptions() const { return _jvmti_can_post_on_exceptions; } // Cache DTrace flags void cache_dtrace_flags(); diff -r c09ee209b65c -r 7b4415a18c8a src/share/vm/classfile/javaClasses.cpp --- a/src/share/vm/classfile/javaClasses.cpp Fri Feb 12 10:34:11 2010 -0800 +++ b/src/share/vm/classfile/javaClasses.cpp Fri Feb 12 15:27:36 2010 -0800 @@ -1121,10 +1121,23 @@ } void flush() { + // The following appears to have been an optimization to save from + // doing a barrier for each individual store into the _methods array, + // but rather to do it for the entire array after the series of writes. + // That optimization seems to have been lost when compressed oops was + // implemented. However, the extra card-marks below was left in place, + // but is now redundant because the individual stores into the + // _methods array already execute the barrier code. CR 6918185 has + // been filed so the original code may be restored by deferring the + // barriers until after the entire sequence of stores, thus re-enabling + // the intent of the original optimization. In the meantime the redundant + // card mark below is now disabled. 
if (_dirty && _methods != NULL) { +#if 0 BarrierSet* bs = Universe::heap()->barrier_set(); assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt"); bs->write_ref_array((HeapWord*)_methods->base(), _methods->length()); +#endif _dirty = false; } } @@ -1168,9 +1181,7 @@ method = mhandle(); } - _methods->obj_at_put(_index, method); - // bad for UseCompressedOops - // *_methods->obj_at_addr(_index) = method; + _methods->obj_at_put(_index, method); _bcis->ushort_at_put(_index, bci); _index++; _dirty = true; diff -r c09ee209b65c -r 7b4415a18c8a src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp --- a/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp Fri Feb 12 10:34:11 2010 -0800 +++ b/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp Fri Feb 12 15:27:36 2010 -0800 @@ -300,7 +300,23 @@ int count; jbyte* cached_ptr = add_card_count(card_ptr, &count, defer); assert(cached_ptr != NULL, "bad cached card ptr"); - assert(!is_young_card(cached_ptr), "shouldn't get a card in young region"); + + if (is_young_card(cached_ptr)) { + // The region containing cached_ptr has been freed during a clean up + // pause, reallocated, and tagged as young. + assert(cached_ptr != card_ptr, "shouldn't be"); + + // We've just inserted a new old-gen card pointer into the card count + // cache and evicted the previous contents of that count slot. + // The evicted card pointer has been determined to be in a young region + // and so cannot be the newly inserted card pointer (that will be + // in an old region). + // The count for newly inserted card will be set to zero during the + // insertion, so we don't want to defer the cleaning of the newly + // inserted card pointer. + assert(*defer == false, "deferring non-hot card"); + return NULL; + } // The card pointer we obtained from card count cache is not hot // so do not store it in the cache; return it for immediate diff -r c09ee209b65c -r 7b4415a18c8a src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Fri Feb 12 10:34:11 2010 -0800 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Fri Feb 12 15:27:36 2010 -0800 @@ -2505,6 +2505,7 @@ } void G1CollectedHeap::gc_prologue(bool full /* Ignored */) { + // always_do_update_barrier = false; assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer"); // Call allocation profiler AllocationProfiler::iterate_since_last_gc(); @@ -2518,6 +2519,7 @@ // is set. COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(), "derived pointer present")); + // always_do_update_barrier = true; } void G1CollectedHeap::do_collection_pause() { diff -r c09ee209b65c -r 7b4415a18c8a src/share/vm/includeDB_core --- a/src/share/vm/includeDB_core Fri Feb 12 10:34:11 2010 -0800 +++ b/src/share/vm/includeDB_core Fri Feb 12 15:27:36 2010 -0800 @@ -175,6 +175,7 @@ arguments.cpp management.hpp arguments.cpp oop.inline.hpp arguments.cpp os_.inline.hpp +arguments.cpp referenceProcessor.hpp arguments.cpp universe.inline.hpp arguments.cpp vm_version_.hpp diff -r c09ee209b65c -r 7b4415a18c8a src/share/vm/interpreter/interpreterRuntime.cpp --- a/src/share/vm/interpreter/interpreterRuntime.cpp Fri Feb 12 10:34:11 2010 -0800 +++ b/src/share/vm/interpreter/interpreterRuntime.cpp Fri Feb 12 15:27:36 2010 -0800 @@ -1,5 +1,5 @@ /* - * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -397,7 +397,7 @@ // notify JVMTI of an exception throw; JVMTI will detect if this is a first // time throw or a stack unwinding throw and accordingly notify the debugger - if (JvmtiExport::can_post_exceptions()) { + if (JvmtiExport::can_post_on_exceptions()) { JvmtiExport::post_exception_throw(thread, h_method(), bcp(thread), h_exception()); } @@ -426,7 +426,7 @@ } // notify debugger of an exception catch // (this is good for exceptions caught in native methods as well) - if (JvmtiExport::can_post_exceptions()) { + if (JvmtiExport::can_post_on_exceptions()) { JvmtiExport::notice_unwind_due_to_exception(thread, h_method(), handler_pc, h_exception(), (handler_pc != NULL)); } diff -r c09ee209b65c -r 7b4415a18c8a src/share/vm/memory/barrierSet.hpp --- a/src/share/vm/memory/barrierSet.hpp Fri Feb 12 10:34:11 2010 -0800 +++ b/src/share/vm/memory/barrierSet.hpp Fri Feb 12 15:27:36 2010 -0800 @@ -124,8 +124,6 @@ // Below length is the # array elements being written virtual void write_ref_array_pre( oop* dst, int length) {} virtual void write_ref_array_pre(narrowOop* dst, int length) {} - // Below MemRegion mr is expected to be HeapWord-aligned - inline void write_ref_array(MemRegion mr); // Below count is the # array elements being written, starting // at the address "start", which may not necessarily be HeapWord-aligned inline void write_ref_array(HeapWord* start, size_t count); diff -r c09ee209b65c -r 7b4415a18c8a src/share/vm/memory/barrierSet.inline.hpp --- a/src/share/vm/memory/barrierSet.inline.hpp Fri Feb 12 10:34:11 2010 -0800 +++ b/src/share/vm/memory/barrierSet.inline.hpp Fri Feb 12 15:27:36 2010 -0800 @@ -42,16 +42,6 @@ } } -void BarrierSet::write_ref_array(MemRegion mr) { - assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start() , "Unaligned start"); - assert((HeapWord*)align_size_up ((uintptr_t)mr.end(), HeapWordSize) == mr.end(), "Unaligned end" ); - if (kind() == CardTableModRef) { - ((CardTableModRefBS*)this)->inline_write_ref_array(mr); - } else { - write_ref_array_work(mr); - } -} - // count is number of array elements being written void BarrierSet::write_ref_array(HeapWord* start, size_t count) { assert(count <= (size_t)max_intx, "count too large"); @@ -61,12 +51,12 @@ // strictly necessary for current uses, but a case of good hygiene and, // if you will, aesthetics) and the second upward (this is essential for // current uses) to a HeapWord boundary, so we mark all cards overlapping - // this write. In the event that this evolves in the future to calling a + // this write. If this evolves in the future to calling a // logging barrier of narrow oop granularity, like the pre-barrier for G1 // (mentioned here merely by way of example), we will need to change this - // interface, much like the pre-barrier one above, so it is "exactly precise" - // (if i may be allowed the adverbial redundancy for emphasis) and does not - // include narrow oop slots not included in the original write interval. + // interface, so it is "exactly precise" (if i may be allowed the adverbial + // redundancy for emphasis) and does not include narrow oop slots not + // included in the original write interval. 
HeapWord* aligned_start = (HeapWord*)align_size_down((uintptr_t)start, HeapWordSize); HeapWord* aligned_end = (HeapWord*)align_size_up ((uintptr_t)end, HeapWordSize); // If compressed oops were not being used, these should already be aligned diff -r c09ee209b65c -r 7b4415a18c8a src/share/vm/memory/referenceProcessor.hpp --- a/src/share/vm/memory/referenceProcessor.hpp Fri Feb 12 10:34:11 2010 -0800 +++ b/src/share/vm/memory/referenceProcessor.hpp Fri Feb 12 15:27:36 2010 -0800 @@ -263,10 +263,13 @@ int parallel_gc_threads = 1, bool mt_processing = false, bool discovered_list_needs_barrier = false); + // RefDiscoveryPolicy values - enum { + enum DiscoveryPolicy { ReferenceBasedDiscovery = 0, - ReferentBasedDiscovery = 1 + ReferentBasedDiscovery = 1, + DiscoveryPolicyMin = ReferenceBasedDiscovery, + DiscoveryPolicyMax = ReferentBasedDiscovery }; static void init_statics(); diff -r c09ee209b65c -r 7b4415a18c8a src/share/vm/opto/graphKit.cpp --- a/src/share/vm/opto/graphKit.cpp Fri Feb 12 10:34:11 2010 -0800 +++ b/src/share/vm/opto/graphKit.cpp Fri Feb 12 15:27:36 2010 -0800 @@ -455,16 +455,44 @@ return Bytecodes::_illegal; } +void GraphKit::uncommon_trap_if_should_post_on_exceptions(Deoptimization::DeoptReason reason, + bool must_throw) { + // if the exception capability is set, then we will generate code + // to check the JavaThread.should_post_on_exceptions flag to see + // if we actually need to report exception events (for this + // thread). If we don't need to report exception events, we will + // take the normal fast path provided by add_exception_events. If + // exception event reporting is enabled for this thread, we will + // take the uncommon_trap in the BuildCutout below. + + // first must access the should_post_on_exceptions_flag in this thread's JavaThread + Node* jthread = _gvn.transform(new (C, 1) ThreadLocalNode()); + Node* adr = basic_plus_adr(top(), jthread, in_bytes(JavaThread::should_post_on_exceptions_flag_offset())); + Node* should_post_flag = make_load(control(), adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw, false); + + // Test the should_post_on_exceptions_flag vs. 0 + Node* chk = _gvn.transform( new (C, 3) CmpINode(should_post_flag, intcon(0)) ); + Node* tst = _gvn.transform( new (C, 2) BoolNode(chk, BoolTest::eq) ); + + // Branch to slow_path if should_post_on_exceptions_flag was true + { BuildCutout unless(this, tst, PROB_MAX); + // Do not try anything fancy if we're notifying the VM on every throw. + // Cf. case Bytecodes::_athrow in parse2.cpp. + uncommon_trap(reason, Deoptimization::Action_none, + (ciKlass*)NULL, (char*)NULL, must_throw); + } + +} + //------------------------------builtin_throw---------------------------------- void GraphKit::builtin_throw(Deoptimization::DeoptReason reason, Node* arg) { bool must_throw = true; - if (env()->jvmti_can_post_exceptions()) { - // Do not try anything fancy if we're notifying the VM on every throw. - // Cf. case Bytecodes::_athrow in parse2.cpp. 
- uncommon_trap(reason, Deoptimization::Action_none, - (ciKlass*)NULL, (char*)NULL, must_throw); - return; + if (env()->jvmti_can_post_on_exceptions()) { + // check if we must post exception events, take uncommon trap if so + uncommon_trap_if_should_post_on_exceptions(reason, must_throw); + // here if should_post_on_exceptions is false + // continue on with the normal codegen } // If this particular condition has not yet happened at this diff -r c09ee209b65c -r 7b4415a18c8a src/share/vm/opto/graphKit.hpp --- a/src/share/vm/opto/graphKit.hpp Fri Feb 12 10:34:11 2010 -0800 +++ b/src/share/vm/opto/graphKit.hpp Fri Feb 12 15:27:36 2010 -0800 @@ -1,5 +1,5 @@ /* - * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -251,6 +251,11 @@ // via an uncommon trap. void builtin_throw(Deoptimization::DeoptReason reason, Node* arg = NULL); + // Helper to check the JavaThread::_should_post_on_exceptions flag + // and branch to an uncommon_trap if it is true (with the specified reason and must_throw) + void uncommon_trap_if_should_post_on_exceptions(Deoptimization::DeoptReason reason, + bool must_throw) ; + // Helper Functions for adding debug information void kill_dead_locals(); #ifdef ASSERT diff -r c09ee209b65c -r 7b4415a18c8a src/share/vm/opto/parse2.cpp --- a/src/share/vm/opto/parse2.cpp Fri Feb 12 10:34:11 2010 -0800 +++ b/src/share/vm/opto/parse2.cpp Fri Feb 12 15:27:36 2010 -0800 @@ -1,5 +1,5 @@ /* - * Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1998-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -2079,13 +2079,6 @@ // null exception oop throws NULL pointer exception do_null_check(peek(), T_OBJECT); if (stopped()) return; - if (env()->jvmti_can_post_exceptions()) { - // "Full-speed throwing" is not necessary here, - // since we're notifying the VM on every throw. - uncommon_trap(Deoptimization::Reason_unhandled, - Deoptimization::Action_none); - return; - } // Hook the thrown exception directly to subsequent handlers. if (BailoutToInterpreterForThrows) { // Keep method interpreted from now on. @@ -2093,6 +2086,11 @@ Deoptimization::Action_make_not_compilable); return; } + if (env()->jvmti_can_post_on_exceptions()) { + // check if we must post exception events, take uncommon trap if so (with must_throw = false) + uncommon_trap_if_should_post_on_exceptions(Deoptimization::Reason_unhandled, false); + } + // Here if either can_post_on_exceptions or should_post_on_exceptions is false add_exception_state(make_exception_state(peek())); break; diff -r c09ee209b65c -r 7b4415a18c8a src/share/vm/opto/runtime.cpp --- a/src/share/vm/opto/runtime.cpp Fri Feb 12 10:34:11 2010 -0800 +++ b/src/share/vm/opto/runtime.cpp Fri Feb 12 15:27:36 2010 -0800 @@ -815,7 +815,7 @@ // we are switching to old paradigm: search for exception handler in caller_frame // instead in exception handler of caller_frame.sender() - if (JvmtiExport::can_post_exceptions()) { + if (JvmtiExport::can_post_on_exceptions()) { // "Full-speed catching" is not necessary here, // since we're notifying the VM on every catch. 
// Force deoptimization and the rest of the lookup @@ -980,8 +980,8 @@ assert(stub_frame.is_runtime_frame() || exception_blob()->contains(stub_frame.pc()), "sanity check"); frame caller_frame = stub_frame.sender(®_map); - VM_DeoptimizeFrame deopt(thread, caller_frame.id()); - VMThread::execute(&deopt); + // bypass VM_DeoptimizeFrame and deoptimize the frame directly + Deoptimization::deoptimize_frame(thread, caller_frame.id()); } } diff -r c09ee209b65c -r 7b4415a18c8a src/share/vm/prims/jvmtiEventController.cpp --- a/src/share/vm/prims/jvmtiEventController.cpp Fri Feb 12 10:34:11 2010 -0800 +++ b/src/share/vm/prims/jvmtiEventController.cpp Fri Feb 12 15:27:36 2010 -0800 @@ -1,5 +1,5 @@ /* - * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -82,7 +82,7 @@ THREAD_START_BIT | THREAD_END_BIT | DYNAMIC_CODE_GENERATED_BIT; static const jlong GLOBAL_EVENT_BITS = ~THREAD_FILTERED_EVENT_BITS; - +static const jlong SHOULD_POST_ON_EXCEPTIONS_BITS = EXCEPTION_BITS | METHOD_EXIT_BIT | FRAME_POP_BIT; /////////////////////////////////////////////////////////////// // @@ -511,7 +511,12 @@ leave_interp_only_mode(state); } } + + // update the JavaThread cached value for thread-specific should_post_on_exceptions value + bool should_post_on_exceptions = (any_env_enabled & SHOULD_POST_ON_EXCEPTIONS_BITS) != 0; + state->set_should_post_on_exceptions(should_post_on_exceptions); } + return any_env_enabled; } @@ -615,6 +620,10 @@ // set global truly enabled, that is, any thread in any environment JvmtiEventController::_universal_global_event_enabled.set_bits(any_env_thread_enabled); + + // set global should_post_on_exceptions + JvmtiExport::set_should_post_on_exceptions((any_env_thread_enabled & SHOULD_POST_ON_EXCEPTIONS_BITS) != 0); + } EC_TRACE(("JVMTI [-] # recompute enabled - after %llx", any_env_thread_enabled)); diff -r c09ee209b65c -r 7b4415a18c8a src/share/vm/prims/jvmtiExport.cpp --- a/src/share/vm/prims/jvmtiExport.cpp Fri Feb 12 10:34:11 2010 -0800 +++ b/src/share/vm/prims/jvmtiExport.cpp Fri Feb 12 15:27:36 2010 -0800 @@ -877,7 +877,7 @@ bool JvmtiExport::_can_get_source_debug_extension = false; bool JvmtiExport::_can_maintain_original_method_order = false; bool JvmtiExport::_can_post_interpreter_events = false; -bool JvmtiExport::_can_post_exceptions = false; +bool JvmtiExport::_can_post_on_exceptions = false; bool JvmtiExport::_can_post_breakpoint = false; bool JvmtiExport::_can_post_field_access = false; bool JvmtiExport::_can_post_field_modification = false; @@ -908,6 +908,7 @@ bool JvmtiExport::_should_post_object_free = false; bool JvmtiExport::_should_post_resource_exhausted = false; bool JvmtiExport::_should_post_vm_object_alloc = false; +bool JvmtiExport::_should_post_on_exceptions = false; //////////////////////////////////////////////////////////////////////////////////////////////// diff -r c09ee209b65c -r 7b4415a18c8a src/share/vm/prims/jvmtiExport.hpp --- a/src/share/vm/prims/jvmtiExport.hpp Fri Feb 12 10:34:11 2010 -0800 +++ b/src/share/vm/prims/jvmtiExport.hpp Fri Feb 12 15:27:36 2010 -0800 @@ -1,5 +1,5 @@ /* - * Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1998-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -66,7 +66,7 @@ JVMTI_SUPPORT_FLAG(can_get_source_debug_extension) JVMTI_SUPPORT_FLAG(can_maintain_original_method_order) JVMTI_SUPPORT_FLAG(can_post_interpreter_events) - JVMTI_SUPPORT_FLAG(can_post_exceptions) + JVMTI_SUPPORT_FLAG(can_post_on_exceptions) JVMTI_SUPPORT_FLAG(can_post_breakpoint) JVMTI_SUPPORT_FLAG(can_post_field_access) JVMTI_SUPPORT_FLAG(can_post_field_modification) @@ -93,6 +93,7 @@ JVMTI_SUPPORT_FLAG(should_post_data_dump) JVMTI_SUPPORT_FLAG(should_post_garbage_collection_start) JVMTI_SUPPORT_FLAG(should_post_garbage_collection_finish) + JVMTI_SUPPORT_FLAG(should_post_on_exceptions) // ------ the below maybe don't have to be (but are for now) // fixed conditions here ------------ diff -r c09ee209b65c -r 7b4415a18c8a src/share/vm/prims/jvmtiManageCapabilities.cpp --- a/src/share/vm/prims/jvmtiManageCapabilities.cpp Fri Feb 12 10:34:11 2010 -0800 +++ b/src/share/vm/prims/jvmtiManageCapabilities.cpp Fri Feb 12 15:27:36 2010 -0800 @@ -1,5 +1,5 @@ /* - * Copyright 2003-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -357,7 +357,7 @@ avail.can_access_local_variables || avail.can_redefine_classes || avail.can_retransform_classes); - JvmtiExport::set_can_post_exceptions( + JvmtiExport::set_can_post_on_exceptions( avail.can_generate_exception_events || avail.can_generate_frame_pop_events || avail.can_generate_method_exit_events); diff -r c09ee209b65c -r 7b4415a18c8a src/share/vm/prims/jvmtiThreadState.hpp --- a/src/share/vm/prims/jvmtiThreadState.hpp Fri Feb 12 10:34:11 2010 -0800 +++ b/src/share/vm/prims/jvmtiThreadState.hpp Fri Feb 12 15:27:36 2010 -0800 @@ -1,5 +1,5 @@ /* - * Copyright 2003-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -381,6 +381,9 @@ static ByteSize earlyret_value_offset() { return byte_offset_of(JvmtiThreadState, _earlyret_value); } void oops_do(OopClosure* f); // GC support + +public: + void set_should_post_on_exceptions(bool val) { _thread->set_should_post_on_exceptions_flag(val ? JNI_TRUE : JNI_FALSE); } }; class RedefineVerifyMark : public StackObj { diff -r c09ee209b65c -r 7b4415a18c8a src/share/vm/runtime/arguments.cpp --- a/src/share/vm/runtime/arguments.cpp Fri Feb 12 10:34:11 2010 -0800 +++ b/src/share/vm/runtime/arguments.cpp Fri Feb 12 15:27:36 2010 -0800 @@ -1487,6 +1487,20 @@ //=========================================================================================================== // Parsing of main arguments +bool Arguments::verify_interval(uintx val, uintx min, + uintx max, const char* name) { + // Returns true iff value is in the inclusive interval [min..max] + // false, otherwise. 
+ if (val >= min && val <= max) { + return true; + } + jio_fprintf(defaultStream::error_stream(), + "%s of " UINTX_FORMAT " is invalid; must be between " UINTX_FORMAT + " and " UINTX_FORMAT "\n", + name, val, min, max); + return false; +} + bool Arguments::verify_percentage(uintx value, const char* name) { if (value <= 100) { return true; @@ -1723,6 +1737,16 @@ status = false; } + status = status && verify_interval(RefDiscoveryPolicy, + ReferenceProcessor::DiscoveryPolicyMin, + ReferenceProcessor::DiscoveryPolicyMax, + "RefDiscoveryPolicy"); + + // Limit the lower bound of this flag to 1 as it is used in a division + // expression. + status = status && verify_interval(TLABWasteTargetPercent, + 1, 100, "TLABWasteTargetPercent"); + return status; } diff -r c09ee209b65c -r 7b4415a18c8a src/share/vm/runtime/arguments.hpp --- a/src/share/vm/runtime/arguments.hpp Fri Feb 12 10:34:11 2010 -0800 +++ b/src/share/vm/runtime/arguments.hpp Fri Feb 12 15:27:36 2010 -0800 @@ -336,6 +336,8 @@ static bool is_bad_option(const JavaVMOption* option, jboolean ignore) { return is_bad_option(option, ignore, NULL); } + static bool verify_interval(uintx val, uintx min, + uintx max, const char* name); static bool verify_percentage(uintx value, const char* name); static void describe_range_error(ArgsRange errcode); static ArgsRange check_memory_size(julong size, julong min_size); diff -r c09ee209b65c -r 7b4415a18c8a src/share/vm/runtime/sharedRuntime.cpp --- a/src/share/vm/runtime/sharedRuntime.cpp Fri Feb 12 10:34:11 2010 -0800 +++ b/src/share/vm/runtime/sharedRuntime.cpp Fri Feb 12 15:27:36 2010 -0800 @@ -364,7 +364,7 @@ void SharedRuntime::throw_and_post_jvmti_exception(JavaThread *thread, Handle h_exception) { - if (JvmtiExport::can_post_exceptions()) { + if (JvmtiExport::can_post_on_exceptions()) { vframeStream vfst(thread, true); methodHandle method = methodHandle(thread, vfst.method()); address bcp = method()->bcp_from(vfst.bci()); diff -r c09ee209b65c -r 7b4415a18c8a src/share/vm/runtime/stubRoutines.cpp --- a/src/share/vm/runtime/stubRoutines.cpp Fri Feb 12 10:34:11 2010 -0800 +++ b/src/share/vm/runtime/stubRoutines.cpp Fri Feb 12 15:27:36 2010 -0800 @@ -196,11 +196,19 @@ // Default versions of arraycopy functions // +static void gen_arraycopy_barrier_pre(oop* dest, size_t count) { + assert(count != 0, "count should be non-zero"); + assert(count <= (size_t)max_intx, "count too large"); + BarrierSet* bs = Universe::heap()->barrier_set(); + assert(bs->has_write_ref_array_pre_opt(), "Must have pre-barrier opt"); + bs->write_ref_array_pre(dest, (int)count); +} + static void gen_arraycopy_barrier(oop* dest, size_t count) { assert(count != 0, "count should be non-zero"); BarrierSet* bs = Universe::heap()->barrier_set(); assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt"); - bs->write_ref_array(MemRegion((HeapWord*)dest, (HeapWord*)(dest + count))); + bs->write_ref_array((HeapWord*)dest, count); } JRT_LEAF(void, StubRoutines::jbyte_copy(jbyte* src, jbyte* dest, size_t count)) @@ -240,6 +248,7 @@ SharedRuntime::_oop_array_copy_ctr++; // Slow-path oop array copy #endif // !PRODUCT assert(count != 0, "count should be non-zero"); + gen_arraycopy_barrier_pre(dest, count); Copy::conjoint_oops_atomic(src, dest, count); gen_arraycopy_barrier(dest, count); JRT_END @@ -281,6 +290,7 @@ SharedRuntime::_oop_array_copy_ctr++; // Slow-path oop array copy #endif // !PRODUCT assert(count != 0, "count should be non-zero"); + gen_arraycopy_barrier_pre((oop *) dest, count); Copy::arrayof_conjoint_oops(src, 
dest, count); gen_arraycopy_barrier((oop *) dest, count); JRT_END diff -r c09ee209b65c -r 7b4415a18c8a src/share/vm/runtime/thread.cpp --- a/src/share/vm/runtime/thread.cpp Fri Feb 12 10:34:11 2010 -0800 +++ b/src/share/vm/runtime/thread.cpp Fri Feb 12 15:27:36 2010 -0800 @@ -1,5 +1,5 @@ /* - * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1173,6 +1173,7 @@ _exception_handler_pc = 0; _exception_stack_size = 0; _jvmti_thread_state= NULL; + _should_post_on_exceptions_flag = JNI_FALSE; _jvmti_get_loaded_classes_closure = NULL; _interp_only_mode = 0; _special_runtime_exit_condition = _no_async_condition; diff -r c09ee209b65c -r 7b4415a18c8a src/share/vm/runtime/thread.hpp --- a/src/share/vm/runtime/thread.hpp Fri Feb 12 10:34:11 2010 -0800 +++ b/src/share/vm/runtime/thread.hpp Fri Feb 12 15:27:36 2010 -0800 @@ -1,5 +1,5 @@ /* - * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1193,6 +1193,9 @@ static ByteSize suspend_flags_offset() { return byte_offset_of(JavaThread, _suspend_flags ); } static ByteSize do_not_unlock_if_synchronized_offset() { return byte_offset_of(JavaThread, _do_not_unlock_if_synchronized); } + static ByteSize should_post_on_exceptions_flag_offset() { + return byte_offset_of(JavaThread, _should_post_on_exceptions_flag); + } #ifndef SERIALGC static ByteSize satb_mark_queue_offset() { return byte_offset_of(JavaThread, _satb_mark_queue); } @@ -1432,6 +1435,16 @@ void increment_interp_only_mode() { ++_interp_only_mode; } void decrement_interp_only_mode() { --_interp_only_mode; } + // support for cached flag that indicates whether exceptions need to be posted for this thread + // if this is false, we can avoid deoptimizing when events are thrown + // this gets set to reflect whether jvmtiExport::post_exception_throw would actually do anything + private: + int _should_post_on_exceptions_flag; + + public: + int should_post_on_exceptions_flag() { return _should_post_on_exceptions_flag; } + void set_should_post_on_exceptions_flag(int val) { _should_post_on_exceptions_flag = val; } + private: ThreadStatistics *_thread_stat;
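
The sketches below restate, outside the patch, three of the patterns this changeset applies. They are simplified illustrations in plain C++, not HotSpot code; every type and function name that is not quoted from the diff above (ThreadLike, on_throw, my_filter, install_filter, GcBarriers, oop_arraycopy_sketch, and so on) is invented for the example.

1. Per-thread caching of the JVMTI "post on exceptions" decision. The changeset renames can_post_exceptions to can_post_on_exceptions and, instead of always taking the slow path when the capability is merely available, caches a per-thread int (_should_post_on_exceptions_flag) that the event controller keeps up to date and that the throw paths test before deoptimizing or posting events. A minimal sketch of that shape, with invented names:

    // "ThreadLike" and "on_throw" are illustrative stand-ins, not HotSpot types.
    struct ThreadLike {
      int should_post_on_exceptions;   // JNI_TRUE/JNI_FALSE-style cache, refreshed
                                       // whenever JVMTI event enablement changes
    };

    inline void on_throw(ThreadLike* self) {
      if (self->should_post_on_exceptions != 0) {
        // slow path: notify JVMTI (post the EXCEPTION event), possibly deoptimize
      } else {
        // fast path: ordinary exception dispatch, no VM notification
      }
    }

   Compiled code gets the same effect by loading the flag at a fixed offset from the thread (see should_post_on_exceptions_flag_offset() and uncommon_trap_if_should_post_on_exceptions in the diff), so a JVMTI-capable agent no longer penalizes threads that have exception events disabled.

2. Chaining the previous Windows unhandled-exception filter. os_windows.cpp now saves the filter returned by SetUnhandledExceptionFilter and forwards to it when Handle_FLT_Exception declines the exception. The Win32 pattern in isolation, with an invented filter name and a placeholder fix-up:

    #include <windows.h>

    static LPTOP_LEVEL_EXCEPTION_FILTER prev_filter = NULL;

    static LONG WINAPI my_filter(struct _EXCEPTION_POINTERS* info) {
      DWORD code = info->ExceptionRecord->ExceptionCode;
      if (code == EXCEPTION_FLT_INVALID_OPERATION) {
        // ...repair the context here (e.g. restore the x87 control word)...
        return EXCEPTION_CONTINUE_EXECUTION;   // re-run the faulting instruction
      }
      if (prev_filter != NULL) {
        return prev_filter(info);              // not ours: defer to the earlier filter
      }
      return EXCEPTION_CONTINUE_SEARCH;        // fall back to default handling
    }

    static void install_filter() {
      // SetUnhandledExceptionFilter returns the previously installed filter.
      prev_filter = SetUnhandledExceptionFilter(my_filter);
    }

3. Pre-barrier before the copy, post-barrier after, for oop array copies. The Runtime1 and StubRoutines slow paths now call write_ref_array_pre on the destination before the element copy and the length-based write_ref_array afterwards (the MemRegion overload of write_ref_array is removed). The ordering, reduced to a stand-alone shape with an invented barrier interface whose bodies are placeholders rather than the real G1/card-table implementations:

    #include <cstddef>
    #include <cstring>

    // "GcBarriers" mimics the two BarrierSet calls used in the diff.
    struct GcBarriers {
      void pre_write(void** dst, size_t n)  { (void)dst; (void)n; /* e.g. enqueue old values (SATB) */ }
      void post_write(void** dst, size_t n) { (void)dst; (void)n; /* e.g. dirty cards covering dst..dst+n */ }
    };

    static void oop_arraycopy_sketch(GcBarriers& bs, void** src, void** dst, size_t n) {
      if (n == 0) return;                        // the stubs also return early for a zero count
      bs.pre_write(dst, n);                      // pre-barrier must see the old destination values
      std::memmove(dst, src, n * sizeof(void*)); // the copy itself (conjoint, overlap-safe)
      bs.post_write(dst, n);                     // card-mark the written range once, after the stores
    }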