# HG changeset patch # User kvn # Date 1228409734 28800 # Node ID 1f54ed41d6ae10d273423493bb8ea3be24e552f6 # Parent 95cad1ab2510170fa3ca07cc17baff9c2c70995d # Parent 424f9bfe6b96f5ffa249d9a87a09224b0a691ed1 Merge diff -r 424f9bfe6b96 -r 1f54ed41d6ae .hgtags --- a/.hgtags Wed Dec 03 13:41:37 2008 -0800 +++ b/.hgtags Thu Dec 04 08:55:34 2008 -0800 @@ -14,3 +14,4 @@ 9ee9cf798b59e7d51f8c0a686959f313867a55d6 jdk7-b37 d9bc824aa078573829bb66572af847e26e1bd12e jdk7-b38 49ca90d77f34571b0757ebfcb8a7848ef2696b88 jdk7-b39 +81a0cbe3b28460ce836109934ece03db7afaf9cc jdk7-b40 diff -r 424f9bfe6b96 -r 1f54ed41d6ae make/hotspot_version --- a/make/hotspot_version Wed Dec 03 13:41:37 2008 -0800 +++ b/make/hotspot_version Thu Dec 04 08:55:34 2008 -0800 @@ -35,7 +35,7 @@ HS_MAJOR_VER=14 HS_MINOR_VER=0 -HS_BUILD_NUMBER=07 +HS_BUILD_NUMBER=08 JDK_MAJOR_VER=1 JDK_MINOR_VER=7 diff -r 424f9bfe6b96 -r 1f54ed41d6ae make/windows/build.make --- a/make/windows/build.make Wed Dec 03 13:41:37 2008 -0800 +++ b/make/windows/build.make Thu Dec 04 08:55:34 2008 -0800 @@ -200,29 +200,6 @@ checkSA:: @echo Not building SA: ARCH = ia64 -!elseif exist("$(MSVCDIR)\PlatformSDK\Include\dbgeng.h") -# These don't have to be set because the default -# setting of INCLUDE and LIB already contain the needed dirs. -SA_INCLUDE = -SA_LIB = - -!elseif exist("$(SYSTEMROOT)\..\Program Files\Microsoft SDK\include\dbgeng.h") -# These don't have to be set because the default -# setting of INCLUDE and LIB already contain the needed dirs. -SA_INCLUDE = -SA_LIB = - -!else -checkSA:: - @echo . - @echo ERROR: Can't build SA because dbgeng.h does not exist here: - @echo $(MSVCDIR)\PlatformSDK\Include\dbgeng.h - @echo nor here: - @echo $(SYSTEMROOT)\..\Program Files\Microsoft SDK\include\dbgeng.h - @echo You must use Vis. Studio .Net 2003 on Win 32, and you must - @echo have the Microsoft SDK installed on Win amd64. - @echo You can disable building of SA by specifying BUILD_WIN_SA = 0 - @echo . && false !endif # ! "$(BUILD_WIN_SA)" != "1" ######################################################################### diff -r 424f9bfe6b96 -r 1f54ed41d6ae make/windows/makefiles/defs.make --- a/make/windows/makefiles/defs.make Wed Dec 03 13:41:37 2008 -0800 +++ b/make/windows/makefiles/defs.make Thu Dec 04 08:55:34 2008 -0800 @@ -119,7 +119,7 @@ # we want to release it. If we build it here, # the SDK makefiles will copy it over and put it into # the created image. -BUILD_WIN_SA = 0 +BUILD_WIN_SA = 1 ifneq ($(ALT_BUILD_WIN_SA),) BUILD_WIN_SA = $(ALT_BUILD_WIN_SA) endif diff -r 424f9bfe6b96 -r 1f54ed41d6ae make/windows/makefiles/sa.make --- a/make/windows/makefiles/sa.make Wed Dec 03 13:41:37 2008 -0800 +++ b/make/windows/makefiles/sa.make Thu Dec 04 08:55:34 2008 -0800 @@ -49,6 +49,9 @@ default:: $(GENERATED)\sa-jdi.jar +# Remove the space between $(SA_BUILD_VERSION_PROP) and > below as it adds a space +# at the end of the SA version string and causes a version mismatch with the target VM version.
+ $(GENERATED)\sa-jdi.jar: $(AGENT_FILES1:/=\) $(AGENT_FILES2:/=\) @if not exist $(SA_CLASSDIR) mkdir $(SA_CLASSDIR) @echo ...Building sa-jdi.jar @@ -56,15 +59,15 @@ @$(COMPILE_JAVAC) -source 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -g -d $(SA_CLASSDIR) $(AGENT_FILES1:/=\) @$(COMPILE_JAVAC) -source 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -g -d $(SA_CLASSDIR) $(AGENT_FILES2:/=\) $(COMPILE_RMIC) -classpath $(SA_CLASSDIR) -d $(SA_CLASSDIR) sun.jvm.hotspot.debugger.remote.RemoteDebuggerServer - $(QUIETLY) echo $(SA_BUILD_VERSION_PROP) > $(SA_PROPERTIES) - $(RUN_JAR) cf $@ -C saclasses . - $(RUN_JAR) uf $@ -C $(AGENT_SRC_DIR:/=\) META-INF\services\com.sun.jdi.connect.Connector + $(QUIETLY) echo $(SA_BUILD_VERSION_PROP)> $(SA_PROPERTIES) $(QUIETLY) rm -f $(SA_CLASSDIR)/sun/jvm/hotspot/utilities/soql/sa.js $(QUIETLY) cp $(AGENT_SRC_DIR)/sun/jvm/hotspot/utilities/soql/sa.js $(SA_CLASSDIR)/sun/jvm/hotspot/utilities/soql - $(QUIETLY) mkdir -p $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources - $(QUIETLY) rm -f $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources/* - $(QUIETLY) cp $(AGENT_SRC_DIR)/sun/jvm/hotspot/ui/resources/*.png $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources/ - $(QUIETLY) cp -r $(AGENT_SRC_DIR)/images/* $(SA_CLASSDIR)/ + $(QUIETLY) rm -rf $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources + $(QUIETLY) mkdir $(SA_CLASSDIR)\sun\jvm\hotspot\ui\resources + $(QUIETLY) cp $(AGENT_SRC_DIR)/sun/jvm/hotspot/ui/resources/*.png $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources + $(QUIETLY) cp -r $(AGENT_SRC_DIR)/images/* $(SA_CLASSDIR) + $(RUN_JAR) cf $@ -C saclasses . + $(RUN_JAR) uf $@ -C $(AGENT_SRC_DIR:/=\) META-INF\services\com.sun.jdi.connect.Connector $(RUN_JAVAH) -classpath $(SA_CLASSDIR) -jni sun.jvm.hotspot.debugger.windbg.WindbgDebuggerLocal $(RUN_JAVAH) -classpath $(SA_CLASSDIR) -jni sun.jvm.hotspot.debugger.x86.X86ThreadContext $(RUN_JAVAH) -classpath $(SA_CLASSDIR) -jni sun.jvm.hotspot.debugger.ia64.IA64ThreadContext @@ -93,7 +96,7 @@ SA_CFLAGS = /nologo $(MS_RUNTIME_OPTION) /W3 /Gm $(GX_OPTION) /ZI /Od /D "WIN32" /D "_WINDOWS" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /GZ /c !endif !if "$(MT)" != "" - SA_LINK_FLAGS = /manifest $(SA_LINK_FLAGS) +SA_LINK_FLAGS = /manifest $(SA_LINK_FLAGS) !endif SASRCFILE = $(AGENT_DIR)/src/os/win32/windbg/sawindbg.cpp SA_LFLAGS = $(SA_LINK_FLAGS) /nologo /subsystem:console /map /debug /machine:$(MACHINE) diff -r 424f9bfe6b96 -r 1f54ed41d6ae src/cpu/sparc/vm/templateTable_sparc.cpp --- a/src/cpu/sparc/vm/templateTable_sparc.cpp Wed Dec 03 13:41:37 2008 -0800 +++ b/src/cpu/sparc/vm/templateTable_sparc.cpp Thu Dec 04 08:55:34 2008 -0800 @@ -2085,7 +2085,7 @@ } else { if (has_tos) { // save object pointer before call_VM() clobbers it - __ mov(Otos_i, Lscratch); + __ push_ptr(Otos_i); // put object on tos where GC wants it. 
} else { // Load top of stack (do not pop the value off the stack); __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i); @@ -2097,7 +2097,7 @@ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), Otos_i, Rcache); if (!is_static && has_tos) { - __ mov(Lscratch, Otos_i); // restore object pointer + __ pop_ptr(Otos_i); // restore object pointer __ verify_oop(Otos_i); } __ get_cache_and_index_at_bcp(Rcache, index, 1); diff -r 424f9bfe6b96 -r 1f54ed41d6ae src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp Wed Dec 03 13:41:37 2008 -0800 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp Thu Dec 04 08:55:34 2008 -0800 @@ -325,24 +325,30 @@ // For objects in CMS generation, this closure marks // given objects (transitively) as being reachable/live. // This is currently used during the (weak) reference object -// processing phase of the CMS final checkpoint step. +// processing phase of the CMS final checkpoint step, as +// well as during the concurrent precleaning of the discovered +// reference lists. class CMSKeepAliveClosure: public OopClosure { private: CMSCollector* _collector; const MemRegion _span; CMSMarkStack* _mark_stack; CMSBitMap* _bit_map; + bool _concurrent_precleaning; protected: DO_OOP_WORK_DEFN public: CMSKeepAliveClosure(CMSCollector* collector, MemRegion span, - CMSBitMap* bit_map, CMSMarkStack* mark_stack): + CMSBitMap* bit_map, CMSMarkStack* mark_stack, + bool cpc): _collector(collector), _span(span), _bit_map(bit_map), - _mark_stack(mark_stack) { + _mark_stack(mark_stack), + _concurrent_precleaning(cpc) { assert(!_span.is_empty(), "Empty span could spell trouble"); } + bool concurrent_precleaning() const { return _concurrent_precleaning; } virtual void do_oop(oop* p); virtual void do_oop(narrowOop* p); inline void do_oop_nv(oop* p) { CMSKeepAliveClosure::do_oop_work(p); } diff -r 424f9bfe6b96 -r 1f54ed41d6ae src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Wed Dec 03 13:41:37 2008 -0800 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Thu Dec 04 08:55:34 2008 -0800 @@ -538,6 +538,7 @@ _survivor_chunk_capacity(0), // -- ditto -- _survivor_chunk_index(0), // -- ditto -- _ser_pmc_preclean_ovflw(0), + _ser_kac_preclean_ovflw(0), _ser_pmc_remark_ovflw(0), _par_pmc_remark_ovflw(0), _ser_kac_ovflw(0), @@ -1960,6 +1961,7 @@ ref_processor()->set_enqueuing_is_done(false); ref_processor()->enable_discovery(); + ref_processor()->setup_policy(clear_all_soft_refs); // If an asynchronous collection finishes, the _modUnionTable is // all clear. If we are assuming the collection from an asynchronous // collection, clear the _modUnionTable. @@ -2383,6 +2385,9 @@ Universe::verify(true); } + // Snapshot the soft reference policy to be used in this collection cycle. 
+ ref_processor()->setup_policy(clear_all_soft_refs); + bool init_mark_was_synchronous = false; // until proven otherwise while (_collectorState != Idling) { if (TraceCMSState) { @@ -4388,10 +4393,10 @@ CMSPrecleanRefsYieldClosure yield_cl(this); assert(rp->span().equals(_span), "Spans should be equal"); CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap, - &_markStack); + &_markStack, true /* preclean */); CMSDrainMarkingStackClosure complete_trace(this, - _span, &_markBitMap, &_markStack, - &keep_alive); + _span, &_markBitMap, &_markStack, + &keep_alive, true /* preclean */); // We don't want this step to interfere with a young // collection because we don't want to take CPU @@ -4590,11 +4595,11 @@ if (!dirtyRegion.is_empty()) { assert(numDirtyCards > 0, "consistency check"); HeapWord* stop_point = NULL; + stopTimer(); + CMSTokenSyncWithLocks ts(true, gen->freelistLock(), + bitMapLock()); + startTimer(); { - stopTimer(); - CMSTokenSyncWithLocks ts(true, gen->freelistLock(), - bitMapLock()); - startTimer(); verify_work_stacks_empty(); verify_overflow_empty(); sample_eden(); @@ -4611,10 +4616,6 @@ assert((CMSPermGenPrecleaningEnabled && (gen == _permGen)) || (_collectorState == AbortablePreclean && should_abort_preclean()), "Unparsable objects should only be in perm gen."); - - stopTimer(); - CMSTokenSyncWithLocks ts(true, bitMapLock()); - startTimer(); _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end())); if (should_abort_preclean()) { break; // out of preclean loop @@ -4852,17 +4853,19 @@ // recurrence of that condition. assert(_markStack.isEmpty(), "No grey objects"); size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw + - _ser_kac_ovflw; + _ser_kac_ovflw + _ser_kac_preclean_ovflw; if (ser_ovflw > 0) { if (PrintCMSStatistics != 0) { gclog_or_tty->print_cr("Marking stack overflow (benign) " - "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")", + "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT + ", kac_preclean="SIZE_FORMAT")", _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw, - _ser_kac_ovflw); + _ser_kac_ovflw, _ser_kac_preclean_ovflw); } _markStack.expand(); _ser_pmc_remark_ovflw = 0; _ser_pmc_preclean_ovflw = 0; + _ser_kac_preclean_ovflw = 0; _ser_kac_ovflw = 0; } if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) { @@ -5675,40 +5678,29 @@ ResourceMark rm; HandleMark hm; - ReferencePolicy* soft_ref_policy; - - assert(!ref_processor()->enqueuing_is_done(), "Enqueuing should not be complete"); - // Process weak references. - if (clear_all_soft_refs) { - soft_ref_policy = new AlwaysClearPolicy(); - } else { -#ifdef COMPILER2 - soft_ref_policy = new LRUMaxHeapPolicy(); -#else - soft_ref_policy = new LRUCurrentHeapPolicy(); -#endif // COMPILER2 - } - verify_work_stacks_empty(); ReferenceProcessor* rp = ref_processor(); assert(rp->span().equals(_span), "Spans should be equal"); + assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete"); + // Process weak references. 
+ rp->setup_policy(clear_all_soft_refs); + verify_work_stacks_empty(); + CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap, - &_markStack); + &_markStack, false /* !preclean */); CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this, _span, &_markBitMap, &_markStack, - &cmsKeepAliveClosure); + &cmsKeepAliveClosure, false /* !preclean */); { TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty); if (rp->processing_is_mt()) { CMSRefProcTaskExecutor task_executor(*this); - rp->process_discovered_references(soft_ref_policy, - &_is_alive_closure, + rp->process_discovered_references(&_is_alive_closure, &cmsKeepAliveClosure, &cmsDrainMarkingStackClosure, &task_executor); } else { - rp->process_discovered_references(soft_ref_policy, - &_is_alive_closure, + rp->process_discovered_references(&_is_alive_closure, &cmsKeepAliveClosure, &cmsDrainMarkingStackClosure, NULL); @@ -6163,8 +6155,8 @@ #endif size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const { - assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1), - "missing Printezis mark?"); + assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1), + "missing Printezis mark?"); HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2); size_t size = pointer_delta(nextOneAddr + 1, addr); assert(size == CompactibleFreeListSpace::adjustObjectSize(size), @@ -8302,8 +8294,29 @@ } ) if (simulate_overflow || !_mark_stack->push(obj)) { - _collector->push_on_overflow_list(obj); - _collector->_ser_kac_ovflw++; + if (_concurrent_precleaning) { + // We dirty the overflown object and let the remark + // phase deal with it. + assert(_collector->overflow_list_is_empty(), "Error"); + // In the case of object arrays, we need to dirty all of + // the cards that the object spans. No locking or atomics + // are needed since no one else can be mutating the mod union + // table. 
+ if (obj->is_objArray()) { + size_t sz = obj->size(); + HeapWord* end_card_addr = + (HeapWord*)round_to((intptr_t)(addr+sz), CardTableModRefBS::card_size); + MemRegion redirty_range = MemRegion(addr, end_card_addr); + assert(!redirty_range.is_empty(), "Arithmetical tautology"); + _collector->_modUnionTable.mark_range(redirty_range); + } else { + _collector->_modUnionTable.mark(addr); + } + _collector->_ser_kac_preclean_ovflw++; + } else { + _collector->push_on_overflow_list(obj); + _collector->_ser_kac_ovflw++; + } } } } @@ -8400,6 +8413,8 @@ void CMSDrainMarkingStackClosure::do_void() { // the max number to take from overflow list at a time const size_t num = _mark_stack->capacity()/4; + assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(), + "Overflow list should be NULL during concurrent phases"); while (!_mark_stack->isEmpty() || // if stack is empty, check the overflow list _collector->take_from_overflow_list(num, _mark_stack)) { diff -r 424f9bfe6b96 -r 1f54ed41d6ae src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Wed Dec 03 13:41:37 2008 -0800 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Thu Dec 04 08:55:34 2008 -0800 @@ -592,6 +592,7 @@ size_t _ser_pmc_preclean_ovflw; size_t _ser_pmc_remark_ovflw; size_t _par_pmc_remark_ovflw; + size_t _ser_kac_preclean_ovflw; size_t _ser_kac_ovflw; size_t _par_kac_ovflw; NOT_PRODUCT(size_t _num_par_pushes;) @@ -1749,21 +1750,30 @@ // work-routine/closure used to complete transitive // marking of objects as live after a certain point // in which an initial set has been completely accumulated. +// This closure is currently used both during the final +// remark stop-world phase, as well as during the concurrent +// precleaning of the discovered reference lists. 
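The objArray branch above must dirty every card the array spans, not just the card holding its header. A minimal standalone sketch of that card-aligned computation, using byte addresses and stand-in names (card_size matches CardTableModRefBS; the helper names here are illustrative, not HotSpot's):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    const uintptr_t card_size = 512;  // bytes per card, as in CardTableModRefBS

    // Round x up to the next multiple of align (a power of two),
    // like HotSpot's round_to().
    inline uintptr_t round_to(uintptr_t x, uintptr_t align) {
      return (x + align - 1) & ~(align - 1);
    }

    // Card-aligned end of the redirty range for an object of size_in_bytes
    // at addr; remark will rescan every card in [addr, end).
    uintptr_t redirty_end(uintptr_t addr, size_t size_in_bytes) {
      uintptr_t end = round_to(addr + size_in_bytes, card_size);
      assert(end > addr && "redirty range must be non-empty");
      return end;
    }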
class CMSDrainMarkingStackClosure: public VoidClosure { CMSCollector* _collector; MemRegion _span; CMSMarkStack* _mark_stack; CMSBitMap* _bit_map; CMSKeepAliveClosure* _keep_alive; + bool _concurrent_precleaning; public: CMSDrainMarkingStackClosure(CMSCollector* collector, MemRegion span, CMSBitMap* bit_map, CMSMarkStack* mark_stack, - CMSKeepAliveClosure* keep_alive): + CMSKeepAliveClosure* keep_alive, + bool cpc): _collector(collector), _span(span), _bit_map(bit_map), _mark_stack(mark_stack), - _keep_alive(keep_alive) { } + _keep_alive(keep_alive), + _concurrent_precleaning(cpc) { + assert(_concurrent_precleaning == _keep_alive->concurrent_precleaning(), + "Mismatch"); + } void do_void(); }; diff -r 424f9bfe6b96 -r 1f54ed41d6ae src/share/vm/gc_implementation/g1/concurrentMark.cpp --- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp Wed Dec 03 13:41:37 2008 -0800 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp Thu Dec 04 08:55:34 2008 -0800 @@ -811,6 +811,7 @@ ReferenceProcessor* rp = g1h->ref_processor(); rp->verify_no_references_recorded(); rp->enable_discovery(); // enable ("weak") refs discovery + rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); satb_mq_set.set_process_completed_threshold(G1SATBProcessCompletedThreshold); @@ -1829,32 +1830,21 @@ void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) { ResourceMark rm; HandleMark hm; - ReferencePolicy* soft_ref_policy; + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + ReferenceProcessor* rp = g1h->ref_processor(); // Process weak references. - if (clear_all_soft_refs) { - soft_ref_policy = new AlwaysClearPolicy(); - } else { -#ifdef COMPILER2 - soft_ref_policy = new LRUMaxHeapPolicy(); -#else - soft_ref_policy = new LRUCurrentHeapPolicy(); -#endif - } + rp->setup_policy(clear_all_soft_refs); assert(_markStack.isEmpty(), "mark stack should be empty"); - G1CollectedHeap* g1 = G1CollectedHeap::heap(); - G1CMIsAliveClosure g1IsAliveClosure(g1); - - G1CMKeepAliveClosure g1KeepAliveClosure(g1, this, nextMarkBitMap()); + G1CMIsAliveClosure g1IsAliveClosure (g1h); + G1CMKeepAliveClosure g1KeepAliveClosure(g1h, this, nextMarkBitMap()); G1CMDrainMarkingStackClosure g1DrainMarkingStackClosure(nextMarkBitMap(), &_markStack, &g1KeepAliveClosure); // XXXYYY Also: copy the parallel ref processing code from CMS. - ReferenceProcessor* rp = g1->ref_processor(); - rp->process_discovered_references(soft_ref_policy, - &g1IsAliveClosure, + rp->process_discovered_references(&g1IsAliveClosure, &g1KeepAliveClosure, &g1DrainMarkingStackClosure, NULL); diff -r 424f9bfe6b96 -r 1f54ed41d6ae src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Wed Dec 03 13:41:37 2008 -0800 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Thu Dec 04 08:55:34 2008 -0800 @@ -891,6 +891,7 @@ ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL); ref_processor()->enable_discovery(); + ref_processor()->setup_policy(clear_all_soft_refs); // Do collection work { @@ -2463,7 +2464,7 @@ COMPILER2_PRESENT(DerivedPointerTable::clear()); - // We want to turn off ref discovere, if necessary, and turn it back on + // We want to turn off ref discovery, if necessary, and turn it back on // on again later if we do. 
bool was_enabled = ref_processor()->discovery_enabled(); if (was_enabled) ref_processor()->disable_discovery(); diff -r 424f9bfe6b96 -r 1f54ed41d6ae src/share/vm/gc_implementation/g1/g1MarkSweep.cpp --- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp Wed Dec 03 13:41:37 2008 -0800 +++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp Thu Dec 04 08:55:34 2008 -0800 @@ -33,8 +33,9 @@ // hook up weak ref data so it can be used during Mark-Sweep assert(GenMarkSweep::ref_processor() == NULL, "no stomping"); + assert(rp != NULL, "should be non-NULL"); GenMarkSweep::_ref_processor = rp; - assert(rp != NULL, "should be non-NULL"); + rp->setup_policy(clear_all_softrefs); // When collecting the permanent generation methodOops may be moving, // so we either have to flush all bcp data or convert it into bci. @@ -121,23 +122,12 @@ &GenMarkSweep::follow_root_closure); // Process reference objects found during marking - ReferencePolicy *soft_ref_policy; - if (clear_all_softrefs) { - soft_ref_policy = new AlwaysClearPolicy(); - } else { -#ifdef COMPILER2 - soft_ref_policy = new LRUMaxHeapPolicy(); -#else - soft_ref_policy = new LRUCurrentHeapPolicy(); -#endif - } - assert(soft_ref_policy != NULL,"No soft reference policy"); - GenMarkSweep::ref_processor()->process_discovered_references( - soft_ref_policy, - &GenMarkSweep::is_alive, - &GenMarkSweep::keep_alive, - &GenMarkSweep::follow_stack_closure, - NULL); + ReferenceProcessor* rp = GenMarkSweep::ref_processor(); + rp->setup_policy(clear_all_softrefs); + rp->process_discovered_references(&GenMarkSweep::is_alive, + &GenMarkSweep::keep_alive, + &GenMarkSweep::follow_stack_closure, + NULL); // Follow system dictionary roots and unload classes bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive); diff -r 424f9bfe6b96 -r 1f54ed41d6ae src/share/vm/gc_implementation/parNew/parNewGeneration.cpp --- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Wed Dec 03 13:41:37 2008 -0800 +++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Thu Dec 04 08:55:34 2008 -0800 @@ -759,17 +759,12 @@ thread_state_set.steals(), thread_state_set.pops()+thread_state_set.steals()); } - assert(thread_state_set.pushes() == thread_state_set.pops() + thread_state_set.steals(), + assert(thread_state_set.pushes() == thread_state_set.pops() + + thread_state_set.steals(), "Or else the queues are leaky."); - // For now, process discovered weak refs sequentially. -#ifdef COMPILER2 - ReferencePolicy *soft_ref_policy = new LRUMaxHeapPolicy(); -#else - ReferencePolicy *soft_ref_policy = new LRUCurrentHeapPolicy(); -#endif // COMPILER2 - // Process (weak) reference objects found during scavenge. + ReferenceProcessor* rp = ref_processor(); IsAliveClosure is_alive(this); ScanWeakRefClosure scan_weak_ref(this); KeepAliveClosure keep_alive(&scan_weak_ref); @@ -778,18 +773,17 @@ set_promo_failure_scan_stack_closure(&scan_without_gc_barrier); EvacuateFollowersClosureGeneral evacuate_followers(gch, _level, &scan_without_gc_barrier, &scan_with_gc_barrier); - if (ref_processor()->processing_is_mt()) { + rp->setup_policy(clear_all_soft_refs); + if (rp->processing_is_mt()) { ParNewRefProcTaskExecutor task_executor(*this, thread_state_set); - ref_processor()->process_discovered_references( - soft_ref_policy, &is_alive, &keep_alive, &evacuate_followers, - &task_executor); + rp->process_discovered_references(&is_alive, &keep_alive, + &evacuate_followers, &task_executor); } else { thread_state_set.flush(); gch->set_par_threads(0); // 0 ==> non-parallel. 
gch->save_marks(); - ref_processor()->process_discovered_references( - soft_ref_policy, &is_alive, &keep_alive, &evacuate_followers, - NULL); + rp->process_discovered_references(&is_alive, &keep_alive, + &evacuate_followers, NULL); } if (!promotion_failed()) { // Swap the survivor spaces. @@ -851,14 +845,14 @@ SpecializationStats::print(); - ref_processor()->set_enqueuing_is_done(true); - if (ref_processor()->processing_is_mt()) { + rp->set_enqueuing_is_done(true); + if (rp->processing_is_mt()) { ParNewRefProcTaskExecutor task_executor(*this, thread_state_set); - ref_processor()->enqueue_discovered_references(&task_executor); + rp->enqueue_discovered_references(&task_executor); } else { - ref_processor()->enqueue_discovered_references(NULL); + rp->enqueue_discovered_references(NULL); } - ref_processor()->verify_no_references_recorded(); + rp->verify_no_references_recorded(); } static int sum; @@ -1211,7 +1205,7 @@ int n = 0; while (cur != NULL) { oop obj_to_push = cur->forwardee(); - oop next = oop(cur->klass()); + oop next = oop(cur->klass_or_null()); cur->set_klass(obj_to_push->klass()); if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) { obj_to_push = cur; diff -r 424f9bfe6b96 -r 1f54ed41d6ae src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp Wed Dec 03 13:41:37 2008 -0800 +++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp Thu Dec 04 08:55:34 2008 -0800 @@ -172,6 +172,7 @@ COMPILER2_PRESENT(DerivedPointerTable::clear()); ref_processor()->enable_discovery(); + ref_processor()->setup_policy(clear_all_softrefs); mark_sweep_phase1(clear_all_softrefs); @@ -517,20 +518,9 @@ // Process reference objects found during marking { - ReferencePolicy *soft_ref_policy; - if (clear_all_softrefs) { - soft_ref_policy = new AlwaysClearPolicy(); - } else { -#ifdef COMPILER2 - soft_ref_policy = new LRUMaxHeapPolicy(); -#else - soft_ref_policy = new LRUCurrentHeapPolicy(); -#endif // COMPILER2 - } - assert(soft_ref_policy != NULL,"No soft reference policy"); + ref_processor()->setup_policy(clear_all_softrefs); ref_processor()->process_discovered_references( - soft_ref_policy, is_alive_closure(), mark_and_push_closure(), - follow_stack_closure(), NULL); + is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL); } // Follow system dictionary roots and unload classes diff -r 424f9bfe6b96 -r 1f54ed41d6ae src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Wed Dec 03 13:41:37 2008 -0800 +++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Thu Dec 04 08:55:34 2008 -0800 @@ -1578,6 +1578,7 @@ COMPILER2_PRESENT(DerivedPointerTable::clear()); ref_processor()->enable_discovery(); + ref_processor()->setup_policy(maximum_heap_compaction); bool marked_for_unloading = false; @@ -1894,26 +1895,14 @@ // Process reference objects found during marking { TraceTime tm_r("reference processing", print_phases(), true, gclog_or_tty); - ReferencePolicy *soft_ref_policy; - if (maximum_heap_compaction) { - soft_ref_policy = new AlwaysClearPolicy(); - } else { -#ifdef COMPILER2 - soft_ref_policy = new LRUMaxHeapPolicy(); -#else - soft_ref_policy = new LRUCurrentHeapPolicy(); -#endif // COMPILER2 - } - assert(soft_ref_policy != NULL, "No soft reference policy"); if (ref_processor()->processing_is_mt()) { RefProcTaskExecutor task_executor; ref_processor()->process_discovered_references( 
- soft_ref_policy, is_alive_closure(), &mark_and_push_closure, - &follow_stack_closure, &task_executor); + is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, + &task_executor); } else { ref_processor()->process_discovered_references( - soft_ref_policy, is_alive_closure(), &mark_and_push_closure, - &follow_stack_closure, NULL); + is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL); } } diff -r 424f9bfe6b96 -r 1f54ed41d6ae src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp Wed Dec 03 13:41:37 2008 -0800 +++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp Thu Dec 04 08:55:34 2008 -0800 @@ -330,6 +330,7 @@ COMPILER2_PRESENT(DerivedPointerTable::clear()); reference_processor()->enable_discovery(); + reference_processor()->setup_policy(false); // We track how much was promoted to the next generation for // the AdaptiveSizePolicy. @@ -394,24 +395,16 @@ // Process reference objects discovered during scavenge { -#ifdef COMPILER2 - ReferencePolicy *soft_ref_policy = new LRUMaxHeapPolicy(); -#else - ReferencePolicy *soft_ref_policy = new LRUCurrentHeapPolicy(); -#endif // COMPILER2 - + reference_processor()->setup_policy(false); // not always_clear PSKeepAliveClosure keep_alive(promotion_manager); PSEvacuateFollowersClosure evac_followers(promotion_manager); - assert(soft_ref_policy != NULL,"No soft reference policy"); if (reference_processor()->processing_is_mt()) { PSRefProcTaskExecutor task_executor; reference_processor()->process_discovered_references( - soft_ref_policy, &_is_alive_closure, &keep_alive, &evac_followers, - &task_executor); + &_is_alive_closure, &keep_alive, &evac_followers, &task_executor); } else { reference_processor()->process_discovered_references( - soft_ref_policy, &_is_alive_closure, &keep_alive, &evac_followers, - NULL); + &_is_alive_closure, &keep_alive, &evac_followers, NULL); } } diff -r 424f9bfe6b96 -r 1f54ed41d6ae src/share/vm/includeDB_core --- a/src/share/vm/includeDB_core Wed Dec 03 13:41:37 2008 -0800 +++ b/src/share/vm/includeDB_core Thu Dec 04 08:55:34 2008 -0800 @@ -3434,6 +3434,7 @@ referenceProcessor.cpp systemDictionary.hpp referenceProcessor.hpp instanceRefKlass.hpp +referenceProcessor.hpp referencePolicy.hpp reflection.cpp arguments.hpp reflection.cpp handles.inline.hpp diff -r 424f9bfe6b96 -r 1f54ed41d6ae src/share/vm/memory/defNewGeneration.cpp --- a/src/share/vm/memory/defNewGeneration.cpp Wed Dec 03 13:41:37 2008 -0800 +++ b/src/share/vm/memory/defNewGeneration.cpp Thu Dec 04 08:55:34 2008 -0800 @@ -540,14 +540,6 @@ assert(gch->no_allocs_since_save_marks(0), "save marks have not been newly set."); - // Weak refs. - // FIXME: Are these storage leaks, or are they resource objects? -#ifdef COMPILER2 - ReferencePolicy *soft_ref_policy = new LRUMaxHeapPolicy(); -#else - ReferencePolicy *soft_ref_policy = new LRUCurrentHeapPolicy(); -#endif // COMPILER2 - // Not very pretty. CollectorPolicy* cp = gch->collector_policy(); @@ -574,8 +566,10 @@ evacuate_followers.do_void(); FastKeepAliveClosure keep_alive(this, &scan_weak_ref); - ref_processor()->process_discovered_references( - soft_ref_policy, &is_alive, &keep_alive, &evacuate_followers, NULL); + ReferenceProcessor* rp = ref_processor(); + rp->setup_policy(clear_all_soft_refs); + rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers, + NULL); if (!promotion_failed()) { // Swap the survivor spaces. 
eden()->clear(SpaceDecorator::Mangle); diff -r 424f9bfe6b96 -r 1f54ed41d6ae src/share/vm/memory/genCollectedHeap.cpp --- a/src/share/vm/memory/genCollectedHeap.cpp Wed Dec 03 13:41:37 2008 -0800 +++ b/src/share/vm/memory/genCollectedHeap.cpp Thu Dec 04 08:55:34 2008 -0800 @@ -525,8 +525,9 @@ if (rp->discovery_is_atomic()) { rp->verify_no_references_recorded(); rp->enable_discovery(); + rp->setup_policy(clear_all_soft_refs); } else { - // collect() will enable discovery as appropriate + // collect() below will enable discovery as appropriate } _gens[i]->collect(full, clear_all_soft_refs, size, is_tlab); if (!rp->enqueuing_is_done()) { diff -r 424f9bfe6b96 -r 1f54ed41d6ae src/share/vm/memory/genMarkSweep.cpp --- a/src/share/vm/memory/genMarkSweep.cpp Wed Dec 03 13:41:37 2008 -0800 +++ b/src/share/vm/memory/genMarkSweep.cpp Thu Dec 04 08:55:34 2008 -0800 @@ -31,8 +31,9 @@ // hook up weak ref data so it can be used during Mark-Sweep assert(ref_processor() == NULL, "no stomping"); + assert(rp != NULL, "should be non-NULL"); _ref_processor = rp; - assert(rp != NULL, "should be non-NULL"); + rp->setup_policy(clear_all_softrefs); TraceTime t1("Full GC", PrintGC && !PrintGCDetails, true, gclog_or_tty); @@ -245,20 +246,9 @@ // Process reference objects found during marking { - ReferencePolicy *soft_ref_policy; - if (clear_all_softrefs) { - soft_ref_policy = new AlwaysClearPolicy(); - } else { -#ifdef COMPILER2 - soft_ref_policy = new LRUMaxHeapPolicy(); -#else - soft_ref_policy = new LRUCurrentHeapPolicy(); -#endif // COMPILER2 - } - assert(soft_ref_policy != NULL,"No soft reference policy"); + ref_processor()->setup_policy(clear_all_softrefs); ref_processor()->process_discovered_references( - soft_ref_policy, &is_alive, &keep_alive, - &follow_stack_closure, NULL); + &is_alive, &keep_alive, &follow_stack_closure, NULL); } // Follow system dictionary roots and unload classes diff -r 424f9bfe6b96 -r 1f54ed41d6ae src/share/vm/memory/referencePolicy.cpp --- a/src/share/vm/memory/referencePolicy.cpp Wed Dec 03 13:41:37 2008 -0800 +++ b/src/share/vm/memory/referencePolicy.cpp Thu Dec 04 08:55:34 2008 -0800 @@ -26,6 +26,11 @@ # include "incls/_referencePolicy.cpp.incl" LRUCurrentHeapPolicy::LRUCurrentHeapPolicy() { + setup(); +} + +// Capture state (of-the-VM) information needed to evaluate the policy +void LRUCurrentHeapPolicy::setup() { _max_interval = (Universe::get_heap_free_at_last_gc() / M) * SoftRefLRUPolicyMSPerMB; assert(_max_interval >= 0,"Sanity check"); } @@ -47,6 +52,11 @@ /////////////////////// MaxHeap ////////////////////// LRUMaxHeapPolicy::LRUMaxHeapPolicy() { + setup(); +} + +// Capture state (of-the-VM) information needed to evaluate the policy +void LRUMaxHeapPolicy::setup() { size_t max_heap = MaxHeapSize; max_heap -= Universe::get_heap_used_at_last_gc(); max_heap /= M; diff -r 424f9bfe6b96 -r 1f54ed41d6ae src/share/vm/memory/referencePolicy.hpp --- a/src/share/vm/memory/referencePolicy.hpp Wed Dec 03 13:41:37 2008 -0800 +++ b/src/share/vm/memory/referencePolicy.hpp Thu Dec 04 08:55:34 2008 -0800 @@ -26,9 +26,11 @@ // should be cleared. 
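The setup() methods added in referencePolicy.cpp above snapshot heap state once per collection cycle, so that should_clear_reference() reduces to a cheap comparison during discovery. A hedged sketch of the LRU idea, with stub accessors standing in for SoftRefLRUPolicyMSPerMB, the soft-reference clock, and Universe's free-space statistic:

    #include <cstddef>

    typedef long long jlong;
    const size_t M = 1024 * 1024;
    const jlong kMSPerMB = 1000;  // stand-in for SoftRefLRUPolicyMSPerMB

    // Stubs standing in for the real VM state.
    inline jlong  soft_ref_clock()        { return 10000; } // advanced at major GCs
    inline jlong  last_access_timestamp() { return 4000;  } // per-Reference field
    inline size_t heap_free_at_last_gc()  { return 64 * M; }

    class LRUPolicySketch {
      jlong _max_interval;
     public:
      // Capture state once per cycle (the new setup() hook).
      void setup() {
        _max_interval = jlong(heap_free_at_last_gc() / M) * kMSPerMB;
      }
      // Clear only references idle longer than the snapshotted interval.
      bool should_clear_reference() const {
        return soft_ref_clock() - last_access_timestamp() > _max_interval;
      }
    };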
-class ReferencePolicy : public ResourceObj { +class ReferencePolicy : public CHeapObj { public: virtual bool should_clear_reference(oop p) { ShouldNotReachHere(); return true; } + // Capture state (of-the-VM) information needed to evaluate the policy + virtual void setup() { /* do nothing */ } }; class NeverClearPolicy : public ReferencePolicy { @@ -48,6 +50,8 @@ public: LRUCurrentHeapPolicy(); + // Capture state (of-the-VM) information needed to evaluate the policy + void setup(); bool should_clear_reference(oop p); }; @@ -58,5 +62,7 @@ public: LRUMaxHeapPolicy(); + // Capture state (of-the-VM) information needed to evaluate the policy + void setup(); bool should_clear_reference(oop p); }; diff -r 424f9bfe6b96 -r 1f54ed41d6ae src/share/vm/memory/referenceProcessor.cpp --- a/src/share/vm/memory/referenceProcessor.cpp Wed Dec 03 13:41:37 2008 -0800 +++ b/src/share/vm/memory/referenceProcessor.cpp Thu Dec 04 08:55:34 2008 -0800 @@ -25,6 +25,11 @@ # include "incls/_precompiled.incl" # include "incls/_referenceProcessor.cpp.incl" +ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL; +ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy = NULL; +oop ReferenceProcessor::_sentinelRef = NULL; +const int subclasses_of_ref = REF_PHANTOM - REF_OTHER; + // List of discovered references. class DiscoveredList { public: @@ -47,7 +52,9 @@ } bool empty() const { return head() == ReferenceProcessor::sentinel_ref(); } size_t length() { return _len; } - void set_length(size_t len) { _len = len; } + void set_length(size_t len) { _len = len; } + void inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); } + void dec_length(size_t dec) { _len -= dec; } private: // Set value depending on UseCompressedOops. This could be a template class // but then we have to fix all the instantiations and declarations that use this class. @@ -56,10 +63,6 @@ size_t _len; }; -oop ReferenceProcessor::_sentinelRef = NULL; - -const int subclasses_of_ref = REF_PHANTOM - REF_OTHER; - void referenceProcessor_init() { ReferenceProcessor::init_statics(); } @@ -80,6 +83,12 @@ } assert(_sentinelRef != NULL && _sentinelRef->is_oop(), "Just constructed it!"); + _always_clear_soft_ref_policy = new AlwaysClearPolicy(); + _default_soft_ref_policy = new COMPILER2_PRESENT(LRUMaxHeapPolicy()) + NOT_COMPILER2(LRUCurrentHeapPolicy()); + if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) { + vm_exit_during_initialization("Could not allocate reference policy object"); + } guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery || RefDiscoveryPolicy == ReferentBasedDiscovery, "Unrecongnized RefDiscoveryPolicy"); @@ -106,6 +115,7 @@ vm_exit_during_initialization("Could not allocate ReferenceProcessor object"); } rp->set_is_alive_non_header(is_alive_non_header); + rp->setup_policy(false /* default soft ref policy */); return rp; } @@ -192,7 +202,6 @@ } void ReferenceProcessor::process_discovered_references( - ReferencePolicy* policy, BoolObjectClosure* is_alive, OopClosure* keep_alive, VoidClosure* complete_gc, @@ -207,7 +216,7 @@ // Soft references { TraceTime tt("SoftReference", trace_time, false, gclog_or_tty); - process_discovered_reflist(_discoveredSoftRefs, policy, true, + process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true, is_alive, keep_alive, complete_gc, task_executor); } @@ -436,13 +445,13 @@ // The "allow_null_referent" argument tells us to allow for the possibility // of a NULL referent in the discovered Reference object. 
This typically // happens in the case of concurrent collectors that may have done the - // discovery concurrently or interleaved with mutator execution. + // discovery concurrently, or interleaved, with mutator execution. inline void load_ptrs(DEBUG_ONLY(bool allow_null_referent)); // Move to the next discovered reference. inline void next(); - // Remove the current reference from the list and move to the next. + // Remove the current reference from the list inline void remove(); // Make the Reference object active again. @@ -476,7 +485,6 @@ inline size_t removed() const { return _removed; } ) -private: inline void move_to_next(); private: @@ -553,7 +561,7 @@ oopDesc::store_heap_oop((oop*)_prev_next, _next); } NOT_PRODUCT(_removed++); - move_to_next(); + _refs_list.dec_length(1); } inline void DiscoveredListIterator::move_to_next() { @@ -591,12 +599,13 @@ gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s" ") by policy", iter.obj(), iter.obj()->blueprint()->internal_name()); } + // Remove Reference object from list + iter.remove(); // Make the Reference object active again iter.make_active(); // keep the referent around iter.make_referent_alive(); - // Remove Reference object from list - iter.remove(); + iter.move_to_next(); } else { iter.next(); } @@ -629,12 +638,13 @@ iter.obj(), iter.obj()->blueprint()->internal_name()); } // The referent is reachable after all. + // Remove Reference object from list. + iter.remove(); // Update the referent pointer as necessary: Note that this // should not entail any recursive marking because the // referent must already have been traversed. iter.make_referent_alive(); - // Remove Reference object from list - iter.remove(); + iter.move_to_next(); } else { iter.next(); } @@ -670,6 +680,7 @@ } else { keep_alive->do_oop((oop*)next_addr); } + iter.move_to_next(); } else { iter.next(); } @@ -832,9 +843,9 @@ } java_lang_ref_Reference::set_discovered(move_tail, ref_lists[to_idx].head()); ref_lists[to_idx].set_head(move_head); - ref_lists[to_idx].set_length(ref_lists[to_idx].length() + refs_to_move); + ref_lists[to_idx].inc_length(refs_to_move); ref_lists[from_idx].set_head(new_head); - ref_lists[from_idx].set_length(ref_lists[from_idx].length() - refs_to_move); + ref_lists[from_idx].dec_length(refs_to_move); } else { ++to_idx; } @@ -923,7 +934,6 @@ void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list) { assert(!discovery_is_atomic(), "Else why call this method?"); DiscoveredListIterator iter(refs_list, NULL, NULL); - size_t length = refs_list.length(); while (iter.has_next()) { iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */)); oop next = java_lang_ref_Reference::next(iter.obj()); @@ -941,12 +951,11 @@ ) // Remove Reference object from list iter.remove(); - --length; + iter.move_to_next(); } else { iter.next(); } } - refs_list.set_length(length); NOT_PRODUCT( if (PrintGCDetails && TraceReferenceGC) { gclog_or_tty->print( @@ -1024,7 +1033,7 @@ // We have separate lists for enqueueing so no synchronization // is necessary. refs_list.set_head(obj); - refs_list.set_length(refs_list.length() + 1); + refs_list.inc_length(1); if (_discovered_list_needs_barrier) { _bs->write_ref_field((void*)discovered_addr, current_head); guarantee(false, "Needs to be fixed: YSR"); } @@ -1090,15 +1099,28 @@ // reachable. 
if (is_alive_non_header() != NULL) { oop referent = java_lang_ref_Reference::referent(obj); - // We'd like to assert the following: - // assert(referent != NULL, "Refs with null referents already filtered"); - // However, since this code may be executed concurrently with - // mutators, which can clear() the referent, it is not - // guaranteed that the referent is non-NULL. + // In the case of non-concurrent discovery, the last + // disjunct below should hold. It may not hold in the + // case of concurrent discovery because mutators may + // concurrently clear() a Reference. + assert(UseConcMarkSweepGC || UseG1GC || referent != NULL, + "Refs with null referents already filtered"); if (is_alive_non_header()->do_object_b(referent)) { return false; // referent is reachable } } + if (rt == REF_SOFT) { + // For soft refs we can decide now if these are not + // current candidates for clearing, in which case we + // can mark through them now, rather than delaying that + // to the reference-processing phase. Since all current + // time-stamp policies advance the soft-ref clock only + // at a major collection cycle, this is always currently + // accurate. + if (!_current_soft_ref_policy->should_clear_reference(obj)) { + return false; + } + } HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj); const oop discovered = java_lang_ref_Reference::discovered(obj); @@ -1168,7 +1190,7 @@ _bs->write_ref_field((oop*)discovered_addr, current_head); } list->set_head(obj); - list->set_length(list->length() + 1); + list->inc_length(1); } // In the MT discovery case, it is currently possible to see @@ -1209,45 +1231,48 @@ TraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC, false, gclog_or_tty); for (int i = 0; i < _num_q; i++) { + if (yield->should_return()) { + return; + } preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive, keep_alive, complete_gc, yield); } } - if (yield->should_return()) { - return; - } // Weak references { TraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC, false, gclog_or_tty); for (int i = 0; i < _num_q; i++) { + if (yield->should_return()) { + return; + } preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive, keep_alive, complete_gc, yield); } } - if (yield->should_return()) { - return; - } // Final references { TraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC, false, gclog_or_tty); for (int i = 0; i < _num_q; i++) { + if (yield->should_return()) { + return; + } preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive, keep_alive, complete_gc, yield); } } - if (yield->should_return()) { - return; - } // Phantom references { TraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC, false, gclog_or_tty); for (int i = 0; i < _num_q; i++) { + if (yield->should_return()) { + return; + } preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive, keep_alive, complete_gc, yield); } @@ -1256,9 +1281,12 @@ // Walk the given discovered ref list, and remove all reference objects // whose referents are still alive, whose referents are NULL or which -// are not active (have a non-NULL next field). NOTE: For this to work -// correctly, refs discovery can not be happening concurrently with this -// step. +// are not active (have a non-NULL next field). NOTE: When we are +// thus precleaning the ref lists (which happens single-threaded today), +// we do not disable refs discovery to honour the correct semantics of +// java.lang.Reference. 
As a result, we need to be careful below +// that ref removal steps interleave safely with ref discovery steps +// (in this thread). void ReferenceProcessor::preclean_discovered_reflist(DiscoveredList& refs_list, BoolObjectClosure* is_alive, @@ -1266,7 +1294,6 @@ VoidClosure* complete_gc, YieldClosure* yield) { DiscoveredListIterator iter(refs_list, keep_alive, is_alive); - size_t length = refs_list.length(); while (iter.has_next()) { iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */)); oop obj = iter.obj(); @@ -1281,7 +1308,6 @@ } // Remove Reference object from list iter.remove(); - --length; // Keep alive its cohort. iter.make_referent_alive(); if (UseCompressedOops) { @@ -1291,12 +1317,11 @@ oop* next_addr = (oop*)java_lang_ref_Reference::next_addr(obj); keep_alive->do_oop(next_addr); } + iter.move_to_next(); } else { iter.next(); } } - refs_list.set_length(length); - // Close the reachable set complete_gc->do_void(); diff -r 424f9bfe6b96 -r 1f54ed41d6ae src/share/vm/memory/referenceProcessor.hpp --- a/src/share/vm/memory/referenceProcessor.hpp Wed Dec 03 13:41:37 2008 -0800 +++ b/src/share/vm/memory/referenceProcessor.hpp Thu Dec 04 08:55:34 2008 -0800 @@ -23,7 +23,7 @@ */ // ReferenceProcessor class encapsulates the per-"collector" processing -// of "weak" references for GC. The interface is useful for supporting +// of java.lang.Reference objects for GC. The interface is useful for supporting // a generational abstraction, in particular when there are multiple // generations that are being independently collected -- possibly // concurrently and/or incrementally. Note, however, that the @@ -75,6 +75,14 @@ // all collectors but the CMS collector). BoolObjectClosure* _is_alive_non_header; + // Soft ref clearing policies + // . the default policy + static ReferencePolicy* _default_soft_ref_policy; + // . the "clear all" policy + static ReferencePolicy* _always_clear_soft_ref_policy; + // . the current policy below is either one of the above + ReferencePolicy* _current_soft_ref_policy; + // The discovered ref lists themselves // The MT'ness degree of the queues below @@ -90,6 +98,12 @@ DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; } static oop sentinel_ref() { return _sentinelRef; } static oop* adr_sentinel_ref() { return &_sentinelRef; } + ReferencePolicy* setup_policy(bool always_clear) { + _current_soft_ref_policy = always_clear ? + _always_clear_soft_ref_policy : _default_soft_ref_policy; + _current_soft_ref_policy->setup(); // snapshot the policy threshold + return _current_soft_ref_policy; + } public: // Process references with a certain reachability level. 
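With the policy now owned by the ReferenceProcessor, every collector in this patch follows the same per-cycle sequence. A hedged sketch of a caller, compiled against the patched referenceProcessor.hpp; the free function and closure parameters are stand-ins, while the member functions are the ones shown in the diffs above:

    void reference_cycle(ReferenceProcessor* rp,
                         bool clear_all_soft_refs,
                         BoolObjectClosure* is_alive,
                         OopClosure* keep_alive,
                         VoidClosure* complete_gc) {
      rp->enable_discovery();
      rp->setup_policy(clear_all_soft_refs); // AlwaysClearPolicy or the default LRU policy
      // ... marking runs here and discovers Reference objects ...
      rp->process_discovered_references(is_alive, keep_alive, complete_gc,
                                        NULL /* no MT task executor */);
      rp->set_enqueuing_is_done(true);
      rp->enqueue_discovered_references(NULL);
      rp->verify_no_references_recorded();
    }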
@@ -297,8 +311,7 @@ bool discover_reference(oop obj, ReferenceType rt); // Process references found during GC (called by the garbage collector) - void process_discovered_references(ReferencePolicy* policy, - BoolObjectClosure* is_alive, + void process_discovered_references(BoolObjectClosure* is_alive, OopClosure* keep_alive, VoidClosure* complete_gc, AbstractRefProcTaskExecutor* task_executor); diff -r 424f9bfe6b96 -r 1f54ed41d6ae src/share/vm/memory/universe.cpp --- a/src/share/vm/memory/universe.cpp Wed Dec 03 13:41:37 2008 -0800 +++ b/src/share/vm/memory/universe.cpp Thu Dec 04 08:55:34 2008 -0800 @@ -96,7 +96,7 @@ bool Universe::_fully_initialized = false; size_t Universe::_heap_capacity_at_last_gc; -size_t Universe::_heap_used_at_last_gc; +size_t Universe::_heap_used_at_last_gc = 0; CollectedHeap* Universe::_collectedHeap = NULL; address Universe::_heap_base = NULL; diff -r 424f9bfe6b96 -r 1f54ed41d6ae src/share/vm/oops/oop.inline.hpp --- a/src/share/vm/oops/oop.inline.hpp Wed Dec 03 13:41:37 2008 -0800 +++ b/src/share/vm/oops/oop.inline.hpp Thu Dec 04 08:55:34 2008 -0800 @@ -92,7 +92,7 @@ // This is only to be used during GC, for from-space objects, so no // barrier is needed. if (UseCompressedOops) { - _metadata._compressed_klass = encode_heap_oop_not_null(k); + _metadata._compressed_klass = encode_heap_oop(k); // may be null (parnew overflow handling) } else { _metadata._klass = (klassOop)k; } diff -r 424f9bfe6b96 -r 1f54ed41d6ae src/share/vm/runtime/arguments.cpp --- a/src/share/vm/runtime/arguments.cpp Wed Dec 03 13:41:37 2008 -0800 +++ b/src/share/vm/runtime/arguments.cpp Thu Dec 04 08:55:34 2008 -0800 @@ -2322,7 +2322,12 @@ return JNI_ERR; } } - + // Change the default value for flags which have different default values + // when working with older JDKs. + if (JDK_Version::current().compare_major(6) <= 0 && + FLAG_IS_DEFAULT(UseVMInterruptibleIO)) { + FLAG_SET_DEFAULT(UseVMInterruptibleIO, true); + } return JNI_OK; } diff -r 424f9bfe6b96 -r 1f54ed41d6ae src/share/vm/runtime/biasedLocking.cpp --- a/src/share/vm/runtime/biasedLocking.cpp Wed Dec 03 13:41:37 2008 -0800 +++ b/src/share/vm/runtime/biasedLocking.cpp Thu Dec 04 08:55:34 2008 -0800 @@ -582,13 +582,19 @@ if (heuristics == HR_NOT_BIASED) { return NOT_BIASED; } else if (heuristics == HR_SINGLE_REVOKE) { - if (mark->biased_locker() == THREAD) { + Klass *k = Klass::cast(obj->klass()); + markOop prototype_header = k->prototype_header(); + if (mark->biased_locker() == THREAD && + prototype_header->bias_epoch() == mark->bias_epoch()) { // A thread is trying to revoke the bias of an object biased // toward it, again likely due to an identity hash code // computation. We can again avoid a safepoint in this case // since we are only going to walk our own stack. There are no // races with revocations occurring in other threads because we // reach no safepoints in the revocation path. + // Also check the epoch because even if threads match, another thread + // can come in with a CAS to steal the bias of an object that has a + // stale epoch. 
ResourceMark rm; if (TraceBiasedLocking) { tty->print_cr("Revoking bias by walking my own stack:"); diff -r 424f9bfe6b96 -r 1f54ed41d6ae src/share/vm/runtime/globals.hpp --- a/src/share/vm/runtime/globals.hpp Wed Dec 03 13:41:37 2008 -0800 +++ b/src/share/vm/runtime/globals.hpp Thu Dec 04 08:55:34 2008 -0800 @@ -1474,7 +1474,7 @@ "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence" \ " ratio") \ \ - product(bool, CMSPrecleanRefLists1, false, \ + product(bool, CMSPrecleanRefLists1, true, \ "Preclean ref lists during (initial) preclean phase") \ \ product(bool, CMSPrecleanRefLists2, false, \ @@ -3262,9 +3262,10 @@ diagnostic(bool, PrintDTraceDOF, false, \ "Print the DTrace DOF passed to the system for JSDT probes") \ \ - product(bool, UseVMInterruptibleIO, true, \ + product(bool, UseVMInterruptibleIO, false, \ "(Unstable, Solaris-specific) Thread interrupt before or with " \ - "EINTR for I/O operations results in OS_INTRPT" + "EINTR for I/O operations results in OS_INTRPT. The default value"\ + " of this flag is true for JDK 6 and earlier" /* diff -r 424f9bfe6b96 -r 1f54ed41d6ae src/share/vm/utilities/macros.hpp --- a/src/share/vm/utilities/macros.hpp Wed Dec 03 13:41:37 2008 -0800 +++ b/src/share/vm/utilities/macros.hpp Thu Dec 04 08:55:34 2008 -0800 @@ -65,8 +65,10 @@ // COMPILER2 variant #ifdef COMPILER2 #define COMPILER2_PRESENT(code) code +#define NOT_COMPILER2(code) #else // COMPILER2 #define COMPILER2_PRESENT(code) +#define NOT_COMPILER2(code) code #endif // COMPILER2
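The new NOT_COMPILER2 macro complements COMPILER2_PRESENT: exactly one of the pair expands to its argument, which is how referenceProcessor.cpp above initializes the default soft-ref policy without an #ifdef block. A small self-contained illustration (the Policy types here are stand-ins for the real LRU policies):

    #ifdef COMPILER2
    #define COMPILER2_PRESENT(code) code
    #define NOT_COMPILER2(code)
    #else
    #define COMPILER2_PRESENT(code)
    #define NOT_COMPILER2(code) code
    #endif

    struct Policy { };
    struct MaxHeapPolicy : Policy { };      // stand-in for LRUMaxHeapPolicy
    struct CurrentHeapPolicy : Policy { };  // stand-in for LRUCurrentHeapPolicy

    // Exactly one operand survives preprocessing, mirroring the patch's
    // initialization of _default_soft_ref_policy.
    Policy* make_default_policy() {
      return new COMPILER2_PRESENT(MaxHeapPolicy())
                 NOT_COMPILER2(CurrentHeapPolicy());
    }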