# HG changeset patch
# User jmasa
# Date 1276183655 25200
# Node ID b9bc732be7c06a8677746ce5841719ef9a8fb9a3
# Parent  b17deadc902e4a019e2d69bc63a88045344367e3
# Parent  2458a1f25356cb19921f48e9ee20d60eea470e82
Merge

diff -r b17deadc902e -r b9bc732be7c0 src/share/vm/gc_implementation/g1/g1MMUTracker.cpp
--- a/src/share/vm/gc_implementation/g1/g1MMUTracker.cpp Wed Jun 09 13:53:58 2010 -0400
+++ b/src/share/vm/gc_implementation/g1/g1MMUTracker.cpp Thu Jun 10 08:27:35 2010 -0700
@@ -81,20 +81,24 @@
 
   remove_expired_entries(end);
   if (_no_entries == QueueLength) {
-    // OK, right now when we fill up we bomb out
-    // there are a few ways of dealing with this "gracefully"
+    // OK, we've filled up the queue. There are a few ways
+    // of dealing with this "gracefully"
     //   increase the array size (:-)
     //   remove the oldest entry (this might allow more GC time for
-    //     the time slice than what's allowed)
+    //     the time slice than what's allowed) - this is what we
+    //     currently do
     //   consolidate the two entries with the minimum gap between them
     //     (this might allow less GC time than what's allowed)
-    guarantee(NOT_PRODUCT(ScavengeALot ||) G1UseFixedWindowMMUTracker,
-              "array full, currently we can't recover unless +G1UseFixedWindowMMUTracker");
+
     // In the case where ScavengeALot is true, such overflow is not
     // uncommon; in such cases, we can, without much loss of precision
     // or performance (we are GC'ing most of the time anyway!),
-    // simply overwrite the oldest entry in the tracker: this
-    // is also the behaviour when G1UseFixedWindowMMUTracker is enabled.
+    // simply overwrite the oldest entry in the tracker.
+
+    if (G1PolicyVerbose > 1) {
+      warning("MMU Tracker Queue overflow. Replacing earliest entry.");
+    }
+
     _head_index = trim_index(_head_index + 1);
     assert(_head_index == _tail_index, "Because we have a full circular buffer");
     _tail_index = trim_index(_tail_index + 1);
diff -r b17deadc902e -r b9bc732be7c0 src/share/vm/gc_implementation/g1/g1_globals.hpp
--- a/src/share/vm/gc_implementation/g1/g1_globals.hpp Wed Jun 09 13:53:58 2010 -0400
+++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp Thu Jun 10 08:27:35 2010 -0700
@@ -254,9 +254,6 @@
           "If non-0 is the size of the G1 survivor space, "                \
           "otherwise SurvivorRatio is used to determine the size")         \
                                                                             \
-  product(bool, G1UseFixedWindowMMUTracker, false,                          \
-          "If the MMU tracker's memory is full, forget the oldest entry")   \
-                                                                            \
   product(uintx, G1HeapRegionSize, 0,                                       \
           "Size of the G1 regions.")                                        \
                                                                             \
diff -r b17deadc902e -r b9bc732be7c0 src/share/vm/opto/graphKit.cpp
--- a/src/share/vm/opto/graphKit.cpp Wed Jun 09 13:53:58 2010 -0400
+++ b/src/share/vm/opto/graphKit.cpp Thu Jun 10 08:27:35 2010 -0700
@@ -3487,7 +3487,6 @@
 
   Node* tls = __ thread(); // ThreadLocalStorage
 
-  Node* no_ctrl = NULL;
   Node* no_base = __ top();
   float likely = PROB_LIKELY(0.999);
   float unlikely = PROB_UNLIKELY(0.999);
@@ -3511,10 +3510,10 @@
   Node* index_adr = __ AddP(no_base, tls, __ ConX(index_offset));
 
   // Now some values
-
-  Node* index  = __ load(no_ctrl, index_adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw);
-  Node* buffer = __ load(no_ctrl, buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
-
+  // Use ctrl to avoid hoisting these values past a safepoint, which could
+  // potentially reset these fields in the JavaThread.
+  Node* index  = __ load(__ ctrl(), index_adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw);
+  Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
 
   // Convert the store obj pointer to an int prior to doing math on it
   // Must use ctrl to prevent "integerized oop" existing across safepoint
diff -r b17deadc902e -r b9bc732be7c0 test/runtime/6888954/vmerrors.sh
--- a/test/runtime/6888954/vmerrors.sh Wed Jun 09 13:53:58 2010 -0400
+++ b/test/runtime/6888954/vmerrors.sh Thu Jun 10 08:27:35 2010 -0700
@@ -65,7 +65,7 @@
     done
 
     rm -f $$
-    i=$(expr $i + 1)
+    i=`expr $i + 1`
 done
 
 exit $rc
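Note on the g1MMUTracker.cpp hunk: the guarantee() that used to fire when the pause queue filled up is replaced by the usual overwrite-the-oldest-entry policy for a fixed-size circular buffer, plus an optional warning under G1PolicyVerbose > 1. Below is a minimal standalone sketch of that policy only, not the actual G1MMUTrackerQueue code; PauseRing, its members, and the simplified add_pause(duration) signature are illustrative names invented for this sketch.

    #include <cassert>
    #include <cstdio>

    // Simplified stand-in for the tracker's fixed-size circular buffer.
    // QueueLength and trim_index() echo the names used in the hunk above;
    // the entry type is reduced to a single pause duration.
    class PauseRing {
      static const int QueueLength = 4;   // the real tracker window is larger
      double _entries[QueueLength];
      int    _tail_index;                 // index of the oldest entry
      int    _no_entries;

      static int trim_index(int index) { return index % QueueLength; }

    public:
      PauseRing() : _tail_index(0), _no_entries(0) {}

      void add_pause(double duration) {
        if (_no_entries == QueueLength) {
          // Full: instead of failing, drop the oldest entry by advancing
          // the tail -- the overwrite-oldest policy the patch adopts.
          fprintf(stderr, "queue overflow, replacing earliest entry\n");
          _tail_index = trim_index(_tail_index + 1);
          _no_entries--;
        }
        _entries[trim_index(_tail_index + _no_entries)] = duration;
        _no_entries++;
      }

      double oldest() const { assert(_no_entries > 0); return _entries[_tail_index]; }
    };

    int main() {
      PauseRing ring;
      for (int i = 0; i < 6; i++) {       // two more adds than the capacity
        ring.add_pause(0.01 * i);
      }
      printf("oldest surviving pause: %.2f\n", ring.oldest());  // prints 0.02
      return 0;
    }

Dropping the oldest entry biases the tracked window toward recent pauses, which the retained comment argues is acceptable here because overflow mostly occurs when collections are very frequent (e.g. under ScavengeALot).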