comparison src/share/vm/runtime/synchronizer.cpp @ 1640:bfc89697cccb

6964164: MonitorInUseLists leak of contended objects Summary: fix MonitorInUseLists memory leak and MonitorBound now works Reviewed-by: chrisphi, dice
author acorn
date Fri, 02 Jul 2010 17:23:43 -0400
parents 3a9de63b2209
children fa83ab460c54
comparison
equal deleted inserted replaced
1627:c5f1ea9e15e8 1640:bfc89697cccb
745 // B. After adding an objectmonitor to a free list. 745 // B. After adding an objectmonitor to a free list.
746 // 746 //
747 747
748 ObjectMonitor * ObjectSynchronizer::gBlockList = NULL ; 748 ObjectMonitor * ObjectSynchronizer::gBlockList = NULL ;
749 ObjectMonitor * volatile ObjectSynchronizer::gFreeList = NULL ; 749 ObjectMonitor * volatile ObjectSynchronizer::gFreeList = NULL ;
750 ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList = NULL ;
751 int ObjectSynchronizer::gOmInUseCount = 0;
750 static volatile intptr_t ListLock = 0 ; // protects global monitor free-list cache 752 static volatile intptr_t ListLock = 0 ; // protects global monitor free-list cache
751 static volatile int MonitorFreeCount = 0 ; // # on gFreeList 753 static volatile int MonitorFreeCount = 0 ; // # on gFreeList
752 static volatile int MonitorPopulation = 0 ; // # Extant -- in circulation 754 static volatile int MonitorPopulation = 0 ; // # Extant -- in circulation
753 #define CHAINMARKER ((oop)-1) 755 #define CHAINMARKER ((oop)-1)
754 756
824 ::printf ("Monitor scavenge - STW posted @%s (%d)\n", Whence, ForceMonitorScavenge) ; 826 ::printf ("Monitor scavenge - STW posted @%s (%d)\n", Whence, ForceMonitorScavenge) ;
825 ::fflush(stdout) ; 827 ::fflush(stdout) ;
826 } 828 }
827 } 829 }
828 } 830 }
831 /* Too slow for general assert or debug
832 void ObjectSynchronizer::verifyInUse (Thread *Self) {
833 ObjectMonitor* mid;
834 int inusetally = 0;
835 for (mid = Self->omInUseList; mid != NULL; mid = mid->FreeNext) {
836 inusetally ++;
837 }
838 assert(inusetally == Self->omInUseCount, "inuse count off");
839
840 int freetally = 0;
841 for (mid = Self->omFreeList; mid != NULL; mid = mid->FreeNext) {
842 freetally ++;
843 }
844 assert(freetally == Self->omFreeCount, "free count off");
845 }
846 */
829 847
830 ObjectMonitor * ATTR ObjectSynchronizer::omAlloc (Thread * Self) { 848 ObjectMonitor * ATTR ObjectSynchronizer::omAlloc (Thread * Self) {
831 // A large MAXPRIVATE value reduces both list lock contention 849 // A large MAXPRIVATE value reduces both list lock contention
832 // and list coherency traffic, but also tends to increase the 850 // and list coherency traffic, but also tends to increase the
833 // number of objectMonitors in circulation as well as the STW 851 // number of objectMonitors in circulation as well as the STW
851 guarantee (m->object() == NULL, "invariant") ; 869 guarantee (m->object() == NULL, "invariant") ;
852 if (MonitorInUseLists) { 870 if (MonitorInUseLists) {
853 m->FreeNext = Self->omInUseList; 871 m->FreeNext = Self->omInUseList;
854 Self->omInUseList = m; 872 Self->omInUseList = m;
855 Self->omInUseCount ++; 873 Self->omInUseCount ++;
874 // verifyInUse(Self);
875 } else {
876 m->FreeNext = NULL;
856 } 877 }
857 return m ; 878 return m ;
858 } 879 }
859 880
860 // 2: try to allocate from the global gFreeList 881 // 2: try to allocate from the global gFreeList
872 ObjectMonitor * take = gFreeList ; 893 ObjectMonitor * take = gFreeList ;
873 gFreeList = take->FreeNext ; 894 gFreeList = take->FreeNext ;
874 guarantee (take->object() == NULL, "invariant") ; 895 guarantee (take->object() == NULL, "invariant") ;
875 guarantee (!take->is_busy(), "invariant") ; 896 guarantee (!take->is_busy(), "invariant") ;
876 take->Recycle() ; 897 take->Recycle() ;
877 omRelease (Self, take) ; 898 omRelease (Self, take, false) ;
878 } 899 }
879 Thread::muxRelease (&ListLock) ; 900 Thread::muxRelease (&ListLock) ;
880 Self->omFreeProvision += 1 + (Self->omFreeProvision/2) ; 901 Self->omFreeProvision += 1 + (Self->omFreeProvision/2) ;
881 if (Self->omFreeProvision > MAXPRIVATE ) Self->omFreeProvision = MAXPRIVATE ; 902 if (Self->omFreeProvision > MAXPRIVATE ) Self->omFreeProvision = MAXPRIVATE ;
882 TEVENT (omFirst - reprovision) ; 903 TEVENT (omFirst - reprovision) ;
883 continue ;
884 904
885 const int mx = MonitorBound ; 905 const int mx = MonitorBound ;
886 if (mx > 0 && (MonitorPopulation-MonitorFreeCount) > mx) { 906 if (mx > 0 && (MonitorPopulation-MonitorFreeCount) > mx) {
887 // We can't safely induce a STW safepoint from omAlloc() as our thread 907 // We can't safely induce a STW safepoint from omAlloc() as our thread
888 // state may not be appropriate for such activities and callers may hold 908 // state may not be appropriate for such activities and callers may hold
959 // accumulation we could limit omCount to (omProvision*2), otherwise return 979 // accumulation we could limit omCount to (omProvision*2), otherwise return
960 // the objectMonitor to the global list. We should drain (return) in reasonable chunks. 980 // the objectMonitor to the global list. We should drain (return) in reasonable chunks.
961 // That is, *not* one-at-a-time. 981 // That is, *not* one-at-a-time.
962 982
963 983
964 void ObjectSynchronizer::omRelease (Thread * Self, ObjectMonitor * m) { 984 void ObjectSynchronizer::omRelease (Thread * Self, ObjectMonitor * m, bool fromPerThreadAlloc) {
965 guarantee (m->object() == NULL, "invariant") ; 985 guarantee (m->object() == NULL, "invariant") ;
966 m->FreeNext = Self->omFreeList ; 986
967 Self->omFreeList = m ; 987 // Remove from omInUseList
968 Self->omFreeCount ++ ; 988 if (MonitorInUseLists && fromPerThreadAlloc) {
989 ObjectMonitor* curmidinuse = NULL;
990 for (ObjectMonitor* mid = Self->omInUseList; mid != NULL; ) {
991 if (m == mid) {
992 // extract from per-thread in-use-list
993 if (mid == Self->omInUseList) {
994 Self->omInUseList = mid->FreeNext;
995 } else if (curmidinuse != NULL) {
996 curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread inuselist
997 }
998 Self->omInUseCount --;
999 // verifyInUse(Self);
1000 break;
1001 } else {
1002 curmidinuse = mid;
1003 mid = mid->FreeNext;
1004 }
1005 }
1006 }
1007
1008 // FreeNext is used for both omInUseList and omFreeList, so clear old before setting new
1009 m->FreeNext = Self->omFreeList ;
1010 Self->omFreeList = m ;
1011 Self->omFreeCount ++ ;
969 } 1012 }
970 1013
971 // Return the monitors of a moribund thread's local free list to 1014 // Return the monitors of a moribund thread's local free list to
972 // the global free list. Typically a thread calls omFlush() when 1015 // the global free list. Typically a thread calls omFlush() when
973 // it's dying. We could also consider having the VM thread steal 1016 // it's dying. We could also consider having the VM thread steal
974 // monitors from threads that have not run java code over a few 1017 // monitors from threads that have not run java code over a few
975 // consecutive STW safepoints. Relatedly, we might decay 1018 // consecutive STW safepoints. Relatedly, we might decay
976 // omFreeProvision at STW safepoints. 1019 // omFreeProvision at STW safepoints.
1020 //
1021 // Also return the monitors of a moribund thread's omInUseList to
1022 // a global gOmInUseList under the global list lock so these
1023 // will continue to be scanned.
977 // 1024 //
978 // We currently call omFlush() from the Thread:: dtor _after the thread 1025 // We currently call omFlush() from the Thread:: dtor _after the thread
979 // has been excised from the thread list and is no longer a mutator. 1026 // has been excised from the thread list and is no longer a mutator.
980 // That means that omFlush() can run concurrently with a safepoint and 1027 // That means that omFlush() can run concurrently with a safepoint and
981 // the scavenge operator. Calling omFlush() from JavaThread::exit() might 1028 // the scavenge operator. Calling omFlush() from JavaThread::exit() might
985 // operator. 1032 // operator.
986 1033
987 void ObjectSynchronizer::omFlush (Thread * Self) { 1034 void ObjectSynchronizer::omFlush (Thread * Self) {
988 ObjectMonitor * List = Self->omFreeList ; // Null-terminated SLL 1035 ObjectMonitor * List = Self->omFreeList ; // Null-terminated SLL
989 Self->omFreeList = NULL ; 1036 Self->omFreeList = NULL ;
990 if (List == NULL) return ;
991 ObjectMonitor * Tail = NULL ; 1037 ObjectMonitor * Tail = NULL ;
992 ObjectMonitor * s ;
993 int Tally = 0; 1038 int Tally = 0;
994 for (s = List ; s != NULL ; s = s->FreeNext) { 1039 if (List != NULL) {
995 Tally ++ ; 1040 ObjectMonitor * s ;
996 Tail = s ; 1041 for (s = List ; s != NULL ; s = s->FreeNext) {
997 guarantee (s->object() == NULL, "invariant") ; 1042 Tally ++ ;
998 guarantee (!s->is_busy(), "invariant") ; 1043 Tail = s ;
999 s->set_owner (NULL) ; // redundant but good hygiene 1044 guarantee (s->object() == NULL, "invariant") ;
1000 TEVENT (omFlush - Move one) ; 1045 guarantee (!s->is_busy(), "invariant") ;
1001 } 1046 s->set_owner (NULL) ; // redundant but good hygiene
1002 1047 TEVENT (omFlush - Move one) ;
1003 guarantee (Tail != NULL && List != NULL, "invariant") ; 1048 }
1049 guarantee (Tail != NULL && List != NULL, "invariant") ;
1050 }
1051
1052 ObjectMonitor * InUseList = Self->omInUseList;
1053 ObjectMonitor * InUseTail = NULL ;
1054 int InUseTally = 0;
1055 if (InUseList != NULL) {
1056 Self->omInUseList = NULL;
1057 ObjectMonitor *curom;
1058 for (curom = InUseList; curom != NULL; curom = curom->FreeNext) {
1059 InUseTail = curom;
1060 InUseTally++;
1061 }
1062 // TODO debug
1063 assert(Self->omInUseCount == InUseTally, "inuse count off");
1064 Self->omInUseCount = 0;
1065 guarantee (InUseTail != NULL && InUseList != NULL, "invariant");
1066 }
1067
1004 Thread::muxAcquire (&ListLock, "omFlush") ; 1068 Thread::muxAcquire (&ListLock, "omFlush") ;
1005 Tail->FreeNext = gFreeList ; 1069 if (Tail != NULL) {
1006 gFreeList = List ; 1070 Tail->FreeNext = gFreeList ;
1007 MonitorFreeCount += Tally; 1071 gFreeList = List ;
1072 MonitorFreeCount += Tally;
1073 }
1074
1075 if (InUseTail != NULL) {
1076 InUseTail->FreeNext = gOmInUseList;
1077 gOmInUseList = InUseList;
1078 gOmInUseCount += InUseTally;
1079 }
1080
1008 Thread::muxRelease (&ListLock) ; 1081 Thread::muxRelease (&ListLock) ;
1009 TEVENT (omFlush) ; 1082 TEVENT (omFlush) ;
1010 } 1083 }
1011 1084
1012 1085
1164 ObjectMonitor * m = omAlloc (Self) ; 1237 ObjectMonitor * m = omAlloc (Self) ;
1165 // Optimistically prepare the objectmonitor - anticipate successful CAS 1238 // Optimistically prepare the objectmonitor - anticipate successful CAS
1166 // We do this before the CAS in order to minimize the length of time 1239 // We do this before the CAS in order to minimize the length of time
1167 // in which INFLATING appears in the mark. 1240 // in which INFLATING appears in the mark.
1168 m->Recycle(); 1241 m->Recycle();
1169 m->FreeNext = NULL ;
1170 m->_Responsible = NULL ; 1242 m->_Responsible = NULL ;
1171 m->OwnerIsThread = 0 ; 1243 m->OwnerIsThread = 0 ;
1172 m->_recursions = 0 ; 1244 m->_recursions = 0 ;
1173 m->_SpinDuration = Knob_SpinLimit ; // Consider: maintain by type/class 1245 m->_SpinDuration = Knob_SpinLimit ; // Consider: maintain by type/class
1174 1246
1175 markOop cmp = (markOop) Atomic::cmpxchg_ptr (markOopDesc::INFLATING(), object->mark_addr(), mark) ; 1247 markOop cmp = (markOop) Atomic::cmpxchg_ptr (markOopDesc::INFLATING(), object->mark_addr(), mark) ;
1176 if (cmp != mark) { 1248 if (cmp != mark) {
1177 omRelease (Self, m) ; 1249 omRelease (Self, m, true) ;
1178 continue ; // Interference -- just retry 1250 continue ; // Interference -- just retry
1179 } 1251 }
1180 1252
1181 // We've successfully installed INFLATING (0) into the mark-word. 1253 // We've successfully installed INFLATING (0) into the mark-word.
1182 // This is the only case where 0 will appear in a mark-word. 1254 // This is the only case where 0 will appear in a mark-word.
1260 m->set_header(mark); 1332 m->set_header(mark);
1261 m->set_owner(NULL); 1333 m->set_owner(NULL);
1262 m->set_object(object); 1334 m->set_object(object);
1263 m->OwnerIsThread = 1 ; 1335 m->OwnerIsThread = 1 ;
1264 m->_recursions = 0 ; 1336 m->_recursions = 0 ;
1265 m->FreeNext = NULL ;
1266 m->_Responsible = NULL ; 1337 m->_Responsible = NULL ;
1267 m->_SpinDuration = Knob_SpinLimit ; // consider: keep metastats by type/class 1338 m->_SpinDuration = Knob_SpinLimit ; // consider: keep metastats by type/class
1268 1339
1269 if (Atomic::cmpxchg_ptr (markOopDesc::encode(m), object->mark_addr(), mark) != mark) { 1340 if (Atomic::cmpxchg_ptr (markOopDesc::encode(m), object->mark_addr(), mark) != mark) {
1270 m->set_object (NULL) ; 1341 m->set_object (NULL) ;
1271 m->set_owner (NULL) ; 1342 m->set_owner (NULL) ;
1272 m->OwnerIsThread = 0 ; 1343 m->OwnerIsThread = 0 ;
1273 m->Recycle() ; 1344 m->Recycle() ;
1274 omRelease (Self, m) ; 1345 omRelease (Self, m, true) ;
1275 m = NULL ; 1346 m = NULL ;
1276 continue ; 1347 continue ;
1277 // interference - the markword changed - just retry. 1348 // interference - the markword changed - just retry.
1278 // The state-transitions are one-way, so there's no chance of 1349 // The state-transitions are one-way, so there's no chance of
1279 // live-lock -- "Inflated" is an absorbing state. 1350 // live-lock -- "Inflated" is an absorbing state.
1850 // We have added a flag, MonitorInUseLists, which creates a list 1921 // We have added a flag, MonitorInUseLists, which creates a list
1851 // of active monitors for each thread. deflate_idle_monitors() 1922 // of active monitors for each thread. deflate_idle_monitors()
1852 // only scans the per-thread inuse lists. omAlloc() puts all 1923 // only scans the per-thread inuse lists. omAlloc() puts all
1853 // assigned monitors on the per-thread list. deflate_idle_monitors() 1924 // assigned monitors on the per-thread list. deflate_idle_monitors()
1854 // returns the non-busy monitors to the global free list. 1925 // returns the non-busy monitors to the global free list.
1926 // When a thread dies, omFlush() adds the list of active monitors for
1927 // that thread to a global gOmInUseList acquiring the
1928 // global list lock. deflate_idle_monitors() acquires the global
1929 // list lock to scan for non-busy monitors to move to the global free list.
1855 // An alternative could have used a single global inuse list. The 1930 // An alternative could have used a single global inuse list. The
1856 // downside would have been the additional cost of acquiring the global list lock 1931 // downside would have been the additional cost of acquiring the global list lock
1857 // for every omAlloc(). 1932 // for every omAlloc().
1858 // 1933 //
1859 // Perversely, the heap size -- and thus the STW safepoint rate -- 1934 // Perversely, the heap size -- and thus the STW safepoint rate --
1902 1977
1903 // Move the object to the working free list defined by FreeHead,FreeTail. 1978 // Move the object to the working free list defined by FreeHead,FreeTail.
1904 if (*FreeHeadp == NULL) *FreeHeadp = mid; 1979 if (*FreeHeadp == NULL) *FreeHeadp = mid;
1905 if (*FreeTailp != NULL) { 1980 if (*FreeTailp != NULL) {
1906 ObjectMonitor * prevtail = *FreeTailp; 1981 ObjectMonitor * prevtail = *FreeTailp;
1982 assert(prevtail->FreeNext == NULL, "cleaned up deflated?"); // TODO KK
1907 prevtail->FreeNext = mid; 1983 prevtail->FreeNext = mid;
1908 } 1984 }
1909 *FreeTailp = mid; 1985 *FreeTailp = mid;
1910 deflated = true; 1986 deflated = true;
1911 } 1987 }
1912 return deflated; 1988 return deflated;
1989 }
1990
1991 // Caller acquires ListLock
1992 int ObjectSynchronizer::walk_monitor_list(ObjectMonitor** listheadp,
1993 ObjectMonitor** FreeHeadp, ObjectMonitor** FreeTailp) {
1994 ObjectMonitor* mid;
1995 ObjectMonitor* next;
1996 ObjectMonitor* curmidinuse = NULL;
1997 int deflatedcount = 0;
1998
1999 for (mid = *listheadp; mid != NULL; ) {
2000 oop obj = (oop) mid->object();
2001 bool deflated = false;
2002 if (obj != NULL) {
2003 deflated = deflate_monitor(mid, obj, FreeHeadp, FreeTailp);
2004 }
2005 if (deflated) {
2006 // extract from per-thread in-use-list
2007 if (mid == *listheadp) {
2008 *listheadp = mid->FreeNext;
2009 } else if (curmidinuse != NULL) {
2010 curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread inuselist
2011 }
2012 next = mid->FreeNext;
2013 mid->FreeNext = NULL; // This mid is current tail in the FreeHead list
2014 mid = next;
2015 deflatedcount++;
2016 } else {
2017 curmidinuse = mid;
2018 mid = mid->FreeNext;
2019 }
2020 }
2021 return deflatedcount;
1913 } 2022 }
1914 2023
1915 void ObjectSynchronizer::deflate_idle_monitors() { 2024 void ObjectSynchronizer::deflate_idle_monitors() {
1916 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); 2025 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1917 int nInuse = 0 ; // currently associated with objects 2026 int nInuse = 0 ; // currently associated with objects
1927 // And in case the vm thread is acquiring a lock during a safepoint 2036 // And in case the vm thread is acquiring a lock during a safepoint
1928 // See e.g. 6320749 2037 // See e.g. 6320749
1929 Thread::muxAcquire (&ListLock, "scavenge - return") ; 2038 Thread::muxAcquire (&ListLock, "scavenge - return") ;
1930 2039
1931 if (MonitorInUseLists) { 2040 if (MonitorInUseLists) {
1932 ObjectMonitor* mid; 2041 int inUse = 0;
1933 ObjectMonitor* next;
1934 ObjectMonitor* curmidinuse;
1935 for (JavaThread* cur = Threads::first(); cur != NULL; cur = cur->next()) { 2042 for (JavaThread* cur = Threads::first(); cur != NULL; cur = cur->next()) {
1936 curmidinuse = NULL; 2043 nInCirculation+= cur->omInUseCount;
1937 for (mid = cur->omInUseList; mid != NULL; ) { 2044 int deflatedcount = walk_monitor_list(cur->omInUseList_addr(), &FreeHead, &FreeTail);
1938 oop obj = (oop) mid->object(); 2045 cur->omInUseCount-= deflatedcount;
1939 deflated = false; 2046 // verifyInUse(cur);
1940 if (obj != NULL) { 2047 nScavenged += deflatedcount;
1941 deflated = deflate_monitor(mid, obj, &FreeHead, &FreeTail); 2048 nInuse += cur->omInUseCount;
1942 }
1943 if (deflated) {
1944 // extract from per-thread in-use-list
1945 if (mid == cur->omInUseList) {
1946 cur->omInUseList = mid->FreeNext;
1947 } else if (curmidinuse != NULL) {
1948 curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread inuselist
1949 }
1950 next = mid->FreeNext;
1951 mid->FreeNext = NULL; // This mid is current tail in the FreeHead list
1952 mid = next;
1953 cur->omInUseCount--;
1954 nScavenged ++ ;
1955 } else {
1956 curmidinuse = mid;
1957 mid = mid->FreeNext;
1958 nInuse ++;
1959 }
1960 } 2049 }
1961 } 2050
2051 // For moribund threads, scan gOmInUseList
2052 if (gOmInUseList) {
2053 nInCirculation += gOmInUseCount;
2054 int deflatedcount = walk_monitor_list((ObjectMonitor **)&gOmInUseList, &FreeHead, &FreeTail);
2055 gOmInUseCount-= deflatedcount;
2056 nScavenged += deflatedcount;
2057 nInuse += gOmInUseCount;
2058 }
2059
1962 } else for (ObjectMonitor* block = gBlockList; block != NULL; block = next(block)) { 2060 } else for (ObjectMonitor* block = gBlockList; block != NULL; block = next(block)) {
1963 // Iterate over all extant monitors - Scavenge all idle monitors. 2061 // Iterate over all extant monitors - Scavenge all idle monitors.
1964 assert(block->object() == CHAINMARKER, "must be a block header"); 2062 assert(block->object() == CHAINMARKER, "must be a block header");
1965 nInCirculation += _BLOCKSIZE ; 2063 nInCirculation += _BLOCKSIZE ;
1966 for (int i = 1 ; i < _BLOCKSIZE; i++) { 2064 for (int i = 1 ; i < _BLOCKSIZE; i++) {