comparison src/share/vm/runtime/mutexLocker.cpp @ 2152:0fa27f37d4d4

6977804: G1: remove the zero-filling thread Summary: This changeset removes the zero-filling thread from G1 and collapses the two free region lists we had before (the "free" and "unclean" lists) into one. The new free list uses the new heap region sets / lists abstractions that we'll ultimately use to keep track of all regions in the heap. A heap region set was also introduced for the humongous regions. Finally, this change increases the concurrency between the thread that completes freeing regions (after a cleanup pause) and the rest of the system (previously we'd have to wait for said thread to complete before allocating a new region). The changeset also includes a lot of refactoring and code simplification. Reviewed-by: jcoomes, johnc
author tonyp
date Wed, 19 Jan 2011 19:30:42 -0500
parents f95d63e2154a
children bf8517f4e4d0
comparison
equal deleted inserted replaced
2151:cb913d743d09 2152:0fa27f37d4d4
1 /* 1 /*
2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 2 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 * 4 *
5 * This code is free software; you can redistribute it and/or modify it 5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as 6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
78 Mutex* STS_init_lock = NULL; 78 Mutex* STS_init_lock = NULL;
79 Monitor* SLT_lock = NULL; 79 Monitor* SLT_lock = NULL;
80 Monitor* iCMS_lock = NULL; 80 Monitor* iCMS_lock = NULL;
81 Monitor* FullGCCount_lock = NULL; 81 Monitor* FullGCCount_lock = NULL;
82 Monitor* CMark_lock = NULL; 82 Monitor* CMark_lock = NULL;
83 Monitor* ZF_mon = NULL;
84 Monitor* Cleanup_mon = NULL;
85 Mutex* CMRegionStack_lock = NULL; 83 Mutex* CMRegionStack_lock = NULL;
86 Mutex* SATB_Q_FL_lock = NULL; 84 Mutex* SATB_Q_FL_lock = NULL;
87 Monitor* SATB_Q_CBL_mon = NULL; 85 Monitor* SATB_Q_CBL_mon = NULL;
88 Mutex* Shared_SATB_Q_lock = NULL; 86 Mutex* Shared_SATB_Q_lock = NULL;
89 Mutex* DirtyCardQ_FL_lock = NULL; 87 Mutex* DirtyCardQ_FL_lock = NULL;
120 Mutex* RawMonitor_lock = NULL; 118 Mutex* RawMonitor_lock = NULL;
121 Mutex* PerfDataMemAlloc_lock = NULL; 119 Mutex* PerfDataMemAlloc_lock = NULL;
122 Mutex* PerfDataManager_lock = NULL; 120 Mutex* PerfDataManager_lock = NULL;
123 Mutex* OopMapCacheAlloc_lock = NULL; 121 Mutex* OopMapCacheAlloc_lock = NULL;
124 122
123 Mutex* FreeList_lock = NULL;
124 Monitor* SecondaryFreeList_lock = NULL;
125 Mutex* OldSets_lock = NULL;
125 Mutex* MMUTracker_lock = NULL; 126 Mutex* MMUTracker_lock = NULL;
126 Mutex* HotCardCache_lock = NULL; 127 Mutex* HotCardCache_lock = NULL;
127 128
128 Monitor* GCTaskManager_lock = NULL; 129 Monitor* GCTaskManager_lock = NULL;
129 130
175 if (UseConcMarkSweepGC || UseG1GC) { 176 if (UseConcMarkSweepGC || UseG1GC) {
176 def(FullGCCount_lock , Monitor, leaf, true ); // in support of ExplicitGCInvokesConcurrent 177 def(FullGCCount_lock , Monitor, leaf, true ); // in support of ExplicitGCInvokesConcurrent
177 } 178 }
178 if (UseG1GC) { 179 if (UseG1GC) {
179 def(CMark_lock , Monitor, nonleaf, true ); // coordinate concurrent mark thread 180 def(CMark_lock , Monitor, nonleaf, true ); // coordinate concurrent mark thread
180 def(ZF_mon , Monitor, leaf, true );
181 def(Cleanup_mon , Monitor, nonleaf, true );
182 def(CMRegionStack_lock , Mutex, leaf, true ); 181 def(CMRegionStack_lock , Mutex, leaf, true );
183 def(SATB_Q_FL_lock , Mutex , special, true ); 182 def(SATB_Q_FL_lock , Mutex , special, true );
184 def(SATB_Q_CBL_mon , Monitor, nonleaf, true ); 183 def(SATB_Q_CBL_mon , Monitor, nonleaf, true );
185 def(Shared_SATB_Q_lock , Mutex, nonleaf, true ); 184 def(Shared_SATB_Q_lock , Mutex, nonleaf, true );
186 185
187 def(DirtyCardQ_FL_lock , Mutex , special, true ); 186 def(DirtyCardQ_FL_lock , Mutex , special, true );
188 def(DirtyCardQ_CBL_mon , Monitor, nonleaf, true ); 187 def(DirtyCardQ_CBL_mon , Monitor, nonleaf, true );
189 def(Shared_DirtyCardQ_lock , Mutex, nonleaf, true ); 188 def(Shared_DirtyCardQ_lock , Mutex, nonleaf, true );
190 189
190 def(FreeList_lock , Mutex, leaf , true );
191 def(SecondaryFreeList_lock , Monitor, leaf , true );
192 def(OldSets_lock , Mutex , leaf , true );
191 def(MMUTracker_lock , Mutex , leaf , true ); 193 def(MMUTracker_lock , Mutex , leaf , true );
192 def(HotCardCache_lock , Mutex , special , true ); 194 def(HotCardCache_lock , Mutex , special , true );
193 def(EvacFailureStack_lock , Mutex , nonleaf , true ); 195 def(EvacFailureStack_lock , Mutex , nonleaf , true );
194 } 196 }
195 def(ParGCRareEvent_lock , Mutex , leaf , true ); 197 def(ParGCRareEvent_lock , Mutex , leaf , true );