Mercurial > hg > truffle
annotate src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp @ 3285:49a67202bc67
7011855: G1: non-product flag to artificially grow the heap
Summary: It introduces non-product cmd line parameter G1DummyRegionsPerGC which indicates how many "dummy" regions to allocate at the end of each GC. This allows the G1 heap to grow artificially and makes concurrent marking cycles more frequent irrespective of what the application that is running is doing. The dummy regions will be found totally empty during cleanup so this parameter can also be used to stress the concurrent cleanup operation.
Reviewed-by: brutisso, johnc
author | tonyp |
---|---|
date | Tue, 19 Apr 2011 15:46:59 -0400 |
parents | b099aaf51bf8 |
children | 1f4413413144 |
rev | line source |
---|---|
0 | 1 /* |
2365
a181f3a124dd
6987703: iCMS: Intermittent hang with gc/gctests/CallGC/CallGC01 and +ExplicitGCInvokesConcurrent
ysr
parents:
2226
diff
changeset
|
2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1521
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1521
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1521
diff
changeset
|
21 * questions. |
0 | 22 * |
23 */ | |
24 | |
1972 | 25 #include "precompiled.hpp" |
26 #include "classfile/symbolTable.hpp" | |
27 #include "classfile/systemDictionary.hpp" | |
28 #include "code/codeCache.hpp" | |
29 #include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp" | |
30 #include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp" | |
31 #include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp" | |
32 #include "gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp" | |
33 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp" | |
34 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp" | |
35 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp" | |
36 #include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp" | |
37 #include "gc_implementation/parNew/parNewGeneration.hpp" | |
38 #include "gc_implementation/shared/collectorCounters.hpp" | |
39 #include "gc_implementation/shared/isGCActiveMark.hpp" | |
40 #include "gc_interface/collectedHeap.inline.hpp" | |
41 #include "memory/cardTableRS.hpp" | |
42 #include "memory/collectorPolicy.hpp" | |
43 #include "memory/gcLocker.inline.hpp" | |
44 #include "memory/genCollectedHeap.hpp" | |
45 #include "memory/genMarkSweep.hpp" | |
46 #include "memory/genOopClosures.inline.hpp" | |
47 #include "memory/iterator.hpp" | |
48 #include "memory/referencePolicy.hpp" | |
49 #include "memory/resourceArea.hpp" | |
50 #include "oops/oop.inline.hpp" | |
51 #include "prims/jvmtiExport.hpp" | |
52 #include "runtime/globals_extension.hpp" | |
53 #include "runtime/handles.inline.hpp" | |
54 #include "runtime/java.hpp" | |
55 #include "runtime/vmThread.hpp" | |
56 #include "services/memoryService.hpp" | |
57 #include "services/runtimeService.hpp" | |
0 | 58 |
59 // statics | |
60 CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL; | |
61 bool CMSCollector::_full_gc_requested = false; | |
62 | |
63 ////////////////////////////////////////////////////////////////// | |
64 // In support of CMS/VM thread synchronization | |
65 ////////////////////////////////////////////////////////////////// | |
66 // We split use of the CGC_lock into 2 "levels". | |
67 // The low-level locking is of the usual CGC_lock monitor. We introduce | |
68 // a higher level "token" (hereafter "CMS token") built on top of the | |
69 // low level monitor (hereafter "CGC lock"). | |
70 // The token-passing protocol gives priority to the VM thread. The | |
71 // CMS-lock doesn't provide any fairness guarantees, but clients | |
72 // should ensure that it is only held for very short, bounded | |
73 // durations. | |
74 // | |
75 // When either of the CMS thread or the VM thread is involved in | |
76 // collection operations during which it does not want the other | |
77 // thread to interfere, it obtains the CMS token. | |
78 // | |
79 // If either thread tries to get the token while the other has | |
80 // it, that thread waits. However, if the VM thread and CMS thread | |
81 // both want the token, then the VM thread gets priority while the | |
82 // CMS thread waits. This ensures, for instance, that the "concurrent" | |
83 // phases of the CMS thread's work do not block out the VM thread | |
84 // for long periods of time as the CMS thread continues to hog | |
85 // the token. (See bug 4616232). | |
86 // | |
87 // The baton-passing functions are, however, controlled by the | |
88 // flags _foregroundGCShouldWait and _foregroundGCIsActive, | |
89 // and here the low-level CMS lock, not the high level token, | |
90 // ensures mutual exclusion. | |
91 // | |
92 // Two important conditions that we have to satisfy: | |
93 // 1. if a thread does a low-level wait on the CMS lock, then it | |
94 // relinquishes the CMS token if it were holding that token | |
95 // when it acquired the low-level CMS lock. | |
96 // 2. any low-level notifications on the low-level lock | |
97 // should only be sent when a thread has relinquished the token. | |
98 // | |
99 // In the absence of either property, we'd have potential deadlock. | |
100 // | |
101 // We protect each of the CMS (concurrent and sequential) phases | |
102 // with the CMS _token_, not the CMS _lock_. | |
103 // | |
104 // The only code protected by CMS lock is the token acquisition code | |
105 // itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the | |
106 // baton-passing code. | |
107 // | |
108 // Unfortunately, i couldn't come up with a good abstraction to factor and | |
109 // hide the naked CGC_lock manipulation in the baton-passing code | |
110 // further below. That's something we should try to do. Also, the proof | |
111 // of correctness of this 2-level locking scheme is far from obvious, | |
112 // and potentially quite slippery. We have an uneasy suspicion, for instance, |
113 // that there may be a theoretical possibility of delay/starvation in the | |
114 // low-level lock/wait/notify scheme used for the baton-passing because of | |
115 // potential interference with the priority scheme embodied in the |
116 // CMS-token-passing protocol. See related comments at a CGC_lock->wait() | |
117 // invocation further below and marked with "XXX 20011219YSR". | |
118 // Indeed, as we note elsewhere, this may become yet more slippery | |
119 // in the presence of multiple CMS and/or multiple VM threads. XXX | |
120 | |
121 class CMSTokenSync: public StackObj { | |
122 private: | |
123 bool _is_cms_thread; | |
124 public: | |
125 CMSTokenSync(bool is_cms_thread): | |
126 _is_cms_thread(is_cms_thread) { | |
127 assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(), | |
128 "Incorrect argument to constructor"); | |
129 ConcurrentMarkSweepThread::synchronize(_is_cms_thread); | |
130 } | |
131 | |
132 ~CMSTokenSync() { | |
133 assert(_is_cms_thread ? | |
134 ConcurrentMarkSweepThread::cms_thread_has_cms_token() : | |
135 ConcurrentMarkSweepThread::vm_thread_has_cms_token(), | |
136 "Incorrect state"); | |
137 ConcurrentMarkSweepThread::desynchronize(_is_cms_thread); | |
138 } | |
139 }; | |
140 | |
141 // Convenience class that does a CMSTokenSync, and then acquires | |
142 // upto three locks. | |
143 class CMSTokenSyncWithLocks: public CMSTokenSync { | |
144 private: | |
145 // Note: locks are acquired in textual declaration order | |
146 // and released in the opposite order | |
147 MutexLockerEx _locker1, _locker2, _locker3; | |
148 public: | |
149 CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1, | |
150 Mutex* mutex2 = NULL, Mutex* mutex3 = NULL): | |
151 CMSTokenSync(is_cms_thread), | |
152 _locker1(mutex1, Mutex::_no_safepoint_check_flag), | |
153 _locker2(mutex2, Mutex::_no_safepoint_check_flag), | |
154 _locker3(mutex3, Mutex::_no_safepoint_check_flag) | |
155 { } | |
156 }; | |
157 | |
158 | |
159 // Wrapper class to temporarily disable icms during a foreground cms collection. | |
160 class ICMSDisabler: public StackObj { | |
161 public: | |
162 // The ctor disables icms and wakes up the thread so it notices the change; | |
163 // the dtor re-enables icms. Note that the CMSCollector methods will check | |
164 // CMSIncrementalMode. | |
165 ICMSDisabler() { CMSCollector::disable_icms(); CMSCollector::start_icms(); } | |
166 ~ICMSDisabler() { CMSCollector::enable_icms(); } | |
167 }; | |
168 | |
169 ////////////////////////////////////////////////////////////////// | |
170 // Concurrent Mark-Sweep Generation ///////////////////////////// | |
171 ////////////////////////////////////////////////////////////////// | |
172 | |
173 NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;) | |
174 | |
175 // This struct contains per-thread things necessary to support parallel | |
176 // young-gen collection. | |
177 class CMSParGCThreadState: public CHeapObj { | |
178 public: | |
179 CFLS_LAB lab; | |
180 PromotionInfo promo; | |
181 | |
182 // Constructor. | |
183 CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) { | |
184 promo.setSpace(cfls); | |
185 } | |
186 }; | |
187 | |
188 ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration( | |
189 ReservedSpace rs, size_t initial_byte_size, int level, | |
190 CardTableRS* ct, bool use_adaptive_freelists, | |
191 FreeBlockDictionary::DictionaryChoice dictionaryChoice) : | |
192 CardGeneration(rs, initial_byte_size, level, ct), | |
1571
2d127394260e
6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
kvn
parents:
1521
diff
changeset
|
193 _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))), |
0 | 194 _debug_collection_type(Concurrent_collection_type) |
195 { | |
196 HeapWord* bottom = (HeapWord*) _virtual_space.low(); | |
197 HeapWord* end = (HeapWord*) _virtual_space.high(); | |
198 | |
199 _direct_allocated_words = 0; | |
200 NOT_PRODUCT( | |
201 _numObjectsPromoted = 0; | |
202 _numWordsPromoted = 0; | |
203 _numObjectsAllocated = 0; | |
204 _numWordsAllocated = 0; | |
205 ) | |
206 | |
207 _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end), | |
208 use_adaptive_freelists, | |
209 dictionaryChoice); | |
210 NOT_PRODUCT(debug_cms_space = _cmsSpace;) | |
211 if (_cmsSpace == NULL) { | |
212 vm_exit_during_initialization( | |
213 "CompactibleFreeListSpace allocation failure"); | |
214 } | |
215 _cmsSpace->_gen = this; | |
216 | |
217 _gc_stats = new CMSGCStats(); | |
218 | |
219 // Verify the assumption that FreeChunk::_prev and OopDesc::_klass | |
220 // offsets match. The ability to tell free chunks from objects | |
221 // depends on this property. | |
222 debug_only( | |
223 FreeChunk* junk = NULL; | |
187 | 224 assert(UseCompressedOops || |
225 junk->prev_addr() == (void*)(oop(junk)->klass_addr()), | |
0 | 226 "Offset of FreeChunk::_prev within FreeChunk must match" |
227 " that of OopDesc::_klass within OopDesc"); | |
228 ) | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
229 if (CollectedHeap::use_parallel_gc_threads()) { |
0 | 230 typedef CMSParGCThreadState* CMSParGCThreadStatePtr; |
231 _par_gc_thread_states = | |
232 NEW_C_HEAP_ARRAY(CMSParGCThreadStatePtr, ParallelGCThreads); | |
233 if (_par_gc_thread_states == NULL) { | |
234 vm_exit_during_initialization("Could not allocate par gc structs"); | |
235 } | |
236 for (uint i = 0; i < ParallelGCThreads; i++) { | |
237 _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace()); | |
238 if (_par_gc_thread_states[i] == NULL) { | |
239 vm_exit_during_initialization("Could not allocate par gc structs"); | |
240 } | |
241 } | |
242 } else { | |
243 _par_gc_thread_states = NULL; | |
244 } | |
245 _incremental_collection_failed = false; | |
246 // The "dilatation_factor" is the expansion that can occur on | |
247 // account of the fact that the minimum object size in the CMS | |
248 // generation may be larger than that in, say, a contiguous young | |
249 // generation. | |
250 // Ideally, in the calculation below, we'd compute the dilatation | |
251 // factor as: MinChunkSize/(promoting_gen's min object size) | |
252 // Since we do not have such a general query interface for the | |
253 // promoting generation, we'll instead just use the mimimum | |
254 // object size (which today is a header's worth of space); | |
255 // note that all arithmetic is in units of HeapWords. | |
1571
2d127394260e
6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
kvn
parents:
1521
diff
changeset
|
256 assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking"); |
0 | 257 assert(_dilatation_factor >= 1.0, "from previous assert"); |
258 } | |
259 | |
94
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
260 |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
261 // The field "_initiating_occupancy" represents the occupancy percentage |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
262 // at which we trigger a new collection cycle. Unless explicitly specified |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
263 // via CMSInitiating[Perm]OccupancyFraction (argument "io" below), it |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
264 // is calculated by: |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
265 // |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
266 // Let "f" be MinHeapFreeRatio in |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
267 // |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
268 // _intiating_occupancy = 100-f + |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
269 // f * (CMSTrigger[Perm]Ratio/100) |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
270 // where CMSTrigger[Perm]Ratio is the argument "tr" below. |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
271 // |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
272 // That is, if we assume the heap is at its desired maximum occupancy at the |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
273 // end of a collection, we let CMSTrigger[Perm]Ratio of the (purported) free |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
274 // space be allocated before initiating a new collection cycle. |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
275 // |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
276 void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, intx tr) { |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
277 assert(io <= 100 && tr >= 0 && tr <= 100, "Check the arguments"); |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
278 if (io >= 0) { |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
279 _initiating_occupancy = (double)io / 100.0; |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
280 } else { |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
281 _initiating_occupancy = ((100 - MinHeapFreeRatio) + |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
282 (double)(tr * MinHeapFreeRatio) / 100.0) |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
283 / 100.0; |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
284 } |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
285 } |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
286 |
0 | 287 void ConcurrentMarkSweepGeneration::ref_processor_init() { |
288 assert(collector() != NULL, "no collector"); | |
289 collector()->ref_processor_init(); | |
290 } | |
291 | |
292 void CMSCollector::ref_processor_init() { | |
293 if (_ref_processor == NULL) { | |
294 // Allocate and initialize a reference processor | |
2369
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2365
diff
changeset
|
295 _ref_processor = |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2365
diff
changeset
|
296 new ReferenceProcessor(_span, // span |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2365
diff
changeset
|
297 (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2365
diff
changeset
|
298 (int) ParallelGCThreads, // mt processing degree |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2365
diff
changeset
|
299 _cmsGen->refs_discovery_is_mt(), // mt discovery |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2365
diff
changeset
|
300 (int) MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2365
diff
changeset
|
301 _cmsGen->refs_discovery_is_atomic(), // discovery is not atomic |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2365
diff
changeset
|
302 &_is_alive_closure, // closure for liveness info |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2365
diff
changeset
|
303 false); // next field updates do not need write barrier |
0 | 304 // Initialize the _ref_processor field of CMSGen |
305 _cmsGen->set_ref_processor(_ref_processor); | |
306 | |
307 // Allocate a dummy ref processor for perm gen. | |
308 ReferenceProcessor* rp2 = new ReferenceProcessor(); | |
309 if (rp2 == NULL) { | |
310 vm_exit_during_initialization("Could not allocate ReferenceProcessor object"); | |
311 } | |
312 _permGen->set_ref_processor(rp2); | |
313 } | |
314 } | |
315 | |
316 CMSAdaptiveSizePolicy* CMSCollector::size_policy() { | |
317 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
318 assert(gch->kind() == CollectedHeap::GenCollectedHeap, | |
319 "Wrong type of heap"); | |
320 CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*) | |
321 gch->gen_policy()->size_policy(); | |
322 assert(sp->is_gc_cms_adaptive_size_policy(), | |
323 "Wrong type of size policy"); | |
324 return sp; | |
325 } | |
326 | |
327 CMSGCAdaptivePolicyCounters* CMSCollector::gc_adaptive_policy_counters() { | |
328 CMSGCAdaptivePolicyCounters* results = | |
329 (CMSGCAdaptivePolicyCounters*) collector_policy()->counters(); | |
330 assert( | |
331 results->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind, | |
332 "Wrong gc policy counter kind"); | |
333 return results; | |
334 } | |
335 | |
336 | |
337 void ConcurrentMarkSweepGeneration::initialize_performance_counters() { | |
338 | |
339 const char* gen_name = "old"; | |
340 | |
341 // Generation Counters - generation 1, 1 subspace | |
342 _gen_counters = new GenerationCounters(gen_name, 1, 1, &_virtual_space); | |
343 | |
344 _space_counters = new GSpaceCounters(gen_name, 0, | |
345 _virtual_space.reserved_size(), | |
346 this, _gen_counters); | |
347 } | |
348 | |
349 CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha): | |
350 _cms_gen(cms_gen) | |
351 { | |
352 assert(alpha <= 100, "bad value"); | |
353 _saved_alpha = alpha; | |
354 | |
355 // Initialize the alphas to the bootstrap value of 100. | |
356 _gc0_alpha = _cms_alpha = 100; | |
357 | |
358 _cms_begin_time.update(); | |
359 _cms_end_time.update(); | |
360 | |
361 _gc0_duration = 0.0; | |
362 _gc0_period = 0.0; | |
363 _gc0_promoted = 0; | |
364 | |
365 _cms_duration = 0.0; | |
366 _cms_period = 0.0; | |
367 _cms_allocated = 0; | |
368 | |
369 _cms_used_at_gc0_begin = 0; | |
370 _cms_used_at_gc0_end = 0; | |
371 _allow_duty_cycle_reduction = false; | |
372 _valid_bits = 0; | |
373 _icms_duty_cycle = CMSIncrementalDutyCycle; | |
374 } | |
375 | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
376 double CMSStats::cms_free_adjustment_factor(size_t free) const { |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
377 // TBD: CR 6909490 |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
378 return 1.0; |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
379 } |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
380 |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
381 void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) { |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
382 } |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
383 |
0 | 384 // If promotion failure handling is on use |
385 // the padded average size of the promotion for each | |
386 // young generation collection. | |
387 double CMSStats::time_until_cms_gen_full() const { | |
388 size_t cms_free = _cms_gen->cmsSpace()->free(); | |
389 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
1888
a7214d79fcf1
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
1837
diff
changeset
|
390 size_t expected_promotion = MIN2(gch->get_gen(0)->capacity(), |
a7214d79fcf1
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
1837
diff
changeset
|
391 (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average()); |
0 | 392 if (cms_free > expected_promotion) { |
393 // Start a cms collection if there isn't enough space to promote | |
394 // for the next minor collection. Use the padded average as | |
395 // a safety factor. | |
396 cms_free -= expected_promotion; | |
397 | |
398 // Adjust by the safety factor. | |
399 double cms_free_dbl = (double)cms_free; | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
400 double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor)/100.0; |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
401 // Apply a further correction factor which tries to adjust |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
402 // for recent occurance of concurrent mode failures. |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
403 cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
404 cms_free_dbl = cms_free_dbl * cms_adjustment; |
0 | 405 |
406 if (PrintGCDetails && Verbose) { | |
407 gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free " | |
408 SIZE_FORMAT " expected_promotion " SIZE_FORMAT, | |
409 cms_free, expected_promotion); | |
410 gclog_or_tty->print_cr(" cms_free_dbl %f cms_consumption_rate %f", | |
411 cms_free_dbl, cms_consumption_rate() + 1.0); | |
412 } | |
413 // Add 1 in case the consumption rate goes to zero. | |
414 return cms_free_dbl / (cms_consumption_rate() + 1.0); | |
415 } | |
416 return 0.0; | |
417 } | |
418 | |
419 // Compare the duration of the cms collection to the | |
420 // time remaining before the cms generation is empty. | |
421 // Note that the time from the start of the cms collection | |
422 // to the start of the cms sweep (less than the total | |
423 // duration of the cms collection) can be used. This | |
424 // has been tried and some applications experienced | |
425 // promotion failures early in execution. This was | |
426 // possibly because the averages were not accurate | |
427 // enough at the beginning. | |
428 double CMSStats::time_until_cms_start() const { | |
429 // We add "gc0_period" to the "work" calculation | |
430 // below because this query is done (mostly) at the | |
431 // end of a scavenge, so we need to conservatively | |
432 // account for that much possible delay | |
433 // in the query so as to avoid concurrent mode failures | |
434 // due to starting the collection just a wee bit too | |
435 // late. | |
436 double work = cms_duration() + gc0_period(); | |
437 double deadline = time_until_cms_gen_full(); | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
438 // If a concurrent mode failure occurred recently, we want to be |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
439 // more conservative and halve our expected time_until_cms_gen_full() |
0 | 440 if (work > deadline) { |
441 if (Verbose && PrintGCDetails) { | |
442 gclog_or_tty->print( | |
443 " CMSCollector: collect because of anticipated promotion " | |
444 "before full %3.7f + %3.7f > %3.7f ", cms_duration(), | |
445 gc0_period(), time_until_cms_gen_full()); | |
446 } | |
447 return 0.0; | |
448 } | |
449 return work - deadline; | |
450 } | |
451 | |
452 // Return a duty cycle based on old_duty_cycle and new_duty_cycle, limiting the | |
453 // amount of change to prevent wild oscillation. | |
454 unsigned int CMSStats::icms_damped_duty_cycle(unsigned int old_duty_cycle, | |
455 unsigned int new_duty_cycle) { | |
456 assert(old_duty_cycle <= 100, "bad input value"); | |
457 assert(new_duty_cycle <= 100, "bad input value"); | |
458 | |
459 // Note: use subtraction with caution since it may underflow (values are | |
460 // unsigned). Addition is safe since we're in the range 0-100. | |
461 unsigned int damped_duty_cycle = new_duty_cycle; | |
462 if (new_duty_cycle < old_duty_cycle) { | |
463 const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 5U); | |
464 if (new_duty_cycle + largest_delta < old_duty_cycle) { | |
465 damped_duty_cycle = old_duty_cycle - largest_delta; | |
466 } | |
467 } else if (new_duty_cycle > old_duty_cycle) { | |
468 const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 15U); | |
469 if (new_duty_cycle > old_duty_cycle + largest_delta) { | |
470 damped_duty_cycle = MIN2(old_duty_cycle + largest_delta, 100U); | |
471 } | |
472 } | |
473 assert(damped_duty_cycle <= 100, "invalid duty cycle computed"); | |
474 | |
475 if (CMSTraceIncrementalPacing) { | |
476 gclog_or_tty->print(" [icms_damped_duty_cycle(%d,%d) = %d] ", | |
477 old_duty_cycle, new_duty_cycle, damped_duty_cycle); | |
478 } | |
479 return damped_duty_cycle; | |
480 } | |
481 | |
482 unsigned int CMSStats::icms_update_duty_cycle_impl() { | |
483 assert(CMSIncrementalPacing && valid(), | |
484 "should be handled in icms_update_duty_cycle()"); | |
485 | |
486 double cms_time_so_far = cms_timer().seconds(); | |
487 double scaled_duration = cms_duration_per_mb() * _cms_used_at_gc0_end / M; | |
488 double scaled_duration_remaining = fabsd(scaled_duration - cms_time_so_far); | |
489 | |
490 // Avoid division by 0. | |
491 double time_until_full = MAX2(time_until_cms_gen_full(), 0.01); | |
492 double duty_cycle_dbl = 100.0 * scaled_duration_remaining / time_until_full; | |
493 | |
494 unsigned int new_duty_cycle = MIN2((unsigned int)duty_cycle_dbl, 100U); | |
495 if (new_duty_cycle > _icms_duty_cycle) { | |
496 // Avoid very small duty cycles (1 or 2); 0 is allowed. | |
497 if (new_duty_cycle > 2) { | |
498 _icms_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle, | |
499 new_duty_cycle); | |
500 } | |
501 } else if (_allow_duty_cycle_reduction) { | |
502 // The duty cycle is reduced only once per cms cycle (see record_cms_end()). | |
503 new_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle, new_duty_cycle); | |
504 // Respect the minimum duty cycle. | |
505 unsigned int min_duty_cycle = (unsigned int)CMSIncrementalDutyCycleMin; | |
506 _icms_duty_cycle = MAX2(new_duty_cycle, min_duty_cycle); | |
507 } | |
508 | |
509 if (PrintGCDetails || CMSTraceIncrementalPacing) { | |
510 gclog_or_tty->print(" icms_dc=%d ", _icms_duty_cycle); | |
511 } | |
512 | |
513 _allow_duty_cycle_reduction = false; | |
514 return _icms_duty_cycle; | |
515 } | |
516 | |
#ifndef PRODUCT
// Debug-only dump of the raw CMSStats fields (alphas, durations, periods,
// promotion/allocation totals, occupancy snapshots) followed, when the
// statistics are valid(), by the derived rates the pacing heuristics use.
// Output is a single comma-separated run on 'st'.
void CMSStats::print_on(outputStream *st) const {
  st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
  st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
            gc0_duration(), gc0_period(), gc0_promoted());
  st->print(",cms_dur=%g,cms_dur_per_mb=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
            cms_duration(), cms_duration_per_mb(),
            cms_period(), cms_allocated());
  st->print(",cms_since_beg=%g,cms_since_end=%g",
            cms_time_since_begin(), cms_time_since_end());
  st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
            _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
  if (CMSIncrementalMode) {
    st->print(",dc=%d", icms_duty_cycle());
  }

  if (valid()) {
    st->print(",promo_rate=%g,cms_alloc_rate=%g",
              promotion_rate(), cms_allocation_rate());
    st->print(",cms_consumption_rate=%g,time_until_full=%g",
              cms_consumption_rate(), time_until_cms_gen_full());
  }
  st->print(" ");
}
#endif // #ifndef PRODUCT
542 | |
// Static collector-wide state: the current phase of the background
// collection, plus handshake flags used when a foreground (stop-world)
// collection needs to coordinate with the background one.
CMSCollector::CollectorState CMSCollector::_collectorState =
  CMSCollector::Idling;
bool CMSCollector::_foregroundGCIsActive = false;
bool CMSCollector::_foregroundGCShouldWait = false;
547 | |
// Construct the collector that manages both the cms old gen and the perm
// gen. Allocates the marking bit map, mod union table and marking/revisit
// stacks over the combined span, sets up the optional parallel worker gang
// and task queues, initializes eden/survivor sampling support for parallel
// young-gen rescan, and finally starts the CMS background thread. Any
// allocation failure issues a warning and returns early, leaving
// _completed_initialization false.
CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
                           ConcurrentMarkSweepGeneration* permGen,
                           CardTableRS* ct,
                           ConcurrentMarkSweepPolicy* cp):
  _cmsGen(cmsGen),
  _permGen(permGen),
  _ct(ct),
  _ref_processor(NULL),    // will be set later
  _conc_workers(NULL),     // may be set later
  _abort_preclean(false),
  _start_sampling(false),
  _between_prologue_and_epilogue(false),
  _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
  _perm_gen_verify_bit_map(0, -1 /* no mutex */, "No_lock"),
  _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
                 -1 /* lock-free */, "No_lock" /* dummy */),
  _modUnionClosure(&_modUnionTable),
  _modUnionClosurePar(&_modUnionTable),
  // Adjust my span to cover old (cms) gen and perm gen
  _span(cmsGen->reserved()._union(permGen->reserved())),
  // Construct the is_alive_closure with _span & markBitMap
  _is_alive_closure(_span, &_markBitMap),
  _restart_addr(NULL),
  _overflow_list(NULL),
  _stats(cmsGen),
  _eden_chunk_array(NULL),     // may be set in ctor body
  _eden_chunk_capacity(0),     // -- ditto --
  _eden_chunk_index(0),        // -- ditto --
  _survivor_plab_array(NULL),  // -- ditto --
  _survivor_chunk_array(NULL), // -- ditto --
  _survivor_chunk_capacity(0), // -- ditto --
  _survivor_chunk_index(0),    // -- ditto --
  _ser_pmc_preclean_ovflw(0),
  _ser_kac_preclean_ovflw(0),
  _ser_pmc_remark_ovflw(0),
  _par_pmc_remark_ovflw(0),
  _ser_kac_ovflw(0),
  _par_kac_ovflw(0),
#ifndef PRODUCT
  _num_par_pushes(0),
#endif
  _collection_count_start(0),
  _verifying(false),
  _icms_start_limit(NULL),
  _icms_stop_limit(NULL),
  _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
  _completed_initialization(false),
  _collector_policy(cp),
  _should_unload_classes(false),
  _concurrent_cycles_since_last_unload(0),
  _roots_scanning_options(0),
  _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
  _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding)
{
  if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
    ExplicitGCInvokesConcurrent = true;
  }
  // Now expand the span and allocate the collection support structures
  // (MUT, marking bit map etc.) to cover both generations subject to
  // collection.

  // First check that _permGen is adjacent to _cmsGen and above it.
  assert( _cmsGen->reserved().word_size() > 0
          && _permGen->reserved().word_size() > 0,
         "generations should not be of zero size");
  assert(_cmsGen->reserved().intersection(_permGen->reserved()).is_empty(),
         "_cmsGen and _permGen should not overlap");
  assert(_cmsGen->reserved().end() == _permGen->reserved().start(),
         "_cmsGen->end() different from _permGen->start()");

  // For use by dirty card to oop closures.
  _cmsGen->cmsSpace()->set_collector(this);
  _permGen->cmsSpace()->set_collector(this);

  // Allocate MUT and marking bit map
  {
    MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
    if (!_markBitMap.allocate(_span)) {
      warning("Failed to allocate CMS Bit Map");
      return;
    }
    assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
  }
  {
    _modUnionTable.allocate(_span);
    assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
  }

  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CMS Marking Stack");
    return;
  }
  if (!_revisitStack.allocate(CMSRevisitStackSize)) {
    warning("Failed to allocate CMS Revisit Stack");
    return;
  }

  // Support for multi-threaded concurrent phases
  if (CMSConcurrentMTEnabled) {
    if (FLAG_IS_DEFAULT(ConcGCThreads)) {
      // just for now
      FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3)/4);
    }
    if (ConcGCThreads > 1) {
      _conc_workers = new YieldingFlexibleWorkGang("Parallel CMS Threads",
                                 ConcGCThreads, true);
      if (_conc_workers == NULL) {
        warning("GC/CMS: _conc_workers allocation failure: "
              "forcing -CMSConcurrentMTEnabled");
        CMSConcurrentMTEnabled = false;
      } else {
        _conc_workers->initialize_workers();
      }
    } else {
      CMSConcurrentMTEnabled = false;
    }
  }
  if (!CMSConcurrentMTEnabled) {
    ConcGCThreads = 0;
  } else {
    // Turn off CMSCleanOnEnter optimization temporarily for
    // the MT case where it's not fixed yet; see 6178663.
    CMSCleanOnEnter = false;
  }
  assert((_conc_workers != NULL) == (ConcGCThreads > 1),
         "Inconsistency");

  // Parallel task queues; these are shared for the
  // concurrent and stop-world phases of CMS, but
  // are not shared with parallel scavenge (ParNew).
  {
    uint i;
    uint num_queues = (uint) MAX2(ParallelGCThreads, ConcGCThreads);

    if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
         || ParallelRefProcEnabled)
        && num_queues > 0) {
      _task_queues = new OopTaskQueueSet(num_queues);
      if (_task_queues == NULL) {
        warning("task_queues allocation failure.");
        return;
      }
      _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues);
      if (_hash_seed == NULL) {
        warning("_hash_seed array allocation failure");
        return;
      }

      typedef Padded<OopTaskQueue> PaddedOopTaskQueue;
      for (i = 0; i < num_queues; i++) {
        PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
        if (q == NULL) {
          warning("work_queue allocation failure.");
          return;
        }
        _task_queues->register_queue(i, q);
      }
      for (i = 0; i < num_queues; i++) {
        _task_queues->queue(i)->initialize();
        _hash_seed[i] = 17;  // copied from ParNew
      }
    }
  }

  _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
  _permGen->init_initiating_occupancy(CMSInitiatingPermOccupancyFraction, CMSTriggerPermRatio);

  // Clip CMSBootstrapOccupancy between 0 and 100.
  _bootstrap_occupancy = ((double)MIN2((uintx)100, MAX2((uintx)0, CMSBootstrapOccupancy)))
                         /(double)100;

  _full_gcs_since_conc_gc = 0;

  // Now tell CMS generations the identity of their collector
  ConcurrentMarkSweepGeneration::set_collector(this);

  // Create & start a CMS thread for this CMS collector
  _cmsThread = ConcurrentMarkSweepThread::start(this);
  assert(cmsThread() != NULL, "CMS Thread should have been created");
  assert(cmsThread()->collector() == this,
         "CMS Thread should refer to this gen");
  assert(CGC_lock != NULL, "Where's the CGC_lock?");

  // Support for parallelizing young gen rescan
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  _young_gen = gch->prev_gen(_cmsGen);
  if (gch->supports_inline_contig_alloc()) {
    _top_addr = gch->top_addr();
    _end_addr = gch->end_addr();
    assert(_young_gen != NULL, "no _young_gen");
    _eden_chunk_index = 0;
    _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
    _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity);
    if (_eden_chunk_array == NULL) {
      _eden_chunk_capacity = 0;
      warning("GC/CMS: _eden_chunk_array allocation failure");
    }
  }
  assert(_eden_chunk_array != NULL || _eden_chunk_capacity == 0, "Error");

  // Support for parallelizing survivor space rescan
  if (CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) {
    const size_t max_plab_samples =
      ((DefNewGeneration*)_young_gen)->max_survivor_size()/MinTLABSize;

    _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads);
    _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples);
    _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads);
    if (_survivor_plab_array == NULL || _survivor_chunk_array == NULL
        || _cursor == NULL) {
      // Partial allocation failure: free whatever succeeded and fall back
      // to the serial survivor rescan path.
      warning("Failed to allocate survivor plab/chunk array");
      if (_survivor_plab_array  != NULL) {
        FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array);
        _survivor_plab_array = NULL;
      }
      if (_survivor_chunk_array != NULL) {
        FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array);
        _survivor_chunk_array = NULL;
      }
      if (_cursor != NULL) {
        FREE_C_HEAP_ARRAY(size_t, _cursor);
        _cursor = NULL;
      }
    } else {
      _survivor_chunk_capacity = 2*max_plab_samples;
      for (uint i = 0; i < ParallelGCThreads; i++) {
        HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples);
        if (vec == NULL) {
          // Unwind the per-thread vectors allocated so far.
          warning("Failed to allocate survivor plab array");
          for (int j = i; j > 0; j--) {
            FREE_C_HEAP_ARRAY(HeapWord*, _survivor_plab_array[j-1].array());
          }
          FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array);
          FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array);
          _survivor_plab_array = NULL;
          _survivor_chunk_array = NULL;
          _survivor_chunk_capacity = 0;
          break;
        } else {
          // Placement-new the ChunkArray over the raw slot.
          ChunkArray* cur =
            ::new (&_survivor_plab_array[i]) ChunkArray(vec,
                                                        max_plab_samples);
          assert(cur->end() == 0, "Should be 0");
          assert(cur->array() == vec, "Should be vec");
          assert(cur->capacity() == max_plab_samples, "Error");
        }
      }
    }
  }
  assert(   (   _survivor_plab_array  != NULL
             && _survivor_chunk_array != NULL)
         || (   _survivor_chunk_capacity == 0
             && _survivor_chunk_index == 0),
         "Error");

  // Choose what strong roots should be scanned depending on verification options
  // and perm gen collection mode.
  if (!CMSClassUnloadingEnabled) {
    // If class unloading is disabled we want to include all classes into the root set.
    add_root_scanning_option(SharedHeap::SO_AllClasses);
  } else {
    add_root_scanning_option(SharedHeap::SO_SystemClasses);
  }

  NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
  _gc_counters = new CollectorCounters("CMS", 1);
  _completed_initialization = true;
  _inter_sweep_timer.start();  // start of time
#ifdef SPARC
  // Issue a stern warning, but allow use for experimentation and debugging.
  if (VM_Version::is_sun4v() && UseMemSetInBOT) {
    assert(!FLAG_IS_DEFAULT(UseMemSetInBOT), "Error");
    warning("Experimental flag -XX:+UseMemSetInBOT is known to cause instability"
            " on sun4v; please understand that you are using at your own risk!");
  }
#endif
}
825 | |
// Human-readable generation name used in logging and error reporting.
const char* ConcurrentMarkSweepGeneration::name() const {
  return "concurrent mark-sweep generation";
}
829 void ConcurrentMarkSweepGeneration::update_counters() { | |
830 if (UsePerfData) { | |
831 _space_counters->update_all(); | |
832 _gen_counters->update_all(); | |
833 } | |
834 } | |
835 | |
836 // this is an optimized version of update_counters(). it takes the | |
837 // used value as a parameter rather than computing it. | |
838 // | |
839 void ConcurrentMarkSweepGeneration::update_counters(size_t used) { | |
840 if (UsePerfData) { | |
841 _space_counters->update_used(used); | |
842 _space_counters->update_capacity(); | |
843 _gen_counters->update_all(); | |
844 } | |
845 } | |
846 | |
// Print the generation-level summary, then the underlying cms space.
void ConcurrentMarkSweepGeneration::print() const {
  Generation::print();
  cmsSpace()->print();
}
851 | |
#ifndef PRODUCT
// Debug-only: print a census of the free lists in the cms space.
void ConcurrentMarkSweepGeneration::print_statistics() {
  cmsSpace()->printFLCensus(0);
}
#endif
857 | |
// Print an occupancy line for this generation tagged with phase name 's'
// (used/capacity in raw bytes when Verbose, otherwise in KB), followed by
// heap-wide totals.
void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (PrintGCDetails) {
    if (Verbose) {
      gclog_or_tty->print(" [%d %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]",
        level(), short_name(), s, used(), capacity());
    } else {
      gclog_or_tty->print(" [%d %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]",
        level(), short_name(), s, used() / K, capacity() / K);
    }
  }
  // NOTE(review): the heap totals below are printed whenever this method
  // runs, gated only on Verbose (not PrintGCDetails) — confirm intended.
  if (Verbose) {
    gclog_or_tty->print(" "SIZE_FORMAT"("SIZE_FORMAT")",
      gch->used(), gch->capacity());
  } else {
    gclog_or_tty->print(" "SIZE_FORMAT"K("SIZE_FORMAT"K)",
      gch->used() / K, gch->capacity() / K);
  }
}
877 | |
// Approximation of contiguously-available space: the larger of what is
// still uncommitted in the reserved virtual space and the largest single
// free block currently in the space.
size_t
ConcurrentMarkSweepGeneration::contiguous_available() const {
  // dld proposes an improvement in precision here. If the committed
  // part of the space ends in a free block we should add that to
  // uncommitted size in the calculation below. Will make this
  // change later, staying with the approximation below for the
  // time being. -- ysr.
  return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
}
887 | |
888 size_t | |
889 ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const { | |
890 return _cmsSpace->max_alloc_in_words() * HeapWordSize; | |
891 } | |
892 | |
893 size_t ConcurrentMarkSweepGeneration::max_available() const { | |
894 return free() + _virtual_space.uncommitted_size(); | |
895 } | |
896 | |
// Answer whether promoting up to 'max_promotion_in_bytes' into this
// generation is likely to succeed: safe if the space that could still be
// made available (free + uncommitted) covers either the padded average of
// past promotions or the full worst-case request.
bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
  size_t available = max_available();
  size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
  bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
  if (Verbose && PrintGCDetails) {
    gclog_or_tty->print_cr(
      "CMS: promo attempt is%s safe: available("SIZE_FORMAT") %s av_promo("SIZE_FORMAT"),"
      "max_promo("SIZE_FORMAT")",
      res? "":" not", available, res? ">=":"<",
      av_promo, max_promotion_in_bytes);
  }
  return res;
}
910 | |
// At a promotion failure dump information on block layout in heap
// (cms old generation).
void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
  if (CMSDumpAtPromotionFailure) {
    // Takes the relevant locks and dumps the block layout of the cms
    // space for post-mortem fragmentation analysis.
    cmsSpace()->dump_at_safepoint_with_locks(collector(), gclog_or_tty);
  }
}
// The cms space is the single space used when this generation is
// mark-compact collected.
CompactibleSpace*
ConcurrentMarkSweepGeneration::first_compaction_space() const {
  return _cmsSpace;
}
923 | |
void ConcurrentMarkSweepGeneration::reset_after_compaction() {
  // Clear the promotion information. These pointers can be adjusted
  // along with all the other pointers into the heap but
  // compaction is expected to be a rare event with
  // a heap using cms so don't do it without seeing the need.
  if (CollectedHeap::use_parallel_gc_threads()) {
    // One promotion-tracking state per parallel GC thread.
    for (uint i = 0; i < ParallelGCThreads; i++) {
      _par_gc_thread_states[i]->promo.reset();
    }
  }
}
935 | |
// Apply 'blk' to this generation's single space. 'usedOnly' is not
// consulted here — the cms space is always visited.
void ConcurrentMarkSweepGeneration::space_iterate(SpaceClosure* blk, bool usedOnly) {
  blk->do_space(_cmsSpace);
}
939 | |
940 void ConcurrentMarkSweepGeneration::compute_new_size() { | |
941 assert_locked_or_safepoint(Heap_lock); | |
942 | |
943 // If incremental collection failed, we just want to expand | |
944 // to the limit. | |
945 if (incremental_collection_failed()) { | |
946 clear_incremental_collection_failed(); | |
947 grow_to_reserved(); | |
948 return; | |
949 } | |
950 | |
951 size_t expand_bytes = 0; | |
952 double free_percentage = ((double) free()) / capacity(); | |
953 double desired_free_percentage = (double) MinHeapFreeRatio / 100; | |
954 double maximum_free_percentage = (double) MaxHeapFreeRatio / 100; | |
955 | |
956 // compute expansion delta needed for reaching desired free percentage | |
957 if (free_percentage < desired_free_percentage) { | |
958 size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage)); | |
959 assert(desired_capacity >= capacity(), "invalid expansion size"); | |
960 expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes); | |
961 } | |
962 if (expand_bytes > 0) { | |
963 if (PrintGCDetails && Verbose) { | |
964 size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage)); | |
965 gclog_or_tty->print_cr("\nFrom compute_new_size: "); | |
966 gclog_or_tty->print_cr(" Free fraction %f", free_percentage); | |
967 gclog_or_tty->print_cr(" Desired free fraction %f", | |
968 desired_free_percentage); | |
969 gclog_or_tty->print_cr(" Maximum free fraction %f", | |
970 maximum_free_percentage); | |
971 gclog_or_tty->print_cr(" Capactiy "SIZE_FORMAT, capacity()/1000); | |
972 gclog_or_tty->print_cr(" Desired capacity "SIZE_FORMAT, | |
973 desired_capacity/1000); | |
974 int prev_level = level() - 1; | |
975 if (prev_level >= 0) { | |
976 size_t prev_size = 0; | |
977 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
978 Generation* prev_gen = gch->_gens[prev_level]; | |
979 prev_size = prev_gen->capacity(); | |
980 gclog_or_tty->print_cr(" Younger gen size "SIZE_FORMAT, | |
981 prev_size/1000); | |
982 } | |
983 gclog_or_tty->print_cr(" unsafe_max_alloc_nogc "SIZE_FORMAT, | |
984 unsafe_max_alloc_nogc()/1000); | |
985 gclog_or_tty->print_cr(" contiguous available "SIZE_FORMAT, | |
986 contiguous_available()/1000); | |
987 gclog_or_tty->print_cr(" Expand by "SIZE_FORMAT" (bytes)", | |
988 expand_bytes); | |
989 } | |
990 // safe if expansion fails | |
991 expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio); | |
992 if (PrintGCDetails && Verbose) { | |
993 gclog_or_tty->print_cr(" Expanded free fraction %f", | |
994 ((double) free()) / capacity()); | |
995 } | |
996 } | |
997 } | |
998 | |
// Expose the free-list lock of the underlying cms space.
Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
  return cmsSpace()->freelistLock();
}
1002 | |
// Mutator allocation entry point: issue a synchronous yield request
// (constructed before the lock is taken), then acquire the free-list
// lock and delegate to have_lock_and_allocate().
HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size,
                                                  bool tlab) {
  CMSSynchronousYieldRequest yr;
  MutexLockerEx x(freelistLock(),
                  Mutex::_no_safepoint_check_flag);
  return have_lock_and_allocate(size, tlab);
}
1010 | |
// Allocate 'size' words from the cms space; caller must already hold the
// free-list lock. On success, informs the collector so the new block is
// marked live (grey) if concurrent marking is in progress, and updates
// direct-allocation accounting. Returns NULL on failure.
HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
                                                bool tlab /* ignored */) {
  assert_lock_strong(freelistLock());
  size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
  HeapWord* res = cmsSpace()->allocate(adjustedSize);
  // Allocate the object live (grey) if the background collector has
  // started marking. This is necessary because the marker may
  // have passed this address and consequently this object will
  // not otherwise be greyed and would be incorrectly swept up.
  // Note that if this object contains references, the writing
  // of those references will dirty the card containing this object
  // allowing the object to be blackened (and its references scanned)
  // either during a preclean phase or at the final checkpoint.
  if (res != NULL) {
    // We may block here with an uninitialized object with
    // its mark-bit or P-bits not yet set. Such objects need
    // to be safely navigable by block_start().
    assert(oop(res)->klass_or_null() == NULL, "Object should be uninitialized here.");
    assert(!((FreeChunk*)res)->isFree(), "Error, block will look free but show wrong size");
    collector()->direct_allocated(res, adjustedSize);
    _direct_allocated_words += adjustedSize;
    // allocation counters
    NOT_PRODUCT(
      _numObjectsAllocated++;
      _numWordsAllocated += (int)adjustedSize;
    )
  }
  return res;
}
1040 | |
// In the case of direct allocation by mutators in a generation that
// is being concurrently collected, the object must be allocated
// live (grey) if the background collector has started marking.
// This is necessary because the marker may
// have passed this address and consequently this object will
// not otherwise be greyed and would be incorrectly swept up.
// Note that if this object contains references, the writing
// of those references will dirty the card containing this object
// allowing the object to be blackened (and its references scanned)
// either during a preclean phase or at the final checkpoint.
void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
  assert(_markBitMap.covers(start, size), "Out of bounds");
  if (_collectorState >= Marking) {
    MutexLockerEx y(_markBitMap.lock(),
                    Mutex::_no_safepoint_check_flag);
    // [see comments preceding SweepClosure::do_blk() below for details]
    // 1. need to mark the object as live so it isn't collected
    // 2. need to mark the 2nd bit to indicate the object may be uninitialized
    // 3. need to mark the end of the object so marking, precleaning or sweeping
    //    can skip over uninitialized or unparsable objects. An allocated
    //    object is considered uninitialized for our purposes as long as
    //    its klass word is NULL. (Unparsable objects are those which are
    //    initialized in the sense just described, but whose sizes can still
    //    not be correctly determined. Note that the class of unparsable objects
    //    can only occur in the perm gen. All old gen objects are parsable
    //    as soon as they are initialized.)
    _markBitMap.mark(start);          // object is live
    _markBitMap.mark(start + 1);      // object is potentially uninitialized?
    _markBitMap.mark(start + size - 1);
                                      // mark end of object
  }
  // check that oop looks uninitialized
  assert(oop(start)->klass_or_null() == NULL, "_klass should be NULL");
}
1075 | |
// Bookkeeping for an object of 'obj_size' words just promoted (copied)
// into the CMS generation at 'start'.  'par' selects the parallel vs
// serial marking primitives; 'is_obj_array' causes the object's entire
// extent, not just its head, to be dirtied in the mod union table.
void CMSCollector::promoted(bool par, HeapWord* start,
                            bool is_obj_array, size_t obj_size) {
  assert(_markBitMap.covers(start), "Out of bounds");
  // See comment in direct_allocated() about when objects should
  // be allocated live.
  if (_collectorState >= Marking) {
    // we already hold the marking bit map lock, taken in
    // the prologue
    if (par) {
      _markBitMap.par_mark(start);
    } else {
      _markBitMap.mark(start);
    }
    // We don't need to mark the object as uninitialized (as
    // in direct_allocated above) because this is being done with the
    // world stopped and the object will be initialized by the
    // time the marking, precleaning or sweeping get to look at it.
    // But see the code for copying objects into the CMS generation,
    // where we need to ensure that concurrent readers of the
    // block offset table are able to safely navigate a block that
    // is in flux from being free to being allocated (and in
    // transition while being copied into) and subsequently
    // becoming a bona-fide object when the copy/promotion is complete.
    assert(SafepointSynchronize::is_at_safepoint(),
           "expect promotion only at safepoints");

    if (_collectorState < Sweeping) {
      // Mark the appropriate cards in the modUnionTable, so that
      // this object gets scanned before the sweep. If this is
      // not done, CMS generation references in the object might
      // not get marked.
      // For the case of arrays, which are otherwise precisely
      // marked, we need to dirty the entire array, not just its head.
      if (is_obj_array) {
        // The [par_]mark_range() method expects mr.end() below to
        // be aligned to the granularity of a bit's representation
        // in the heap. In the case of the MUT below, that's a
        // card size.
        MemRegion mr(start,
                     (HeapWord*)round_to((intptr_t)(start + obj_size),
                      CardTableModRefBS::card_size /* bytes */));
        if (par) {
          _modUnionTable.par_mark_range(mr);
        } else {
          _modUnionTable.mark_range(mr);
        }
      } else {  // not an obj array; we can just mark the head
        if (par) {
          _modUnionTable.par_mark(start);
        } else {
          _modUnionTable.mark(start);
        }
      }
    }
  }
}
1132 | |
1133 static inline size_t percent_of_space(Space* space, HeapWord* addr) | |
1134 { | |
1135 size_t delta = pointer_delta(addr, space->bottom()); | |
1136 return (size_t)(delta * 100.0 / (space->capacity() / HeapWordSize)); | |
1137 } | |
1138 | |
// Recompute the incremental-mode (iCMS) allocation limits within eden
// from the current duty cycle, then install the new start limit as
// eden's soft end.  With a zero duty cycle (or degenerate limits) both
// limits are pushed to eden's end, effectively disabling them.
void CMSCollector::icms_update_allocation_limits()
{
  Generation* gen0 = GenCollectedHeap::heap()->get_gen(0);
  EdenSpace* eden = gen0->as_DefNewGeneration()->eden();

  const unsigned int duty_cycle = stats().icms_update_duty_cycle();
  if (CMSTraceIncrementalPacing) {
    stats().print();
  }

  assert(duty_cycle <= 100, "invalid duty cycle");
  if (duty_cycle != 0) {
    // The duty_cycle is a percentage between 0 and 100; convert to words and
    // then compute the offset from the endpoints of the space.
    size_t free_words = eden->free() / HeapWordSize;
    double free_words_dbl = (double)free_words;
    size_t duty_cycle_words = (size_t)(free_words_dbl * duty_cycle / 100.0);
    // Center the duty-cycle window within eden's free region.
    size_t offset_words = (free_words - duty_cycle_words) / 2;

    _icms_start_limit = eden->top() + offset_words;
    _icms_stop_limit = eden->end() - offset_words;

    // The limits may be adjusted (shifted to the right) by
    // CMSIncrementalOffset, to allow the application more mutator time after a
    // young gen gc (when all mutators were stopped) and before CMS starts and
    // takes away one or more cpus.
    if (CMSIncrementalOffset != 0) {
      double adjustment_dbl = free_words_dbl * CMSIncrementalOffset / 100.0;
      size_t adjustment = (size_t)adjustment_dbl;
      HeapWord* tmp_stop = _icms_stop_limit + adjustment;
      // Only apply the shift if it neither wraps nor runs past eden's end.
      if (tmp_stop > _icms_stop_limit && tmp_stop < eden->end()) {
        _icms_start_limit += adjustment;
        _icms_stop_limit = tmp_stop;
      }
    }
  }
  if (duty_cycle == 0 || (_icms_start_limit == _icms_stop_limit)) {
    _icms_start_limit = _icms_stop_limit = eden->end();
  }

  // Install the new start limit.
  eden->set_soft_end(_icms_start_limit);

  if (CMSTraceIncrementalMode) {
    gclog_or_tty->print(" icms alloc limits: "
                        PTR_FORMAT "," PTR_FORMAT
                        " (" SIZE_FORMAT "%%," SIZE_FORMAT "%%) ",
                        _icms_start_limit, _icms_stop_limit,
                        percent_of_space(eden, _icms_start_limit),
                        percent_of_space(eden, _icms_stop_limit));
    if (Verbose) {
      gclog_or_tty->print("eden: ");
      eden->print_on(gclog_or_tty);
    }
  }
}
1195 | |
// Invoked when allocation in 'space' has reached the soft limit at 'top'.
// Returns the next soft limit to install (or NULL for none), starting or
// stopping the icms thread as the start/stop limits are crossed.
//
// Any changes here should try to maintain the invariant
// that if this method is called with _icms_start_limit
// and _icms_stop_limit both NULL, then it should return NULL
// and not notify the icms thread.
HeapWord*
CMSCollector::allocation_limit_reached(Space* space, HeapWord* top,
                                       size_t word_size)
{
  // A start_limit equal to end() means the duty cycle is 0, so treat that as a
  // nop.
  if (CMSIncrementalMode && _icms_start_limit != space->end()) {
    if (top <= _icms_start_limit) {
      // Crossed the start limit: begin incremental CMS work and make the
      // stop limit the next trigger point.
      if (CMSTraceIncrementalMode) {
        space->print_on(gclog_or_tty);
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" start limit top=" PTR_FORMAT
                               ", new limit=" PTR_FORMAT
                               " (" SIZE_FORMAT "%%)",
                               top, _icms_stop_limit,
                               percent_of_space(space, _icms_stop_limit));
      }
      ConcurrentMarkSweepThread::start_icms();
      assert(top < _icms_stop_limit, "Tautology");
      if (word_size < pointer_delta(_icms_stop_limit, top)) {
        return _icms_stop_limit;
      }

      // The allocation will cross both the _start and _stop limits, so do the
      // stop notification also and return end().
      if (CMSTraceIncrementalMode) {
        space->print_on(gclog_or_tty);
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" +stop limit top=" PTR_FORMAT
                               ", new limit=" PTR_FORMAT
                               " (" SIZE_FORMAT "%%)",
                               top, space->end(),
                               percent_of_space(space, space->end()));
      }
      ConcurrentMarkSweepThread::stop_icms();
      return space->end();
    }

    if (top <= _icms_stop_limit) {
      // Crossed the stop limit: pause incremental CMS work; no further
      // soft limit until the limits are recomputed.
      if (CMSTraceIncrementalMode) {
        space->print_on(gclog_or_tty);
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" stop limit top=" PTR_FORMAT
                               ", new limit=" PTR_FORMAT
                               " (" SIZE_FORMAT "%%)",
                               top, space->end(),
                               percent_of_space(space, space->end()));
      }
      ConcurrentMarkSweepThread::stop_icms();
      return space->end();
    }

    if (CMSTraceIncrementalMode) {
      space->print_on(gclog_or_tty);
      gclog_or_tty->stamp();
      gclog_or_tty->print_cr(" end limit top=" PTR_FORMAT
                             ", new limit=" PTR_FORMAT,
                             top, NULL);
    }
  }

  return NULL;
}
1263 | |
// Promote (copy) 'obj' of 'obj_size' words into the CMS generation.
// On initial failure the generation is expanded and the promotion is
// retried once.  Returns the new copy, or NULL if space could not be
// found (or if promotion_should_fail() fires in non-product builds).
oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  // allocate, copy and if necessary update promoinfo --
  // delegate to underlying space.
  assert_lock_strong(freelistLock());

#ifndef PRODUCT
  if (Universe::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  oop res = _cmsSpace->promote(obj, obj_size);
  if (res == NULL) {
    // expand and retry
    size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
    expand(s*HeapWordSize, MinHeapDeltaBytes,
      CMSExpansionCause::_satisfy_promotion);
    // Since there's currently no next generation, we don't try to promote
    // into a more senior generation.
    assert(next_gen() == NULL, "assumption, based upon which no attempt "
                               "is made to pass on a possibly failing "
                               "promotion to next generation");
    res = _cmsSpace->promote(obj, obj_size);
  }
  if (res != NULL) {
    // See comment in allocate() about when objects should
    // be allocated live.
    assert(obj->is_oop(), "Will dereference klass pointer below");
    collector()->promoted(false,           // Not parallel
                          (HeapWord*)res, obj->is_objArray(), obj_size);
    // promotion counters
    NOT_PRODUCT(
      _numObjectsPromoted++;
      _numWordsPromoted +=
        (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
    )
  }
  return res;
}
1304 | |
1305 | |
1306 HeapWord* | |
1307 ConcurrentMarkSweepGeneration::allocation_limit_reached(Space* space, | |
1308 HeapWord* top, | |
1309 size_t word_sz) | |
1310 { | |
1311 return collector()->allocation_limit_reached(space, top, word_sz); | |
1312 } | |
1313 | |
// IMPORTANT: Notes on object size recognition in CMS.
// ---------------------------------------------------
// A block of storage in the CMS generation is always in
// one of three states. A free block (FREE), an allocated
// object (OBJECT) whose size() method reports the correct size,
// and an intermediate state (TRANSIENT) in which its size cannot
// be accurately determined.
// STATE IDENTIFICATION: (32 bit and 64 bit w/o COOPS)
// -----------------------------------------------------
// FREE:      klass_word & 1 == 1; mark_word holds block size
//
// OBJECT:    klass_word installed; klass_word != 0 && klass_word & 1 == 0;
//            obj->size() computes correct size
//            [Perm Gen objects needs to be "parsable" before they can be navigated]
//
// TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
//
// STATE IDENTIFICATION: (64 bit+COOPS)
// ------------------------------------
// FREE:      mark_word & CMS_FREE_BIT == 1; mark_word & ~CMS_FREE_BIT gives block_size
//
// OBJECT:    klass_word installed; klass_word != 0;
//            obj->size() computes correct size
//            [Perm Gen comment above continues to hold]
//
// TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
//
//
// STATE TRANSITION DIAGRAM
//
//        mut / parnew                      mut / parnew
// FREE --------------------> TRANSIENT ---------------------> OBJECT --|
//  ^                                                                   |
//  |------------------------ DEAD <------------------------------------|
//         sweep                             mut
//
// While a block is in TRANSIENT state its size cannot be determined
// so readers will either need to come back later or stall until
// the size can be determined. Note that for the case of direct
// allocation, P-bits, when available, may be used to determine the
// size of an object that may not yet have been initialized.
// Things to support parallel young-gen collection.
//
// Parallel version of promote(): copy 'old' (whose original mark word
// 'm' was saved by the caller) into this generation via the calling GC
// worker's local allocation buffer.  The klass word is installed last,
// separated by store-store barriers, so that a concurrent reader never
// sees a non-NULL klass on a partially-copied object (see the state
// notes above).  Returns the new copy, or NULL on allocation failure.
oop
ConcurrentMarkSweepGeneration::par_promote(int thread_num,
                                           oop old, markOop m,
                                           size_t word_sz) {
#ifndef PRODUCT
  if (Universe::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  PromotionInfo* promoInfo = &ps->promo;
  // if we are tracking promotions, then first ensure space for
  // promotion (including spooling space for saving header if necessary).
  // then allocate and copy, then track promoted info if needed.
  // When tracking (see PromotionInfo::track()), the mark word may
  // be displaced and in this case restoration of the mark word
  // occurs in the (oop_since_save_marks_)iterate phase.
  if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
    // Out of space for allocating spooling buffers;
    // try expanding and allocating spooling buffers.
    if (!expand_and_ensure_spooling_space(promoInfo)) {
      return NULL;
    }
  }
  assert(promoInfo->has_spooling_space(), "Control point invariant");
  const size_t alloc_sz = CompactibleFreeListSpace::adjustObjectSize(word_sz);
  HeapWord* obj_ptr = ps->lab.alloc(alloc_sz);
  if (obj_ptr == NULL) {
     // LAB exhausted: expand the generation and allocate a fresh LAB chunk.
     obj_ptr = expand_and_par_lab_allocate(ps, alloc_sz);
     if (obj_ptr == NULL) {
       return NULL;
     }
  }
  oop obj = oop(obj_ptr);
  OrderAccess::storestore();
  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
  assert(!((FreeChunk*)obj_ptr)->isFree(), "Error, block will look free but show wrong size");
  // IMPORTANT: See note on object initialization for CMS above.
  // Otherwise, copy the object.  Here we must be careful to insert the
  // klass pointer last, since this marks the block as an allocated object.
  // Except with compressed oops it's the mark word.
  HeapWord* old_ptr = (HeapWord*)old;
  // Restore the mark word copied above.
  obj->set_mark(m);
  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
  assert(!((FreeChunk*)obj_ptr)->isFree(), "Error, block will look free but show wrong size");
  OrderAccess::storestore();

  if (UseCompressedOops) {
    // Copy gap missed by (aligned) header size calculation below
    obj->set_klass_gap(old->klass_gap());
  }
  if (word_sz > (size_t)oopDesc::header_size()) {
    Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
                                 obj_ptr + oopDesc::header_size(),
                                 word_sz - oopDesc::header_size());
  }

  // Now we can track the promoted object, if necessary.  We take care
  // to delay the transition from uninitialized to full object
  // (i.e., insertion of klass pointer) until after, so that it
  // atomically becomes a promoted object.
  if (promoInfo->tracking()) {
    promoInfo->track((PromotedObject*)obj, old->klass());
  }
  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
  assert(!((FreeChunk*)obj_ptr)->isFree(), "Error, block will look free but show wrong size");
  assert(old->is_oop(), "Will use and dereference old klass ptr below");

  // Finally, install the klass pointer (this should be volatile).
  OrderAccess::storestore();
  obj->set_klass(old->klass());
  // We should now be able to calculate the right size for this object
  assert(obj->is_oop() && obj->size() == (int)word_sz, "Error, incorrect size computed for promoted object");

  collector()->promoted(true,          // parallel
                        obj_ptr, old->is_objArray(), word_sz);

  NOT_PRODUCT(
    Atomic::inc_ptr(&_numObjectsPromoted);
    Atomic::add_ptr(alloc_sz, &_numWordsPromoted);
  )

  return obj;
}
1443 | |
1444 void | |
1445 ConcurrentMarkSweepGeneration:: | |
1446 par_promote_alloc_undo(int thread_num, | |
1447 HeapWord* obj, size_t word_sz) { | |
1448 // CMS does not support promotion undo. | |
1449 ShouldNotReachHere(); | |
1450 } | |
1451 | |
1452 void | |
1453 ConcurrentMarkSweepGeneration:: | |
1454 par_promote_alloc_done(int thread_num) { | |
1455 CMSParGCThreadState* ps = _par_gc_thread_states[thread_num]; | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
1456 ps->lab.retire(thread_num); |
0 | 1457 } |
1458 | |
1459 void | |
1460 ConcurrentMarkSweepGeneration:: | |
1461 par_oop_since_save_marks_iterate_done(int thread_num) { | |
1462 CMSParGCThreadState* ps = _par_gc_thread_states[thread_num]; | |
1463 ParScanWithoutBarrierClosure* dummy_cl = NULL; | |
1464 ps->promo.promoted_oops_iterate_nv(dummy_cl); | |
1465 } | |
1466 | |
1467 // XXXPERM | |
1468 bool ConcurrentMarkSweepGeneration::should_collect(bool full, | |
1469 size_t size, | |
1470 bool tlab) | |
1471 { | |
1472 // We allow a STW collection only if a full | |
1473 // collection was requested. | |
1474 return full || should_allocate(size, tlab); // FIX ME !!! | |
1475 // This and promotion failure handling are connected at the | |
1476 // hip and should be fixed by untying them. | |
1477 } | |
1478 | |
1479 bool CMSCollector::shouldConcurrentCollect() { | |
1480 if (_full_gc_requested) { | |
1481 if (Verbose && PrintGCDetails) { | |
1482 gclog_or_tty->print_cr("CMSCollector: collect because of explicit " | |
1520
bb843ebc7c55
6919638: CMS: ExplicitGCInvokesConcurrent misinteracts with gc locker
ysr
parents:
1518
diff
changeset
|
1483 " gc request (or gc_locker)"); |
0 | 1484 } |
1485 return true; | |
1486 } | |
1487 | |
1488 // For debugging purposes, change the type of collection. | |
1489 // If the rotation is not on the concurrent collection | |
1490 // type, don't start a concurrent collection. | |
1491 NOT_PRODUCT( | |
1492 if (RotateCMSCollectionTypes && | |
1493 (_cmsGen->debug_collection_type() != | |
1494 ConcurrentMarkSweepGeneration::Concurrent_collection_type)) { | |
1495 assert(_cmsGen->debug_collection_type() != | |
1496 ConcurrentMarkSweepGeneration::Unknown_collection_type, | |
1497 "Bad cms collection type"); | |
1498 return false; | |
1499 } | |
1500 ) | |
1501 | |
1502 FreelistLocker x(this); | |
1503 // ------------------------------------------------------------------ | |
1504 // Print out lots of information which affects the initiation of | |
1505 // a collection. | |
1506 if (PrintCMSInitiationStatistics && stats().valid()) { | |
1507 gclog_or_tty->print("CMSCollector shouldConcurrentCollect: "); | |
1508 gclog_or_tty->stamp(); | |
1509 gclog_or_tty->print_cr(""); | |
1510 stats().print_on(gclog_or_tty); | |
1511 gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f", | |
1512 stats().time_until_cms_gen_full()); | |
1513 gclog_or_tty->print_cr("free="SIZE_FORMAT, _cmsGen->free()); | |
1514 gclog_or_tty->print_cr("contiguous_available="SIZE_FORMAT, | |
1515 _cmsGen->contiguous_available()); | |
1516 gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate()); | |
1517 gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate()); | |
1518 gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy()); | |
94
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1519 gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy()); |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1520 gclog_or_tty->print_cr("initiatingPermOccupancy=%3.7f", _permGen->initiating_occupancy()); |
0 | 1521 } |
1522 // ------------------------------------------------------------------ | |
1523 | |
1524 // If the estimated time to complete a cms collection (cms_duration()) | |
1525 // is less than the estimated time remaining until the cms generation | |
1526 // is full, start a collection. | |
1527 if (!UseCMSInitiatingOccupancyOnly) { | |
1528 if (stats().valid()) { | |
1529 if (stats().time_until_cms_start() == 0.0) { | |
1530 return true; | |
1531 } | |
1532 } else { | |
1533 // We want to conservatively collect somewhat early in order | |
1534 // to try and "bootstrap" our CMS/promotion statistics; | |
1535 // this branch will not fire after the first successful CMS | |
1536 // collection because the stats should then be valid. | |
1537 if (_cmsGen->occupancy() >= _bootstrap_occupancy) { | |
1538 if (Verbose && PrintGCDetails) { | |
1539 gclog_or_tty->print_cr( | |
1540 " CMSCollector: collect for bootstrapping statistics:" | |
1541 " occupancy = %f, boot occupancy = %f", _cmsGen->occupancy(), | |
1542 _bootstrap_occupancy); | |
1543 } | |
1544 return true; | |
1545 } | |
1546 } | |
1547 } | |
1548 | |
1549 // Otherwise, we start a collection cycle if either the perm gen or | |
1550 // old gen want a collection cycle started. Each may use | |
1551 // an appropriate criterion for making this decision. | |
1552 // XXX We need to make sure that the gen expansion | |
94
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1553 // criterion dovetails well with this. XXX NEED TO FIX THIS |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1554 if (_cmsGen->should_concurrent_collect()) { |
0 | 1555 if (Verbose && PrintGCDetails) { |
1556 gclog_or_tty->print_cr("CMS old gen initiated"); | |
1557 } | |
1558 return true; | |
1559 } | |
1560 | |
94
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1561 // We start a collection if we believe an incremental collection may fail; |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1562 // this is not likely to be productive in practice because it's probably too |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1563 // late anyway. |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1564 GenCollectedHeap* gch = GenCollectedHeap::heap(); |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1565 assert(gch->collector_policy()->is_two_generation_policy(), |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1566 "You may want to check the correctness of the following"); |
1994
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
1567 if (gch->incremental_collection_will_fail(true /* consult_young */)) { |
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
1568 if (Verbose && PrintGCDetails) { |
94
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1569 gclog_or_tty->print("CMSCollector: collect because incremental collection will fail "); |
0 | 1570 } |
1571 return true; | |
1572 } | |
1573 | |
94
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1574 if (CMSClassUnloadingEnabled && _permGen->should_concurrent_collect()) { |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1575 bool res = update_should_unload_classes(); |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1576 if (res) { |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1577 if (Verbose && PrintGCDetails) { |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1578 gclog_or_tty->print_cr("CMS perm gen initiated"); |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1579 } |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1580 return true; |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1581 } |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1582 } |
0 | 1583 return false; |
1584 } | |
1585 | |
1586 // Clear _expansion_cause fields of constituent generations | |
1587 void CMSCollector::clear_expansion_cause() { | |
1588 _cmsGen->clear_expansion_cause(); | |
1589 _permGen->clear_expansion_cause(); | |
1590 } | |
1591 | |
94
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1592 // We should be conservative in starting a collection cycle. To |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1593 // start too eagerly runs the risk of collecting too often in the |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1594 // extreme. To collect too rarely falls back on full collections, |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1595 // which works, even if not optimum in terms of concurrent work. |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1596 // As a work around for too eagerly collecting, use the flag |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1597 // UseCMSInitiatingOccupancyOnly. This also has the advantage of |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1598 // giving the user an easily understandable way of controlling the |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1599 // collections. |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1600 // We want to start a new collection cycle if any of the following |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1601 // conditions hold: |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1602 // . our current occupancy exceeds the configured initiating occupancy |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1603 // for this generation, or |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1604 // . we recently needed to expand this space and have not, since that |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1605 // expansion, done a collection of this generation, or |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1606 // . the underlying space believes that it may be a good idea to initiate |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1607 // a concurrent collection (this may be based on criteria such as the |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1608 // following: the space uses linear allocation and linear allocation is |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1609 // going to fail, or there is believed to be excessive fragmentation in |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1610 // the generation, etc... or ... |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1611 // [.(currently done by CMSCollector::shouldConcurrentCollect() only for |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1612 // the case of the old generation, not the perm generation; see CR 6543076): |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1613 // we may be approaching a point at which allocation requests may fail because |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1614 // we will be out of sufficient free space given allocation rate estimates.] |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1615 bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const { |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1616 |
0 | 1617 assert_lock_strong(freelistLock()); |
94
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1618 if (occupancy() > initiating_occupancy()) { |
0 | 1619 if (PrintGCDetails && Verbose) { |
1620 gclog_or_tty->print(" %s: collect because of occupancy %f / %f ", | |
94
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1621 short_name(), occupancy(), initiating_occupancy()); |
0 | 1622 } |
1623 return true; | |
1624 } | |
1625 if (UseCMSInitiatingOccupancyOnly) { | |
1626 return false; | |
1627 } | |
1628 if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) { | |
1629 if (PrintGCDetails && Verbose) { | |
1630 gclog_or_tty->print(" %s: collect because expanded for allocation ", | |
1631 short_name()); | |
1632 } | |
1633 return true; | |
1634 } | |
94
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1635 if (_cmsSpace->should_concurrent_collect()) { |
0 | 1636 if (PrintGCDetails && Verbose) { |
94
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
1637 gclog_or_tty->print(" %s: collect because cmsSpace says so ", |
0 | 1638 short_name()); |
1639 } | |
1640 return true; | |
1641 } | |
1642 return false; | |
1643 } | |
1644 | |
1645 void ConcurrentMarkSweepGeneration::collect(bool full, | |
1646 bool clear_all_soft_refs, | |
1647 size_t size, | |
1648 bool tlab) | |
1649 { | |
1650 collector()->collect(full, clear_all_soft_refs, size, tlab); | |
1651 } | |
1652 | |
1653 void CMSCollector::collect(bool full, | |
1654 bool clear_all_soft_refs, | |
1655 size_t size, | |
1656 bool tlab) | |
1657 { | |
1658 if (!UseCMSCollectionPassing && _collectorState > Idling) { | |
1659 // For debugging purposes skip the collection if the state | |
1660 // is not currently idle | |
1661 if (TraceCMSState) { | |
1662 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " skipped full:%d CMS state %d", | |
1663 Thread::current(), full, _collectorState); | |
1664 } | |
1665 return; | |
1666 } | |
1667 | |
1668 // The following "if" branch is present for defensive reasons. | |
1669 // In the current uses of this interface, it can be replaced with: | |
1670 // assert(!GC_locker.is_active(), "Can't be called otherwise"); | |
1671 // But I am not placing that assert here to allow future | |
1672 // generality in invoking this interface. | |
1673 if (GC_locker::is_active()) { | |
1674 // A consistency test for GC_locker | |
1675 assert(GC_locker::needs_gc(), "Should have been set already"); | |
1676 // Skip this foreground collection, instead | |
1677 // expanding the heap if necessary. | |
1678 // Need the free list locks for the call to free() in compute_new_size() | |
1679 compute_new_size(); | |
1680 return; | |
1681 } | |
1682 acquire_control_and_collect(full, clear_all_soft_refs); | |
1683 _full_gcs_since_conc_gc++; | |
1684 | |
1685 } | |
1686 | |
1687 void CMSCollector::request_full_gc(unsigned int full_gc_count) { | |
1688 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
1689 unsigned int gc_count = gch->total_full_collections(); | |
1690 if (gc_count == full_gc_count) { | |
1691 MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag); | |
1692 _full_gc_requested = true; | |
1693 CGC_lock->notify(); // nudge CMS thread | |
2365
a181f3a124dd
6987703: iCMS: Intermittent hang with gc/gctests/CallGC/CallGC01 and +ExplicitGCInvokesConcurrent
ysr
parents:
2226
diff
changeset
|
1694 } else { |
a181f3a124dd
6987703: iCMS: Intermittent hang with gc/gctests/CallGC/CallGC01 and +ExplicitGCInvokesConcurrent
ysr
parents:
2226
diff
changeset
|
1695 assert(gc_count > full_gc_count, "Error: causal loop"); |
0 | 1696 } |
1697 } | |
1698 | |
1699 | |
1700 // The foreground and background collectors need to coordinate in order | |
1701 // to make sure that they do not mutually interfere with CMS collections. | |
1702 // When a background collection is active, | |
1703 // the foreground collector may need to take over (preempt) and | |
1704 // synchronously complete an ongoing collection. Depending on the | |
1705 // frequency of the background collections and the heap usage | |
1706 // of the application, this preemption can be seldom or frequent. | |
1707 // There are only certain | |
1708 // points in the background collection that the "collection-baton" | |
1709 // can be passed to the foreground collector. | |
1710 // | |
1711 // The foreground collector will wait for the baton before | |
1712 // starting any part of the collection. The foreground collector | |
1713 // will only wait at one location. | |
1714 // | |
1715 // The background collector will yield the baton before starting a new | |
1716 // phase of the collection (e.g., before initial marking, marking from roots, | |
1717 // precleaning, final re-mark, sweep etc.) This is normally done at the head | |
1718 // of the loop which switches the phases. The background collector does some | |
1719 // of the phases (initial mark, final re-mark) with the world stopped. | |
1720 // Because of locking involved in stopping the world, | |
1721 // the foreground collector should not block waiting for the background | |
1722 // collector when it is doing a stop-the-world phase. The background | |
1723 // collector will yield the baton at an additional point just before | |
1724 // it enters a stop-the-world phase. Once the world is stopped, the | |
1725 // background collector checks the phase of the collection. If the | |
1726 // phase has not changed, it proceeds with the collection. If the | |
1727 // phase has changed, it skips that phase of the collection. See | |
1728 // the comments on the use of the Heap_lock in collect_in_background(). | |
1729 // | |
1730 // Variable used in baton passing. | |
1731 // _foregroundGCIsActive - Set to true by the foreground collector when | |
1732 // it wants the baton. The foreground clears it when it has finished | |
1733 // the collection. | |
1734 // _foregroundGCShouldWait - Set to true by the background collector | |
1735 // when it is running. The foreground collector waits while | |
1736 // _foregroundGCShouldWait is true. | |
1737 // CGC_lock - monitor used to protect access to the above variables | |
1738 // and to notify the foreground and background collectors. | |
1739 // _collectorState - current state of the CMS collection. | |
1740 // | |
1741 // The foreground collector | |
1742 // acquires the CGC_lock | |
1743 // sets _foregroundGCIsActive | |
1744 // waits on the CGC_lock for _foregroundGCShouldWait to be false | |
1745 // various locks acquired in preparation for the collection | |
1746 // are released so as not to block the background collector | |
1747 // that is in the midst of a collection | |
1748 // proceeds with the collection | |
1749 // clears _foregroundGCIsActive | |
1750 // returns | |
1751 // | |
1752 // The background collector in a loop iterating on the phases of the | |
1753 // collection | |
1754 // acquires the CGC_lock | |
1755 // sets _foregroundGCShouldWait | |
1756 // if _foregroundGCIsActive is set | |
1757 // clears _foregroundGCShouldWait, notifies _CGC_lock | |
1758 // waits on _CGC_lock for _foregroundGCIsActive to become false | |
1759 // and exits the loop. | |
1760 // otherwise | |
1761 // proceed with that phase of the collection | |
1762 // if the phase is a stop-the-world phase, | |
1763 // yield the baton once more just before enqueueing | |
1764 // the stop-world CMS operation (executed by the VM thread). | |
1765 // returns after all phases of the collection are done | |
1766 // | |
1767 | |
1768 void CMSCollector::acquire_control_and_collect(bool full, | |
1769 bool clear_all_soft_refs) { | |
1770 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); | |
1771 assert(!Thread::current()->is_ConcurrentGC_thread(), | |
1772 "shouldn't try to acquire control from self!"); | |
1773 | |
1774 // Start the protocol for acquiring control of the | |
1775 // collection from the background collector (aka CMS thread). | |
1776 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(), | |
1777 "VM thread should have CMS token"); | |
1778 // Remember the possibly interrupted state of an ongoing | |
1779 // concurrent collection | |
1780 CollectorState first_state = _collectorState; | |
1781 | |
1782 // Signal to a possibly ongoing concurrent collection that | |
1783 // we want to do a foreground collection. | |
1784 _foregroundGCIsActive = true; | |
1785 | |
1786 // Disable incremental mode during a foreground collection. | |
1787 ICMSDisabler icms_disabler; | |
1788 | |
1789 // release locks and wait for a notify from the background collector | |
1790 // releasing the locks in only necessary for phases which | |
1791 // do yields to improve the granularity of the collection. | |
1792 assert_lock_strong(bitMapLock()); | |
1793 // We need to lock the Free list lock for the space that we are | |
1794 // currently collecting. | |
1795 assert(haveFreelistLocks(), "Must be holding free list locks"); | |
1796 bitMapLock()->unlock(); | |
1797 releaseFreelistLocks(); | |
1798 { | |
1799 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); | |
1800 if (_foregroundGCShouldWait) { | |
1801 // We are going to be waiting for action for the CMS thread; | |
1802 // it had better not be gone (for instance at shutdown)! | |
1803 assert(ConcurrentMarkSweepThread::cmst() != NULL, | |
1804 "CMS thread must be running"); | |
1805 // Wait here until the background collector gives us the go-ahead | |
1806 ConcurrentMarkSweepThread::clear_CMS_flag( | |
1807 ConcurrentMarkSweepThread::CMS_vm_has_token); // release token | |
1808 // Get a possibly blocked CMS thread going: | |
1809 // Note that we set _foregroundGCIsActive true above, | |
1810 // without protection of the CGC_lock. | |
1811 CGC_lock->notify(); | |
1812 assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(), | |
1813 "Possible deadlock"); | |
1814 while (_foregroundGCShouldWait) { | |
1815 // wait for notification | |
1816 CGC_lock->wait(Mutex::_no_safepoint_check_flag); | |
1817 // Possibility of delay/starvation here, since CMS token does | |
1818 // not know to give priority to VM thread? Actually, i think | |
1819 // there wouldn't be any delay/starvation, but the proof of | |
1820 // that "fact" (?) appears non-trivial. XXX 20011219YSR | |
1821 } | |
1822 ConcurrentMarkSweepThread::set_CMS_flag( | |
1823 ConcurrentMarkSweepThread::CMS_vm_has_token); | |
1824 } | |
1825 } | |
1826 // The CMS_token is already held. Get back the other locks. | |
1827 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(), | |
1828 "VM thread should have CMS token"); | |
1829 getFreelistLocks(); | |
1830 bitMapLock()->lock_without_safepoint_check(); | |
1831 if (TraceCMSState) { | |
1832 gclog_or_tty->print_cr("CMS foreground collector has asked for control " | |
1833 INTPTR_FORMAT " with first state %d", Thread::current(), first_state); | |
1834 gclog_or_tty->print_cr(" gets control with state %d", _collectorState); | |
1835 } | |
1836 | |
1837 // Check if we need to do a compaction, or if not, whether | |
1838 // we need to start the mark-sweep from scratch. | |
1839 bool should_compact = false; | |
1840 bool should_start_over = false; | |
1841 decide_foreground_collection_type(clear_all_soft_refs, | |
1842 &should_compact, &should_start_over); | |
1843 | |
1844 NOT_PRODUCT( | |
1845 if (RotateCMSCollectionTypes) { | |
1846 if (_cmsGen->debug_collection_type() == | |
1847 ConcurrentMarkSweepGeneration::MSC_foreground_collection_type) { | |
1848 should_compact = true; | |
1849 } else if (_cmsGen->debug_collection_type() == | |
1850 ConcurrentMarkSweepGeneration::MS_foreground_collection_type) { | |
1851 should_compact = false; | |
1852 } | |
1853 } | |
1854 ) | |
1855 | |
1856 if (PrintGCDetails && first_state > Idling) { | |
1857 GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause(); | |
1858 if (GCCause::is_user_requested_gc(cause) || | |
1859 GCCause::is_serviceability_requested_gc(cause)) { | |
1860 gclog_or_tty->print(" (concurrent mode interrupted)"); | |
1861 } else { | |
1862 gclog_or_tty->print(" (concurrent mode failure)"); | |
1863 } | |
1864 } | |
1865 | |
1866 if (should_compact) { | |
1867 // If the collection is being acquired from the background | |
1868 // collector, there may be references on the discovered | |
1869 // references lists that have NULL referents (being those | |
1870 // that were concurrently cleared by a mutator) or | |
1871 // that are no longer active (having been enqueued concurrently | |
1872 // by the mutator). | |
1873 // Scrub the list of those references because Mark-Sweep-Compact | |
1874 // code assumes referents are not NULL and that all discovered | |
1875 // Reference objects are active. | |
1876 ref_processor()->clean_up_discovered_references(); | |
1877 | |
1878 do_compaction_work(clear_all_soft_refs); | |
1879 | |
1880 // Has the GC time limit been exceeded? | |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1289
diff
changeset
|
1881 DefNewGeneration* young_gen = _young_gen->as_DefNewGeneration(); |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1289
diff
changeset
|
1882 size_t max_eden_size = young_gen->max_capacity() - |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1289
diff
changeset
|
1883 young_gen->to()->capacity() - |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1289
diff
changeset
|
1884 young_gen->from()->capacity(); |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1289
diff
changeset
|
1885 GenCollectedHeap* gch = GenCollectedHeap::heap(); |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1289
diff
changeset
|
1886 GCCause::Cause gc_cause = gch->gc_cause(); |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1289
diff
changeset
|
1887 size_policy()->check_gc_overhead_limit(_young_gen->used(), |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1289
diff
changeset
|
1888 young_gen->eden()->used(), |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1289
diff
changeset
|
1889 _cmsGen->max_capacity(), |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1289
diff
changeset
|
1890 max_eden_size, |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1289
diff
changeset
|
1891 full, |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1289
diff
changeset
|
1892 gc_cause, |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1289
diff
changeset
|
1893 gch->collector_policy()); |
0 | 1894 } else { |
1895 do_mark_sweep_work(clear_all_soft_refs, first_state, | |
1896 should_start_over); | |
1897 } | |
1898 // Reset the expansion cause, now that we just completed | |
1899 // a collection cycle. | |
1900 clear_expansion_cause(); | |
1901 _foregroundGCIsActive = false; | |
1902 return; | |
1903 } | |
1904 | |
1905 // Resize the perm generation and the tenured generation | |
1906 // after obtaining the free list locks for the | |
1907 // two generations. | |
1908 void CMSCollector::compute_new_size() { | |
1909 assert_locked_or_safepoint(Heap_lock); | |
1910 FreelistLocker z(this); | |
1911 _permGen->compute_new_size(); | |
1912 _cmsGen->compute_new_size(); | |
1913 } | |
1914 | |
1915 // A work method used by foreground collection to determine | |
1916 // what type of collection (compacting or not, continuing or fresh) | |
1917 // it should do. | |
1918 // NOTE: the intent is to make UseCMSCompactAtFullCollection | |
1919 // and CMSCompactWhenClearAllSoftRefs the default in the future | |
1920 // and do away with the flags after a suitable period. | |
1921 void CMSCollector::decide_foreground_collection_type( | |
1922 bool clear_all_soft_refs, bool* should_compact, | |
1923 bool* should_start_over) { | |
1924 // Normally, we'll compact only if the UseCMSCompactAtFullCollection | |
1925 // flag is set, and we have either requested a System.gc() or | |
1926 // the number of full gc's since the last concurrent cycle | |
1927 // has exceeded the threshold set by CMSFullGCsBeforeCompaction, | |
1928 // or if an incremental collection has failed | |
1929 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
1930 assert(gch->collector_policy()->is_two_generation_policy(), | |
1931 "You may want to check the correctness of the following"); | |
1932 // Inform cms gen if this was due to partial collection failing. | |
1933 // The CMS gen may use this fact to determine its expansion policy. | |
1994
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
1934 if (gch->incremental_collection_will_fail(false /* don't consult_young */)) { |
0 | 1935 assert(!_cmsGen->incremental_collection_failed(), |
1936 "Should have been noticed, reacted to and cleared"); | |
1937 _cmsGen->set_incremental_collection_failed(); | |
1938 } | |
1939 *should_compact = | |
1940 UseCMSCompactAtFullCollection && | |
1941 ((_full_gcs_since_conc_gc >= CMSFullGCsBeforeCompaction) || | |
1942 GCCause::is_user_requested_gc(gch->gc_cause()) || | |
1994
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
1943 gch->incremental_collection_will_fail(true /* consult_young */)); |
0 | 1944 *should_start_over = false; |
1945 if (clear_all_soft_refs && !*should_compact) { | |
1946 // We are about to do a last ditch collection attempt | |
1947 // so it would normally make sense to do a compaction | |
1948 // to reclaim as much space as possible. | |
1949 if (CMSCompactWhenClearAllSoftRefs) { | |
1950 // Default: The rationale is that in this case either | |
1951 // we are past the final marking phase, in which case | |
1952 // we'd have to start over, or so little has been done | |
1953 // that there's little point in saving that work. Compaction | |
1954 // appears to be the sensible choice in either case. | |
1955 *should_compact = true; | |
1956 } else { | |
1957 // We have been asked to clear all soft refs, but not to | |
1958 // compact. Make sure that we aren't past the final checkpoint | |
1959 // phase, for that is where we process soft refs. If we are already | |
1960 // past that phase, we'll need to redo the refs discovery phase and | |
1961 // if necessary clear soft refs that weren't previously | |
1962 // cleared. We do so by remembering the phase in which | |
1963 // we came in, and if we are past the refs processing | |
1964 // phase, we'll choose to just redo the mark-sweep | |
1965 // collection from scratch. | |
1966 if (_collectorState > FinalMarking) { | |
1967 // We are past the refs processing phase; | |
1968 // start over and do a fresh synchronous CMS cycle | |
1969 _collectorState = Resetting; // skip to reset to start new cycle | |
1970 reset(false /* == !asynch */); | |
1971 *should_start_over = true; | |
1972 } // else we can continue a possibly ongoing current cycle | |
1973 } | |
1974 } | |
1975 } | |
1976 | |
// A work method used by the foreground collector to do
// a mark-sweep-compact.
//
// Takes over from a (possibly interrupted) concurrent cycle: widens and
// serializes reference processing for the safepoint, clears the mod-union
// table, runs GenMarkSweep::invoke_at_safepoint(), then restores the
// collector to Idling via reset() and restarts the inter-sweep statistics
// epoch. Runs with the world stopped (invoke_at_safepoint).
void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  TraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, gclog_or_tty);
  if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) {
    gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d "
      "collections passed to foreground collector", _full_gcs_since_conc_gc);
  }

  // Sample collection interval time and reset for collection pause.
  if (UseAdaptiveSizePolicy) {
    size_policy()->msc_collection_begin();
  }

  // Temporarily widen the span of the weak reference processing to
  // the entire heap.
  MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
  // The rp_mut_* objects below are RAII "mutators": each saves the current
  // setting of the reference processor in its constructor and restores it
  // in its destructor, so the overrides last exactly for this method.
  ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
  // Temporarily, clear the "is_alive_non_header" field of the
  // reference processor.
  ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
  // Temporarily make reference _processing_ single threaded (non-MT).
  ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
  // Temporarily make refs discovery atomic
  ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true);
  // Temporarily make reference _discovery_ single threaded (non-MT)
  ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);

  ref_processor()->set_enqueuing_is_done(false);
  ref_processor()->enable_discovery();
  ref_processor()->setup_policy(clear_all_soft_refs);
  // If an asynchronous collection finishes, the _modUnionTable is
  // all clear.  If we are assuming the collection from an asynchronous
  // collection, clear the _modUnionTable.
  assert(_collectorState != Idling || _modUnionTable.isAllClear(),
    "_modUnionTable should be clear if the baton was not passed");
  _modUnionTable.clear_all();

  // We must adjust the allocation statistics being maintained
  // in the free list space. We do so by reading and clearing
  // the sweep timer and updating the block flux rate estimates below.
  assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
  if (_inter_sweep_timer.is_active()) {
    _inter_sweep_timer.stop();
    // Note that we do not use this sample to update the _inter_sweep_estimate.
    _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
                                            _inter_sweep_estimate.padded_average(),
                                            _intra_sweep_estimate.padded_average());
  }

  {
    // NOTE(review): this constructs an *unnamed* temporary, which is
    // destroyed at the end of this statement -- it does NOT span the
    // collection below. If the intent was for the stats tracer to bracket
    // the mark-sweep-compact, it needs a named variable whose scope covers
    // the invoke_at_safepoint() call. TODO: confirm intended lifetime.
    TraceCMSMemoryManagerStats();
  }
  GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
    ref_processor(), clear_all_soft_refs);
  #ifdef ASSERT
    // After compaction all free space should have been coalesced into a
    // single chunk at the top of the space (two chunks are tolerated; see
    // the last assert below).
    CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
    size_t free_size = cms_space->free();
    assert(free_size ==
           pointer_delta(cms_space->end(), cms_space->compaction_top())
           * HeapWordSize,
      "All the free space should be compacted into one chunk at top");
    assert(cms_space->dictionary()->totalChunkSize(
                                      debug_only(cms_space->freelistLock())) == 0 ||
           cms_space->totalSizeInIndexedFreeLists() == 0,
      "All the free space should be in a single chunk");
    size_t num = cms_space->totalCount();
    assert((free_size == 0 && num == 0) ||
           (free_size > 0  && (num == 1 || num == 2)),
         "There should be at most 2 free chunks after compaction");
  #endif // ASSERT
  _collectorState = Resetting;
  assert(_restart_addr == NULL,
         "Should have been NULL'd before baton was passed");
  reset(false /* == !asynch */);
  _cmsGen->reset_after_compaction();
  _concurrent_cycles_since_last_unload = 0;

  if (verifying() && !should_unload_classes()) {
    perm_gen_verify_bit_map()->clear_all();
  }

  // Clear any data recorded in the PLAB chunk arrays.
  if (_survivor_plab_array != NULL) {
    reset_survivor_plab_arrays();
  }

  // Adjust the per-size allocation stats for the next epoch.
  _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
  // Restart the "inter sweep timer" for the next epoch.
  _inter_sweep_timer.reset();
  _inter_sweep_timer.start();

  // Sample collection pause time and reset for collection interval.
  if (UseAdaptiveSizePolicy) {
    size_policy()->msc_collection_end(gch->gc_cause());
  }

  // For a mark-sweep-compact, compute_new_size() will be called
  // in the heap's do_collection() method.
}
2079 | |
// A work method used by the foreground collector to do
// a mark-sweep, after taking over from a possibly on-going
// concurrent mark-sweep collection.
//
// first_state is the collector state at the time the foreground collector
// took the baton; should_start_over requests a fresh cycle. This method
// only adjusts _collectorState so that collect_in_foreground() resumes (or
// restarts) the cycle at the right phase, then delegates to it.
void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs,
  CollectorState first_state, bool should_start_over) {
  if (PrintGC && Verbose) {
    gclog_or_tty->print_cr("Pass concurrent collection to foreground "
      "collector with count %d",
      _full_gcs_since_conc_gc);
  }
  switch (_collectorState) {
    case Idling:
      if (first_state == Idling || should_start_over) {
        // The background GC was not active, or should be
        // restarted from scratch; start the cycle.
        _collectorState = InitialMarking;
      }
      // If first_state was not Idling, then a background GC
      // was in progress and has now finished.  No need to do it
      // again.  Leave the state as Idling.
      break;
    case Precleaning:
      // In the foreground case don't do the precleaning since
      // it is not done concurrently and there is extra work
      // required.
      _collectorState = FinalMarking;
      // Any other state is left unchanged: collect_in_foreground() picks
      // up the cycle from wherever the background collector left off.
  }
  if (PrintGCDetails &&
      (_collectorState > Idling ||
       !GCCause::is_user_requested_gc(GenCollectedHeap::heap()->gc_cause()))) {
    gclog_or_tty->print(" (concurrent mode failure)");
  }
  collect_in_foreground(clear_all_soft_refs);

  // For a mark-sweep, compute_new_size() will be called
  // in the heap's do_collection() method.
}
2117 | |
2118 | |
void CMSCollector::getFreelistLocks() const {
  // Get locks for all free lists in all generations that this
  // collector is responsible for.
  // NOTE: the acquisition order (_cmsGen before _permGen) is part of the
  // locking discipline and must match releaseFreelistLocks(); do not reorder.
  _cmsGen->freelistLock()->lock_without_safepoint_check();
  _permGen->freelistLock()->lock_without_safepoint_check();
}
2125 | |
void CMSCollector::releaseFreelistLocks() const {
  // Release locks for all free lists in all generations that this
  // collector is responsible for. Counterpart of getFreelistLocks().
  _cmsGen->freelistLock()->unlock();
  _permGen->freelistLock()->unlock();
}
2132 | |
bool CMSCollector::haveFreelistLocks() const {
  // Check locks for all free lists in all generations that this
  // collector is responsible for.
  // Debug-only helper: assert_lock_strong() verifies ownership in debug
  // builds; in product builds the checks compile away and this method must
  // never actually be called (PRODUCT_ONLY(ShouldNotReachHere())). Intended
  // for use inside assert(...) only.
  assert_lock_strong(_cmsGen->freelistLock());
  assert_lock_strong(_permGen->freelistLock());
  PRODUCT_ONLY(ShouldNotReachHere());
  return true;
}
2141 | |
// A utility class that is used by the CMS collector to
// temporarily "release" the foreground collector from its
// usual obligation to wait for the background collector to
// complete an ongoing phase before proceeding.
//
// Scoped (RAII): the constructor clears _foregroundGCShouldWait and wakes
// a foreground collector blocked on CGC_lock; the destructor re-asserts
// the wait obligation. Used around the stop-world VM operations (initial
// mark, final remark) where blocking the foreground collector could
// deadlock.
class ReleaseForegroundGC: public StackObj {
 private:
  CMSCollector* _c;
 public:
  ReleaseForegroundGC(CMSCollector* c) : _c(c) {
    assert(_c->_foregroundGCShouldWait, "Else should not need to call");
    MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
    // allow a potentially blocked foreground collector to proceed
    _c->_foregroundGCShouldWait = false;
    if (_c->_foregroundGCIsActive) {
      CGC_lock->notify();
    }
    // Holding the CMS token here while the foreground collector runs
    // could deadlock, hence the check.
    assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
           "Possible deadlock");
  }

  ~ReleaseForegroundGC() {
    assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
    MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
    _c->_foregroundGCShouldWait = true;
  }
};
2168 | |
2169 // There are separate collect_in_background and collect_in_foreground because of | |
2170 // the different locking requirements of the background collector and the | |
2171 // foreground collector. There was originally an attempt to share | |
2172 // one "collect" method between the background collector and the foreground | |
2173 // collector but the if-then-else required made it cleaner to have | |
2174 // separate methods. | |
2175 void CMSCollector::collect_in_background(bool clear_all_soft_refs) { | |
2176 assert(Thread::current()->is_ConcurrentGC_thread(), | |
2177 "A CMS asynchronous collection is only allowed on a CMS thread."); | |
2178 | |
2179 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
2180 { | |
2181 bool safepoint_check = Mutex::_no_safepoint_check_flag; | |
2182 MutexLockerEx hl(Heap_lock, safepoint_check); | |
94
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
2183 FreelistLocker fll(this); |
0 | 2184 MutexLockerEx x(CGC_lock, safepoint_check); |
2185 if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) { | |
2186 // The foreground collector is active or we're | |
2187 // not using asynchronous collections. Skip this | |
2188 // background collection. | |
2189 assert(!_foregroundGCShouldWait, "Should be clear"); | |
2190 return; | |
2191 } else { | |
2192 assert(_collectorState == Idling, "Should be idling before start."); | |
2193 _collectorState = InitialMarking; | |
2194 // Reset the expansion cause, now that we are about to begin | |
2195 // a new cycle. | |
2196 clear_expansion_cause(); | |
2197 } | |
94
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
2198 // Decide if we want to enable class unloading as part of the |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
2199 // ensuing concurrent GC cycle. |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
2200 update_should_unload_classes(); |
0 | 2201 _full_gc_requested = false; // acks all outstanding full gc requests |
2202 // Signal that we are about to start a collection | |
2203 gch->increment_total_full_collections(); // ... starting a collection cycle | |
2204 _collection_count_start = gch->total_full_collections(); | |
2205 } | |
2206 | |
2207 // Used for PrintGC | |
2208 size_t prev_used; | |
2209 if (PrintGC && Verbose) { | |
2210 prev_used = _cmsGen->used(); // XXXPERM | |
2211 } | |
2212 | |
2213 // The change of the collection state is normally done at this level; | |
2214 // the exceptions are phases that are executed while the world is | |
2215 // stopped. For those phases the change of state is done while the | |
2216 // world is stopped. For baton passing purposes this allows the | |
2217 // background collector to finish the phase and change state atomically. | |
2218 // The foreground collector cannot wait on a phase that is done | |
2219 // while the world is stopped because the foreground collector already | |
2220 // has the world stopped and would deadlock. | |
2221 while (_collectorState != Idling) { | |
2222 if (TraceCMSState) { | |
2223 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d", | |
2224 Thread::current(), _collectorState); | |
2225 } | |
2226 // The foreground collector | |
2227 // holds the Heap_lock throughout its collection. | |
2228 // holds the CMS token (but not the lock) | |
2229 // except while it is waiting for the background collector to yield. | |
2230 // | |
2231 // The foreground collector should be blocked (not for long) | |
2232 // if the background collector is about to start a phase | |
2233 // executed with world stopped. If the background | |
2234 // collector has already started such a phase, the | |
2235 // foreground collector is blocked waiting for the | |
2236 // Heap_lock. The stop-world phases (InitialMarking and FinalMarking) | |
2237 // are executed in the VM thread. | |
2238 // | |
2239 // The locking order is | |
2240 // PendingListLock (PLL) -- if applicable (FinalMarking) | |
2241 // Heap_lock (both this & PLL locked in VM_CMS_Operation::prologue()) | |
2242 // CMS token (claimed in | |
2243 // stop_world_and_do() --> | |
2244 // safepoint_synchronize() --> | |
2245 // CMSThread::synchronize()) | |
2246 | |
2247 { | |
2248 // Check if the FG collector wants us to yield. | |
2249 CMSTokenSync x(true); // is cms thread | |
2250 if (waitForForegroundGC()) { | |
2251 // We yielded to a foreground GC, nothing more to be | |
2252 // done this round. | |
2253 assert(_foregroundGCShouldWait == false, "We set it to false in " | |
2254 "waitForForegroundGC()"); | |
2255 if (TraceCMSState) { | |
2256 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT | |
2257 " exiting collection CMS state %d", | |
2258 Thread::current(), _collectorState); | |
2259 } | |
2260 return; | |
2261 } else { | |
2262 // The background collector can run but check to see if the | |
2263 // foreground collector has done a collection while the | |
2264 // background collector was waiting to get the CGC_lock | |
2265 // above. If yes, break so that _foregroundGCShouldWait | |
2266 // is cleared before returning. | |
2267 if (_collectorState == Idling) { | |
2268 break; | |
2269 } | |
2270 } | |
2271 } | |
2272 | |
2273 assert(_foregroundGCShouldWait, "Foreground collector, if active, " | |
2274 "should be waiting"); | |
2275 | |
2276 switch (_collectorState) { | |
2277 case InitialMarking: | |
2278 { | |
2279 ReleaseForegroundGC x(this); | |
2280 stats().record_cms_begin(); | |
2281 | |
2282 VM_CMS_Initial_Mark initial_mark_op(this); | |
2283 VMThread::execute(&initial_mark_op); | |
2284 } | |
2285 // The collector state may be any legal state at this point | |
2286 // since the background collector may have yielded to the | |
2287 // foreground collector. | |
2288 break; | |
2289 case Marking: | |
2290 // initial marking in checkpointRootsInitialWork has been completed | |
2291 if (markFromRoots(true)) { // we were successful | |
2292 assert(_collectorState == Precleaning, "Collector state should " | |
2293 "have changed"); | |
2294 } else { | |
2295 assert(_foregroundGCIsActive, "Internal state inconsistency"); | |
2296 } | |
2297 break; | |
2298 case Precleaning: | |
2299 if (UseAdaptiveSizePolicy) { | |
2300 size_policy()->concurrent_precleaning_begin(); | |
2301 } | |
2302 // marking from roots in markFromRoots has been completed | |
2303 preclean(); | |
2304 if (UseAdaptiveSizePolicy) { | |
2305 size_policy()->concurrent_precleaning_end(); | |
2306 } | |
2307 assert(_collectorState == AbortablePreclean || | |
2308 _collectorState == FinalMarking, | |
2309 "Collector state should have changed"); | |
2310 break; | |
2311 case AbortablePreclean: | |
2312 if (UseAdaptiveSizePolicy) { | |
2313 size_policy()->concurrent_phases_resume(); | |
2314 } | |
2315 abortable_preclean(); | |
2316 if (UseAdaptiveSizePolicy) { | |
2317 size_policy()->concurrent_precleaning_end(); | |
2318 } | |
2319 assert(_collectorState == FinalMarking, "Collector state should " | |
2320 "have changed"); | |
2321 break; | |
2322 case FinalMarking: | |
2323 { | |
2324 ReleaseForegroundGC x(this); | |
2325 | |
2326 VM_CMS_Final_Remark final_remark_op(this); | |
2327 VMThread::execute(&final_remark_op); | |
935 | 2328 } |
0 | 2329 assert(_foregroundGCShouldWait, "block post-condition"); |
2330 break; | |
2331 case Sweeping: | |
2332 if (UseAdaptiveSizePolicy) { | |
2333 size_policy()->concurrent_sweeping_begin(); | |
2334 } | |
2335 // final marking in checkpointRootsFinal has been completed | |
2336 sweep(true); | |
2337 assert(_collectorState == Resizing, "Collector state change " | |
2338 "to Resizing must be done under the free_list_lock"); | |
2339 _full_gcs_since_conc_gc = 0; | |
2340 | |
2341 // Stop the timers for adaptive size policy for the concurrent phases | |
2342 if (UseAdaptiveSizePolicy) { | |
2343 size_policy()->concurrent_sweeping_end(); | |
2344 size_policy()->concurrent_phases_end(gch->gc_cause(), | |
2345 gch->prev_gen(_cmsGen)->capacity(), | |
2346 _cmsGen->free()); | |
2347 } | |
2348 | |
2349 case Resizing: { | |
2350 // Sweeping has been completed... | |
2351 // At this point the background collection has completed. | |
2352 // Don't move the call to compute_new_size() down | |
2353 // into code that might be executed if the background | |
2354 // collection was preempted. | |
2355 { | |
2356 ReleaseForegroundGC x(this); // unblock FG collection | |
2357 MutexLockerEx y(Heap_lock, Mutex::_no_safepoint_check_flag); | |
2358 CMSTokenSync z(true); // not strictly needed. | |
2359 if (_collectorState == Resizing) { | |
2360 compute_new_size(); | |
2361 _collectorState = Resetting; | |
2362 } else { | |
2363 assert(_collectorState == Idling, "The state should only change" | |
2364 " because the foreground collector has finished the collection"); | |
2365 } | |
2366 } | |
2367 break; | |
2368 } | |
2369 case Resetting: | |
2370 // CMS heap resizing has been completed | |
2371 reset(true); | |
2372 assert(_collectorState == Idling, "Collector state should " | |
2373 "have changed"); | |
2374 stats().record_cms_end(); | |
2375 // Don't move the concurrent_phases_end() and compute_new_size() | |
2376 // calls to here because a preempted background collection | |
2377 // has it's state set to "Resetting". | |
2378 break; | |
2379 case Idling: | |
2380 default: | |
2381 ShouldNotReachHere(); | |
2382 break; | |
2383 } | |
2384 if (TraceCMSState) { | |
2385 gclog_or_tty->print_cr(" Thread " INTPTR_FORMAT " done - next CMS state %d", | |
2386 Thread::current(), _collectorState); | |
2387 } | |
2388 assert(_foregroundGCShouldWait, "block post-condition"); | |
2389 } | |
2390 | |
2391 // Should this be in gc_epilogue? | |
2392 collector_policy()->counters()->update_counters(); | |
2393 | |
2394 { | |
2395 // Clear _foregroundGCShouldWait and, in the event that the | |
2396 // foreground collector is waiting, notify it, before | |
2397 // returning. | |
2398 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); | |
2399 _foregroundGCShouldWait = false; | |
2400 if (_foregroundGCIsActive) { | |
2401 CGC_lock->notify(); | |
2402 } | |
2403 assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(), | |
2404 "Possible deadlock"); | |
2405 } | |
2406 if (TraceCMSState) { | |
2407 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT | |
2408 " exiting collection CMS state %d", | |
2409 Thread::current(), _collectorState); | |
2410 } | |
2411 if (PrintGC && Verbose) { | |
2412 _cmsGen->print_heap_change(prev_used); | |
2413 } | |
2414 } | |
2415 | |
2416 void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) { | |
2417 assert(_foregroundGCIsActive && !_foregroundGCShouldWait, | |
2418 "Foreground collector should be waiting, not executing"); | |
2419 assert(Thread::current()->is_VM_thread(), "A foreground collection" | |
2420 "may only be done by the VM Thread with the world stopped"); | |
2421 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(), | |
2422 "VM thread should have CMS token"); | |
2423 | |
2424 NOT_PRODUCT(TraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose, | |
2425 true, gclog_or_tty);) | |
2426 if (UseAdaptiveSizePolicy) { | |
2427 size_policy()->ms_collection_begin(); | |
2428 } | |
2429 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact); | |
2430 | |
2431 HandleMark hm; // Discard invalid handles created during verification | |
2432 | |
2433 if (VerifyBeforeGC && | |
2434 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { | |
2435 Universe::verify(true); | |
2436 } | |
2437 | |
453
c96030fff130
6684579: SoftReference processing can be made more efficient
ysr
parents:
452
diff
changeset
|
2438 // Snapshot the soft reference policy to be used in this collection cycle. |
457
27a80744a83b
6778647: snap(), snap_policy() should be renamed setup(), setup_policy()
ysr
parents:
453
diff
changeset
|
2439 ref_processor()->setup_policy(clear_all_soft_refs); |
453
c96030fff130
6684579: SoftReference processing can be made more efficient
ysr
parents:
452
diff
changeset
|
2440 |
0 | 2441 bool init_mark_was_synchronous = false; // until proven otherwise |
2442 while (_collectorState != Idling) { | |
2443 if (TraceCMSState) { | |
2444 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d", | |
2445 Thread::current(), _collectorState); | |
2446 } | |
2447 switch (_collectorState) { | |
2448 case InitialMarking: | |
2449 init_mark_was_synchronous = true; // fact to be exploited in re-mark | |
2450 checkpointRootsInitial(false); | |
2451 assert(_collectorState == Marking, "Collector state should have changed" | |
2452 " within checkpointRootsInitial()"); | |
2453 break; | |
2454 case Marking: | |
2455 // initial marking in checkpointRootsInitialWork has been completed | |
2456 if (VerifyDuringGC && | |
2457 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { | |
2458 gclog_or_tty->print("Verify before initial mark: "); | |
2459 Universe::verify(true); | |
2460 } | |
2461 { | |
2462 bool res = markFromRoots(false); | |
2463 assert(res && _collectorState == FinalMarking, "Collector state should " | |
2464 "have changed"); | |
2465 break; | |
2466 } | |
2467 case FinalMarking: | |
2468 if (VerifyDuringGC && | |
2469 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { | |
2470 gclog_or_tty->print("Verify before re-mark: "); | |
2471 Universe::verify(true); | |
2472 } | |
2473 checkpointRootsFinal(false, clear_all_soft_refs, | |
2474 init_mark_was_synchronous); | |
2475 assert(_collectorState == Sweeping, "Collector state should not " | |
2476 "have changed within checkpointRootsFinal()"); | |
2477 break; | |
2478 case Sweeping: | |
2479 // final marking in checkpointRootsFinal has been completed | |
2480 if (VerifyDuringGC && | |
2481 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { | |
2482 gclog_or_tty->print("Verify before sweep: "); | |
2483 Universe::verify(true); | |
2484 } | |
2485 sweep(false); | |
2486 assert(_collectorState == Resizing, "Incorrect state"); | |
2487 break; | |
2488 case Resizing: { | |
2489 // Sweeping has been completed; the actual resize in this case | |
2490 // is done separately; nothing to be done in this state. | |
2491 _collectorState = Resetting; | |
2492 break; | |
2493 } | |
2494 case Resetting: | |
2495 // The heap has been resized. | |
2496 if (VerifyDuringGC && | |
2497 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { | |
2498 gclog_or_tty->print("Verify before reset: "); | |
2499 Universe::verify(true); | |
2500 } | |
2501 reset(false); | |
2502 assert(_collectorState == Idling, "Collector state should " | |
2503 "have changed"); | |
2504 break; | |
2505 case Precleaning: | |
2506 case AbortablePreclean: | |
2507 // Elide the preclean phase | |
2508 _collectorState = FinalMarking; | |
2509 break; | |
2510 default: | |
2511 ShouldNotReachHere(); | |
2512 } | |
2513 if (TraceCMSState) { | |
2514 gclog_or_tty->print_cr(" Thread " INTPTR_FORMAT " done - next CMS state %d", | |
2515 Thread::current(), _collectorState); | |
2516 } | |
2517 } | |
2518 | |
2519 if (UseAdaptiveSizePolicy) { | |
2520 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
2521 size_policy()->ms_collection_end(gch->gc_cause()); | |
2522 } | |
2523 | |
2524 if (VerifyAfterGC && | |
2525 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { | |
2526 Universe::verify(true); | |
2527 } | |
2528 if (TraceCMSState) { | |
2529 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT | |
2530 " exiting collection CMS state %d", | |
2531 Thread::current(), _collectorState); | |
2532 } | |
2533 } | |
2534 | |
// Called by the background (CMS) thread, holding the CMS token, to check
// whether a foreground collection wants it to yield.
// Returns true if it yielded (in which case it released and re-acquired
// the CMS token, blocking until the foreground collection completed),
// false if no foreground collection was pending.
bool CMSCollector::waitForForegroundGC() {
  bool res = false;
  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "CMS thread should have CMS token");
  // Block the foreground collector until the
  // background collectors decides whether to
  // yield.
  MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
  _foregroundGCShouldWait = true;
  if (_foregroundGCIsActive) {
    // The background collector yields to the
    // foreground collector and returns a value
    // indicating that it has yielded.  The foreground
    // collector can proceed.
    res = true;
    _foregroundGCShouldWait = false;
    // Hand over the token: clear "has", set "wants" so the token is
    // reclaimed once the foreground collection is done.
    ConcurrentMarkSweepThread::clear_CMS_flag(
      ConcurrentMarkSweepThread::CMS_cms_has_token);
    ConcurrentMarkSweepThread::set_CMS_flag(
      ConcurrentMarkSweepThread::CMS_cms_wants_token);
    // Get a possibly blocked foreground thread going
    CGC_lock->notify();
    if (TraceCMSState) {
      gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
        Thread::current(), _collectorState);
    }
    // Wait (without safepoint checks; CGC_lock is a special lock) until
    // the foreground collection has finished.
    while (_foregroundGCIsActive) {
      CGC_lock->wait(Mutex::_no_safepoint_check_flag);
    }
    ConcurrentMarkSweepThread::set_CMS_flag(
      ConcurrentMarkSweepThread::CMS_cms_has_token);
    ConcurrentMarkSweepThread::clear_CMS_flag(
      ConcurrentMarkSweepThread::CMS_cms_wants_token);
  }
  if (TraceCMSState) {
    gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
      Thread::current(), _collectorState);
  }
  return res;
}
2575 | |
// Because of the need to lock the free lists and other structures in
// the collector, common to all the generations that the collector is
// collecting, we need the gc_prologues of individual CMS generations
// delegate to their collector. It may have been simpler had the
// current infrastructure allowed one to call a prologue on a
// collector. In the absence of that we have the generation's
// prologue delegate to the collector, which delegates back
// some "local" work to a worker method in the individual generations
// that it's responsible for collecting, while itself doing any
// work common to all generations it's responsible for. A similar
// comment applies to the gc_epilogue()'s.
// The role of the variable _between_prologue_and_epilogue is to
// enforce the invocation protocol.
void CMSCollector::gc_prologue(bool full) {
  // Call gc_prologue_work() for each CMSGen and PermGen that
  // we are responsible for.

  // The following locking discipline assumes that we are only called
  // when the world is stopped.
  assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");

  // The CMSCollector prologue must call the gc_prologues for the
  // "generations" (including PermGen if any) that it's responsible
  // for.

  assert(   Thread::current()->is_VM_thread()
         || (   CMSScavengeBeforeRemark
             && Thread::current()->is_ConcurrentGC_thread()),
         "Incorrect thread type for prologue execution");

  if (_between_prologue_and_epilogue) {
    // We have already been invoked; this is a gc_prologue delegation
    // from yet another CMS generation that we are responsible for, just
    // ignore it since all relevant work has already been done.
    return;
  }

  // set a bit saying prologue has been called; cleared in epilogue
  _between_prologue_and_epilogue = true;
  // Claim locks for common data structures, then call gc_prologue_work()
  // for each CMSGen and PermGen that we are responsible for.

  getFreelistLocks();   // gets free list locks on constituent spaces
  bitMapLock()->lock_without_safepoint_check();

  // Should call gc_prologue_work() for all cms gens we are responsible for.
  // Dirty-card interception via the mod-union closure is only needed while
  // concurrent marking/precleaning is in progress (Marking..pre-Sweeping).
  bool registerClosure = _collectorState >= Marking
                         && _collectorState < Sweeping;
  // Use the parallel variant of the closure when parallel GC threads are
  // in use.
  ModUnionClosure* muc = CollectedHeap::use_parallel_gc_threads() ?
                                               &_modUnionClosurePar
                                               : &_modUnionClosure;
  _cmsGen->gc_prologue_work(full, registerClosure, muc);
  _permGen->gc_prologue_work(full, registerClosure, muc);

  if (!full) {
    stats().record_gc0_begin();
  }
}
2634 | |
void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
  // Delegate to CMScollector which knows how to coordinate between
  // this and any other CMS generations that it is responsible for
  // collecting. (The collector calls back into gc_prologue_work() below
  // for the per-generation part.)
  collector()->gc_prologue(full);
}
2641 | |
// This is a "private" interface for use by this generation's CMSCollector.
// Not to be called directly by any other entity (for instance,
// GenCollectedHeap, which calls the "public" gc_prologue method above).
//
// Per-generation prologue work: optionally registers the mod-union closure
// so that dirty cards consumed during the pause are recorded for the
// concurrent marker, notifies the space, and (debug builds only) reports
// and resets the concurrent-allocation counters.
void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
  bool registerClosure, ModUnionClosure* modUnionClosure) {
  assert(!incremental_collection_failed(), "Shouldn't be set yet");
  assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
    "Should be NULL");
  if (registerClosure) {
    cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
  }
  cmsSpace()->gc_prologue();
  // Clear stat counters
  NOT_PRODUCT(
    assert(_numObjectsPromoted == 0, "check");
    assert(_numWordsPromoted   == 0, "check");
    if (Verbose && PrintGC) {
      gclog_or_tty->print("Allocated "SIZE_FORMAT" objects, "
                          SIZE_FORMAT" bytes concurrently",
      _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
    }
    _numObjectsAllocated = 0;
    _numWordsAllocated   = 0;
  )
}
2667 | |
2668 void CMSCollector::gc_epilogue(bool full) { | |
2669 // The following locking discipline assumes that we are only called | |
2670 // when the world is stopped. | |
2671 assert(SafepointSynchronize::is_at_safepoint(), | |
2672 "world is stopped assumption"); | |
2673 | |
2674 // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks | |
2675 // if linear allocation blocks need to be appropriately marked to allow the | |
2676 // the blocks to be parsable. We also check here whether we need to nudge the | |
2677 // CMS collector thread to start a new cycle (if it's not already active). | |
2678 assert( Thread::current()->is_VM_thread() | |
2679 || ( CMSScavengeBeforeRemark | |
2680 && Thread::current()->is_ConcurrentGC_thread()), | |
2681 "Incorrect thread type for epilogue execution"); | |
2682 | |
2683 if (!_between_prologue_and_epilogue) { | |
2684 // We have already been invoked; this is a gc_epilogue delegation | |
2685 // from yet another CMS generation that we are responsible for, just | |
2686 // ignore it since all relevant work has already been done. | |
2687 return; | |
2688 } | |
2689 assert(haveFreelistLocks(), "must have freelist locks"); | |
2690 assert_lock_strong(bitMapLock()); | |
2691 | |
2692 _cmsGen->gc_epilogue_work(full); | |
2693 _permGen->gc_epilogue_work(full); | |
2694 | |
2695 if (_collectorState == AbortablePreclean || _collectorState == Precleaning) { | |
2696 // in case sampling was not already enabled, enable it | |
2697 _start_sampling = true; | |
2698 } | |
2699 // reset _eden_chunk_array so sampling starts afresh | |
2700 _eden_chunk_index = 0; | |
2701 | |
2702 size_t cms_used = _cmsGen->cmsSpace()->used(); | |
2703 size_t perm_used = _permGen->cmsSpace()->used(); | |
2704 | |
2705 // update performance counters - this uses a special version of | |
2706 // update_counters() that allows the utilization to be passed as a | |
2707 // parameter, avoiding multiple calls to used(). | |
2708 // | |
2709 _cmsGen->update_counters(cms_used); | |
2710 _permGen->update_counters(perm_used); | |
2711 | |
2712 if (CMSIncrementalMode) { | |
2713 icms_update_allocation_limits(); | |
2714 } | |
2715 | |
2716 bitMapLock()->unlock(); | |
2717 releaseFreelistLocks(); | |
2718 | |
2719 _between_prologue_and_epilogue = false; // ready for next cycle | |
2720 } | |
2721 | |
2722 void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) { | |
2723 collector()->gc_epilogue(full); | |
2724 | |
2725 // Also reset promotion tracking in par gc thread states. | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
2726 if (CollectedHeap::use_parallel_gc_threads()) { |
0 | 2727 for (uint i = 0; i < ParallelGCThreads; i++) { |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
2728 _par_gc_thread_states[i]->promo.stopTrackingPromotions(i); |
0 | 2729 } |
2730 } | |
2731 } | |
2732 | |
2733 void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) { | |
2734 assert(!incremental_collection_failed(), "Should have been cleared"); | |
2735 cmsSpace()->setPreconsumptionDirtyCardClosure(NULL); | |
2736 cmsSpace()->gc_epilogue(); | |
2737 // Print stat counters | |
2738 NOT_PRODUCT( | |
2739 assert(_numObjectsAllocated == 0, "check"); | |
2740 assert(_numWordsAllocated == 0, "check"); | |
2741 if (Verbose && PrintGC) { | |
2742 gclog_or_tty->print("Promoted "SIZE_FORMAT" objects, " | |
2743 SIZE_FORMAT" bytes", | |
2744 _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord)); | |
2745 } | |
2746 _numObjectsPromoted = 0; | |
2747 _numWordsPromoted = 0; | |
2748 ) | |
2749 | |
2750 if (PrintGC && Verbose) { | |
2751 // Call down the chain in contiguous_available needs the freelistLock | |
2752 // so print this out before releasing the freeListLock. | |
2753 gclog_or_tty->print(" Contiguous available "SIZE_FORMAT" bytes ", | |
2754 contiguous_available()); | |
2755 } | |
2756 } | |
2757 | |
2758 #ifndef PRODUCT | |
2759 bool CMSCollector::have_cms_token() { | |
2760 Thread* thr = Thread::current(); | |
2761 if (thr->is_VM_thread()) { | |
2762 return ConcurrentMarkSweepThread::vm_thread_has_cms_token(); | |
2763 } else if (thr->is_ConcurrentGC_thread()) { | |
2764 return ConcurrentMarkSweepThread::cms_thread_has_cms_token(); | |
2765 } else if (thr->is_GC_task_thread()) { | |
2766 return ConcurrentMarkSweepThread::vm_thread_has_cms_token() && | |
2767 ParGCRareEvent_lock->owned_by_self(); | |
2768 } | |
2769 return false; | |
2770 } | |
2771 #endif | |
2772 | |
2773 // Check reachability of the given heap address in CMS generation, | |
2774 // treating all other generations as roots. | |
2775 bool CMSCollector::is_cms_reachable(HeapWord* addr) { | |
2776 // We could "guarantee" below, rather than assert, but i'll | |
2777 // leave these as "asserts" so that an adventurous debugger | |
2778 // could try this in the product build provided some subset of | |
2779 // the conditions were met, provided they were intersted in the | |
2780 // results and knew that the computation below wouldn't interfere | |
2781 // with other concurrent computations mutating the structures | |
2782 // being read or written. | |
2783 assert(SafepointSynchronize::is_at_safepoint(), | |
2784 "Else mutations in object graph will make answer suspect"); | |
2785 assert(have_cms_token(), "Should hold cms token"); | |
2786 assert(haveFreelistLocks(), "must hold free list locks"); | |
2787 assert_lock_strong(bitMapLock()); | |
2788 | |
2789 // Clear the marking bit map array before starting, but, just | |
2790 // for kicks, first report if the given address is already marked | |
2791 gclog_or_tty->print_cr("Start: Address 0x%x is%s marked", addr, | |
2792 _markBitMap.isMarked(addr) ? "" : " not"); | |
2793 | |
2794 if (verify_after_remark()) { | |
2795 MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag); | |
2796 bool result = verification_mark_bm()->isMarked(addr); | |
2797 gclog_or_tty->print_cr("TransitiveMark: Address 0x%x %s marked", addr, | |
2798 result ? "IS" : "is NOT"); | |
2799 return result; | |
2800 } else { | |
2801 gclog_or_tty->print_cr("Could not compute result"); | |
2802 return false; | |
2803 } | |
2804 } | |
2805 | |
2806 //////////////////////////////////////////////////////// | |
2807 // CMS Verification Support | |
2808 //////////////////////////////////////////////////////// | |
2809 // Following the remark phase, the following invariant | |
2810 // should hold -- each object in the CMS heap which is | |
2811 // marked in markBitMap() should be marked in the verification_mark_bm(). | |
2812 | |
2813 class VerifyMarkedClosure: public BitMapClosure { | |
2814 CMSBitMap* _marks; | |
2815 bool _failed; | |
2816 | |
2817 public: | |
2818 VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {} | |
2819 | |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
143
diff
changeset
|
2820 bool do_bit(size_t offset) { |
0 | 2821 HeapWord* addr = _marks->offsetToHeapWord(offset); |
2822 if (!_marks->isMarked(addr)) { | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
2823 oop(addr)->print_on(gclog_or_tty); |
0 | 2824 gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr); |
2825 _failed = true; | |
2826 } | |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
143
diff
changeset
|
2827 return true; |
0 | 2828 } |
2829 | |
2830 bool failed() { return _failed; } | |
2831 }; | |
2832 | |
2833 bool CMSCollector::verify_after_remark() { | |
2834 gclog_or_tty->print(" [Verifying CMS Marking... "); | |
2835 MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag); | |
2836 static bool init = false; | |
2837 | |
2838 assert(SafepointSynchronize::is_at_safepoint(), | |
2839 "Else mutations in object graph will make answer suspect"); | |
2840 assert(have_cms_token(), | |
2841 "Else there may be mutual interference in use of " | |
2842 " verification data structures"); | |
2843 assert(_collectorState > Marking && _collectorState <= Sweeping, | |
2844 "Else marking info checked here may be obsolete"); | |
2845 assert(haveFreelistLocks(), "must hold free list locks"); | |
2846 assert_lock_strong(bitMapLock()); | |
2847 | |
2848 | |
2849 // Allocate marking bit map if not already allocated | |
2850 if (!init) { // first time | |
2851 if (!verification_mark_bm()->allocate(_span)) { | |
2852 return false; | |
2853 } | |
2854 init = true; | |
2855 } | |
2856 | |
2857 assert(verification_mark_stack()->isEmpty(), "Should be empty"); | |
2858 | |
2859 // Turn off refs discovery -- so we will be tracing through refs. | |
2860 // This is as intended, because by this time | |
2861 // GC must already have cleared any refs that need to be cleared, | |
2862 // and traced those that need to be marked; moreover, | |
2863 // the marking done here is not going to intefere in any | |
2864 // way with the marking information used by GC. | |
2865 NoRefDiscovery no_discovery(ref_processor()); | |
2866 | |
2867 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;) | |
2868 | |
2869 // Clear any marks from a previous round | |
2870 verification_mark_bm()->clear_all(); | |
2871 assert(verification_mark_stack()->isEmpty(), "markStack should be empty"); | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
2872 verify_work_stacks_empty(); |
0 | 2873 |
2874 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
2875 gch->ensure_parsability(false); // fill TLABs, but no need to retire them | |
2876 // Update the saved marks which may affect the root scans. | |
2877 gch->save_marks(); | |
2878 | |
2879 if (CMSRemarkVerifyVariant == 1) { | |
2880 // In this first variant of verification, we complete | |
2881 // all marking, then check if the new marks-verctor is | |
2882 // a subset of the CMS marks-vector. | |
2883 verify_after_remark_work_1(); | |
2884 } else if (CMSRemarkVerifyVariant == 2) { | |
2885 // In this second variant of verification, we flag an error | |
2886 // (i.e. an object reachable in the new marks-vector not reachable | |
2887 // in the CMS marks-vector) immediately, also indicating the | |
2888 // identify of an object (A) that references the unmarked object (B) -- | |
2889 // presumably, a mutation to A failed to be picked up by preclean/remark? | |
2890 verify_after_remark_work_2(); | |
2891 } else { | |
2892 warning("Unrecognized value %d for CMSRemarkVerifyVariant", | |
2893 CMSRemarkVerifyVariant); | |
2894 } | |
2895 gclog_or_tty->print(" done] "); | |
2896 return true; | |
2897 } | |
2898 | |
2899 void CMSCollector::verify_after_remark_work_1() { | |
2900 ResourceMark rm; | |
2901 HandleMark hm; | |
2902 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
2903 | |
2904 // Mark from roots one level into CMS | |
994
753cf9794df9
6885169: merge of 4957990 and 6863023 causes conflict on do_nmethods
jrose
parents:
993
diff
changeset
|
2905 MarkRefsIntoClosure notOlder(_span, verification_mark_bm()); |
0 | 2906 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. |
2907 | |
2908 gch->gen_process_strong_roots(_cmsGen->level(), | |
2909 true, // younger gens are roots | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
798
diff
changeset
|
2910 true, // activate StrongRootsScope |
0 | 2911 true, // collecting perm gen |
2912 SharedHeap::ScanningOption(roots_scanning_options()), | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
798
diff
changeset
|
2913 ¬Older, |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
798
diff
changeset
|
2914 true, // walk code active on stacks |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
798
diff
changeset
|
2915 NULL); |
0 | 2916 |
2917 // Now mark from the roots | |
2918 assert(_revisitStack.isEmpty(), "Should be empty"); | |
2919 MarkFromRootsClosure markFromRootsClosure(this, _span, | |
2920 verification_mark_bm(), verification_mark_stack(), &_revisitStack, | |
2921 false /* don't yield */, true /* verifying */); | |
2922 assert(_restart_addr == NULL, "Expected pre-condition"); | |
2923 verification_mark_bm()->iterate(&markFromRootsClosure); | |
2924 while (_restart_addr != NULL) { | |
2925 // Deal with stack overflow: by restarting at the indicated | |
2926 // address. | |
2927 HeapWord* ra = _restart_addr; | |
2928 markFromRootsClosure.reset(ra); | |
2929 _restart_addr = NULL; | |
2930 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end()); | |
2931 } | |
2932 assert(verification_mark_stack()->isEmpty(), "Should have been drained"); | |
2933 verify_work_stacks_empty(); | |
2934 // Should reset the revisit stack above, since no class tree | |
2935 // surgery is forthcoming. | |
2936 _revisitStack.reset(); // throwing away all contents | |
2937 | |
2938 // Marking completed -- now verify that each bit marked in | |
2939 // verification_mark_bm() is also marked in markBitMap(); flag all | |
2940 // errors by printing corresponding objects. | |
2941 VerifyMarkedClosure vcl(markBitMap()); | |
2942 verification_mark_bm()->iterate(&vcl); | |
2943 if (vcl.failed()) { | |
2944 gclog_or_tty->print("Verification failed"); | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
2945 Universe::heap()->print_on(gclog_or_tty); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
2946 fatal("CMS: failed marking verification after remark"); |
0 | 2947 } |
2948 } | |
2949 | |
2950 void CMSCollector::verify_after_remark_work_2() { | |
2951 ResourceMark rm; | |
2952 HandleMark hm; | |
2953 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
2954 | |
2955 // Mark from roots one level into CMS | |
2956 MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(), | |
994
753cf9794df9
6885169: merge of 4957990 and 6863023 causes conflict on do_nmethods
jrose
parents:
993
diff
changeset
|
2957 markBitMap()); |
0 | 2958 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. |
2959 gch->gen_process_strong_roots(_cmsGen->level(), | |
2960 true, // younger gens are roots | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
798
diff
changeset
|
2961 true, // activate StrongRootsScope |
0 | 2962 true, // collecting perm gen |
2963 SharedHeap::ScanningOption(roots_scanning_options()), | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
798
diff
changeset
|
2964 ¬Older, |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
798
diff
changeset
|
2965 true, // walk code active on stacks |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
798
diff
changeset
|
2966 NULL); |
0 | 2967 |
2968 // Now mark from the roots | |
2969 assert(_revisitStack.isEmpty(), "Should be empty"); | |
2970 MarkFromRootsVerifyClosure markFromRootsClosure(this, _span, | |
2971 verification_mark_bm(), markBitMap(), verification_mark_stack()); | |
2972 assert(_restart_addr == NULL, "Expected pre-condition"); | |
2973 verification_mark_bm()->iterate(&markFromRootsClosure); | |
2974 while (_restart_addr != NULL) { | |
2975 // Deal with stack overflow: by restarting at the indicated | |
2976 // address. | |
2977 HeapWord* ra = _restart_addr; | |
2978 markFromRootsClosure.reset(ra); | |
2979 _restart_addr = NULL; | |
2980 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end()); | |
2981 } | |
2982 assert(verification_mark_stack()->isEmpty(), "Should have been drained"); | |
2983 verify_work_stacks_empty(); | |
2984 // Should reset the revisit stack above, since no class tree | |
2985 // surgery is forthcoming. | |
2986 _revisitStack.reset(); // throwing away all contents | |
2987 | |
2988 // Marking completed -- now verify that each bit marked in | |
2989 // verification_mark_bm() is also marked in markBitMap(); flag all | |
2990 // errors by printing corresponding objects. | |
2991 VerifyMarkedClosure vcl(markBitMap()); | |
2992 verification_mark_bm()->iterate(&vcl); | |
2993 assert(!vcl.failed(), "Else verification above should not have succeeded"); | |
2994 } | |
2995 | |
2996 void ConcurrentMarkSweepGeneration::save_marks() { | |
2997 // delegate to CMS space | |
2998 cmsSpace()->save_marks(); | |
2999 for (uint i = 0; i < ParallelGCThreads; i++) { | |
3000 _par_gc_thread_states[i]->promo.startTrackingPromotions(); | |
3001 } | |
3002 } | |
3003 | |
3004 bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() { | |
3005 return cmsSpace()->no_allocs_since_save_marks(); | |
3006 } | |
3007 | |
3008 #define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \ | |
3009 \ | |
3010 void ConcurrentMarkSweepGeneration:: \ | |
3011 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \ | |
3012 cl->set_generation(this); \ | |
3013 cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl); \ | |
3014 cl->reset_generation(); \ | |
3015 save_marks(); \ | |
3016 } | |
3017 | |
3018 ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN) | |
3019 | |
3020 void | |
3021 ConcurrentMarkSweepGeneration::object_iterate_since_last_GC(ObjectClosure* blk) | |
3022 { | |
3023 // Not currently implemented; need to do the following. -- ysr. | |
3024 // dld -- I think that is used for some sort of allocation profiler. So it | |
3025 // really means the objects allocated by the mutator since the last | |
3026 // GC. We could potentially implement this cheaply by recording only | |
3027 // the direct allocations in a side data structure. | |
3028 // | |
3029 // I think we probably ought not to be required to support these | |
3030 // iterations at any arbitrary point; I think there ought to be some | |
3031 // call to enable/disable allocation profiling in a generation/space, | |
3032 // and the iterator ought to return the objects allocated in the | |
3033 // gen/space since the enable call, or the last iterator call (which | |
3034 // will probably be at a GC.) That way, for gens like CM&S that would | |
3035 // require some extra data structure to support this, we only pay the | |
3036 // cost when it's in use... | |
3037 cmsSpace()->object_iterate_since_last_GC(blk); | |
3038 } | |
3039 | |
3040 void | |
3041 ConcurrentMarkSweepGeneration::younger_refs_iterate(OopsInGenClosure* cl) { | |
3042 cl->set_generation(this); | |
3043 younger_refs_in_space_iterate(_cmsSpace, cl); | |
3044 cl->reset_generation(); | |
3045 } | |
3046 | |
3047 void | |
3048 ConcurrentMarkSweepGeneration::oop_iterate(MemRegion mr, OopClosure* cl) { | |
3049 if (freelistLock()->owned_by_self()) { | |
3050 Generation::oop_iterate(mr, cl); | |
3051 } else { | |
3052 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag); | |
3053 Generation::oop_iterate(mr, cl); | |
3054 } | |
3055 } | |
3056 | |
3057 void | |
3058 ConcurrentMarkSweepGeneration::oop_iterate(OopClosure* cl) { | |
3059 if (freelistLock()->owned_by_self()) { | |
3060 Generation::oop_iterate(cl); | |
3061 } else { | |
3062 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag); | |
3063 Generation::oop_iterate(cl); | |
3064 } | |
3065 } | |
3066 | |
3067 void | |
3068 ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) { | |
3069 if (freelistLock()->owned_by_self()) { | |
3070 Generation::object_iterate(cl); | |
3071 } else { | |
3072 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag); | |
3073 Generation::object_iterate(cl); | |
3074 } | |
3075 } | |
3076 | |
3077 void | |
517
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
457
diff
changeset
|
3078 ConcurrentMarkSweepGeneration::safe_object_iterate(ObjectClosure* cl) { |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
457
diff
changeset
|
3079 if (freelistLock()->owned_by_self()) { |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
457
diff
changeset
|
3080 Generation::safe_object_iterate(cl); |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
457
diff
changeset
|
3081 } else { |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
457
diff
changeset
|
3082 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag); |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
457
diff
changeset
|
3083 Generation::safe_object_iterate(cl); |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
457
diff
changeset
|
3084 } |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
457
diff
changeset
|
3085 } |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
457
diff
changeset
|
3086 |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
457
diff
changeset
|
3087 void |
0 | 3088 ConcurrentMarkSweepGeneration::pre_adjust_pointers() { |
3089 } | |
3090 | |
3091 void | |
3092 ConcurrentMarkSweepGeneration::post_compact() { | |
3093 } | |
3094 | |
3095 void | |
3096 ConcurrentMarkSweepGeneration::prepare_for_verify() { | |
3097 // Fix the linear allocation blocks to look like free blocks. | |
3098 | |
3099 // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those | |
3100 // are not called when the heap is verified during universe initialization and | |
3101 // at vm shutdown. | |
3102 if (freelistLock()->owned_by_self()) { | |
3103 cmsSpace()->prepare_for_verify(); | |
3104 } else { | |
3105 MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag); | |
3106 cmsSpace()->prepare_for_verify(); | |
3107 } | |
3108 } | |
3109 | |
3110 void | |
3111 ConcurrentMarkSweepGeneration::verify(bool allow_dirty /* ignored */) { | |
3112 // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those | |
3113 // are not called when the heap is verified during universe initialization and | |
3114 // at vm shutdown. | |
3115 if (freelistLock()->owned_by_self()) { | |
3116 cmsSpace()->verify(false /* ignored */); | |
3117 } else { | |
3118 MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag); | |
3119 cmsSpace()->verify(false /* ignored */); | |
3120 } | |
3121 } | |
3122 | |
3123 void CMSCollector::verify(bool allow_dirty /* ignored */) { | |
3124 _cmsGen->verify(allow_dirty); | |
3125 _permGen->verify(allow_dirty); | |
3126 } | |
3127 | |
3128 #ifndef PRODUCT | |
3129 bool CMSCollector::overflow_list_is_empty() const { | |
3130 assert(_num_par_pushes >= 0, "Inconsistency"); | |
3131 if (_overflow_list == NULL) { | |
3132 assert(_num_par_pushes == 0, "Inconsistency"); | |
3133 } | |
3134 return _overflow_list == NULL; | |
3135 } | |
3136 | |
3137 // The methods verify_work_stacks_empty() and verify_overflow_empty() | |
3138 // merely consolidate assertion checks that appear to occur together frequently. | |
3139 void CMSCollector::verify_work_stacks_empty() const { | |
3140 assert(_markStack.isEmpty(), "Marking stack should be empty"); | |
3141 assert(overflow_list_is_empty(), "Overflow list should be empty"); | |
3142 } | |
3143 | |
3144 void CMSCollector::verify_overflow_empty() const { | |
3145 assert(overflow_list_is_empty(), "Overflow list should be empty"); | |
3146 assert(no_preserved_marks(), "No preserved marks"); | |
3147 } | |
3148 #endif // PRODUCT | |
3149 | |
94
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
3150 // Decide if we want to enable class unloading as part of the |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
3151 // ensuing concurrent GC cycle. We will collect the perm gen and |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
3152 // unload classes if it's the case that: |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
3153 // (1) an explicit gc request has been made and the flag |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
3154 // ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
3155 // (2) (a) class unloading is enabled at the command line, and |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
3156 // (b) (i) perm gen threshold has been crossed, or |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
3157 // (ii) old gen is getting really full, or |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
3158 // (iii) the previous N CMS collections did not collect the |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
3159 // perm gen |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
3160 // NOTE: Provided there is no change in the state of the heap between |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
3161 // calls to this method, it should have idempotent results. Moreover, |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
3162 // its results should be monotonically increasing (i.e. going from 0 to 1, |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
3163 // but not 1 to 0) between successive calls between which the heap was |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
3164 // not collected. For the implementation below, it must thus rely on |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
3165 // the property that concurrent_cycles_since_last_unload() |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
3166 // will not decrease unless a collection cycle happened and that |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
3167 // _permGen->should_concurrent_collect() and _cmsGen->is_too_full() are |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
3168 // themselves also monotonic in that sense. See check_monotonicity() |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
3169 // below. |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
3170 bool CMSCollector::update_should_unload_classes() { |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
3171 _should_unload_classes = false; |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
3172 // Condition 1 above |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
3173 if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) { |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
3174 _should_unload_classes = true; |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
3175 } else if (CMSClassUnloadingEnabled) { // Condition 2.a above |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
3176 // Disjuncts 2.b.(i,ii,iii) above |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
3177 _should_unload_classes = (concurrent_cycles_since_last_unload() >= |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
3178 CMSClassUnloadingMaxInterval) |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
3179 || _permGen->should_concurrent_collect() |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
3180 || _cmsGen->is_too_full(); |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
3181 } |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
3182 return _should_unload_classes; |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
3183 } |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
3184 |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
3185 bool ConcurrentMarkSweepGeneration::is_too_full() const { |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
3186 bool res = should_concurrent_collect(); |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
3187 res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0); |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
3188 return res; |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
3189 } |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
3190 |
0 | 3191 void CMSCollector::setup_cms_unloading_and_verification_state() { |
3192 const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC | |
3193 || VerifyBeforeExit; | |
3194 const int rso = SharedHeap::SO_Symbols | SharedHeap::SO_Strings | |
3195 | SharedHeap::SO_CodeCache; | |
3196 | |
94
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
3197 if (should_unload_classes()) { // Should unload classes this cycle |
0 | 3198 remove_root_scanning_option(rso); // Shrink the root set appropriately |
3199 set_verifying(should_verify); // Set verification state for this cycle | |
3200 return; // Nothing else needs to be done at this time | |
3201 } | |
3202 | |
3203 // Not unloading classes this cycle | |
94
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
3204 assert(!should_unload_classes(), "Inconsitency!"); |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
3205 if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) { |
0 | 3206 // We were not verifying, or we _were_ unloading classes in the last cycle, |
3207 // AND some verification options are enabled this cycle; in this case, | |
3208 // we must make sure that the deadness map is allocated if not already so, | |
3209 // and cleared (if already allocated previously -- | |
3210 // CMSBitMap::sizeInBits() is used to determine if it's allocated). | |
3211 if (perm_gen_verify_bit_map()->sizeInBits() == 0) { | |
3212 if (!perm_gen_verify_bit_map()->allocate(_permGen->reserved())) { | |
3213 warning("Failed to allocate permanent generation verification CMS Bit Map;\n" | |
3214 "permanent generation verification disabled"); | |
3215 return; // Note that we leave verification disabled, so we'll retry this | |
3216 // allocation next cycle. We _could_ remember this failure | |
3217 // and skip further attempts and permanently disable verification | |
3218 // attempts if that is considered more desirable. | |
3219 } | |
3220 assert(perm_gen_verify_bit_map()->covers(_permGen->reserved()), | |
3221 "_perm_gen_ver_bit_map inconsistency?"); | |
3222 } else { | |
3223 perm_gen_verify_bit_map()->clear_all(); | |
3224 } | |
3225 // Include symbols, strings and code cache elements to prevent their resurrection. | |
3226 add_root_scanning_option(rso); | |
3227 set_verifying(true); | |
3228 } else if (verifying() && !should_verify) { | |
3229 // We were verifying, but some verification flags got disabled. | |
3230 set_verifying(false); | |
3231 // Exclude symbols, strings and code cache elements from root scanning to | |
3232 // reduce IM and RM pauses. | |
3233 remove_root_scanning_option(rso); | |
3234 } | |
3235 } | |
3236 | |
3237 | |
3238 #ifndef PRODUCT | |
3239 HeapWord* CMSCollector::block_start(const void* p) const { | |
3240 const HeapWord* addr = (HeapWord*)p; | |
3241 if (_span.contains(p)) { | |
3242 if (_cmsGen->cmsSpace()->is_in_reserved(addr)) { | |
3243 return _cmsGen->cmsSpace()->block_start(p); | |
3244 } else { | |
3245 assert(_permGen->cmsSpace()->is_in_reserved(addr), | |
3246 "Inconsistent _span?"); | |
3247 return _permGen->cmsSpace()->block_start(p); | |
3248 } | |
3249 } | |
3250 return NULL; | |
3251 } | |
3252 #endif | |
3253 | |
// Grow the generation in an attempt to satisfy an allocation request of
// "word_size" heap words, then retry the allocation with the freelist
// lock held. Returns the allocated space, or NULL on failure.
// - tlab must be false: TLAB allocation is not supported on this path.
// - parallel is not consulted in this body (kept for interface symmetry
//   with other generations -- TODO confirm against callers).
HeapWord*
ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
                                                   bool tlab,
                                                   bool parallel) {
  // Ask concurrent CMS work to yield synchronously while we expand.
  CMSSynchronousYieldRequest yr;
  assert(!tlab, "Can't deal with TLAB allocation");
  MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
  expand(word_size*HeapWordSize, MinHeapDeltaBytes,
    CMSExpansionCause::_satisfy_allocation);
  if (GCExpandToAllocateDelayMillis > 0) {
    // Diagnostic delay between expansion and the retried allocation.
    os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
  }
  return have_lock_and_allocate(word_size, tlab);
}
3268 | |
3269 // YSR: All of this generation expansion/shrinking stuff is an exact copy of | |
3270 // OneContigSpaceCardGeneration, which makes me wonder if we should move this | |
3271 // to CardGeneration and share it... | |
271
818a18cd69a8
6730514: assertion failure in mangling code when expanding by 0 bytes
jmasa
parents:
196
diff
changeset
|
3272 bool ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes) { |
818a18cd69a8
6730514: assertion failure in mangling code when expanding by 0 bytes
jmasa
parents:
196
diff
changeset
|
3273 return CardGeneration::expand(bytes, expand_bytes); |
818a18cd69a8
6730514: assertion failure in mangling code when expanding by 0 bytes
jmasa
parents:
196
diff
changeset
|
3274 } |
818a18cd69a8
6730514: assertion failure in mangling code when expanding by 0 bytes
jmasa
parents:
196
diff
changeset
|
3275 |
0 | 3276 void ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes, |
3277 CMSExpansionCause::Cause cause) | |
3278 { | |
271
818a18cd69a8
6730514: assertion failure in mangling code when expanding by 0 bytes
jmasa
parents:
196
diff
changeset
|
3279 |
818a18cd69a8
6730514: assertion failure in mangling code when expanding by 0 bytes
jmasa
parents:
196
diff
changeset
|
3280 bool success = expand(bytes, expand_bytes); |
818a18cd69a8
6730514: assertion failure in mangling code when expanding by 0 bytes
jmasa
parents:
196
diff
changeset
|
3281 |
0 | 3282 // remember why we expanded; this information is used |
3283 // by shouldConcurrentCollect() when making decisions on whether to start | |
3284 // a new CMS cycle. | |
3285 if (success) { | |
3286 set_expansion_cause(cause); | |
3287 if (PrintGCDetails && Verbose) { | |
3288 gclog_or_tty->print_cr("Expanded CMS gen for %s", | |
3289 CMSExpansionCause::to_string(cause)); | |
3290 } | |
3291 } | |
3292 } | |
3293 | |
// Expand the generation so the given per-thread promotion LAB ("ps->lab")
// can satisfy an allocation of "word_sz" words. Serialized on
// ParGCRareEvent_lock. Returns the allocated space, or NULL once no
// further expansion is possible.
HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
  HeapWord* res = NULL;
  MutexLocker x(ParGCRareEvent_lock);
  while (true) {
    // Expansion by some other thread might make alloc OK now:
    res = ps->lab.alloc(word_sz);
    if (res != NULL) return res;
    // If there's not enough expansion space available, give up.
    if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
      return NULL;
    }
    // Otherwise, we try expansion.
    expand(word_sz*HeapWordSize, MinHeapDeltaBytes,
      CMSExpansionCause::_allocate_par_lab);
    // Now go around the loop and try alloc again;
    // A competing par_promote might beat us to the expansion space,
    // so we may go around the loop again if promotion fails again.
    if (GCExpandToAllocateDelayMillis > 0) {
      // Diagnostic delay before retrying the allocation.
      os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
    }
  }
}
3316 | |
3317 | |
// Expand the generation until the promotion spooling area described by
// "promo" has space for at least one refill. Serialized on
// ParGCRareEvent_lock. Returns true when spooling space is secured,
// false once no further expansion is possible.
bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
  PromotionInfo* promo) {
  MutexLocker x(ParGCRareEvent_lock);
  size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
  while (true) {
    // Expansion by some other thread might make alloc OK now:
    if (promo->ensure_spooling_space()) {
      assert(promo->has_spooling_space(),
             "Post-condition of successful ensure_spooling_space()");
      return true;
    }
    // If there's not enough expansion space available, give up.
    if (_virtual_space.uncommitted_size() < refill_size_bytes) {
      return false;
    }
    // Otherwise, we try expansion.
    expand(refill_size_bytes, MinHeapDeltaBytes,
      CMSExpansionCause::_allocate_par_spooling_space);
    // Now go around the loop and try alloc again;
    // A competing allocation might beat us to the expansion space,
    // so we may go around the loop again if allocation fails again.
    if (GCExpandToAllocateDelayMillis > 0) {
      // Diagnostic delay before retrying.
      os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
    }
  }
}
3344 | |
3345 | |
3346 | |
3347 void ConcurrentMarkSweepGeneration::shrink(size_t bytes) { | |
3348 assert_locked_or_safepoint(Heap_lock); | |
3349 size_t size = ReservedSpace::page_align_size_down(bytes); | |
3350 if (size > 0) { | |
3351 shrink_by(size); | |
3352 } | |
3353 } | |
3354 | |
3355 bool ConcurrentMarkSweepGeneration::grow_by(size_t bytes) { | |
3356 assert_locked_or_safepoint(Heap_lock); | |
3357 bool result = _virtual_space.expand_by(bytes); | |
3358 if (result) { | |
3359 HeapWord* old_end = _cmsSpace->end(); | |
3360 size_t new_word_size = | |
3361 heap_word_size(_virtual_space.committed_size()); | |
3362 MemRegion mr(_cmsSpace->bottom(), new_word_size); | |
3363 _bts->resize(new_word_size); // resize the block offset shared array | |
3364 Universe::heap()->barrier_set()->resize_covered_region(mr); | |
3365 // Hmmmm... why doesn't CFLS::set_end verify locking? | |
3366 // This is quite ugly; FIX ME XXX | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
3367 _cmsSpace->assert_locked(freelistLock()); |
0 | 3368 _cmsSpace->set_end((HeapWord*)_virtual_space.high()); |
3369 | |
3370 // update the space and generation capacity counters | |
3371 if (UsePerfData) { | |
3372 _space_counters->update_capacity(); | |
3373 _gen_counters->update_all(); | |
3374 } | |
3375 | |
3376 if (Verbose && PrintGC) { | |
3377 size_t new_mem_size = _virtual_space.committed_size(); | |
3378 size_t old_mem_size = new_mem_size - bytes; | |
3379 gclog_or_tty->print_cr("Expanding %s from %ldK by %ldK to %ldK", | |
3380 name(), old_mem_size/K, bytes/K, new_mem_size/K); | |
3381 } | |
3382 } | |
3383 return result; | |
3384 } | |
3385 | |
3386 bool ConcurrentMarkSweepGeneration::grow_to_reserved() { | |
3387 assert_locked_or_safepoint(Heap_lock); | |
3388 bool success = true; | |
3389 const size_t remaining_bytes = _virtual_space.uncommitted_size(); | |
3390 if (remaining_bytes > 0) { | |
3391 success = grow_by(remaining_bytes); | |
3392 DEBUG_ONLY(if (!success) warning("grow to reserved failed");) | |
3393 } | |
3394 return success; | |
3395 } | |
3396 | |
// Shrinking is not implemented for CMS (no compaction yet); this stub
// only asserts the locking protocol and emits a warning.
void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) {
  assert_locked_or_safepoint(Heap_lock);
  assert_lock_strong(freelistLock());
  // XXX Fix when compaction is implemented.
  warning("Shrinking of CMS not yet implemented");
  return;
}
3404 | |
3405 | |
// Simple ctor/dtor wrapper for accounting & timer chores around concurrent
// phases. The constructor starts the collector timer and the wallclock
// timer (and optionally prints a phase-start banner); the destructor
// stops them and prints the phase summary.
class CMSPhaseAccounting: public StackObj {
 public:
  CMSPhaseAccounting(CMSCollector *collector,
         const char *phase,
         bool print_cr = true);
  ~CMSPhaseAccounting();

 private:
  CMSCollector *_collector;   // collector whose timers we drive
  const char *_phase;         // phase name used in log output
  elapsedTimer _wallclock;    // wallclock time of this phase
  bool _print_cr;             // terminate summary line with a newline?

 public:
  // Not MT-safe; so do not pass around these StackObj's
  // where they may be accessed by other threads.
  jlong wallclock_millis() {
    assert(_wallclock.is_active(), "Wall clock should not stop");
    _wallclock.stop();  // to record time
    jlong ret = _wallclock.milliseconds();
    _wallclock.start(); // restart
    return ret;
  }
};
3432 | |
// Start-of-phase bookkeeping: optionally reset the collector's yield
// counter, print the phase-start banner, then reset and start both the
// collector timer and our wallclock timer (in that order, so the
// wallclock always brackets the collector timer).
CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
                                       const char *phase,
                                       bool print_cr) :
  _collector(collector), _phase(phase), _print_cr(print_cr) {

  if (PrintCMSStatistics != 0) {
    _collector->resetYields();
  }
  if (PrintGCDetails && PrintGCTimeStamps) {
    gclog_or_tty->date_stamp(PrintGCDateStamps);
    gclog_or_tty->stamp();
    gclog_or_tty->print_cr(": [%s-concurrent-%s-start]",
      _collector->cmsGen()->short_name(), _phase);
  }
  _collector->resetTimer();
  _wallclock.start();
  _collector->startTimer();
}
3451 | |
// End-of-phase bookkeeping: stop both timers and, under PrintGCDetails,
// print "[<gen>-concurrent-<phase>: <cpu>/<wall> secs]" plus the yield
// count when PrintCMSStatistics is enabled.
CMSPhaseAccounting::~CMSPhaseAccounting() {
  assert(_wallclock.is_active(), "Wall clock should not have stopped");
  _collector->stopTimer();
  _wallclock.stop();
  if (PrintGCDetails) {
    gclog_or_tty->date_stamp(PrintGCDateStamps);
    if (PrintGCTimeStamps) {
      gclog_or_tty->stamp();
      gclog_or_tty->print(": ");
    }
    gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
                 _collector->cmsGen()->short_name(),
                 _phase, _collector->timerValue(), _wallclock.seconds());
    if (_print_cr) {
      gclog_or_tty->print_cr("");
    }
    if (PrintCMSStatistics != 0) {
      gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
                    _collector->yields());
    }
  }
}
3474 | |
3475 // CMS work | |
3476 | |
// Checkpoint the roots into this generation from outside
// this generation. [Note this initial checkpoint need only
// be approximate -- we'll do a catch up phase subsequently.]
// On exit, weak-reference discovery is enabled and the collector state
// advances from InitialMarking to Marking.
// asynch - true for the concurrent (background) collection path;
//          false when called inside a foreground (STW) collection.
void CMSCollector::checkpointRootsInitial(bool asynch) {
  assert(_collectorState == InitialMarking, "Wrong collector state");
  check_correct_thread_executing();
  TraceCMSMemoryManagerStats tms(_collectorState);

  ReferenceProcessor* rp = ref_processor();
  SpecializationStats::clear();
  assert(_restart_addr == NULL, "Control point invariant");
  if (asynch) {
    // acquire locks for subsequent manipulations
    MutexLockerEx x(bitMapLock(),
                    Mutex::_no_safepoint_check_flag);
    checkpointRootsInitialWork(asynch);
    rp->verify_no_references_recorded();
    rp->enable_discovery(); // enable ("weak") refs discovery
    _collectorState = Marking;
  } else {
    // (Weak) Refs discovery: this is controlled from genCollectedHeap::do_collection
    // which recognizes if we are a CMS generation, and doesn't try to turn on
    // discovery; verify that they aren't meddling.
    assert(!rp->discovery_is_atomic(),
           "incorrect setting of discovery predicate");
    assert(!rp->discovery_enabled(), "genCollectedHeap shouldn't control "
           "ref discovery for this generation kind");
    // already have locks
    checkpointRootsInitialWork(asynch);
    rp->enable_discovery(); // now enable ("weak") refs discovery
    _collectorState = Marking;
  }
  SpecializationStats::print();
}
3511 | |
// The stop-the-world work of the initial checkpoint: with the bit-map
// lock held and the world stopped, scan the strong roots (younger gens,
// perm gen, and -- depending on root_scanning_options -- symbols,
// strings and the code cache) and mark the objects they reference
// directly into _markBitMap. Concurrent marking proceeds from these
// bits afterwards.
void CMSCollector::checkpointRootsInitialWork(bool asynch) {
  assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
  assert(_collectorState == InitialMarking, "just checking");

  // If there has not been a GC[n-1] since last GC[n] cycle completed,
  // precede our marking with a collection of all
  // younger generations to keep floating garbage to a minimum.
  // XXX: we won't do this for now -- it's an optimization to be done later.

  // already have locks
  assert_lock_strong(bitMapLock());
  assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");

  // Setup the verification and class unloading state for this
  // CMS collection cycle.
  setup_cms_unloading_and_verification_state();

  NOT_PRODUCT(TraceTime t("\ncheckpointRootsInitialWork",
    PrintGCDetails && Verbose, true, gclog_or_tty);)
  if (UseAdaptiveSizePolicy) {
    size_policy()->checkpoint_roots_initial_begin();
  }

  // Reset all the PLAB chunk arrays if necessary.
  if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
    reset_survivor_plab_arrays();
  }

  ResourceMark rm;
  HandleMark  hm;

  FalseClosure falseClosure;
  // In the case of a synchronous collection, we will elide the
  // remark step, so it's important to catch all the nmethod oops
  // in this step.
  // The final 'true' flag to gen_process_strong_roots will ensure this.
  // If 'async' is true, we can relax the nmethod tracing.
  MarkRefsIntoClosure notOlder(_span, &_markBitMap);
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  verify_work_stacks_empty();
  verify_overflow_empty();

  gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
  // Update the saved marks which may affect the root scans.
  gch->save_marks();

  // weak reference processing has not started yet.
  ref_processor()->set_enqueuing_is_done(false);

  {
    // This is not needed. DEBUG_ONLY(RememberKlassesChecker imx(true);)
    COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
    gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
    gch->gen_process_strong_roots(_cmsGen->level(),
                                  true,   // younger gens are roots
                                  true,   // activate StrongRootsScope
                                  true,   // collecting perm gen
                                  SharedHeap::ScanningOption(roots_scanning_options()),
                                  &notOlder,
                                  true,   // walk all of code cache if (so & SO_CodeCache)
                                  NULL);
  }

  // Clear mod-union table; it will be dirtied in the prologue of
  // CMS generation per each younger generation collection.

  assert(_modUnionTable.isAllClear(),
       "Was cleared in most recent final checkpoint phase"
       " or no bits are set in the gc_prologue before the start of the next "
       "subsequent marking phase.");

  // Temporarily disabled, since pre/post-consumption closures don't
  // care about precleaned cards
#if 0
  {
    MemRegion mr = MemRegion((HeapWord*)_virtual_space.low(),
                             (HeapWord*)_virtual_space.high());
    _ct->ct_bs()->preclean_dirty_cards(mr);
  }
#endif

  // Save the end of the used_region of the constituent generations
  // to be used to limit the extent of sweep in each generation.
  save_sweep_limits();
  if (UseAdaptiveSizePolicy) {
    size_policy()->checkpoint_roots_initial_end(gch->gc_cause());
  }
  verify_overflow_empty();
}
3602 | |
// Marking phase driver. Returns true if marking completed, advancing the
// state to Precleaning (asynch) or FinalMarking (synchronous); returns
// false only on the asynchronous path, when marking was preempted by a
// foreground collection that will restart from scratch.
bool CMSCollector::markFromRoots(bool asynch) {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.
  assert(_collectorState == Marking, "inconsistent state?");
  check_correct_thread_executing();
  verify_overflow_empty();

  bool res;
  if (asynch) {

    // Start the timers for adaptive size policy for the concurrent phases
    // Do it here so that the foreground MS can use the concurrent
    // timer since a foreground MS might have the sweep done concurrently
    // or STW.
    if (UseAdaptiveSizePolicy) {
      size_policy()->concurrent_marking_begin();
    }

    // Weak ref discovery note: We may be discovering weak
    // refs in this generation concurrent (but interleaved) with
    // weak ref discovery by a younger generation collector.

    CMSTokenSyncWithLocks ts(true, bitMapLock());
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
    res = markFromRootsWork(asynch);
    if (res) {
      _collectorState = Precleaning;
    } else { // We failed and a foreground collection wants to take over
      assert(_foregroundGCIsActive, "internal state inconsistency");
      assert(_restart_addr == NULL,  "foreground will restart from scratch");
      if (PrintGCDetails) {
        gclog_or_tty->print_cr("bailing out to foreground collection");
      }
    }
    if (UseAdaptiveSizePolicy) {
      size_policy()->concurrent_marking_end();
    }
  } else {
    assert(SafepointSynchronize::is_at_safepoint(),
           "inconsistent with asynch == false");
    if (UseAdaptiveSizePolicy) {
      size_policy()->ms_collection_marking_begin();
    }
    // already have locks
    res = markFromRootsWork(asynch);
    _collectorState = FinalMarking;
    if (UseAdaptiveSizePolicy) {
      GenCollectedHeap* gch = GenCollectedHeap::heap();
      size_policy()->ms_collection_marking_end(gch->gc_cause());
    }
  }
  verify_overflow_empty();
  return res;
}
3662 | |
// The actual marking work: dispatch to the multi-threaded or the
// single-threaded marker depending on CMSConcurrentMTEnabled and
// ConcGCThreads. Callers hold the bit-map lock. The boolean result is
// propagated from do_marking_mt/st (false indicates preemption by a
// foreground collection -- see markFromRoots).
bool CMSCollector::markFromRootsWork(bool asynch) {
  // iterate over marked bits in bit map, doing a full scan and mark
  // from these roots using the following algorithm:
  // . if oop is to the right of the current scan pointer,
  //   mark corresponding bit (we'll process it later)
  // . else (oop is to left of current scan pointer)
  //   push oop on marking stack
  // . drain the marking stack

  // Note that when we do a marking step we need to hold the
  // bit map lock -- recall that direct allocation (by mutators)
  // and promotion (by younger generation collectors) is also
  // marking the bit map. [the so-called allocate live policy.]
  // Because the implementation of bit map marking is not
  // robust wrt simultaneous marking of bits in the same word,
  // we need to make sure that there is no such interference
  // between concurrent such updates.

  // already have locks
  assert_lock_strong(bitMapLock());

  // Clear the revisit stack, just in case there are any
  // obsolete contents from a short-circuited previous CMS cycle.
  _revisitStack.reset();
  verify_work_stacks_empty();
  verify_overflow_empty();
  assert(_revisitStack.isEmpty(), "tabula rasa");
  DEBUG_ONLY(RememberKlassesChecker cmx(should_unload_classes());)
  bool result = false;
  if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
    result = do_marking_mt(asynch);
  } else {
    result = do_marking_st(asynch);
  }
  return result;
}
3699 | |
// Forward decl
class CMSConcMarkingTask;

// Task terminator for the multi-threaded concurrent marking task.
// Extends ParallelTaskTerminator with a yield() that cooperates with
// the CMS collector (implementation is out of line).
class CMSConcMarkingTerminator: public ParallelTaskTerminator {
  CMSCollector* _collector;    // collector we coordinate yields with
  CMSConcMarkingTask* _task;   // owning task; set after construction via set_task()
 public:
  virtual void yield();

  // "n_threads" is the number of threads to be terminated.
  // "queue_set" is a set of work queues of other threads.
  // "collector" is the CMS collector associated with this task terminator.
  // "yield" indicates whether we need the gang as a whole to yield.
  CMSConcMarkingTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) :
    ParallelTaskTerminator(n_threads, queue_set),
    _collector(collector) { }

  // Late-bind the owning task (task and terminator reference each other).
  void set_task(CMSConcMarkingTask* task) {
    _task = task;
  }
};
c99c53f07c14
6692906: CMS: parallel concurrent marking may be prone to hanging or stalling mutators for periods of time
ysr
parents:
1836
diff
changeset
|
3721 |
c99c53f07c14
6692906: CMS: parallel concurrent marking may be prone to hanging or stalling mutators for periods of time
ysr
parents:
1836
diff
changeset
|
// Predicate object consulted during termination of the concurrent
// marking task: should_exit_termination() (defined out of line) lets
// worker threads break out of the termination protocol early.
class CMSConcMarkingTerminatorTerminator: public TerminatorTerminator {
  CMSConcMarkingTask* _task;   // owning task; set after construction via set_task()
 public:
  bool should_exit_termination();
  // Late-bind the owning task (task and terminator reference each other).
  void set_task(CMSConcMarkingTask* task) {
    _task = task;
  }
};
3730 | |
3731 // MT Concurrent Marking Task | |
3732 class CMSConcMarkingTask: public YieldingFlexibleGangTask { | |
3733 CMSCollector* _collector; | |
3734 int _n_workers; // requested/desired # workers | |
3735 bool _asynch; | |
3736 bool _result; | |
3737 CompactibleFreeListSpace* _cms_space; | |
3738 CompactibleFreeListSpace* _perm_space; | |
1837
c99c53f07c14
6692906: CMS: parallel concurrent marking may be prone to hanging or stalling mutators for periods of time
ysr
parents:
1836
diff
changeset
|
3739 char _pad_front[64]; // padding to ... |
c99c53f07c14
6692906: CMS: parallel concurrent marking may be prone to hanging or stalling mutators for periods of time
ysr
parents:
1836
diff
changeset
|
3740 HeapWord* _global_finger; // ... avoid sharing cache line |
c99c53f07c14
6692906: CMS: parallel concurrent marking may be prone to hanging or stalling mutators for periods of time
ysr
parents:
1836
diff
changeset
|
3741 char _pad_back[64]; |
340
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
3742 HeapWord* _restart_addr; |
0 | 3743 |
3744 // Exposed here for yielding support | |
3745 Mutex* const _bit_map_lock; | |
3746 | |
3747 // The per thread work queues, available here for stealing | |
3748 OopTaskQueueSet* _task_queues; | |
1837
c99c53f07c14
6692906: CMS: parallel concurrent marking may be prone to hanging or stalling mutators for periods of time
ysr
parents:
1836
diff
changeset
|
3749 |
c99c53f07c14
6692906: CMS: parallel concurrent marking may be prone to hanging or stalling mutators for periods of time
ysr
parents:
1836
diff
changeset
|
3750 // Termination (and yielding) support |
0 | 3751 CMSConcMarkingTerminator _term; |
1837
c99c53f07c14
6692906: CMS: parallel concurrent marking may be prone to hanging or stalling mutators for periods of time
ysr
parents:
1836
diff
changeset
|
3752 CMSConcMarkingTerminatorTerminator _term_term; |
0 | 3753 |
3754 public: | |
3755 CMSConcMarkingTask(CMSCollector* collector, | |
3756 CompactibleFreeListSpace* cms_space, | |
3757 CompactibleFreeListSpace* perm_space, | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
3758 bool asynch, |
0 | 3759 YieldingFlexibleWorkGang* workers, |
3760 OopTaskQueueSet* task_queues): | |
3761 YieldingFlexibleGangTask("Concurrent marking done multi-threaded"), | |
3762 _collector(collector), | |
3763 _cms_space(cms_space), | |
3764 _perm_space(perm_space), | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
3765 _asynch(asynch), _n_workers(0), _result(true), |
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
3766 _task_queues(task_queues), |
1837
c99c53f07c14
6692906: CMS: parallel concurrent marking may be prone to hanging or stalling mutators for periods of time
ysr
parents:
1836
diff
changeset
|
3767 _term(_n_workers, task_queues, _collector), |
0 | 3768 _bit_map_lock(collector->bitMapLock()) |
3769 { | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
3770 _requested_size = _n_workers; |
0 | 3771 _term.set_task(this); |
1837
c99c53f07c14
6692906: CMS: parallel concurrent marking may be prone to hanging or stalling mutators for periods of time
ysr
parents:
1836
diff
changeset
|
3772 _term_term.set_task(this); |
0 | 3773 assert(_cms_space->bottom() < _perm_space->bottom(), |
3774 "Finger incorrectly initialized below"); | |
340
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
3775 _restart_addr = _global_finger = _cms_space->bottom(); |
0 | 3776 } |
3777 | |
3778 | |
3779 OopTaskQueueSet* task_queues() { return _task_queues; } | |
3780 | |
3781 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); } | |
3782 | |
3783 HeapWord** global_finger_addr() { return &_global_finger; } | |
3784 | |
3785 CMSConcMarkingTerminator* terminator() { return &_term; } | |
3786 | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
3787 virtual void set_for_termination(int active_workers) { |
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
3788 terminator()->reset_for_reuse(active_workers); |
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
3789 } |
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
3790 |
0 | 3791 void work(int i); |
1837
c99c53f07c14
6692906: CMS: parallel concurrent marking may be prone to hanging or stalling mutators for periods of time
ysr
parents:
1836
diff
changeset
|
3792 bool should_yield() { |
c99c53f07c14
6692906: CMS: parallel concurrent marking may be prone to hanging or stalling mutators for periods of time
ysr
parents:
1836
diff
changeset
|
3793 return ConcurrentMarkSweepThread::should_yield() |
c99c53f07c14
6692906: CMS: parallel concurrent marking may be prone to hanging or stalling mutators for periods of time
ysr
parents:
1836
diff
changeset
|
3794 && !_collector->foregroundGCIsActive() |
c99c53f07c14
6692906: CMS: parallel concurrent marking may be prone to hanging or stalling mutators for periods of time
ysr
parents:
1836
diff
changeset
|
3795 && _asynch; |
c99c53f07c14
6692906: CMS: parallel concurrent marking may be prone to hanging or stalling mutators for periods of time
ysr
parents:
1836
diff
changeset
|
3796 } |
0 | 3797 |
3798 virtual void coordinator_yield(); // stuff done by coordinator | |
3799 bool result() { return _result; } | |
3800 | |
3801 void reset(HeapWord* ra) { | |
340
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
3802 assert(_global_finger >= _cms_space->end(), "Postcondition of ::work(i)"); |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
3803 assert(_global_finger >= _perm_space->end(), "Postcondition of ::work(i)"); |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
3804 assert(ra < _perm_space->end(), "ra too large"); |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
3805 _restart_addr = _global_finger = ra; |
0 | 3806 _term.reset_for_reuse(); |
3807 } | |
3808 | |
3809 static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk, | |
3810 OopTaskQueue* work_q); | |
3811 | |
3812 private: | |
3813 void do_scan_and_mark(int i, CompactibleFreeListSpace* sp); | |
3814 void do_work_steal(int i); | |
3815 void bump_global_finger(HeapWord* f); | |
3816 }; | |
3817 | |
1837
c99c53f07c14
6692906: CMS: parallel concurrent marking may be prone to hanging or stalling mutators for periods of time
ysr
parents:
1836
diff
changeset
|
3818 bool CMSConcMarkingTerminatorTerminator::should_exit_termination() { |
c99c53f07c14
6692906: CMS: parallel concurrent marking may be prone to hanging or stalling mutators for periods of time
ysr
parents:
1836
diff
changeset
|
3819 assert(_task != NULL, "Error"); |
c99c53f07c14
6692906: CMS: parallel concurrent marking may be prone to hanging or stalling mutators for periods of time
ysr
parents:
1836
diff
changeset
|
3820 return _task->yielding(); |
c99c53f07c14
6692906: CMS: parallel concurrent marking may be prone to hanging or stalling mutators for periods of time
ysr
parents:
1836
diff
changeset
|
3821 // Note that we do not need the disjunct || _task->should_yield() above |
c99c53f07c14
6692906: CMS: parallel concurrent marking may be prone to hanging or stalling mutators for periods of time
ysr
parents:
1836
diff
changeset
|
3822 // because we want terminating threads to yield only if the task |
c99c53f07c14
6692906: CMS: parallel concurrent marking may be prone to hanging or stalling mutators for periods of time
ysr
parents:
1836
diff
changeset
|
3823 // is already in the midst of yielding, which happens only after at least one |
c99c53f07c14
6692906: CMS: parallel concurrent marking may be prone to hanging or stalling mutators for periods of time
ysr
parents:
1836
diff
changeset
|
3824 // thread has yielded. |
c99c53f07c14
6692906: CMS: parallel concurrent marking may be prone to hanging or stalling mutators for periods of time
ysr
parents:
1836
diff
changeset
|
3825 } |
c99c53f07c14
6692906: CMS: parallel concurrent marking may be prone to hanging or stalling mutators for periods of time
ysr
parents:
1836
diff
changeset
|
3826 |
0 | 3827 void CMSConcMarkingTerminator::yield() { |
1837
c99c53f07c14
6692906: CMS: parallel concurrent marking may be prone to hanging or stalling mutators for periods of time
ysr
parents:
1836
diff
changeset
|
3828 if (_task->should_yield()) { |
0 | 3829 _task->yield(); |
3830 } else { | |
3831 ParallelTaskTerminator::yield(); | |
3832 } | |
3833 } | |
3834 | |
3835 //////////////////////////////////////////////////////////////// | |
3836 // Concurrent Marking Algorithm Sketch | |
3837 //////////////////////////////////////////////////////////////// | |
3838 // Until all tasks exhausted (both spaces): | |
3839 // -- claim next available chunk | |
3840 // -- bump global finger via CAS | |
3841 // -- find first object that starts in this chunk | |
3842 // and start scanning bitmap from that position | |
3843 // -- scan marked objects for oops | |
3844 // -- CAS-mark target, and if successful: | |
3845 // . if target oop is above global finger (volatile read) | |
3846 // nothing to do | |
3847 // . if target oop is in chunk and above local finger | |
3848 // then nothing to do | |
3849 // . else push on work-queue | |
3850 // -- Deal with possible overflow issues: | |
3851 // . local work-queue overflow causes stuff to be pushed on | |
3852 // global (common) overflow queue | |
3853 // . always first empty local work queue | |
3854 // . then get a batch of oops from global work queue if any | |
3855 // . then do work stealing | |
3856 // -- When all tasks claimed (both spaces) | |
3857 // and local work queue empty, | |
3858 // then in a loop do: | |
3859 // . check global overflow stack; steal a batch of oops and trace | |
3860 // . try to steal from other threads oif GOS is empty | |
3861 // . if neither is available, offer termination | |
3862 // -- Terminate and return result | |
3863 // | |
3864 void CMSConcMarkingTask::work(int i) { | |
3865 elapsedTimer _timer; | |
3866 ResourceMark rm; | |
3867 HandleMark hm; | |
3868 | |
3869 DEBUG_ONLY(_collector->verify_overflow_empty();) | |
3870 | |
3871 // Before we begin work, our work queue should be empty | |
3872 assert(work_queue(i)->size() == 0, "Expected to be empty"); | |
3873 // Scan the bitmap covering _cms_space, tracing through grey objects. | |
3874 _timer.start(); | |
3875 do_scan_and_mark(i, _cms_space); | |
3876 _timer.stop(); | |
3877 if (PrintCMSStatistics != 0) { | |
3878 gclog_or_tty->print_cr("Finished cms space scanning in %dth thread: %3.3f sec", | |
3879 i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers | |
3880 } | |
3881 | |
3882 // ... do the same for the _perm_space | |
3883 _timer.reset(); | |
3884 _timer.start(); | |
3885 do_scan_and_mark(i, _perm_space); | |
3886 _timer.stop(); | |
3887 if (PrintCMSStatistics != 0) { | |
3888 gclog_or_tty->print_cr("Finished perm space scanning in %dth thread: %3.3f sec", | |
3889 i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers | |
3890 } | |
3891 | |
3892 // ... do work stealing | |
3893 _timer.reset(); | |
3894 _timer.start(); | |
3895 do_work_steal(i); | |
3896 _timer.stop(); | |
3897 if (PrintCMSStatistics != 0) { | |
3898 gclog_or_tty->print_cr("Finished work stealing in %dth thread: %3.3f sec", | |
3899 i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers | |
3900 } | |
3901 assert(_collector->_markStack.isEmpty(), "Should have been emptied"); | |
3902 assert(work_queue(i)->size() == 0, "Should have been emptied"); | |
3903 // Note that under the current task protocol, the | |
3904 // following assertion is true even of the spaces | |
3905 // expanded since the completion of the concurrent | |
3906 // marking. XXX This will likely change under a strict | |
3907 // ABORT semantics. | |
3908 assert(_global_finger > _cms_space->end() && | |
3909 _global_finger >= _perm_space->end(), | |
3910 "All tasks have been completed"); | |
3911 DEBUG_ONLY(_collector->verify_overflow_empty();) | |
3912 } | |
3913 | |
3914 void CMSConcMarkingTask::bump_global_finger(HeapWord* f) { | |
3915 HeapWord* read = _global_finger; | |
3916 HeapWord* cur = read; | |
3917 while (f > read) { | |
3918 cur = read; | |
3919 read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur); | |
3920 if (cur == read) { | |
3921 // our cas succeeded | |
3922 assert(_global_finger >= f, "protocol consistency"); | |
3923 break; | |
3924 } | |
3925 } | |
3926 } | |
3927 | |
3928 // This is really inefficient, and should be redone by | |
3929 // using (not yet available) block-read and -write interfaces to the | |
3930 // stack and the work_queue. XXX FIX ME !!! | |
3931 bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk, | |
3932 OopTaskQueue* work_q) { | |
3933 // Fast lock-free check | |
3934 if (ovflw_stk->length() == 0) { | |
3935 return false; | |
3936 } | |
3937 assert(work_q->size() == 0, "Shouldn't steal"); | |
3938 MutexLockerEx ml(ovflw_stk->par_lock(), | |
3939 Mutex::_no_safepoint_check_flag); | |
3940 // Grab up to 1/4 the size of the work queue | |
679
cea947c8a988
6819891: ParNew: Fix work queue overflow code to deal correctly with +UseCompressedOops
ysr
parents:
628
diff
changeset
|
3941 size_t num = MIN2((size_t)(work_q->max_elems() - work_q->size())/4, |
0 | 3942 (size_t)ParGCDesiredObjsFromOverflowList); |
3943 num = MIN2(num, ovflw_stk->length()); | |
3944 for (int i = (int) num; i > 0; i--) { | |
3945 oop cur = ovflw_stk->pop(); | |
3946 assert(cur != NULL, "Counted wrong?"); | |
3947 work_q->push(cur); | |
3948 } | |
3949 return num > 0; | |
3950 } | |
3951 | |
3952 void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) { | |
3953 SequentialSubTasksDone* pst = sp->conc_par_seq_tasks(); | |
3954 int n_tasks = pst->n_tasks(); | |
3955 // We allow that there may be no tasks to do here because | |
3956 // we are restarting after a stack overflow. | |
340
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
3957 assert(pst->valid() || n_tasks == 0, "Uninitialized use?"); |
0 | 3958 int nth_task = 0; |
3959 | |
340
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
3960 HeapWord* aligned_start = sp->bottom(); |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
3961 if (sp->used_region().contains(_restart_addr)) { |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
3962 // Align down to a card boundary for the start of 0th task |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
3963 // for this space. |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
3964 aligned_start = |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
3965 (HeapWord*)align_size_down((uintptr_t)_restart_addr, |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
3966 CardTableModRefBS::card_size); |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
3967 } |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
3968 |
0 | 3969 size_t chunk_size = sp->marking_task_size(); |
3970 while (!pst->is_task_claimed(/* reference */ nth_task)) { | |
3971 // Having claimed the nth task in this space, | |
3972 // compute the chunk that it corresponds to: | |
340
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
3973 MemRegion span = MemRegion(aligned_start + nth_task*chunk_size, |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
3974 aligned_start + (nth_task+1)*chunk_size); |
0 | 3975 // Try and bump the global finger via a CAS; |
3976 // note that we need to do the global finger bump | |
3977 // _before_ taking the intersection below, because | |
3978 // the task corresponding to that region will be | |
3979 // deemed done even if the used_region() expands | |
3980 // because of allocation -- as it almost certainly will | |
3981 // during start-up while the threads yield in the | |
3982 // closure below. | |
3983 HeapWord* finger = span.end(); | |
3984 bump_global_finger(finger); // atomically | |
3985 // There are null tasks here corresponding to chunks | |
3986 // beyond the "top" address of the space. | |
3987 span = span.intersection(sp->used_region()); | |
3988 if (!span.is_empty()) { // Non-null task | |
340
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
3989 HeapWord* prev_obj; |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
3990 assert(!span.contains(_restart_addr) || nth_task == 0, |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
3991 "Inconsistency"); |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
3992 if (nth_task == 0) { |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
3993 // For the 0th task, we'll not need to compute a block_start. |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
3994 if (span.contains(_restart_addr)) { |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
3995 // In the case of a restart because of stack overflow, |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
3996 // we might additionally skip a chunk prefix. |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
3997 prev_obj = _restart_addr; |
0 | 3998 } else { |
340
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
3999 prev_obj = span.start(); |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
4000 } |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
4001 } else { |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
4002 // We want to skip the first object because |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
4003 // the protocol is to scan any object in its entirety |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
4004 // that _starts_ in this span; a fortiori, any |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
4005 // object starting in an earlier span is scanned |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
4006 // as part of an earlier claimed task. |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
4007 // Below we use the "careful" version of block_start |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
4008 // so we do not try to navigate uninitialized objects. |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
4009 prev_obj = sp->block_start_careful(span.start()); |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
4010 // Below we use a variant of block_size that uses the |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
4011 // Printezis bits to avoid waiting for allocated |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
4012 // objects to become initialized/parsable. |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
4013 while (prev_obj < span.start()) { |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
4014 size_t sz = sp->block_size_no_stall(prev_obj, _collector); |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
4015 if (sz > 0) { |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
4016 prev_obj += sz; |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
4017 } else { |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
4018 // In this case we may end up doing a bit of redundant |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
4019 // scanning, but that appears unavoidable, short of |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
4020 // locking the free list locks; see bug 6324141. |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
4021 break; |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
4022 } |
0 | 4023 } |
4024 } | |
4025 if (prev_obj < span.end()) { | |
4026 MemRegion my_span = MemRegion(prev_obj, span.end()); | |
4027 // Do the marking work within a non-empty span -- | |
4028 // the last argument to the constructor indicates whether the | |
4029 // iteration should be incremental with periodic yields. | |
4030 Par_MarkFromRootsClosure cl(this, _collector, my_span, | |
4031 &_collector->_markBitMap, | |
4032 work_queue(i), | |
4033 &_collector->_markStack, | |
4034 &_collector->_revisitStack, | |
4035 _asynch); | |
4036 _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end()); | |
4037 } // else nothing to do for this task | |
4038 } // else nothing to do for this task | |
4039 } | |
4040 // We'd be tempted to assert here that since there are no | |
4041 // more tasks left to claim in this space, the global_finger | |
4042 // must exceed space->top() and a fortiori space->end(). However, | |
4043 // that would not quite be correct because the bumping of | |
4044 // global_finger occurs strictly after the claiming of a task, | |
4045 // so by the time we reach here the global finger may not yet | |
4046 // have been bumped up by the thread that claimed the last | |
4047 // task. | |
4048 pst->all_tasks_completed(); | |
4049 } | |
4050 | |
935 | 4051 class Par_ConcMarkingClosure: public Par_KlassRememberingOopClosure { |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
4052 private: |
1837
c99c53f07c14
6692906: CMS: parallel concurrent marking may be prone to hanging or stalling mutators for periods of time
ysr
parents:
1836
diff
changeset
|
4053 CMSConcMarkingTask* _task; |
0 | 4054 MemRegion _span; |
4055 CMSBitMap* _bit_map; | |
4056 CMSMarkStack* _overflow_stack; | |
4057 OopTaskQueue* _work_queue; | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
4058 protected: |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
4059 DO_OOP_WORK_DEFN |
0 | 4060 public: |
1837
c99c53f07c14
6692906: CMS: parallel concurrent marking may be prone to hanging or stalling mutators for periods of time
ysr
parents:
1836
diff
changeset
|
4061 Par_ConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue, |
935 | 4062 CMSBitMap* bit_map, CMSMarkStack* overflow_stack, |
4063 CMSMarkStack* revisit_stack): | |
4064 Par_KlassRememberingOopClosure(collector, NULL, revisit_stack), | |
1837
c99c53f07c14
6692906: CMS: parallel concurrent marking may be prone to hanging or stalling mutators for periods of time
ysr
parents:
1836
diff
changeset
|
4065 _task(task), |
c99c53f07c14
6692906: CMS: parallel concurrent marking may be prone to hanging or stalling mutators for periods of time
ysr
parents:
1836
diff
changeset
|
4066 _span(collector->_span), |
0 | 4067 _work_queue(work_queue), |
4068 _bit_map(bit_map), | |
935 | 4069 _overflow_stack(overflow_stack) |
4070 { } | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
4071 virtual void do_oop(oop* p); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
4072 virtual void do_oop(narrowOop* p); |
0 | 4073 void trim_queue(size_t max); |
4074 void handle_stack_overflow(HeapWord* lost); | |
1837
c99c53f07c14
6692906: CMS: parallel concurrent marking may be prone to hanging or stalling mutators for periods of time
ysr
parents:
1836
diff
changeset
|
4075 void do_yield_check() { |
c99c53f07c14
6692906: CMS: parallel concurrent marking may be prone to hanging or stalling mutators for periods of time
ysr
parents:
1836
diff
changeset
|
4076 if (_task->should_yield()) { |
c99c53f07c14
6692906: CMS: parallel concurrent marking may be prone to hanging or stalling mutators for periods of time
ysr
parents:
1836
diff
changeset
|
4077 _task->yield(); |
c99c53f07c14
6692906: CMS: parallel concurrent marking may be prone to hanging or stalling mutators for periods of time
ysr
parents:
1836
diff
changeset
|
4078 } |
c99c53f07c14
6692906: CMS: parallel concurrent marking may be prone to hanging or stalling mutators for periods of time
ysr
parents:
1836
diff
changeset
|
4079 } |
0 | 4080 }; |
4081 | |
340
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
4082 // Grey object scanning during work stealing phase -- |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
4083 // the salient assumption here is that any references |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
4084 // that are in these stolen objects being scanned must |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
4085 // already have been initialized (else they would not have |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
4086 // been published), so we do not need to check for |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
4087 // uninitialized objects before pushing here. |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
4088 void Par_ConcMarkingClosure::do_oop(oop obj) { |
340
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
4089 assert(obj->is_oop_or_null(true), "expected an oop or NULL"); |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
4090 HeapWord* addr = (HeapWord*)obj; |
0 | 4091 // Check if oop points into the CMS generation |
4092 // and is not marked | |
4093 if (_span.contains(addr) && !_bit_map->isMarked(addr)) { | |
4094 // a white object ... | |
4095 // If we manage to "claim" the object, by being the | |
4096 // first thread to mark it, then we push it on our | |
4097 // marking stack | |
4098 if (_bit_map->par_mark(addr)) { // ... now grey | |
4099 // push on work queue (grey set) | |
4100 bool simulate_overflow = false; | |
4101 NOT_PRODUCT( | |
4102 if (CMSMarkStackOverflowALot && | |
4103 _collector->simulate_overflow()) { | |
4104 // simulate a stack overflow | |
4105 simulate_overflow = true; | |
4106 } | |
4107 ) | |
4108 if (simulate_overflow || | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
4109 !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) { |
0 | 4110 // stack overflow |
4111 if (PrintCMSStatistics != 0) { | |
4112 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at " | |
4113 SIZE_FORMAT, _overflow_stack->capacity()); | |
4114 } | |
4115 // We cannot assert that the overflow stack is full because | |
4116 // it may have been emptied since. | |
4117 assert(simulate_overflow || | |
4118 _work_queue->size() == _work_queue->max_elems(), | |
4119 "Else push should have succeeded"); | |
4120 handle_stack_overflow(addr); | |
4121 } | |
4122 } // Else, some other thread got there first | |
1837
c99c53f07c14
6692906: CMS: parallel concurrent marking may be prone to hanging or stalling mutators for periods of time
ysr
parents:
1836
diff
changeset
|
4123 do_yield_check(); |
0 | 4124 } |
4125 } | |
4126 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
4127 void Par_ConcMarkingClosure::do_oop(oop* p) { Par_ConcMarkingClosure::do_oop_work(p); } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
4128 void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
4129 |
0 | 4130 void Par_ConcMarkingClosure::trim_queue(size_t max) { |
4131 while (_work_queue->size() > max) { | |
4132 oop new_oop; | |
4133 if (_work_queue->pop_local(new_oop)) { | |
4134 assert(new_oop->is_oop(), "Should be an oop"); | |
4135 assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object"); | |
4136 assert(_span.contains((HeapWord*)new_oop), "Not in span"); | |
4137 assert(new_oop->is_parsable(), "Should be parsable"); | |
4138 new_oop->oop_iterate(this); // do_oop() above | |
1837
c99c53f07c14
6692906: CMS: parallel concurrent marking may be prone to hanging or stalling mutators for periods of time
ysr
parents:
1836
diff
changeset
|
4139 do_yield_check(); |
0 | 4140 } |
4141 } | |
4142 } | |
4143 | |
4144 // Upon stack overflow, we discard (part of) the stack, | |
4145 // remembering the least address amongst those discarded | |
4146 // in CMSCollector's _restart_address. | |
4147 void Par_ConcMarkingClosure::handle_stack_overflow(HeapWord* lost) { | |
4148 // We need to do this under a mutex to prevent other | |
340
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
4149 // workers from interfering with the work done below. |
0 | 4150 MutexLockerEx ml(_overflow_stack->par_lock(), |
4151 Mutex::_no_safepoint_check_flag); | |
4152 // Remember the least grey address discarded | |
4153 HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost); | |
4154 _collector->lower_restart_addr(ra); | |
4155 _overflow_stack->reset(); // discard stack contents | |
4156 _overflow_stack->expand(); // expand the stack if possible | |
4157 } | |
4158 | |
4159 | |
4160 void CMSConcMarkingTask::do_work_steal(int i) { | |
4161 OopTaskQueue* work_q = work_queue(i); | |
4162 oop obj_to_scan; | |
4163 CMSBitMap* bm = &(_collector->_markBitMap); | |
4164 CMSMarkStack* ovflw = &(_collector->_markStack); | |
935 | 4165 CMSMarkStack* revisit = &(_collector->_revisitStack); |
0 | 4166 int* seed = _collector->hash_seed(i); |
1837
c99c53f07c14
6692906: CMS: parallel concurrent marking may be prone to hanging or stalling mutators for periods of time
ysr
parents:
1836
diff
changeset
|
4167 Par_ConcMarkingClosure cl(_collector, this, work_q, bm, ovflw, revisit); |
0 | 4168 while (true) { |
4169 cl.trim_queue(0); | |
4170 assert(work_q->size() == 0, "Should have been emptied above"); | |
4171 if (get_work_from_overflow_stack(ovflw, work_q)) { | |
4172 // Can't assert below because the work obtained from the | |
4173 // overflow stack may already have been stolen from us. | |
4174 // assert(work_q->size() > 0, "Work from overflow stack"); | |
4175 continue; | |
4176 } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) { | |
4177 assert(obj_to_scan->is_oop(), "Should be an oop"); | |
4178 assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object"); | |
4179 obj_to_scan->oop_iterate(&cl); | |
1837
c99c53f07c14
6692906: CMS: parallel concurrent marking may be prone to hanging or stalling mutators for periods of time
ysr
parents:
1836
diff
changeset
|
4180 } else if (terminator()->offer_termination(&_term_term)) { |
0 | 4181 assert(work_q->size() == 0, "Impossible!"); |
4182 break; | |
1837
c99c53f07c14
6692906: CMS: parallel concurrent marking may be prone to hanging or stalling mutators for periods of time
ysr
parents:
1836
diff
changeset
|
4183 } else if (yielding() || should_yield()) { |
c99c53f07c14
6692906: CMS: parallel concurrent marking may be prone to hanging or stalling mutators for periods of time
ysr
parents:
1836
diff
changeset
|
4184 yield(); |
0 | 4185 } |
4186 } | |
4187 } | |
4188 | |
4189 // This is run by the CMS (coordinator) thread. | |
4190 void CMSConcMarkingTask::coordinator_yield() { | |
4191 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), | |
4192 "CMS thread should hold CMS token"); | |
935 | 4193 DEBUG_ONLY(RememberKlassesChecker mux(false);) |
0 | 4194 // First give up the locks, then yield, then re-lock |
4195 // We should probably use a constructor/destructor idiom to | |
4196 // do this unlock/lock or modify the MutexUnlocker class to | |
4197 // serve our purpose. XXX | |
4198 assert_lock_strong(_bit_map_lock); | |
4199 _bit_map_lock->unlock(); | |
4200 ConcurrentMarkSweepThread::desynchronize(true); | |
4201 ConcurrentMarkSweepThread::acknowledge_yield_request(); | |
4202 _collector->stopTimer(); | |
4203 if (PrintCMSStatistics != 0) { | |
4204 _collector->incrementYields(); | |
4205 } | |
4206 _collector->icms_wait(); | |
4207 | |
4208 // It is possible for whichever thread initiated the yield request | |
4209 // not to get a chance to wake up and take the bitmap lock between | |
4210 // this thread releasing it and reacquiring it. So, while the | |
4211 // should_yield() flag is on, let's sleep for a bit to give the | |
4212 // other thread a chance to wake up. The limit imposed on the number | |
4213 // of iterations is defensive, to avoid any unforseen circumstances | |
4214 // putting us into an infinite loop. Since it's always been this | |
4215 // (coordinator_yield()) method that was observed to cause the | |
4216 // problem, we are using a parameter (CMSCoordinatorYieldSleepCount) | |
4217 // which is by default non-zero. For the other seven methods that | |
4218 // also perform the yield operation, as are using a different | |
4219 // parameter (CMSYieldSleepCount) which is by default zero. This way we | |
4220 // can enable the sleeping for those methods too, if necessary. | |
4221 // See 6442774. | |
4222 // | |
4223 // We really need to reconsider the synchronization between the GC | |
4224 // thread and the yield-requesting threads in the future and we | |
4225 // should really use wait/notify, which is the recommended | |
4226 // way of doing this type of interaction. Additionally, we should | |
4227 // consolidate the eight methods that do the yield operation and they | |
4228 // are almost identical into one for better maintenability and | |
4229 // readability. See 6445193. | |
4230 // | |
4231 // Tony 2006.06.29 | |
4232 for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount && | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
4233 ConcurrentMarkSweepThread::should_yield() && |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
4234 !CMSCollector::foregroundGCIsActive(); ++i) { |
0 | 4235 os::sleep(Thread::current(), 1, false); |
4236 ConcurrentMarkSweepThread::acknowledge_yield_request(); | |
4237 } | |
4238 | |
4239 ConcurrentMarkSweepThread::synchronize(true); | |
4240 _bit_map_lock->lock_without_safepoint_check(); | |
4241 _collector->startTimer(); | |
4242 } | |
4243 | |
4244 bool CMSCollector::do_marking_mt(bool asynch) { | |
1284 | 4245 assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition"); |
0 | 4246 // In the future this would be determined ergonomically, based |
4247 // on #cpu's, # active mutator threads (and load), and mutation rate. | |
1284 | 4248 int num_workers = ConcGCThreads; |
0 | 4249 |
4250 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace(); | |
4251 CompactibleFreeListSpace* perm_space = _permGen->cmsSpace(); | |
4252 | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
4253 CMSConcMarkingTask tsk(this, |
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
4254 cms_space, |
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
4255 perm_space, |
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
4256 asynch, |
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
4257 conc_workers(), |
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
4258 task_queues()); |
0 | 4259 |
4260 // Since the actual number of workers we get may be different | |
4261 // from the number we requested above, do we need to do anything different | |
4262 // below? In particular, may be we need to subclass the SequantialSubTasksDone | |
4263 // class?? XXX | |
4264 cms_space ->initialize_sequential_subtasks_for_marking(num_workers); | |
4265 perm_space->initialize_sequential_subtasks_for_marking(num_workers); | |
4266 | |
4267 // Refs discovery is already non-atomic. | |
4268 assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic"); | |
2369
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2365
diff
changeset
|
4269 assert(ref_processor()->discovery_is_mt(), "Discovery should be MT"); |
1190
4788266644c1
6895236: CMS: cmsOopClosures.inline.hpp:43 assert(..., "Should remember klasses in this context")
jmasa
parents:
1145
diff
changeset
|
4270 DEBUG_ONLY(RememberKlassesChecker cmx(should_unload_classes());) |
0 | 4271 conc_workers()->start_task(&tsk); |
4272 while (tsk.yielded()) { | |
4273 tsk.coordinator_yield(); | |
4274 conc_workers()->continue_task(&tsk); | |
4275 } | |
4276 // If the task was aborted, _restart_addr will be non-NULL | |
4277 assert(tsk.completed() || _restart_addr != NULL, "Inconsistency"); | |
4278 while (_restart_addr != NULL) { | |
4279 // XXX For now we do not make use of ABORTED state and have not | |
4280 // yet implemented the right abort semantics (even in the original | |
4281 // single-threaded CMS case). That needs some more investigation | |
4282 // and is deferred for now; see CR# TBF. 07252005YSR. XXX | |
4283 assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency"); | |
4284 // If _restart_addr is non-NULL, a marking stack overflow | |
605 | 4285 // occurred; we need to do a fresh marking iteration from the |
0 | 4286 // indicated restart address. |
4287 if (_foregroundGCIsActive && asynch) { | |
4288 // We may be running into repeated stack overflows, having | |
4289 // reached the limit of the stack size, while making very | |
4290 // slow forward progress. It may be best to bail out and | |
4291 // let the foreground collector do its job. | |
4292 // Clear _restart_addr, so that foreground GC | |
4293 // works from scratch. This avoids the headache of | |
4294 // a "rescan" which would otherwise be needed because | |
4295 // of the dirty mod union table & card table. | |
4296 _restart_addr = NULL; | |
4297 return false; | |
4298 } | |
4299 // Adjust the task to restart from _restart_addr | |
4300 tsk.reset(_restart_addr); | |
4301 cms_space ->initialize_sequential_subtasks_for_marking(num_workers, | |
4302 _restart_addr); | |
4303 perm_space->initialize_sequential_subtasks_for_marking(num_workers, | |
4304 _restart_addr); | |
4305 _restart_addr = NULL; | |
4306 // Get the workers going again | |
4307 conc_workers()->start_task(&tsk); | |
4308 while (tsk.yielded()) { | |
4309 tsk.coordinator_yield(); | |
4310 conc_workers()->continue_task(&tsk); | |
4311 } | |
4312 } | |
4313 assert(tsk.completed(), "Inconsistency"); | |
4314 assert(tsk.result() == true, "Inconsistency"); | |
4315 return true; | |
4316 } | |
4317 | |
4318 bool CMSCollector::do_marking_st(bool asynch) { | |
4319 ResourceMark rm; | |
4320 HandleMark hm; | |
4321 | |
2369
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2365
diff
changeset
|
4322 // Temporarily make refs discovery single threaded (non-MT) |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2365
diff
changeset
|
4323 ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false); |
0 | 4324 MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap, |
4325 &_markStack, &_revisitStack, CMSYield && asynch); | |
4326 // the last argument to iterate indicates whether the iteration | |
4327 // should be incremental with periodic yields. | |
4328 _markBitMap.iterate(&markFromRootsClosure); | |
4329 // If _restart_addr is non-NULL, a marking stack overflow | |
605 | 4330 // occurred; we need to do a fresh iteration from the |
0 | 4331 // indicated restart address. |
4332 while (_restart_addr != NULL) { | |
4333 if (_foregroundGCIsActive && asynch) { | |
4334 // We may be running into repeated stack overflows, having | |
4335 // reached the limit of the stack size, while making very | |
4336 // slow forward progress. It may be best to bail out and | |
4337 // let the foreground collector do its job. | |
4338 // Clear _restart_addr, so that foreground GC | |
4339 // works from scratch. This avoids the headache of | |
4340 // a "rescan" which would otherwise be needed because | |
4341 // of the dirty mod union table & card table. | |
4342 _restart_addr = NULL; | |
4343 return false; // indicating failure to complete marking | |
4344 } | |
4345 // Deal with stack overflow: | |
4346 // we restart marking from _restart_addr | |
4347 HeapWord* ra = _restart_addr; | |
4348 markFromRootsClosure.reset(ra); | |
4349 _restart_addr = NULL; | |
4350 _markBitMap.iterate(&markFromRootsClosure, ra, _span.end()); | |
4351 } | |
4352 return true; | |
4353 } | |
4354 | |
4355 void CMSCollector::preclean() { | |
4356 check_correct_thread_executing(); | |
4357 assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread"); | |
4358 verify_work_stacks_empty(); | |
4359 verify_overflow_empty(); | |
4360 _abort_preclean = false; | |
4361 if (CMSPrecleaningEnabled) { | |
4362 _eden_chunk_index = 0; | |
4363 size_t used = get_eden_used(); | |
4364 size_t capacity = get_eden_capacity(); | |
4365 // Don't start sampling unless we will get sufficiently | |
4366 // many samples. | |
4367 if (used < (capacity/(CMSScheduleRemarkSamplingRatio * 100) | |
4368 * CMSScheduleRemarkEdenPenetration)) { | |
4369 _start_sampling = true; | |
4370 } else { | |
4371 _start_sampling = false; | |
4372 } | |
4373 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); | |
4374 CMSPhaseAccounting pa(this, "preclean", !PrintGCDetails); | |
4375 preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1); | |
4376 } | |
4377 CMSTokenSync x(true); // is cms thread | |
4378 if (CMSPrecleaningEnabled) { | |
4379 sample_eden(); | |
4380 _collectorState = AbortablePreclean; | |
4381 } else { | |
4382 _collectorState = FinalMarking; | |
4383 } | |
4384 verify_work_stacks_empty(); | |
4385 verify_overflow_empty(); | |
4386 } | |
4387 | |
4388 // Try and schedule the remark such that young gen | |
4389 // occupancy is CMSScheduleRemarkEdenPenetration %. | |
4390 void CMSCollector::abortable_preclean() { | |
4391 check_correct_thread_executing(); | |
4392 assert(CMSPrecleaningEnabled, "Inconsistent control state"); | |
4393 assert(_collectorState == AbortablePreclean, "Inconsistent control state"); | |
4394 | |
4395 // If Eden's current occupancy is below this threshold, | |
4396 // immediately schedule the remark; else preclean | |
4397 // past the next scavenge in an effort to | |
4398 // schedule the pause as described avove. By choosing | |
4399 // CMSScheduleRemarkEdenSizeThreshold >= max eden size | |
4400 // we will never do an actual abortable preclean cycle. | |
4401 if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) { | |
4402 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); | |
4403 CMSPhaseAccounting pa(this, "abortable-preclean", !PrintGCDetails); | |
4404 // We need more smarts in the abortable preclean | |
4405 // loop below to deal with cases where allocation | |
4406 // in young gen is very very slow, and our precleaning | |
4407 // is running a losing race against a horde of | |
4408 // mutators intent on flooding us with CMS updates | |
4409 // (dirty cards). | |
4410 // One, admittedly dumb, strategy is to give up | |
4411 // after a certain number of abortable precleaning loops | |
4412 // or after a certain maximum time. We want to make | |
4413 // this smarter in the next iteration. | |
4414 // XXX FIX ME!!! YSR | |
4415 size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0; | |
4416 while (!(should_abort_preclean() || | |
4417 ConcurrentMarkSweepThread::should_terminate())) { | |
4418 workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2); | |
4419 cumworkdone += workdone; | |
4420 loops++; | |
4421 // Voluntarily terminate abortable preclean phase if we have | |
4422 // been at it for too long. | |
4423 if ((CMSMaxAbortablePrecleanLoops != 0) && | |
4424 loops >= CMSMaxAbortablePrecleanLoops) { | |
4425 if (PrintGCDetails) { | |
4426 gclog_or_tty->print(" CMS: abort preclean due to loops "); | |
4427 } | |
4428 break; | |
4429 } | |
4430 if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) { | |
4431 if (PrintGCDetails) { | |
4432 gclog_or_tty->print(" CMS: abort preclean due to time "); | |
4433 } | |
4434 break; | |
4435 } | |
4436 // If we are doing little work each iteration, we should | |
4437 // take a short break. | |
4438 if (workdone < CMSAbortablePrecleanMinWorkPerIteration) { | |
4439 // Sleep for some time, waiting for work to accumulate | |
4440 stopTimer(); | |
4441 cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis); | |
4442 startTimer(); | |
4443 waited++; | |
4444 } | |
4445 } | |
4446 if (PrintCMSStatistics > 0) { | |
4447 gclog_or_tty->print(" [%d iterations, %d waits, %d cards)] ", | |
4448 loops, waited, cumworkdone); | |
4449 } | |
4450 } | |
4451 CMSTokenSync x(true); // is cms thread | |
4452 if (_collectorState != Idling) { | |
4453 assert(_collectorState == AbortablePreclean, | |
4454 "Spontaneous state transition?"); | |
4455 _collectorState = FinalMarking; | |
4456 } // Else, a foreground collection completed this CMS cycle. | |
4457 return; | |
4458 } | |
4459 | |
4460 // Respond to an Eden sampling opportunity | |
4461 void CMSCollector::sample_eden() { | |
4462 // Make sure a young gc cannot sneak in between our | |
4463 // reading and recording of a sample. | |
4464 assert(Thread::current()->is_ConcurrentGC_thread(), | |
4465 "Only the cms thread may collect Eden samples"); | |
4466 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), | |
4467 "Should collect samples while holding CMS token"); | |
4468 if (!_start_sampling) { | |
4469 return; | |
4470 } | |
4471 if (_eden_chunk_array) { | |
4472 if (_eden_chunk_index < _eden_chunk_capacity) { | |
4473 _eden_chunk_array[_eden_chunk_index] = *_top_addr; // take sample | |
4474 assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr, | |
4475 "Unexpected state of Eden"); | |
4476 // We'd like to check that what we just sampled is an oop-start address; | |
4477 // however, we cannot do that here since the object may not yet have been | |
4478 // initialized. So we'll instead do the check when we _use_ this sample | |
4479 // later. | |
4480 if (_eden_chunk_index == 0 || | |
4481 (pointer_delta(_eden_chunk_array[_eden_chunk_index], | |
4482 _eden_chunk_array[_eden_chunk_index-1]) | |
4483 >= CMSSamplingGrain)) { | |
4484 _eden_chunk_index++; // commit sample | |
4485 } | |
4486 } | |
4487 } | |
4488 if ((_collectorState == AbortablePreclean) && !_abort_preclean) { | |
4489 size_t used = get_eden_used(); | |
4490 size_t capacity = get_eden_capacity(); | |
4491 assert(used <= capacity, "Unexpected state of Eden"); | |
4492 if (used > (capacity/100 * CMSScheduleRemarkEdenPenetration)) { | |
4493 _abort_preclean = true; | |
4494 } | |
4495 } | |
4496 } | |
4497 | |
4498 | |
4499 size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) { | |
4500 assert(_collectorState == Precleaning || | |
4501 _collectorState == AbortablePreclean, "incorrect state"); | |
4502 ResourceMark rm; | |
4503 HandleMark hm; | |
2369
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2365
diff
changeset
|
4504 |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2365
diff
changeset
|
4505 // Precleaning is currently not MT but the reference processor |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2365
diff
changeset
|
4506 // may be set for MT. Disable it temporarily here. |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2365
diff
changeset
|
4507 ReferenceProcessor* rp = ref_processor(); |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2365
diff
changeset
|
4508 ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false); |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2365
diff
changeset
|
4509 |
0 | 4510 // Do one pass of scrubbing the discovered reference lists |
4511 // to remove any reference objects with strongly-reachable | |
4512 // referents. | |
4513 if (clean_refs) { | |
4514 CMSPrecleanRefsYieldClosure yield_cl(this); | |
4515 assert(rp->span().equals(_span), "Spans should be equal"); | |
4516 CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap, | |
935 | 4517 &_markStack, &_revisitStack, |
4518 true /* preclean */); | |
0 | 4519 CMSDrainMarkingStackClosure complete_trace(this, |
452
00b023ae2d78
6722113: CMS: Incorrect overflow handling during precleaning of Reference lists
ysr
parents:
360
diff
changeset
|
4520 _span, &_markBitMap, &_markStack, |
00b023ae2d78
6722113: CMS: Incorrect overflow handling during precleaning of Reference lists
ysr
parents:
360
diff
changeset
|
4521 &keep_alive, true /* preclean */); |
0 | 4522 |
4523 // We don't want this step to interfere with a young | |
4524 // collection because we don't want to take CPU | |
4525 // or memory bandwidth away from the young GC threads | |
4526 // (which may be as many as there are CPUs). | |
4527 // Note that we don't need to protect ourselves from | |
4528 // interference with mutators because they can't | |
4529 // manipulate the discovered reference lists nor affect | |
4530 // the computed reachability of the referents, the | |
4531 // only properties manipulated by the precleaning | |
4532 // of these reference lists. | |
4533 stopTimer(); | |
4534 CMSTokenSyncWithLocks x(true /* is cms thread */, | |
4535 bitMapLock()); | |
4536 startTimer(); | |
4537 sample_eden(); | |
935 | 4538 |
0 | 4539 // The following will yield to allow foreground |
4540 // collection to proceed promptly. XXX YSR: | |
4541 // The code in this method may need further | |
4542 // tweaking for better performance and some restructuring | |
4543 // for cleaner interfaces. | |
4544 rp->preclean_discovered_references( | |
4545 rp->is_alive_non_header(), &keep_alive, &complete_trace, | |
1190
4788266644c1
6895236: CMS: cmsOopClosures.inline.hpp:43 assert(..., "Should remember klasses in this context")
jmasa
parents:
1145
diff
changeset
|
4546 &yield_cl, should_unload_classes()); |
0 | 4547 } |
4548 | |
4549 if (clean_survivor) { // preclean the active survivor space(s) | |
4550 assert(_young_gen->kind() == Generation::DefNew || | |
4551 _young_gen->kind() == Generation::ParNew || | |
4552 _young_gen->kind() == Generation::ASParNew, | |
4553 "incorrect type for cast"); | |
4554 DefNewGeneration* dng = (DefNewGeneration*)_young_gen; | |
4555 PushAndMarkClosure pam_cl(this, _span, ref_processor(), | |
4556 &_markBitMap, &_modUnionTable, | |
4557 &_markStack, &_revisitStack, | |
4558 true /* precleaning phase */); | |
4559 stopTimer(); | |
4560 CMSTokenSyncWithLocks ts(true /* is cms thread */, | |
4561 bitMapLock()); | |
4562 startTimer(); | |
4563 unsigned int before_count = | |
4564 GenCollectedHeap::heap()->total_collections(); | |
4565 SurvivorSpacePrecleanClosure | |
4566 sss_cl(this, _span, &_markBitMap, &_markStack, | |
4567 &pam_cl, before_count, CMSYield); | |
1190
4788266644c1
6895236: CMS: cmsOopClosures.inline.hpp:43 assert(..., "Should remember klasses in this context")
jmasa
parents:
1145
diff
changeset
|
4568 DEBUG_ONLY(RememberKlassesChecker mx(should_unload_classes());) |
0 | 4569 dng->from()->object_iterate_careful(&sss_cl); |
4570 dng->to()->object_iterate_careful(&sss_cl); | |
4571 } | |
4572 MarkRefsIntoAndScanClosure | |
4573 mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable, | |
4574 &_markStack, &_revisitStack, this, CMSYield, | |
4575 true /* precleaning phase */); | |
4576 // CAUTION: The following closure has persistent state that may need to | |
4577 // be reset upon a decrease in the sequence of addresses it | |
4578 // processes. | |
4579 ScanMarkedObjectsAgainCarefullyClosure | |
4580 smoac_cl(this, _span, | |
4581 &_markBitMap, &_markStack, &_revisitStack, &mrias_cl, CMSYield); | |
4582 | |
4583 // Preclean dirty cards in ModUnionTable and CardTable using | |
4584 // appropriate convergence criterion; | |
4585 // repeat CMSPrecleanIter times unless we find that | |
4586 // we are losing. | |
4587 assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large"); | |
4588 assert(CMSPrecleanNumerator < CMSPrecleanDenominator, | |
4589 "Bad convergence multiplier"); | |
4590 assert(CMSPrecleanThreshold >= 100, | |
4591 "Unreasonably low CMSPrecleanThreshold"); | |
4592 | |
4593 size_t numIter, cumNumCards, lastNumCards, curNumCards; | |
4594 for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0; | |
4595 numIter < CMSPrecleanIter; | |
4596 numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) { | |
4597 curNumCards = preclean_mod_union_table(_cmsGen, &smoac_cl); | |
4598 if (CMSPermGenPrecleaningEnabled) { | |
4599 curNumCards += preclean_mod_union_table(_permGen, &smoac_cl); | |
4600 } | |
4601 if (Verbose && PrintGCDetails) { | |
4602 gclog_or_tty->print(" (modUnionTable: %d cards)", curNumCards); | |
4603 } | |
4604 // Either there are very few dirty cards, so re-mark | |
4605 // pause will be small anyway, or our pre-cleaning isn't | |
4606 // that much faster than the rate at which cards are being | |
4607 // dirtied, so we might as well stop and re-mark since | |
4608 // precleaning won't improve our re-mark time by much. | |
4609 if (curNumCards <= CMSPrecleanThreshold || | |
4610 (numIter > 0 && | |
4611 (curNumCards * CMSPrecleanDenominator > | |
4612 lastNumCards * CMSPrecleanNumerator))) { | |
4613 numIter++; | |
4614 cumNumCards += curNumCards; | |
4615 break; | |
4616 } | |
4617 } | |
4618 curNumCards = preclean_card_table(_cmsGen, &smoac_cl); | |
4619 if (CMSPermGenPrecleaningEnabled) { | |
4620 curNumCards += preclean_card_table(_permGen, &smoac_cl); | |
4621 } | |
4622 cumNumCards += curNumCards; | |
4623 if (PrintGCDetails && PrintCMSStatistics != 0) { | |
4624 gclog_or_tty->print_cr(" (cardTable: %d cards, re-scanned %d cards, %d iterations)", | |
4625 curNumCards, cumNumCards, numIter); | |
4626 } | |
4627 return cumNumCards; // as a measure of useful work done | |
4628 } | |
4629 | |
4630 // PRECLEANING NOTES: | |
4631 // Precleaning involves: | |
4632 // . reading the bits of the modUnionTable and clearing the set bits. | |
4633 // . For the cards corresponding to the set bits, we scan the | |
4634 // objects on those cards. This means we need the free_list_lock | |
4635 // so that we can safely iterate over the CMS space when scanning | |
4636 // for oops. | |
4637 // . When we scan the objects, we'll be both reading and setting | |
4638 // marks in the marking bit map, so we'll need the marking bit map. | |
4639 // . For protecting _collector_state transitions, we take the CGC_lock. | |
4640 // Note that any races in the reading of card table entries by the
4641 // CMS thread on the one hand and the clearing of those entries by the | |
4642 // VM thread or the setting of those entries by the mutator threads on the | |
4643 // other are quite benign. However, for efficiency it makes sense to keep | |
4644 // the VM thread from racing with the CMS thread while the latter is
4645 // recording dirty card info to the modUnionTable. We therefore also use the
4646 // CGC_lock to protect the reading of the card table and the mod union | |
4647 // table by the CM thread. | |
4648 // . We run concurrently with mutator updates, so scanning | |
4649 // needs to be done carefully -- we should not try to scan | |
4650 // potentially uninitialized objects. | |
4651 // | |
4652 // Locking strategy: While holding the CGC_lock, we scan over and | |
4653 // reset a maximal dirty range of the mod union / card tables, then lock | |
4654 // the free_list_lock and bitmap lock to do a full marking, then | |
4655 // release these locks; and repeat the cycle. This allows for a | |
4656 // certain amount of fairness in the sharing of these locks between | |
4657 // the CMS collector on the one hand, and the VM thread and the | |
4658 // mutators on the other. | |
4659 | |
4660 // NOTE: preclean_mod_union_table() and preclean_card_table() | |
4661 // further below are largely identical; if you need to modify | |
4662 // one of these methods, please check the other method too. | |
4663 | |
4664 size_t CMSCollector::preclean_mod_union_table( | |
4665 ConcurrentMarkSweepGeneration* gen, | |
4666 ScanMarkedObjectsAgainCarefullyClosure* cl) { | |
4667 verify_work_stacks_empty(); | |
4668 verify_overflow_empty(); | |
4669 | |
935 | 4670 // Turn off checking for this method but turn it back on |
4671 // selectively. There are yield points in this method | |
4672 // but it is difficult to turn the checking off just around | |
4673 // the yield points. It is simpler to selectively turn | |
4674 // it on. | |
4675 DEBUG_ONLY(RememberKlassesChecker mux(false);) | |
4676 | |
0 | 4677 // strategy: starting with the first card, accumulate contiguous |
4678 // ranges of dirty cards; clear these cards, then scan the region | |
4679 // covered by these cards. | |
4680 | |
4681 // Since all of the MUT is committed ahead, we can just use | |
4682 // that, in case the generations expand while we are precleaning. | |
4683 // It might also be fine to just use the committed part of the | |
4684 // generation, but we might potentially miss cards when the | |
4685 // generation is rapidly expanding while we are in the midst | |
4686 // of precleaning. | |
4687 HeapWord* startAddr = gen->reserved().start(); | |
4688 HeapWord* endAddr = gen->reserved().end(); | |
4689 | |
4690 cl->setFreelistLock(gen->freelistLock()); // needed for yielding | |
4691 | |
4692 size_t numDirtyCards, cumNumDirtyCards; | |
4693 HeapWord *nextAddr, *lastAddr; | |
4694 for (cumNumDirtyCards = numDirtyCards = 0, | |
4695 nextAddr = lastAddr = startAddr; | |
4696 nextAddr < endAddr; | |
4697 nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) { | |
4698 | |
4699 ResourceMark rm; | |
4700 HandleMark hm; | |
4701 | |
4702 MemRegion dirtyRegion; | |
4703 { | |
4704 stopTimer(); | |
935 | 4705 // Potential yield point |
0 | 4706 CMSTokenSync ts(true); |
4707 startTimer(); | |
4708 sample_eden(); | |
4709 // Get dirty region starting at nextOffset (inclusive), | |
4710 // simultaneously clearing it. | |
4711 dirtyRegion = | |
4712 _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr); | |
4713 assert(dirtyRegion.start() >= nextAddr, | |
4714 "returned region inconsistent?"); | |
4715 } | |
4716 // Remember where the next search should begin. | |
4717 // The returned region (if non-empty) is a right open interval, | |
4718 // so lastOffset is obtained from the right end of that | |
4719 // interval. | |
4720 lastAddr = dirtyRegion.end(); | |
4721 // Should do something more transparent and less hacky XXX | |
4722 numDirtyCards = | |
4723 _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size()); | |
4724 | |
4725 // We'll scan the cards in the dirty region (with periodic | |
4726 // yields for foreground GC as needed). | |
4727 if (!dirtyRegion.is_empty()) { | |
4728 assert(numDirtyCards > 0, "consistency check"); | |
4729 HeapWord* stop_point = NULL; | |
453
c96030fff130
6684579: SoftReference processing can be made more efficient
ysr
parents:
452
diff
changeset
|
4730 stopTimer(); |
935 | 4731 // Potential yield point |
453
c96030fff130
6684579: SoftReference processing can be made more efficient
ysr
parents:
452
diff
changeset
|
4732 CMSTokenSyncWithLocks ts(true, gen->freelistLock(), |
c96030fff130
6684579: SoftReference processing can be made more efficient
ysr
parents:
452
diff
changeset
|
4733 bitMapLock()); |
c96030fff130
6684579: SoftReference processing can be made more efficient
ysr
parents:
452
diff
changeset
|
4734 startTimer(); |
0 | 4735 { |
4736 verify_work_stacks_empty(); | |
4737 verify_overflow_empty(); | |
4738 sample_eden(); | |
1190
4788266644c1
6895236: CMS: cmsOopClosures.inline.hpp:43 assert(..., "Should remember klasses in this context")
jmasa
parents:
1145
diff
changeset
|
4739 DEBUG_ONLY(RememberKlassesChecker mx(should_unload_classes());) |
0 | 4740 stop_point = |
4741 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl); | |
4742 } | |
4743 if (stop_point != NULL) { | |
4744 // The careful iteration stopped early either because it found an | |
4745 // uninitialized object, or because we were in the midst of an | |
4746 // "abortable preclean", which should now be aborted. Redirty | |
4747 // the bits corresponding to the partially-scanned or unscanned | |
4748 // cards. We'll either restart at the next block boundary or | |
4749 // abort the preclean. | |
4750 assert((CMSPermGenPrecleaningEnabled && (gen == _permGen)) || | |
4751 (_collectorState == AbortablePreclean && should_abort_preclean()), | |
4752 "Unparsable objects should only be in perm gen."); | |
4753 _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end())); | |
4754 if (should_abort_preclean()) { | |
4755 break; // out of preclean loop | |
4756 } else { | |
4757 // Compute the next address at which preclean should pick up; | |
4758 // might need bitMapLock in order to read P-bits. | |
4759 lastAddr = next_card_start_after_block(stop_point); | |
4760 } | |
4761 } | |
4762 } else { | |
4763 assert(lastAddr == endAddr, "consistency check"); | |
4764 assert(numDirtyCards == 0, "consistency check"); | |
4765 break; | |
4766 } | |
4767 } | |
4768 verify_work_stacks_empty(); | |
4769 verify_overflow_empty(); | |
4770 return cumNumDirtyCards; | |
4771 } | |
4772 | |
4773 // NOTE: preclean_mod_union_table() above and preclean_card_table() | |
4774 // below are largely identical; if you need to modify | |
4775 // one of these methods, please check the other method too. | |
4776 | |
// Preclean the old generation using the card table proper (as opposed
// to the mod union table handled by preclean_mod_union_table() above):
// accumulate contiguous ranges of dirty cards, mark those cards
// "precleaned", then carefully scan the objects covered by those cards
// with "cl".  Returns the cumulative count of dirty cards processed.
// Runs on the CMS thread; takes the CMS token (and, while scanning,
// the freelist and bitmap locks) in short bursts so it can yield.
size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen,
  ScanMarkedObjectsAgainCarefullyClosure* cl) {
  // strategy: it's similar to preclean_mod_union_table above, in that
  // we accumulate contiguous ranges of dirty cards, mark these cards
  // precleaned, then scan the region covered by these cards.
  HeapWord* endAddr   = (HeapWord*)(gen->_virtual_space.high());
  HeapWord* startAddr = (HeapWord*)(gen->_virtual_space.low());

  cl->setFreelistLock(gen->freelistLock());   // needed for yielding

  size_t numDirtyCards, cumNumDirtyCards;
  HeapWord *lastAddr, *nextAddr;

  // Walk [startAddr, endAddr), one dirty-card range per iteration.
  // "lastAddr" is where the current range ended (or where scanning
  // stopped early); "nextAddr" is where the next search begins.
  for (cumNumDirtyCards = numDirtyCards = 0,
       nextAddr = lastAddr = startAddr;
       nextAddr < endAddr;
       nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {

    ResourceMark rm;
    HandleMark   hm;

    MemRegion dirtyRegion;
    {
      // See comments in "Precleaning notes" above on why we
      // do this locking. XXX Could the locking overheads be
      // too high when dirty cards are sparse? [I don't think so.]
      stopTimer();
      CMSTokenSync x(true); // is cms thread
      startTimer();
      sample_eden();
      // Get and clear dirty region from card table.  The cards found
      // dirty are atomically (under the token) reset to the
      // "precleaned" value so later mutator writes redirty them.
      dirtyRegion = _ct->ct_bs()->dirty_card_range_after_reset(
                                    MemRegion(nextAddr, endAddr),
                                    true,
                                    CardTableModRefBS::precleaned_card_val());

      assert(dirtyRegion.start() >= nextAddr,
             "returned region inconsistent?");
    }
    lastAddr = dirtyRegion.end();
    numDirtyCards =
      dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words;

    if (!dirtyRegion.is_empty()) {
      // Rescan the objects on the dirty cards; this needs both the
      // freelist lock (to parse the space) and the bitmap lock.
      stopTimer();
      CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock());
      startTimer();
      sample_eden();
      verify_work_stacks_empty();
      verify_overflow_empty();
      DEBUG_ONLY(RememberKlassesChecker mx(should_unload_classes());)
      HeapWord* stop_point =
        gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
      if (stop_point != NULL) {
        // The careful iteration stopped early because it found an
        // uninitialized object.  Redirty the bits corresponding to the
        // partially-scanned or unscanned cards, and start again at the
        // next block boundary.
        assert(CMSPermGenPrecleaningEnabled ||
               (_collectorState == AbortablePreclean && should_abort_preclean()),
               "Unparsable objects should only be in perm gen.");
        _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
        if (should_abort_preclean()) {
          break; // out of preclean loop
        } else {
          // Compute the next address at which preclean should pick up.
          lastAddr = next_card_start_after_block(stop_point);
        }
      }
    } else {
      // No more dirty cards up to endAddr; we are done.
      break;
    }
  }
  verify_work_stacks_empty();
  verify_overflow_empty();
  return cumNumDirtyCards;
}
4854 | |
// The final (remark) stop-world checkpoint of a CMS cycle.  Must run
// at a safepoint on the correct CMS thread, in state FinalMarking.
// In the asynchronous case it optionally performs a scavenge first
// (CMSScavengeBeforeRemark) to reduce young-gen rescan work, then
// acquires the freelist and bitmap locks before delegating to
// checkpointRootsFinalWork(); in the synchronous case the caller
// already holds all the needed locks.
void CMSCollector::checkpointRootsFinal(bool asynch,
  bool clear_all_soft_refs, bool init_mark_was_synchronous) {
  assert(_collectorState == FinalMarking, "incorrect state transition?");
  check_correct_thread_executing();
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");
  TraceCMSMemoryManagerStats tms(_collectorState);
  verify_work_stacks_empty();
  verify_overflow_empty();

  SpecializationStats::clear();
  if (PrintGCDetails) {
    gclog_or_tty->print("[YG occupancy: "SIZE_FORMAT" K ("SIZE_FORMAT" K)]",
                        _young_gen->used() / K,
                        _young_gen->capacity() / K);
  }
  if (asynch) {
    if (CMSScavengeBeforeRemark) {
      GenCollectedHeap* gch = GenCollectedHeap::heap();
      // Temporarily set flag to false, GCH->do_collection will
      // expect it to be false and set to true
      FlagSetting fl(gch->_is_gc_active, false);
      NOT_PRODUCT(TraceTime t("Scavenge-Before-Remark",
        PrintGCDetails && Verbose, true, gclog_or_tty);)
      // Collect only the generations below the CMS generation.
      int level = _cmsGen->level() - 1;
      if (level >= 0) {
        gch->do_collection(true,  // full (i.e. force, see below)
                           false, // !clear_all_soft_refs
                           0,     // size
                           false, // is_tlab
                           level  // max_level
                          );
      }
    }
    FreelistLocker x(this);
    MutexLockerEx y(bitMapLock(),
                    Mutex::_no_safepoint_check_flag);
    assert(!init_mark_was_synchronous, "but that's impossible!");
    checkpointRootsFinalWork(asynch, clear_all_soft_refs, false);
  } else {
    // already have all the locks
    checkpointRootsFinalWork(asynch, clear_all_soft_refs,
                             init_mark_was_synchronous);
  }
  verify_work_stacks_empty();
  verify_overflow_empty();
  SpecializationStats::print();
}
4904 | |
// The guts of the final remark pause.  Preconditions: freelist locks
// and the bitmap lock are held (asserted below).  Rescans roots and
// cards dirtied since the initial mark (serially or in parallel),
// processes references, runs the code-cache/JVMTI epilogues, applies
// remedial expansion after any (benign) stack/queue overflow events,
// and finally transitions the collector state to Sweeping.
void CMSCollector::checkpointRootsFinalWork(bool asynch,
  bool clear_all_soft_refs, bool init_mark_was_synchronous) {

  NOT_PRODUCT(TraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, gclog_or_tty);)

  assert(haveFreelistLocks(), "must have free list locks");
  assert_lock_strong(bitMapLock());

  if (UseAdaptiveSizePolicy) {
    size_policy()->checkpoint_roots_final_begin();
  }

  ResourceMark rm;
  HandleMark   hm;

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  if (should_unload_classes()) {
    CodeCache::gc_prologue();
  }
  assert(haveFreelistLocks(), "must have free list locks");
  assert_lock_strong(bitMapLock());

  DEBUG_ONLY(RememberKlassesChecker fmx(should_unload_classes());)
  if (!init_mark_was_synchronous) {
    // We might assume that we need not fill TLAB's when
    // CMSScavengeBeforeRemark is set, because we may have just done
    // a scavenge which would have filled all TLAB's -- and besides
    // Eden would be empty. This however may not always be the case --
    // for instance although we asked for a scavenge, it may not have
    // happened because of a JNI critical section. We probably need
    // a policy for deciding whether we can in that case wait until
    // the critical section releases and then do the remark following
    // the scavenge, and skip it here. In the absence of that policy,
    // or of an indication of whether the scavenge did indeed occur,
    // we cannot rely on TLAB's having been filled and must do
    // so here just in case a scavenge did not happen.
    gch->ensure_parsability(false);  // fill TLAB's, but no need to retire them
    // Update the saved marks which may affect the root scans.
    gch->save_marks();

    {
      COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)

      // Note on the role of the mod union table:
      // Since the marker in "markFromRoots" marks concurrently with
      // mutators, it is possible for some reachable objects not to have been
      // scanned. For instance, an only reference to an object A was
      // placed in object B after the marker scanned B. Unless B is rescanned,
      // A would be collected. Such updates to references in marked objects
      // are detected via the mod union table which is the set of all cards
      // dirtied since the first checkpoint in this GC cycle and prior to
      // the most recent young generation GC, minus those cleaned up by the
      // concurrent precleaning.
      if (CMSParallelRemarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
        TraceTime t("Rescan (parallel) ", PrintGCDetails, false, gclog_or_tty);
        do_remark_parallel();
      } else {
        TraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
                    gclog_or_tty);
        do_remark_non_parallel();
      }
    }
  } else {
    assert(!asynch, "Can't have init_mark_was_synchronous in asynch mode");
    // The initial mark was stop-world, so there's no rescanning to
    // do; go straight on to the next step below.
  }
  verify_work_stacks_empty();
  verify_overflow_empty();

  {
    NOT_PRODUCT(TraceTime ts("refProcessingWork", PrintGCDetails, false, gclog_or_tty);)
    refProcessingWork(asynch, clear_all_soft_refs);
  }
  verify_work_stacks_empty();
  verify_overflow_empty();

  if (should_unload_classes()) {
    CodeCache::gc_epilogue();
  }
  JvmtiExport::gc_epilogue();

  // If we encountered any (marking stack / work queue) overflow
  // events during the current CMS cycle, take appropriate
  // remedial measures, where possible, so as to try and avoid
  // recurrence of that condition.
  assert(_markStack.isEmpty(), "No grey objects");
  size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
                     _ser_kac_ovflw       + _ser_kac_preclean_ovflw;
  if (ser_ovflw > 0) {
    if (PrintCMSStatistics != 0) {
      gclog_or_tty->print_cr("Marking stack overflow (benign) "
        "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT
        ", kac_preclean="SIZE_FORMAT")",
        _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw,
        _ser_kac_ovflw, _ser_kac_preclean_ovflw);
    }
    // Grow the stack so the next cycle is less likely to overflow.
    _markStack.expand();
    _ser_pmc_remark_ovflw = 0;
    _ser_pmc_preclean_ovflw = 0;
    _ser_kac_preclean_ovflw = 0;
    _ser_kac_ovflw = 0;
  }
  if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
    if (PrintCMSStatistics != 0) {
      gclog_or_tty->print_cr("Work queue overflow (benign) "
        "(pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")",
        _par_pmc_remark_ovflw, _par_kac_ovflw);
    }
    _par_pmc_remark_ovflw = 0;
    _par_kac_ovflw = 0;
  }
  if (PrintCMSStatistics != 0) {
    if (_markStack._hit_limit > 0) {
      gclog_or_tty->print_cr(" (benign) Hit max stack size limit ("SIZE_FORMAT")",
                             _markStack._hit_limit);
    }
    if (_markStack._failed_double > 0) {
      gclog_or_tty->print_cr(" (benign) Failed stack doubling ("SIZE_FORMAT"),"
                             " current capacity "SIZE_FORMAT,
                             _markStack._failed_double,
                             _markStack.capacity());
    }
  }
  _markStack._hit_limit = 0;
  _markStack._failed_double = 0;

  // Check that all the klasses have been checked
  assert(_revisitStack.isEmpty(), "Not all klasses revisited");

  if ((VerifyAfterGC || VerifyDuringGC) &&
      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
    verify_after_remark();
  }

  // Change under the freelistLocks.
  _collectorState = Sweeping;
  // Call isAllClear() under bitMapLock
  assert(_modUnionTable.isAllClear(), "Should be clear by end of the"
         " final marking");
  if (UseAdaptiveSizePolicy) {
    size_policy()->checkpoint_roots_final_end(gch->gc_cause());
  }
}
5050 | |
// Parallel remark task: one gang task whose work(i) method is executed
// by each of the _n_workers gang threads during the parallel remark
// phase.  Each worker rescans young-gen chunks, strong roots, and dirty
// cards in the cms/perm spaces, then steals leftover work from peers.
class CMSParRemarkTask: public AbstractGangTask {
  CMSCollector* _collector;
  int           _n_workers;     // number of gang workers participating
  CompactibleFreeListSpace* _cms_space;
  CompactibleFreeListSpace* _perm_space;

  // The per-thread work queues, available here for stealing.
  OopTaskQueueSet*       _task_queues;
  ParallelTaskTerminator _term;  // coordinates termination of the steal loop

 public:
  // NOTE(review): the "workers" gang parameter is accepted but not
  // stored/used by this constructor.
  CMSParRemarkTask(CMSCollector* collector,
                   CompactibleFreeListSpace* cms_space,
                   CompactibleFreeListSpace* perm_space,
                   int n_workers, FlexibleWorkGang* workers,
                   OopTaskQueueSet* task_queues):
    AbstractGangTask("Rescan roots and grey objects in parallel"),
    _collector(collector),
    _cms_space(cms_space), _perm_space(perm_space),
    _n_workers(n_workers),
    _task_queues(task_queues),
    _term(n_workers, task_queues) { }

  OopTaskQueueSet* task_queues() { return _task_queues; }

  // The i-th worker's own work queue.
  OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }

  ParallelTaskTerminator* terminator() { return &_term; }
  int n_workers() { return _n_workers; }

  // Entry point executed by gang worker "i".
  void work(int i);

 private:
  // Work method in support of parallel rescan ... of young gen spaces
  void do_young_space_rescan(int i, Par_MarkRefsIntoAndScanClosure* cl,
                             ContiguousSpace* space,
                             HeapWord** chunk_array, size_t chunk_top);

  // ... of dirty cards in old space
  void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
                                  Par_MarkRefsIntoAndScanClosure* cl);

  // ... work stealing for the above
  void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
};
5097 | |
// work_queue(i) is passed to the closure
// Par_MarkRefsIntoAndScanClosure.  The "i" parameter
// also is passed to do_dirty_card_rescan_tasks() and to
// do_work_steal() to select the i-th task_queue.
// Per-worker body of the parallel remark: (1) rescan young gen chunks,
// (2) process remaining strong roots, (3) rescan dirty cards of the
// cms and perm spaces, (4) steal and drain leftover marking work.
// Each phase is individually timed for PrintCMSStatistics output.
void CMSParRemarkTask::work(int i) {
  elapsedTimer _timer;
  ResourceMark rm;
  HandleMark   hm;

  // ---------- rescan from roots --------------
  _timer.start();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  // Closure that marks and scans refs, pushing overflow onto this
  // worker's queue (work_queue(i)).
  Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector,
    _collector->_span, _collector->ref_processor(),
    &(_collector->_markBitMap),
    work_queue(i), &(_collector->_revisitStack));

  // Rescan young gen roots first since these are likely
  // coarsely partitioned and may, on that account, constitute
  // the critical path; thus, it's best to start off that
  // work first.
  // ---------- young gen roots --------------
  {
    DefNewGeneration* dng = _collector->_young_gen->as_DefNewGeneration();
    EdenSpace* eden_space = dng->eden();
    ContiguousSpace* from_space = dng->from();
    ContiguousSpace* to_space  = dng->to();

    // Sampled chunk boundaries used to partition eden and the
    // from-survivor space into rescan tasks.
    HeapWord** eca = _collector->_eden_chunk_array;
    size_t     ect = _collector->_eden_chunk_index;
    HeapWord** sca = _collector->_survivor_chunk_array;
    size_t     sct = _collector->_survivor_chunk_index;

    assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
    assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");

    // to-space has no samples (NULL/0): it is rescanned as one task.
    do_young_space_rescan(i, &par_mrias_cl, to_space, NULL, 0);
    do_young_space_rescan(i, &par_mrias_cl, from_space, sca, sct);
    do_young_space_rescan(i, &par_mrias_cl, eden_space, eca, ect);

    _timer.stop();
    if (PrintCMSStatistics != 0) {
      gclog_or_tty->print_cr(
        "Finished young gen rescan work in %dth thread: %3.3f sec",
        i, _timer.seconds());
    }
  }

  // ---------- remaining roots --------------
  _timer.reset();
  _timer.start();
  gch->gen_process_strong_roots(_collector->_cmsGen->level(),
                                false,     // yg was scanned above
                                false,     // this is parallel code
                                true,      // collecting perm gen
                                SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
                                &par_mrias_cl,
                                true,   // walk all of code cache if (so & SO_CodeCache)
                                NULL);
  assert(_collector->should_unload_classes()
         || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_CodeCache),
         "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
  _timer.stop();
  if (PrintCMSStatistics != 0) {
    gclog_or_tty->print_cr(
      "Finished remaining root rescan work in %dth thread: %3.3f sec",
      i, _timer.seconds());
  }

  // ---------- rescan dirty cards ------------
  _timer.reset();
  _timer.start();

  // Do the rescan tasks for each of the two spaces
  // (cms_space and perm_space) in turn.
  // "i" is passed to select the "i-th" task_queue
  do_dirty_card_rescan_tasks(_cms_space, i, &par_mrias_cl);
  do_dirty_card_rescan_tasks(_perm_space, i, &par_mrias_cl);
  _timer.stop();
  if (PrintCMSStatistics != 0) {
    gclog_or_tty->print_cr(
      "Finished dirty card rescan work in %dth thread: %3.3f sec",
      i, _timer.seconds());
  }

  // ---------- steal work from other threads ...
  // ---------- ... and drain overflow list.
  _timer.reset();
  _timer.start();
  do_work_steal(i, &par_mrias_cl, _collector->hash_seed(i));
  _timer.stop();
  if (PrintCMSStatistics != 0) {
    gclog_or_tty->print_cr(
      "Finished work stealing in %dth thread: %3.3f sec",
      i, _timer.seconds());
  }
}
5196 | |
// Note that parameter "i" is not used.
// Rescan one young-gen space in parallel.  Workers repeatedly claim
// sub-tasks from the space's SequentialSubTasksDone; each task maps to
// a [start, end) region delimited by the sampled boundaries in
// chunk_array (chunk_top entries), and the region's oops are scanned
// with "cl".  The worker-id parameter "i" is currently unused here.
void
CMSParRemarkTask::do_young_space_rescan(int i,
  Par_MarkRefsIntoAndScanClosure* cl, ContiguousSpace* space,
  HeapWord** chunk_array, size_t chunk_top) {
  // Until all tasks completed:
  // . claim an unclaimed task
  // . compute region boundaries corresponding to task claimed
  //   using chunk_array
  // . par_oop_iterate(cl) over that region

  ResourceMark rm;
  HandleMark   hm;

  SequentialSubTasksDone* pst = space->par_seq_tasks();
  assert(pst->valid(), "Uninitialized use?");

  int nth_task = 0;
  int n_tasks  = pst->n_tasks();

  HeapWord *start, *end;
  while (!pst->is_task_claimed(/* reference */ nth_task)) {
    // We claimed task # nth_task; compute its boundaries.
    if (chunk_top == 0) {  // no samples were taken
      assert(nth_task == 0 && n_tasks == 1, "Can have only 1 EdenSpace task");
      start = space->bottom();
      end   = space->top();
    } else if (nth_task == 0) {
      // First chunk: from the bottom up to the first sampled boundary.
      start = space->bottom();
      end   = chunk_array[nth_task];
    } else if (nth_task < (jint)chunk_top) {
      // Interior chunk: between two consecutive sampled boundaries.
      assert(nth_task >= 1, "Control point invariant");
      start = chunk_array[nth_task - 1];
      end   = chunk_array[nth_task];
    } else {
      // Last chunk: from the final boundary up to the space's top.
      assert(nth_task == (jint)chunk_top, "Control point invariant");
      start = chunk_array[chunk_top - 1];
      end   = space->top();
    }
    MemRegion mr(start, end);
    // Verify that mr is in space
    assert(mr.is_empty() || space->used_region().contains(mr),
           "Should be in space");
    // Verify that "start" is an object boundary
    assert(mr.is_empty() || oop(mr.start())->is_oop(),
           "Should be an oop");
    space->par_oop_iterate(mr, cl);
  }
  pst->all_tasks_completed();
}
5247 | |
// Rescan the dirty cards of an old-generation space in parallel.
// Workers claim fixed-size, card-and-MUT-word-aligned chunks; for each
// chunk they transfer dirty card-table bits into the mod union table
// and then rescan the objects on those (now cleared) MUT bits.
void
CMSParRemarkTask::do_dirty_card_rescan_tasks(
  CompactibleFreeListSpace* sp, int i,
  Par_MarkRefsIntoAndScanClosure* cl) {
  // Until all tasks completed:
  // . claim an unclaimed task
  // . compute region boundaries corresponding to task claimed
  // . transfer dirty bits ct->mut for that region
  // . apply rescanclosure to dirty mut bits for that region

  ResourceMark rm;
  HandleMark   hm;

  OopTaskQueue* work_q = work_queue(i);
  ModUnionClosure modUnionClosure(&(_collector->_modUnionTable));
  // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
  // CAUTION: This closure has state that persists across calls to
  // the work method dirty_range_iterate_clear() in that it has
  // imbedded in it a (subtype of) UpwardsObjectClosure. The
  // use of that state in the imbedded UpwardsObjectClosure instance
  // assumes that the cards are always iterated (even if in parallel
  // by several threads) in monotonically increasing order per each
  // thread. This is true of the implementation below which picks
  // card ranges (chunks) in monotonically increasing order globally
  // and, a-fortiori, in monotonically increasing order per thread
  // (the latter order being a subsequence of the former).
  // If the work code below is ever reorganized into a more chaotic
  // work-partitioning form than the current "sequential tasks"
  // paradigm, the use of that persistent state will have to be
  // revisited and modified appropriately. See also related
  // bug 4756801 work on which should examine this code to make
  // sure that the changes there do not run counter to the
  // assumptions made here and necessary for correctness and
  // efficiency. Note also that this code might yield inefficient
  // behaviour in the case of very large objects that span one or
  // more work chunks. Such objects would potentially be scanned
  // several times redundantly. Work on 4756801 should try and
  // address that performance anomaly if at all possible. XXX
  MemRegion  full_span  = _collector->_span;
  CMSBitMap* bm    = &(_collector->_markBitMap);     // shared
  CMSMarkStack* rs = &(_collector->_revisitStack));  // shared
  MarkFromDirtyCardsClosure
    greyRescanClosure(_collector, full_span, // entire span of interest
                      sp, bm, work_q, rs, cl);

  SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
  assert(pst->valid(), "Uninitialized use?");
  int nth_task = 0;
  // Chunks are aligned to both card and MUT-word boundaries so that
  // concurrent workers never touch the same MUT word.
  const int alignment = CardTableModRefBS::card_size * BitsPerWord;
  MemRegion span = sp->used_region();
  HeapWord* start_addr = span.start();
  HeapWord* end_addr   = (HeapWord*)round_to((intptr_t)span.end(),
                                             alignment);
  const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units
  assert((HeapWord*)round_to((intptr_t)start_addr, alignment) ==
         start_addr, "Check alignment");
  assert((size_t)round_to((intptr_t)chunk_size, alignment) ==
         chunk_size, "Check alignment");

  while (!pst->is_task_claimed(/* reference */ nth_task)) {
    // Having claimed the nth_task, compute corresponding mem-region,
    // which is a-fortiori aligned correctly (i.e. at a MUT boundary).
    // The alignment restriction ensures that we do not need any
    // synchronization with other gang-workers while setting or
    // clearing bits in this chunk of the MUT.
    MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size,
                                    start_addr + (nth_task+1)*chunk_size);
    // The last chunk's end might be way beyond end of the
    // used region. In that case pull back appropriately.
    if (this_span.end() > end_addr) {
      this_span.set_end(end_addr);
      assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)");
    }
    // Iterate over the dirty cards covering this chunk, marking them
    // precleaned, and setting the corresponding bits in the mod union
    // table. Since we have been careful to partition at Card and MUT-word
    // boundaries no synchronization is needed between parallel threads.
    _collector->_ct->ct_bs()->dirty_card_iterate(this_span,
                                                 &modUnionClosure);

    // Having transferred these marks into the modUnionTable,
    // rescan the marked objects on the dirty cards in the modUnionTable.
    // Even if this is at a synchronous collection, the initial marking
    // may have been done during an asynchronous collection so there
    // may be dirty bits in the mod-union table.
    _collector->_modUnionTable.dirty_range_iterate_clear(
                  this_span, &greyRescanClosure);
    _collector->_modUnionTable.verifyNoOneBitsInRange(
                                 this_span.start(),
                                 this_span.end());
  }
  pst->all_tasks_completed();  // declare that i am done
}
5341 | |
// . see if we can share work_queues with ParNew? XXX
// Work-stealing/termination phase of the parallel remark: each worker
// loops draining its own queue, replenishing from the global overflow
// list, then stealing from peers' queues; it exits only when the
// ParallelTaskTerminator agrees all workers are out of work.
void
CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl,
                                int* seed) {
  OopTaskQueue* work_q = work_queue(i);
  NOT_PRODUCT(int num_steals = 0;)
  oop obj_to_scan;
  CMSBitMap* bm = &(_collector->_markBitMap);

  while (true) {
    // Completely finish any left over work from (an) earlier round(s)
    cl->trim_queue(0);
    // Take at most a quarter of this queue's remaining capacity from
    // the overflow list, bounded by ParGCDesiredObjsFromOverflowList.
    size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
                                         (size_t)ParGCDesiredObjsFromOverflowList);
    // Now check if there's any work in the overflow list
    // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
    // only affects the number of attempts made to get work from the
    // overflow list and does not affect the number of workers.  Just
    // pass ParallelGCThreads so this behavior is unchanged.
    if (_collector->par_take_from_overflow_list(num_from_overflow_list,
                                                work_q,
                                                ParallelGCThreads)) {
      // found something in global overflow list;
      // not yet ready to go stealing work from others.
      // We'd like to assert(work_q->size() != 0, ...)
      // because we just took work from the overflow list,
      // but of course we can't since all of that could have
      // been already stolen from us.
      // "He giveth and He taketh away."
      continue;
    }
    // Verify that we have no work before we resort to stealing
    assert(work_q->size() == 0, "Have work, shouldn't steal");
    // Try to steal from other queues that have work
    if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
      NOT_PRODUCT(num_steals++;)
      assert(obj_to_scan->is_oop(), "Oops, not an oop!");
      assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
      // Do scanning work
      obj_to_scan->oop_iterate(cl);
      // Loop around, finish this work, and try to steal some more
    } else if (terminator()->offer_termination()) {
      break;  // nirvana from the infinite cycle
    }
  }
  NOT_PRODUCT(
    if (PrintCMSStatistics != 0) {
      gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
    }
  )
  assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
         "Else our work is not yet done");
}
5395 | |
5396 // Return a thread-local PLAB recording array, as appropriate. | |
5397 void* CMSCollector::get_data_recorder(int thr_num) { | |
5398 if (_survivor_plab_array != NULL && | |
5399 (CMSPLABRecordAlways || | |
5400 (_collectorState > Marking && _collectorState < FinalMarking))) { | |
5401 assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds"); | |
5402 ChunkArray* ca = &_survivor_plab_array[thr_num]; | |
5403 ca->reset(); // clear it so that fresh data is recorded | |
5404 return (void*) ca; | |
5405 } else { | |
5406 return NULL; | |
5407 } | |
5408 } | |
5409 | |
5410 // Reset all the thread-local PLAB recording arrays | |
5411 void CMSCollector::reset_survivor_plab_arrays() { | |
5412 for (uint i = 0; i < ParallelGCThreads; i++) { | |
5413 _survivor_plab_array[i].reset(); | |
5414 } | |
5415 } | |
5416 | |
5417 // Merge the per-thread plab arrays into the global survivor chunk | |
5418 // array which will provide the partitioning of the survivor space | |
5419 // for CMS rescan. | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
5420 void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv, |
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
5421 int no_of_gc_threads) { |
0 | 5422 assert(_survivor_plab_array != NULL, "Error"); |
5423 assert(_survivor_chunk_array != NULL, "Error"); | |
5424 assert(_collectorState == FinalMarking, "Error"); | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
5425 for (int j = 0; j < no_of_gc_threads; j++) { |
0 | 5426 _cursor[j] = 0; |
5427 } | |
5428 HeapWord* top = surv->top(); | |
5429 size_t i; | |
5430 for (i = 0; i < _survivor_chunk_capacity; i++) { // all sca entries | |
5431 HeapWord* min_val = top; // Higher than any PLAB address | |
5432 uint min_tid = 0; // position of min_val this round | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
5433 for (int j = 0; j < no_of_gc_threads; j++) { |
0 | 5434 ChunkArray* cur_sca = &_survivor_plab_array[j]; |
5435 if (_cursor[j] == cur_sca->end()) { | |
5436 continue; | |
5437 } | |
5438 assert(_cursor[j] < cur_sca->end(), "ctl pt invariant"); | |
5439 HeapWord* cur_val = cur_sca->nth(_cursor[j]); | |
5440 assert(surv->used_region().contains(cur_val), "Out of bounds value"); | |
5441 if (cur_val < min_val) { | |
5442 min_tid = j; | |
5443 min_val = cur_val; | |
5444 } else { | |
5445 assert(cur_val < top, "All recorded addresses should be less"); | |
5446 } | |
5447 } | |
5448 // At this point min_val and min_tid are respectively | |
5449 // the least address in _survivor_plab_array[j]->nth(_cursor[j]) | |
5450 // and the thread (j) that witnesses that address. | |
5451 // We record this address in the _survivor_chunk_array[i] | |
5452 // and increment _cursor[min_tid] prior to the next round i. | |
5453 if (min_val == top) { | |
5454 break; | |
5455 } | |
5456 _survivor_chunk_array[i] = min_val; | |
5457 _cursor[min_tid]++; | |
5458 } | |
5459 // We are all done; record the size of the _survivor_chunk_array | |
5460 _survivor_chunk_index = i; // exclusive: [0, i) | |
5461 if (PrintCMSStatistics > 0) { | |
5462 gclog_or_tty->print(" (Survivor:" SIZE_FORMAT "chunks) ", i); | |
5463 } | |
5464 // Verify that we used up all the recorded entries | |
5465 #ifdef ASSERT | |
5466 size_t total = 0; | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
5467 for (int j = 0; j < no_of_gc_threads; j++) { |
0 | 5468 assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant"); |
5469 total += _cursor[j]; | |
5470 } | |
5471 assert(total == _survivor_chunk_index, "Ctl Pt Invariant"); | |
5472 // Check that the merged array is in sorted order | |
5473 if (total > 0) { | |
5474 for (size_t i = 0; i < total - 1; i++) { | |
5475 if (PrintCMSStatistics > 0) { | |
5476 gclog_or_tty->print(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ", | |
5477 i, _survivor_chunk_array[i]); | |
5478 } | |
5479 assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1], | |
5480 "Not sorted"); | |
5481 } | |
5482 } | |
5483 #endif // ASSERT | |
5484 } | |
5485 | |
5486 // Set up the space's par_seq_tasks structure for work claiming | |
5487 // for parallel rescan of young gen. | |
5488 // See ParRescanTask where this is currently used. | |
5489 void | |
5490 CMSCollector:: | |
5491 initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) { | |
5492 assert(n_threads > 0, "Unexpected n_threads argument"); | |
5493 DefNewGeneration* dng = (DefNewGeneration*)_young_gen; | |
5494 | |
5495 // Eden space | |
5496 { | |
5497 SequentialSubTasksDone* pst = dng->eden()->par_seq_tasks(); | |
5498 assert(!pst->valid(), "Clobbering existing data?"); | |
5499 // Each valid entry in [0, _eden_chunk_index) represents a task. | |
5500 size_t n_tasks = _eden_chunk_index + 1; | |
5501 assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error"); | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
5502 // Sets the condition for completion of the subtask (how many threads |
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
5503 // need to finish in order to be done). |
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
5504 pst->set_n_threads(n_threads); |
0 | 5505 pst->set_n_tasks((int)n_tasks); |
5506 } | |
5507 | |
5508 // Merge the survivor plab arrays into _survivor_chunk_array | |
5509 if (_survivor_plab_array != NULL) { | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
5510 merge_survivor_plab_arrays(dng->from(), n_threads); |
0 | 5511 } else { |
5512 assert(_survivor_chunk_index == 0, "Error"); | |
5513 } | |
5514 | |
5515 // To space | |
5516 { | |
5517 SequentialSubTasksDone* pst = dng->to()->par_seq_tasks(); | |
5518 assert(!pst->valid(), "Clobbering existing data?"); | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
5519 // Sets the condition for completion of the subtask (how many threads |
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
5520 // need to finish in order to be done). |
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
5521 pst->set_n_threads(n_threads); |
0 | 5522 pst->set_n_tasks(1); |
5523 assert(pst->valid(), "Error"); | |
5524 } | |
5525 | |
5526 // From space | |
5527 { | |
5528 SequentialSubTasksDone* pst = dng->from()->par_seq_tasks(); | |
5529 assert(!pst->valid(), "Clobbering existing data?"); | |
5530 size_t n_tasks = _survivor_chunk_index + 1; | |
5531 assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error"); | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
5532 // Sets the condition for completion of the subtask (how many threads |
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
5533 // need to finish in order to be done). |
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
5534 pst->set_n_threads(n_threads); |
0 | 5535 pst->set_n_tasks((int)n_tasks); |
5536 assert(pst->valid(), "Error"); | |
5537 } | |
5538 } | |
5539 | |
5540 // Parallel version of remark | |
5541 void CMSCollector::do_remark_parallel() { | |
5542 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
5543 FlexibleWorkGang* workers = gch->workers(); |
0 | 5544 assert(workers != NULL, "Need parallel worker threads."); |
5545 int n_workers = workers->total_workers(); | |
5546 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace(); | |
5547 CompactibleFreeListSpace* perm_space = _permGen->cmsSpace(); | |
5548 | |
5549 CMSParRemarkTask tsk(this, | |
5550 cms_space, perm_space, | |
5551 n_workers, workers, task_queues()); | |
5552 | |
5553 // Set up for parallel process_strong_roots work. | |
5554 gch->set_par_threads(n_workers); | |
5555 // We won't be iterating over the cards in the card table updating | |
5556 // the younger_gen cards, so we shouldn't call the following else | |
5557 // the verification code as well as subsequent younger_refs_iterate | |
5558 // code would get confused. XXX | |
5559 // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel | |
5560 | |
5561 // The young gen rescan work will not be done as part of | |
5562 // process_strong_roots (which currently doesn't knw how to | |
5563 // parallelize such a scan), but rather will be broken up into | |
5564 // a set of parallel tasks (via the sampling that the [abortable] | |
5565 // preclean phase did of EdenSpace, plus the [two] tasks of | |
5566 // scanning the [two] survivor spaces. Further fine-grain | |
5567 // parallelization of the scanning of the survivor spaces | |
5568 // themselves, and of precleaning of the younger gen itself | |
5569 // is deferred to the future. | |
5570 initialize_sequential_subtasks_for_young_gen_rescan(n_workers); | |
5571 | |
5572 // The dirty card rescan work is broken up into a "sequence" | |
5573 // of parallel tasks (per constituent space) that are dynamically | |
5574 // claimed by the parallel threads. | |
5575 cms_space->initialize_sequential_subtasks_for_rescan(n_workers); | |
5576 perm_space->initialize_sequential_subtasks_for_rescan(n_workers); | |
5577 | |
5578 // It turns out that even when we're using 1 thread, doing the work in a | |
5579 // separate thread causes wide variance in run times. We can't help this | |
5580 // in the multi-threaded case, but we special-case n=1 here to get | |
5581 // repeatable measurements of the 1-thread overhead of the parallel code. | |
5582 if (n_workers > 1) { | |
2369
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2365
diff
changeset
|
5583 // Make refs discovery MT-safe, if it isn't already: it may not |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2365
diff
changeset
|
5584 // necessarily be so, since it's possible that we are doing |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2365
diff
changeset
|
5585 // ST marking. |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2365
diff
changeset
|
5586 ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true); |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
798
diff
changeset
|
5587 GenCollectedHeap::StrongRootsScope srs(gch); |
0 | 5588 workers->run_task(&tsk); |
5589 } else { | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
798
diff
changeset
|
5590 GenCollectedHeap::StrongRootsScope srs(gch); |
0 | 5591 tsk.work(0); |
5592 } | |
5593 gch->set_par_threads(0); // 0 ==> non-parallel. | |
5594 // restore, single-threaded for now, any preserved marks | |
5595 // as a result of work_q overflow | |
5596 restore_preserved_marks_if_any(); | |
5597 } | |
5598 | |
5599 // Non-parallel version of remark | |
5600 void CMSCollector::do_remark_non_parallel() { | |
5601 ResourceMark rm; | |
5602 HandleMark hm; | |
5603 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
5604 MarkRefsIntoAndScanClosure | |
5605 mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable, | |
5606 &_markStack, &_revisitStack, this, | |
5607 false /* should_yield */, false /* not precleaning */); | |
5608 MarkFromDirtyCardsClosure | |
5609 markFromDirtyCardsClosure(this, _span, | |
5610 NULL, // space is set further below | |
5611 &_markBitMap, &_markStack, &_revisitStack, | |
5612 &mrias_cl); | |
5613 { | |
5614 TraceTime t("grey object rescan", PrintGCDetails, false, gclog_or_tty); | |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
143
diff
changeset
|
5615 // Iterate over the dirty cards, setting the corresponding bits in the |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
143
diff
changeset
|
5616 // mod union table. |
0 | 5617 { |
5618 ModUnionClosure modUnionClosure(&_modUnionTable); | |
5619 _ct->ct_bs()->dirty_card_iterate( | |
5620 _cmsGen->used_region(), | |
5621 &modUnionClosure); | |
5622 _ct->ct_bs()->dirty_card_iterate( | |
5623 _permGen->used_region(), | |
5624 &modUnionClosure); | |
5625 } | |
5626 // Having transferred these marks into the modUnionTable, we just need | |
5627 // to rescan the marked objects on the dirty cards in the modUnionTable. | |
5628 // The initial marking may have been done during an asynchronous | |
5629 // collection so there may be dirty bits in the mod-union table. | |
5630 const int alignment = | |
5631 CardTableModRefBS::card_size * BitsPerWord; | |
5632 { | |
5633 // ... First handle dirty cards in CMS gen | |
5634 markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace()); | |
5635 MemRegion ur = _cmsGen->used_region(); | |
5636 HeapWord* lb = ur.start(); | |
5637 HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment); | |
5638 MemRegion cms_span(lb, ub); | |
5639 _modUnionTable.dirty_range_iterate_clear(cms_span, | |
5640 &markFromDirtyCardsClosure); | |
5641 verify_work_stacks_empty(); | |
5642 if (PrintCMSStatistics != 0) { | |
5643 gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in cms gen) ", | |
5644 markFromDirtyCardsClosure.num_dirty_cards()); | |
5645 } | |
5646 } | |
5647 { | |
5648 // .. and then repeat for dirty cards in perm gen | |
5649 markFromDirtyCardsClosure.set_space(_permGen->cmsSpace()); | |
5650 MemRegion ur = _permGen->used_region(); | |
5651 HeapWord* lb = ur.start(); | |
5652 HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment); | |
5653 MemRegion perm_span(lb, ub); | |
5654 _modUnionTable.dirty_range_iterate_clear(perm_span, | |
5655 &markFromDirtyCardsClosure); | |
5656 verify_work_stacks_empty(); | |
5657 if (PrintCMSStatistics != 0) { | |
5658 gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in perm gen) ", | |
5659 markFromDirtyCardsClosure.num_dirty_cards()); | |
5660 } | |
5661 } | |
5662 } | |
5663 if (VerifyDuringGC && | |
5664 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { | |
5665 HandleMark hm; // Discard invalid handles created during verification | |
5666 Universe::verify(true); | |
5667 } | |
5668 { | |
5669 TraceTime t("root rescan", PrintGCDetails, false, gclog_or_tty); | |
5670 | |
5671 verify_work_stacks_empty(); | |
5672 | |
5673 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
798
diff
changeset
|
5674 GenCollectedHeap::StrongRootsScope srs(gch); |
0 | 5675 gch->gen_process_strong_roots(_cmsGen->level(), |
5676 true, // younger gens as roots | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
798
diff
changeset
|
5677 false, // use the local StrongRootsScope |
0 | 5678 true, // collecting perm gen |
5679 SharedHeap::ScanningOption(roots_scanning_options()), | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
798
diff
changeset
|
5680 &mrias_cl, |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
798
diff
changeset
|
5681 true, // walk code active on stacks |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
798
diff
changeset
|
5682 NULL); |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
798
diff
changeset
|
5683 assert(should_unload_classes() |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
798
diff
changeset
|
5684 || (roots_scanning_options() & SharedHeap::SO_CodeCache), |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
798
diff
changeset
|
5685 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops"); |
0 | 5686 } |
5687 verify_work_stacks_empty(); | |
5688 // Restore evacuated mark words, if any, used for overflow list links | |
5689 if (!CMSOverflowEarlyRestoration) { | |
5690 restore_preserved_marks_if_any(); | |
5691 } | |
5692 verify_overflow_empty(); | |
5693 } | |
5694 | |
5695 //////////////////////////////////////////////////////// | |
5696 // Parallel Reference Processing Task Proxy Class | |
5697 //////////////////////////////////////////////////////// | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
5698 class CMSRefProcTaskProxy: public AbstractGangTaskWOopQueues { |
0 | 5699 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask; |
5700 CMSCollector* _collector; | |
5701 CMSBitMap* _mark_bit_map; | |
143
b5489bb705c9
6662086: 6u4+, 7b11+: CMS never clears referents when -XX:+ParallelRefProcEnabled
ysr
parents:
113
diff
changeset
|
5702 const MemRegion _span; |
0 | 5703 ProcessTask& _task; |
5704 | |
5705 public: | |
5706 CMSRefProcTaskProxy(ProcessTask& task, | |
5707 CMSCollector* collector, | |
5708 const MemRegion& span, | |
5709 CMSBitMap* mark_bit_map, | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
5710 AbstractWorkGang* workers, |
0 | 5711 OopTaskQueueSet* task_queues): |
2369
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2365
diff
changeset
|
5712 // XXX Should superclass AGTWOQ also know about AWG since it knows |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2365
diff
changeset
|
5713 // about the task_queues used by the AWG? Then it could initialize |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2365
diff
changeset
|
5714 // the terminator() object. See 6984287. The set_for_termination() |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2365
diff
changeset
|
5715 // below is a temporary band-aid for the regression in 6984287. |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
5716 AbstractGangTaskWOopQueues("Process referents by policy in parallel", |
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
5717 task_queues), |
0 | 5718 _task(task), |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
5719 _collector(collector), _span(span), _mark_bit_map(mark_bit_map) |
2369
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2365
diff
changeset
|
5720 { |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2365
diff
changeset
|
5721 assert(_collector->_span.equals(_span) && !_span.is_empty(), |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2365
diff
changeset
|
5722 "Inconsistency in _span"); |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2365
diff
changeset
|
5723 set_for_termination(workers->active_workers()); |
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2365
diff
changeset
|
5724 } |
0 | 5725 |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
5726 OopTaskQueueSet* task_queues() { return queues(); } |
0 | 5727 |
5728 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); } | |
5729 | |
5730 void do_work_steal(int i, | |
5731 CMSParDrainMarkingStackClosure* drain, | |
5732 CMSParKeepAliveClosure* keep_alive, | |
5733 int* seed); | |
5734 | |
5735 virtual void work(int i); | |
5736 }; | |
5737 | |
5738 void CMSRefProcTaskProxy::work(int i) { | |
143
b5489bb705c9
6662086: 6u4+, 7b11+: CMS never clears referents when -XX:+ParallelRefProcEnabled
ysr
parents:
113
diff
changeset
|
5739 assert(_collector->_span.equals(_span), "Inconsistency in _span"); |
0 | 5740 CMSParKeepAliveClosure par_keep_alive(_collector, _span, |
935 | 5741 _mark_bit_map, |
5742 &_collector->_revisitStack, | |
5743 work_queue(i)); | |
0 | 5744 CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span, |
935 | 5745 _mark_bit_map, |
5746 &_collector->_revisitStack, | |
5747 work_queue(i)); | |
143
b5489bb705c9
6662086: 6u4+, 7b11+: CMS never clears referents when -XX:+ParallelRefProcEnabled
ysr
parents:
113
diff
changeset
|
5748 CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map); |
0 | 5749 _task.work(i, is_alive_closure, par_keep_alive, par_drain_stack); |
5750 if (_task.marks_oops_alive()) { | |
5751 do_work_steal(i, &par_drain_stack, &par_keep_alive, | |
5752 _collector->hash_seed(i)); | |
5753 } | |
5754 assert(work_queue(i)->size() == 0, "work_queue should be empty"); | |
5755 assert(_collector->_overflow_list == NULL, "non-empty _overflow_list"); | |
5756 } | |
5757 | |
5758 class CMSRefEnqueueTaskProxy: public AbstractGangTask { | |
5759 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask; | |
5760 EnqueueTask& _task; | |
5761 | |
5762 public: | |
5763 CMSRefEnqueueTaskProxy(EnqueueTask& task) | |
5764 : AbstractGangTask("Enqueue reference objects in parallel"), | |
5765 _task(task) | |
5766 { } | |
5767 | |
5768 virtual void work(int i) | |
5769 { | |
5770 _task.work(i); | |
5771 } | |
5772 }; | |
5773 | |
5774 CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector, | |
935 | 5775 MemRegion span, CMSBitMap* bit_map, CMSMarkStack* revisit_stack, |
5776 OopTaskQueue* work_queue): | |
5777 Par_KlassRememberingOopClosure(collector, NULL, revisit_stack), | |
0 | 5778 _span(span), |
5779 _bit_map(bit_map), | |
5780 _work_queue(work_queue), | |
935 | 5781 _mark_and_push(collector, span, bit_map, revisit_stack, work_queue), |
0 | 5782 _low_water_mark(MIN2((uint)(work_queue->max_elems()/4), |
5783 (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))) | |
5784 { } | |
5785 | |
5786 // . see if we can share work_queues with ParNew? XXX | |
5787 void CMSRefProcTaskProxy::do_work_steal(int i, | |
5788 CMSParDrainMarkingStackClosure* drain, | |
5789 CMSParKeepAliveClosure* keep_alive, | |
5790 int* seed) { | |
5791 OopTaskQueue* work_q = work_queue(i); | |
5792 NOT_PRODUCT(int num_steals = 0;) | |
5793 oop obj_to_scan; | |
5794 | |
5795 while (true) { | |
5796 // Completely finish any left over work from (an) earlier round(s) | |
5797 drain->trim_queue(0); | |
679
cea947c8a988
6819891: ParNew: Fix work queue overflow code to deal correctly with +UseCompressedOops
ysr
parents:
628
diff
changeset
|
5798 size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4, |
cea947c8a988
6819891: ParNew: Fix work queue overflow code to deal correctly with +UseCompressedOops
ysr
parents:
628
diff
changeset
|
5799 (size_t)ParGCDesiredObjsFromOverflowList); |
0 | 5800 // Now check if there's any work in the overflow list |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
5801 // Passing ParallelGCThreads as the third parameter, no_of_gc_threads, |
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
5802 // only affects the number of attempts made to get work from the |
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
5803 // overflow list and does not affect the number of workers. Just |
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
5804 // pass ParallelGCThreads so this behavior is unchanged. |
0 | 5805 if (_collector->par_take_from_overflow_list(num_from_overflow_list, |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
5806 work_q, |
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
5807 ParallelGCThreads)) { |
0 | 5808 // Found something in global overflow list; |
5809 // not yet ready to go stealing work from others. | |
5810 // We'd like to assert(work_q->size() != 0, ...) | |
5811 // because we just took work from the overflow list, | |
5812 // but of course we can't, since all of that might have | |
5813 // been already stolen from us. | |
5814 continue; | |
5815 } | |
5816 // Verify that we have no work before we resort to stealing | |
5817 assert(work_q->size() == 0, "Have work, shouldn't steal"); | |
5818 // Try to steal from other queues that have work | |
5819 if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) { | |
5820 NOT_PRODUCT(num_steals++;) | |
5821 assert(obj_to_scan->is_oop(), "Oops, not an oop!"); | |
5822 assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?"); | |
5823 // Do scanning work | |
5824 obj_to_scan->oop_iterate(keep_alive); | |
5825 // Loop around, finish this work, and try to steal some more | |
5826 } else if (terminator()->offer_termination()) { | |
5827 break; // nirvana from the infinite cycle | |
5828 } | |
5829 } | |
5830 NOT_PRODUCT( | |
5831 if (PrintCMSStatistics != 0) { | |
5832 gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals); | |
5833 } | |
5834 ) | |
5835 } | |
5836 | |
5837 void CMSRefProcTaskExecutor::execute(ProcessTask& task) | |
5838 { | |
5839 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
5840 FlexibleWorkGang* workers = gch->workers(); |
0 | 5841 assert(workers != NULL, "Need parallel worker threads."); |
5842 CMSRefProcTaskProxy rp_task(task, &_collector, | |
5843 _collector.ref_processor()->span(), | |
5844 _collector.markBitMap(), | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
5845 workers, _collector.task_queues()); |
0 | 5846 workers->run_task(&rp_task); |
5847 } | |
5848 | |
5849 void CMSRefProcTaskExecutor::execute(EnqueueTask& task) | |
5850 { | |
5851 | |
5852 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
5853 FlexibleWorkGang* workers = gch->workers(); |
0 | 5854 assert(workers != NULL, "Need parallel worker threads."); |
5855 CMSRefEnqueueTaskProxy enq_task(task); | |
5856 workers->run_task(&enq_task); | |
5857 } | |
5858 | |
5859 void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) { | |
5860 | |
5861 ResourceMark rm; | |
5862 HandleMark hm; | |
5863 | |
5864 ReferenceProcessor* rp = ref_processor(); | |
5865 assert(rp->span().equals(_span), "Spans should be equal"); | |
453
c96030fff130
6684579: SoftReference processing can be made more efficient
ysr
parents:
452
diff
changeset
|
5866 assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete"); |
c96030fff130
6684579: SoftReference processing can be made more efficient
ysr
parents:
452
diff
changeset
|
5867 // Process weak references. |
457
27a80744a83b
6778647: snap(), snap_policy() should be renamed setup(), setup_policy()
ysr
parents:
453
diff
changeset
|
5868 rp->setup_policy(clear_all_soft_refs); |
453
c96030fff130
6684579: SoftReference processing can be made more efficient
ysr
parents:
452
diff
changeset
|
5869 verify_work_stacks_empty(); |
c96030fff130
6684579: SoftReference processing can be made more efficient
ysr
parents:
452
diff
changeset
|
5870 |
0 | 5871 CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap, |
935 | 5872 &_markStack, &_revisitStack, |
5873 false /* !preclean */); | |
0 | 5874 CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this, |
5875 _span, &_markBitMap, &_markStack, | |
452
00b023ae2d78
6722113: CMS: Incorrect overflow handling during precleaning of Reference lists
ysr
parents:
360
diff
changeset
|
5876 &cmsKeepAliveClosure, false /* !preclean */); |
0 | 5877 { |
5878 TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty); | |
5879 if (rp->processing_is_mt()) { | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
5880 // Set the degree of MT here. If the discovery is done MT, there |
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
5881 // may have been a different number of threads doing the discovery |
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
5882 // and a different number of discovered lists may have Ref objects. |
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
5883 // That is OK as long as the Reference lists are balanced (see |
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
5884 // balance_all_queues() and balance_queues()). |
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
5885 |
2369
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2365
diff
changeset
|
5886 rp->set_active_mt_degree(ParallelGCThreads); |
0 | 5887 CMSRefProcTaskExecutor task_executor(*this); |
453
c96030fff130
6684579: SoftReference processing can be made more efficient
ysr
parents:
452
diff
changeset
|
5888 rp->process_discovered_references(&_is_alive_closure, |
0 | 5889 &cmsKeepAliveClosure, |
5890 &cmsDrainMarkingStackClosure, | |
5891 &task_executor); | |
5892 } else { | |
453
c96030fff130
6684579: SoftReference processing can be made more efficient
ysr
parents:
452
diff
changeset
|
5893 rp->process_discovered_references(&_is_alive_closure, |
0 | 5894 &cmsKeepAliveClosure, |
5895 &cmsDrainMarkingStackClosure, | |
5896 NULL); | |
5897 } | |
5898 verify_work_stacks_empty(); | |
5899 } | |
5900 | |
94
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
5901 if (should_unload_classes()) { |
0 | 5902 { |
5903 TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty); | |
5904 | |
5905 // Follow SystemDictionary roots and unload classes | |
5906 bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure); | |
5907 | |
5908 // Follow CodeCache roots and unload any methods marked for unloading | |
5909 CodeCache::do_unloading(&_is_alive_closure, | |
5910 &cmsKeepAliveClosure, | |
5911 purged_class); | |
5912 | |
5913 cmsDrainMarkingStackClosure.do_void(); | |
5914 verify_work_stacks_empty(); | |
5915 | |
5916 // Update subklass/sibling/implementor links in KlassKlass descendants | |
5917 assert(!_revisitStack.isEmpty(), "revisit stack should not be empty"); | |
5918 oop k; | |
5919 while ((k = _revisitStack.pop()) != NULL) { | |
5920 ((Klass*)(oopDesc*)k)->follow_weak_klass_links( | |
5921 &_is_alive_closure, | |
5922 &cmsKeepAliveClosure); | |
5923 } | |
5924 assert(!ClassUnloading || | |
5925 (_markStack.isEmpty() && overflow_list_is_empty()), | |
5926 "Should not have found new reachable objects"); | |
5927 assert(_revisitStack.isEmpty(), "revisit stack should have been drained"); | |
5928 cmsDrainMarkingStackClosure.do_void(); | |
5929 verify_work_stacks_empty(); | |
5930 } | |
5931 | |
5932 { | |
2379
b099aaf51bf8
6962931: move interned strings out of the perm gen
jcoomes
parents:
2369
diff
changeset
|
5933 TraceTime t("scrub symbol table", PrintGCDetails, false, gclog_or_tty); |
2177
3582bf76420e
6990754: Use native memory and reference counting to implement SymbolTable
coleenp
parents:
2147
diff
changeset
|
5934 // Clean up unreferenced symbols in symbol table. |
3582bf76420e
6990754: Use native memory and reference counting to implement SymbolTable
coleenp
parents:
2147
diff
changeset
|
5935 SymbolTable::unlink(); |
0 | 5936 } |
5937 } | |
5938 | |
2379
b099aaf51bf8
6962931: move interned strings out of the perm gen
jcoomes
parents:
2369
diff
changeset
|
5939 if (should_unload_classes() || !JavaObjectsInPerm) { |
b099aaf51bf8
6962931: move interned strings out of the perm gen
jcoomes
parents:
2369
diff
changeset
|
5940 TraceTime t("scrub string table", PrintGCDetails, false, gclog_or_tty); |
b099aaf51bf8
6962931: move interned strings out of the perm gen
jcoomes
parents:
2369
diff
changeset
|
5941 // Now clean up stale oops in StringTable |
b099aaf51bf8
6962931: move interned strings out of the perm gen
jcoomes
parents:
2369
diff
changeset
|
5942 StringTable::unlink(&_is_alive_closure); |
b099aaf51bf8
6962931: move interned strings out of the perm gen
jcoomes
parents:
2369
diff
changeset
|
5943 } |
b099aaf51bf8
6962931: move interned strings out of the perm gen
jcoomes
parents:
2369
diff
changeset
|
5944 |
0 | 5945 verify_work_stacks_empty(); |
5946 // Restore any preserved marks as a result of mark stack or | |
5947 // work queue overflow | |
5948 restore_preserved_marks_if_any(); // done single-threaded for now | |
5949 | |
5950 rp->set_enqueuing_is_done(true); | |
5951 if (rp->processing_is_mt()) { | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1777
diff
changeset
|
5952 rp->balance_all_queues(); |
0 | 5953 CMSRefProcTaskExecutor task_executor(*this); |
5954 rp->enqueue_discovered_references(&task_executor); | |
5955 } else { | |
5956 rp->enqueue_discovered_references(NULL); | |
5957 } | |
5958 rp->verify_no_references_recorded(); | |
5959 assert(!rp->discovery_enabled(), "should have been disabled"); | |
5960 } | |
5961 | |
5962 #ifndef PRODUCT | |
5963 void CMSCollector::check_correct_thread_executing() { | |
5964 Thread* t = Thread::current(); | |
5965 // Only the VM thread or the CMS thread should be here. | |
5966 assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(), | |
5967 "Unexpected thread type"); | |
5968 // If this is the vm thread, the foreground process | |
5969 // should not be waiting. Note that _foregroundGCIsActive is | |
5970 // true while the foreground collector is waiting. | |
5971 if (_foregroundGCShouldWait) { | |
5972 // We cannot be the VM thread | |
5973 assert(t->is_ConcurrentGC_thread(), | |
5974 "Should be CMS thread"); | |
5975 } else { | |
5976 // We can be the CMS thread only if we are in a stop-world | |
5977 // phase of CMS collection. | |
5978 if (t->is_ConcurrentGC_thread()) { | |
5979 assert(_collectorState == InitialMarking || | |
5980 _collectorState == FinalMarking, | |
5981 "Should be a stop-world phase"); | |
5982 // The CMS thread should be holding the CMS_token. | |
5983 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), | |
5984 "Potential interference with concurrently " | |
5985 "executing VM thread"); | |
5986 } | |
5987 } | |
5988 } | |
5989 #endif | |
5990 | |
5991 void CMSCollector::sweep(bool asynch) { | |
5992 assert(_collectorState == Sweeping, "just checking"); | |
5993 check_correct_thread_executing(); | |
5994 verify_work_stacks_empty(); | |
5995 verify_overflow_empty(); | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
5996 increment_sweep_count(); |
1703
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
5997 TraceCMSMemoryManagerStats tms(_collectorState); |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
5998 |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
5999 _inter_sweep_timer.stop(); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
6000 _inter_sweep_estimate.sample(_inter_sweep_timer.seconds()); |
0 | 6001 size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free()); |
6002 | |
6003 // PermGen verification support: If perm gen sweeping is disabled in | |
6004 // this cycle, we preserve the perm gen object "deadness" information | |
6005 // in the perm_gen_verify_bit_map. In order to do that we traverse | |
6006 // all blocks in perm gen and mark all dead objects. | |
94
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
6007 if (verifying() && !should_unload_classes()) { |
0 | 6008 assert(perm_gen_verify_bit_map()->sizeInBits() != 0, |
6009 "Should have already been allocated"); | |
6010 MarkDeadObjectsClosure mdo(this, _permGen->cmsSpace(), | |
6011 markBitMap(), perm_gen_verify_bit_map()); | |
7
2faf283ce688
6621144: CMS: assertion failure "is_cms_thread == Thread::current()->is_ConcurrentGC_thread()"
ysr
parents:
0
diff
changeset
|
6012 if (asynch) { |
2faf283ce688
6621144: CMS: assertion failure "is_cms_thread == Thread::current()->is_ConcurrentGC_thread()"
ysr
parents:
0
diff
changeset
|
6013 CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(), |
2faf283ce688
6621144: CMS: assertion failure "is_cms_thread == Thread::current()->is_ConcurrentGC_thread()"
ysr
parents:
0
diff
changeset
|
6014 bitMapLock()); |
2faf283ce688
6621144: CMS: assertion failure "is_cms_thread == Thread::current()->is_ConcurrentGC_thread()"
ysr
parents:
0
diff
changeset
|
6015 _permGen->cmsSpace()->blk_iterate(&mdo); |
2faf283ce688
6621144: CMS: assertion failure "is_cms_thread == Thread::current()->is_ConcurrentGC_thread()"
ysr
parents:
0
diff
changeset
|
6016 } else { |
2faf283ce688
6621144: CMS: assertion failure "is_cms_thread == Thread::current()->is_ConcurrentGC_thread()"
ysr
parents:
0
diff
changeset
|
6017 // In the case of synchronous sweep, we already have |
2faf283ce688
6621144: CMS: assertion failure "is_cms_thread == Thread::current()->is_ConcurrentGC_thread()"
ysr
parents:
0
diff
changeset
|
6018 // the requisite locks/tokens. |
2faf283ce688
6621144: CMS: assertion failure "is_cms_thread == Thread::current()->is_ConcurrentGC_thread()"
ysr
parents:
0
diff
changeset
|
6019 _permGen->cmsSpace()->blk_iterate(&mdo); |
2faf283ce688
6621144: CMS: assertion failure "is_cms_thread == Thread::current()->is_ConcurrentGC_thread()"
ysr
parents:
0
diff
changeset
|
6020 } |
0 | 6021 } |
6022 | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
6023 assert(!_intra_sweep_timer.is_active(), "Should not be active"); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
6024 _intra_sweep_timer.reset(); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
6025 _intra_sweep_timer.start(); |
0 | 6026 if (asynch) { |
6027 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); | |
6028 CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails); | |
6029 // First sweep the old gen then the perm gen | |
6030 { | |
6031 CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(), | |
6032 bitMapLock()); | |
6033 sweepWork(_cmsGen, asynch); | |
6034 } | |
6035 | |
6036 // Now repeat for perm gen | |
94
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
6037 if (should_unload_classes()) { |
0 | 6038 CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(), |
6039 bitMapLock()); | |
6040 sweepWork(_permGen, asynch); | |
6041 } | |
6042 | |
6043 // Update Universe::_heap_*_at_gc figures. | |
6044 // We need all the free list locks to make the abstract state | |
6045 // transition from Sweeping to Resetting. See detailed note | |
6046 // further below. | |
6047 { | |
6048 CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(), | |
6049 _permGen->freelistLock()); | |
6050 // Update heap occupancy information which is used as | |
6051 // input to soft ref clearing policy at the next gc. | |
6052 Universe::update_heap_info_at_gc(); | |
6053 _collectorState = Resizing; | |
6054 } | |
6055 } else { | |
6056 // already have needed locks | |
6057 sweepWork(_cmsGen, asynch); | |
6058 | |
94
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
6059 if (should_unload_classes()) { |
0 | 6060 sweepWork(_permGen, asynch); |
6061 } | |
6062 // Update heap occupancy information which is used as | |
6063 // input to soft ref clearing policy at the next gc. | |
6064 Universe::update_heap_info_at_gc(); | |
6065 _collectorState = Resizing; | |
6066 } | |
6067 verify_work_stacks_empty(); | |
6068 verify_overflow_empty(); | |
6069 | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
6070 _intra_sweep_timer.stop(); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
6071 _intra_sweep_estimate.sample(_intra_sweep_timer.seconds()); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
6072 |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
6073 _inter_sweep_timer.reset(); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
6074 _inter_sweep_timer.start(); |
0 | 6075 |
6076 update_time_of_last_gc(os::javaTimeMillis()); | |
6077 | |
6078 // NOTE on abstract state transitions: | |
6079 // Mutators allocate-live and/or mark the mod-union table dirty | |
6080 // based on the state of the collection. The former is done in | |
6081 // the interval [Marking, Sweeping] and the latter in the interval | |
6082 // [Marking, Sweeping). Thus the transitions into the Marking state | |
6083 // and out of the Sweeping state must be synchronously visible | |
6084 // globally to the mutators. | |
6085 // The transition into the Marking state happens with the world | |
6086 // stopped so the mutators will globally see it. Sweeping is | |
6087 // done asynchronously by the background collector so the transition | |
6088 // from the Sweeping state to the Resizing state must be done | |
6089 // under the freelistLock (as is the check for whether to | |
6090 // allocate-live and whether to dirty the mod-union table). | |
6091 assert(_collectorState == Resizing, "Change of collector state to" | |
6092 " Resizing must be done under the freelistLocks (plural)"); | |
6093 | |
1888
a7214d79fcf1
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
1837
diff
changeset
|
6094 // Now that sweeping has been completed, we clear |
a7214d79fcf1
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
1837
diff
changeset
|
6095 // the incremental_collection_failed flag, |
0 | 6096 // thus inviting a younger gen collection to promote into |
6097 // this generation. If such a promotion may still fail, | |
6098 // the flag will be set again when a young collection is | |
6099 // attempted. | |
6100 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
1888
a7214d79fcf1
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
1837
diff
changeset
|
6101 gch->clear_incremental_collection_failed(); // Worth retrying as fresh space may have been freed up |
0 | 6102 gch->update_full_collections_completed(_collection_count_start); |
6103 } | |
6104 | |
6105 // FIX ME!!! Looks like this belongs in CFLSpace, with | |
6106 // CMSGen merely delegating to it. | |
6107 void ConcurrentMarkSweepGeneration::setNearLargestChunk() { | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
6108 double nearLargestPercent = FLSLargestBlockCoalesceProximity; |
0 | 6109 HeapWord* minAddr = _cmsSpace->bottom(); |
6110 HeapWord* largestAddr = | |
6111 (HeapWord*) _cmsSpace->dictionary()->findLargestDict(); | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
6112 if (largestAddr == NULL) { |
0 | 6113 // The dictionary appears to be empty. In this case |
6114 // try to coalesce at the end of the heap. | |
6115 largestAddr = _cmsSpace->end(); | |
6116 } | |
6117 size_t largestOffset = pointer_delta(largestAddr, minAddr); | |
6118 size_t nearLargestOffset = | |
6119 (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize; | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
6120 if (PrintFLSStatistics != 0) { |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
6121 gclog_or_tty->print_cr( |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
6122 "CMS: Large Block: " PTR_FORMAT ";" |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
6123 " Proximity: " PTR_FORMAT " -> " PTR_FORMAT, |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
6124 largestAddr, |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
6125 _cmsSpace->nearLargestChunk(), minAddr + nearLargestOffset); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
6126 } |
0 | 6127 _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset); |
6128 } | |
6129 | |
6130 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) { | |
6131 return addr >= _cmsSpace->nearLargestChunk(); | |
6132 } | |
6133 | |
6134 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() { | |
6135 return _cmsSpace->find_chunk_at_end(); | |
6136 } | |
6137 | |
6138 void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level, | |
6139 bool full) { | |
6140 // The next lower level has been collected. Gather any statistics | |
6141 // that are of interest at this point. | |
6142 if (!full && (current_level + 1) == level()) { | |
6143 // Gather statistics on the young generation collection. | |
6144 collector()->stats().record_gc0_end(used()); | |
6145 } | |
6146 } | |
6147 | |
6148 CMSAdaptiveSizePolicy* ConcurrentMarkSweepGeneration::size_policy() { | |
6149 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
6150 assert(gch->kind() == CollectedHeap::GenCollectedHeap, | |
6151 "Wrong type of heap"); | |
6152 CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*) | |
6153 gch->gen_policy()->size_policy(); | |
6154 assert(sp->is_gc_cms_adaptive_size_policy(), | |
6155 "Wrong type of size policy"); | |
6156 return sp; | |
6157 } | |
6158 | |
6159 void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() { | |
6160 if (PrintGCDetails && Verbose) { | |
6161 gclog_or_tty->print("Rotate from %d ", _debug_collection_type); | |
6162 } | |
6163 _debug_collection_type = (CollectionTypes) (_debug_collection_type + 1); | |
6164 _debug_collection_type = | |
6165 (CollectionTypes) (_debug_collection_type % Unknown_collection_type); | |
6166 if (PrintGCDetails && Verbose) { | |
6167 gclog_or_tty->print_cr("to %d ", _debug_collection_type); | |
6168 } | |
6169 } | |
6170 | |
6171 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen, | |
6172 bool asynch) { | |
6173 // We iterate over the space(s) underlying this generation, | |
6174 // checking the mark bit map to see if the bits corresponding | |
6175 // to specific blocks are marked or not. Blocks that are | |
6176 // marked are live and are not swept up. All remaining blocks | |
6177 // are swept up, with coalescing on-the-fly as we sweep up | |
6178 // contiguous free and/or garbage blocks: | |
6179 // We need to ensure that the sweeper synchronizes with allocators | |
6180 // and stop-the-world collectors. In particular, the following | |
6181 // locks are used: | |
6182 // . CMS token: if this is held, a stop the world collection cannot occur | |
6183 // . freelistLock: if this is held no allocation can occur from this | |
6184 // generation by another thread | |
6185 // . bitMapLock: if this is held, no other thread can access or update | |
6186 // | |
6187 | |
6188 // Note that we need to hold the freelistLock if we use | |
6189 // block iterate below; else the iterator might go awry if | |
6190 // a mutator (or promotion) causes block contents to change | |
6191 // (for instance if the allocator divvies up a block). | |
6192 // If we hold the free list lock, for all practical purposes | |
6193 // young generation GC's can't occur (they'll usually need to | |
6194 // promote), so we might as well prevent all young generation | |
6195 // GC's while we do a sweeping step. For the same reason, we might | |
6196 // as well take the bit map lock for the entire duration | |
6197 | |
6198 // check that we hold the requisite locks | |
6199 assert(have_cms_token(), "Should hold cms token"); | |
6200 assert( (asynch && ConcurrentMarkSweepThread::cms_thread_has_cms_token()) | |
6201 || (!asynch && ConcurrentMarkSweepThread::vm_thread_has_cms_token()), | |
6202 "Should possess CMS token to sweep"); | |
6203 assert_lock_strong(gen->freelistLock()); | |
6204 assert_lock_strong(bitMapLock()); | |
6205 | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
6206 assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context"); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
6207 assert(_intra_sweep_timer.is_active(), "Was switched on in an outer context"); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
6208 gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()), |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
6209 _inter_sweep_estimate.padded_average(), |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
6210 _intra_sweep_estimate.padded_average()); |
0 | 6211 gen->setNearLargestChunk(); |
6212 | |
6213 { | |
6214 SweepClosure sweepClosure(this, gen, &_markBitMap, | |
6215 CMSYield && asynch); | |
6216 gen->cmsSpace()->blk_iterate_careful(&sweepClosure); | |
6217 // We need to free-up/coalesce garbage/blocks from a | |
6218 // co-terminal free run. This is done in the SweepClosure | |
6219 // destructor; so, do not remove this scope, else the | |
6220 // end-of-sweep-census below will be off by a little bit. | |
6221 } | |
6222 gen->cmsSpace()->sweep_completed(); | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
1085
diff
changeset
|
6223 gen->cmsSpace()->endSweepFLCensus(sweep_count()); |
94
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
6224 if (should_unload_classes()) { // unloaded classes this cycle, |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
6225 _concurrent_cycles_since_last_unload = 0; // ... reset count |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
6226 } else { // did not unload classes, |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
6227 _concurrent_cycles_since_last_unload++; // ... increment count |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
9
diff
changeset
|
6228 } |
0 | 6229 } |
6230 | |
6231 // Reset CMS data structures (for now just the marking bit map) | |
6232 // preparatory for the next cycle. | |
6233 void CMSCollector::reset(bool asynch) { | |
6234 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
6235 CMSAdaptiveSizePolicy* sp = size_policy(); | |
6236 AdaptiveSizePolicyOutput(sp, gch->total_collections()); | |
6237 if (asynch) { | |
6238 CMSTokenSyncWithLocks ts(true, bitMapLock()); | |
6239 | |
6240 // If the state is not "Resetting", the foreground thread | |
6241 // has done a collection and the resetting. | |
6242 if (_collectorState != Resetting) { | |
6243 assert(_collectorState == Idling, "The state should only change" | |
6244 " because the foreground collector has finished the collection"); | |
6245 return; | |
6246 } | |
6247 | |
6248 // Clear the mark bitmap (no grey objects to start with) | |
6249 // for the next cycle. | |
6250 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); | |
6251 CMSPhaseAccounting cmspa(this, "reset", !PrintGCDetails); | |
6252 | |
6253 HeapWord* curAddr = _markBitMap.startWord(); | |
6254 while (curAddr < _markBitMap.endWord()) { | |
6255 size_t remaining = pointer_delta(_markBitMap.endWord(), curAddr); | |
6256 MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining)); | |
6257 _markBitMap.clear_large_range(chunk); | |
6258 if (ConcurrentMarkSweepThread::should_yield() && | |
6259 !foregroundGCIsActive() && | |
6260 CMSYield) { | |
6261 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), | |
6262 "CMS thread should hold CMS token"); | |
6263 assert_lock_strong(bitMapLock()); | |
6264 bitMapLock()->unlock(); | |
6265 ConcurrentMarkSweepThread::desynchronize(true); | |
6266 ConcurrentMarkSweepThread::acknowledge_yield_request(); | |
6267 stopTimer(); | |
6268 if (PrintCMSStatistics != 0) { | |
6269 incrementYields(); | |
6270 } | |
6271 icms_wait(); | |
6272 | |
6273 // See the comment in coordinator_yield() | |
6274 for (unsigned i = 0; i < CMSYieldSleepCount && | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6275 ConcurrentMarkSweepThread::should_yield() && |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6276 !CMSCollector::foregroundGCIsActive(); ++i) { |
0 | 6277 os::sleep(Thread::current(), 1, false); |
6278 ConcurrentMarkSweepThread::acknowledge_yield_request(); | |
6279 } | |
6280 | |
6281 ConcurrentMarkSweepThread::synchronize(true); | |
6282 bitMapLock()->lock_without_safepoint_check(); | |
6283 startTimer(); | |
6284 } | |
6285 curAddr = chunk.end(); | |
6286 } | |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1289
diff
changeset
|
6287 // A successful mostly concurrent collection has been done. |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1289
diff
changeset
|
6288 // Because only the full (i.e., concurrent mode failure) collections |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1289
diff
changeset
|
6289 // are being measured for gc overhead limits, clean the "near" flag |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1289
diff
changeset
|
6290 // and count. |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1289
diff
changeset
|
6291 sp->reset_gc_overhead_limit_count(); |
0 | 6292 _collectorState = Idling; |
6293 } else { | |
6294 // already have the lock | |
6295 assert(_collectorState == Resetting, "just checking"); | |
6296 assert_lock_strong(bitMapLock()); | |
6297 _markBitMap.clear_all(); | |
6298 _collectorState = Idling; | |
6299 } | |
6300 | |
6301 // Stop incremental mode after a cycle completes, so that any future cycles | |
6302 // are triggered by allocation. | |
6303 stop_icms(); | |
6304 | |
6305 NOT_PRODUCT( | |
6306 if (RotateCMSCollectionTypes) { | |
6307 _cmsGen->rotate_debug_collection_type(); | |
6308 } | |
6309 ) | |
6310 } | |
6311 | |
6312 void CMSCollector::do_CMS_operation(CMS_op_type op) { | |
6313 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); | |
6314 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); | |
6315 TraceTime t("GC", PrintGC, !PrintGCDetails, gclog_or_tty); | |
6316 TraceCollectorStats tcs(counters()); | |
6317 | |
6318 switch (op) { | |
6319 case CMS_op_checkpointRootsInitial: { | |
2125
7246a374a9f2
6458402: 3 jvmti tests fail with CMS and +ExplicitGCInvokesConcurrent
kamg
parents:
1994
diff
changeset
|
6320 SvcGCMarker sgcm(SvcGCMarker::OTHER); |
0 | 6321 checkpointRootsInitial(true); // asynch |
6322 if (PrintGC) { | |
6323 _cmsGen->printOccupancy("initial-mark"); | |
6324 } | |
6325 break; | |
6326 } | |
6327 case CMS_op_checkpointRootsFinal: { | |
2125
7246a374a9f2
6458402: 3 jvmti tests fail with CMS and +ExplicitGCInvokesConcurrent
kamg
parents:
1994
diff
changeset
|
6328 SvcGCMarker sgcm(SvcGCMarker::OTHER); |
0 | 6329 checkpointRootsFinal(true, // asynch |
6330 false, // !clear_all_soft_refs | |
6331 false); // !init_mark_was_synchronous | |
6332 if (PrintGC) { | |
6333 _cmsGen->printOccupancy("remark"); | |
6334 } | |
6335 break; | |
6336 } | |
6337 default: | |
6338 fatal("No such CMS_op"); | |
6339 } | |
6340 } | |
6341 | |
6342 #ifndef PRODUCT | |
6343 size_t const CMSCollector::skip_header_HeapWords() { | |
6344 return FreeChunk::header_size(); | |
6345 } | |
6346 | |
6347 // Try and collect here conditions that should hold when | |
6348 // CMS thread is exiting. The idea is that the foreground GC | |
6349 // thread should not be blocked if it wants to terminate | |
6350 // the CMS thread and yet continue to run the VM for a while | |
6351 // after that. | |
6352 void CMSCollector::verify_ok_to_terminate() const { | |
6353 assert(Thread::current()->is_ConcurrentGC_thread(), | |
6354 "should be called by CMS thread"); | |
6355 assert(!_foregroundGCShouldWait, "should be false"); | |
6356 // We could check here that all the various low-level locks | |
6357 // are not held by the CMS thread, but that is overkill; see | |
6358 // also CMSThread::verify_ok_to_terminate() where the CGC_lock | |
6359 // is checked. | |
6360 } | |
6361 #endif | |
6362 | |
6363 size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const { | |
453
c96030fff130
6684579: SoftReference processing can be made more efficient
ysr
parents:
452
diff
changeset
|
6364 assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1), |
c96030fff130
6684579: SoftReference processing can be made more efficient
ysr
parents:
452
diff
changeset
|
6365 "missing Printezis mark?"); |
0 | 6366 HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2); |
6367 size_t size = pointer_delta(nextOneAddr + 1, addr); | |
6368 assert(size == CompactibleFreeListSpace::adjustObjectSize(size), | |
6369 "alignment problem"); | |
6370 assert(size >= 3, "Necessary for Printezis marks to work"); | |
6371 return size; | |
6372 } | |
6373 | |
6374 // A variant of the above (block_size_using_printezis_bits()) except | |
6375 // that we return 0 if the P-bits are not yet set. | |
6376 size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const { | |
2226
c5a923563727
6912621: iCMS: Error: assert(_markBitMap.isMarked(addr + 1),"Missing Printezis bit?")
ysr
parents:
2177
diff
changeset
|
6377 if (_markBitMap.isMarked(addr + 1)) { |
c5a923563727
6912621: iCMS: Error: assert(_markBitMap.isMarked(addr + 1),"Missing Printezis bit?")
ysr
parents:
2177
diff
changeset
|
6378 assert(_markBitMap.isMarked(addr), "P-bit can be set only for marked objects"); |
0 | 6379 HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2); |
6380 size_t size = pointer_delta(nextOneAddr + 1, addr); | |
6381 assert(size == CompactibleFreeListSpace::adjustObjectSize(size), | |
6382 "alignment problem"); | |
6383 assert(size >= 3, "Necessary for Printezis marks to work"); | |
6384 return size; | |
2226
c5a923563727
6912621: iCMS: Error: assert(_markBitMap.isMarked(addr + 1),"Missing Printezis bit?")
ysr
parents:
2177
diff
changeset
|
6385 } |
c5a923563727
6912621: iCMS: Error: assert(_markBitMap.isMarked(addr + 1),"Missing Printezis bit?")
ysr
parents:
2177
diff
changeset
|
6386 return 0; |
0 | 6387 } |
6388 | |
6389 HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const { | |
6390 size_t sz = 0; | |
6391 oop p = (oop)addr; | |
187 | 6392 if (p->klass_or_null() != NULL && p->is_parsable()) { |
0 | 6393 sz = CompactibleFreeListSpace::adjustObjectSize(p->size()); |
6394 } else { | |
6395 sz = block_size_using_printezis_bits(addr); | |
6396 } | |
6397 assert(sz > 0, "size must be nonzero"); | |
6398 HeapWord* next_block = addr + sz; | |
6399 HeapWord* next_card = (HeapWord*)round_to((uintptr_t)next_block, | |
6400 CardTableModRefBS::card_size); | |
6401 assert(round_down((uintptr_t)addr, CardTableModRefBS::card_size) < | |
6402 round_down((uintptr_t)next_card, CardTableModRefBS::card_size), | |
6403 "must be different cards"); | |
6404 return next_card; | |
6405 } | |
6406 | |
6407 | |
6408 // CMS Bit Map Wrapper ///////////////////////////////////////// | |
6409 | |
6410 // Construct a CMS bit map infrastructure, but don't create the | |
6411 // bit vector itself. That is done by a separate call CMSBitMap::allocate() | |
6412 // further below. | |
6413 CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name): | |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
143
diff
changeset
|
6414 _bm(), |
0 | 6415 _shifter(shifter), |
6416 _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true) : NULL) | |
6417 { | |
6418 _bmStartWord = 0; | |
6419 _bmWordSize = 0; | |
6420 } | |
6421 | |
6422 bool CMSBitMap::allocate(MemRegion mr) { | |
6423 _bmStartWord = mr.start(); | |
6424 _bmWordSize = mr.word_size(); | |
6425 ReservedSpace brs(ReservedSpace::allocation_align_size_up( | |
6426 (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1)); | |
6427 if (!brs.is_reserved()) { | |
6428 warning("CMS bit map allocation failure"); | |
6429 return false; | |
6430 } | |
6431 // For now we'll just commit all of the bit map up fromt. | |
6432 // Later on we'll try to be more parsimonious with swap. | |
6433 if (!_virtual_space.initialize(brs, brs.size())) { | |
6434 warning("CMS bit map backing store failure"); | |
6435 return false; | |
6436 } | |
6437 assert(_virtual_space.committed_size() == brs.size(), | |
6438 "didn't reserve backing store for all of CMS bit map?"); | |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
143
diff
changeset
|
6439 _bm.set_map((BitMap::bm_word_t*)_virtual_space.low()); |
0 | 6440 assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >= |
6441 _bmWordSize, "inconsistency in bit map sizing"); | |
6442 _bm.set_size(_bmWordSize >> _shifter); | |
6443 | |
6444 // bm.clear(); // can we rely on getting zero'd memory? verify below | |
6445 assert(isAllClear(), | |
6446 "Expected zero'd memory from ReservedSpace constructor"); | |
6447 assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()), | |
6448 "consistency check"); | |
6449 return true; | |
6450 } | |
6451 | |
6452 void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) { | |
6453 HeapWord *next_addr, *end_addr, *last_addr; | |
6454 assert_locked(); | |
6455 assert(covers(mr), "out-of-range error"); | |
6456 // XXX assert that start and end are appropriately aligned | |
6457 for (next_addr = mr.start(), end_addr = mr.end(); | |
6458 next_addr < end_addr; next_addr = last_addr) { | |
6459 MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr); | |
6460 last_addr = dirty_region.end(); | |
6461 if (!dirty_region.is_empty()) { | |
6462 cl->do_MemRegion(dirty_region); | |
6463 } else { | |
6464 assert(last_addr == end_addr, "program logic"); | |
6465 return; | |
6466 } | |
6467 } | |
6468 } | |
6469 | |
6470 #ifndef PRODUCT | |
6471 void CMSBitMap::assert_locked() const { | |
6472 CMSLockVerifier::assert_locked(lock()); | |
6473 } | |
6474 | |
6475 bool CMSBitMap::covers(MemRegion mr) const { | |
6476 // assert(_bm.map() == _virtual_space.low(), "map inconsistency"); | |
6477 assert((size_t)_bm.size() == (_bmWordSize >> _shifter), | |
6478 "size inconsistency"); | |
6479 return (mr.start() >= _bmStartWord) && | |
6480 (mr.end() <= endWord()); | |
6481 } | |
6482 | |
6483 bool CMSBitMap::covers(HeapWord* start, size_t size) const { | |
6484 return (start >= _bmStartWord && (start + size) <= endWord()); | |
6485 } | |
6486 | |
6487 void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) { | |
6488 // verify that there are no 1 bits in the interval [left, right) | |
6489 FalseBitMapClosure falseBitMapClosure; | |
6490 iterate(&falseBitMapClosure, left, right); | |
6491 } | |
6492 | |
6493 void CMSBitMap::region_invariant(MemRegion mr) | |
6494 { | |
6495 assert_locked(); | |
6496 // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize)); | |
6497 assert(!mr.is_empty(), "unexpected empty region"); | |
6498 assert(covers(mr), "mr should be covered by bit map"); | |
6499 // convert address range into offset range | |
6500 size_t start_ofs = heapWordToOffset(mr.start()); | |
6501 // Make sure that end() is appropriately aligned | |
6502 assert(mr.end() == (HeapWord*)round_to((intptr_t)mr.end(), | |
6503 (1 << (_shifter+LogHeapWordSize))), | |
6504 "Misaligned mr.end()"); | |
6505 size_t end_ofs = heapWordToOffset(mr.end()); | |
6506 assert(end_ofs > start_ofs, "Should mark at least one bit"); | |
6507 } | |
6508 | |
6509 #endif | |
6510 | |
6511 bool CMSMarkStack::allocate(size_t size) { | |
6512 // allocate a stack of the requisite depth | |
6513 ReservedSpace rs(ReservedSpace::allocation_align_size_up( | |
6514 size * sizeof(oop))); | |
6515 if (!rs.is_reserved()) { | |
6516 warning("CMSMarkStack allocation failure"); | |
6517 return false; | |
6518 } | |
6519 if (!_virtual_space.initialize(rs, rs.size())) { | |
6520 warning("CMSMarkStack backing store failure"); | |
6521 return false; | |
6522 } | |
6523 assert(_virtual_space.committed_size() == rs.size(), | |
6524 "didn't reserve backing store for all of CMS stack?"); | |
6525 _base = (oop*)(_virtual_space.low()); | |
6526 _index = 0; | |
6527 _capacity = size; | |
6528 NOT_PRODUCT(_max_depth = 0); | |
6529 return true; | |
6530 } | |
6531 | |
6532 // XXX FIX ME !!! In the MT case we come in here holding a | |
6533 // leaf lock. For printing we need to take a further lock | |
6534 // which has lower rank. We need to recallibrate the two | |
6535 // lock-ranks involved in order to be able to rpint the | |
6536 // messages below. (Or defer the printing to the caller. | |
6537 // For now we take the expedient path of just disabling the | |
6538 // messages for the problematic case.) | |
6539 void CMSMarkStack::expand() { | |
1284 | 6540 assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted"); |
6541 if (_capacity == MarkStackSizeMax) { | |
0 | 6542 if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) { |
6543 // We print a warning message only once per CMS cycle. | |
6544 gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit"); | |
6545 } | |
6546 return; | |
6547 } | |
6548 // Double capacity if possible | |
1284 | 6549 size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax); |
0 | 6550 // Do not give up existing stack until we have managed to |
6551 // get the double capacity that we desired. | |
6552 ReservedSpace rs(ReservedSpace::allocation_align_size_up( | |
6553 new_capacity * sizeof(oop))); | |
6554 if (rs.is_reserved()) { | |
6555 // Release the backing store associated with old stack | |
6556 _virtual_space.release(); | |
6557 // Reinitialize virtual space for new stack | |
6558 if (!_virtual_space.initialize(rs, rs.size())) { | |
6559 fatal("Not enough swap for expanded marking stack"); | |
6560 } | |
6561 _base = (oop*)(_virtual_space.low()); | |
6562 _index = 0; | |
6563 _capacity = new_capacity; | |
6564 } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) { | |
6565 // Failed to double capacity, continue; | |
6566 // we print a detail message only once per CMS cycle. | |
6567 gclog_or_tty->print(" (benign) Failed to expand marking stack from "SIZE_FORMAT"K to " | |
6568 SIZE_FORMAT"K", | |
6569 _capacity / K, new_capacity / K); | |
6570 } | |
6571 } | |
6572 | |
6573 | |
6574 // Closures | |
6575 // XXX: there seems to be a lot of code duplication here; | |
6576 // should refactor and consolidate common code. | |
6577 | |
6578 // This closure is used to mark refs into the CMS generation in | |
6579 // the CMS bit map. Called at the first checkpoint. This closure | |
6580 // assumes that we do not need to re-mark dirty cards; if the CMS | |
6581 // generation on which this is used is not an oldest (modulo perm gen) | |
6582 // generation then this will lose younger_gen cards! | |
6583 | |
6584 MarkRefsIntoClosure::MarkRefsIntoClosure( | |
994
753cf9794df9
6885169: merge of 4957990 and 6863023 causes conflict on do_nmethods
jrose
parents:
993
diff
changeset
|
6585 MemRegion span, CMSBitMap* bitMap): |
0 | 6586 _span(span), |
994
753cf9794df9
6885169: merge of 4957990 and 6863023 causes conflict on do_nmethods
jrose
parents:
993
diff
changeset
|
6587 _bitMap(bitMap) |
0 | 6588 { |
6589 assert(_ref_processor == NULL, "deliberately left NULL"); | |
6590 assert(_bitMap->covers(_span), "_bitMap/_span mismatch"); | |
6591 } | |
6592 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6593 void MarkRefsIntoClosure::do_oop(oop obj) { |
0 | 6594 // if p points into _span, then mark corresponding bit in _markBitMap |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6595 assert(obj->is_oop(), "expected an oop"); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6596 HeapWord* addr = (HeapWord*)obj; |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6597 if (_span.contains(addr)) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6598 // this should be made more efficient |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6599 _bitMap->mark(addr); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6600 } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6601 } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6602 |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6603 void MarkRefsIntoClosure::do_oop(oop* p) { MarkRefsIntoClosure::do_oop_work(p); } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6604 void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); } |
0 | 6605 |
6606 // A variant of the above, used for CMS marking verification. | |
6607 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure( | |
994
753cf9794df9
6885169: merge of 4957990 and 6863023 causes conflict on do_nmethods
jrose
parents:
993
diff
changeset
|
6608 MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm): |
0 | 6609 _span(span), |
6610 _verification_bm(verification_bm), | |
994
753cf9794df9
6885169: merge of 4957990 and 6863023 causes conflict on do_nmethods
jrose
parents:
993
diff
changeset
|
6611 _cms_bm(cms_bm) |
753cf9794df9
6885169: merge of 4957990 and 6863023 causes conflict on do_nmethods
jrose
parents:
993
diff
changeset
|
6612 { |
0 | 6613 assert(_ref_processor == NULL, "deliberately left NULL"); |
6614 assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch"); | |
6615 } | |
6616 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6617 void MarkRefsIntoVerifyClosure::do_oop(oop obj) { |
0 | 6618 // if p points into _span, then mark corresponding bit in _markBitMap |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6619 assert(obj->is_oop(), "expected an oop"); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6620 HeapWord* addr = (HeapWord*)obj; |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6621 if (_span.contains(addr)) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6622 _verification_bm->mark(addr); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6623 if (!_cms_bm->isMarked(addr)) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6624 oop(addr)->print(); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6625 gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", addr); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6626 fatal("... aborting"); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6627 } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6628 } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6629 } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6630 |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6631 void MarkRefsIntoVerifyClosure::do_oop(oop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6632 void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); } |
0 | 6633 |
6634 ////////////////////////////////////////////////// | |
6635 // MarkRefsIntoAndScanClosure | |
6636 ////////////////////////////////////////////////// | |
6637 | |
6638 MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span, | |
6639 ReferenceProcessor* rp, | |
6640 CMSBitMap* bit_map, | |
6641 CMSBitMap* mod_union_table, | |
6642 CMSMarkStack* mark_stack, | |
6643 CMSMarkStack* revisit_stack, | |
6644 CMSCollector* collector, | |
6645 bool should_yield, | |
6646 bool concurrent_precleaning): | |
6647 _collector(collector), | |
6648 _span(span), | |
6649 _bit_map(bit_map), | |
6650 _mark_stack(mark_stack), | |
6651 _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table, | |
6652 mark_stack, revisit_stack, concurrent_precleaning), | |
6653 _yield(should_yield), | |
6654 _concurrent_precleaning(concurrent_precleaning), | |
6655 _freelistLock(NULL) | |
6656 { | |
6657 _ref_processor = rp; | |
6658 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL"); | |
6659 } | |
6660 | |
6661 // This closure is used to mark refs into the CMS generation at the | |
6662 // second (final) checkpoint, and to scan and transitively follow | |
6663 // the unmarked oops. It is also used during the concurrent precleaning | |
6664 // phase while scanning objects on dirty cards in the CMS generation. | |
6665 // The marks are made in the marking bit map and the marking stack is | |
6666 // used for keeping the (newly) grey objects during the scan. | |
6667 // The parallel version (Par_...) appears further below. | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6668 void MarkRefsIntoAndScanClosure::do_oop(oop obj) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6669 if (obj != NULL) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6670 assert(obj->is_oop(), "expected an oop"); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6671 HeapWord* addr = (HeapWord*)obj; |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6672 assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)"); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6673 assert(_collector->overflow_list_is_empty(), |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6674 "overflow list should be empty"); |
0 | 6675 if (_span.contains(addr) && |
6676 !_bit_map->isMarked(addr)) { | |
6677 // mark bit map (object is now grey) | |
6678 _bit_map->mark(addr); | |
6679 // push on marking stack (stack should be empty), and drain the | |
6680 // stack by applying this closure to the oops in the oops popped | |
6681 // from the stack (i.e. blacken the grey objects) | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6682 bool res = _mark_stack->push(obj); |
0 | 6683 assert(res, "Should have space to push on empty stack"); |
6684 do { | |
6685 oop new_oop = _mark_stack->pop(); | |
6686 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop"); | |
6687 assert(new_oop->is_parsable(), "Found unparsable oop"); | |
6688 assert(_bit_map->isMarked((HeapWord*)new_oop), | |
6689 "only grey objects on this stack"); | |
6690 // iterate over the oops in this oop, marking and pushing | |
6691 // the ones in CMS heap (i.e. in _span). | |
6692 new_oop->oop_iterate(&_pushAndMarkClosure); | |
6693 // check if it's time to yield | |
6694 do_yield_check(); | |
6695 } while (!_mark_stack->isEmpty() || | |
6696 (!_concurrent_precleaning && take_from_overflow_list())); | |
6697 // if marking stack is empty, and we are not doing this | |
6698 // during precleaning, then check the overflow list | |
6699 } | |
6700 assert(_mark_stack->isEmpty(), "post-condition (eager drainage)"); | |
6701 assert(_collector->overflow_list_is_empty(), | |
6702 "overflow list was drained above"); | |
6703 // We could restore evacuated mark words, if any, used for | |
6704 // overflow list links here because the overflow list is | |
6705 // provably empty here. That would reduce the maximum | |
6706 // size requirements for preserved_{oop,mark}_stack. | |
6707 // But we'll just postpone it until we are all done | |
6708 // so we can just stream through. | |
6709 if (!_concurrent_precleaning && CMSOverflowEarlyRestoration) { | |
6710 _collector->restore_preserved_marks_if_any(); | |
6711 assert(_collector->no_preserved_marks(), "No preserved marks"); | |
6712 } | |
6713 assert(!CMSOverflowEarlyRestoration || _collector->no_preserved_marks(), | |
6714 "All preserved marks should have been restored above"); | |
6715 } | |
6716 } | |
6717 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6718 void MarkRefsIntoAndScanClosure::do_oop(oop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6719 void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6720 |
0 | 6721 void MarkRefsIntoAndScanClosure::do_yield_work() { |
6722 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), | |
6723 "CMS thread should hold CMS token"); | |
6724 assert_lock_strong(_freelistLock); | |
6725 assert_lock_strong(_bit_map->lock()); | |
6726 // relinquish the free_list_lock and bitMaplock() | |
935 | 6727 DEBUG_ONLY(RememberKlassesChecker mux(false);) |
0 | 6728 _bit_map->lock()->unlock(); |
6729 _freelistLock->unlock(); | |
6730 ConcurrentMarkSweepThread::desynchronize(true); | |
6731 ConcurrentMarkSweepThread::acknowledge_yield_request(); | |
6732 _collector->stopTimer(); | |
6733 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr()); | |
6734 if (PrintCMSStatistics != 0) { | |
6735 _collector->incrementYields(); | |
6736 } | |
6737 _collector->icms_wait(); | |
6738 | |
6739 // See the comment in coordinator_yield() | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6740 for (unsigned i = 0; |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6741 i < CMSYieldSleepCount && |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6742 ConcurrentMarkSweepThread::should_yield() && |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6743 !CMSCollector::foregroundGCIsActive(); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6744 ++i) { |
0 | 6745 os::sleep(Thread::current(), 1, false); |
6746 ConcurrentMarkSweepThread::acknowledge_yield_request(); | |
6747 } | |
6748 | |
6749 ConcurrentMarkSweepThread::synchronize(true); | |
6750 _freelistLock->lock_without_safepoint_check(); | |
6751 _bit_map->lock()->lock_without_safepoint_check(); | |
6752 _collector->startTimer(); | |
6753 } | |
6754 | |
6755 /////////////////////////////////////////////////////////// | |
6756 // Par_MarkRefsIntoAndScanClosure: a parallel version of | |
6757 // MarkRefsIntoAndScanClosure | |
6758 /////////////////////////////////////////////////////////// | |
6759 Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure( | |
6760 CMSCollector* collector, MemRegion span, ReferenceProcessor* rp, | |
6761 CMSBitMap* bit_map, OopTaskQueue* work_queue, CMSMarkStack* revisit_stack): | |
6762 _span(span), | |
6763 _bit_map(bit_map), | |
6764 _work_queue(work_queue), | |
6765 _low_water_mark(MIN2((uint)(work_queue->max_elems()/4), | |
6766 (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))), | |
6767 _par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue, | |
6768 revisit_stack) | |
6769 { | |
6770 _ref_processor = rp; | |
6771 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL"); | |
6772 } | |
6773 | |
6774 // This closure is used to mark refs into the CMS generation at the | |
6775 // second (final) checkpoint, and to scan and transitively follow | |
6776 // the unmarked oops. The marks are made in the marking bit map and | |
6777 // the work_queue is used for keeping the (newly) grey objects during | |
6778 // the scan phase whence they are also available for stealing by parallel | |
6779 // threads. Since the marking bit map is shared, updates are | |
6780 // synchronized (via CAS). | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6781 void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6782 if (obj != NULL) { |
0 | 6783 // Ignore mark word because this could be an already marked oop |
6784 // that may be chained at the end of the overflow list. | |
340
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
6785 assert(obj->is_oop(true), "expected an oop"); |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6786 HeapWord* addr = (HeapWord*)obj; |
0 | 6787 if (_span.contains(addr) && |
6788 !_bit_map->isMarked(addr)) { | |
6789 // mark bit map (object will become grey): | |
6790 // It is possible for several threads to be | |
6791 // trying to "claim" this object concurrently; | |
6792 // the unique thread that succeeds in marking the | |
6793 // object first will do the subsequent push on | |
6794 // to the work queue (or overflow list). | |
6795 if (_bit_map->par_mark(addr)) { | |
6796 // push on work_queue (which may not be empty), and trim the | |
6797 // queue to an appropriate length by applying this closure to | |
6798 // the oops in the oops popped from the stack (i.e. blacken the | |
6799 // grey objects) | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6800 bool res = _work_queue->push(obj); |
0 | 6801 assert(res, "Low water mark should be less than capacity?"); |
6802 trim_queue(_low_water_mark); | |
6803 } // Else, another thread claimed the object | |
6804 } | |
6805 } | |
6806 } | |
6807 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6808 void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6809 void Par_MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6810 |
0 | 6811 // This closure is used to rescan the marked objects on the dirty cards |
6812 // in the mod union table and the card table proper. | |
6813 size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m( | |
6814 oop p, MemRegion mr) { | |
6815 | |
6816 size_t size = 0; | |
6817 HeapWord* addr = (HeapWord*)p; | |
6818 DEBUG_ONLY(_collector->verify_work_stacks_empty();) | |
6819 assert(_span.contains(addr), "we are scanning the CMS generation"); | |
6820 // check if it's time to yield | |
6821 if (do_yield_check()) { | |
6822 // We yielded for some foreground stop-world work, | |
6823 // and we have been asked to abort this ongoing preclean cycle. | |
6824 return 0; | |
6825 } | |
6826 if (_bitMap->isMarked(addr)) { | |
6827 // it's marked; is it potentially uninitialized? | |
187 | 6828 if (p->klass_or_null() != NULL) { |
518
0af8b0718fc9
6692899: CMS: many vm.parallel_class_loading tests fail with assert "missing Printezis mark"
jmasa
parents:
517
diff
changeset
|
6829 // If is_conc_safe is false, the object may be undergoing |
0af8b0718fc9
6692899: CMS: many vm.parallel_class_loading tests fail with assert "missing Printezis mark"
jmasa
parents:
517
diff
changeset
|
6830 // change by the VM outside a safepoint. Don't try to |
0af8b0718fc9
6692899: CMS: many vm.parallel_class_loading tests fail with assert "missing Printezis mark"
jmasa
parents:
517
diff
changeset
|
6831 // scan it, but rather leave it for the remark phase. |
0af8b0718fc9
6692899: CMS: many vm.parallel_class_loading tests fail with assert "missing Printezis mark"
jmasa
parents:
517
diff
changeset
|
6832 if (CMSPermGenPrecleaningEnabled && |
0af8b0718fc9
6692899: CMS: many vm.parallel_class_loading tests fail with assert "missing Printezis mark"
jmasa
parents:
517
diff
changeset
|
6833 (!p->is_conc_safe() || !p->is_parsable())) { |
0 | 6834 // Signal precleaning to redirty the card since |
6835 // the klass pointer is already installed. | |
6836 assert(size == 0, "Initial value"); | |
6837 } else { | |
6838 assert(p->is_parsable(), "must be parsable."); | |
6839 // an initialized object; ignore mark word in verification below | |
6840 // since we are running concurrent with mutators | |
6841 assert(p->is_oop(true), "should be an oop"); | |
6842 if (p->is_objArray()) { | |
6843 // objArrays are precisely marked; restrict scanning | |
6844 // to dirty cards only. | |
187 | 6845 size = CompactibleFreeListSpace::adjustObjectSize( |
6846 p->oop_iterate(_scanningClosure, mr)); | |
0 | 6847 } else { |
6848 // A non-array may have been imprecisely marked; we need | |
6849 // to scan object in its entirety. | |
6850 size = CompactibleFreeListSpace::adjustObjectSize( | |
6851 p->oop_iterate(_scanningClosure)); | |
6852 } | |
6853 #ifdef DEBUG | |
6854 size_t direct_size = | |
6855 CompactibleFreeListSpace::adjustObjectSize(p->size()); | |
6856 assert(size == direct_size, "Inconsistency in size"); | |
6857 assert(size >= 3, "Necessary for Printezis marks to work"); | |
6858 if (!_bitMap->isMarked(addr+1)) { | |
6859 _bitMap->verifyNoOneBitsInRange(addr+2, addr+size); | |
6860 } else { | |
6861 _bitMap->verifyNoOneBitsInRange(addr+2, addr+size-1); | |
6862 assert(_bitMap->isMarked(addr+size-1), | |
6863 "inconsistent Printezis mark"); | |
6864 } | |
6865 #endif // DEBUG | |
6866 } | |
6867 } else { | |
6868 // an unitialized object | |
6869 assert(_bitMap->isMarked(addr+1), "missing Printezis mark?"); | |
6870 HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2); | |
6871 size = pointer_delta(nextOneAddr + 1, addr); | |
6872 assert(size == CompactibleFreeListSpace::adjustObjectSize(size), | |
6873 "alignment problem"); | |
6874 // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass() | |
6875 // will dirty the card when the klass pointer is installed in the | |
6876 // object (signalling the completion of initialization). | |
6877 } | |
6878 } else { | |
6879 // Either a not yet marked object or an uninitialized object | |
187 | 6880 if (p->klass_or_null() == NULL || !p->is_parsable()) { |
0 | 6881 // An uninitialized object, skip to the next card, since |
6882 // we may not be able to read its P-bits yet. | |
6883 assert(size == 0, "Initial value"); | |
6884 } else { | |
6885 // An object not (yet) reached by marking: we merely need to | |
6886 // compute its size so as to go look at the next block. | |
6887 assert(p->is_oop(true), "should be an oop"); | |
6888 size = CompactibleFreeListSpace::adjustObjectSize(p->size()); | |
6889 } | |
6890 } | |
6891 DEBUG_ONLY(_collector->verify_work_stacks_empty();) | |
6892 return size; | |
6893 } | |
6894 | |
6895 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() { | |
6896 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), | |
6897 "CMS thread should hold CMS token"); | |
6898 assert_lock_strong(_freelistLock); | |
6899 assert_lock_strong(_bitMap->lock()); | |
935 | 6900 DEBUG_ONLY(RememberKlassesChecker mux(false);) |
0 | 6901 // relinquish the free_list_lock and bitMaplock() |
6902 _bitMap->lock()->unlock(); | |
6903 _freelistLock->unlock(); | |
6904 ConcurrentMarkSweepThread::desynchronize(true); | |
6905 ConcurrentMarkSweepThread::acknowledge_yield_request(); | |
6906 _collector->stopTimer(); | |
6907 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr()); | |
6908 if (PrintCMSStatistics != 0) { | |
6909 _collector->incrementYields(); | |
6910 } | |
6911 _collector->icms_wait(); | |
6912 | |
6913 // See the comment in coordinator_yield() | |
6914 for (unsigned i = 0; i < CMSYieldSleepCount && | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6915 ConcurrentMarkSweepThread::should_yield() && |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6916 !CMSCollector::foregroundGCIsActive(); ++i) { |
0 | 6917 os::sleep(Thread::current(), 1, false); |
6918 ConcurrentMarkSweepThread::acknowledge_yield_request(); | |
6919 } | |
6920 | |
6921 ConcurrentMarkSweepThread::synchronize(true); | |
6922 _freelistLock->lock_without_safepoint_check(); | |
6923 _bitMap->lock()->lock_without_safepoint_check(); | |
6924 _collector->startTimer(); | |
6925 } | |
6926 | |
6927 | |
6928 ////////////////////////////////////////////////////////////////// | |
6929 // SurvivorSpacePrecleanClosure | |
6930 ////////////////////////////////////////////////////////////////// | |
6931 // This (single-threaded) closure is used to preclean the oops in | |
6932 // the survivor spaces. | |
6933 size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) { | |
6934 | |
6935 HeapWord* addr = (HeapWord*)p; | |
6936 DEBUG_ONLY(_collector->verify_work_stacks_empty();) | |
6937 assert(!_span.contains(addr), "we are scanning the survivor spaces"); | |
187 | 6938 assert(p->klass_or_null() != NULL, "object should be initializd"); |
0 | 6939 assert(p->is_parsable(), "must be parsable."); |
6940 // an initialized object; ignore mark word in verification below | |
6941 // since we are running concurrent with mutators | |
6942 assert(p->is_oop(true), "should be an oop"); | |
6943 // Note that we do not yield while we iterate over | |
6944 // the interior oops of p, pushing the relevant ones | |
6945 // on our marking stack. | |
6946 size_t size = p->oop_iterate(_scanning_closure); | |
6947 do_yield_check(); | |
6948 // Observe that below, we do not abandon the preclean | |
6949 // phase as soon as we should; rather we empty the | |
6950 // marking stack before returning. This is to satisfy | |
6951 // some existing assertions. In general, it may be a | |
6952 // good idea to abort immediately and complete the marking | |
6953 // from the grey objects at a later time. | |
6954 while (!_mark_stack->isEmpty()) { | |
6955 oop new_oop = _mark_stack->pop(); | |
6956 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop"); | |
6957 assert(new_oop->is_parsable(), "Found unparsable oop"); | |
6958 assert(_bit_map->isMarked((HeapWord*)new_oop), | |
6959 "only grey objects on this stack"); | |
6960 // iterate over the oops in this oop, marking and pushing | |
6961 // the ones in CMS heap (i.e. in _span). | |
6962 new_oop->oop_iterate(_scanning_closure); | |
6963 // check if it's time to yield | |
6964 do_yield_check(); | |
6965 } | |
6966 unsigned int after_count = | |
6967 GenCollectedHeap::heap()->total_collections(); | |
6968 bool abort = (_before_count != after_count) || | |
6969 _collector->should_abort_preclean(); | |
6970 return abort ? 0 : size; | |
6971 } | |
6972 | |
// Yield protocol for survivor-space precleaning: relinquish the bit
// map lock and the CMS token, pause briefly (waiting out iCMS if it
// is active), then reacquire both before resuming work.
void SurvivorSpacePrecleanClosure::do_yield_work() {
  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "CMS thread should hold CMS token");
  assert_lock_strong(_bit_map->lock());
  DEBUG_ONLY(RememberKlassesChecker smx(false);)
  // Relinquish the bit map lock
  _bit_map->lock()->unlock();
  ConcurrentMarkSweepThread::desynchronize(true);
  ConcurrentMarkSweepThread::acknowledge_yield_request();
  // Stop the concurrent phase timer while we are paused; GCPauseTimer
  // covers the duration of this yield.
  _collector->stopTimer();
  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
  if (PrintCMSStatistics != 0) {
    _collector->incrementYields();
  }
  _collector->icms_wait();

  // See the comment in coordinator_yield()
  for (unsigned i = 0; i < CMSYieldSleepCount &&
                       ConcurrentMarkSweepThread::should_yield() &&
                       !CMSCollector::foregroundGCIsActive(); ++i) {
    os::sleep(Thread::current(), 1, false);
    ConcurrentMarkSweepThread::acknowledge_yield_request();
  }

  // Reacquire the CMS token and the bit map lock, then restart the
  // phase timer; note the lock is taken without a safepoint check.
  ConcurrentMarkSweepThread::synchronize(true);
  _bit_map->lock()->lock_without_safepoint_check();
  _collector->startTimer();
}
7001 | |
// This closure is used to rescan the marked objects on the dirty cards
// in the mod union table and the card table proper. In the parallel
// case, although the bitMap is shared, we do a single read so the
// isMarked() query is "safe".
// Returns true iff the object at p was a marked objArray (in which
// case it was scanned precisely, restricted to the region mr, rather
// than in its entirety).
bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
  // Ignore mark word because we are running concurrent with mutators
  assert(p->is_oop_or_null(true), "expected an oop or null");
  HeapWord* addr = (HeapWord*)p;
  assert(_span.contains(addr), "we are scanning the CMS generation");
  bool is_obj_array = false;
#ifdef DEBUG
  if (!_parallel) {
    // In the serial case the mark stack and overflow list are drained
    // eagerly, so both must be empty on entry.
    assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
    assert(_collector->overflow_list_is_empty(),
           "overflow list should be empty");

  }
#endif // DEBUG
  if (_bit_map->isMarked(addr)) {
    // Obj arrays are precisely marked, non-arrays are not;
    // so we scan objArrays precisely and non-arrays in their
    // entirety.
    if (p->is_objArray()) {
      is_obj_array = true;
      if (_parallel) {
        p->oop_iterate(_par_scan_closure, mr);
      } else {
        p->oop_iterate(_scan_closure, mr);
      }
    } else {
      if (_parallel) {
        p->oop_iterate(_par_scan_closure);
      } else {
        p->oop_iterate(_scan_closure);
      }
    }
  }
#ifdef DEBUG
  if (!_parallel) {
    assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
    assert(_collector->overflow_list_is_empty(),
           "overflow list should be empty");

  }
#endif // DEBUG
  return is_obj_array;
}
7049 | |
// Construct a serial mark-from-roots closure over the given span.
// The finger starts at the beginning of the bit map's range, and the
// card-cleaning threshold starts at the finger (it is kept
// card-aligned thereafter; see scanOopsInOop()).
MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
                       MemRegion span,
                       CMSBitMap* bitMap, CMSMarkStack* markStack,
                       CMSMarkStack* revisitStack,
                       bool should_yield, bool verifying):
  _collector(collector),
  _span(span),
  _bitMap(bitMap),
  _mut(&collector->_modUnionTable),
  _markStack(markStack),
  _revisitStack(revisitStack),
  _yield(should_yield),
  _skipBits(0)
{
  assert(_markStack->isEmpty(), "stack should be empty");
  _finger = _bitMap->startWord();
  _threshold = _finger;
  assert(_collector->_restart_addr == NULL, "Sanity check");
  assert(_span.contains(_finger), "Out of bounds _finger?");
  // _verifying only exists in debug builds; it selects verification
  // behavior in do_bit()/scanOopsInOop().
  DEBUG_ONLY(_verifying = verifying;)
}
7071 | |
7072 void MarkFromRootsClosure::reset(HeapWord* addr) { | |
7073 assert(_markStack->isEmpty(), "would cause duplicates on stack"); | |
7074 assert(_span.contains(addr), "Out of bounds _finger?"); | |
7075 _finger = addr; | |
7076 _threshold = (HeapWord*)round_to( | |
7077 (intptr_t)_finger, CardTableModRefBS::card_size); | |
7078 } | |
7079 | |
// Should revisit to see if this should be restructured for
// greater efficiency.
// Called by the bit map iterator for each set (marked) bit; offset is
// relative to the bit map's start word. Always returns true so the
// iteration continues.
bool MarkFromRootsClosure::do_bit(size_t offset) {
  // A previous call found an uninitialized ("Printezis-marked")
  // object and asked us to skip over its two marked bits.
  if (_skipBits > 0) {
    _skipBits--;
    return true;
  }
  // convert offset into a HeapWord*
  HeapWord* addr = _bitMap->startWord() + offset;
  assert(_bitMap->endWord() && addr < _bitMap->endWord(),
         "address out of range");
  assert(_bitMap->isMarked(addr), "tautology");
  if (_bitMap->isMarked(addr+1)) {
    // this is an allocated but not yet initialized object
    assert(_skipBits == 0, "tautology");
    _skipBits = 2;  // skip next two marked bits ("Printezis-marks")
    oop p = oop(addr);
    if (p->klass_or_null() == NULL || !p->is_parsable()) {
      DEBUG_ONLY(if (!_verifying) {)
        // We re-dirty the cards on which this object lies and increase
        // the _threshold so that we'll come back to scan this object
        // during the preclean or remark phase. (CMSCleanOnEnter)
        if (CMSCleanOnEnter) {
          size_t sz = _collector->block_size_using_printezis_bits(addr);
          HeapWord* end_card_addr = (HeapWord*)round_to(
                                       (intptr_t)(addr+sz), CardTableModRefBS::card_size);
          MemRegion redirty_range = MemRegion(addr, end_card_addr);
          assert(!redirty_range.is_empty(), "Arithmetical tautology");
          // Bump _threshold to end_card_addr; note that
          // _threshold cannot possibly exceed end_card_addr, anyhow.
          // This prevents future clearing of the card as the scan proceeds
          // to the right.
          assert(_threshold <= end_card_addr,
                 "Because we are just scanning into this object");
          if (_threshold < end_card_addr) {
            _threshold = end_card_addr;
          }
          if (p->klass_or_null() != NULL) {
            // Redirty the range of cards...
            _mut->mark_range(redirty_range);
          } // ...else the setting of klass will dirty the card anyway.
        }
      DEBUG_ONLY(})
      return true;
    }
  }
  // Fully-initialized object: trace through its interior oops.
  scanOopsInOop(addr);
  return true;
}
7129 | |
// We take a break if we've been at this for a while,
// so as to avoid monopolizing the locks involved.
void MarkFromRootsClosure::do_yield_work() {
  // First give up the locks, then yield, then re-lock
  // We should probably use a constructor/destructor idiom to
  // do this unlock/lock or modify the MutexUnlocker class to
  // serve our purpose. XXX
  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "CMS thread should hold CMS token");
  assert_lock_strong(_bitMap->lock());
  DEBUG_ONLY(RememberKlassesChecker mux(false);)
  _bitMap->lock()->unlock();
  ConcurrentMarkSweepThread::desynchronize(true);
  ConcurrentMarkSweepThread::acknowledge_yield_request();
  // Pause the concurrent phase timer for the duration of the yield.
  _collector->stopTimer();
  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
  if (PrintCMSStatistics != 0) {
    _collector->incrementYields();
  }
  _collector->icms_wait();

  // See the comment in coordinator_yield()
  for (unsigned i = 0; i < CMSYieldSleepCount &&
                       ConcurrentMarkSweepThread::should_yield() &&
                       !CMSCollector::foregroundGCIsActive(); ++i) {
    os::sleep(Thread::current(), 1, false);
    ConcurrentMarkSweepThread::acknowledge_yield_request();
  }

  // Reacquire the CMS token and the bit map lock before resuming.
  ConcurrentMarkSweepThread::synchronize(true);
  _bitMap->lock()->lock_without_safepoint_check();
  _collector->startTimer();
}
7163 | |
// Trace through the interior oops of the (marked) object at ptr,
// greying and pushing unvisited ones, and fully drain the marking
// stack before returning. Also advances the finger past the object
// and, when CMSCleanOnEnter is on, clears now-redundant mod union
// table entries that the finger has swept past.
void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
  assert(_bitMap->isMarked(ptr), "expected bit to be set");
  assert(_markStack->isEmpty(),
         "should drain stack to limit stack usage");
  // convert ptr to an oop preparatory to scanning
  oop obj = oop(ptr);
  // Ignore mark word in verification below, since we
  // may be running concurrent with mutators.
  assert(obj->is_oop(true), "should be an oop");
  assert(_finger <= ptr, "_finger runneth ahead");
  // advance the finger to right end of this object
  _finger = ptr + obj->size();
  assert(_finger > ptr, "we just incremented it above");
  // On large heaps, it may take us some time to get through
  // the marking phase (especially if running iCMS). During
  // this time it's possible that a lot of mutations have
  // accumulated in the card table and the mod union table --
  // these mutation records are redundant until we have
  // actually traced into the corresponding card.
  // Here, we check whether advancing the finger would make
  // us cross into a new card, and if so clear corresponding
  // cards in the MUT (preclean them in the card-table in the
  // future).

  DEBUG_ONLY(if (!_verifying) {)
    // The clean-on-enter optimization is disabled by default,
    // until we fix 6178663.
    if (CMSCleanOnEnter && (_finger > _threshold)) {
      // [_threshold, _finger) represents the interval
      // of cards to be cleared in MUT (or precleaned in card table).
      // The set of cards to be cleared is all those that overlap
      // with the interval [_threshold, _finger); note that
      // _threshold is always kept card-aligned but _finger isn't
      // always card-aligned.
      HeapWord* old_threshold = _threshold;
      assert(old_threshold == (HeapWord*)round_to(
              (intptr_t)old_threshold, CardTableModRefBS::card_size),
             "_threshold should always be card-aligned");
      _threshold = (HeapWord*)round_to(
                     (intptr_t)_finger, CardTableModRefBS::card_size);
      MemRegion mr(old_threshold, _threshold);
      assert(!mr.is_empty(), "Control point invariant");
      assert(_span.contains(mr), "Should clear within span");
      // XXX When _finger crosses from old gen into perm gen
      // we may be doing unnecessary cleaning; do better in the
      // future by detecting that condition and clearing fewer
      // MUT/CT entries.
      _mut->clear_range(mr);
    }
  DEBUG_ONLY(})
  // Note: the finger doesn't advance while we drain
  // the stack below.
  PushOrMarkClosure pushOrMarkClosure(_collector,
                                      _span, _bitMap, _markStack,
                                      _revisitStack,
                                      _finger, this);
  bool res = _markStack->push(obj);
  assert(res, "Empty non-zero size stack should have space for single push");
  while (!_markStack->isEmpty()) {
    oop new_oop = _markStack->pop();
    // Skip verifying header mark word below because we are
    // running concurrent with mutators.
    assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
    // now scan this oop's oops
    new_oop->oop_iterate(&pushOrMarkClosure);
    do_yield_check();
  }
  assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
}
7233 | |
// Parallel analogue of MarkFromRootsClosure: marks within this
// worker's chunk (span) of the collector's whole span, using a
// per-worker OopTaskQueue plus a shared overflow stack instead of a
// private marking stack.
Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
                       CMSCollector* collector, MemRegion span,
                       CMSBitMap* bit_map,
                       OopTaskQueue* work_queue,
                       CMSMarkStack* overflow_stack,
                       CMSMarkStack* revisit_stack,
                       bool should_yield):
  _collector(collector),
  _whole_span(collector->_span),
  _span(span),
  _bit_map(bit_map),
  _mut(&collector->_modUnionTable),
  _work_queue(work_queue),
  _overflow_stack(overflow_stack),
  _revisit_stack(revisit_stack),
  _yield(should_yield),
  _skip_bits(0),
  _task(task)
{
  assert(_work_queue->size() == 0, "work_queue should be empty");
  _finger = span.start();
  _threshold = _finger;   // XXX Defer clear-on-enter optimization for now
  assert(_span.contains(_finger), "Out of bounds _finger?");
}
7258 | |
// Should revisit to see if this should be restructured for
// greater efficiency.
// Parallel version of do_bit(): called for each set bit in this
// worker's chunk of the bit map. Always returns true so the
// iteration continues.
bool Par_MarkFromRootsClosure::do_bit(size_t offset) {
  // Still skipping over a previously seen uninitialized object's
  // Printezis marks.
  if (_skip_bits > 0) {
    _skip_bits--;
    return true;
  }
  // convert offset into a HeapWord*
  HeapWord* addr = _bit_map->startWord() + offset;
  assert(_bit_map->endWord() && addr < _bit_map->endWord(),
         "address out of range");
  assert(_bit_map->isMarked(addr), "tautology");
  if (_bit_map->isMarked(addr+1)) {
    // this is an allocated object that might not yet be initialized
    assert(_skip_bits == 0, "tautology");
    _skip_bits = 2;  // skip next two marked bits ("Printezis-marks")
    oop p = oop(addr);
    if (p->klass_or_null() == NULL || !p->is_parsable()) {
      // Unlike the serial version, we simply skip the object here;
      // clear-on-enter is deferred for the parallel case (see the
      // constructor's note on _threshold).
      // in the case of Clean-on-Enter optimization, redirty card
      // and avoid clearing card by increasing the threshold.
      return true;
    }
  }
  scan_oops_in_oop(addr);
  return true;
}
7285 | |
// Parallel analogue of scanOopsInOop(): trace through the interior
// oops of the (marked) object at ptr, pushing work on this worker's
// queue, and drain the queue -- refilling it from the shared overflow
// stack when it runs dry -- before returning.
void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
  assert(_bit_map->isMarked(ptr), "expected bit to be set");
  // Should we assert that our work queue is empty or
  // below some drain limit?
  assert(_work_queue->size() == 0,
         "should drain stack to limit stack usage");
  // convert ptr to an oop preparatory to scanning
  oop obj = oop(ptr);
  // Ignore mark word in verification below, since we
  // may be running concurrent with mutators.
  assert(obj->is_oop(true), "should be an oop");
  assert(_finger <= ptr, "_finger runneth ahead");
  // advance the finger to right end of this object
  _finger = ptr + obj->size();
  assert(_finger > ptr, "we just incremented it above");
  // On large heaps, it may take us some time to get through
  // the marking phase (especially if running iCMS). During
  // this time it's possible that a lot of mutations have
  // accumulated in the card table and the mod union table --
  // these mutation records are redundant until we have
  // actually traced into the corresponding card.
  // Here, we check whether advancing the finger would make
  // us cross into a new card, and if so clear corresponding
  // cards in the MUT (preclean them in the card-table in the
  // future).

  // The clean-on-enter optimization is disabled by default,
  // until we fix 6178663.
  if (CMSCleanOnEnter && (_finger > _threshold)) {
    // [_threshold, _finger) represents the interval
    // of cards to be cleared in MUT (or precleaned in card table).
    // The set of cards to be cleared is all those that overlap
    // with the interval [_threshold, _finger); note that
    // _threshold is always kept card-aligned but _finger isn't
    // always card-aligned.
    HeapWord* old_threshold = _threshold;
    assert(old_threshold == (HeapWord*)round_to(
            (intptr_t)old_threshold, CardTableModRefBS::card_size),
           "_threshold should always be card-aligned");
    _threshold = (HeapWord*)round_to(
                   (intptr_t)_finger, CardTableModRefBS::card_size);
    MemRegion mr(old_threshold, _threshold);
    assert(!mr.is_empty(), "Control point invariant");
    assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
    // XXX When _finger crosses from old gen into perm gen
    // we may be doing unnecessary cleaning; do better in the
    // future by detecting that condition and clearing fewer
    // MUT/CT entries.
    _mut->clear_range(mr);
  }

  // Note: the local finger doesn't advance while we drain
  // the stack below, but the global finger sure can and will.
  HeapWord** gfa = _task->global_finger_addr();
  Par_PushOrMarkClosure pushOrMarkClosure(_collector,
                                          _span, _bit_map,
                                          _work_queue,
                                          _overflow_stack,
                                          _revisit_stack,
                                          _finger,
                                          gfa, this);
  bool res = _work_queue->push(obj);   // overflow could occur here
  assert(res, "Will hold once we use workqueues");
  while (true) {
    oop new_oop;
    if (!_work_queue->pop_local(new_oop)) {
      // We emptied our work_queue; check if there's stuff that can
      // be gotten from the overflow stack.
      if (CMSConcMarkingTask::get_work_from_overflow_stack(
            _overflow_stack, _work_queue)) {
        do_yield_check();
        continue;
      } else {  // done
        break;
      }
    }
    // Skip verifying header mark word below because we are
    // running concurrent with mutators.
    assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
    // now scan this oop's oops
    new_oop->oop_iterate(&pushOrMarkClosure);
    do_yield_check();
  }
  assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
}
7371 | |
// Yield in response to a request from VM Thread or
// from mutators.
void Par_MarkFromRootsClosure::do_yield_work() {
  // Delegate to the owning task, which coordinates yielding across
  // all of the parallel marking workers.
  assert(_task != NULL, "sanity");
  _task->yield();
}
7378 | |
// A variant of the above used for verifying CMS marking work.
// Marks into verification_bm and cross-checks against cms_bm (the
// bit map produced by CMS marking proper); see do_bit() and the
// embedded _pam_verify_closure.
MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
                       MemRegion span,
                       CMSBitMap* verification_bm, CMSBitMap* cms_bm,
                       CMSMarkStack* mark_stack):
  _collector(collector),
  _span(span),
  _verification_bm(verification_bm),
  _cms_bm(cms_bm),
  _mark_stack(mark_stack),
  _pam_verify_closure(collector, span, verification_bm, cms_bm,
                      mark_stack)
{
  assert(_mark_stack->isEmpty(), "stack should be empty");
  _finger = _verification_bm->startWord();
  assert(_collector->_restart_addr == NULL, "Sanity check");
  assert(_span.contains(_finger), "Out of bounds _finger?");
}
7397 | |
// Restart verification marking at addr (e.g. after a stack overflow
// discarded part of the mark stack; see handle_stack_overflow()).
void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
  assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
  assert(_span.contains(addr), "Out of bounds _finger?");
  _finger = addr;
}
7403 | |
// Should revisit to see if this should be restructured for
// greater efficiency.
// Called for each set bit in the verification bit map; scans the
// object there and drains the mark stack fully before returning.
// Always returns true so the iteration continues.
bool MarkFromRootsVerifyClosure::do_bit(size_t offset) {
  // convert offset into a HeapWord*
  HeapWord* addr = _verification_bm->startWord() + offset;
  assert(_verification_bm->endWord() && addr < _verification_bm->endWord(),
         "address out of range");
  assert(_verification_bm->isMarked(addr), "tautology");
  assert(_cms_bm->isMarked(addr), "tautology");

  assert(_mark_stack->isEmpty(),
         "should drain stack to limit stack usage");
  // convert addr to an oop preparatory to scanning
  oop obj = oop(addr);
  assert(obj->is_oop(), "should be an oop");
  assert(_finger <= addr, "_finger runneth ahead");
  // advance the finger to right end of this object
  _finger = addr + obj->size();
  assert(_finger > addr, "we just incremented it above");
  // Note: the finger doesn't advance while we drain
  // the stack below.
  bool res = _mark_stack->push(obj);
  assert(res, "Empty non-zero size stack should have space for single push");
  while (!_mark_stack->isEmpty()) {
    oop new_oop = _mark_stack->pop();
    assert(new_oop->is_oop(), "Oops! expected to pop an oop");
    // now scan this oop's oops
    new_oop->oop_iterate(&_pam_verify_closure);
  }
  assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
  return true;
}
7436 | |
// Helper closure for MarkFromRootsVerifyClosure: greys oops in the
// verification bit map and pushes them on the mark stack, checking
// each against the CMS bit map (see do_oop(oop) below).
PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
  CMSCollector* collector, MemRegion span,
  CMSBitMap* verification_bm, CMSBitMap* cms_bm,
  CMSMarkStack* mark_stack):
  OopClosure(collector->ref_processor()),
  _collector(collector),
  _span(span),
  _verification_bm(verification_bm),
  _cms_bm(cms_bm),
  _mark_stack(mark_stack)
{ }
7448 | |
// Dispatch both the wide-oop and narrow-oop variants to the common
// template worker, do_oop_work().
void PushAndMarkVerifyClosure::do_oop(oop* p)       { PushAndMarkVerifyClosure::do_oop_work(p); }
void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
7452 // Upon stack overflow, we discard (part of) the stack, | |
7453 // remembering the least address amongst those discarded | |
7454 // in CMSCollector's _restart_address. | |
7455 void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) { | |
7456 // Remember the least grey address discarded | |
7457 HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost); | |
7458 _collector->lower_restart_addr(ra); | |
7459 _mark_stack->reset(); // discard stack contents | |
7460 _mark_stack->expand(); // expand the stack if possible | |
7461 } | |
7462 | |
// Verification version of push-or-mark: grey obj in the verification
// bit map, check that CMS marking proper also marked it (fatal
// otherwise), and push it on the mark stack for later scanning.
void PushAndMarkVerifyClosure::do_oop(oop obj) {
  assert(obj->is_oop_or_null(), "expected an oop or NULL");
  HeapWord* addr = (HeapWord*)obj;
  if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
    // Oop lies in _span and isn't yet grey or black
    _verification_bm->mark(addr);   // now grey
    if (!_cms_bm->isMarked(addr)) {
      // CMS marking missed an object the verification pass reached:
      // print it and abort.
      oop(addr)->print();
      gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)",
                             addr);
      fatal("... aborting");
    }

    if (!_mark_stack->push(obj)) { // stack overflow
      if (PrintCMSStatistics != 0) {
        gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
                               SIZE_FORMAT, _mark_stack->capacity());
      }
      assert(_mark_stack->isFull(), "Else push should have succeeded");
      handle_stack_overflow(addr);
    }
    // anything including and to the right of _finger
    // will be scanned as we iterate over the remainder of the
    // bit map
  }
}
7489 | |
// Closure used by MarkFromRootsClosure::scanOopsInOop() to grey
// interior oops and, when they lie behind the finger, push them on
// the marking stack for later scanning.
PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
                     MemRegion span,
                     CMSBitMap* bitMap, CMSMarkStack* markStack,
                     CMSMarkStack* revisitStack,
                     HeapWord* finger, MarkFromRootsClosure* parent) :
  KlassRememberingOopClosure(collector, collector->ref_processor(), revisitStack),
  _span(span),
  _bitMap(bitMap),
  _markStack(markStack),
  _finger(finger),
  _parent(parent)
{ }
7502 | |
// Parallel analogue of PushOrMarkClosure: used by
// Par_MarkFromRootsClosure::scan_oops_in_oop(); pushes onto the
// worker's queue (overflowing into the shared overflow stack) and
// consults both the local and the global finger.
Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
                     MemRegion span,
                     CMSBitMap* bit_map,
                     OopTaskQueue* work_queue,
                     CMSMarkStack* overflow_stack,
                     CMSMarkStack* revisit_stack,
                     HeapWord* finger,
                     HeapWord** global_finger_addr,
                     Par_MarkFromRootsClosure* parent) :
  Par_KlassRememberingOopClosure(collector,
                                 collector->ref_processor(),
                                 revisit_stack),
  _whole_span(collector->_span),
  _span(span),
  _bit_map(bit_map),
  _work_queue(work_queue),
  _overflow_stack(overflow_stack),
  _finger(finger),
  _global_finger_addr(global_finger_addr),
  _parent(parent)
{ }
7524 | |
340
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
7525 // Assumes thread-safe access by callers, who are |
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
7526 // responsible for mutual exclusion. |
0 | 7527 void CMSCollector::lower_restart_addr(HeapWord* low) { |
7528 assert(_span.contains(low), "Out of bounds addr"); | |
7529 if (_restart_addr == NULL) { | |
7530 _restart_addr = low; | |
7531 } else { | |
7532 _restart_addr = MIN2(_restart_addr, low); | |
7533 } | |
7534 } | |
7535 | |
7536 // Upon stack overflow, we discard (part of) the stack, | |
7537 // remembering the least address amongst those discarded | |
7538 // in CMSCollector's _restart_address. | |
7539 void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) { | |
7540 // Remember the least grey address discarded | |
7541 HeapWord* ra = (HeapWord*)_markStack->least_value(lost); | |
7542 _collector->lower_restart_addr(ra); | |
7543 _markStack->reset(); // discard stack contents | |
7544 _markStack->expand(); // expand the stack if possible | |
7545 } | |
7546 | |
7547 // Upon stack overflow, we discard (part of) the stack, | |
7548 // remembering the least address amongst those discarded | |
7549 // in CMSCollector's _restart_address. | |
7550 void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) { | |
7551 // We need to do this under a mutex to prevent other | |
340
ebeb6490b814
6722116: CMS: Incorrect overflow handling when using parallel concurrent marking
ysr
parents:
283
diff
changeset
|
7552 // workers from interfering with the work done below. |
0 | 7553 MutexLockerEx ml(_overflow_stack->par_lock(), |
7554 Mutex::_no_safepoint_check_flag); | |
7555 // Remember the least grey address discarded | |
7556 HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost); | |
7557 _collector->lower_restart_addr(ra); | |
7558 _overflow_stack->reset(); // discard stack contents | |
7559 _overflow_stack->expand(); // expand the stack if possible | |
7560 } | |
7561 | |
// Grey obj in the bit map if it lies in _span and is unmarked; if the
// bit map iteration has already passed it (addr < _finger), push it
// on the marking stack so it still gets scanned. Objects at or beyond
// the finger are left for the ongoing bit map iteration.
void PushOrMarkClosure::do_oop(oop obj) {
  // Ignore mark word because we are running concurrent with mutators.
  assert(obj->is_oop_or_null(true), "expected an oop or NULL");
  HeapWord* addr = (HeapWord*)obj;
  if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
    // Oop lies in _span and isn't yet grey or black
    _bitMap->mark(addr);            // now grey
    if (addr < _finger) {
      // the bit map iteration has already either passed, or
      // sampled, this bit in the bit map; we'll need to
      // use the marking stack to scan this oop's oops.
      bool simulate_overflow = false;
      NOT_PRODUCT(
        if (CMSMarkStackOverflowALot &&
            _collector->simulate_overflow()) {
          // simulate a stack overflow
          simulate_overflow = true;
        }
      )
      if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
        if (PrintCMSStatistics != 0) {
          gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
                                 SIZE_FORMAT, _markStack->capacity());
        }
        assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
        handle_stack_overflow(addr);
      }
    }
    // anything including and to the right of _finger
    // will be scanned as we iterate over the remainder of the
    // bit map
    do_yield_check();
  }
}
7596 | |
// Dispatch both the wide-oop and narrow-oop variants to the common
// template worker, do_oop_work().
void PushOrMarkClosure::do_oop(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
7600 void Par_PushOrMarkClosure::do_oop(oop obj) { |
0 | 7601 // Ignore mark word because we are running concurrent with mutators. |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
7602 assert(obj->is_oop_or_null(true), "expected an oop or NULL"); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
7603 HeapWord* addr = (HeapWord*)obj; |
0 | 7604 if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) { |
7605 // Oop lies in _span and isn't yet grey or black | |
7606 // We read the global_finger (volatile read) strictly after marking oop | |
7607 bool res = _bit_map->par_mark(addr); // now grey | |
7608 volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr; | |
7609 // Should we push this marked oop on our stack? | |
7610 // -- if someone else marked it, nothing to do | |
7611 // -- if target oop is above global finger nothing to do | |
7612 // -- if target oop is in chunk and above local finger | |
7613 // then nothing to do | |
7614 // -- else push on work queue | |
7615 if ( !res // someone else marked it, they will deal with it | |
7616 || (addr >= *gfa) // will be scanned in a later task | |
7617 || (_span.contains(addr) && addr >= _finger)) { // later in this chunk | |
7618 return; | |
7619 } | |
7620 // the bit map iteration has already either passed, or | |
7621 // sampled, this bit in the bit map; we'll need to | |
7622 // use the marking stack to scan this oop's oops. | |
7623 bool simulate_overflow = false; | |
7624 NOT_PRODUCT( | |
7625 if (CMSMarkStackOverflowALot && | |
7626 _collector->simulate_overflow()) { | |
7627 // simulate a stack overflow | |
7628 simulate_overflow = true; | |
7629 } | |
7630 ) | |
7631 if (simulate_overflow || | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
7632 !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) { |
0 | 7633 // stack overflow |
7634 if (PrintCMSStatistics != 0) { | |
7635 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at " | |
7636 SIZE_FORMAT, _overflow_stack->capacity()); | |
7637 } | |
7638 // We cannot assert that the overflow stack is full because | |
7639 // it may have been emptied since. | |
7640 assert(simulate_overflow || | |
7641 _work_queue->size() == _work_queue->max_elems(), | |
7642 "Else push should have succeeded"); | |
7643 handle_stack_overflow(addr); | |
7644 } | |
7645 do_yield_check(); | |
7646 } | |
7647 } | |
7648 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
7649 void Par_PushOrMarkClosure::do_oop(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
7650 void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); } |
0 | 7651 |
935 | 7652 KlassRememberingOopClosure::KlassRememberingOopClosure(CMSCollector* collector, |
7653 ReferenceProcessor* rp, | |
7654 CMSMarkStack* revisit_stack) : | |
7655 OopClosure(rp), | |
7656 _collector(collector), | |
7657 _revisit_stack(revisit_stack), | |
7658 _should_remember_klasses(collector->should_unload_classes()) {} | |
7659 | |
0 | 7660 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector, |
7661 MemRegion span, | |
7662 ReferenceProcessor* rp, | |
7663 CMSBitMap* bit_map, | |
7664 CMSBitMap* mod_union_table, | |
7665 CMSMarkStack* mark_stack, | |
7666 CMSMarkStack* revisit_stack, | |
7667 bool concurrent_precleaning): | |
935 | 7668 KlassRememberingOopClosure(collector, rp, revisit_stack), |
0 | 7669 _span(span), |
7670 _bit_map(bit_map), | |
7671 _mod_union_table(mod_union_table), | |
7672 _mark_stack(mark_stack), | |
935 | 7673 _concurrent_precleaning(concurrent_precleaning) |
0 | 7674 { |
7675 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL"); | |
7676 } | |
7677 | |
7678 // Grey object rescan during pre-cleaning and second checkpoint phases -- | |
7679 // the non-parallel version (the parallel version appears further below.) | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
7680 void PushAndMarkClosure::do_oop(oop obj) { |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
143
diff
changeset
|
7681 // Ignore mark word verification. If during concurrent precleaning, |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
143
diff
changeset
|
7682 // the object monitor may be locked. If during the checkpoint |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
143
diff
changeset
|
7683 // phases, the object may already have been reached by a different |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
143
diff
changeset
|
7684 // path and may be at the end of the global overflow list (so |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
143
diff
changeset
|
7685 // the mark word may be NULL). |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
143
diff
changeset
|
7686 assert(obj->is_oop_or_null(true /* ignore mark word */), |
0 | 7687 "expected an oop or NULL"); |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
7688 HeapWord* addr = (HeapWord*)obj; |
0 | 7689 // Check if oop points into the CMS generation |
7690 // and is not marked | |
7691 if (_span.contains(addr) && !_bit_map->isMarked(addr)) { | |
7692 // a white object ... | |
7693 _bit_map->mark(addr); // ... now grey | |
7694 // push on the marking stack (grey set) | |
7695 bool simulate_overflow = false; | |
7696 NOT_PRODUCT( | |
7697 if (CMSMarkStackOverflowALot && | |
7698 _collector->simulate_overflow()) { | |
7699 // simulate a stack overflow | |
7700 simulate_overflow = true; | |
7701 } | |
7702 ) | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
7703 if (simulate_overflow || !_mark_stack->push(obj)) { |
0 | 7704 if (_concurrent_precleaning) { |
283
9199f248b0ee
6722112: CMS: Incorrect encoding of overflown object arrays during concurrent precleaning
ysr
parents:
271
diff
changeset
|
7705 // During precleaning we can just dirty the appropriate card(s) |
0 | 7706 // in the mod union table, thus ensuring that the object remains |
283
9199f248b0ee
6722112: CMS: Incorrect encoding of overflown object arrays during concurrent precleaning
ysr
parents:
271
diff
changeset
|
7707 // in the grey set and continue. In the case of object arrays |
9199f248b0ee
6722112: CMS: Incorrect encoding of overflown object arrays during concurrent precleaning
ysr
parents:
271
diff
changeset
|
7708 // we need to dirty all of the cards that the object spans, |
9199f248b0ee
6722112: CMS: Incorrect encoding of overflown object arrays during concurrent precleaning
ysr
parents:
271
diff
changeset
|
7709 // since the rescan of object arrays will be limited to the |
9199f248b0ee
6722112: CMS: Incorrect encoding of overflown object arrays during concurrent precleaning
ysr
parents:
271
diff
changeset
|
7710 // dirty cards. |
9199f248b0ee
6722112: CMS: Incorrect encoding of overflown object arrays during concurrent precleaning
ysr
parents:
271
diff
changeset
|
7711 // Note that no one can be intefering with us in this action |
9199f248b0ee
6722112: CMS: Incorrect encoding of overflown object arrays during concurrent precleaning
ysr
parents:
271
diff
changeset
|
7712 // of dirtying the mod union table, so no locking or atomics |
9199f248b0ee
6722112: CMS: Incorrect encoding of overflown object arrays during concurrent precleaning
ysr
parents:
271
diff
changeset
|
7713 // are required. |
9199f248b0ee
6722112: CMS: Incorrect encoding of overflown object arrays during concurrent precleaning
ysr
parents:
271
diff
changeset
|
7714 if (obj->is_objArray()) { |
9199f248b0ee
6722112: CMS: Incorrect encoding of overflown object arrays during concurrent precleaning
ysr
parents:
271
diff
changeset
|
7715 size_t sz = obj->size(); |
9199f248b0ee
6722112: CMS: Incorrect encoding of overflown object arrays during concurrent precleaning
ysr
parents:
271
diff
changeset
|
7716 HeapWord* end_card_addr = (HeapWord*)round_to( |
9199f248b0ee
6722112: CMS: Incorrect encoding of overflown object arrays during concurrent precleaning
ysr
parents:
271
diff
changeset
|
7717 (intptr_t)(addr+sz), CardTableModRefBS::card_size); |
9199f248b0ee
6722112: CMS: Incorrect encoding of overflown object arrays during concurrent precleaning
ysr
parents:
271
diff
changeset
|
7718 MemRegion redirty_range = MemRegion(addr, end_card_addr); |
9199f248b0ee
6722112: CMS: Incorrect encoding of overflown object arrays during concurrent precleaning
ysr
parents:
271
diff
changeset
|
7719 assert(!redirty_range.is_empty(), "Arithmetical tautology"); |
9199f248b0ee
6722112: CMS: Incorrect encoding of overflown object arrays during concurrent precleaning
ysr
parents:
271
diff
changeset
|
7720 _mod_union_table->mark_range(redirty_range); |
9199f248b0ee
6722112: CMS: Incorrect encoding of overflown object arrays during concurrent precleaning
ysr
parents:
271
diff
changeset
|
7721 } else { |
9199f248b0ee
6722112: CMS: Incorrect encoding of overflown object arrays during concurrent precleaning
ysr
parents:
271
diff
changeset
|
7722 _mod_union_table->mark(addr); |
9199f248b0ee
6722112: CMS: Incorrect encoding of overflown object arrays during concurrent precleaning
ysr
parents:
271
diff
changeset
|
7723 } |
0 | 7724 _collector->_ser_pmc_preclean_ovflw++; |
7725 } else { | |
7726 // During the remark phase, we need to remember this oop | |
7727 // in the overflow list. | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
7728 _collector->push_on_overflow_list(obj); |
0 | 7729 _collector->_ser_pmc_remark_ovflw++; |
7730 } | |
7731 } | |
7732 } | |
7733 } | |
7734 | |
7735 Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector, | |
7736 MemRegion span, | |
7737 ReferenceProcessor* rp, | |
7738 CMSBitMap* bit_map, | |
7739 OopTaskQueue* work_queue, | |
7740 CMSMarkStack* revisit_stack): | |
935 | 7741 Par_KlassRememberingOopClosure(collector, rp, revisit_stack), |
0 | 7742 _span(span), |
7743 _bit_map(bit_map), | |
935 | 7744 _work_queue(work_queue) |
0 | 7745 { |
7746 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL"); | |
7747 } | |
7748 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
7749 void PushAndMarkClosure::do_oop(oop* p) { PushAndMarkClosure::do_oop_work(p); } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
7750 void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
7751 |
0 | 7752 // Grey object rescan during second checkpoint phase -- |
7753 // the parallel version. | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
7754 void Par_PushAndMarkClosure::do_oop(oop obj) { |
0 | 7755 // In the assert below, we ignore the mark word because |
7756 // this oop may point to an already visited object that is | |
7757 // on the overflow stack (in which case the mark word has | |
7758 // been hijacked for chaining into the overflow stack -- | |
7759 // if this is the last object in the overflow stack then | |
7760 // its mark word will be NULL). Because this object may | |
7761 // have been subsequently popped off the global overflow | |
7762 // stack, and the mark word possibly restored to the prototypical | |
7763 // value, by the time we get to examined this failing assert in | |
7764 // the debugger, is_oop_or_null(false) may subsequently start | |
7765 // to hold. | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
7766 assert(obj->is_oop_or_null(true), |
0 | 7767 "expected an oop or NULL"); |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
7768 HeapWord* addr = (HeapWord*)obj; |
0 | 7769 // Check if oop points into the CMS generation |
7770 // and is not marked | |
7771 if (_span.contains(addr) && !_bit_map->isMarked(addr)) { | |
7772 // a white object ... | |
7773 // If we manage to "claim" the object, by being the | |
7774 // first thread to mark it, then we push it on our | |
7775 // marking stack | |
7776 if (_bit_map->par_mark(addr)) { // ... now grey | |
7777 // push on work queue (grey set) | |
7778 bool simulate_overflow = false; | |
7779 NOT_PRODUCT( | |
7780 if (CMSMarkStackOverflowALot && | |
7781 _collector->par_simulate_overflow()) { | |
7782 // simulate a stack overflow | |
7783 simulate_overflow = true; | |
7784 } | |
7785 ) | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
7786 if (simulate_overflow || !_work_queue->push(obj)) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
7787 _collector->par_push_on_overflow_list(obj); |
0 | 7788 _collector->_par_pmc_remark_ovflw++; // imprecise OK: no need to CAS |
7789 } | |
7790 } // Else, some other thread got there first | |
7791 } | |
7792 } | |
7793 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
7794 void Par_PushAndMarkClosure::do_oop(oop* p) { Par_PushAndMarkClosure::do_oop_work(p); } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
7795 void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
7796 |
941 | 7797 void PushAndMarkClosure::remember_mdo(DataLayout* v) { |
7798 // TBD | |
7799 } | |
7800 | |
7801 void Par_PushAndMarkClosure::remember_mdo(DataLayout* v) { | |
7802 // TBD | |
7803 } | |
7804 | |
0 | 7805 void CMSPrecleanRefsYieldClosure::do_yield_work() { |
935 | 7806 DEBUG_ONLY(RememberKlassesChecker mux(false);) |
0 | 7807 Mutex* bml = _collector->bitMapLock(); |
7808 assert_lock_strong(bml); | |
7809 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), | |
7810 "CMS thread should hold CMS token"); | |
7811 | |
7812 bml->unlock(); | |
7813 ConcurrentMarkSweepThread::desynchronize(true); | |
7814 | |
7815 ConcurrentMarkSweepThread::acknowledge_yield_request(); | |
7816 | |
7817 _collector->stopTimer(); | |
7818 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr()); | |
7819 if (PrintCMSStatistics != 0) { | |
7820 _collector->incrementYields(); | |
7821 } | |
7822 _collector->icms_wait(); | |
7823 | |
7824 // See the comment in coordinator_yield() | |
7825 for (unsigned i = 0; i < CMSYieldSleepCount && | |
7826 ConcurrentMarkSweepThread::should_yield() && | |
7827 !CMSCollector::foregroundGCIsActive(); ++i) { | |
7828 os::sleep(Thread::current(), 1, false); | |
7829 ConcurrentMarkSweepThread::acknowledge_yield_request(); | |
7830 } | |
7831 | |
7832 ConcurrentMarkSweepThread::synchronize(true); | |
7833 bml->lock(); | |
7834 | |
7835 _collector->startTimer(); | |
7836 } | |
7837 | |
7838 bool CMSPrecleanRefsYieldClosure::should_return() { | |
7839 if (ConcurrentMarkSweepThread::should_yield()) { | |
7840 do_yield_work(); | |
7841 } | |
7842 return _collector->foregroundGCIsActive(); | |
7843 } | |
7844 | |
7845 void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) { | |
7846 assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0, | |
7847 "mr should be aligned to start at a card boundary"); | |
7848 // We'd like to assert: | |
7849 // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0, | |
7850 // "mr should be a range of cards"); | |
7851 // However, that would be too strong in one case -- the last | |
7852 // partition ends at _unallocated_block which, in general, can be | |
7853 // an arbitrary boundary, not necessarily card aligned. | |
7854 if (PrintCMSStatistics != 0) { | |
7855 _num_dirty_cards += | |
7856 mr.word_size()/CardTableModRefBS::card_size_in_words; | |
7857 } | |
7858 _space->object_iterate_mem(mr, &_scan_cl); | |
7859 } | |
7860 | |
7861 SweepClosure::SweepClosure(CMSCollector* collector, | |
7862 ConcurrentMarkSweepGeneration* g, | |
7863 CMSBitMap* bitMap, bool should_yield) : | |
7864 _collector(collector), | |
7865 _g(g), | |
7866 _sp(g->cmsSpace()), | |
7867 _limit(_sp->sweep_limit()), | |
7868 _freelistLock(_sp->freelistLock()), | |
7869 _bitMap(bitMap), | |
7870 _yield(should_yield), | |
7871 _inFreeRange(false), // No free range at beginning of sweep | |
7872 _freeRangeInFreeLists(false), // No free range at beginning of sweep | |
7873 _lastFreeRangeCoalesced(false), | |
7874 _freeFinger(g->used_region().start()) | |
7875 { | |
7876 NOT_PRODUCT( | |
7877 _numObjectsFreed = 0; | |
7878 _numWordsFreed = 0; | |
7879 _numObjectsLive = 0; | |
7880 _numWordsLive = 0; | |
7881 _numObjectsAlreadyFree = 0; | |
7882 _numWordsAlreadyFree = 0; | |
7883 _last_fc = NULL; | |
7884 | |
7885 _sp->initializeIndexedFreeListArrayReturnedBytes(); | |
7886 _sp->dictionary()->initializeDictReturnedBytes(); | |
7887 ) | |
7888 assert(_limit >= _sp->bottom() && _limit <= _sp->end(), | |
7889 "sweep _limit out of bounds"); | |
7890 if (CMSTraceSweeper) { | |
7891 gclog_or_tty->print("\n====================\nStarting new sweep\n"); | |
7892 } | |
7893 } | |
7894 | |
7895 // We need this destructor to reclaim any space at the end | |
2132
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1994
diff
changeset
|
7896 // of the space, which do_blk below may not yet have added back to |
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1994
diff
changeset
|
7897 // the free lists. |
0 | 7898 SweepClosure::~SweepClosure() { |
7899 assert_lock_strong(_freelistLock); | |
2132
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1994
diff
changeset
|
7900 assert(_limit >= _sp->bottom() && _limit <= _sp->end(), |
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1994
diff
changeset
|
7901 "sweep _limit out of bounds"); |
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1994
diff
changeset
|
7902 // Flush any remaining coterminal free run as a single |
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1994
diff
changeset
|
7903 // coalesced chunk to the appropriate free list. |
0 | 7904 if (inFreeRange()) { |
2132
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1994
diff
changeset
|
7905 assert(freeFinger() < _limit, "freeFinger points too high"); |
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1994
diff
changeset
|
7906 flush_cur_free_chunk(freeFinger(), pointer_delta(_limit, freeFinger())); |
0 | 7907 if (CMSTraceSweeper) { |
2132
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1994
diff
changeset
|
7908 gclog_or_tty->print("Sweep: last chunk: "); |
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1994
diff
changeset
|
7909 gclog_or_tty->print("put_free_blk 0x%x ("SIZE_FORMAT") [coalesced:"SIZE_FORMAT"]\n", |
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1994
diff
changeset
|
7910 freeFinger(), pointer_delta(_limit, freeFinger()), lastFreeRangeCoalesced()); |
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1994
diff
changeset
|
7911 } |
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1994
diff
changeset
|
7912 } // else nothing to flush |
0 | 7913 NOT_PRODUCT( |
7914 if (Verbose && PrintGC) { | |
7915 gclog_or_tty->print("Collected "SIZE_FORMAT" objects, " | |
7916 SIZE_FORMAT " bytes", | |
7917 _numObjectsFreed, _numWordsFreed*sizeof(HeapWord)); | |
7918 gclog_or_tty->print_cr("\nLive "SIZE_FORMAT" objects, " | |
7919 SIZE_FORMAT" bytes " | |
7920 "Already free "SIZE_FORMAT" objects, "SIZE_FORMAT" bytes", | |
7921 _numObjectsLive, _numWordsLive*sizeof(HeapWord), | |
7922 _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord)); | |
7923 size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) * | |
7924 sizeof(HeapWord); | |
7925 gclog_or_tty->print_cr("Total sweep: "SIZE_FORMAT" bytes", totalBytes); | |
7926 | |
7927 if (PrintCMSStatistics && CMSVerifyReturnedBytes) { | |
7928 size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes(); | |
7929 size_t dictReturnedBytes = _sp->dictionary()->sumDictReturnedBytes(); | |
7930 size_t returnedBytes = indexListReturnedBytes + dictReturnedBytes; | |
7931 gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returnedBytes); | |
7932 gclog_or_tty->print(" Indexed List Returned "SIZE_FORMAT" bytes", | |
7933 indexListReturnedBytes); | |
7934 gclog_or_tty->print_cr(" Dictionary Returned "SIZE_FORMAT" bytes", | |
7935 dictReturnedBytes); | |
7936 } | |
7937 } | |
7938 ) | |
7939 // Now, in debug mode, just null out the sweep_limit | |
7940 NOT_PRODUCT(_sp->clear_sweep_limit();) | |
7941 if (CMSTraceSweeper) { | |
7942 gclog_or_tty->print("end of sweep\n================\n"); | |
7943 } | |
7944 } | |
7945 | |
7946 void SweepClosure::initialize_free_range(HeapWord* freeFinger, | |
7947 bool freeRangeInFreeLists) { | |
7948 if (CMSTraceSweeper) { | |
2132
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1994
diff
changeset
|
7949 gclog_or_tty->print("---- Start free range at 0x%x with free block (%d)\n", |
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1994
diff
changeset
|
7950 freeFinger, freeRangeInFreeLists); |
0 | 7951 } |
7952 assert(!inFreeRange(), "Trampling existing free range"); | |
7953 set_inFreeRange(true); | |
7954 set_lastFreeRangeCoalesced(false); | |
7955 | |
7956 set_freeFinger(freeFinger); | |
7957 set_freeRangeInFreeLists(freeRangeInFreeLists); | |
7958 if (CMSTestInFreeList) { | |
7959 if (freeRangeInFreeLists) { | |
7960 FreeChunk* fc = (FreeChunk*) freeFinger; | |
7961 assert(fc->isFree(), "A chunk on the free list should be free."); | |
7962 assert(fc->size() > 0, "Free range should have a size"); | |
7963 assert(_sp->verifyChunkInFreeLists(fc), "Chunk is not in free lists"); | |
7964 } | |
7965 } | |
7966 } | |
7967 | |
7968 // Note that the sweeper runs concurrently with mutators. Thus, | |
7969 // it is possible for direct allocation in this generation to happen | |
7970 // in the middle of the sweep. Note that the sweeper also coalesces | |
7971 // contiguous free blocks. Thus, unless the sweeper and the allocator | |
7972 // synchronize appropriately freshly allocated blocks may get swept up. | |
7973 // This is accomplished by the sweeper locking the free lists while | |
7974 // it is sweeping. Thus blocks that are determined to be free are | |
7975 // indeed free. There is however one additional complication: | |
7976 // blocks that have been allocated since the final checkpoint and | |
7977 // mark, will not have been marked and so would be treated as | |
7978 // unreachable and swept up. To prevent this, the allocator marks | |
7979 // the bit map when allocating during the sweep phase. This leads, | |
7980 // however, to a further complication -- objects may have been allocated | |
7981 // but not yet initialized -- in the sense that the header isn't yet | |
7982 // installed. The sweeper can not then determine the size of the block | |
7983 // in order to skip over it. To deal with this case, we use a technique | |
7984 // (due to Printezis) to encode such uninitialized block sizes in the | |
7985 // bit map. Since the bit map uses a bit per every HeapWord, but the | |
7986 // CMS generation has a minimum object size of 3 HeapWords, it follows | |
7987 // that "normal marks" won't be adjacent in the bit map (there will | |
7988 // always be at least two 0 bits between successive 1 bits). We make use | |
7989 // of these "unused" bits to represent uninitialized blocks -- the bit | |
7990 // corresponding to the start of the uninitialized object and the next | |
7991 // bit are both set. Finally, a 1 bit marks the end of the object that | |
7992 // started with the two consecutive 1 bits to indicate its potentially | |
7993 // uninitialized state. | |
7994 | |
7995 size_t SweepClosure::do_blk_careful(HeapWord* addr) { | |
7996 FreeChunk* fc = (FreeChunk*)addr; | |
7997 size_t res; | |
7998 | |
1720
5ed703250bff
6977970: CMS: concurrentMarkSweepGeneration.cpp:7947 assert(addr <= _limit) failed: sweep invariant
ysr
parents:
1716
diff
changeset
|
7999 // Check if we are done sweeping. Below we check "addr >= _limit" rather |
5ed703250bff
6977970: CMS: concurrentMarkSweepGeneration.cpp:7947 assert(addr <= _limit) failed: sweep invariant
ysr
parents:
1716
diff
changeset
|
8000 // than "addr == _limit" because although _limit was a block boundary when |
5ed703250bff
6977970: CMS: concurrentMarkSweepGeneration.cpp:7947 assert(addr <= _limit) failed: sweep invariant
ysr
parents:
1716
diff
changeset
|
8001 // we started the sweep, it may no longer be one because heap expansion |
5ed703250bff
6977970: CMS: concurrentMarkSweepGeneration.cpp:7947 assert(addr <= _limit) failed: sweep invariant
ysr
parents:
1716
diff
changeset
|
8002 // may have caused us to coalesce the block ending at the address _limit |
5ed703250bff
6977970: CMS: concurrentMarkSweepGeneration.cpp:7947 assert(addr <= _limit) failed: sweep invariant
ysr
parents:
1716
diff
changeset
|
8003 // with a newly expanded chunk (this happens when _limit was set to the |
5ed703250bff
6977970: CMS: concurrentMarkSweepGeneration.cpp:7947 assert(addr <= _limit) failed: sweep invariant
ysr
parents:
1716
diff
changeset
|
8004 // previous _end of the space), so we may have stepped past _limit; see CR 6977970. |
2132
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1994
diff
changeset
|
8005 if (addr >= _limit) { // we have swept up to or past the limit: finish up |
0 | 8006 assert(_limit >= _sp->bottom() && _limit <= _sp->end(), |
8007 "sweep _limit out of bounds"); | |
1720
5ed703250bff
6977970: CMS: concurrentMarkSweepGeneration.cpp:7947 assert(addr <= _limit) failed: sweep invariant
ysr
parents:
1716
diff
changeset
|
8008 assert(addr < _sp->end(), "addr out of bounds"); |
2132
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1994
diff
changeset
|
8009 // Flush any remaining coterminal free run as a single |
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1994
diff
changeset
|
8010 // coalesced chunk to the appropriate free list. |
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1994
diff
changeset
|
8011 if (inFreeRange()) { |
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1994
diff
changeset
|
8012 assert(freeFinger() < _limit, "finger points too high"); |
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1994
diff
changeset
|
8013 flush_cur_free_chunk(freeFinger(), |
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1994
diff
changeset
|
8014 pointer_delta(addr, freeFinger())); |
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1994
diff
changeset
|
8015 if (CMSTraceSweeper) { |
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1994
diff
changeset
|
8016 gclog_or_tty->print("Sweep: last chunk: "); |
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1994
diff
changeset
|
8017 gclog_or_tty->print("put_free_blk 0x%x ("SIZE_FORMAT") " |
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1994
diff
changeset
|
8018 "[coalesced:"SIZE_FORMAT"]\n", |
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1994
diff
changeset
|
8019 freeFinger(), pointer_delta(addr, freeFinger()), |
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1994
diff
changeset
|
8020 lastFreeRangeCoalesced()); |
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1994
diff
changeset
|
8021 } |
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1994
diff
changeset
|
8022 } |
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1994
diff
changeset
|
8023 |
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1994
diff
changeset
|
8024 // help the iterator loop finish |
1720
5ed703250bff
6977970: CMS: concurrentMarkSweepGeneration.cpp:7947 assert(addr <= _limit) failed: sweep invariant
ysr
parents:
1716
diff
changeset
|
8025 return pointer_delta(_sp->end(), addr); |
5ed703250bff
6977970: CMS: concurrentMarkSweepGeneration.cpp:7947 assert(addr <= _limit) failed: sweep invariant
ysr
parents:
1716
diff
changeset
|
8026 } |
2132
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1994
diff
changeset
|
8027 |
1720
5ed703250bff
6977970: CMS: concurrentMarkSweepGeneration.cpp:7947 assert(addr <= _limit) failed: sweep invariant
ysr
parents:
1716
diff
changeset
|
8028 assert(addr < _limit, "sweep invariant"); |
0 | 8029 // check if we should yield |
8030 do_yield_check(addr); | |
8031 if (fc->isFree()) { | |
8032 // Chunk that is already free | |
8033 res = fc->size(); | |
2132
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1994
diff
changeset
|
8034 do_already_free_chunk(fc); |
0 | 8035 debug_only(_sp->verifyFreeLists()); |
8036 assert(res == fc->size(), "Don't expect the size to change"); | |
8037 NOT_PRODUCT( | |
8038 _numObjectsAlreadyFree++; | |
8039 _numWordsAlreadyFree += res; | |
8040 ) | |
8041 NOT_PRODUCT(_last_fc = fc;) | |
8042 } else if (!_bitMap->isMarked(addr)) { | |
8043 // Chunk is fresh garbage | |
2132
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1994
diff
changeset
|
8044 res = do_garbage_chunk(fc); |
0 | 8045 debug_only(_sp->verifyFreeLists()); |
8046 NOT_PRODUCT( | |
8047 _numObjectsFreed++; | |
8048 _numWordsFreed += res; | |
8049 ) | |
8050 } else { | |
8051 // Chunk that is alive. | |
2132
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1994
diff
changeset
|
8052 res = do_live_chunk(fc); |
0 | 8053 debug_only(_sp->verifyFreeLists()); |
8054 NOT_PRODUCT( | |
8055 _numObjectsLive++; | |
8056 _numWordsLive += res; | |
8057 ) | |
8058 } | |
8059 return res; | |
8060 } | |
8061 | |
8062 // For the smart allocation, record following | |
8063 // split deaths - a free chunk is removed from its free list because | |
8064 // it is being split into two or more chunks. | |
8065 // split birth - a free chunk is being added to its free list because | |
8066 // a larger free chunk has been split and resulted in this free chunk. | |
8067 // coal death - a free chunk is being removed from its free list because | |
8068 // it is being coalesced into a large free chunk. | |
8069 // coal birth - a free chunk is being added to its free list because | |
8070 // it was created when two or more free chunks were coalesced into
8071 // this free chunk. | |
8072 // | |
8073 // These statistics are used to determine the desired number of free | |
8074 // chunks of a given size. The desired number is chosen to be relative | |
8075 // to the end of a CMS sweep. The desired number at the end of a sweep | |
8076 // is the | |
8077 // count-at-end-of-previous-sweep (an amount that was enough) | |
8078 // - count-at-beginning-of-current-sweep (the excess) | |
8079 // + split-births (gains in this size during interval) | |
8080 // - split-deaths (demands on this size during interval) | |
8081 // where the interval is from the end of one sweep to the end of the | |
8082 // next. | |
8083 // | |
8084 // When sweeping the sweeper maintains an accumulated chunk which is | |
8085 // the chunk that is made up of chunks that have been coalesced. That | |
8086 // will be termed the left-hand chunk. A new chunk of garbage that | |
8087 // is being considered for coalescing will be referred to as the | |
8088 // right-hand chunk. | |
8089 // | |
8090 // When making a decision on whether to coalesce a right-hand chunk with | |
8091 // the current left-hand chunk, the current count vs. the desired count | |
8092 // of the left-hand chunk is considered. Also if the right-hand chunk | |
8093 // is near the large chunk at the end of the heap (see | |
8094 // ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the | |
8095 // left-hand chunk is coalesced. | |
8096 // | |
8097 // When making a decision about whether to split a chunk, the desired count | |
8098 // vs. the current count of the candidate to be split is also considered. | |
8099 // If the candidate is underpopulated (currently fewer chunks than desired) | |
8100 // a chunk of an overpopulated (currently more chunks than desired) size may | |
8101 // be chosen. The "hint" associated with a free list, if non-null, points | |
8102 // to a free list which may be overpopulated. | |
8103 // | |
8104 | |
2132
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1994
diff
changeset
|
// Process a chunk the sweeper found already free (i.e. fc->isFree()).
// Depending on the allocator flavor this either delegates to the
// adaptive-freelist bookkeeping (do_post_free_or_garbage_chunk) or
// manually maintains the sweeper's current left-hand coalescing range.
// Chunks marked cantCoalesce() terminate any free range in progress.
void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
  size_t size = fc->size();
  // Chunks that cannot be coalesced are not in the
  // free lists.
  if (CMSTestInFreeList && !fc->cantCoalesce()) {
    assert(_sp->verifyChunkInFreeLists(fc),
      "free chunk should be in free lists");
  }
  // a chunk that is already free, should not have been
  // marked in the bit map
  HeapWord* addr = (HeapWord*) fc;
  assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
  // Verify that the bit map has no bits marked between
  // addr and purported end of this block.
  _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);

  // Some chunks cannot be coalesced under any circumstances.
  // See the definition of cantCoalesce().
  if (!fc->cantCoalesce()) {
    // This chunk can potentially be coalesced.
    if (_sp->adaptive_freelists()) {
      // All the work is done in do_post_free_or_garbage_chunk().
      do_post_free_or_garbage_chunk(fc, size);
    } else {  // Not adaptive free lists
      // this is a free chunk that can potentially be coalesced by the sweeper;
      if (!inFreeRange()) {
        // if the next chunk is a free block that can't be coalesced
        // it doesn't make sense to remove this chunk from the free lists
        FreeChunk* nextChunk = (FreeChunk*)(addr + size);
        assert((HeapWord*)nextChunk <= _sp->end(), "Chunk size out of bounds?");
        if ((HeapWord*)nextChunk < _sp->end() &&  // There is another free chunk to the right ...
            nextChunk->isFree() &&                // ... which is free...
            nextChunk->cantCoalesce()) {          // ... but can't be coalesced
          // nothing to do
        } else {
          // Potentially the start of a new free range:
          // Don't eagerly remove it from the free lists.
          // No need to remove it if it will just be put
          // back again.  (Also from a pragmatic point of view
          // if it is a free block in a region that is beyond
          // any allocated blocks, an assertion will fail)
          // Remember the start of a free run.
          initialize_free_range(addr, true);
          // end - can coalesce with next chunk
        }
      } else {
        // the midst of a free range, we are coalescing
        debug_only(record_free_block_coalesced(fc);)
        if (CMSTraceSweeper) {
          // NOTE(review): 0x%x/%d are too narrow for a 64-bit pointer and
          // size_t -- confirm against the PTR_FORMAT/SIZE_FORMAT convention.
          gclog_or_tty->print(" -- pick up free block 0x%x (%d)\n", fc, size);
        }
        // remove it from the free lists
        _sp->removeFreeChunkFromFreeLists(fc);
        set_lastFreeRangeCoalesced(true);
        // If the chunk is being coalesced and the current free range is
        // in the free lists, remove the current free range so that it
        // will be returned to the free lists in its entirety - all
        // the coalesced pieces included.
        if (freeRangeInFreeLists()) {
          FreeChunk* ffc = (FreeChunk*) freeFinger();
          assert(ffc->size() == pointer_delta(addr, freeFinger()),
            "Size of free range is inconsistent with chunk size.");
          if (CMSTestInFreeList) {
            assert(_sp->verifyChunkInFreeLists(ffc),
              "free range is not in free lists");
          }
          _sp->removeFreeChunkFromFreeLists(ffc);
          set_freeRangeInFreeLists(false);
        }
      }
    }
  } else {
    // Code path common to both original and adaptive free lists.

    // cant coalesce with previous block; this should be treated
    // as the end of a free run if any
    if (inFreeRange()) {
      // we kicked some butt; time to pick up the garbage
      assert(freeFinger() < addr, "freeFinger points too high");
      flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
    }
    // else, nothing to do, just continue
  }
}
8189 | |
2132
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1994
diff
changeset
|
// Process a dead (unmarked, non-free) chunk: either fold it into the
// current coalescing range or start a new one. Returns the chunk's size
// in heap words, as computed from the dead object's header.
size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
  // This is a chunk of garbage.  It is not in any free list.
  // Add it to a free list or let it possibly be coalesced into
  // a larger chunk.
  HeapWord* addr = (HeapWord*) fc;
  size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());

  if (_sp->adaptive_freelists()) {
    // Verify that the bit map has no bits marked between
    // addr and purported end of just dead object.
    _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);

    do_post_free_or_garbage_chunk(fc, size);
  } else {
    if (!inFreeRange()) {
      // start of a new free range
      assert(size > 0, "A free range should have a size");
      initialize_free_range(addr, false);

    } else {
      // this will be swept up when we hit the end of the
      // free range
      if (CMSTraceSweeper) {
        gclog_or_tty->print(" -- pick up garbage 0x%x (%d) \n", fc, size);
      }
      // If the chunk is being coalesced and the current free range is
      // in the free lists, remove the current free range so that it
      // will be returned to the free lists in its entirety - all
      // the coalesced pieces included.
      if (freeRangeInFreeLists()) {
        FreeChunk* ffc = (FreeChunk*)freeFinger();
        assert(ffc->size() == pointer_delta(addr, freeFinger()),
          "Size of free range is inconsistent with chunk size.");
        if (CMSTestInFreeList) {
          assert(_sp->verifyChunkInFreeLists(ffc),
            "free range is not in free lists");
        }
        _sp->removeFreeChunkFromFreeLists(ffc);
        set_freeRangeInFreeLists(false);
      }
      set_lastFreeRangeCoalesced(true);
    }
    // this will be swept up when we hit the end of the free range

    // Verify that the bit map has no bits marked between
    // addr and purported end of just dead object.
    _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
  }
  return size;
}
8240 | |
2132
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1994
diff
changeset
|
// Process a live (marked) chunk. Any accumulated left-hand free range is
// first flushed back to the free lists. The size of the live block is
// taken from the mark bit map when the bit at addr+1 is set (a Printezis
// mark encoding the block's extent), otherwise from the object header.
// Returns the block size in heap words.
size_t SweepClosure::do_live_chunk(FreeChunk* fc) {
  HeapWord* addr = (HeapWord*) fc;
  // The sweeper has just found a live object. Return any accumulated
  // left hand chunk to the free lists.
  if (inFreeRange()) {
    assert(freeFinger() < addr, "freeFinger points too high");
    flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
  }

  // This object is live: we'd normally expect this to be
  // an oop, and like to assert the following:
  // assert(oop(addr)->is_oop(), "live block should be an oop");
  // However, as we commented above, this may be an object whose
  // header hasn't yet been initialized.
  size_t size;
  assert(_bitMap->isMarked(addr), "Tautology for this control point");
  if (_bitMap->isMarked(addr + 1)) {
    // Determine the size from the bit map, rather than trying to
    // compute it from the object header.
    HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
    size = pointer_delta(nextOneAddr + 1, addr);
    assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
           "alignment problem");

#ifdef DEBUG
    // Cross-check the bit-map-derived size against the header-derived
    // size, but only when the header is known to be parsable/conc-safe.
    if (oop(addr)->klass_or_null() != NULL &&
        ( !_collector->should_unload_classes()
          || (oop(addr)->is_parsable()) &&
             oop(addr)->is_conc_safe())) {
      // Ignore mark word because we are running concurrent with mutators
      assert(oop(addr)->is_oop(true), "live block should be an oop");
      // is_conc_safe is checked before performing this assertion
      // because an object that is not is_conc_safe may yet have
      // the return from size() correct.
      assert(size ==
             CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
             "P-mark and computed size do not agree");
    }
#endif

  } else {
    // This should be an initialized object that's alive.
    assert(oop(addr)->klass_or_null() != NULL &&
           (!_collector->should_unload_classes()
            || oop(addr)->is_parsable()),
           "Should be an initialized object");
    // Note that there are objects used during class redefinition
    // (e.g., merge_cp in VM_RedefineClasses::merge_cp_and_rewrite()
    // which are discarded with their is_conc_safe state still
    // false.  These object may be floating garbage so may be
    // seen here.  If they are floating garbage their size
    // should be attainable from their klass.  Do note that
    // is_conc_safe() is true for oop(addr).
    // Ignore mark word because we are running concurrent with mutators
    assert(oop(addr)->is_oop(true), "live block should be an oop");
    // Verify that the bit map has no bits marked between
    // addr and purported end of this block.
    size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
    assert(size >= 3, "Necessary for Printezis marks to work");
    assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
    DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
  }
  return size;
}
8305 | |
2132
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1994
diff
changeset
|
// Adaptive-freelist bookkeeping for a free or garbage chunk found during
// sweeping: decide (per FLSCoalescePolicy) whether to coalesce the chunk
// into the current left-hand free range, recording coalesce birth/death
// statistics, or to flush the current range and start a new one at fc.
void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
                                                 size_t chunkSize) {
  // do_post_free_or_garbage_chunk() should only be called in the case
  // of the adaptive free list allocator.
  bool fcInFreeLists = fc->isFree();
  assert(_sp->adaptive_freelists(), "Should only be used in this case.");
  assert((HeapWord*)fc <= _limit, "sweep invariant");
  if (CMSTestInFreeList && fcInFreeLists) {
    assert(_sp->verifyChunkInFreeLists(fc), "free chunk is not in free lists");
  }

  if (CMSTraceSweeper) {
    gclog_or_tty->print_cr(" -- pick up another chunk at 0x%x (%d)", fc, chunkSize);
  }

  HeapWord* addr = (HeapWord*) fc;

  bool coalesce;
  // 'left' is the size of the range accumulated so far; 'right' is the
  // candidate chunk being considered for coalescing.
  size_t left  = pointer_delta(addr, freeFinger());
  size_t right = chunkSize;
  switch (FLSCoalescePolicy) {
    // numeric value forms a coalition aggressiveness metric
    case 0:  { // never coalesce
      coalesce = false;
      break;
    }
    case 1: { // coalesce if left & right chunks on overpopulated lists
      coalesce = _sp->coalOverPopulated(left) &&
                 _sp->coalOverPopulated(right);
      break;
    }
    case 2: { // coalesce if left chunk on overpopulated list (default)
      coalesce = _sp->coalOverPopulated(left);
      break;
    }
    case 3: { // coalesce if left OR right chunk on overpopulated list
      coalesce = _sp->coalOverPopulated(left) ||
                 _sp->coalOverPopulated(right);
      break;
    }
    case 4: { // always coalesce
      coalesce = true;
      break;
    }
    default:
      ShouldNotReachHere();
  }

  // Should the current free range be coalesced?
  // If the chunk is in a free range and either we decided to coalesce above
  // or the chunk is near the large block at the end of the heap
  // (isNearLargestChunk() returns true), then coalesce this chunk.
  bool doCoalesce = inFreeRange() &&
    (coalesce || _g->isNearLargestChunk((HeapWord*)fc));
  if (doCoalesce) {
    // Coalesce the current free range on the left with the new
    // chunk on the right.  If either is on a free list,
    // it must be removed from the list and stashed in the closure.
    if (freeRangeInFreeLists()) {
      FreeChunk* ffc = (FreeChunk*)freeFinger();
      assert(ffc->size() == pointer_delta(addr, freeFinger()),
        "Size of free range is inconsistent with chunk size.");
      if (CMSTestInFreeList) {
        assert(_sp->verifyChunkInFreeLists(ffc),
          "Chunk is not in free lists");
      }
      _sp->coalDeath(ffc->size());
      _sp->removeFreeChunkFromFreeLists(ffc);
      set_freeRangeInFreeLists(false);
    }
    if (fcInFreeLists) {
      _sp->coalDeath(chunkSize);
      assert(fc->size() == chunkSize,
        "The chunk has the wrong size or is not in the free lists");
      _sp->removeFreeChunkFromFreeLists(fc);
    }
    set_lastFreeRangeCoalesced(true);
  } else {  // not in a free range and/or should not coalesce
    // Return the current free range and start a new one.
    if (inFreeRange()) {
      // In a free range but cannot coalesce with the right hand chunk.
      // Put the current free range into the free lists.
      flush_cur_free_chunk(freeFinger(),
                           pointer_delta(addr, freeFinger()));
    }
    // Set up for new free range.  Pass along whether the right hand
    // chunk is in the free lists.
    initialize_free_range((HeapWord*)fc, fcInFreeLists);
  }
}
2132
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1994
diff
changeset
|
8396 |
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1994
diff
changeset
|
// Return the accumulated coalesced free range [chunk, chunk+size) to the
// free lists (unless the range is already there), recording a coalesce
// birth if the range was formed by coalescing, and reset the closure's
// free-range state.
void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
  assert(inFreeRange(), "Should only be called if currently in a free range.");
  assert(size > 0,
    "A zero sized chunk cannot be added to the free lists.");
  if (!freeRangeInFreeLists()) {
    if (CMSTestInFreeList) {
      FreeChunk* fc = (FreeChunk*) chunk;
      fc->setSize(size);
      assert(!_sp->verifyChunkInFreeLists(fc),
        "chunk should not be in free lists yet");
    }
    if (CMSTraceSweeper) {
      gclog_or_tty->print_cr(" -- add free block 0x%x (%d) to free lists",
                    chunk, size);
    }
    // A new free range is going to be starting.  The current
    // free range has not been added to the free lists yet or
    // was removed so add it back.
    // If the current free range was coalesced, then the death
    // of the free range was recorded.  Record a birth now.
    if (lastFreeRangeCoalesced()) {
      _sp->coalBirth(size);
    }
    _sp->addChunkAndRepairOffsetTable(chunk, size,
      lastFreeRangeCoalesced());
  }
  set_inFreeRange(false);
  set_freeRangeInFreeLists(false);
}
8426 | |
// We take a break if we've been at this for a while,
// so as to avoid monopolizing the locks involved.
// Any in-progress coalescing range is flushed first, then the bitmap and
// freelist locks are released, the CMS token is given up, and the thread
// sleeps briefly before re-synchronizing and re-acquiring the locks in
// the opposite order (freelist lock first). The exact release/reacquire
// order here is load-bearing; do not reorder.
void SweepClosure::do_yield_work(HeapWord* addr) {
  // Return current free chunk being used for coalescing (if any)
  // to the appropriate freelist.  After yielding, the next
  // free block encountered will start a coalescing range of
  // free blocks.  If the next free block is adjacent to the
  // chunk just flushed, they will need to wait for the next
  // sweep to be coalesced.
  if (inFreeRange()) {
    flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
  }

  // First give up the locks, then yield, then re-lock.
  // We should probably use a constructor/destructor idiom to
  // do this unlock/lock or modify the MutexUnlocker class to
  // serve our purpose. XXX
  assert_lock_strong(_bitMap->lock());
  assert_lock_strong(_freelistLock);
  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "CMS thread should hold CMS token");
  _bitMap->lock()->unlock();
  _freelistLock->unlock();
  ConcurrentMarkSweepThread::desynchronize(true);
  ConcurrentMarkSweepThread::acknowledge_yield_request();
  _collector->stopTimer();
  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
  if (PrintCMSStatistics != 0) {
    _collector->incrementYields();
  }
  _collector->icms_wait();

  // See the comment in coordinator_yield()
  for (unsigned i = 0; i < CMSYieldSleepCount &&
                       ConcurrentMarkSweepThread::should_yield() &&
                       !CMSCollector::foregroundGCIsActive(); ++i) {
    os::sleep(Thread::current(), 1, false);
    ConcurrentMarkSweepThread::acknowledge_yield_request();
  }

  ConcurrentMarkSweepThread::synchronize(true);
  _freelistLock->lock();
  _bitMap->lock()->lock_without_safepoint_check();
  _collector->startTimer();
}
8472 | |
8473 #ifndef PRODUCT | |
8474 // This is actually very useful in a product build if it can | |
8475 // be called from the debugger. Compile it into the product | |
8476 // as needed. | |
8477 bool debug_verifyChunkInFreeLists(FreeChunk* fc) { | |
8478 return debug_cms_space->verifyChunkInFreeLists(fc); | |
8479 } | |
8480 | |
8481 void SweepClosure::record_free_block_coalesced(FreeChunk* fc) const { | |
8482 if (CMSTraceSweeper) { | |
8483 gclog_or_tty->print("Sweep:coal_free_blk 0x%x (%d)\n", fc, fc->size()); | |
8484 } | |
8485 } | |
8486 #endif | |
8487 | |
8488 // CMSIsAliveClosure | |
8489 bool CMSIsAliveClosure::do_object_b(oop obj) { | |
8490 HeapWord* addr = (HeapWord*)obj; | |
8491 return addr != NULL && | |
8492 (!_span.contains(addr) || _bit_map->isMarked(addr)); | |
8493 } | |
8494 | |
// Constructor for the serial keep-alive closure used during reference
// processing; see do_oop() below, which marks each visited oop in
// '_bit_map' and pushes it on '_mark_stack'. 'cpc' records whether we
// are in the concurrent precleaning phase, which selects the overflow
// handling strategy in do_oop().
CMSKeepAliveClosure::CMSKeepAliveClosure( CMSCollector* collector,
                      MemRegion span,
                      CMSBitMap* bit_map, CMSMarkStack* mark_stack,
                      CMSMarkStack* revisit_stack, bool cpc):
  KlassRememberingOopClosure(collector, NULL, revisit_stack),
  _span(span),
  _bit_map(bit_map),
  _mark_stack(mark_stack),
  _concurrent_precleaning(cpc) {
  assert(!_span.is_empty(), "Empty span could spell trouble");
}
8506 | |
8507 | |
// CMSKeepAliveClosure: the serial version
// Marks the object in the bit map and pushes it on the mark stack for
// tracing. On stack overflow (real or simulated), either dirties the
// mod-union table (during concurrent precleaning, so remark revisits the
// object) or pushes the object on the collector's overflow list.
void CMSKeepAliveClosure::do_oop(oop obj) {
  HeapWord* addr = (HeapWord*)obj;
  if (_span.contains(addr) &&
      !_bit_map->isMarked(addr)) {
    _bit_map->mark(addr);
    bool simulate_overflow = false;
    NOT_PRODUCT(
      if (CMSMarkStackOverflowALot &&
          _collector->simulate_overflow()) {
        // simulate a stack overflow
        simulate_overflow = true;
      }
    )
    if (simulate_overflow || !_mark_stack->push(obj)) {
      if (_concurrent_precleaning) {
        // We dirty the overflown object and let the remark
        // phase deal with it.
        assert(_collector->overflow_list_is_empty(), "Error");
        // In the case of object arrays, we need to dirty all of
        // the cards that the object spans. No locking or atomics
        // are needed since no one else can be mutating the mod union
        // table.
        if (obj->is_objArray()) {
          size_t sz = obj->size();
          HeapWord* end_card_addr =
            (HeapWord*)round_to((intptr_t)(addr+sz), CardTableModRefBS::card_size);
          MemRegion redirty_range = MemRegion(addr, end_card_addr);
          assert(!redirty_range.is_empty(), "Arithmetical tautology");
          _collector->_modUnionTable.mark_range(redirty_range);
        } else {
          _collector->_modUnionTable.mark(addr);
        }
        _collector->_ser_kac_preclean_ovflw++;
      } else {
        _collector->push_on_overflow_list(obj);
        _collector->_ser_kac_ovflw++;
      }
    }
  }
}
8549 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
// Dispatch both the wide- and narrow-oop virtual overloads to the common
// do_oop_work implementation.
void CMSKeepAliveClosure::do_oop(oop* p) { CMSKeepAliveClosure::do_oop_work(p); }
void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
8552 |
0 | 8553 // CMSParKeepAliveClosure: a parallel version of the above. |
8554 // The work queues are private to each closure (thread), | |
8555 // but (may be) available for stealing by other threads. | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
8556 void CMSParKeepAliveClosure::do_oop(oop obj) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
8557 HeapWord* addr = (HeapWord*)obj; |
0 | 8558 if (_span.contains(addr) && |
8559 !_bit_map->isMarked(addr)) { | |
8560 // In general, during recursive tracing, several threads | |
8561 // may be concurrently getting here; the first one to | |
8562 // "tag" it, claims it. | |
8563 if (_bit_map->par_mark(addr)) { | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
8564 bool res = _work_queue->push(obj); |
0 | 8565 assert(res, "Low water mark should be much less than capacity"); |
8566 // Do a recursive trim in the hope that this will keep | |
8567 // stack usage lower, but leave some oops for potential stealers | |
8568 trim_queue(_low_water_mark); | |
8569 } // Else, another thread got there first | |
8570 } | |
8571 } | |
8572 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
8573 void CMSParKeepAliveClosure::do_oop(oop* p) { CMSParKeepAliveClosure::do_oop_work(p); } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
8574 void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
8575 |
0 | 8576 void CMSParKeepAliveClosure::trim_queue(uint max) { |
8577 while (_work_queue->size() > max) { | |
8578 oop new_oop; | |
8579 if (_work_queue->pop_local(new_oop)) { | |
8580 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop"); | |
8581 assert(_bit_map->isMarked((HeapWord*)new_oop), | |
8582 "no white objects on this stack!"); | |
8583 assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop"); | |
8584 // iterate over the oops in this oop, marking and pushing | |
8585 // the ones in CMS heap (i.e. in _span). | |
8586 new_oop->oop_iterate(&_mark_and_push); | |
8587 } | |
8588 } | |
8589 } | |
8590 | |
935 | 8591 CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure( |
8592 CMSCollector* collector, | |
8593 MemRegion span, CMSBitMap* bit_map, | |
8594 CMSMarkStack* revisit_stack, | |
8595 OopTaskQueue* work_queue): | |
8596 Par_KlassRememberingOopClosure(collector, NULL, revisit_stack), | |
8597 _span(span), | |
8598 _bit_map(bit_map), | |
8599 _work_queue(work_queue) { } | |
8600 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
8601 void CMSInnerParMarkAndPushClosure::do_oop(oop obj) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
8602 HeapWord* addr = (HeapWord*)obj; |
0 | 8603 if (_span.contains(addr) && |
8604 !_bit_map->isMarked(addr)) { | |
8605 if (_bit_map->par_mark(addr)) { | |
8606 bool simulate_overflow = false; | |
8607 NOT_PRODUCT( | |
8608 if (CMSMarkStackOverflowALot && | |
8609 _collector->par_simulate_overflow()) { | |
8610 // simulate a stack overflow | |
8611 simulate_overflow = true; | |
8612 } | |
8613 ) | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
8614 if (simulate_overflow || !_work_queue->push(obj)) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
8615 _collector->par_push_on_overflow_list(obj); |
0 | 8616 _collector->_par_kac_ovflw++; |
8617 } | |
8618 } // Else another thread got there already | |
8619 } | |
8620 } | |
8621 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
8622 void CMSInnerParMarkAndPushClosure::do_oop(oop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
8623 void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
8624 |
//////////////////////////////////////////////////////////////////
// CMSExpansionCause /////////////////////////////
//////////////////////////////////////////////////////////////////
// Map a CMS heap-expansion cause to a human-readable string used in
// GC logging; any unrecognized enum value maps to "unknown".
// NOTE: these exact strings appear in GC logs, so they must not be
// changed casually.
const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
  switch (cause) {
    case _no_expansion:
      return "No expansion";
    case _satisfy_free_ratio:
      return "Free ratio";
    case _satisfy_promotion:
      return "Satisfy promotion";
    case _satisfy_allocation:
      return "allocation";
    case _allocate_par_lab:
      return "Par LAB";
    case _allocate_par_spooling_space:
      return "Par Spooling Space";
    case _adaptive_size_policy:
      return "Ergonomics";
    default:
      return "unknown";
  }
}
8648 | |
8649 void CMSDrainMarkingStackClosure::do_void() { | |
8650 // the max number to take from overflow list at a time | |
8651 const size_t num = _mark_stack->capacity()/4; | |
452
00b023ae2d78
6722113: CMS: Incorrect overflow handling during precleaning of Reference lists
ysr
parents:
360
diff
changeset
|
8652 assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(), |
00b023ae2d78
6722113: CMS: Incorrect overflow handling during precleaning of Reference lists
ysr
parents:
360
diff
changeset
|
8653 "Overflow list should be NULL during concurrent phases"); |
0 | 8654 while (!_mark_stack->isEmpty() || |
8655 // if stack is empty, check the overflow list | |
8656 _collector->take_from_overflow_list(num, _mark_stack)) { | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
8657 oop obj = _mark_stack->pop(); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
8658 HeapWord* addr = (HeapWord*)obj; |
0 | 8659 assert(_span.contains(addr), "Should be within span"); |
8660 assert(_bit_map->isMarked(addr), "Should be marked"); | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
8661 assert(obj->is_oop(), "Should be an oop"); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
8662 obj->oop_iterate(_keep_alive); |
0 | 8663 } |
8664 } | |
8665 | |
8666 void CMSParDrainMarkingStackClosure::do_void() { | |
8667 // drain queue | |
8668 trim_queue(0); | |
8669 } | |
8670 | |
8671 // Trim our work_queue so its length is below max at return | |
8672 void CMSParDrainMarkingStackClosure::trim_queue(uint max) { | |
8673 while (_work_queue->size() > max) { | |
8674 oop new_oop; | |
8675 if (_work_queue->pop_local(new_oop)) { | |
8676 assert(new_oop->is_oop(), "Expected an oop"); | |
8677 assert(_bit_map->isMarked((HeapWord*)new_oop), | |
8678 "no white objects on this stack!"); | |
8679 assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop"); | |
8680 // iterate over the oops in this oop, marking and pushing | |
8681 // the ones in CMS heap (i.e. in _span). | |
8682 new_oop->oop_iterate(&_mark_and_push); | |
8683 } | |
8684 } | |
8685 } | |
8686 | |
8687 //////////////////////////////////////////////////////////////////// | |
8688 // Support for Marking Stack Overflow list handling and related code | |
8689 //////////////////////////////////////////////////////////////////// | |
8690 // Much of the following code is similar in shape and spirit to the | |
8691 // code used in ParNewGC. We should try and share that code | |
8692 // as much as possible in the future. | |
8693 | |
8694 #ifndef PRODUCT | |
8695 // Debugging support for CMSStackOverflowALot | |
8696 | |
8697 // It's OK to call this multi-threaded; the worst thing | |
8698 // that can happen is that we'll get a bunch of closely | |
8699 // spaced simulated oveflows, but that's OK, in fact | |
8700 // probably good as it would exercise the overflow code | |
8701 // under contention. | |
8702 bool CMSCollector::simulate_overflow() { | |
8703 if (_overflow_counter-- <= 0) { // just being defensive | |
8704 _overflow_counter = CMSMarkStackOverflowInterval; | |
8705 return true; | |
8706 } else { | |
8707 return false; | |
8708 } | |
8709 } | |
8710 | |
8711 bool CMSCollector::par_simulate_overflow() { | |
8712 return simulate_overflow(); | |
8713 } | |
8714 #endif | |
8715 | |
8716 // Single-threaded | |
8717 bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) { | |
8718 assert(stack->isEmpty(), "Expected precondition"); | |
8719 assert(stack->capacity() > num, "Shouldn't bite more than can chew"); | |
8720 size_t i = num; | |
8721 oop cur = _overflow_list; | |
8722 const markOop proto = markOopDesc::prototype(); | |
534 | 8723 NOT_PRODUCT(ssize_t n = 0;) |
0 | 8724 for (oop next; i > 0 && cur != NULL; cur = next, i--) { |
8725 next = oop(cur->mark()); | |
8726 cur->set_mark(proto); // until proven otherwise | |
8727 assert(cur->is_oop(), "Should be an oop"); | |
8728 bool res = stack->push(cur); | |
8729 assert(res, "Bit off more than can chew?"); | |
8730 NOT_PRODUCT(n++;) | |
8731 } | |
8732 _overflow_list = cur; | |
8733 #ifndef PRODUCT | |
8734 assert(_num_par_pushes >= n, "Too many pops?"); | |
8735 _num_par_pushes -=n; | |
8736 #endif | |
8737 return !stack->isEmpty(); | |
8738 } | |
8739 | |
// Sentinel written into _overflow_list while a thread has the whole
// list claimed; must not collide with any real oop address.
#define BUSY (oop(0x1aff1aff))
// (MT-safe) Get a prefix of at most "num" from the list.
// The overflow list is chained through the mark word of
// each object in the list. We fetch the entire list,
// break off a prefix of the right size and return the
// remainder. If other threads try to take objects from
// the overflow list at that time, they will wait for
// some time to see if data becomes available. If (and
// only if) another thread places one or more object(s)
// on the global list before we have returned the suffix
// to the global list, we will walk down our local list
// to find its end and append the global list to
// our suffix before returning it. This suffix walk can
// prove to be expensive (quadratic in the amount of traffic)
// when there are many objects in the overflow list and
// there is much producer-consumer contention on the list.
// *NOTE*: The overflow list manipulation code here and
// in ParNewGeneration:: are very similar in shape,
// except that in the ParNew case we use the old (from/eden)
// copy of the object to thread the list via its klass word.
// Because of the common code, if you make any changes in
// the code below, please check the ParNew version to see if
// similar changes might be needed.
// CR 6797058 has been filed to consolidate the common code.
bool CMSCollector::par_take_from_overflow_list(size_t num,
                                               OopTaskQueue* work_q,
                                               int no_of_gc_threads) {
  assert(work_q->size() == 0, "First empty local work queue");
  assert(num < work_q->max_elems(), "Can't bite more than we can chew");
  if (_overflow_list == NULL) {
    return false;
  }
  // Grab the entire list; we'll put back a suffix
  oop prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
  Thread* tid = Thread::current();
  // Before "no_of_gc_threads" was introduced CMSOverflowSpinCount was
  // set to ParallelGCThreads.
  size_t CMSOverflowSpinCount = (size_t) no_of_gc_threads; // was ParallelGCThreads;
  size_t sleep_time_millis = MAX2((size_t)1, num/100);
  // If the list is busy, we spin for a short while,
  // sleeping between attempts to get the list.
  for (size_t spin = 0; prefix == BUSY && spin < CMSOverflowSpinCount; spin++) {
    os::sleep(tid, sleep_time_millis, false);
    if (_overflow_list == NULL) {
      // Nothing left to take
      return false;
    } else if (_overflow_list != BUSY) {
      // Try and grab the prefix
      prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
    }
  }
  // If the list was found to be empty, or we spun long
  // enough, we give up and return empty-handed. If we leave
  // the list in the BUSY state below, it must be the case that
  // some other thread holds the overflow list and will set it
  // to a non-BUSY state in the future.
  if (prefix == NULL || prefix == BUSY) {
    // Nothing to take or waited long enough
    if (prefix == NULL) {
      // Write back the NULL in case we overwrote it with BUSY above
      // and it is still the same value.
      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
    }
    return false;
  }
  assert(prefix != NULL && prefix != BUSY, "Error");
  size_t i = num;
  oop cur = prefix;
  // Walk down the first "num" objects, unless we reach the end.
  for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--);
  if (cur->mark() == NULL) {
    // We have "num" or fewer elements in the list, so there
    // is nothing to return to the global list.
    // Write back the NULL in lieu of the BUSY we wrote
    // above, if it is still the same value.
    if (_overflow_list == BUSY) {
      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
    }
  } else {
    // Chop off the suffix and return it to the global list.
    assert(cur->mark() != BUSY, "Error");
    oop suffix_head = cur->mark(); // suffix will be put back on global list
    cur->set_mark(NULL);           // break off suffix
    // It's possible that the list is still in the empty(busy) state
    // we left it in a short while ago; in that case we may be
    // able to place back the suffix without incurring the cost
    // of a walk down the list.
    oop observed_overflow_list = _overflow_list;
    oop cur_overflow_list = observed_overflow_list;
    bool attached = false;
    while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
      observed_overflow_list =
        (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
      if (cur_overflow_list == observed_overflow_list) {
        attached = true;
        break;
      } else cur_overflow_list = observed_overflow_list;
    }
    if (!attached) {
      // Too bad, someone else sneaked in (at least) an element; we'll need
      // to do a splice. Find tail of suffix so we can prepend suffix to global
      // list.
      for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
      oop suffix_tail = cur;
      assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
             "Tautology");
      observed_overflow_list = _overflow_list;
      do {
        cur_overflow_list = observed_overflow_list;
        if (cur_overflow_list != BUSY) {
          // Do the splice ...
          suffix_tail->set_mark(markOop(cur_overflow_list));
        } else { // cur_overflow_list == BUSY
          suffix_tail->set_mark(NULL);
        }
        // ... and try to place spliced list back on overflow_list ...
        observed_overflow_list =
          (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
      } while (cur_overflow_list != observed_overflow_list);
      // ... until we have succeeded in doing so.
    }
  }

  // Push the prefix elements on work_q
  assert(prefix != NULL, "control point invariant");
  const markOop proto = markOopDesc::prototype();
  oop next;
  NOT_PRODUCT(ssize_t n = 0;)
  for (cur = prefix; cur != NULL; cur = next) {
    next = oop(cur->mark());
    cur->set_mark(proto);   // until proven otherwise
    assert(cur->is_oop(), "Should be an oop");
    bool res = work_q->push(cur);
    assert(res, "Bit off more than we can chew?");
    NOT_PRODUCT(n++;)
  }
#ifndef PRODUCT
  assert(_num_par_pushes >= n, "Too many pops?");
  Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
#endif
  return true;
}
8882 | |
// Single-threaded
// Prepend p to the overflow list, threading the list through p's
// mark word (after saving the mark word if it must be preserved).
void CMSCollector::push_on_overflow_list(oop p) {
  NOT_PRODUCT(_num_par_pushes++;)
  assert(p->is_oop(), "Not an oop");
  preserve_mark_if_necessary(p);
  p->set_mark((markOop)_overflow_list);  // link to old head
  _overflow_list = p;
}
8891 | |
// Multi-threaded; use CAS to prepend to overflow list
void CMSCollector::par_push_on_overflow_list(oop p) {
  NOT_PRODUCT(Atomic::inc_ptr(&_num_par_pushes);)
  assert(p->is_oop(), "Not an oop");
  par_preserve_mark_if_necessary(p);
  oop observed_overflow_list = _overflow_list;
  oop cur_overflow_list;
  do {
    cur_overflow_list = observed_overflow_list;
    // Thread p onto the list via its mark word. If the list is
    // currently claimed (BUSY) by par_take_from_overflow_list(),
    // p starts a fresh list instead of linking to the sentinel.
    if (cur_overflow_list != BUSY) {
      p->set_mark(markOop(cur_overflow_list));
    } else {
      p->set_mark(NULL);
    }
    // Retry the CAS until the head we observed is the head we swapped.
    observed_overflow_list =
      (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
  } while (cur_overflow_list != observed_overflow_list);
}
#undef BUSY
0 | 8911 |
8912 // Single threaded | |
8913 // General Note on GrowableArray: pushes may silently fail | |
8914 // because we are (temporarily) out of C-heap for expanding | |
8915 // the stack. The problem is quite ubiquitous and affects | |
8916 // a lot of code in the JVM. The prudent thing for GrowableArray | |
8917 // to do (for now) is to exit with an error. However, that may | |
8918 // be too draconian in some cases because the caller may be | |
534 | 8919 // able to recover without much harm. For such cases, we |
0 | 8920 // should probably introduce a "soft_push" method which returns |
8921 // an indication of success or failure with the assumption that | |
8922 // the caller may be able to recover from a failure; code in | |
8923 // the VM can then be changed, incrementally, to deal with such | |
8924 // failures where possible, thus, incrementally hardening the VM | |
8925 // in such low resource situations. | |
// Record (p, m) on the two parallel preservation stacks so that p's
// original mark word can be restored (see
// restore_preserved_marks_if_any()) after overflow processing has
// clobbered it for list linkage.
void CMSCollector::preserve_mark_work(oop p, markOop m) {
  _preserved_oop_stack.push(p);
  _preserved_mark_stack.push(m);
  // The caller read m from p before calling; it must not have
  // changed in between (see par_preserve_mark_if_necessary()).
  assert(m == p->mark(), "Mark word changed");
  assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
         "bijection");
}
8933 | |
8934 // Single threaded | |
8935 void CMSCollector::preserve_mark_if_necessary(oop p) { | |
8936 markOop m = p->mark(); | |
8937 if (m->must_be_preserved(p)) { | |
8938 preserve_mark_work(p, m); | |
8939 } | |
8940 } | |
8941 | |
// Parallel variant of the above: serializes pushes onto the (shared)
// preservation stacks with the rare-event lock.
void CMSCollector::par_preserve_mark_if_necessary(oop p) {
  markOop m = p->mark();
  if (m->must_be_preserved(p)) {
    MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
    // Even though we read the mark word without holding
    // the lock, we are assured that it will not change
    // because we "own" this oop, so no other thread can
    // be trying to push it on the overflow list; see
    // the assertion in preserve_mark_work() that checks
    // that m == p->mark().
    preserve_mark_work(p, m);
  }
}
8955 | |
8956 // We should be able to do this multi-threaded, | |
8957 // a chunk of stack being a task (this is | |
8958 // correct because each oop only ever appears | |
8959 // once in the overflow list. However, it's | |
8960 // not very easy to completely overlap this with | |
8961 // other operations, so will generally not be done | |
8962 // until all work's been completed. Because we | |
8963 // expect the preserved oop stack (set) to be small, | |
8964 // it's probably fine to do this single-threaded. | |
8965 // We can explore cleverer concurrent/overlapped/parallel | |
8966 // processing of preserved marks if we feel the | |
8967 // need for this in the future. Stack overflow should | |
8968 // be so rare in practice and, when it happens, its | |
8969 // effect on performance so great that this will | |
8970 // likely just be in the noise anyway. | |
8971 void CMSCollector::restore_preserved_marks_if_any() { | |
8972 assert(SafepointSynchronize::is_at_safepoint(), | |
8973 "world should be stopped"); | |
8974 assert(Thread::current()->is_ConcurrentGC_thread() || | |
8975 Thread::current()->is_VM_thread(), | |
8976 "should be single-threaded"); | |
1836
894b1d7c7e01
6423256: GC stacks should use a better data structure
jcoomes
parents:
1833
diff
changeset
|
8977 assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(), |
894b1d7c7e01
6423256: GC stacks should use a better data structure
jcoomes
parents:
1833
diff
changeset
|
8978 "bijection"); |
894b1d7c7e01
6423256: GC stacks should use a better data structure
jcoomes
parents:
1833
diff
changeset
|
8979 |
894b1d7c7e01
6423256: GC stacks should use a better data structure
jcoomes
parents:
1833
diff
changeset
|
8980 while (!_preserved_oop_stack.is_empty()) { |
894b1d7c7e01
6423256: GC stacks should use a better data structure
jcoomes
parents:
1833
diff
changeset
|
8981 oop p = _preserved_oop_stack.pop(); |
0 | 8982 assert(p->is_oop(), "Should be an oop"); |
8983 assert(_span.contains(p), "oop should be in _span"); | |
8984 assert(p->mark() == markOopDesc::prototype(), | |
8985 "Set when taken from overflow list"); | |
1836
894b1d7c7e01
6423256: GC stacks should use a better data structure
jcoomes
parents:
1833
diff
changeset
|
8986 markOop m = _preserved_mark_stack.pop(); |
0 | 8987 p->set_mark(m); |
8988 } | |
1836
894b1d7c7e01
6423256: GC stacks should use a better data structure
jcoomes
parents:
1833
diff
changeset
|
8989 assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(), |
0 | 8990 "stacks were cleared above"); |
8991 } | |
8992 | |
8993 #ifndef PRODUCT | |
8994 bool CMSCollector::no_preserved_marks() const { | |
1836
894b1d7c7e01
6423256: GC stacks should use a better data structure
jcoomes
parents:
1833
diff
changeset
|
8995 return _preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(); |
0 | 8996 } |
8997 #endif | |
8998 | |
8999 CMSAdaptiveSizePolicy* ASConcurrentMarkSweepGeneration::cms_size_policy() const | |
9000 { | |
9001 GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap(); | |
9002 CMSAdaptiveSizePolicy* size_policy = | |
9003 (CMSAdaptiveSizePolicy*) gch->gen_policy()->size_policy(); | |
9004 assert(size_policy->is_gc_cms_adaptive_size_policy(), | |
9005 "Wrong type for size policy"); | |
9006 return size_policy; | |
9007 } | |
9008 | |
9009 void ASConcurrentMarkSweepGeneration::resize(size_t cur_promo_size, | |
9010 size_t desired_promo_size) { | |
9011 if (cur_promo_size < desired_promo_size) { | |
9012 size_t expand_bytes = desired_promo_size - cur_promo_size; | |
9013 if (PrintAdaptiveSizePolicy && Verbose) { | |
9014 gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize " | |
9015 "Expanding tenured generation by " SIZE_FORMAT " (bytes)", | |
9016 expand_bytes); | |
9017 } | |
9018 expand(expand_bytes, | |
9019 MinHeapDeltaBytes, | |
9020 CMSExpansionCause::_adaptive_size_policy); | |
9021 } else if (desired_promo_size < cur_promo_size) { | |
9022 size_t shrink_bytes = cur_promo_size - desired_promo_size; | |
9023 if (PrintAdaptiveSizePolicy && Verbose) { | |
9024 gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize " | |
9025 "Shrinking tenured generation by " SIZE_FORMAT " (bytes)", | |
9026 shrink_bytes); | |
9027 } | |
9028 shrink(shrink_bytes); | |
9029 } | |
9030 } | |
9031 | |
9032 CMSGCAdaptivePolicyCounters* ASConcurrentMarkSweepGeneration::gc_adaptive_policy_counters() { | |
9033 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
9034 CMSGCAdaptivePolicyCounters* counters = | |
9035 (CMSGCAdaptivePolicyCounters*) gch->collector_policy()->counters(); | |
9036 assert(counters->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind, | |
9037 "Wrong kind of counters"); | |
9038 return counters; | |
9039 } | |
9040 | |
9041 | |
9042 void ASConcurrentMarkSweepGeneration::update_counters() { | |
9043 if (UsePerfData) { | |
9044 _space_counters->update_all(); | |
9045 _gen_counters->update_all(); | |
9046 CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters(); | |
9047 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
9048 CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats(); | |
9049 assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind, | |
9050 "Wrong gc statistics type"); | |
9051 counters->update_counters(gc_stats_l); | |
9052 } | |
9053 } | |
9054 | |
9055 void ASConcurrentMarkSweepGeneration::update_counters(size_t used) { | |
9056 if (UsePerfData) { | |
9057 _space_counters->update_used(used); | |
9058 _space_counters->update_capacity(); | |
9059 _gen_counters->update_all(); | |
9060 | |
9061 CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters(); | |
9062 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
9063 CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats(); | |
9064 assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind, | |
9065 "Wrong gc statistics type"); | |
9066 counters->update_counters(gc_stats_l); | |
9067 } | |
9068 } | |
9069 | |
// The desired expansion delta is computed so that:
// . desired free percentage or greater is used
void ASConcurrentMarkSweepGeneration::compute_new_size() {
  assert_locked_or_safepoint(Heap_lock);

  GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();

  // If incremental collection failed, we just want to expand
  // to the limit.
  if (incremental_collection_failed()) {
    clear_incremental_collection_failed();
    grow_to_reserved();
    return;
  }

  assert(UseAdaptiveSizePolicy, "Should be using adaptive sizing");

  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
         "Wrong type of heap");
  // Locate the young generation directly below this one; it must be
  // the adaptive-size ParNew variant.
  int prev_level = level() - 1;
  assert(prev_level >= 0, "The cms generation is the lowest generation");
  Generation* prev_gen = gch->get_gen(prev_level);
  assert(prev_gen->kind() == Generation::ASParNew,
         "Wrong type of young generation");
  ParNewGeneration* younger_gen = (ParNewGeneration*) prev_gen;
  // Ask the size policy for the desired tenured free space, given
  // current free space, the maximum available, and eden's capacity,
  // then resize toward the policy's answer.
  size_t cur_eden = younger_gen->eden()->capacity();
  CMSAdaptiveSizePolicy* size_policy = cms_size_policy();
  size_t cur_promo = free();
  size_policy->compute_tenured_generation_free_space(cur_promo,
                                                     max_available(),
                                                     cur_eden);
  resize(cur_promo, size_policy->promo_size());

  // Record the new size of the space in the cms generation
  // that is available for promotions. This is temporary.
  // It should be the desired promo size.
  size_policy->avg_cms_promo()->sample(free());
  size_policy->avg_old_live()->sample(used());

  if (UsePerfData) {
    CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
    counters->update_cms_capacity_counter(capacity());
  }
}
9114 | |
9115 void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) { | |
9116 assert_locked_or_safepoint(Heap_lock); | |
9117 assert_lock_strong(freelistLock()); | |
9118 HeapWord* old_end = _cmsSpace->end(); | |
9119 HeapWord* unallocated_start = _cmsSpace->unallocated_block(); | |
9120 assert(old_end >= unallocated_start, "Miscalculation of unallocated_start"); | |
9121 FreeChunk* chunk_at_end = find_chunk_at_end(); | |
9122 if (chunk_at_end == NULL) { | |
9123 // No room to shrink | |
9124 if (PrintGCDetails && Verbose) { | |
9125 gclog_or_tty->print_cr("No room to shrink: old_end " | |
9126 PTR_FORMAT " unallocated_start " PTR_FORMAT | |
9127 " chunk_at_end " PTR_FORMAT, | |
9128 old_end, unallocated_start, chunk_at_end); | |
9129 } | |
9130 return; | |
9131 } else { | |
9132 | |
9133 // Find the chunk at the end of the space and determine | |
9134 // how much it can be shrunk. | |
9135 size_t shrinkable_size_in_bytes = chunk_at_end->size(); | |
9136 size_t aligned_shrinkable_size_in_bytes = | |
9137 align_size_down(shrinkable_size_in_bytes, os::vm_page_size()); | |
9138 assert(unallocated_start <= chunk_at_end->end(), | |
9139 "Inconsistent chunk at end of space"); | |
9140 size_t bytes = MIN2(desired_bytes, aligned_shrinkable_size_in_bytes); | |
9141 size_t word_size_before = heap_word_size(_virtual_space.committed_size()); | |
9142 | |
9143 // Shrink the underlying space | |
9144 _virtual_space.shrink_by(bytes); | |
9145 if (PrintGCDetails && Verbose) { | |
9146 gclog_or_tty->print_cr("ConcurrentMarkSweepGeneration::shrink_by:" | |
9147 " desired_bytes " SIZE_FORMAT | |
9148 " shrinkable_size_in_bytes " SIZE_FORMAT | |
9149 " aligned_shrinkable_size_in_bytes " SIZE_FORMAT | |
9150 " bytes " SIZE_FORMAT, | |
9151 desired_bytes, shrinkable_size_in_bytes, | |
9152 aligned_shrinkable_size_in_bytes, bytes); | |
9153 gclog_or_tty->print_cr(" old_end " SIZE_FORMAT | |
9154 " unallocated_start " SIZE_FORMAT, | |
9155 old_end, unallocated_start); | |
9156 } | |
9157 | |
9158 // If the space did shrink (shrinking is not guaranteed), | |
9159 // shrink the chunk at the end by the appropriate amount. | |
9160 if (((HeapWord*)_virtual_space.high()) < old_end) { | |
9161 size_t new_word_size = | |
9162 heap_word_size(_virtual_space.committed_size()); | |
9163 | |
9164 // Have to remove the chunk from the dictionary because it is changing | |
9165 // size and might be someplace elsewhere in the dictionary. | |
9166 | |
9167 // Get the chunk at end, shrink it, and put it | |
9168 // back. | |
9169 _cmsSpace->removeChunkFromDictionary(chunk_at_end); | |
9170 size_t word_size_change = word_size_before - new_word_size; | |
9171 size_t chunk_at_end_old_size = chunk_at_end->size(); | |
9172 assert(chunk_at_end_old_size >= word_size_change, | |
9173 "Shrink is too large"); | |
9174 chunk_at_end->setSize(chunk_at_end_old_size - | |
9175 word_size_change); | |
9176 _cmsSpace->freed((HeapWord*) chunk_at_end->end(), | |
9177 word_size_change); | |
9178 | |
9179 _cmsSpace->returnChunkToDictionary(chunk_at_end); | |
9180 | |
9181 MemRegion mr(_cmsSpace->bottom(), new_word_size); | |
9182 _bts->resize(new_word_size); // resize the block offset shared array | |
9183 Universe::heap()->barrier_set()->resize_covered_region(mr); | |
9184 _cmsSpace->assert_locked(); | |
9185 _cmsSpace->set_end((HeapWord*)_virtual_space.high()); | |
9186 | |
9187 NOT_PRODUCT(_cmsSpace->dictionary()->verify()); | |
9188 | |
9189 // update the space and generation capacity counters | |
9190 if (UsePerfData) { | |
9191 _space_counters->update_capacity(); | |
9192 _gen_counters->update_all(); | |
9193 } | |
9194 | |
9195 if (Verbose && PrintGCDetails) { | |
9196 size_t new_mem_size = _virtual_space.committed_size(); | |
9197 size_t old_mem_size = new_mem_size + bytes; | |
9198 gclog_or_tty->print_cr("Shrinking %s from %ldK by %ldK to %ldK", | |
9199 name(), old_mem_size/K, bytes/K, new_mem_size/K); | |
9200 } | |
9201 } | |
9202 | |
9203 assert(_cmsSpace->unallocated_block() <= _cmsSpace->end(), | |
9204 "Inconsistency at end of space"); | |
9205 assert(chunk_at_end->end() == _cmsSpace->end(), | |
9206 "Shrinking is inconsistent"); | |
9207 return; | |
9208 } | |
9209 } | |
9210 | |
9211 // Transfer some number of overflown objects to usual marking | |
9212 // stack. Return true if some objects were transferred. | |
9213 bool MarkRefsIntoAndScanClosure::take_from_overflow_list() { | |
679
cea947c8a988
6819891: ParNew: Fix work queue overflow code to deal correctly with +UseCompressedOops
ysr
parents:
628
diff
changeset
|
9214 size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4, |
0 | 9215 (size_t)ParGCDesiredObjsFromOverflowList); |
9216 | |
9217 bool res = _collector->take_from_overflow_list(num, _mark_stack); | |
9218 assert(_collector->overflow_list_is_empty() || res, | |
9219 "If list is not empty, we should have taken something"); | |
9220 assert(!res || !_mark_stack->isEmpty(), | |
9221 "If we took something, it should now be on our stack"); | |
9222 return res; | |
9223 } | |
9224 | |
9225 size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) { | |
9226 size_t res = _sp->block_size_no_stall(addr, _collector); | |
9227 if (_sp->block_is_obj(addr)) { | |
9228 if (_live_bit_map->isMarked(addr)) { | |
9229 // It can't have been dead in a previous cycle | |
9230 guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!"); | |
9231 } else { | |
9232 _dead_bit_map->mark(addr); // mark the dead object | |
9233 } | |
9234 } | |
2226
c5a923563727
6912621: iCMS: Error: assert(_markBitMap.isMarked(addr + 1),"Missing Printezis bit?")
ysr
parents:
2177
diff
changeset
|
9235 // Could be 0, if the block size could not be computed without stalling. |
0 | 9236 return res; |
9237 } | |
1703
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9238 |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9239 TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase): TraceMemoryManagerStats() { |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9240 |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9241 switch (phase) { |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9242 case CMSCollector::InitialMarking: |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9243 initialize(true /* fullGC */ , |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9244 true /* recordGCBeginTime */, |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9245 true /* recordPreGCUsage */, |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9246 false /* recordPeakUsage */, |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9247 false /* recordPostGCusage */, |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9248 true /* recordAccumulatedGCTime */, |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9249 false /* recordGCEndTime */, |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9250 false /* countCollection */ ); |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9251 break; |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9252 |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9253 case CMSCollector::FinalMarking: |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9254 initialize(true /* fullGC */ , |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9255 false /* recordGCBeginTime */, |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9256 false /* recordPreGCUsage */, |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9257 false /* recordPeakUsage */, |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9258 false /* recordPostGCusage */, |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9259 true /* recordAccumulatedGCTime */, |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9260 false /* recordGCEndTime */, |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9261 false /* countCollection */ ); |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9262 break; |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9263 |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9264 case CMSCollector::Sweeping: |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9265 initialize(true /* fullGC */ , |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9266 false /* recordGCBeginTime */, |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9267 false /* recordPreGCUsage */, |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9268 true /* recordPeakUsage */, |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9269 true /* recordPostGCusage */, |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9270 false /* recordAccumulatedGCTime */, |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9271 true /* recordGCEndTime */, |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9272 true /* countCollection */ ); |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9273 break; |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9274 |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9275 default: |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9276 ShouldNotReachHere(); |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9277 } |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9278 } |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9279 |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9280 // when bailing out of cms in concurrent mode failure |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9281 TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(): TraceMemoryManagerStats() { |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9282 initialize(true /* fullGC */ , |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9283 true /* recordGCBeginTime */, |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9284 true /* recordPreGCUsage */, |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9285 true /* recordPeakUsage */, |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9286 true /* recordPostGCusage */, |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9287 true /* recordAccumulatedGCTime */, |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9288 true /* recordGCEndTime */, |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9289 true /* countCollection */ ); |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1579
diff
changeset
|
9290 } |