annotate src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp @ 3461:81d815b05abb
7056328: JSR 292 invocation sometimes fails in adapters for types not on boot class path
Reviewed-by: never
author: jrose
date: Thu, 23 Jun 2011 17:14:06 -0700
parents: 8cbcd406c42e
children: 48048b59a551
/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp"
#include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
#include "gc_implementation/parNew/parNewGeneration.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genMarkSweep.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "services/runtimeService.hpp"

// statics
CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
bool CMSCollector::_full_gc_requested = false;

//////////////////////////////////////////////////////////////////
// In support of CMS/VM thread synchronization
//////////////////////////////////////////////////////////////////
// We split use of the CGC_lock into 2 "levels".
// The low-level locking is of the usual CGC_lock monitor. We introduce
// a higher level "token" (hereafter "CMS token") built on top of the
// low level monitor (hereafter "CGC lock").
// The token-passing protocol gives priority to the VM thread. The
// CMS-lock doesn't provide any fairness guarantees, but clients
// should ensure that it is only held for very short, bounded
// durations.
//
// When either of the CMS thread or the VM thread is involved in
// collection operations during which it does not want the other
// thread to interfere, it obtains the CMS token.
//
// If either thread tries to get the token while the other has
// it, that thread waits. However, if the VM thread and CMS thread
// both want the token, then the VM thread gets priority while the
// CMS thread waits. This ensures, for instance, that the "concurrent"
// phases of the CMS thread's work do not block out the VM thread
// for long periods of time as the CMS thread continues to hog
// the token. (See bug 4616232).
//
// The baton-passing functions are, however, controlled by the
// flags _foregroundGCShouldWait and _foregroundGCIsActive,
// and here the low-level CMS lock, not the high level token,
// ensures mutual exclusion.
//
// Two important conditions that we have to satisfy:
// 1. if a thread does a low-level wait on the CMS lock, then it
//    relinquishes the CMS token if it were holding that token
//    when it acquired the low-level CMS lock.
// 2. any low-level notifications on the low-level lock
//    should only be sent when a thread has relinquished the token.
//
// In the absence of either property, we'd have potential deadlock.
//
// We protect each of the CMS (concurrent and sequential) phases
// with the CMS _token_, not the CMS _lock_.
//
// The only code protected by CMS lock is the token acquisition code
// itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
// baton-passing code.
//
// Unfortunately, I couldn't come up with a good abstraction to factor and
// hide the naked CGC_lock manipulation in the baton-passing code
// further below. That's something we should try to do. Also, the proof
// of correctness of this 2-level locking scheme is far from obvious,
// and potentially quite slippery. We have an uneasy suspicion, for instance,
// that there may be a theoretical possibility of delay/starvation in the
// low-level lock/wait/notify scheme used for the baton-passing because of
// potential interference with the priority scheme embodied in the
// CMS-token-passing protocol. See related comments at a CGC_lock->wait()
// invocation further below and marked with "XXX 20011219YSR".
// Indeed, as we note elsewhere, this may become yet more slippery
// in the presence of multiple CMS and/or multiple VM threads. XXX

class CMSTokenSync: public StackObj {
 private:
  bool _is_cms_thread;
 public:
  CMSTokenSync(bool is_cms_thread):
    _is_cms_thread(is_cms_thread) {
    assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
           "Incorrect argument to constructor");
    ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
  }

  ~CMSTokenSync() {
    assert(_is_cms_thread ?
             ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
             ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
           "Incorrect state");
    ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
  }
};
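
// Illustrative usage sketch (not part of this file): a phase that must
// exclude the other thread wraps its work in a stack-allocated
// CMSTokenSync, so the token is acquired in the constructor and released
// in the destructor even on an early return.  The helper below is
// hypothetical:
//
//   void some_cms_phase() {
//     CMSTokenSync ts(true /* is_cms_thread */);
//     do_phase_work();   // runs while holding the CMS token
//   }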

// Convenience class that does a CMSTokenSync, and then acquires
// up to three locks.
class CMSTokenSyncWithLocks: public CMSTokenSync {
 private:
  // Note: locks are acquired in textual declaration order
  // and released in the opposite order
  MutexLockerEx _locker1, _locker2, _locker3;
 public:
  CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
                        Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
    CMSTokenSync(is_cms_thread),
    _locker1(mutex1, Mutex::_no_safepoint_check_flag),
    _locker2(mutex2, Mutex::_no_safepoint_check_flag),
    _locker3(mutex3, Mutex::_no_safepoint_check_flag)
  { }
};


// Wrapper class to temporarily disable icms during a foreground cms collection.
class ICMSDisabler: public StackObj {
 public:
  // The ctor disables icms and wakes up the thread so it notices the change;
  // the dtor re-enables icms.  Note that the CMSCollector methods will check
  // CMSIncrementalMode.
  ICMSDisabler()  { CMSCollector::disable_icms(); CMSCollector::start_icms(); }
  ~ICMSDisabler() { CMSCollector::enable_icms(); }
};

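// Illustrative usage sketch (not part of this file): foreground
// collection code can disable icms for exactly its own scope by
// constructing an ICMSDisabler on the stack; the hypothetical body
// below runs with icms off, and icms is re-enabled on scope exit:
//
//   {
//     ICMSDisabler disabler;        // icms off; CMS thread woken
//     do_foreground_collection();   // hypothetical helper
//   }                               // dtor re-enables icms
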
//////////////////////////////////////////////////////////////////
//  Concurrent Mark-Sweep Generation /////////////////////////////
//////////////////////////////////////////////////////////////////

NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)

// This struct contains per-thread things necessary to support parallel
// young-gen collection.
class CMSParGCThreadState: public CHeapObj {
 public:
  CFLS_LAB lab;
  PromotionInfo promo;

  // Constructor.
  CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
    promo.setSpace(cfls);
  }
};

ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
     ReservedSpace rs, size_t initial_byte_size, int level,
     CardTableRS* ct, bool use_adaptive_freelists,
     FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
  CardGeneration(rs, initial_byte_size, level, ct),
  _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
  _debug_collection_type(Concurrent_collection_type)
{
  HeapWord* bottom = (HeapWord*) _virtual_space.low();
  HeapWord* end    = (HeapWord*) _virtual_space.high();

  _direct_allocated_words = 0;
  NOT_PRODUCT(
    _numObjectsPromoted = 0;
    _numWordsPromoted = 0;
    _numObjectsAllocated = 0;
    _numWordsAllocated = 0;
  )

  _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
                                           use_adaptive_freelists,
                                           dictionaryChoice);
  NOT_PRODUCT(debug_cms_space = _cmsSpace;)
  if (_cmsSpace == NULL) {
    vm_exit_during_initialization(
      "CompactibleFreeListSpace allocation failure");
  }
  _cmsSpace->_gen = this;

  _gc_stats = new CMSGCStats();

  // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
  // offsets match. The ability to tell free chunks from objects
  // depends on this property.
  debug_only(
    FreeChunk* junk = NULL;
    assert(UseCompressedOops ||
           junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
           "Offset of FreeChunk::_prev within FreeChunk must match"
           " that of OopDesc::_klass within OopDesc");
  )
  if (CollectedHeap::use_parallel_gc_threads()) {
    typedef CMSParGCThreadState* CMSParGCThreadStatePtr;
    _par_gc_thread_states =
      NEW_C_HEAP_ARRAY(CMSParGCThreadStatePtr, ParallelGCThreads);
    if (_par_gc_thread_states == NULL) {
      vm_exit_during_initialization("Could not allocate par gc structs");
    }
    for (uint i = 0; i < ParallelGCThreads; i++) {
      _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
      if (_par_gc_thread_states[i] == NULL) {
        vm_exit_during_initialization("Could not allocate par gc structs");
      }
    }
  } else {
    _par_gc_thread_states = NULL;
  }
  _incremental_collection_failed = false;
  // The "dilatation_factor" is the expansion that can occur on
  // account of the fact that the minimum object size in the CMS
  // generation may be larger than that in, say, a contiguous young
  // generation.
  // Ideally, in the calculation below, we'd compute the dilatation
  // factor as: MinChunkSize/(promoting_gen's min object size)
  // Since we do not have such a general query interface for the
  // promoting generation, we'll instead just use the minimum
  // object size (which today is a header's worth of space);
  // note that all arithmetic is in units of HeapWords.
  assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
  assert(_dilatation_factor >= 1.0, "from previous assert");
}

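// Worked example of the dilatation factor above (hypothetical sizes, for
// illustration only): if MinChunkSize is 4 HeapWords while the promoting
// generation's minimum fill size is 2 HeapWords, then
// _dilatation_factor = 4.0/2.0 = 2.0, i.e. the smallest promoted object
// may occupy up to twice as many words in this generation.
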
// The field "_initiating_occupancy" represents the occupancy percentage
// at which we trigger a new collection cycle.  Unless explicitly specified
// via CMSInitiating[Perm]OccupancyFraction (argument "io" below), it
// is calculated by:
//
//   Let "f" be MinHeapFreeRatio in
//
//    _initiating_occupancy = 100-f +
//                           f * (CMSTrigger[Perm]Ratio/100)
//   where CMSTrigger[Perm]Ratio is the argument "tr" below.
//
// That is, if we assume the heap is at its desired maximum occupancy at the
// end of a collection, we let CMSTrigger[Perm]Ratio of the (purported) free
// space be allocated before initiating a new collection cycle.
//
void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, intx tr) {
  assert(io <= 100 && tr >= 0 && tr <= 100, "Check the arguments");
  if (io >= 0) {
    _initiating_occupancy = (double)io / 100.0;
  } else {
    _initiating_occupancy = ((100 - MinHeapFreeRatio) +
                             (double)(tr * MinHeapFreeRatio) / 100.0)
                            / 100.0;
  }
}

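// Worked example of the default ("io < 0") path above, with hypothetical
// flag values: MinHeapFreeRatio = 40 and CMSTrigger[Perm]Ratio = 80 give
//   _initiating_occupancy = ((100 - 40) + (80 * 40)/100) / 100
//                         = (60 + 32) / 100 = 0.92
// so a new collection cycle is initiated at 92% occupancy.
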
void ConcurrentMarkSweepGeneration::ref_processor_init() {
  assert(collector() != NULL, "no collector");
  collector()->ref_processor_init();
}

void CMSCollector::ref_processor_init() {
  if (_ref_processor == NULL) {
    // Allocate and initialize a reference processor
    _ref_processor =
      new ReferenceProcessor(_span,                               // span
                             (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
                             (int) ParallelGCThreads,             // mt processing degree
                             _cmsGen->refs_discovery_is_mt(),     // mt discovery
                             (int) MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
                             _cmsGen->refs_discovery_is_atomic(), // discovery is not atomic
                             &_is_alive_closure,                  // closure for liveness info
                             false);                              // next field updates do not need write barrier
    // Initialize the _ref_processor field of CMSGen
    _cmsGen->set_ref_processor(_ref_processor);

    // Allocate a dummy ref processor for perm gen.
    ReferenceProcessor* rp2 = new ReferenceProcessor();
    if (rp2 == NULL) {
      vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
    }
    _permGen->set_ref_processor(rp2);
  }
}

CMSAdaptiveSizePolicy* CMSCollector::size_policy() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
    "Wrong type of heap");
  CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
    gch->gen_policy()->size_policy();
  assert(sp->is_gc_cms_adaptive_size_policy(),
    "Wrong type of size policy");
  return sp;
}

CMSGCAdaptivePolicyCounters* CMSCollector::gc_adaptive_policy_counters() {
  CMSGCAdaptivePolicyCounters* results =
    (CMSGCAdaptivePolicyCounters*) collector_policy()->counters();
  assert(
    results->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
    "Wrong gc policy counter kind");
  return results;
}


void ConcurrentMarkSweepGeneration::initialize_performance_counters() {

  const char* gen_name = "old";

  // Generation Counters - generation 1, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 1, 1, &_virtual_space);

  _space_counters = new GSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       this, _gen_counters);
}

CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
  _cms_gen(cms_gen)
{
  assert(alpha <= 100, "bad value");
  _saved_alpha = alpha;

  // Initialize the alphas to the bootstrap value of 100.
  _gc0_alpha = _cms_alpha = 100;

  _cms_begin_time.update();
  _cms_end_time.update();

  _gc0_duration = 0.0;
  _gc0_period = 0.0;
  _gc0_promoted = 0;

  _cms_duration = 0.0;
  _cms_period = 0.0;
  _cms_allocated = 0;

  _cms_used_at_gc0_begin = 0;
  _cms_used_at_gc0_end = 0;
  _allow_duty_cycle_reduction = false;
  _valid_bits = 0;
  _icms_duty_cycle = CMSIncrementalDutyCycle;
}

double CMSStats::cms_free_adjustment_factor(size_t free) const {
  // TBD: CR 6909490
  return 1.0;
}

void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
}

// If promotion failure handling is on use
// the padded average size of the promotion for each
// young generation collection.
double CMSStats::time_until_cms_gen_full() const {
  size_t cms_free = _cms_gen->cmsSpace()->free();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  size_t expected_promotion = MIN2(gch->get_gen(0)->capacity(),
                                   (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
  if (cms_free > expected_promotion) {
    // Start a cms collection if there isn't enough space to promote
    // for the next minor collection.  Use the padded average as
    // a safety factor.
    cms_free -= expected_promotion;

    // Adjust by the safety factor.
    double cms_free_dbl = (double)cms_free;
    double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor)/100.0;
    // Apply a further correction factor which tries to adjust
    // for recent occurrence of concurrent mode failures.
    cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
    cms_free_dbl = cms_free_dbl * cms_adjustment;

    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free "
                             SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
                             cms_free, expected_promotion);
      gclog_or_tty->print_cr("  cms_free_dbl %f cms_consumption_rate %f",
                             cms_free_dbl, cms_consumption_rate() + 1.0);
    }
    // Add 1 in case the consumption rate goes to zero.
    return cms_free_dbl / (cms_consumption_rate() + 1.0);
  }
  return 0.0;
}

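// Worked example of the estimate above (hypothetical values): with
// cms_free = 120M, expected_promotion = 20M and
// CMSIncrementalSafetyFactor = 10, the adjusted free space is
// (120M - 20M) * 0.9 = 90M; at a consumption rate of ~9M/s (with +1
// guarding against a zero rate) the method returns roughly 10 seconds
// until the generation is expected to fill up.
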
// Compare the duration of the cms collection to the
// time remaining before the cms generation is empty.
// Note that the time from the start of the cms collection
// to the start of the cms sweep (less than the total
// duration of the cms collection) can be used.  This
// has been tried and some applications experienced
// promotion failures early in execution.  This was
// possibly because the averages were not accurate
// enough at the beginning.
double CMSStats::time_until_cms_start() const {
  // We add "gc0_period" to the "work" calculation
  // below because this query is done (mostly) at the
  // end of a scavenge, so we need to conservatively
  // account for that much possible delay
  // in the query so as to avoid concurrent mode failures
  // due to starting the collection just a wee bit too
  // late.
  double work = cms_duration() + gc0_period();
  double deadline = time_until_cms_gen_full();
  // If a concurrent mode failure occurred recently, we want to be
  // more conservative and halve our expected time_until_cms_gen_full()
  if (work > deadline) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(
        " CMSCollector: collect because of anticipated promotion "
        "before full %3.7f + %3.7f > %3.7f ", cms_duration(),
        gc0_period(), time_until_cms_gen_full());
    }
    return 0.0;
  }
  return work - deadline;
}

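// Worked example (hypothetical values): if cms_duration() = 4s and
// gc0_period() = 1s, then work = 5s; with time_until_cms_gen_full() = 3s
// the deadline is exceeded and the method returns 0.0, i.e. a cycle
// should start immediately.  With a 10s deadline the method instead
// returns a non-zero value, signalling that the start can be deferred.
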
// Return a duty cycle based on old_duty_cycle and new_duty_cycle, limiting the
// amount of change to prevent wild oscillation.
unsigned int CMSStats::icms_damped_duty_cycle(unsigned int old_duty_cycle,
                                              unsigned int new_duty_cycle) {
  assert(old_duty_cycle <= 100, "bad input value");
  assert(new_duty_cycle <= 100, "bad input value");

  // Note:  use subtraction with caution since it may underflow (values are
  // unsigned).  Addition is safe since we're in the range 0-100.
  unsigned int damped_duty_cycle = new_duty_cycle;
  if (new_duty_cycle < old_duty_cycle) {
    const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 5U);
    if (new_duty_cycle + largest_delta < old_duty_cycle) {
      damped_duty_cycle = old_duty_cycle - largest_delta;
    }
  } else if (new_duty_cycle > old_duty_cycle) {
    const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 15U);
    if (new_duty_cycle > old_duty_cycle + largest_delta) {
      damped_duty_cycle = MIN2(old_duty_cycle + largest_delta, 100U);
    }
  }
  assert(damped_duty_cycle <= 100, "invalid duty cycle computed");

  if (CMSTraceIncrementalPacing) {
    gclog_or_tty->print(" [icms_damped_duty_cycle(%d,%d) = %d] ",
                        old_duty_cycle, new_duty_cycle, damped_duty_cycle);
  }
  return damped_duty_cycle;
}

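// Worked example of the damping above (hypothetical inputs): for
// old_duty_cycle = 60 and new_duty_cycle = 10, the largest downward step
// is MAX2(60/4, 5) = 15; since 10 + 15 < 60, the result is clamped to
// 60 - 15 = 45.  Increases move faster: for old = 20 and new = 60 the
// cap is MAX2(20/4, 15) = 15, so the damped value is MIN2(20 + 15, 100) = 35.
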
unsigned int CMSStats::icms_update_duty_cycle_impl() {
  assert(CMSIncrementalPacing && valid(),
         "should be handled in icms_update_duty_cycle()");

  double cms_time_so_far = cms_timer().seconds();
  double scaled_duration = cms_duration_per_mb() * _cms_used_at_gc0_end / M;
  double scaled_duration_remaining = fabsd(scaled_duration - cms_time_so_far);

  // Avoid division by 0.
  double time_until_full = MAX2(time_until_cms_gen_full(), 0.01);
  double duty_cycle_dbl = 100.0 * scaled_duration_remaining / time_until_full;

  unsigned int new_duty_cycle = MIN2((unsigned int)duty_cycle_dbl, 100U);
  if (new_duty_cycle > _icms_duty_cycle) {
    // Avoid very small duty cycles (1 or 2); 0 is allowed.
    if (new_duty_cycle > 2) {
      _icms_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle,
                                                new_duty_cycle);
    }
  } else if (_allow_duty_cycle_reduction) {
    // The duty cycle is reduced only once per cms cycle (see record_cms_end()).
    new_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle, new_duty_cycle);
    // Respect the minimum duty cycle.
    unsigned int min_duty_cycle = (unsigned int)CMSIncrementalDutyCycleMin;
    _icms_duty_cycle = MAX2(new_duty_cycle, min_duty_cycle);
  }

  if (PrintGCDetails || CMSTraceIncrementalPacing) {
    gclog_or_tty->print(" icms_dc=%d ", _icms_duty_cycle);
  }

  _allow_duty_cycle_reduction = false;
  return _icms_duty_cycle;
}

#ifndef PRODUCT
void CMSStats::print_on(outputStream *st) const {
  st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
  st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
            gc0_duration(), gc0_period(), gc0_promoted());
  st->print(",cms_dur=%g,cms_dur_per_mb=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
            cms_duration(), cms_duration_per_mb(),
            cms_period(), cms_allocated());
  st->print(",cms_since_beg=%g,cms_since_end=%g",
            cms_time_since_begin(), cms_time_since_end());
  st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
            _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
  if (CMSIncrementalMode) {
    st->print(",dc=%d", icms_duty_cycle());
  }

  if (valid()) {
    st->print(",promo_rate=%g,cms_alloc_rate=%g",
              promotion_rate(), cms_allocation_rate());
    st->print(",cms_consumption_rate=%g,time_until_full=%g",
              cms_consumption_rate(), time_until_cms_gen_full());
  }
  st->print(" ");
}
#endif // #ifndef PRODUCT

CMSCollector::CollectorState CMSCollector::_collectorState =
             CMSCollector::Idling;
bool CMSCollector::_foregroundGCIsActive = false;
bool CMSCollector::_foregroundGCShouldWait = false;

CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
                           ConcurrentMarkSweepGeneration* permGen,
                           CardTableRS*                   ct,
                           ConcurrentMarkSweepPolicy*     cp):
  _cmsGen(cmsGen),
  _permGen(permGen),
  _ct(ct),
  _ref_processor(NULL),    // will be set later
  _conc_workers(NULL),     // may be set later
  _abort_preclean(false),
  _start_sampling(false),
  _between_prologue_and_epilogue(false),
  _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
  _perm_gen_verify_bit_map(0, -1 /* no mutex */, "No_lock"),
  _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
                 -1 /* lock-free */, "No_lock" /* dummy */),
  _modUnionClosure(&_modUnionTable),
  _modUnionClosurePar(&_modUnionTable),
  // Adjust my span to cover old (cms) gen and perm gen
  _span(cmsGen->reserved()._union(permGen->reserved())),
  // Construct the is_alive_closure with _span & markBitMap
  _is_alive_closure(_span, &_markBitMap),
  _restart_addr(NULL),
  _overflow_list(NULL),
  _stats(cmsGen),
  _eden_chunk_array(NULL),     // may be set in ctor body
  _eden_chunk_capacity(0),     // -- ditto --
  _eden_chunk_index(0),        // -- ditto --
  _survivor_plab_array(NULL),  // -- ditto --
  _survivor_chunk_array(NULL), // -- ditto --
  _survivor_chunk_capacity(0), // -- ditto --
  _survivor_chunk_index(0),    // -- ditto --
  _ser_pmc_preclean_ovflw(0),
  _ser_kac_preclean_ovflw(0),
  _ser_pmc_remark_ovflw(0),
  _par_pmc_remark_ovflw(0),
  _ser_kac_ovflw(0),
  _par_kac_ovflw(0),
#ifndef PRODUCT
  _num_par_pushes(0),
#endif
  _collection_count_start(0),
  _verifying(false),
  _icms_start_limit(NULL),
  _icms_stop_limit(NULL),
  _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
  _completed_initialization(false),
  _collector_policy(cp),
  _should_unload_classes(false),
  _concurrent_cycles_since_last_unload(0),
  _roots_scanning_options(0),
  _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
  _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding)
{
  if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
    ExplicitGCInvokesConcurrent = true;
  }
  // Now expand the span and allocate the collection support structures
  // (MUT, marking bit map etc.) to cover both generations subject to
  // collection.

  // First check that _permGen is adjacent to _cmsGen and above it.
  assert(   _cmsGen->reserved().word_size() > 0
         && _permGen->reserved().word_size() > 0,
         "generations should not be of zero size");
  assert(_cmsGen->reserved().intersection(_permGen->reserved()).is_empty(),
         "_cmsGen and _permGen should not overlap");
  assert(_cmsGen->reserved().end() == _permGen->reserved().start(),
         "_cmsGen->end() different from _permGen->start()");

  // For use by dirty card to oop closures.
  _cmsGen->cmsSpace()->set_collector(this);
  _permGen->cmsSpace()->set_collector(this);

  // Allocate MUT and marking bit map
  {
    MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
    if (!_markBitMap.allocate(_span)) {
      warning("Failed to allocate CMS Bit Map");
      return;
    }
    assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
  }
  {
    _modUnionTable.allocate(_span);
    assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
  }

  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CMS Marking Stack");
    return;
  }
  if (!_revisitStack.allocate(CMSRevisitStackSize)) {
    warning("Failed to allocate CMS Revisit Stack");
    return;
  }

  // Support for multi-threaded concurrent phases
  if (CMSConcurrentMTEnabled) {
    if (FLAG_IS_DEFAULT(ConcGCThreads)) {
      // just for now
      FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3)/4);
    }
    if (ConcGCThreads > 1) {
      _conc_workers = new YieldingFlexibleWorkGang("Parallel CMS Threads",
                                                   ConcGCThreads, true);
      if (_conc_workers == NULL) {
        warning("GC/CMS: _conc_workers allocation failure: "
                "forcing -CMSConcurrentMTEnabled");
        CMSConcurrentMTEnabled = false;
      } else {
        _conc_workers->initialize_workers();
      }
    } else {
      CMSConcurrentMTEnabled = false;
    }
  }
  if (!CMSConcurrentMTEnabled) {
    ConcGCThreads = 0;
  } else {
    // Turn off CMSCleanOnEnter optimization temporarily for
    // the MT case where it's not fixed yet; see 6178663.
    CMSCleanOnEnter = false;
  }
  assert((_conc_workers != NULL) == (ConcGCThreads > 1),
         "Inconsistency");

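// Worked example of the ConcGCThreads default above (hypothetical flag
// value): with ParallelGCThreads = 13, FLAG_SET_DEFAULT yields
// ConcGCThreads = (13 + 3)/4 = 4, i.e. about a quarter of the parallel
// workers, rounded up, run the concurrent phases.
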
  // Parallel task queues; these are shared for the
  // concurrent and stop-world phases of CMS, but
  // are not shared with parallel scavenge (ParNew).
  {
    uint i;
    uint num_queues = (uint) MAX2(ParallelGCThreads, ConcGCThreads);

    if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
         || ParallelRefProcEnabled)
        && num_queues > 0) {
      _task_queues = new OopTaskQueueSet(num_queues);
      if (_task_queues == NULL) {
        warning("task_queues allocation failure.");
        return;
      }
      _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues);
      if (_hash_seed == NULL) {
        warning("_hash_seed array allocation failure");
        return;
      }

      typedef Padded<OopTaskQueue> PaddedOopTaskQueue;
      for (i = 0; i < num_queues; i++) {
        PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
        if (q == NULL) {
          warning("work_queue allocation failure.");
          return;
        }
        _task_queues->register_queue(i, q);
      }
      for (i = 0; i < num_queues; i++) {
        _task_queues->queue(i)->initialize();
        _hash_seed[i] = 17;  // copied from ParNew
      }
    }
  }

  _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
  _permGen->init_initiating_occupancy(CMSInitiatingPermOccupancyFraction, CMSTriggerPermRatio);

  // Clip CMSBootstrapOccupancy between 0 and 100.
  _bootstrap_occupancy = ((double)MIN2((uintx)100, MAX2((uintx)0, CMSBootstrapOccupancy)))
                         /(double)100;

  _full_gcs_since_conc_gc = 0;

  // Now tell CMS generations the identity of their collector
  ConcurrentMarkSweepGeneration::set_collector(this);

  // Create & start a CMS thread for this CMS collector
  _cmsThread = ConcurrentMarkSweepThread::start(this);
  assert(cmsThread() != NULL, "CMS Thread should have been created");
  assert(cmsThread()->collector() == this,
         "CMS Thread should refer to this gen");
  assert(CGC_lock != NULL, "Where's the CGC_lock?");

  // Support for parallelizing young gen rescan
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  _young_gen = gch->prev_gen(_cmsGen);
  if (gch->supports_inline_contig_alloc()) {
    _top_addr = gch->top_addr();
    _end_addr = gch->end_addr();
    assert(_young_gen != NULL, "no _young_gen");
    _eden_chunk_index = 0;
    _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
    _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity);
    if (_eden_chunk_array == NULL) {
      _eden_chunk_capacity = 0;
      warning("GC/CMS: _eden_chunk_array allocation failure");
    }
  }
  assert(_eden_chunk_array != NULL || _eden_chunk_capacity == 0, "Error");

  // Support for parallelizing survivor space rescan
  if (CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) {
    const size_t max_plab_samples =
      ((DefNewGeneration*)_young_gen)->max_survivor_size()/MinTLABSize;

    _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads);
    _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples);
    _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads);
    if (_survivor_plab_array == NULL || _survivor_chunk_array == NULL
        || _cursor == NULL) {
      warning("Failed to allocate survivor plab/chunk array");
      if (_survivor_plab_array  != NULL) {
        FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array);
        _survivor_plab_array = NULL;
      }
      if (_survivor_chunk_array != NULL) {
        FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array);
        _survivor_chunk_array = NULL;
      }
      if (_cursor != NULL) {
        FREE_C_HEAP_ARRAY(size_t, _cursor);
        _cursor = NULL;
      }
    } else {
      _survivor_chunk_capacity = 2*max_plab_samples;
      for (uint i = 0; i < ParallelGCThreads; i++) {
        HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples);
        if (vec == NULL) {
          warning("Failed to allocate survivor plab array");
          for (int j = i; j > 0; j--) {
            FREE_C_HEAP_ARRAY(HeapWord*, _survivor_plab_array[j-1].array());
          }
          FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array);
          FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array);
          _survivor_plab_array = NULL;
          _survivor_chunk_array = NULL;
          _survivor_chunk_capacity = 0;
          break;
        } else {
          ChunkArray* cur =
            ::new (&_survivor_plab_array[i]) ChunkArray(vec,
                                                        max_plab_samples);
          assert(cur->end() == 0, "Should be 0");
          assert(cur->array() == vec, "Should be vec");
          assert(cur->capacity() == max_plab_samples, "Error");
        }
      }
    }
  }
  assert(   (   _survivor_plab_array  != NULL
             && _survivor_chunk_array != NULL)
         || (   _survivor_chunk_capacity == 0
             && _survivor_chunk_index == 0),
         "Error");

  // Choose what strong roots should be scanned depending on verification options
  // and perm gen collection mode.
  if (!CMSClassUnloadingEnabled) {
    // If class unloading is disabled we want to include all classes into the root set.
    add_root_scanning_option(SharedHeap::SO_AllClasses);
  } else {
    add_root_scanning_option(SharedHeap::SO_SystemClasses);
  }

  NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
  _gc_counters = new CollectorCounters("CMS", 1);
  _completed_initialization = true;
  _inter_sweep_timer.start();  // start of time
#ifdef SPARC
  // Issue a stern warning, but allow use for experimentation and debugging.
  if (VM_Version::is_sun4v() && UseMemSetInBOT) {
    assert(!FLAG_IS_DEFAULT(UseMemSetInBOT), "Error");
    warning("Experimental flag -XX:+UseMemSetInBOT is known to cause instability"
            " on sun4v; please understand that you are using at your own risk!");
  }
#endif
}

const char* ConcurrentMarkSweepGeneration::name() const {
  return "concurrent mark-sweep generation";
}
void ConcurrentMarkSweepGeneration::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

// this is an optimized version of update_counters(). it takes the
// used value as a parameter rather than computing it.
//
void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
  if (UsePerfData) {
    _space_counters->update_used(used);
    _space_counters->update_capacity();
    _gen_counters->update_all();
  }
}

void ConcurrentMarkSweepGeneration::print() const {
  Generation::print();
  cmsSpace()->print();
}

#ifndef PRODUCT
void ConcurrentMarkSweepGeneration::print_statistics() {
  cmsSpace()->printFLCensus(0);
}
#endif

void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (PrintGCDetails) {
    if (Verbose) {
      gclog_or_tty->print(" [%d %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]",
        level(), short_name(), s, used(), capacity());
    } else {
      gclog_or_tty->print(" [%d %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]",
        level(), short_name(), s, used() / K, capacity() / K);
    }
  }
  if (Verbose) {
    gclog_or_tty->print(" "SIZE_FORMAT"("SIZE_FORMAT")",
              gch->used(), gch->capacity());
  } else {
    gclog_or_tty->print(" "SIZE_FORMAT"K("SIZE_FORMAT"K)",
              gch->used() / K, gch->capacity() / K);
  }
}

size_t
ConcurrentMarkSweepGeneration::contiguous_available() const {
  // dld proposes an improvement in precision here. If the committed
  // part of the space ends in a free block we should add that to
  // uncommitted size in the calculation below. Will make this
  // change later, staying with the approximation below for the
  // time being. -- ysr.
  return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
}

size_t
ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
  return _cmsSpace->max_alloc_in_words() * HeapWordSize;
}

size_t ConcurrentMarkSweepGeneration::max_available() const {
  return free() + _virtual_space.uncommitted_size();
}

bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
  size_t available = max_available();
  size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
  bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
  if (Verbose && PrintGCDetails) {
    gclog_or_tty->print_cr(
      "CMS: promo attempt is%s safe: available("SIZE_FORMAT") %s av_promo("SIZE_FORMAT"),"
      "max_promo("SIZE_FORMAT")",
      res? "":" not", available, res? ">=":"<",
      av_promo, max_promotion_in_bytes);
  }
  return res;
}

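// Worked example (hypothetical values): with max_available() = 80M, a
// padded promotion average of 30M, and max_promotion_in_bytes = 100M,
// the first disjunct (80M >= 30M) already holds, so the promotion
// attempt is considered safe even though 80M < 100M.
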
// At a promotion failure dump information on block layout in heap
// (cms old generation).
void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
  if (CMSDumpAtPromotionFailure) {
    cmsSpace()->dump_at_safepoint_with_locks(collector(), gclog_or_tty);
  }
}

CompactibleSpace*
ConcurrentMarkSweepGeneration::first_compaction_space() const {
  return _cmsSpace;
}

void ConcurrentMarkSweepGeneration::reset_after_compaction() {
  // Clear the promotion information.  These pointers can be adjusted
  // along with all the other pointers into the heap but
  // compaction is expected to be a rare event with
  // a heap using cms so don't do it without seeing the need.
  if (CollectedHeap::use_parallel_gc_threads()) {
    for (uint i = 0; i < ParallelGCThreads; i++) {
      _par_gc_thread_states[i]->promo.reset();
    }
  }
}

void ConcurrentMarkSweepGeneration::space_iterate(SpaceClosure* blk, bool usedOnly) {
  blk->do_space(_cmsSpace);
}

940 void ConcurrentMarkSweepGeneration::compute_new_size() { | |
941 assert_locked_or_safepoint(Heap_lock); | |
942 | |
943 // If incremental collection failed, we just want to expand | |
944 // to the limit. | |
945 if (incremental_collection_failed()) { | |
946 clear_incremental_collection_failed(); | |
947 grow_to_reserved(); | |
948 return; | |
949 } | |
950 | |
951 size_t expand_bytes = 0; | |
952 double free_percentage = ((double) free()) / capacity(); | |
953 double desired_free_percentage = (double) MinHeapFreeRatio / 100; | |
954 double maximum_free_percentage = (double) MaxHeapFreeRatio / 100; | |
955 | |
956 // compute expansion delta needed for reaching desired free percentage | |
957 if (free_percentage < desired_free_percentage) { | |
958 size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage)); | |
959 assert(desired_capacity >= capacity(), "invalid expansion size"); | |
960 expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes); | |
961 } | |
962 if (expand_bytes > 0) { | |
963 if (PrintGCDetails && Verbose) { | |
964 size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage)); | |
965 gclog_or_tty->print_cr("\nFrom compute_new_size: "); | |
966 gclog_or_tty->print_cr(" Free fraction %f", free_percentage); | |
967 gclog_or_tty->print_cr(" Desired free fraction %f", | |
968 desired_free_percentage); | |
969 gclog_or_tty->print_cr(" Maximum free fraction %f", | |
970 maximum_free_percentage); | |
971 gclog_or_tty->print_cr(" Capacity "SIZE_FORMAT, capacity()/1000); | |
972 gclog_or_tty->print_cr(" Desired capacity "SIZE_FORMAT, | |
973 desired_capacity/1000); | |
974 int prev_level = level() - 1; | |
975 if (prev_level >= 0) { | |
976 size_t prev_size = 0; | |
977 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
978 Generation* prev_gen = gch->_gens[prev_level]; | |
979 prev_size = prev_gen->capacity(); | |
980 gclog_or_tty->print_cr(" Younger gen size "SIZE_FORMAT, | |
981 prev_size/1000); | |
982 } | |
983 gclog_or_tty->print_cr(" unsafe_max_alloc_nogc "SIZE_FORMAT, | |
984 unsafe_max_alloc_nogc()/1000); | |
985 gclog_or_tty->print_cr(" contiguous available "SIZE_FORMAT, | |
986 contiguous_available()/1000); | |
987 gclog_or_tty->print_cr(" Expand by "SIZE_FORMAT" (bytes)", | |
988 expand_bytes); | |
989 } | |
990 // safe if expansion fails | |
991 expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio); | |
992 if (PrintGCDetails && Verbose) { | |
993 gclog_or_tty->print_cr(" Expanded free fraction %f", | |
994 ((double) free()) / capacity()); | |
995 } | |
996 } | |
997 } | |
998 | |
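To make the expansion arithmetic above concrete, here is a minimal standalone sketch of the same delta computation (illustrative only; the real method also logs and calls expand()):

#include <algorithm>
#include <cstddef>

// Mirrors the sizing logic in compute_new_size() above: expand until
// free()/capacity() reaches the desired free percentage.
size_t expansion_delta(size_t used, size_t capacity,
                       double desired_free_pct, size_t min_delta) {
  size_t desired_capacity = (size_t)(used / (1.0 - desired_free_pct));
  if (desired_capacity <= capacity) return 0;   // already free enough
  return std::max(desired_capacity - capacity, min_delta);
}
// Example: used = 600 MB, desired_free_pct = 0.40 (MinHeapFreeRatio=40)
//   => desired_capacity = 600 / 0.6 = 1000 MB, delta = 1000 MB - capacity.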
999 Mutex* ConcurrentMarkSweepGeneration::freelistLock() const { | |
1000 return cmsSpace()->freelistLock(); | |
1001 } | |
1002 | |
1003 HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size, | |
1004 bool tlab) { | |
1005 CMSSynchronousYieldRequest yr; | |
1006 MutexLockerEx x(freelistLock(), | |
1007 Mutex::_no_safepoint_check_flag); | |
1008 return have_lock_and_allocate(size, tlab); | |
1009 } | |
1010 | |
1011 HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size, | |
1716 | 1012 bool tlab /* ignored */) { |
0 | 1013 assert_lock_strong(freelistLock()); |
1014 size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size); | |
1015 HeapWord* res = cmsSpace()->allocate(adjustedSize); | |
1016 // Allocate the object live (grey) if the background collector has | |
1017 // started marking. This is necessary because the marker may | |
1018 // have passed this address and consequently this object will | |
1019 // not otherwise be greyed and would be incorrectly swept up. | |
1020 // Note that if this object contains references, the writing | |
1021 // of those references will dirty the card containing this object | |
1022 // allowing the object to be blackened (and its references scanned) | |
1023 // either during a preclean phase or at the final checkpoint. | |
1024 if (res != NULL) { | |
1716 | 1025 // We may block here with an uninitialized object with |
1026 // its mark-bit or P-bits not yet set. Such objects need |
1027 // to be safely navigable by block_start(). |
1028 assert(oop(res)->klass_or_null() == NULL, "Object should be uninitialized here."); |
1029 assert(!((FreeChunk*)res)->isFree(), "Error, block will look free but show wrong size"); |
0 | 1030 collector()->direct_allocated(res, adjustedSize); |
1031 _direct_allocated_words += adjustedSize; | |
1032 // allocation counters | |
1033 NOT_PRODUCT( | |
1034 _numObjectsAllocated++; | |
1035 _numWordsAllocated += (int)adjustedSize; | |
1036 ) | |
1037 } | |
1038 return res; | |
1039 } | |
1040 | |
1041 // In the case of direct allocation by mutators in a generation that | |
1042 // is being concurrently collected, the object must be allocated | |
1043 // live (grey) if the background collector has started marking. | |
1044 // This is necessary because the marker may | |
1045 // have passed this address and consequently this object will | |
1046 // not otherwise be greyed and would be incorrectly swept up. | |
1047 // Note that if this object contains references, the writing | |
1048 // of those references will dirty the card containing this object | |
1049 // allowing the object to be blackened (and its references scanned) | |
1050 // either during a preclean phase or at the final checkpoint. | |
1051 void CMSCollector::direct_allocated(HeapWord* start, size_t size) { | |
1052 assert(_markBitMap.covers(start, size), "Out of bounds"); | |
1053 if (_collectorState >= Marking) { | |
1054 MutexLockerEx y(_markBitMap.lock(), | |
1055 Mutex::_no_safepoint_check_flag); | |
1056 // [see comments preceding SweepClosure::do_blk() below for details] | |
1057 // 1. need to mark the object as live so it isn't collected | |
1058 // 2. need to mark the 2nd bit to indicate the object may be uninitialized | |
1716 | 1059 // 3. need to mark the end of the object so marking, precleaning or sweeping |
1060 // can skip over uninitialized or unparsable objects. An allocated |
1061 // object is considered uninitialized for our purposes as long as |
1062 // its klass word is NULL. (Unparsable objects are those which are |
1063 // initialized in the sense just described, but whose sizes can still |
1064 // not be correctly determined. Note that the class of unparsable objects |
1065 // can only occur in the perm gen. All old gen objects are parsable |
1066 // as soon as they are initialized.) |
0 | 1067 _markBitMap.mark(start); // object is live |
1068 _markBitMap.mark(start + 1); // object is potentially uninitialized? | |
1069 _markBitMap.mark(start + size - 1); | |
1070 // mark end of object | |
1071 } | |
1072 // check that oop looks uninitialized | |
187 | 1073 assert(oop(start)->klass_or_null() == NULL, "_klass should be NULL"); |
0 | 1074 } |
1075 | |
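The three marks set above encode liveness, a "possibly uninitialized" P-bit, and the object's end. A hedged sketch of how a reader could recover the size from that pattern alone (std::vector<bool> stands in for the real BitMap; assumes objects of at least three words):

#include <cassert>
#include <cstddef>
#include <vector>

size_t size_from_marks(const std::vector<bool>& bits, size_t start) {
  assert(bits[start] && bits[start + 1]);   // live bit + P-bit
  size_t end = start + 2;
  while (!bits[end]) end++;                 // scan to the end-of-object mark
  return end - start + 1;                   // size in words
}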
1076 void CMSCollector::promoted(bool par, HeapWord* start, | |
1077 bool is_obj_array, size_t obj_size) { | |
1078 assert(_markBitMap.covers(start), "Out of bounds"); | |
1079 // See comment in direct_allocated() about when objects should | |
1080 // be allocated live. | |
1081 if (_collectorState >= Marking) { | |
1082 // we already hold the marking bit map lock, taken in | |
1083 // the prologue | |
1084 if (par) { | |
1085 _markBitMap.par_mark(start); | |
1086 } else { | |
1087 _markBitMap.mark(start); | |
1088 } | |
1089 // We don't need to mark the object as uninitialized (as | |
1090 // in direct_allocated above) because this is being done with the | |
1091 // world stopped and the object will be initialized by the | |
1716 | 1092 // time the marking, precleaning or sweeping get to look at it. |
1093 // But see the code for copying objects into the CMS generation, |
1094 // where we need to ensure that concurrent readers of the |
1095 // block offset table are able to safely navigate a block that |
1096 // is in flux from being free to being allocated (and in |
1097 // transition while being copied into) and subsequently |
1098 // becoming a bona-fide object when the copy/promotion is complete. |
0 | 1099 assert(SafepointSynchronize::is_at_safepoint(), |
1100 "expect promotion only at safepoints"); | |
1101 | |
1102 if (_collectorState < Sweeping) { | |
1103 // Mark the appropriate cards in the modUnionTable, so that | |
1104 // this object gets scanned before the sweep. If this is | |
1105 // not done, CMS generation references in the object might | |
1106 // not get marked. | |
1107 // For the case of arrays, which are otherwise precisely | |
1108 // marked, we need to dirty the entire array, not just its head. | |
1109 if (is_obj_array) { | |
1110 // The [par_]mark_range() method expects mr.end() below to | |
1111 // be aligned to the granularity of a bit's representation | |
1112 // in the heap. In the case of the MUT below, that's a | |
1113 // card size. | |
1114 MemRegion mr(start, | |
1115 (HeapWord*)round_to((intptr_t)(start + obj_size), | |
1116 CardTableModRefBS::card_size /* bytes */)); | |
1117 if (par) { | |
1118 _modUnionTable.par_mark_range(mr); | |
1119 } else { | |
1120 _modUnionTable.mark_range(mr); | |
1121 } | |
1122 } else { // not an obj array; we can just mark the head | |
1123 if (par) { | |
1124 _modUnionTable.par_mark(start); | |
1125 } else { | |
1126 _modUnionTable.mark(start); | |
1127 } | |
1128 } | |
1129 } | |
1130 } | |
1131 } | |
1132 | |
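The object-array branch above dirties whole cards, so mr.end() must be rounded up to a card boundary. A minimal sketch of that rounding (assuming 512-byte cards, the usual HotSpot card size; illustrative only):

#include <cstdint>

uintptr_t round_up_to_card(uintptr_t addr, uintptr_t card_size = 512) {
  // card_size must be a power of two for this mask trick to work
  return (addr + card_size - 1) & ~(card_size - 1);
}
// The dirtied region is then [start, round_up_to_card(start + obj_size_in_bytes)).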
1133 static inline size_t percent_of_space(Space* space, HeapWord* addr) | |
1134 { | |
1135 size_t delta = pointer_delta(addr, space->bottom()); | |
1136 return (size_t)(delta * 100.0 / (space->capacity() / HeapWordSize)); | |
1137 } | |
1138 | |
1139 void CMSCollector::icms_update_allocation_limits() | |
1140 { | |
1141 Generation* gen0 = GenCollectedHeap::heap()->get_gen(0); | |
1142 EdenSpace* eden = gen0->as_DefNewGeneration()->eden(); | |
1143 | |
1144 const unsigned int duty_cycle = stats().icms_update_duty_cycle(); | |
1145 if (CMSTraceIncrementalPacing) { | |
1146 stats().print(); | |
1147 } | |
1148 | |
1149 assert(duty_cycle <= 100, "invalid duty cycle"); | |
1150 if (duty_cycle != 0) { | |
1151 // The duty_cycle is a percentage between 0 and 100; convert to words and | |
1152 // then compute the offset from the endpoints of the space. | |
1153 size_t free_words = eden->free() / HeapWordSize; | |
1154 double free_words_dbl = (double)free_words; | |
1155 size_t duty_cycle_words = (size_t)(free_words_dbl * duty_cycle / 100.0); | |
1156 size_t offset_words = (free_words - duty_cycle_words) / 2; | |
1157 | |
1158 _icms_start_limit = eden->top() + offset_words; | |
1159 _icms_stop_limit = eden->end() - offset_words; | |
1160 | |
1161 // The limits may be adjusted (shifted to the right) by | |
1162 // CMSIncrementalOffset, to allow the application more mutator time after a | |
1163 // young gen gc (when all mutators were stopped) and before CMS starts and | |
1164 // takes away one or more cpus. | |
1165 if (CMSIncrementalOffset != 0) { | |
1166 double adjustment_dbl = free_words_dbl * CMSIncrementalOffset / 100.0; | |
1167 size_t adjustment = (size_t)adjustment_dbl; | |
1168 HeapWord* tmp_stop = _icms_stop_limit + adjustment; | |
1169 if (tmp_stop > _icms_stop_limit && tmp_stop < eden->end()) { | |
1170 _icms_start_limit += adjustment; | |
1171 _icms_stop_limit = tmp_stop; | |
1172 } | |
1173 } | |
1174 } | |
1175 if (duty_cycle == 0 || (_icms_start_limit == _icms_stop_limit)) { | |
1176 _icms_start_limit = _icms_stop_limit = eden->end(); | |
1177 } | |
1178 | |
1179 // Install the new start limit. | |
1180 eden->set_soft_end(_icms_start_limit); | |
1181 | |
1182 if (CMSTraceIncrementalMode) { | |
1183 gclog_or_tty->print(" icms alloc limits: " | |
1184 PTR_FORMAT "," PTR_FORMAT | |
1185 " (" SIZE_FORMAT "%%," SIZE_FORMAT "%%) ", | |
1186 _icms_start_limit, _icms_stop_limit, | |
1187 percent_of_space(eden, _icms_start_limit), | |
1188 percent_of_space(eden, _icms_stop_limit)); | |
1189 if (Verbose) { | |
1190 gclog_or_tty->print("eden: "); | |
1191 eden->print_on(gclog_or_tty); | |
1192 } | |
1193 } | |
1194 } | |
1195 | |
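A worked example of the limit placement above, with illustrative numbers:

// Suppose eden has free() == 1000 words and the duty cycle is 20%:
//   duty_cycle_words = 1000 * 20 / 100 = 200
//   offset_words     = (1000 - 200) / 2 = 400
//   _icms_start_limit = top() + 400;  _icms_stop_limit = end() - 400
// i.e. the incremental collector runs only while mutators allocate in the
// middle 200 words (20%) of the remaining free space, centered in eden.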
1196 // Any changes here should try to maintain the invariant | |
1197 // that if this method is called with _icms_start_limit | |
1198 // and _icms_stop_limit both NULL, then it should return NULL | |
1199 // and not notify the icms thread. | |
1200 HeapWord* | |
1201 CMSCollector::allocation_limit_reached(Space* space, HeapWord* top, | |
1202 size_t word_size) | |
1203 { | |
1204 // A start_limit equal to end() means the duty cycle is 0, so treat that as a | |
1205 // nop. | |
1206 if (CMSIncrementalMode && _icms_start_limit != space->end()) { | |
1207 if (top <= _icms_start_limit) { | |
1208 if (CMSTraceIncrementalMode) { | |
1209 space->print_on(gclog_or_tty); | |
1210 gclog_or_tty->stamp(); | |
1211 gclog_or_tty->print_cr(" start limit top=" PTR_FORMAT | |
1212 ", new limit=" PTR_FORMAT | |
1213 " (" SIZE_FORMAT "%%)", | |
1214 top, _icms_stop_limit, | |
1215 percent_of_space(space, _icms_stop_limit)); | |
1216 } | |
1217 ConcurrentMarkSweepThread::start_icms(); | |
1218 assert(top < _icms_stop_limit, "Tautology"); | |
1219 if (word_size < pointer_delta(_icms_stop_limit, top)) { | |
1220 return _icms_stop_limit; | |
1221 } | |
1222 | |
1223 // The allocation will cross both the _start and _stop limits, so do the | |
1224 // stop notification also and return end(). | |
1225 if (CMSTraceIncrementalMode) { | |
1226 space->print_on(gclog_or_tty); | |
1227 gclog_or_tty->stamp(); | |
1228 gclog_or_tty->print_cr(" +stop limit top=" PTR_FORMAT | |
1229 ", new limit=" PTR_FORMAT | |
1230 " (" SIZE_FORMAT "%%)", | |
1231 top, space->end(), | |
1232 percent_of_space(space, space->end())); | |
1233 } | |
1234 ConcurrentMarkSweepThread::stop_icms(); | |
1235 return space->end(); | |
1236 } | |
1237 | |
1238 if (top <= _icms_stop_limit) { | |
1239 if (CMSTraceIncrementalMode) { | |
1240 space->print_on(gclog_or_tty); | |
1241 gclog_or_tty->stamp(); | |
1242 gclog_or_tty->print_cr(" stop limit top=" PTR_FORMAT | |
1243 ", new limit=" PTR_FORMAT | |
1244 " (" SIZE_FORMAT "%%)", | |
1245 top, space->end(), | |
1246 percent_of_space(space, space->end())); | |
1247 } | |
1248 ConcurrentMarkSweepThread::stop_icms(); | |
1249 return space->end(); | |
1250 } | |
1251 | |
1252 if (CMSTraceIncrementalMode) { | |
1253 space->print_on(gclog_or_tty); | |
1254 gclog_or_tty->stamp(); | |
1255 gclog_or_tty->print_cr(" end limit top=" PTR_FORMAT | |
1256 ", new limit=" PTR_FORMAT, | |
1257 top, NULL); | |
1258 } | |
1259 } | |
1260 | |
1261 return NULL; | |
1262 } | |
1263 | |
113 | 1264 oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
0 | 1265 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in"); |
1266 // allocate, copy and if necessary update promoinfo -- | |
1267 // delegate to underlying space. | |
1268 assert_lock_strong(freelistLock()); | |
1269 | |
1270 #ifndef PRODUCT | |
1271 if (Universe::heap()->promotion_should_fail()) { | |
1272 return NULL; | |
1273 } | |
1274 #endif // #ifndef PRODUCT | |
1275 | |
113 | 1276 oop res = _cmsSpace->promote(obj, obj_size);
0 | 1277 if (res == NULL) { |
1278 // expand and retry | |
1279 size_t s = _cmsSpace->expansionSpaceRequired(obj_size); // HeapWords | |
1280 expand(s*HeapWordSize, MinHeapDeltaBytes, | |
1281 CMSExpansionCause::_satisfy_promotion); | |
1282 // Since there's currently no next generation, we don't try to promote | |
1283 // into a more senior generation. | |
1284 assert(next_gen() == NULL, "assumption, based upon which no attempt " | |
1285 "is made to pass on a possibly failing " | |
1286 "promotion to next generation"); | |
113 | 1287 res = _cmsSpace->promote(obj, obj_size);
0 | 1288 } |
1289 if (res != NULL) { | |
1290 // See comment in allocate() about when objects should | |
1291 // be allocated live. | |
1292 assert(obj->is_oop(), "Will dereference klass pointer below"); | |
1293 collector()->promoted(false, // Not parallel | |
1294 (HeapWord*)res, obj->is_objArray(), obj_size); | |
1295 // promotion counters | |
1296 NOT_PRODUCT( | |
1297 _numObjectsPromoted++; | |
1298 _numWordsPromoted += | |
1299 (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size())); | |
1300 ) | |
1301 } | |
1302 return res; | |
1303 } | |
1304 | |
1305 | |
1306 HeapWord* | |
1307 ConcurrentMarkSweepGeneration::allocation_limit_reached(Space* space, | |
1308 HeapWord* top, | |
1309 size_t word_sz) | |
1310 { | |
1311 return collector()->allocation_limit_reached(space, top, word_sz); | |
1312 } | |
1313 | |
1716 | 1314 // IMPORTANT: Notes on object size recognition in CMS. |
1315 // --------------------------------------------------- |
1316 // A block of storage in the CMS generation is always in |
1317 // one of three states. A free block (FREE), an allocated |
1318 // object (OBJECT) whose size() method reports the correct size, |
1319 // and an intermediate state (TRANSIENT) in which its size cannot |
1320 // be accurately determined. |
1321 // STATE IDENTIFICATION: (32 bit and 64 bit w/o COOPS) |
1322 // ----------------------------------------------------- |
1323 // FREE: klass_word & 1 == 1; mark_word holds block size |
1324 // |
1777 | 1325 // OBJECT: klass_word installed; klass_word != 0 && klass_word & 1 == 0; |
1716 | 1326 // obj->size() computes correct size |
1327 // [Perm Gen objects need to be "parsable" before they can be navigated] |
1328 // |
1329 // TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT |
1330 // |
1331 // STATE IDENTIFICATION: (64 bit+COOPS) |
1332 // ------------------------------------ |
1333 // FREE: mark_word & CMS_FREE_BIT == 1; mark_word & ~CMS_FREE_BIT gives block_size |
1334 // |
1335 // OBJECT: klass_word installed; klass_word != 0; |
1336 // obj->size() computes correct size |
1337 // [Perm Gen comment above continues to hold] |
1338 // |
1339 // TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT |
1340 // |
1341 // |
1342 // STATE TRANSITION DIAGRAM |
1343 // |
1344 //        mut / parnew                    mut / parnew |
1345 // FREE --------------------> TRANSIENT ---------------------> OBJECT --| |
1346 //   ^                                                                  | |
1347 //   |------------------------ DEAD <-----------------------------------| |
1348 //              sweep                                 mut |
1349 // |
1350 // While a block is in TRANSIENT state its size cannot be determined |
1351 // so readers will either need to come back later or stall until |
1352 // the size can be determined. Note that for the case of direct |
1353 // allocation, P-bits, when available, may be used to determine the |
1354 // size of an object that may not yet have been initialized. |
1355 |
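A compact restatement of the 32-bit / non-COOPS test above, as a hypothetical helper (the real code reads the klass word with the required memory ordering and uses FreeChunk accessors):

#include <cstdint>

enum class BlockState { FREE, OBJECT, TRANSIENT };

BlockState classify(uintptr_t klass_word) {
  if (klass_word & 1)  return BlockState::FREE;     // low bit tags a free chunk
  if (klass_word != 0) return BlockState::OBJECT;   // klass installed: size valid
  return BlockState::TRANSIENT;                     // allocation still in flight
}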
0 | 1356 // Things to support parallel young-gen collection. |
1357 oop | |
1358 ConcurrentMarkSweepGeneration::par_promote(int thread_num, | |
1359 oop old, markOop m, | |
1360 size_t word_sz) { | |
1361 #ifndef PRODUCT | |
1362 if (Universe::heap()->promotion_should_fail()) { | |
1363 return NULL; | |
1364 } | |
1365 #endif // #ifndef PRODUCT | |
1366 | |
1367 CMSParGCThreadState* ps = _par_gc_thread_states[thread_num]; | |
1368 PromotionInfo* promoInfo = &ps->promo; | |
1369 // if we are tracking promotions, then first ensure space for | |
1370 // promotion (including spooling space for saving header if necessary). | |
1371 // then allocate and copy, then track promoted info if needed. | |
1372 // When tracking (see PromotionInfo::track()), the mark word may | |
1373 // be displaced and in this case restoration of the mark word | |
1374 // occurs in the (oop_since_save_marks_)iterate phase. | |
1375 if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) { | |
1376 // Out of space for allocating spooling buffers; | |
1377 // try expanding and allocating spooling buffers. | |
1378 if (!expand_and_ensure_spooling_space(promoInfo)) { | |
1379 return NULL; | |
1380 } | |
1381 } | |
1382 assert(promoInfo->has_spooling_space(), "Control point invariant"); | |
1716 | 1383 const size_t alloc_sz = CompactibleFreeListSpace::adjustObjectSize(word_sz); |
1384 HeapWord* obj_ptr = ps->lab.alloc(alloc_sz); |
0 | 1385 if (obj_ptr == NULL) { |
1716 | 1386 obj_ptr = expand_and_par_lab_allocate(ps, alloc_sz);
0 | 1387 if (obj_ptr == NULL) { |
1388 return NULL; | |
1389 } | |
1390 } | |
1391 oop obj = oop(obj_ptr); | |
1716 | 1392 OrderAccess::storestore();
187 | 1393 assert(obj->klass_or_null() == NULL, "Object should be uninitialized here."); |
1716 | 1394 assert(!((FreeChunk*)obj_ptr)->isFree(), "Error, block will look free but show wrong size"); |
1395 // IMPORTANT: See note on object initialization for CMS above. |
0 | 1396 // Otherwise, copy the object. Here we must be careful to insert the |
1397 // klass pointer last, since this marks the block as an allocated object. | |
187 | 1398 // Except with compressed oops it's the mark word. |
0 | 1399 HeapWord* old_ptr = (HeapWord*)old; |
1716 | 1400 // Restore the mark word copied above. |
1401 obj->set_mark(m); |
1402 assert(obj->klass_or_null() == NULL, "Object should be uninitialized here."); |
1403 assert(!((FreeChunk*)obj_ptr)->isFree(), "Error, block will look free but show wrong size"); |
1404 OrderAccess::storestore(); |
1405 |
1406 if (UseCompressedOops) { |
1407 // Copy gap missed by (aligned) header size calculation below |
1408 obj->set_klass_gap(old->klass_gap()); |
1409 } |
0 | 1410 if (word_sz > (size_t)oopDesc::header_size()) { |
1411 Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(), | |
1412 obj_ptr + oopDesc::header_size(), | |
1413 word_sz - oopDesc::header_size()); | |
1414 } | |
187 | 1415 |
0 | 1416 // Now we can track the promoted object, if necessary. We take care |
1521 | 1417 // to delay the transition from uninitialized to full object |
0 | 1418 // (i.e., insertion of klass pointer) until after, so that it |
1419 // atomically becomes a promoted object. | |
1420 if (promoInfo->tracking()) { | |
1421 promoInfo->track((PromotedObject*)obj, old->klass()); | |
1422 } | |
1716 | 1423 assert(obj->klass_or_null() == NULL, "Object should be uninitialized here."); |
1424 assert(!((FreeChunk*)obj_ptr)->isFree(), "Error, block will look free but show wrong size"); |
1425 assert(old->is_oop(), "Will use and dereference old klass ptr below"); |
187 | 1426 |
1427 // Finally, install the klass pointer (this should be volatile). | |
1716 | 1428 OrderAccess::storestore();
0 | 1429 obj->set_klass(old->klass()); |
1716 | 1430 // We should now be able to calculate the right size for this object |
1431 assert(obj->is_oop() && obj->size() == (int)word_sz, "Error, incorrect size computed for promoted object"); |
1432 |
0 | 1433 collector()->promoted(true, // parallel |
1434 obj_ptr, old->is_objArray(), word_sz); | |
1435 | |
1436 NOT_PRODUCT( | |
1716 | 1437 Atomic::inc_ptr(&_numObjectsPromoted); |
1438 Atomic::add_ptr(alloc_sz, &_numWordsPromoted); |
0 | 1439 ) |
1440 | |
1441 return obj; | |
1442 } | |
1443 | |
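The publication protocol used by par_promote() above can be summarized as follows (sketch, not part of the source):

// 1. allocate from the PLAB; block stays TRANSIENT (klass word NULL)
// 2. restore the mark word, copy the klass gap and body
// 3. OrderAccess::storestore()    -- body must be visible before the klass
// 4. obj->set_klass(old->klass()) -- installing the klass word atomically
//    turns the block into an OBJECT whose size() is finally well-defined
// A concurrent BOT walker that still reads a NULL klass word knows the
// block is TRANSIENT and must wait or retry, never misparse it.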
1444 void | |
1445 ConcurrentMarkSweepGeneration:: | |
1446 par_promote_alloc_undo(int thread_num, | |
1447 HeapWord* obj, size_t word_sz) { | |
1448 // CMS does not support promotion undo. | |
1449 ShouldNotReachHere(); | |
1450 } | |
1451 | |
1452 void | |
1453 ConcurrentMarkSweepGeneration:: | |
1454 par_promote_alloc_done(int thread_num) { | |
1455 CMSParGCThreadState* ps = _par_gc_thread_states[thread_num]; | |
1145 | 1456 ps->lab.retire(thread_num);
0 | 1457 } |
1458 | |
1459 void | |
1460 ConcurrentMarkSweepGeneration:: | |
1461 par_oop_since_save_marks_iterate_done(int thread_num) { | |
1462 CMSParGCThreadState* ps = _par_gc_thread_states[thread_num]; | |
1463 ParScanWithoutBarrierClosure* dummy_cl = NULL; | |
1464 ps->promo.promoted_oops_iterate_nv(dummy_cl); | |
1465 } | |
1466 | |
1467 // XXXPERM | |
1468 bool ConcurrentMarkSweepGeneration::should_collect(bool full, | |
1469 size_t size, | |
1470 bool tlab) | |
1471 { | |
1472 // We allow a STW collection only if a full | |
1473 // collection was requested. | |
1474 return full || should_allocate(size, tlab); // FIX ME !!! | |
1475 // This and promotion failure handling are connected at the | |
1476 // hip and should be fixed by untying them. | |
1477 } | |
1478 | |
1479 bool CMSCollector::shouldConcurrentCollect() { | |
1480 if (_full_gc_requested) { | |
1481 if (Verbose && PrintGCDetails) { | |
1482 gclog_or_tty->print_cr("CMSCollector: collect because of explicit " | |
1520 | 1483 " gc request (or gc_locker)");
0 | 1484 } |
1485 return true; | |
1486 } | |
1487 | |
1488 // For debugging purposes, change the type of collection. | |
1489 // If the rotation is not on the concurrent collection | |
1490 // type, don't start a concurrent collection. | |
1491 NOT_PRODUCT( | |
1492 if (RotateCMSCollectionTypes && | |
1493 (_cmsGen->debug_collection_type() != | |
1494 ConcurrentMarkSweepGeneration::Concurrent_collection_type)) { | |
1495 assert(_cmsGen->debug_collection_type() != | |
1496 ConcurrentMarkSweepGeneration::Unknown_collection_type, | |
1497 "Bad cms collection type"); | |
1498 return false; | |
1499 } | |
1500 ) | |
1501 | |
1502 FreelistLocker x(this); | |
1503 // ------------------------------------------------------------------ | |
1504 // Print out lots of information which affects the initiation of | |
1505 // a collection. | |
1506 if (PrintCMSInitiationStatistics && stats().valid()) { | |
1507 gclog_or_tty->print("CMSCollector shouldConcurrentCollect: "); | |
1508 gclog_or_tty->stamp(); | |
1509 gclog_or_tty->print_cr(""); | |
1510 stats().print_on(gclog_or_tty); | |
1511 gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f", | |
1512 stats().time_until_cms_gen_full()); | |
1513 gclog_or_tty->print_cr("free="SIZE_FORMAT, _cmsGen->free()); | |
1514 gclog_or_tty->print_cr("contiguous_available="SIZE_FORMAT, | |
1515 _cmsGen->contiguous_available()); | |
1516 gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate()); | |
1517 gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate()); | |
1518 gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy()); | |
94 | 1519 gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy()); |
1520 gclog_or_tty->print_cr("initiatingPermOccupancy=%3.7f", _permGen->initiating_occupancy()); |
0 | 1521 } |
1522 // ------------------------------------------------------------------ | |
1523 | |
1524 // If the estimated time to complete a cms collection (cms_duration()) | |
1525 // is less than the estimated time remaining until the cms generation | |
1526 // is full, start a collection. | |
1527 if (!UseCMSInitiatingOccupancyOnly) { | |
1528 if (stats().valid()) { | |
1529 if (stats().time_until_cms_start() == 0.0) { | |
1530 return true; | |
1531 } | |
1532 } else { | |
1533 // We want to conservatively collect somewhat early in order | |
1534 // to try and "bootstrap" our CMS/promotion statistics; | |
1535 // this branch will not fire after the first successful CMS | |
1536 // collection because the stats should then be valid. | |
1537 if (_cmsGen->occupancy() >= _bootstrap_occupancy) { | |
1538 if (Verbose && PrintGCDetails) { | |
1539 gclog_or_tty->print_cr( | |
1540 " CMSCollector: collect for bootstrapping statistics:" | |
1541 " occupancy = %f, boot occupancy = %f", _cmsGen->occupancy(), | |
1542 _bootstrap_occupancy); | |
1543 } | |
1544 return true; | |
1545 } | |
1546 } | |
1547 } | |
1548 | |
1549 // Otherwise, we start a collection cycle if either the perm gen or | |
1550 // old gen want a collection cycle started. Each may use | |
1551 // an appropriate criterion for making this decision. | |
1552 // XXX We need to make sure that the gen expansion | |
94 | 1553 // criterion dovetails well with this. XXX NEED TO FIX THIS |
1554 if (_cmsGen->should_concurrent_collect()) { |
0 | 1555 if (Verbose && PrintGCDetails) { |
1556 gclog_or_tty->print_cr("CMS old gen initiated"); | |
1557 } | |
1558 return true; | |
1559 } | |
1560 | |
94 | 1561 // We start a collection if we believe an incremental collection may fail; |
1562 // this is not likely to be productive in practice because it's probably too |
1563 // late anyway. |
1564 GenCollectedHeap* gch = GenCollectedHeap::heap(); |
1565 assert(gch->collector_policy()->is_two_generation_policy(), |
1566 "You may want to check the correctness of the following"); |
1994 | 1567 if (gch->incremental_collection_will_fail(true /* consult_young */)) { |
1568 if (Verbose && PrintGCDetails) { |
94 | 1569 gclog_or_tty->print("CMSCollector: collect because incremental collection will fail "); |
0 | 1570 } |
1571 return true; | |
1572 } | |
1573 | |
94 | 1574 if (CMSClassUnloadingEnabled && _permGen->should_concurrent_collect()) { |
1575 bool res = update_should_unload_classes(); |
1576 if (res) { |
1577 if (Verbose && PrintGCDetails) { |
1578 gclog_or_tty->print_cr("CMS perm gen initiated"); |
1579 } |
1580 return true; |
1581 } |
1582 } |
0 | 1583 return false; |
1584 } | |
1585 | |
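For operators, the adaptive initiation logic above can be pinned to a fixed threshold. An illustrative command line (the flags are real HotSpot flags; the values are examples only):

  java -XX:+UseConcMarkSweepGC \
       -XX:CMSInitiatingOccupancyFraction=70 \
       -XX:+UseCMSInitiatingOccupancyOnly ...

With UseCMSInitiatingOccupancyOnly set, shouldConcurrentCollect() skips the statistics-based and bootstrap branches above and relies on the occupancy test alone.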
1586 // Clear _expansion_cause fields of constituent generations | |
1587 void CMSCollector::clear_expansion_cause() { | |
1588 _cmsGen->clear_expansion_cause(); | |
1589 _permGen->clear_expansion_cause(); | |
1590 } | |
1591 | |
94 | 1592 // We should be conservative in starting a collection cycle. To |
1593 // start too eagerly runs the risk of collecting too often in the |
1594 // extreme. To collect too rarely falls back on full collections, |
1595 // which works, even if not optimum in terms of concurrent work. |
1596 // As a workaround for collecting too eagerly, use the flag |
1597 // UseCMSInitiatingOccupancyOnly. This also has the advantage of |
1598 // giving the user an easily understandable way of controlling the |
1599 // collections. |
1600 // We want to start a new collection cycle if any of the following |
1601 // conditions hold: |
1602 // . our current occupancy exceeds the configured initiating occupancy |
1603 // for this generation, or |
1604 // . we recently needed to expand this space and have not, since that |
1605 // expansion, done a collection of this generation, or |
1606 // . the underlying space believes that it may be a good idea to initiate |
1607 // a concurrent collection (this may be based on criteria such as the |
1608 // following: the space uses linear allocation and linear allocation is |
1609 // going to fail, or there is believed to be excessive fragmentation in |
1610 // the generation, etc... or ... |
1611 // [.(currently done by CMSCollector::shouldConcurrentCollect() only for |
1612 // the case of the old generation, not the perm generation; see CR 6543076): |
1613 // we may be approaching a point at which allocation requests may fail because |
1614 // we will be out of sufficient free space given allocation rate estimates.] |
1615 bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const { |
1616 |
0 | 1617 assert_lock_strong(freelistLock()); |
94 | 1618 if (occupancy() > initiating_occupancy()) { |
0 | 1619 if (PrintGCDetails && Verbose) { |
1620 gclog_or_tty->print(" %s: collect because of occupancy %f / %f ", | |
94 | 1621 short_name(), occupancy(), initiating_occupancy()); |
0 | 1622 } |
1623 return true; | |
1624 } | |
1625 if (UseCMSInitiatingOccupancyOnly) { | |
1626 return false; | |
1627 } | |
1628 if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) { | |
1629 if (PrintGCDetails && Verbose) { | |
1630 gclog_or_tty->print(" %s: collect because expanded for allocation ", | |
1631 short_name()); | |
1632 } | |
1633 return true; | |
1634 } | |
94 | 1635 if (_cmsSpace->should_concurrent_collect()) { |
0 | 1636 if (PrintGCDetails && Verbose) { |
94 | 1637 gclog_or_tty->print(" %s: collect because cmsSpace says so ", |
0 | 1638 short_name()); |
1639 } | |
1640 return true; | |
1641 } | |
1642 return false; | |
1643 } | |
1644 | |
1645 void ConcurrentMarkSweepGeneration::collect(bool full, | |
1646 bool clear_all_soft_refs, | |
1647 size_t size, | |
1648 bool tlab) | |
1649 { | |
1650 collector()->collect(full, clear_all_soft_refs, size, tlab); | |
1651 } | |
1652 | |
1653 void CMSCollector::collect(bool full, | |
1654 bool clear_all_soft_refs, | |
1655 size_t size, | |
1656 bool tlab) | |
1657 { | |
1658 if (!UseCMSCollectionPassing && _collectorState > Idling) { | |
1659 // For debugging purposes skip the collection if the state | |
1660 // is not currently idle | |
1661 if (TraceCMSState) { | |
1662 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " skipped full:%d CMS state %d", | |
1663 Thread::current(), full, _collectorState); | |
1664 } | |
1665 return; | |
1666 } | |
1667 | |
1668 // The following "if" branch is present for defensive reasons. | |
1669 // In the current uses of this interface, it can be replaced with: | |
1670 // assert(!GC_locker.is_active(), "Can't be called otherwise"); | |
1671 // But I am not placing that assert here to allow future | |
1672 // generality in invoking this interface. | |
1673 if (GC_locker::is_active()) { | |
1674 // A consistency test for GC_locker | |
1675 assert(GC_locker::needs_gc(), "Should have been set already"); | |
1676 // Skip this foreground collection, instead | |
1677 // expanding the heap if necessary. | |
1678 // Need the free list locks for the call to free() in compute_new_size() | |
1679 compute_new_size(); | |
1680 return; | |
1681 } | |
1682 acquire_control_and_collect(full, clear_all_soft_refs); | |
1683 _full_gcs_since_conc_gc++; | |
1684 | |
1685 } | |
1686 | |
1687 void CMSCollector::request_full_gc(unsigned int full_gc_count) { | |
1688 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
1689 unsigned int gc_count = gch->total_full_collections(); | |
1690 if (gc_count == full_gc_count) { | |
1691 MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag); | |
1692 _full_gc_requested = true; | |
1693 CGC_lock->notify(); // nudge CMS thread | |
2365 | 1694 } else { |
1695 assert(gc_count > full_gc_count, "Error: causal loop"); |
0 | 1696 } |
1697 } | |
1698 | |
1699 | |
1700 // The foreground and background collectors need to coordinate in order | |
1701 // to make sure that they do not mutually interfere with CMS collections. | |
1702 // When a background collection is active, | |
1703 // the foreground collector may need to take over (preempt) and | |
1704 // synchronously complete an ongoing collection. Depending on the | |
1705 // frequency of the background collections and the heap usage | |
1706 // of the application, this preemption can be seldom or frequent. | |
1707 // There are only certain | |
1708 // points in the background collection that the "collection-baton" | |
1709 // can be passed to the foreground collector. | |
1710 // | |
1711 // The foreground collector will wait for the baton before | |
1712 // starting any part of the collection. The foreground collector | |
1713 // will only wait at one location. | |
1714 // | |
1715 // The background collector will yield the baton before starting a new | |
1716 // phase of the collection (e.g., before initial marking, marking from roots, | |
1717 // precleaning, final re-mark, sweep etc.) This is normally done at the head | |
1718 // of the loop which switches the phases. The background collector does some | |
1719 // of the phases (initial mark, final re-mark) with the world stopped. | |
1720 // Because of locking involved in stopping the world, | |
1721 // the foreground collector should not block waiting for the background | |
1722 // collector when it is doing a stop-the-world phase. The background | |
1723 // collector will yield the baton at an additional point just before | |
1724 // it enters a stop-the-world phase. Once the world is stopped, the | |
1725 // background collector checks the phase of the collection. If the | |
1726 // phase has not changed, it proceeds with the collection. If the | |
1727 // phase has changed, it skips that phase of the collection. See | |
1728 // the comments on the use of the Heap_lock in collect_in_background(). | |
1729 // | |
1730 // Variable used in baton passing. | |
1731 // _foregroundGCIsActive - Set to true by the foreground collector when | |
1732 // it wants the baton. The foreground clears it when it has finished | |
1733 // the collection. | |
1734 // _foregroundGCShouldWait - Set to true by the background collector | |
1735 // when it is running. The foreground collector waits while | |
1736 // _foregroundGCShouldWait is true. | |
1737 // CGC_lock - monitor used to protect access to the above variables | |
1738 // and to notify the foreground and background collectors. | |
1739 // _collectorState - current state of the CMS collection. | |
1740 // | |
1741 // The foreground collector | |
1742 // acquires the CGC_lock | |
1743 // sets _foregroundGCIsActive | |
1744 // waits on the CGC_lock for _foregroundGCShouldWait to be false | |
1745 // various locks acquired in preparation for the collection | |
1746 // are released so as not to block the background collector | |
1747 // that is in the midst of a collection | |
1748 // proceeds with the collection | |
1749 // clears _foregroundGCIsActive | |
1750 // returns | |
1751 // | |
1752 // The background collector in a loop iterating on the phases of the | |
1753 // collection | |
1754 // acquires the CGC_lock | |
1755 // sets _foregroundGCShouldWait | |
1756 // if _foregroundGCIsActive is set | |
1757 // clears _foregroundGCShouldWait, notifies _CGC_lock | |
1758 // waits on _CGC_lock for _foregroundGCIsActive to become false | |
1759 // and exits the loop. | |
1760 // otherwise | |
1761 // proceed with that phase of the collection | |
1762 // if the phase is a stop-the-world phase, | |
1763 // yield the baton once more just before enqueueing | |
1764 // the stop-world CMS operation (executed by the VM thread). | |
1765 // returns after all phases of the collection are done | |
1766 // | |
1767 | |
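A minimal sketch of the CGC_lock baton handshake described above, transcribed to std::mutex/std::condition_variable. Illustrative only: the VM uses its own Monitor with no-safepoint-check semantics, CMS tokens, and per-phase re-checks that are omitted here.

#include <condition_variable>
#include <mutex>

std::mutex cgc_lock;
std::condition_variable cgc_cv;
bool foreground_gc_is_active = false;
bool foreground_gc_should_wait = false;

void foreground_collect() {                 // VM thread wants the baton
  std::unique_lock<std::mutex> l(cgc_lock);
  foreground_gc_is_active = true;
  cgc_cv.notify_all();                      // wake a blocked CMS thread
  cgc_cv.wait(l, [] { return !foreground_gc_should_wait; });
  // ... perform the synchronous (foreground) collection ...
  foreground_gc_is_active = false;
  cgc_cv.notify_all();
}

void background_phase_boundary() {          // CMS thread, head of phase loop
  std::unique_lock<std::mutex> l(cgc_lock);
  foreground_gc_should_wait = true;
  if (foreground_gc_is_active) {            // yield the baton
    foreground_gc_should_wait = false;
    cgc_cv.notify_all();
    cgc_cv.wait(l, [] { return !foreground_gc_is_active; });
  }
}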
1768 void CMSCollector::acquire_control_and_collect(bool full, | |
1769 bool clear_all_soft_refs) { | |
1770 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); | |
1771 assert(!Thread::current()->is_ConcurrentGC_thread(), | |
1772 "shouldn't try to acquire control from self!"); | |
1773 | |
1774 // Start the protocol for acquiring control of the | |
1775 // collection from the background collector (aka CMS thread). | |
1776 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(), | |
1777 "VM thread should have CMS token"); | |
1778 // Remember the possibly interrupted state of an ongoing | |
1779 // concurrent collection | |
1780 CollectorState first_state = _collectorState; | |
1781 | |
1782 // Signal to a possibly ongoing concurrent collection that | |
1783 // we want to do a foreground collection. | |
1784 _foregroundGCIsActive = true; | |
1785 | |
1786 // Disable incremental mode during a foreground collection. | |
1787 ICMSDisabler icms_disabler; | |
1788 | |
1789 // release locks and wait for a notify from the background collector | |
1790 // releasing the locks is only necessary for phases which | |
1791 // yield, to improve the granularity of the collection. | |
1792 assert_lock_strong(bitMapLock()); | |
1793 // We need to lock the Free list lock for the space that we are | |
1794 // currently collecting. | |
1795 assert(haveFreelistLocks(), "Must be holding free list locks"); | |
1796 bitMapLock()->unlock(); | |
1797 releaseFreelistLocks(); | |
1798 { | |
1799 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); | |
1800 if (_foregroundGCShouldWait) { | |
1801 // We are going to be waiting for action for the CMS thread; | |
1802 // it had better not be gone (for instance at shutdown)! | |
1803 assert(ConcurrentMarkSweepThread::cmst() != NULL, | |
1804 "CMS thread must be running"); | |
1805 // Wait here until the background collector gives us the go-ahead | |
1806 ConcurrentMarkSweepThread::clear_CMS_flag( | |
1807 ConcurrentMarkSweepThread::CMS_vm_has_token); // release token | |
1808 // Get a possibly blocked CMS thread going: | |
1809 // Note that we set _foregroundGCIsActive true above, | |
1810 // without protection of the CGC_lock. | |
1811 CGC_lock->notify(); | |
1812 assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(), | |
1813 "Possible deadlock"); | |
1814 while (_foregroundGCShouldWait) { | |
1815 // wait for notification | |
1816 CGC_lock->wait(Mutex::_no_safepoint_check_flag); | |
1817 // Possibility of delay/starvation here, since CMS token does | |
1818 // not know to give priority to VM thread? Actually, I think | |
1819 // there wouldn't be any delay/starvation, but the proof of | |
1820 // that "fact" (?) appears non-trivial. XXX 20011219YSR | |
1821 } | |
1822 ConcurrentMarkSweepThread::set_CMS_flag( | |
1823 ConcurrentMarkSweepThread::CMS_vm_has_token); | |
1824 } | |
1825 } | |
1826 // The CMS_token is already held. Get back the other locks. | |
1827 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(), | |
1828 "VM thread should have CMS token"); | |
1829 getFreelistLocks(); | |
1830 bitMapLock()->lock_without_safepoint_check(); | |
1831 if (TraceCMSState) { | |
1832 gclog_or_tty->print_cr("CMS foreground collector has asked for control " | |
1833 INTPTR_FORMAT " with first state %d", Thread::current(), first_state); | |
1834 gclog_or_tty->print_cr(" gets control with state %d", _collectorState); | |
1835 } | |
1836 | |
1837 // Check if we need to do a compaction, or if not, whether | |
1838 // we need to start the mark-sweep from scratch. | |
1839 bool should_compact = false; | |
1840 bool should_start_over = false; | |
1841 decide_foreground_collection_type(clear_all_soft_refs, | |
1842 &should_compact, &should_start_over); | |
1843 | |
1844 NOT_PRODUCT( | |
1845 if (RotateCMSCollectionTypes) { | |
1846 if (_cmsGen->debug_collection_type() == | |
1847 ConcurrentMarkSweepGeneration::MSC_foreground_collection_type) { | |
1848 should_compact = true; | |
1849 } else if (_cmsGen->debug_collection_type() == | |
1850 ConcurrentMarkSweepGeneration::MS_foreground_collection_type) { | |
1851 should_compact = false; | |
1852 } | |
1853 } | |
1854 ) | |
1855 | |
1856 if (PrintGCDetails && first_state > Idling) { | |
1857 GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause(); | |
1858 if (GCCause::is_user_requested_gc(cause) || | |
1859 GCCause::is_serviceability_requested_gc(cause)) { | |
1860 gclog_or_tty->print(" (concurrent mode interrupted)"); | |
1861 } else { | |
1862 gclog_or_tty->print(" (concurrent mode failure)"); | |
1863 } | |
1864 } | |
1865 | |
1866 if (should_compact) { | |
1867 // If the collection is being acquired from the background | |
1868 // collector, there may be references on the discovered | |
1869 // references lists that have NULL referents (being those | |
1870 // that were concurrently cleared by a mutator) or | |
1871 // that are no longer active (having been enqueued concurrently | |
1872 // by the mutator). | |
1873 // Scrub the list of those references because Mark-Sweep-Compact | |
1874 // code assumes referents are not NULL and that all discovered | |
1875 // Reference objects are active. | |
1876 ref_processor()->clean_up_discovered_references(); | |
1877 | |
1878 do_compaction_work(clear_all_soft_refs); | |
1879 | |
1880 // Has the GC time limit been exceeded? | |
1387 | 1881 DefNewGeneration* young_gen = _young_gen->as_DefNewGeneration(); |
1882 size_t max_eden_size = young_gen->max_capacity() - | |
1883 young_gen->to()->capacity() - | |
1884 young_gen->from()->capacity(); | |
1885 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
1886 GCCause::Cause gc_cause = gch->gc_cause(); | |
1887 size_policy()->check_gc_overhead_limit(_young_gen->used(), | |
1888 young_gen->eden()->used(), | |
1889 _cmsGen->max_capacity(), | |
1890 max_eden_size, | |
1891 full, | |
1892 gc_cause, | |
1893 gch->collector_policy()); | |
0 | 1894 } else { |
1895 do_mark_sweep_work(clear_all_soft_refs, first_state, | |
1896 should_start_over); | |
1897 } | |
1898 // Reset the expansion cause, now that we just completed | |
1899 // a collection cycle. | |
1900 clear_expansion_cause(); | |
1901 _foregroundGCIsActive = false; | |
1902 return; | |
1903 } | |
1904 | |
1905 // Resize the perm generation and the tenured generation | |
1906 // after obtaining the free list locks for the | |
1907 // two generations. | |
1908 void CMSCollector::compute_new_size() { | |
1909 assert_locked_or_safepoint(Heap_lock); | |
1910 FreelistLocker z(this); | |
1911 _permGen->compute_new_size(); | |
1912 _cmsGen->compute_new_size(); | |
1913 } | |
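// FreelistLocker above presumably wraps getFreelistLocks() /
// releaseFreelistLocks() (see below), so both generations' free list
// locks are held across the two compute_new_size() calls.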
1914 | |
1915 // A work method used by foreground collection to determine | |
1916 // what type of collection (compacting or not, continuing or fresh) | |
1917 // it should do. | |
1918 // NOTE: the intent is to make UseCMSCompactAtFullCollection | |
1919 // and CMSCompactWhenClearAllSoftRefs the default in the future | |
1920 // and do away with the flags after a suitable period. | |
1921 void CMSCollector::decide_foreground_collection_type( | |
1922 bool clear_all_soft_refs, bool* should_compact, | |
1923 bool* should_start_over) { | |
1924 // Normally, we'll compact only if the UseCMSCompactAtFullCollection | |
1925 // flag is set, and we have either requested a System.gc() or | |
1926 // the number of full gc's since the last concurrent cycle | |
1927 // has exceeded the threshold set by CMSFullGCsBeforeCompaction, | |
1928 // or if an incremental collection has failed | |
1929 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
1930 assert(gch->collector_policy()->is_two_generation_policy(), | |
1931 "You may want to check the correctness of the following"); | |
1932 // Inform cms gen if this was due to partial collection failing. | |
1933 // The CMS gen may use this fact to determine its expansion policy. | |
1994 | 1934 if (gch->incremental_collection_will_fail(false /* don't consult_young */)) { |
0 | 1935 assert(!_cmsGen->incremental_collection_failed(), |
1936 "Should have been noticed, reacted to and cleared"); | |
1937 _cmsGen->set_incremental_collection_failed(); | |
1938 } | |
1939 *should_compact = | |
1940 UseCMSCompactAtFullCollection && | |
1941 ((_full_gcs_since_conc_gc >= CMSFullGCsBeforeCompaction) || | |
1942 GCCause::is_user_requested_gc(gch->gc_cause()) || | |
1994 | 1943 gch->incremental_collection_will_fail(true /* consult_young */)); |
0 | 1944 *should_start_over = false; |
1945 if (clear_all_soft_refs && !*should_compact) { | |
1946 // We are about to do a last ditch collection attempt | |
1947 // so it would normally make sense to do a compaction | |
1948 // to reclaim as much space as possible. | |
1949 if (CMSCompactWhenClearAllSoftRefs) { | |
1950 // Default: The rationale is that in this case either | |
1951 // we are past the final marking phase, in which case | |
1952 // we'd have to start over, or so little has been done | |
1953 // that there's little point in saving that work. Compaction | |
1954 // appears to be the sensible choice in either case. | |
1955 *should_compact = true; | |
1956 } else { | |
1957 // We have been asked to clear all soft refs, but not to | |
1958 // compact. Make sure that we aren't past the final checkpoint | |
1959 // phase, for that is where we process soft refs. If we are already | |
1960 // past that phase, we'll need to redo the refs discovery phase and | |
1961 // if necessary clear soft refs that weren't previously | |
1962 // cleared. We do so by remembering the phase in which | |
1963 // we came in, and if we are past the refs processing | |
1964 // phase, we'll choose to just redo the mark-sweep | |
1965 // collection from scratch. | |
1966 if (_collectorState > FinalMarking) { | |
1967 // We are past the refs processing phase; | |
1968 // start over and do a fresh synchronous CMS cycle | |
1969 _collectorState = Resetting; // skip to reset to start new cycle | |
1970 reset(false /* == !asynch */); | |
1971 *should_start_over = true; | |
1972 } // else we can continue a possibly ongoing current cycle | |
1973 } | |
1974 } | |
1975 } | |
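// Restated compactly (same logic as above, for reference):
//   *should_compact = UseCMSCompactAtFullCollection &&
//       (_full_gcs_since_conc_gc >= CMSFullGCsBeforeCompaction
//        || user-requested gc || incremental collection will fail);
// and, when clearing all soft refs without compacting, a cycle already
// past FinalMarking is abandoned and restarted (*should_start_over).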
1976 | |
1977 // A work method used by the foreground collector to do | |
1978 // a mark-sweep-compact. | |
1979 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) { | |
1980 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
1981 TraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, gclog_or_tty); | |
1982 if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) { | |
1983 gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d " | |
1984 "collections passed to foreground collector", _full_gcs_since_conc_gc); | |
1985 } | |
1986 | |
1987 // Sample collection interval time and reset for collection pause. | |
1988 if (UseAdaptiveSizePolicy) { | |
1989 size_policy()->msc_collection_begin(); | |
1990 } | |
1991 | |
1992 // Temporarily widen the span of the weak reference processing to | |
1993 // the entire heap. | |
1994 MemRegion new_span(GenCollectedHeap::heap()->reserved_region()); | |
2369 | 1995 ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span); |
0 | 1996 // Temporarily, clear the "is_alive_non_header" field of the |
1997 // reference processor. | |
2369 | 1998 ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL); |
0 | 1999 // Temporarily make reference _processing_ single threaded (non-MT). |
2369 | 2000 ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false); |
0 | 2001 // Temporarily make refs discovery atomic |
2369 | 2002 ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true); |
2003 // Temporarily make reference _discovery_ single threaded (non-MT) | |
2004 ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false); | |
0 | 2005 |
2006 ref_processor()->set_enqueuing_is_done(false); | |
2007 ref_processor()->enable_discovery(); | |
457 | 2008 ref_processor()->setup_policy(clear_all_soft_refs); |
0 | 2009 // If an asynchronous collection finishes, the _modUnionTable is |
2010 // all clear. If we are assuming the collection from an asynchronous | |
2011 // collection, clear the _modUnionTable. | |
2012 assert(_collectorState != Idling || _modUnionTable.isAllClear(), | |
2013 "_modUnionTable should be clear if the baton was not passed"); | |
2014 _modUnionTable.clear_all(); | |
2015 | |
2016 // We must adjust the allocation statistics being maintained | |
2017 // in the free list space. We do so by reading and clearing | |
2018 // the sweep timer and updating the block flux rate estimates below. | |
1145 | 2019 assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive"); |
2020 if (_inter_sweep_timer.is_active()) { | |
2021 _inter_sweep_timer.stop(); | |
2022 // Note that we do not use this sample to update the _inter_sweep_estimate. | |
2023 _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()), | |
2024 _inter_sweep_estimate.padded_average(), | |
2025 _intra_sweep_estimate.padded_average()); | |
2026 } | |
0 | 2027 |
1703 | 2028 { |
3356 | 2029 TraceCMSMemoryManagerStats tmms(gch->gc_cause()); |
1703 | 2030 } |
0 | 2031 GenMarkSweep::invoke_at_safepoint(_cmsGen->level(), |
2032 ref_processor(), clear_all_soft_refs); | |
2033 #ifdef ASSERT | |
2034 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace(); | |
2035 size_t free_size = cms_space->free(); | |
2036 assert(free_size == | |
2037 pointer_delta(cms_space->end(), cms_space->compaction_top()) | |
2038 * HeapWordSize, | |
2039 "All the free space should be compacted into one chunk at top"); | |
2040 assert(cms_space->dictionary()->totalChunkSize( | |
2041 debug_only(cms_space->freelistLock())) == 0 || | |
2042 cms_space->totalSizeInIndexedFreeLists() == 0, | |
2043 "All the free space should be in a single chunk"); | |
2044 size_t num = cms_space->totalCount(); | |
2045 assert((free_size == 0 && num == 0) || | |
2046 (free_size > 0 && (num == 1 || num == 2)), | |
2047 "There should be at most 2 free chunks after compaction"); | |
2048 #endif // ASSERT | |
2049 _collectorState = Resetting; | |
2050 assert(_restart_addr == NULL, | |
2051 "Should have been NULL'd before baton was passed"); | |
2052 reset(false /* == !asynch */); | |
2053 _cmsGen->reset_after_compaction(); | |
94 | 2054 _concurrent_cycles_since_last_unload = 0; |
2055 | |
2056 if (verifying() && !should_unload_classes()) { | |
0 | 2057 perm_gen_verify_bit_map()->clear_all(); |
2058 } | |
2059 | |
2060 // Clear any data recorded in the PLAB chunk arrays. | |
2061 if (_survivor_plab_array != NULL) { | |
2062 reset_survivor_plab_arrays(); | |
2063 } | |
2064 | |
2065 // Adjust the per-size allocation stats for the next epoch. | |
1145 | 2066 _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */); |
2067 // Restart the "inter sweep timer" for the next epoch. | |
2068 _inter_sweep_timer.reset(); | |
2069 _inter_sweep_timer.start(); | |
0 | 2070 |
2071 // Sample collection pause time and reset for collection interval. | |
2072 if (UseAdaptiveSizePolicy) { | |
2073 size_policy()->msc_collection_end(gch->gc_cause()); | |
2074 } | |
2075 | |
2076 // For a mark-sweep-compact, compute_new_size() will be called | |
2077 // in the heap's do_collection() method. | |
2078 } | |
2079 | |
2080 // A work method used by the foreground collector to do | |
2081 // a mark-sweep, after taking over from a possibly on-going | |
2082 // concurrent mark-sweep collection. | |
2083 void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs, | |
2084 CollectorState first_state, bool should_start_over) { | |
2085 if (PrintGC && Verbose) { | |
2086 gclog_or_tty->print_cr("Pass concurrent collection to foreground " | |
2087 "collector with count %d", | |
2088 _full_gcs_since_conc_gc); | |
2089 } | |
2090 switch (_collectorState) { | |
2091 case Idling: | |
2092 if (first_state == Idling || should_start_over) { | |
2093 // The background GC was not active, or should | |
2094 // be restarted from scratch; start the cycle. | |
2095 _collectorState = InitialMarking; | |
2096 } | |
2097 // If first_state was not Idling, then a background GC | |
2098 // was in progress and has now finished. No need to do it | |
2099 // again. Leave the state as Idling. | |
2100 break; | |
2101 case Precleaning: | |
2102 // In the foreground case don't do the precleaning since | |
2103 // it is not done concurrently and there is extra work | |
2104 // required. | |
2105 _collectorState = FinalMarking; | |
2106 } | |
2107 if (PrintGCDetails && | |
2108 (_collectorState > Idling || | |
2109 !GCCause::is_user_requested_gc(GenCollectedHeap::heap()->gc_cause()))) { | |
2110 gclog_or_tty->print(" (concurrent mode failure)"); | |
2111 } | |
2112 collect_in_foreground(clear_all_soft_refs); | |
2113 | |
2114 // For a mark-sweep, compute_new_size() will be called | |
2115 // in the heap's do_collection() method. | |
2116 } | |
2117 | |
2118 | |
2119 void CMSCollector::getFreelistLocks() const { | |
2120 // Get locks for all free lists in all generations that this | |
2121 // collector is responsible for | |
2122 _cmsGen->freelistLock()->lock_without_safepoint_check(); | |
2123 _permGen->freelistLock()->lock_without_safepoint_check(); | |
2124 } | |
2125 | |
2126 void CMSCollector::releaseFreelistLocks() const { | |
2127 // Release locks for all free lists in all generations that this | |
2128 // collector is responsible for | |
2129 _cmsGen->freelistLock()->unlock(); | |
2130 _permGen->freelistLock()->unlock(); | |
2131 } | |
2132 | |
2133 bool CMSCollector::haveFreelistLocks() const { | |
2134 // Check locks for all free lists in all generations that this | |
2135 // collector is responsible for | |
2136 assert_lock_strong(_cmsGen->freelistLock()); | |
2137 assert_lock_strong(_permGen->freelistLock()); | |
2138 PRODUCT_ONLY(ShouldNotReachHere()); | |
2139 return true; | |
2140 } | |
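// Note: because of PRODUCT_ONLY(ShouldNotReachHere()) above, this checker
// is debug-only and is intended solely for use inside asserts; it must
// never be called in a product build.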
2141 | |
2142 // A utility class that is used by the CMS collector to | |
2143 // temporarily "release" the foreground collector from its | |
2144 // usual obligation to wait for the background collector to | |
2145 // complete an ongoing phase before proceeding. | |
2146 class ReleaseForegroundGC: public StackObj { | |
2147 private: | |
2148 CMSCollector* _c; | |
2149 public: | |
2150 ReleaseForegroundGC(CMSCollector* c) : _c(c) { | |
2151 assert(_c->_foregroundGCShouldWait, "Else should not need to call"); | |
2152 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); | |
2153 // allow a potentially blocked foreground collector to proceed | |
2154 _c->_foregroundGCShouldWait = false; | |
2155 if (_c->_foregroundGCIsActive) { | |
2156 CGC_lock->notify(); | |
2157 } | |
2158 assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(), | |
2159 "Possible deadlock"); | |
2160 } | |
2161 | |
2162 ~ReleaseForegroundGC() { | |
2163 assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?"); | |
2164 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); | |
2165 _c->_foregroundGCShouldWait = true; | |
2166 } | |
2167 }; | |
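// Typical scoped use, as in the stop-the-world phases of
// collect_in_background() below -- release the foreground collector for
// the duration of a VM operation; the destructor re-asserts the wait:
//
//   {
//     ReleaseForegroundGC x(this);        // clears _foregroundGCShouldWait
//     VM_CMS_Initial_Mark initial_mark_op(this);
//     VMThread::execute(&initial_mark_op);
//   }                                     // sets _foregroundGCShouldWait again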
2168 | |
2169 // There are separate collect_in_background and collect_in_foreground because of | |
2170 // the different locking requirements of the background collector and the | |
2171 // foreground collector. There was originally an attempt to share | |
2172 // one "collect" method between the background collector and the foreground | |
2173 // collector but the if-then-else logic required made it cleaner to have | |
2174 // separate methods. | |
2175 void CMSCollector::collect_in_background(bool clear_all_soft_refs) { | |
2176 assert(Thread::current()->is_ConcurrentGC_thread(), | |
2177 "A CMS asynchronous collection is only allowed on a CMS thread."); | |
2178 | |
2179 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
2180 { | |
2181 bool safepoint_check = Mutex::_no_safepoint_check_flag; | |
2182 MutexLockerEx hl(Heap_lock, safepoint_check); | |
94 | 2183 FreelistLocker fll(this); |
0 | 2184 MutexLockerEx x(CGC_lock, safepoint_check); |
2185 if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) { | |
2186 // The foreground collector is active or we're | |
2187 // not using asynchronous collections. Skip this | |
2188 // background collection. | |
2189 assert(!_foregroundGCShouldWait, "Should be clear"); | |
2190 return; | |
2191 } else { | |
2192 assert(_collectorState == Idling, "Should be idling before start."); | |
2193 _collectorState = InitialMarking; | |
2194 // Reset the expansion cause, now that we are about to begin | |
2195 // a new cycle. | |
2196 clear_expansion_cause(); | |
2197 } | |
94 | 2198 // Decide if we want to enable class unloading as part of the |
2199 // ensuing concurrent GC cycle. | |
2200 update_should_unload_classes(); | |
0 | 2201 _full_gc_requested = false; // acks all outstanding full gc requests |
2202 // Signal that we are about to start a collection | |
2203 gch->increment_total_full_collections(); // ... starting a collection cycle | |
2204 _collection_count_start = gch->total_full_collections(); | |
2205 } | |
2206 | |
2207 // Used for PrintGC | |
2208 size_t prev_used; | |
2209 if (PrintGC && Verbose) { | |
2210 prev_used = _cmsGen->used(); // XXXPERM | |
2211 } | |
2212 | |
2213 // The change of the collection state is normally done at this level; | |
2214 // the exceptions are phases that are executed while the world is | |
2215 // stopped. For those phases the change of state is done while the | |
2216 // world is stopped. For baton passing purposes this allows the | |
2217 // background collector to finish the phase and change state atomically. | |
2218 // The foreground collector cannot wait on a phase that is done | |
2219 // while the world is stopped because the foreground collector already | |
2220 // has the world stopped and would deadlock. | |
2221 while (_collectorState != Idling) { | |
2222 if (TraceCMSState) { | |
2223 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d", | |
2224 Thread::current(), _collectorState); | |
2225 } | |
2226 // The foreground collector | |
2227 // holds the Heap_lock throughout its collection. | |
2228 // holds the CMS token (but not the lock) | |
2229 // except while it is waiting for the background collector to yield. | |
2230 // | |
2231 // The foreground collector should be blocked (not for long) | |
2232 // if the background collector is about to start a phase | |
2233 // executed with world stopped. If the background | |
2234 // collector has already started such a phase, the | |
2235 // foreground collector is blocked waiting for the | |
2236 // Heap_lock. The stop-world phases (InitialMarking and FinalMarking) | |
2237 // are executed in the VM thread. | |
2238 // | |
2239 // The locking order is | |
2240 // PendingListLock (PLL) -- if applicable (FinalMarking) | |
2241 // Heap_lock (both this & PLL locked in VM_CMS_Operation::prologue()) | |
2242 // CMS token (claimed in | |
2243 // stop_world_and_do() --> | |
2244 // safepoint_synchronize() --> | |
2245 // CMSThread::synchronize()) | |
2246 | |
2247 { | |
2248 // Check if the FG collector wants us to yield. | |
2249 CMSTokenSync x(true); // is cms thread | |
2250 if (waitForForegroundGC()) { | |
2251 // We yielded to a foreground GC, nothing more to be | |
2252 // done this round. | |
2253 assert(_foregroundGCShouldWait == false, "We set it to false in " | |
2254 "waitForForegroundGC()"); | |
2255 if (TraceCMSState) { | |
2256 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT | |
2257 " exiting collection CMS state %d", | |
2258 Thread::current(), _collectorState); | |
2259 } | |
2260 return; | |
2261 } else { | |
2262 // The background collector can run but check to see if the | |
2263 // foreground collector has done a collection while the | |
2264 // background collector was waiting to get the CGC_lock | |
2265 // above. If yes, break so that _foregroundGCShouldWait | |
2266 // is cleared before returning. | |
2267 if (_collectorState == Idling) { | |
2268 break; | |
2269 } | |
2270 } | |
2271 } | |
2272 | |
2273 assert(_foregroundGCShouldWait, "Foreground collector, if active, " | |
2274 "should be waiting"); | |
2275 | |
2276 switch (_collectorState) { | |
2277 case InitialMarking: | |
2278 { | |
2279 ReleaseForegroundGC x(this); | |
2280 stats().record_cms_begin(); | |
2281 | |
2282 VM_CMS_Initial_Mark initial_mark_op(this); | |
2283 VMThread::execute(&initial_mark_op); | |
2284 } | |
2285 // The collector state may be any legal state at this point | |
2286 // since the background collector may have yielded to the | |
2287 // foreground collector. | |
2288 break; | |
2289 case Marking: | |
2290 // initial marking in checkpointRootsInitialWork has been completed | |
2291 if (markFromRoots(true)) { // we were successful | |
2292 assert(_collectorState == Precleaning, "Collector state should " | |
2293 "have changed"); | |
2294 } else { | |
2295 assert(_foregroundGCIsActive, "Internal state inconsistency"); | |
2296 } | |
2297 break; | |
2298 case Precleaning: | |
2299 if (UseAdaptiveSizePolicy) { | |
2300 size_policy()->concurrent_precleaning_begin(); | |
2301 } | |
2302 // marking from roots in markFromRoots has been completed | |
2303 preclean(); | |
2304 if (UseAdaptiveSizePolicy) { | |
2305 size_policy()->concurrent_precleaning_end(); | |
2306 } | |
2307 assert(_collectorState == AbortablePreclean || | |
2308 _collectorState == FinalMarking, | |
2309 "Collector state should have changed"); | |
2310 break; | |
2311 case AbortablePreclean: | |
2312 if (UseAdaptiveSizePolicy) { | |
2313 size_policy()->concurrent_phases_resume(); | |
2314 } | |
2315 abortable_preclean(); | |
2316 if (UseAdaptiveSizePolicy) { | |
2317 size_policy()->concurrent_precleaning_end(); | |
2318 } | |
2319 assert(_collectorState == FinalMarking, "Collector state should " | |
2320 "have changed"); | |
2321 break; | |
2322 case FinalMarking: | |
2323 { | |
2324 ReleaseForegroundGC x(this); | |
2325 | |
2326 VM_CMS_Final_Remark final_remark_op(this); | |
2327 VMThread::execute(&final_remark_op); | |
935 | 2328 } |
0 | 2329 assert(_foregroundGCShouldWait, "block post-condition"); |
2330 break; | |
2331 case Sweeping: | |
2332 if (UseAdaptiveSizePolicy) { | |
2333 size_policy()->concurrent_sweeping_begin(); | |
2334 } | |
2335 // final marking in checkpointRootsFinal has been completed | |
2336 sweep(true); | |
2337 assert(_collectorState == Resizing, "Collector state change " | |
2338 "to Resizing must be done under the free_list_lock"); | |
2339 _full_gcs_since_conc_gc = 0; | |
2340 | |
2341 // Stop the timers for adaptive size policy for the concurrent phases | |
2342 if (UseAdaptiveSizePolicy) { | |
2343 size_policy()->concurrent_sweeping_end(); | |
2344 size_policy()->concurrent_phases_end(gch->gc_cause(), | |
2345 gch->prev_gen(_cmsGen)->capacity(), | |
2346 _cmsGen->free()); | |
2347 } | |
2348 | |
2349 case Resizing: { | |
2350 // Sweeping has been completed... | |
2351 // At this point the background collection has completed. | |
2352 // Don't move the call to compute_new_size() down | |
2353 // into code that might be executed if the background | |
2354 // collection was preempted. | |
2355 { | |
2356 ReleaseForegroundGC x(this); // unblock FG collection | |
2357 MutexLockerEx y(Heap_lock, Mutex::_no_safepoint_check_flag); | |
2358 CMSTokenSync z(true); // not strictly needed. | |
2359 if (_collectorState == Resizing) { | |
2360 compute_new_size(); | |
2361 _collectorState = Resetting; | |
2362 } else { | |
2363 assert(_collectorState == Idling, "The state should only change" | |
2364 " because the foreground collector has finished the collection"); | |
2365 } | |
2366 } | |
2367 break; | |
2368 } | |
2369 case Resetting: | |
2370 // CMS heap resizing has been completed | |
2371 reset(true); | |
2372 assert(_collectorState == Idling, "Collector state should " | |
2373 "have changed"); | |
2374 stats().record_cms_end(); | |
2375 // Don't move the concurrent_phases_end() and compute_new_size() | |
2376 // calls to here because a preempted background collection | |
2377 // has its state set to "Resetting". | |
2378 break; | |
2379 case Idling: | |
2380 default: | |
2381 ShouldNotReachHere(); | |
2382 break; | |
2383 } | |
2384 if (TraceCMSState) { | |
2385 gclog_or_tty->print_cr(" Thread " INTPTR_FORMAT " done - next CMS state %d", | |
2386 Thread::current(), _collectorState); | |
2387 } | |
2388 assert(_foregroundGCShouldWait, "block post-condition"); | |
2389 } | |
2390 | |
2391 // Should this be in gc_epilogue? | |
2392 collector_policy()->counters()->update_counters(); | |
2393 | |
2394 { | |
2395 // Clear _foregroundGCShouldWait and, in the event that the | |
2396 // foreground collector is waiting, notify it, before | |
2397 // returning. | |
2398 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); | |
2399 _foregroundGCShouldWait = false; | |
2400 if (_foregroundGCIsActive) { | |
2401 CGC_lock->notify(); | |
2402 } | |
2403 assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(), | |
2404 "Possible deadlock"); | |
2405 } | |
2406 if (TraceCMSState) { | |
2407 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT | |
2408 " exiting collection CMS state %d", | |
2409 Thread::current(), _collectorState); | |
2410 } | |
2411 if (PrintGC && Verbose) { | |
2412 _cmsGen->print_heap_change(prev_used); | |
2413 } | |
2414 } | |
2415 | |
2416 void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) { | |
2417 assert(_foregroundGCIsActive && !_foregroundGCShouldWait, | |
2418 "Foreground collector should be waiting, not executing"); | |
2419 assert(Thread::current()->is_VM_thread(), "A foreground collection " | |
2420 "may only be done by the VM Thread with the world stopped"); | |
2421 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(), | |
2422 "VM thread should have CMS token"); | |
2423 | |
2424 NOT_PRODUCT(TraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose, | |
2425 true, gclog_or_tty);) | |
2426 if (UseAdaptiveSizePolicy) { | |
2427 size_policy()->ms_collection_begin(); | |
2428 } | |
2429 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact); | |
2430 | |
2431 HandleMark hm; // Discard invalid handles created during verification | |
2432 | |
2433 if (VerifyBeforeGC && | |
2434 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { | |
2435 Universe::verify(true); | |
2436 } | |
2437 | |
453 | 2438 // Snapshot the soft reference policy to be used in this collection cycle. |
457 | 2439 ref_processor()->setup_policy(clear_all_soft_refs); |
453 | 2440 |
0 | 2441 bool init_mark_was_synchronous = false; // until proven otherwise |
2442 while (_collectorState != Idling) { | |
2443 if (TraceCMSState) { | |
2444 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d", | |
2445 Thread::current(), _collectorState); | |
2446 } | |
2447 switch (_collectorState) { | |
2448 case InitialMarking: | |
2449 init_mark_was_synchronous = true; // fact to be exploited in re-mark | |
2450 checkpointRootsInitial(false); | |
2451 assert(_collectorState == Marking, "Collector state should have changed" | |
2452 " within checkpointRootsInitial()"); | |
2453 break; | |
2454 case Marking: | |
2455 // initial marking in checkpointRootsInitialWork has been completed | |
2456 if (VerifyDuringGC && | |
2457 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { | |
2458 gclog_or_tty->print("Verify before initial mark: "); | |
2459 Universe::verify(true); | |
2460 } | |
2461 { | |
2462 bool res = markFromRoots(false); | |
2463 assert(res && _collectorState == FinalMarking, "Collector state should " | |
2464 "have changed"); | |
2465 break; | |
2466 } | |
2467 case FinalMarking: | |
2468 if (VerifyDuringGC && | |
2469 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { | |
2470 gclog_or_tty->print("Verify before re-mark: "); | |
2471 Universe::verify(true); | |
2472 } | |
2473 checkpointRootsFinal(false, clear_all_soft_refs, | |
2474 init_mark_was_synchronous); | |
2475 assert(_collectorState == Sweeping, "Collector state should not " | |
2476 "have changed within checkpointRootsFinal()"); | |
2477 break; | |
2478 case Sweeping: | |
2479 // final marking in checkpointRootsFinal has been completed | |
2480 if (VerifyDuringGC && | |
2481 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { | |
2482 gclog_or_tty->print("Verify before sweep: "); | |
2483 Universe::verify(true); | |
2484 } | |
2485 sweep(false); | |
2486 assert(_collectorState == Resizing, "Incorrect state"); | |
2487 break; | |
2488 case Resizing: { | |
2489 // Sweeping has been completed; the actual resize in this case | |
2490 // is done separately; nothing to be done in this state. | |
2491 _collectorState = Resetting; | |
2492 break; | |
2493 } | |
2494 case Resetting: | |
2495 // The heap has been resized. | |
2496 if (VerifyDuringGC && | |
2497 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { | |
2498 gclog_or_tty->print("Verify before reset: "); | |
2499 Universe::verify(true); | |
2500 } | |
2501 reset(false); | |
2502 assert(_collectorState == Idling, "Collector state should " | |
2503 "have changed"); | |
2504 break; | |
2505 case Precleaning: | |
2506 case AbortablePreclean: | |
2507 // Elide the preclean phase | |
2508 _collectorState = FinalMarking; | |
2509 break; | |
2510 default: | |
2511 ShouldNotReachHere(); | |
2512 } | |
2513 if (TraceCMSState) { | |
2514 gclog_or_tty->print_cr(" Thread " INTPTR_FORMAT " done - next CMS state %d", | |
2515 Thread::current(), _collectorState); | |
2516 } | |
2517 } | |
2518 | |
2519 if (UseAdaptiveSizePolicy) { | |
2520 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
2521 size_policy()->ms_collection_end(gch->gc_cause()); | |
2522 } | |
2523 | |
2524 if (VerifyAfterGC && | |
2525 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { | |
2526 Universe::verify(true); | |
2527 } | |
2528 if (TraceCMSState) { | |
2529 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT | |
2530 " exiting collection CMS state %d", | |
2531 Thread::current(), _collectorState); | |
2532 } | |
2533 } | |
2534 | |
2535 bool CMSCollector::waitForForegroundGC() { | |
2536 bool res = false; | |
2537 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), | |
2538 "CMS thread should have CMS token"); | |
2539 // Block the foreground collector until the | |
2540 // background collector decides whether to | |
2541 // yield. | |
2542 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); | |
2543 _foregroundGCShouldWait = true; | |
2544 if (_foregroundGCIsActive) { | |
2545 // The background collector yields to the | |
2546 // foreground collector and returns a value | |
2547 // indicating that it has yielded. The foreground | |
2548 // collector can proceed. | |
2549 res = true; | |
2550 _foregroundGCShouldWait = false; | |
2551 ConcurrentMarkSweepThread::clear_CMS_flag( | |
2552 ConcurrentMarkSweepThread::CMS_cms_has_token); | |
2553 ConcurrentMarkSweepThread::set_CMS_flag( | |
2554 ConcurrentMarkSweepThread::CMS_cms_wants_token); | |
2555 // Get a possibly blocked foreground thread going | |
2556 CGC_lock->notify(); | |
2557 if (TraceCMSState) { | |
2558 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d", | |
2559 Thread::current(), _collectorState); | |
2560 } | |
2561 while (_foregroundGCIsActive) { | |
2562 CGC_lock->wait(Mutex::_no_safepoint_check_flag); | |
2563 } | |
2564 ConcurrentMarkSweepThread::set_CMS_flag( | |
2565 ConcurrentMarkSweepThread::CMS_cms_has_token); | |
2566 ConcurrentMarkSweepThread::clear_CMS_flag( | |
2567 ConcurrentMarkSweepThread::CMS_cms_wants_token); | |
2568 } | |
2569 if (TraceCMSState) { | |
2570 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d", | |
2571 Thread::current(), _collectorState); | |
2572 } | |
2573 return res; | |
2574 } | |
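// Caller's contract, as exercised in collect_in_background() above: a
// "true" return means the baton was yielded to the foreground collector,
// and the background collector should abandon the current cycle, e.g.:
//
//   CMSTokenSync x(true);        // is cms thread
//   if (waitForForegroundGC()) {
//     return;                    // foreground GC took over this round
//   }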
2575 | |
2576 // Because of the need to lock the free lists and other structures in | |
2577 // the collector, common to all the generations that the collector is | |
2578 // collecting, we need the gc_prologues of individual CMS generations to | |
2579 // delegate to their collector. It may have been simpler had the | |
2580 // current infrastructure allowed one to call a prologue on a | |
2581 // collector. In the absence of that we have the generation's | |
2582 // prologue delegate to the collector, which delegates back | |
2583 // some "local" work to a worker method in the individual generations | |
2584 // that it's responsible for collecting, while itself doing any | |
2585 // work common to all generations it's responsible for. A similar | |
2586 // comment applies to the gc_epilogue()'s. | |
2587 // The role of the variable _between_prologue_and_epilogue is to | |
2588 // enforce the invocation protocol. | |
2589 void CMSCollector::gc_prologue(bool full) { | |
2590 // Call gc_prologue_work() for each CMSGen and PermGen that | |
2591 // we are responsible for. | |
2592 | |
2593 // The following locking discipline assumes that we are only called | |
2594 // when the world is stopped. | |
2595 assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption"); | |
2596 | |
2597 // The CMSCollector prologue must call the gc_prologues for the | |
2598 // "generations" (including PermGen if any) that it's responsible | |
2599 // for. | |
2600 | |
2601 assert( Thread::current()->is_VM_thread() | |
2602 || ( CMSScavengeBeforeRemark | |
2603 && Thread::current()->is_ConcurrentGC_thread()), | |
2604 "Incorrect thread type for prologue execution"); | |
2605 | |
2606 if (_between_prologue_and_epilogue) { | |
2607 // We have already been invoked; this is a gc_prologue delegation | |
2608 // from yet another CMS generation that we are responsible for, just | |
2609 // ignore it since all relevant work has already been done. | |
2610 return; | |
2611 } | |
2612 | |
2613 // set a bit saying prologue has been called; cleared in epilogue | |
2614 _between_prologue_and_epilogue = true; | |
2615 // Claim locks for common data structures, then call gc_prologue_work() | |
2616 // for each CMSGen and PermGen that we are responsible for. | |
2617 | |
2618 getFreelistLocks(); // gets free list locks on constituent spaces | |
2619 bitMapLock()->lock_without_safepoint_check(); | |
2620 | |
2621 // Should call gc_prologue_work() for all cms gens we are responsible for | |
2622 bool registerClosure = _collectorState >= Marking | |
2623 && _collectorState < Sweeping; | |
1833 | 2624 ModUnionClosure* muc = CollectedHeap::use_parallel_gc_threads() ? |
2625 &_modUnionClosurePar | |
0 | 2626 : &_modUnionClosure; |
2627 _cmsGen->gc_prologue_work(full, registerClosure, muc); | |
2628 _permGen->gc_prologue_work(full, registerClosure, muc); | |
2629 | |
2630 if (!full) { | |
2631 stats().record_gc0_begin(); | |
2632 } | |
2633 } | |
2634 | |
2635 void ConcurrentMarkSweepGeneration::gc_prologue(bool full) { | |
2636 // Delegate to CMScollector which knows how to coordinate between | |
2637 // this and any other CMS generations that it is responsible for | |
2638 // collecting. | |
2639 collector()->gc_prologue(full); | |
2640 } | |
2641 | |
2642 // This is a "private" interface for use by this generation's CMSCollector. | |
2643 // Not to be called directly by any other entity (for instance, | |
2644 // GenCollectedHeap, which calls the "public" gc_prologue method above). | |
2645 void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full, | |
2646 bool registerClosure, ModUnionClosure* modUnionClosure) { | |
2647 assert(!incremental_collection_failed(), "Shouldn't be set yet"); | |
2648 assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL, | |
2649 "Should be NULL"); | |
2650 if (registerClosure) { | |
2651 cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure); | |
2652 } | |
2653 cmsSpace()->gc_prologue(); | |
2654 // Clear stat counters | |
2655 NOT_PRODUCT( | |
2656 assert(_numObjectsPromoted == 0, "check"); | |
2657 assert(_numWordsPromoted == 0, "check"); | |
2658 if (Verbose && PrintGC) { | |
2659 gclog_or_tty->print("Allocated "SIZE_FORMAT" objects, " | |
2660 SIZE_FORMAT" bytes concurrently", | |
2661 _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord)); | |
2662 } | |
2663 _numObjectsAllocated = 0; | |
2664 _numWordsAllocated = 0; | |
2665 ) | |
2666 } | |
2667 | |
2668 void CMSCollector::gc_epilogue(bool full) { | |
2669 // The following locking discipline assumes that we are only called | |
2670 // when the world is stopped. | |
2671 assert(SafepointSynchronize::is_at_safepoint(), | |
2672 "world is stopped assumption"); | |
2673 | |
2674 // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks | |
2675 // if linear allocation blocks need to be appropriately marked to allow | |
2676 // the blocks to be parsable. We also check here whether we need to nudge the | |
2677 // CMS collector thread to start a new cycle (if it's not already active). | |
2678 assert( Thread::current()->is_VM_thread() | |
2679 || ( CMSScavengeBeforeRemark | |
2680 && Thread::current()->is_ConcurrentGC_thread()), | |
2681 "Incorrect thread type for epilogue execution"); | |
2682 | |
2683 if (!_between_prologue_and_epilogue) { | |
2684 // We have already been invoked; this is a gc_epilogue delegation | |
2685 // from yet another CMS generation that we are responsible for, just | |
2686 // ignore it since all relevant work has already been done. | |
2687 return; | |
2688 } | |
2689 assert(haveFreelistLocks(), "must have freelist locks"); | |
2690 assert_lock_strong(bitMapLock()); | |
2691 | |
2692 _cmsGen->gc_epilogue_work(full); | |
2693 _permGen->gc_epilogue_work(full); | |
2694 | |
2695 if (_collectorState == AbortablePreclean || _collectorState == Precleaning) { | |
2696 // in case sampling was not already enabled, enable it | |
2697 _start_sampling = true; | |
2698 } | |
2699 // reset _eden_chunk_array so sampling starts afresh | |
2700 _eden_chunk_index = 0; | |
2701 | |
2702 size_t cms_used = _cmsGen->cmsSpace()->used(); | |
2703 size_t perm_used = _permGen->cmsSpace()->used(); | |
2704 | |
2705 // update performance counters - this uses a special version of | |
2706 // update_counters() that allows the utilization to be passed as a | |
2707 // parameter, avoiding multiple calls to used(). | |
2708 // | |
2709 _cmsGen->update_counters(cms_used); | |
2710 _permGen->update_counters(perm_used); | |
2711 | |
2712 if (CMSIncrementalMode) { | |
2713 icms_update_allocation_limits(); | |
2714 } | |
2715 | |
2716 bitMapLock()->unlock(); | |
2717 releaseFreelistLocks(); | |
2718 | |
2719 _between_prologue_and_epilogue = false; // ready for next cycle | |
2720 } | |
2721 | |
2722 void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) { | |
2723 collector()->gc_epilogue(full); | |
2724 | |
2725 // Also reset promotion tracking in par gc thread states. | |
1833 | 2726 if (CollectedHeap::use_parallel_gc_threads()) { |
0 | 2727 for (uint i = 0; i < ParallelGCThreads; i++) { |
1145 | 2728 _par_gc_thread_states[i]->promo.stopTrackingPromotions(i); |
0 | 2729 } |
2730 } | |
2731 } | |
2732 | |
2733 void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) { | |
2734 assert(!incremental_collection_failed(), "Should have been cleared"); | |
2735 cmsSpace()->setPreconsumptionDirtyCardClosure(NULL); | |
2736 cmsSpace()->gc_epilogue(); | |
2737 // Print stat counters | |
2738 NOT_PRODUCT( | |
2739 assert(_numObjectsAllocated == 0, "check"); | |
2740 assert(_numWordsAllocated == 0, "check"); | |
2741 if (Verbose && PrintGC) { | |
2742 gclog_or_tty->print("Promoted "SIZE_FORMAT" objects, " | |
2743 SIZE_FORMAT" bytes", | |
2744 _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord)); | |
2745 } | |
2746 _numObjectsPromoted = 0; | |
2747 _numWordsPromoted = 0; | |
2748 ) | |
2749 | |
2750 if (PrintGC && Verbose) { | |
2751 // The call down the chain in contiguous_available() needs the freelistLock, | |
2752 // so print this out before releasing the freeListLock. | |
2753 gclog_or_tty->print(" Contiguous available "SIZE_FORMAT" bytes ", | |
2754 contiguous_available()); | |
2755 } | |
2756 } | |
2757 | |
2758 #ifndef PRODUCT | |
2759 bool CMSCollector::have_cms_token() { | |
2760 Thread* thr = Thread::current(); | |
2761 if (thr->is_VM_thread()) { | |
2762 return ConcurrentMarkSweepThread::vm_thread_has_cms_token(); | |
2763 } else if (thr->is_ConcurrentGC_thread()) { | |
2764 return ConcurrentMarkSweepThread::cms_thread_has_cms_token(); | |
2765 } else if (thr->is_GC_task_thread()) { | |
2766 return ConcurrentMarkSweepThread::vm_thread_has_cms_token() && | |
2767 ParGCRareEvent_lock->owned_by_self(); | |
2768 } | |
2769 return false; | |
2770 } | |
2771 #endif | |
2772 | |
2773 // Check reachability of the given heap address in CMS generation, | |
2774 // treating all other generations as roots. | |
2775 bool CMSCollector::is_cms_reachable(HeapWord* addr) { | |
2776 // We could "guarantee" below, rather than assert, but I'll | |
2777 // leave these as "asserts" so that an adventurous debugger | |
2778 // could try this in the product build provided some subset of | |
2779 // the conditions were met, provided they were interested in the | |
2780 // results and knew that the computation below wouldn't interfere | |
2781 // with other concurrent computations mutating the structures | |
2782 // being read or written. | |
2783 assert(SafepointSynchronize::is_at_safepoint(), | |
2784 "Else mutations in object graph will make answer suspect"); | |
2785 assert(have_cms_token(), "Should hold cms token"); | |
2786 assert(haveFreelistLocks(), "must hold free list locks"); | |
2787 assert_lock_strong(bitMapLock()); | |
2788 | |
2789 // Clear the marking bit map array before starting, but, just | |
2790 // for kicks, first report if the given address is already marked | |
2791 gclog_or_tty->print_cr("Start: Address 0x%x is%s marked", addr, | |
2792 _markBitMap.isMarked(addr) ? "" : " not"); | |
2793 | |
2794 if (verify_after_remark()) { | |
2795 MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag); | |
2796 bool result = verification_mark_bm()->isMarked(addr); | |
2797 gclog_or_tty->print_cr("TransitiveMark: Address 0x%x %s marked", addr, | |
2798 result ? "IS" : "is NOT"); | |
2799 return result; | |
2800 } else { | |
2801 gclog_or_tty->print_cr("Could not compute result"); | |
2802 return false; | |
2803 } | |
2804 } | |
2805 | |
2806 //////////////////////////////////////////////////////// | |
2807 // CMS Verification Support | |
2808 //////////////////////////////////////////////////////// | |
2809 // Following the remark phase, the following invariant | |
2810 // should hold -- each object in the CMS heap which is | |
2811 // marked in the verification_mark_bm() should also be marked in markBitMap(). | |
2812 | |
2813 class VerifyMarkedClosure: public BitMapClosure { | |
2814 CMSBitMap* _marks; | |
2815 bool _failed; | |
2816 | |
2817 public: | |
2818 VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {} | |
2819 | |
342 | 2820 bool do_bit(size_t offset) { |
0 | 2821 HeapWord* addr = _marks->offsetToHeapWord(offset); |
2822 if (!_marks->isMarked(addr)) { | |
1145 | 2823 oop(addr)->print_on(gclog_or_tty); |
0 | 2824 gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr); |
2825 _failed = true; | |
2826 } | |
342 | 2827 return true; |
0 | 2828 } |
2829 | |
2830 bool failed() { return _failed; } | |
2831 }; | |
2832 | |
2833 bool CMSCollector::verify_after_remark() { | |
2834 gclog_or_tty->print(" [Verifying CMS Marking... "); | |
2835 MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag); | |
2836 static bool init = false; | |
2837 | |
2838 assert(SafepointSynchronize::is_at_safepoint(), | |
2839 "Else mutations in object graph will make answer suspect"); | |
2840 assert(have_cms_token(), | |
2841 "Else there may be mutual interference in use of " | |
2842 " verification data structures"); | |
2843 assert(_collectorState > Marking && _collectorState <= Sweeping, | |
2844 "Else marking info checked here may be obsolete"); | |
2845 assert(haveFreelistLocks(), "must hold free list locks"); | |
2846 assert_lock_strong(bitMapLock()); | |
2847 | |
2848 | |
2849 // Allocate marking bit map if not already allocated | |
2850 if (!init) { // first time | |
2851 if (!verification_mark_bm()->allocate(_span)) { | |
2852 return false; | |
2853 } | |
2854 init = true; | |
2855 } | |
2856 | |
2857 assert(verification_mark_stack()->isEmpty(), "Should be empty"); | |
2858 | |
2859 // Turn off refs discovery -- so we will be tracing through refs. | |
2860 // This is as intended, because by this time | |
2861 // GC must already have cleared any refs that need to be cleared, | |
2862 // and traced those that need to be marked; moreover, | |
2863 // the marking done here is not going to interfere in any | |
2864 // way with the marking information used by GC. | |
2865 NoRefDiscovery no_discovery(ref_processor()); | |
2866 | |
2867 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;) | |
2868 | |
2869 // Clear any marks from a previous round | |
2870 verification_mark_bm()->clear_all(); | |
2871 assert(verification_mark_stack()->isEmpty(), "markStack should be empty"); | |
1145 | 2872 verify_work_stacks_empty(); |
0 | 2873 |
2874 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
2875 gch->ensure_parsability(false); // fill TLABs, but no need to retire them | |
2876 // Update the saved marks which may affect the root scans. | |
2877 gch->save_marks(); | |
2878 | |
2879 if (CMSRemarkVerifyVariant == 1) { | |
2880 // In this first variant of verification, we complete | |
2881 // all marking, then check if the new marks-vector is | |
2882 // a subset of the CMS marks-vector. | |
2883 verify_after_remark_work_1(); | |
2884 } else if (CMSRemarkVerifyVariant == 2) { | |
2885 // In this second variant of verification, we flag an error | |
2886 // (i.e. an object reachable in the new marks-vector not reachable | |
2887 // in the CMS marks-vector) immediately, also indicating the | |
2888 // identity of an object (A) that references the unmarked object (B) -- | |
2889 // presumably, a mutation to A failed to be picked up by preclean/remark? | |
2890 verify_after_remark_work_2(); | |
2891 } else { | |
2892 warning("Unrecognized value %d for CMSRemarkVerifyVariant", | |
2893 CMSRemarkVerifyVariant); | |
2894 } | |
2895 gclog_or_tty->print(" done] "); | |
2896 return true; | |
2897 } | |

void CMSCollector::verify_after_remark_work_1() {
  ResourceMark rm;
  HandleMark  hm;
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  // Mark from roots one level into CMS
  MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
  gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.

  gch->gen_process_strong_roots(_cmsGen->level(),
                                true,   // younger gens are roots
                                true,   // activate StrongRootsScope
                                true,   // collecting perm gen
                                SharedHeap::ScanningOption(roots_scanning_options()),
                                &notOlder,
                                true,   // walk code active on stacks
                                NULL);

  // Now mark from the roots
  assert(_revisitStack.isEmpty(), "Should be empty");
  MarkFromRootsClosure markFromRootsClosure(this, _span,
    verification_mark_bm(), verification_mark_stack(), &_revisitStack,
    false /* don't yield */, true /* verifying */);
  assert(_restart_addr == NULL, "Expected pre-condition");
  verification_mark_bm()->iterate(&markFromRootsClosure);
  while (_restart_addr != NULL) {
    // Deal with stack overflow: by restarting at the indicated
    // address.
    HeapWord* ra = _restart_addr;
    markFromRootsClosure.reset(ra);
    _restart_addr = NULL;
    verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
  }
  assert(verification_mark_stack()->isEmpty(), "Should have been drained");
  verify_work_stacks_empty();
  // Should reset the revisit stack above, since no class tree
  // surgery is forthcoming.
  _revisitStack.reset(); // throwing away all contents

  // Marking completed -- now verify that each bit marked in
  // verification_mark_bm() is also marked in markBitMap(); flag all
  // errors by printing corresponding objects.
  VerifyMarkedClosure vcl(markBitMap());
  verification_mark_bm()->iterate(&vcl);
  if (vcl.failed()) {
    gclog_or_tty->print("Verification failed");
    Universe::heap()->print_on(gclog_or_tty);
    fatal("CMS: failed marking verification after remark");
  }
}

void CMSCollector::verify_after_remark_work_2() {
  ResourceMark rm;
  HandleMark  hm;
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  // Mark from roots one level into CMS
  MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
                                     markBitMap());
  gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
  gch->gen_process_strong_roots(_cmsGen->level(),
                                true,   // younger gens are roots
                                true,   // activate StrongRootsScope
                                true,   // collecting perm gen
                                SharedHeap::ScanningOption(roots_scanning_options()),
                                &notOlder,
                                true,   // walk code active on stacks
                                NULL);

  // Now mark from the roots
  assert(_revisitStack.isEmpty(), "Should be empty");
  MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
    verification_mark_bm(), markBitMap(), verification_mark_stack());
  assert(_restart_addr == NULL, "Expected pre-condition");
  verification_mark_bm()->iterate(&markFromRootsClosure);
  while (_restart_addr != NULL) {
    // Deal with stack overflow: by restarting at the indicated
    // address.
    HeapWord* ra = _restart_addr;
    markFromRootsClosure.reset(ra);
    _restart_addr = NULL;
    verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
  }
  assert(verification_mark_stack()->isEmpty(), "Should have been drained");
  verify_work_stacks_empty();
  // Should reset the revisit stack above, since no class tree
  // surgery is forthcoming.
  _revisitStack.reset(); // throwing away all contents

  // Marking completed -- now verify that each bit marked in
  // verification_mark_bm() is also marked in markBitMap(); flag all
  // errors by printing corresponding objects.
  VerifyMarkedClosure vcl(markBitMap());
  verification_mark_bm()->iterate(&vcl);
  assert(!vcl.failed(), "Else verification above should not have succeeded");
}

void ConcurrentMarkSweepGeneration::save_marks() {
  // delegate to CMS space
  cmsSpace()->save_marks();
  for (uint i = 0; i < ParallelGCThreads; i++) {
    _par_gc_thread_states[i]->promo.startTrackingPromotions();
  }
}

bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
  return cmsSpace()->no_allocs_since_save_marks();
}

#define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)    \
                                                                \
void ConcurrentMarkSweepGeneration::                            \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
  cl->set_generation(this);                                     \
  cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl);      \
  cl->reset_generation();                                       \
  save_marks();                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
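
// For illustration only (this is not generated code): instantiating the
// macro above for a pair such as (OopsInGenClosure, _v) -- one of the
// (closure-type, suffix) pairs supplied by ALL_SINCE_SAVE_MARKS_CLOSURES,
// see genOopClosures.hpp -- yields, roughly:
//
//   void ConcurrentMarkSweepGeneration::
//   oop_since_save_marks_iterate_v(OopsInGenClosure* cl) {
//     cl->set_generation(this);
//     cmsSpace()->oop_since_save_marks_iterate_v(cl);
//     cl->reset_generation();
//     save_marks();
//   }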

void
ConcurrentMarkSweepGeneration::object_iterate_since_last_GC(ObjectClosure* blk)
{
  // Not currently implemented; need to do the following. -- ysr.
  // dld -- I think that is used for some sort of allocation profiler.  So it
  // really means the objects allocated by the mutator since the last
  // GC.  We could potentially implement this cheaply by recording only
  // the direct allocations in a side data structure.
  //
  // I think we probably ought not to be required to support these
  // iterations at any arbitrary point; I think there ought to be some
  // call to enable/disable allocation profiling in a generation/space,
  // and the iterator ought to return the objects allocated in the
  // gen/space since the enable call, or the last iterator call (which
  // will probably be at a GC.)  That way, for gens like CM&S that would
  // require some extra data structure to support this, we only pay the
  // cost when it's in use...
  cmsSpace()->object_iterate_since_last_GC(blk);
}
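
// A minimal sketch of the side-data-structure idea floated above, assuming
// only direct (mutator) allocations need to be replayed; the field and hook
// below are hypothetical, not existing code:
//
//   GrowableArray<HeapWord*>* _recent_allocs;  // cleared at each GC
//   // in the direct allocation path:  _recent_allocs->append(res);
//   // object_iterate_since_last_GC(blk) would then walk the array and
//   // apply blk->do_object(oop(p)) to each recorded address p.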

void
ConcurrentMarkSweepGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
  cl->set_generation(this);
  younger_refs_in_space_iterate(_cmsSpace, cl);
  cl->reset_generation();
}

void
ConcurrentMarkSweepGeneration::oop_iterate(MemRegion mr, OopClosure* cl) {
  if (freelistLock()->owned_by_self()) {
    Generation::oop_iterate(mr, cl);
  } else {
    MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
    Generation::oop_iterate(mr, cl);
  }
}
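
// Note the locking idiom above, repeated in the iterators that follow:
// take the free list lock only if the calling thread does not already hold
// it, so these entry points work both from inside the collector (locks
// already held) and from contexts such as heap verification, where -- as
// noted below -- gc_prologue/gc_epilogue never ran.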

void
ConcurrentMarkSweepGeneration::oop_iterate(OopClosure* cl) {
  if (freelistLock()->owned_by_self()) {
    Generation::oop_iterate(cl);
  } else {
    MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
    Generation::oop_iterate(cl);
  }
}

void
ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
  if (freelistLock()->owned_by_self()) {
    Generation::object_iterate(cl);
  } else {
    MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
    Generation::object_iterate(cl);
  }
}

void
ConcurrentMarkSweepGeneration::safe_object_iterate(ObjectClosure* cl) {
  if (freelistLock()->owned_by_self()) {
    Generation::safe_object_iterate(cl);
  } else {
    MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
    Generation::safe_object_iterate(cl);
  }
}

void
ConcurrentMarkSweepGeneration::pre_adjust_pointers() {
}

void
ConcurrentMarkSweepGeneration::post_compact() {
}

void
ConcurrentMarkSweepGeneration::prepare_for_verify() {
  // Fix the linear allocation blocks to look like free blocks.

  // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
  // are not called when the heap is verified during universe initialization and
  // at vm shutdown.
  if (freelistLock()->owned_by_self()) {
    cmsSpace()->prepare_for_verify();
  } else {
    MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
    cmsSpace()->prepare_for_verify();
  }
}

void
ConcurrentMarkSweepGeneration::verify(bool allow_dirty /* ignored */) {
  // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
  // are not called when the heap is verified during universe initialization and
  // at vm shutdown.
  if (freelistLock()->owned_by_self()) {
    cmsSpace()->verify(false /* ignored */);
  } else {
    MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
    cmsSpace()->verify(false /* ignored */);
  }
}

void CMSCollector::verify(bool allow_dirty /* ignored */) {
  _cmsGen->verify(allow_dirty);
  _permGen->verify(allow_dirty);
}

#ifndef PRODUCT
bool CMSCollector::overflow_list_is_empty() const {
  assert(_num_par_pushes >= 0, "Inconsistency");
  if (_overflow_list == NULL) {
    assert(_num_par_pushes == 0, "Inconsistency");
  }
  return _overflow_list == NULL;
}

// The methods verify_work_stacks_empty() and verify_overflow_empty()
// merely consolidate assertion checks that appear to occur together frequently.
void CMSCollector::verify_work_stacks_empty() const {
  assert(_markStack.isEmpty(), "Marking stack should be empty");
  assert(overflow_list_is_empty(), "Overflow list should be empty");
}

void CMSCollector::verify_overflow_empty() const {
  assert(overflow_list_is_empty(), "Overflow list should be empty");
  assert(no_preserved_marks(), "No preserved marks");
}
#endif // PRODUCT

// Decide if we want to enable class unloading as part of the
// ensuing concurrent GC cycle. We will collect the perm gen and
// unload classes if it's the case that:
// (1) an explicit gc request has been made and the flag
//     ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
// (2) (a) class unloading is enabled at the command line, and
//     (b) (i)   perm gen threshold has been crossed, or
//         (ii)  old gen is getting really full, or
//         (iii) the previous N CMS collections did not collect the
//               perm gen
// NOTE: Provided there is no change in the state of the heap between
// calls to this method, it should have idempotent results. Moreover,
// its results should be monotonically increasing (i.e. going from 0 to 1,
// but not 1 to 0) between successive calls between which the heap was
// not collected. For the implementation below, it must thus rely on
// the property that concurrent_cycles_since_last_unload()
// will not decrease unless a collection cycle happened and that
// _permGen->should_concurrent_collect() and _cmsGen->is_too_full() are
// themselves also monotonic in that sense. See check_monotonicity()
// below.
bool CMSCollector::update_should_unload_classes() {
  _should_unload_classes = false;
  // Condition 1 above
  if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
    _should_unload_classes = true;
  } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
    // Disjuncts 2.b.(i,ii,iii) above
    _should_unload_classes = (concurrent_cycles_since_last_unload() >=
                              CMSClassUnloadingMaxInterval)
                           || _permGen->should_concurrent_collect()
                           || _cmsGen->is_too_full();
  }
  return _should_unload_classes;
}
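
// An illustrative (hypothetical) decision trace for condition (2) above:
// with CMSClassUnloadingEnabled set and CMSClassUnloadingMaxInterval == 5,
// a cycle in which neither the perm gen nor the old gen triggers its own
// predicate will still choose to unload classes once at least five
// concurrent cycles have completed since the last unloading cycle --
// disjunct 2.b.(iii) above.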

bool ConcurrentMarkSweepGeneration::is_too_full() const {
  bool res = should_concurrent_collect();
  res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
  return res;
}

void CMSCollector::setup_cms_unloading_and_verification_state() {
  const bool should_verify =    VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
                             || VerifyBeforeExit;
  const int  rso           =    SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;

  if (should_unload_classes()) {       // Should unload classes this cycle
    remove_root_scanning_option(rso);  // Shrink the root set appropriately
    set_verifying(should_verify);      // Set verification state for this cycle
    return;                            // Nothing else needs to be done at this time
  }

  // Not unloading classes this cycle
  assert(!should_unload_classes(), "Inconsistency!");
  if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
    // We were not verifying, or we _were_ unloading classes in the last cycle,
    // AND some verification options are enabled this cycle; in this case,
    // we must make sure that the deadness map is allocated if not already so,
    // and cleared (if already allocated previously --
    // CMSBitMap::sizeInBits() is used to determine if it's allocated).
    if (perm_gen_verify_bit_map()->sizeInBits() == 0) {
      if (!perm_gen_verify_bit_map()->allocate(_permGen->reserved())) {
        warning("Failed to allocate permanent generation verification CMS Bit Map;\n"
                "permanent generation verification disabled");
        return;  // Note that we leave verification disabled, so we'll retry this
                 // allocation next cycle. We _could_ remember this failure
                 // and skip further attempts and permanently disable verification
                 // attempts if that is considered more desirable.
      }
      assert(perm_gen_verify_bit_map()->covers(_permGen->reserved()),
             "_perm_gen_ver_bit_map inconsistency?");
    } else {
      perm_gen_verify_bit_map()->clear_all();
    }
    // Include symbols, strings and code cache elements to prevent their resurrection.
    add_root_scanning_option(rso);
    set_verifying(true);
  } else if (verifying() && !should_verify) {
    // We were verifying, but some verification flags got disabled.
    set_verifying(false);
    // Exclude symbols, strings and code cache elements from root scanning to
    // reduce IM and RM pauses.
    remove_root_scanning_option(rso);
  }
}


#ifndef PRODUCT
HeapWord* CMSCollector::block_start(const void* p) const {
  const HeapWord* addr = (HeapWord*)p;
  if (_span.contains(p)) {
    if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
      return _cmsGen->cmsSpace()->block_start(p);
    } else {
      assert(_permGen->cmsSpace()->is_in_reserved(addr),
             "Inconsistent _span?");
      return _permGen->cmsSpace()->block_start(p);
    }
  }
  return NULL;
}
#endif

HeapWord*
ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
                                                   bool   tlab,
                                                   bool   parallel) {
  CMSSynchronousYieldRequest yr;
  assert(!tlab, "Can't deal with TLAB allocation");
  MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
  expand(word_size*HeapWordSize, MinHeapDeltaBytes,
    CMSExpansionCause::_satisfy_allocation);
  if (GCExpandToAllocateDelayMillis > 0) {
    os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
  }
  return have_lock_and_allocate(word_size, tlab);
}

// YSR: All of this generation expansion/shrinking stuff is an exact copy of
// OneContigSpaceCardGeneration, which makes me wonder if we should move this
// to CardGeneration and share it...
bool ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes) {
  return CardGeneration::expand(bytes, expand_bytes);
}

void ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes,
  CMSExpansionCause::Cause cause)
{

  bool success = expand(bytes, expand_bytes);

  // remember why we expanded; this information is used
  // by shouldConcurrentCollect() when making decisions on whether to start
  // a new CMS cycle.
  if (success) {
    set_expansion_cause(cause);
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("Expanded CMS gen for %s",
        CMSExpansionCause::to_string(cause));
    }
  }
}

HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
  HeapWord* res = NULL;
  MutexLocker x(ParGCRareEvent_lock);
  while (true) {
    // Expansion by some other thread might make alloc OK now:
    res = ps->lab.alloc(word_sz);
    if (res != NULL) return res;
    // If there's not enough expansion space available, give up.
    if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
      return NULL;
    }
    // Otherwise, we try expansion.
    expand(word_sz*HeapWordSize, MinHeapDeltaBytes,
      CMSExpansionCause::_allocate_par_lab);
    // Now go around the loop and try alloc again;
    // A competing par_promote might beat us to the expansion space,
    // so we may go around the loop again if promotion fails again.
    if (GCExpandToAllocateDelayMillis > 0) {
      os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
    }
  }
}
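
// Note the shape of the retry loop above, which expand_and_ensure_spooling_space()
// below shares: under ParGCRareEvent_lock, (1) first retry the allocation,
// since a competing thread's expansion may already have made space;
// (2) give up if the virtual space has no uncommitted room left; and
// (3) otherwise expand and go around again. The optional
// GCExpandToAllocateDelayMillis sleep stretches the window between
// expansion and allocation, presumably to shake out races in testing.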

bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
  PromotionInfo* promo) {
  MutexLocker x(ParGCRareEvent_lock);
  size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
  while (true) {
    // Expansion by some other thread might make alloc OK now:
    if (promo->ensure_spooling_space()) {
      assert(promo->has_spooling_space(),
             "Post-condition of successful ensure_spooling_space()");
      return true;
    }
    // If there's not enough expansion space available, give up.
    if (_virtual_space.uncommitted_size() < refill_size_bytes) {
      return false;
    }
    // Otherwise, we try expansion.
    expand(refill_size_bytes, MinHeapDeltaBytes,
      CMSExpansionCause::_allocate_par_spooling_space);
    // Now go around the loop and try alloc again;
    // A competing allocation might beat us to the expansion space,
    // so we may go around the loop again if allocation fails again.
    if (GCExpandToAllocateDelayMillis > 0) {
      os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
    }
  }
}



void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
  assert_locked_or_safepoint(Heap_lock);
  size_t size = ReservedSpace::page_align_size_down(bytes);
  if (size > 0) {
    shrink_by(size);
  }
}

bool ConcurrentMarkSweepGeneration::grow_by(size_t bytes) {
  assert_locked_or_safepoint(Heap_lock);
  bool result = _virtual_space.expand_by(bytes);
  if (result) {
    HeapWord* old_end = _cmsSpace->end();
    size_t new_word_size =
      heap_word_size(_virtual_space.committed_size());
    MemRegion mr(_cmsSpace->bottom(), new_word_size);
    _bts->resize(new_word_size);  // resize the block offset shared array
    Universe::heap()->barrier_set()->resize_covered_region(mr);
    // Hmmmm... why doesn't CFLS::set_end verify locking?
    // This is quite ugly; FIX ME XXX
    _cmsSpace->assert_locked(freelistLock());
    _cmsSpace->set_end((HeapWord*)_virtual_space.high());

    // update the space and generation capacity counters
    if (UsePerfData) {
      _space_counters->update_capacity();
      _gen_counters->update_all();
    }

    if (Verbose && PrintGC) {
      size_t new_mem_size = _virtual_space.committed_size();
      size_t old_mem_size = new_mem_size - bytes;
      gclog_or_tty->print_cr("Expanding %s from %ldK by %ldK to %ldK",
                      name(), old_mem_size/K, bytes/K, new_mem_size/K);
    }
  }
  return result;
}
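
// Worth noting (an inference from the code, not a documented invariant):
// grow_by() resizes the block offset shared array and the card table's
// covered region before publishing the space's new end(), so the auxiliary
// structures never describe less memory than the space claims to have.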

bool ConcurrentMarkSweepGeneration::grow_to_reserved() {
  assert_locked_or_safepoint(Heap_lock);
  bool success = true;
  const size_t remaining_bytes = _virtual_space.uncommitted_size();
  if (remaining_bytes > 0) {
    success = grow_by(remaining_bytes);
    DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
  }
  return success;
}

void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) {
  assert_locked_or_safepoint(Heap_lock);
  assert_lock_strong(freelistLock());
  // XXX Fix when compaction is implemented.
  warning("Shrinking of CMS not yet implemented");
  return;
}


// Simple ctor/dtor wrapper for accounting & timer chores around concurrent
// phases.
class CMSPhaseAccounting: public StackObj {
 public:
  CMSPhaseAccounting(CMSCollector *collector,
                     const char *phase,
                     bool print_cr = true);
  ~CMSPhaseAccounting();

 private:
  CMSCollector *_collector;
  const char *_phase;
  elapsedTimer _wallclock;
  bool _print_cr;

 public:
  // Not MT-safe; so do not pass around these StackObj's
  // where they may be accessed by other threads.
  jlong wallclock_millis() {
    assert(_wallclock.is_active(), "Wall clock should not stop");
    _wallclock.stop();  // to record time
    jlong ret = _wallclock.milliseconds();
    _wallclock.start(); // restart
    return ret;
  }
};

CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
                                       const char *phase,
                                       bool print_cr) :
  _collector(collector), _phase(phase), _print_cr(print_cr) {

  if (PrintCMSStatistics != 0) {
    _collector->resetYields();
  }
  if (PrintGCDetails && PrintGCTimeStamps) {
    gclog_or_tty->date_stamp(PrintGCDateStamps);
    gclog_or_tty->stamp();
    gclog_or_tty->print_cr(": [%s-concurrent-%s-start]",
      _collector->cmsGen()->short_name(), _phase);
  }
  _collector->resetTimer();
  _wallclock.start();
  _collector->startTimer();
}

CMSPhaseAccounting::~CMSPhaseAccounting() {
  assert(_wallclock.is_active(), "Wall clock should not have stopped");
  _collector->stopTimer();
  _wallclock.stop();
  if (PrintGCDetails) {
    gclog_or_tty->date_stamp(PrintGCDateStamps);
    if (PrintGCTimeStamps) {
      gclog_or_tty->stamp();
      gclog_or_tty->print(": ");
    }
    gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
                 _collector->cmsGen()->short_name(),
                 _phase, _collector->timerValue(), _wallclock.seconds());
    if (_print_cr) {
      gclog_or_tty->print_cr("");
    }
    if (PrintCMSStatistics != 0) {
      gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
                    _collector->yields());
    }
  }
}
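
// Typical scoped use, as in markFromRoots() below:
//
//   CMSTokenSyncWithLocks ts(true, bitMapLock());
//   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
//   CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
//   res = markFromRootsWork(asynch);
//
// The constructor logs the "-start" line and starts the timers; the
// destructor, run when the scope exits, stops them and prints the
// "[...-concurrent-<phase>: cpu/wall secs]" summary.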

// CMS work

// Checkpoint the roots into this generation from outside
// this generation. [Note this initial checkpoint need only
// be approximate -- we'll do a catch up phase subsequently.]
void CMSCollector::checkpointRootsInitial(bool asynch) {
  assert(_collectorState == InitialMarking, "Wrong collector state");
  check_correct_thread_executing();
  TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());

  ReferenceProcessor* rp = ref_processor();
  SpecializationStats::clear();
  assert(_restart_addr == NULL, "Control point invariant");
  if (asynch) {
    // acquire locks for subsequent manipulations
    MutexLockerEx x(bitMapLock(),
                    Mutex::_no_safepoint_check_flag);
    checkpointRootsInitialWork(asynch);
    rp->verify_no_references_recorded();
    rp->enable_discovery(); // enable ("weak") refs discovery
    _collectorState = Marking;
  } else {
    // (Weak) Refs discovery: this is controlled from genCollectedHeap::do_collection
    // which recognizes if we are a CMS generation, and doesn't try to turn on
    // discovery; verify that they aren't meddling.
    assert(!rp->discovery_is_atomic(),
           "incorrect setting of discovery predicate");
    assert(!rp->discovery_enabled(), "genCollectedHeap shouldn't control "
           "ref discovery for this generation kind");
    // already have locks
    checkpointRootsInitialWork(asynch);
    rp->enable_discovery(); // now enable ("weak") refs discovery
    _collectorState = Marking;
  }
  SpecializationStats::print();
}

void CMSCollector::checkpointRootsInitialWork(bool asynch) {
  assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
  assert(_collectorState == InitialMarking, "just checking");

  // If there has not been a GC[n-1] since last GC[n] cycle completed,
  // precede our marking with a collection of all
  // younger generations to keep floating garbage to a minimum.
  // XXX: we won't do this for now -- it's an optimization to be done later.

  // already have locks
  assert_lock_strong(bitMapLock());
  assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");

  // Setup the verification and class unloading state for this
  // CMS collection cycle.
  setup_cms_unloading_and_verification_state();

  NOT_PRODUCT(TraceTime t("\ncheckpointRootsInitialWork",
    PrintGCDetails && Verbose, true, gclog_or_tty);)
  if (UseAdaptiveSizePolicy) {
    size_policy()->checkpoint_roots_initial_begin();
  }

  // Reset all the PLAB chunk arrays if necessary.
  if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
    reset_survivor_plab_arrays();
  }

  ResourceMark rm;
  HandleMark  hm;

  FalseClosure falseClosure;
  // In the case of a synchronous collection, we will elide the
  // remark step, so it's important to catch all the nmethod oops
  // in this step.
  // The final 'true' flag to gen_process_strong_roots will ensure this.
  // If 'async' is true, we can relax the nmethod tracing.
  MarkRefsIntoClosure notOlder(_span, &_markBitMap);
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  verify_work_stacks_empty();
  verify_overflow_empty();

  gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
  // Update the saved marks which may affect the root scans.
  gch->save_marks();

  // weak reference processing has not started yet.
  ref_processor()->set_enqueuing_is_done(false);

  {
    // This is not needed. DEBUG_ONLY(RememberKlassesChecker imx(true);)
    COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
    gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
    gch->gen_process_strong_roots(_cmsGen->level(),
                                  true,   // younger gens are roots
                                  true,   // activate StrongRootsScope
                                  true,   // collecting perm gen
                                  SharedHeap::ScanningOption(roots_scanning_options()),
                                  &notOlder,
                                  true,   // walk all of code cache if (so & SO_CodeCache)
                                  NULL);
  }

  // Clear mod-union table; it will be dirtied in the prologue of
  // CMS generation per each younger generation collection.

  assert(_modUnionTable.isAllClear(),
       "Was cleared in most recent final checkpoint phase"
       " or no bits are set in the gc_prologue before the start of the next "
       "subsequent marking phase.");

  // Temporarily disabled, since pre/post-consumption closures don't
  // care about precleaned cards
  #if 0
  {
    MemRegion mr = MemRegion((HeapWord*)_virtual_space.low(),
                             (HeapWord*)_virtual_space.high());
    _ct->ct_bs()->preclean_dirty_cards(mr);
  }
  #endif

  // Save the end of the used_region of the constituent generations
  // to be used to limit the extent of sweep in each generation.
  save_sweep_limits();
  if (UseAdaptiveSizePolicy) {
    size_policy()->checkpoint_roots_initial_end(gch->gc_cause());
  }
  verify_overflow_empty();
}

bool CMSCollector::markFromRoots(bool asynch) {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.
  assert(_collectorState == Marking, "inconsistent state?");
  check_correct_thread_executing();
  verify_overflow_empty();

  bool res;
  if (asynch) {

    // Start the timers for adaptive size policy for the concurrent phases
    // Do it here so that the foreground MS can use the concurrent
    // timer since a foreground MS might have the sweep done concurrently
    // or STW.
    if (UseAdaptiveSizePolicy) {
      size_policy()->concurrent_marking_begin();
    }

    // Weak ref discovery note: We may be discovering weak
    // refs in this generation concurrent (but interleaved) with
    // weak ref discovery by a younger generation collector.

    CMSTokenSyncWithLocks ts(true, bitMapLock());
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
    res = markFromRootsWork(asynch);
    if (res) {
      _collectorState = Precleaning;
    } else { // We failed and a foreground collection wants to take over
      assert(_foregroundGCIsActive, "internal state inconsistency");
      assert(_restart_addr == NULL,  "foreground will restart from scratch");
      if (PrintGCDetails) {
        gclog_or_tty->print_cr("bailing out to foreground collection");
      }
    }
    if (UseAdaptiveSizePolicy) {
      size_policy()->concurrent_marking_end();
    }
  } else {
    assert(SafepointSynchronize::is_at_safepoint(),
           "inconsistent with asynch == false");
    if (UseAdaptiveSizePolicy) {
      size_policy()->ms_collection_marking_begin();
    }
    // already have locks
    res = markFromRootsWork(asynch);
    _collectorState = FinalMarking;
    if (UseAdaptiveSizePolicy) {
      GenCollectedHeap* gch = GenCollectedHeap::heap();
      size_policy()->ms_collection_marking_end(gch->gc_cause());
    }
  }
  verify_overflow_empty();
  return res;
}

bool CMSCollector::markFromRootsWork(bool asynch) {
  // iterate over marked bits in bit map, doing a full scan and mark
  // from these roots using the following algorithm:
  // . if oop is to the right of the current scan pointer,
  //   mark corresponding bit (we'll process it later)
  // . else (oop is to left of current scan pointer)
  //   push oop on marking stack
  // . drain the marking stack

  // Note that when we do a marking step we need to hold the
  // bit map lock -- recall that direct allocation (by mutators)
  // and promotion (by younger generation collectors) is also
  // marking the bit map. [the so-called allocate live policy.]
  // Because the implementation of bit map marking is not
  // robust wrt simultaneous marking of bits in the same word,
  // we need to make sure that there is no such interference
  // between concurrent such updates.

  // already have locks
  assert_lock_strong(bitMapLock());

  // Clear the revisit stack, just in case there are any
  // obsolete contents from a short-circuited previous CMS cycle.
  _revisitStack.reset();
  verify_work_stacks_empty();
  verify_overflow_empty();
  assert(_revisitStack.isEmpty(), "tabula rasa");
  DEBUG_ONLY(RememberKlassesChecker cmx(should_unload_classes());)
  bool result = false;
  if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
    result = do_marking_mt(asynch);
  } else {
    result = do_marking_st(asynch);
  }
  return result;
}

// Forward decl
class CMSConcMarkingTask;

class CMSConcMarkingTerminator: public ParallelTaskTerminator {
  CMSCollector* _collector;
  CMSConcMarkingTask* _task;
 public:
  virtual void yield();

  // "n_threads" is the number of threads to be terminated.
  // "queue_set" is a set of work queues of other threads.
  // "collector" is the CMS collector associated with this task terminator.
  // "yield" indicates whether we need the gang as a whole to yield.
  CMSConcMarkingTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) :
    ParallelTaskTerminator(n_threads, queue_set),
    _collector(collector) { }

  void set_task(CMSConcMarkingTask* task) {
    _task = task;
  }
};

class CMSConcMarkingTerminatorTerminator: public TerminatorTerminator {
  CMSConcMarkingTask* _task;
 public:
  bool should_exit_termination();
  void set_task(CMSConcMarkingTask* task) {
    _task = task;
  }
};

// MT Concurrent Marking Task
class CMSConcMarkingTask: public YieldingFlexibleGangTask {
  CMSCollector* _collector;
  int           _n_workers;       // requested/desired # workers
  bool          _asynch;
  bool          _result;
  CompactibleFreeListSpace* _cms_space;
  CompactibleFreeListSpace* _perm_space;
  char          _pad_front[64];   // padding to ...
  HeapWord*     _global_finger;   // ... avoid sharing cache line
  char          _pad_back[64];
  HeapWord*     _restart_addr;

  // Exposed here for yielding support
  Mutex* const _bit_map_lock;

  // The per thread work queues, available here for stealing
  OopTaskQueueSet* _task_queues;

  // Termination (and yielding) support
  CMSConcMarkingTerminator _term;
  CMSConcMarkingTerminatorTerminator _term_term;

 public:
  CMSConcMarkingTask(CMSCollector* collector,
                     CompactibleFreeListSpace* cms_space,
                     CompactibleFreeListSpace* perm_space,
                     bool asynch,
                     YieldingFlexibleWorkGang* workers,
                     OopTaskQueueSet* task_queues):
    YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
    _collector(collector),
    _cms_space(cms_space),
    _perm_space(perm_space),
    _asynch(asynch), _n_workers(0), _result(true),
    _task_queues(task_queues),
    _term(_n_workers, task_queues, _collector),
    _bit_map_lock(collector->bitMapLock())
  {
    _requested_size = _n_workers;
    _term.set_task(this);
    _term_term.set_task(this);
    assert(_cms_space->bottom() < _perm_space->bottom(),
           "Finger incorrectly initialized below");
    _restart_addr = _global_finger = _cms_space->bottom();
  }


  OopTaskQueueSet* task_queues()  { return _task_queues; }

  OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }

  HeapWord** global_finger_addr() { return &_global_finger; }

  CMSConcMarkingTerminator* terminator() { return &_term; }

  virtual void set_for_termination(int active_workers) {
    terminator()->reset_for_reuse(active_workers);
  }

  void work(int i);
  bool should_yield() {
    return    ConcurrentMarkSweepThread::should_yield()
           && !_collector->foregroundGCIsActive()
           && _asynch;
  }

  virtual void coordinator_yield();  // stuff done by coordinator
  bool result() { return _result; }

  void reset(HeapWord* ra) {
    assert(_global_finger >= _cms_space->end(),  "Postcondition of ::work(i)");
    assert(_global_finger >= _perm_space->end(), "Postcondition of ::work(i)");
    assert(ra             <  _perm_space->end(), "ra too large");
    _restart_addr = _global_finger = ra;
    _term.reset_for_reuse();
  }

  static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
                                           OopTaskQueue* work_q);

 private:
  void do_scan_and_mark(int i, CompactibleFreeListSpace* sp);
  void do_work_steal(int i);
  void bump_global_finger(HeapWord* f);
};
3816 | |
3817 bool CMSConcMarkingTerminatorTerminator::should_exit_termination() {
3818 assert(_task != NULL, "Error");
3819 return _task->yielding();
3820 // Note that we do not need the disjunct || _task->should_yield() above
3821 // because we want terminating threads to yield only if the task
3822 // is already in the midst of yielding, which happens only after at least one
3823 // thread has yielded.
3824 }
3825
0 | 3826 void CMSConcMarkingTerminator::yield() { |
3827 if (_task->should_yield()) { |
0 | 3828 _task->yield(); |
3829 } else { | |
3830 ParallelTaskTerminator::yield(); | |
3831 } | |
3832 } | |
3833 | |
3834 //////////////////////////////////////////////////////////////// | |
3835 // Concurrent Marking Algorithm Sketch | |
3836 //////////////////////////////////////////////////////////////// | |
3837 // Until all tasks exhausted (both spaces): | |
3838 // -- claim next available chunk | |
3839 // -- bump global finger via CAS | |
3840 // -- find first object that starts in this chunk | |
3841 // and start scanning bitmap from that position | |
3842 // -- scan marked objects for oops | |
3843 // -- CAS-mark target, and if successful: | |
3844 // . if target oop is above global finger (volatile read) | |
3845 // nothing to do | |
3846 // . if target oop is in chunk and above local finger | |
3847 // then nothing to do | |
3848 // . else push on work-queue | |
3849 // -- Deal with possible overflow issues: | |
3850 // . local work-queue overflow causes stuff to be pushed on | |
3851 // global (common) overflow queue | |
3852 // . always first empty local work queue | |
3853 // . then get a batch of oops from global work queue if any | |
3854 // . then do work stealing | |
3855 // -- When all tasks claimed (both spaces) | |
3856 // and local work queue empty, | |
3857 // then in a loop do: | |
3858 // . check global overflow stack; steal a batch of oops and trace | |
3859 // . try to steal from other threads if GOS is empty
3860 // . if neither is available, offer termination | |
3861 // -- Terminate and return result | |
3862 // | |
3863 void CMSConcMarkingTask::work(int i) { | |
3864 elapsedTimer _timer; | |
3865 ResourceMark rm; | |
3866 HandleMark hm; | |
3867 | |
3868 DEBUG_ONLY(_collector->verify_overflow_empty();) | |
3869 | |
3870 // Before we begin work, our work queue should be empty | |
3871 assert(work_queue(i)->size() == 0, "Expected to be empty"); | |
3872 // Scan the bitmap covering _cms_space, tracing through grey objects. | |
3873 _timer.start(); | |
3874 do_scan_and_mark(i, _cms_space); | |
3875 _timer.stop(); | |
3876 if (PrintCMSStatistics != 0) { | |
3877 gclog_or_tty->print_cr("Finished cms space scanning in %dth thread: %3.3f sec", | |
3878 i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers | |
3879 } | |
3880 | |
3881 // ... do the same for the _perm_space | |
3882 _timer.reset(); | |
3883 _timer.start(); | |
3884 do_scan_and_mark(i, _perm_space); | |
3885 _timer.stop(); | |
3886 if (PrintCMSStatistics != 0) { | |
3887 gclog_or_tty->print_cr("Finished perm space scanning in %dth thread: %3.3f sec", | |
3888 i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers | |
3889 } | |
3890 | |
3891 // ... do work stealing | |
3892 _timer.reset(); | |
3893 _timer.start(); | |
3894 do_work_steal(i); | |
3895 _timer.stop(); | |
3896 if (PrintCMSStatistics != 0) { | |
3897 gclog_or_tty->print_cr("Finished work stealing in %dth thread: %3.3f sec", | |
3898 i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers | |
3899 } | |
3900 assert(_collector->_markStack.isEmpty(), "Should have been emptied"); | |
3901 assert(work_queue(i)->size() == 0, "Should have been emptied"); | |
3902 // Note that under the current task protocol, the | |
3903 // following assertion is true even of the spaces | |
3904 // expanded since the completion of the concurrent | |
3905 // marking. XXX This will likely change under a strict | |
3906 // ABORT semantics. | |
3907 assert(_global_finger > _cms_space->end() && | |
3908 _global_finger >= _perm_space->end(), | |
3909 "All tasks have been completed"); | |
3910 DEBUG_ONLY(_collector->verify_overflow_empty();) | |
3911 } | |
3912 | |
3913 void CMSConcMarkingTask::bump_global_finger(HeapWord* f) { | |
3914 HeapWord* read = _global_finger; | |
3915 HeapWord* cur = read; | |
3916 while (f > read) { | |
3917 cur = read; | |
3918 read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur); | |
3919 if (cur == read) { | |
3920 // our cas succeeded | |
3921 assert(_global_finger >= f, "protocol consistency"); | |
3922 break; | |
3923 } | |
3924 } | |
3925 } | |
3926 | |
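// Illustrative aside, not from the HotSpot sources: bump_global_finger()
// above is the classic lock-free "monotonic maximum" idiom. A minimal
// stand-alone sketch of the same idiom, written against std::atomic instead
// of HotSpot's Atomic::cmpxchg_ptr; every name below is hypothetical.

#include <atomic>
#include <cstdint>

static void bump_to_at_least(std::atomic<std::uintptr_t>& finger,
                             std::uintptr_t f) {
  std::uintptr_t cur = finger.load(std::memory_order_relaxed);
  while (f > cur) {
    // On failure, compare_exchange_weak refreshes 'cur' with the value some
    // other thread installed, and the loop re-tests f > cur against it.
    if (finger.compare_exchange_weak(cur, f)) {
      break;  // success: finger >= f now holds, mirroring the assert above
    }
  }
}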
3927 // This is really inefficient, and should be redone by | |
3928 // using (not yet available) block-read and -write interfaces to the | |
3929 // stack and the work_queue. XXX FIX ME !!! | |
3930 bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk, | |
3931 OopTaskQueue* work_q) { | |
3932 // Fast lock-free check | |
3933 if (ovflw_stk->length() == 0) { | |
3934 return false; | |
3935 } | |
3936 assert(work_q->size() == 0, "Shouldn't steal"); | |
3937 MutexLockerEx ml(ovflw_stk->par_lock(), | |
3938 Mutex::_no_safepoint_check_flag); | |
3939 // Grab up to 1/4 the size of the work queue | |
3940 size_t num = MIN2((size_t)(work_q->max_elems() - work_q->size())/4, |
0 | 3941 (size_t)ParGCDesiredObjsFromOverflowList); |
3942 num = MIN2(num, ovflw_stk->length()); | |
3943 for (int i = (int) num; i > 0; i--) { | |
3944 oop cur = ovflw_stk->pop(); | |
3945 assert(cur != NULL, "Counted wrong?"); | |
3946 work_q->push(cur); | |
3947 } | |
3948 return num > 0; | |
3949 } | |
3950 | |
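// Illustrative aside, not from the HotSpot sources: the same bounded-batch
// transfer pattern as get_work_from_overflow_stack(), sketched with standard
// C++ containers standing in for CMSMarkStack and OopTaskQueue (all names
// hypothetical). The original's benign racy length() pre-check is folded
// under the lock here to keep the sketch data-race-free.

#include <algorithm>
#include <cstddef>
#include <mutex>
#include <vector>

template <typename T>
static bool grab_overflow_batch(std::vector<T>& overflow, std::mutex& lock,
                                std::vector<T>& local_q,
                                std::size_t local_capacity) {
  std::lock_guard<std::mutex> guard(lock);
  if (overflow.empty()) return false;
  // Take at most a quarter of the local queue's free room per call, so one
  // worker cannot drain the shared stack at the expense of the others.
  std::size_t num = std::min((local_capacity - local_q.size()) / 4,
                             overflow.size());
  for (std::size_t i = 0; i < num; ++i) {
    local_q.push_back(overflow.back());
    overflow.pop_back();
  }
  return num > 0;
}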
3951 void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) { | |
3952 SequentialSubTasksDone* pst = sp->conc_par_seq_tasks(); | |
3953 int n_tasks = pst->n_tasks(); | |
3954 // We allow that there may be no tasks to do here because | |
3955 // we are restarting after a stack overflow. | |
3956 assert(pst->valid() || n_tasks == 0, "Uninitialized use?"); |
0 | 3957 int nth_task = 0; |
3958 | |
3959 HeapWord* aligned_start = sp->bottom();
3960 if (sp->used_region().contains(_restart_addr)) {
3961 // Align down to a card boundary for the start of 0th task
3962 // for this space.
3963 aligned_start =
3964 (HeapWord*)align_size_down((uintptr_t)_restart_addr,
3965 CardTableModRefBS::card_size);
3966 }
3967
0 | 3968 size_t chunk_size = sp->marking_task_size(); |
3969 while (!pst->is_task_claimed(/* reference */ nth_task)) { | |
3970 // Having claimed the nth task in this space, | |
3971 // compute the chunk that it corresponds to: | |
3972 MemRegion span = MemRegion(aligned_start + nth_task*chunk_size,
3973 aligned_start + (nth_task+1)*chunk_size);
0 | 3974 // Try and bump the global finger via a CAS; |
3975 // note that we need to do the global finger bump | |
3976 // _before_ taking the intersection below, because | |
3977 // the task corresponding to that region will be | |
3978 // deemed done even if the used_region() expands | |
3979 // because of allocation -- as it almost certainly will | |
3980 // during start-up while the threads yield in the | |
3981 // closure below. | |
3982 HeapWord* finger = span.end(); | |
3983 bump_global_finger(finger); // atomically | |
3984 // There are null tasks here corresponding to chunks | |
3985 // beyond the "top" address of the space. | |
3986 span = span.intersection(sp->used_region()); | |
3987 if (!span.is_empty()) { // Non-null task | |
3988 HeapWord* prev_obj;
3989 assert(!span.contains(_restart_addr) || nth_task == 0,
3990 "Inconsistency");
3991 if (nth_task == 0) {
3992 // For the 0th task, we'll not need to compute a block_start.
3993 if (span.contains(_restart_addr)) {
3994 // In the case of a restart because of stack overflow,
3995 // we might additionally skip a chunk prefix.
3996 prev_obj = _restart_addr;
0 | 3997 } else { |
3998 prev_obj = span.start();
3999 }
4000 } else {
4001 // We want to skip the first object because
4002 // the protocol is to scan any object in its entirety
4003 // that _starts_ in this span; a fortiori, any
4004 // object starting in an earlier span is scanned
4005 // as part of an earlier claimed task.
4006 // Below we use the "careful" version of block_start
4007 // so we do not try to navigate uninitialized objects.
4008 prev_obj = sp->block_start_careful(span.start());
4009 // Below we use a variant of block_size that uses the
4010 // Printezis bits to avoid waiting for allocated
4011 // objects to become initialized/parsable.
4012 while (prev_obj < span.start()) {
4013 size_t sz = sp->block_size_no_stall(prev_obj, _collector);
4014 if (sz > 0) {
4015 prev_obj += sz;
4016 } else {
4017 // In this case we may end up doing a bit of redundant
4018 // scanning, but that appears unavoidable, short of
4019 // locking the free list locks; see bug 6324141.
4020 break;
4021 }
0 | 4022 } |
4023 } | |
4024 if (prev_obj < span.end()) { | |
4025 MemRegion my_span = MemRegion(prev_obj, span.end()); | |
4026 // Do the marking work within a non-empty span -- | |
4027 // the last argument to the constructor indicates whether the | |
4028 // iteration should be incremental with periodic yields. | |
4029 Par_MarkFromRootsClosure cl(this, _collector, my_span, | |
4030 &_collector->_markBitMap, | |
4031 work_queue(i), | |
4032 &_collector->_markStack, | |
4033 &_collector->_revisitStack, | |
4034 _asynch); | |
4035 _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end()); | |
4036 } // else nothing to do for this task | |
4037 } // else nothing to do for this task | |
4038 } | |
4039 // We'd be tempted to assert here that since there are no | |
4040 // more tasks left to claim in this space, the global_finger | |
4041 // must exceed space->top() and a fortiori space->end(). However, | |
4042 // that would not quite be correct because the bumping of | |
4043 // global_finger occurs strictly after the claiming of a task, | |
4044 // so by the time we reach here the global finger may not yet | |
4045 // have been bumped up by the thread that claimed the last | |
4046 // task. | |
4047 pst->all_tasks_completed(); | |
4048 } | |
4049 | |
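// Illustrative aside, not from the HotSpot sources: the chunk arithmetic at
// the heart of do_scan_and_mark() above, reduced to a stand-alone helper.
// Addresses are modeled as word indices and a region as a right-open
// [start, end) pair; names are hypothetical, and MemRegion::intersection()
// performs this clipping in the real code.

#include <algorithm>
#include <cstddef>

struct WordSpan {
  std::size_t start, end;                        // right-open: [start, end)
  bool is_empty() const { return start >= end; }
};

static WordSpan nth_marking_chunk(std::size_t aligned_start,
                                  std::size_t chunk_size,    // in words
                                  std::size_t nth_task,
                                  std::size_t used_bottom,   // used_region()
                                  std::size_t used_top) {
  WordSpan s = { aligned_start + nth_task * chunk_size,
                 aligned_start + (nth_task + 1) * chunk_size };
  // Intersect with the used part of the space: chunks at or beyond "top"
  // come back empty, which is exactly a "null task" in the loop above.
  s.start = std::max(s.start, used_bottom);
  s.end   = std::min(s.end,   used_top);
  return s;
}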
935 | 4050 class Par_ConcMarkingClosure: public Par_KlassRememberingOopClosure { |
4051 private:
4052 CMSConcMarkingTask* _task;
0 | 4053 MemRegion _span; |
4054 CMSBitMap* _bit_map; | |
4055 CMSMarkStack* _overflow_stack; | |
4056 OopTaskQueue* _work_queue; | |
4057 protected:
4058 DO_OOP_WORK_DEFN
0 | 4059 public: |
4060 Par_ConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue, |
935 | 4061 CMSBitMap* bit_map, CMSMarkStack* overflow_stack, |
4062 CMSMarkStack* revisit_stack): | |
4063 Par_KlassRememberingOopClosure(collector, NULL, revisit_stack), | |
4064 _task(task),
4065 _span(collector->_span),
0 | 4066 _work_queue(work_queue), |
4067 _bit_map(bit_map), | |
935 | 4068 _overflow_stack(overflow_stack) |
4069 { } | |
4070 virtual void do_oop(oop* p);
4071 virtual void do_oop(narrowOop* p);
0 | 4072 void trim_queue(size_t max); |
4073 void handle_stack_overflow(HeapWord* lost); | |
4074 void do_yield_check() {
4075 if (_task->should_yield()) {
4076 _task->yield();
4077 }
4078 }
0 | 4079 }; |
4080 | |
4081 // Grey object scanning during work stealing phase --
4082 // the salient assumption here is that any references
4083 // that are in these stolen objects being scanned must
4084 // already have been initialized (else they would not have
4085 // been published), so we do not need to check for
4086 // uninitialized objects before pushing here.
4087 void Par_ConcMarkingClosure::do_oop(oop obj) {
4088 assert(obj->is_oop_or_null(true), "expected an oop or NULL");
4089 HeapWord* addr = (HeapWord*)obj;
0 | 4090 // Check if oop points into the CMS generation |
4091 // and is not marked | |
4092 if (_span.contains(addr) && !_bit_map->isMarked(addr)) { | |
4093 // a white object ... | |
4094 // If we manage to "claim" the object, by being the | |
4095 // first thread to mark it, then we push it on our | |
4096 // marking stack | |
4097 if (_bit_map->par_mark(addr)) { // ... now grey | |
4098 // push on work queue (grey set) | |
4099 bool simulate_overflow = false; | |
4100 NOT_PRODUCT( | |
4101 if (CMSMarkStackOverflowALot && | |
4102 _collector->simulate_overflow()) { | |
4103 // simulate a stack overflow | |
4104 simulate_overflow = true; | |
4105 } | |
4106 ) | |
4107 if (simulate_overflow || | |
4108 !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) { |
0 | 4109 // stack overflow |
4110 if (PrintCMSStatistics != 0) { | |
4111 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at " | |
4112 SIZE_FORMAT, _overflow_stack->capacity()); | |
4113 } | |
4114 // We cannot assert that the overflow stack is full because | |
4115 // it may have been emptied since. | |
4116 assert(simulate_overflow || | |
4117 _work_queue->size() == _work_queue->max_elems(), | |
4118 "Else push should have succeeded"); | |
4119 handle_stack_overflow(addr); | |
4120 } | |
4121 } // Else, some other thread got there first | |
4122 do_yield_check(); |
0 | 4123 } |
4124 } | |
4125 | |
4126 void Par_ConcMarkingClosure::do_oop(oop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
4127 void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
4128
0 | 4129 void Par_ConcMarkingClosure::trim_queue(size_t max) { |
4130 while (_work_queue->size() > max) { | |
4131 oop new_oop; | |
4132 if (_work_queue->pop_local(new_oop)) { | |
4133 assert(new_oop->is_oop(), "Should be an oop"); | |
4134 assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object"); | |
4135 assert(_span.contains((HeapWord*)new_oop), "Not in span"); | |
4136 assert(new_oop->is_parsable(), "Should be parsable"); | |
4137 new_oop->oop_iterate(this); // do_oop() above | |
4138 do_yield_check(); |
0 | 4139 } |
4140 } | |
4141 } | |
4142 | |
4143 // Upon stack overflow, we discard (part of) the stack, | |
4144 // remembering the least address amongst those discarded | |
4145 // in CMSCollector's _restart_addr.
4146 void Par_ConcMarkingClosure::handle_stack_overflow(HeapWord* lost) { | |
4147 // We need to do this under a mutex to prevent other | |
4148 // workers from interfering with the work done below. |
0 | 4149 MutexLockerEx ml(_overflow_stack->par_lock(), |
4150 Mutex::_no_safepoint_check_flag); | |
4151 // Remember the least grey address discarded | |
4152 HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost); | |
4153 _collector->lower_restart_addr(ra); | |
4154 _overflow_stack->reset(); // discard stack contents | |
4155 _overflow_stack->expand(); // expand the stack if possible | |
4156 } | |
4157 | |
4158 | |
4159 void CMSConcMarkingTask::do_work_steal(int i) { | |
4160 OopTaskQueue* work_q = work_queue(i); | |
4161 oop obj_to_scan; | |
4162 CMSBitMap* bm = &(_collector->_markBitMap); | |
4163 CMSMarkStack* ovflw = &(_collector->_markStack); | |
935 | 4164 CMSMarkStack* revisit = &(_collector->_revisitStack); |
0 | 4165 int* seed = _collector->hash_seed(i); |
4166 Par_ConcMarkingClosure cl(_collector, this, work_q, bm, ovflw, revisit); |
0 | 4167 while (true) { |
4168 cl.trim_queue(0); | |
4169 assert(work_q->size() == 0, "Should have been emptied above"); | |
4170 if (get_work_from_overflow_stack(ovflw, work_q)) { | |
4171 // Can't assert below because the work obtained from the | |
4172 // overflow stack may already have been stolen from us. | |
4173 // assert(work_q->size() > 0, "Work from overflow stack"); | |
4174 continue; | |
4175 } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) { | |
4176 assert(obj_to_scan->is_oop(), "Should be an oop"); | |
4177 assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object"); | |
4178 obj_to_scan->oop_iterate(&cl); | |
4179 } else if (terminator()->offer_termination(&_term_term)) {
4180 assert(work_q->size() == 0, "Impossible!");
4181 break;
4182 } else if (yielding() || should_yield()) {
4183 yield();
0 | 4184 } |
4185 } | |
4186 } | |
4187 | |
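// Illustrative aside, not from the HotSpot sources: the control flow of
// do_work_steal() above with the CMS-specific pieces hidden behind
// hypothetical callables. The ordering is the essential part: drain the
// local queue, then refill from the shared overflow stack, then steal, and
// only offer termination when all three come up empty.

#include <functional>

static void work_steal_loop(const std::function<void()>& drain_local,
                            const std::function<bool()>& refill_from_overflow,
                            const std::function<bool()>& steal_and_trace,
                            const std::function<bool()>& offer_termination,
                            const std::function<void()>& yield_if_requested) {
  while (true) {
    drain_local();                         // trim_queue(0) in the original
    if (refill_from_overflow()) continue;  // got a batch: go drain it
    if (steal_and_trace())      continue;  // stole an object and traced it
    if (offer_termination())    break;     // everyone idle: marking is done
    yield_if_requested();                  // termination refused, e.g. while
  }                                        // a yield request is pending
}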
4188 // This is run by the CMS (coordinator) thread. | |
4189 void CMSConcMarkingTask::coordinator_yield() { | |
4190 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), | |
4191 "CMS thread should hold CMS token"); | |
935 | 4192 DEBUG_ONLY(RememberKlassesChecker mux(false);) |
0 | 4193 // First give up the locks, then yield, then re-lock |
4194 // We should probably use a constructor/destructor idiom to | |
4195 // do this unlock/lock or modify the MutexUnlocker class to | |
4196 // serve our purpose. XXX | |
4197 assert_lock_strong(_bit_map_lock); | |
4198 _bit_map_lock->unlock(); | |
4199 ConcurrentMarkSweepThread::desynchronize(true); | |
4200 ConcurrentMarkSweepThread::acknowledge_yield_request(); | |
4201 _collector->stopTimer(); | |
4202 if (PrintCMSStatistics != 0) { | |
4203 _collector->incrementYields(); | |
4204 } | |
4205 _collector->icms_wait(); | |
4206 | |
4207 // It is possible for whichever thread initiated the yield request | |
4208 // not to get a chance to wake up and take the bitmap lock between | |
4209 // this thread releasing it and reacquiring it. So, while the | |
4210 // should_yield() flag is on, let's sleep for a bit to give the | |
4211 // other thread a chance to wake up. The limit imposed on the number | |
4212 // of iterations is defensive, to avoid any unforeseen circumstances
4213 // putting us into an infinite loop. Since it's always been this | |
4214 // (coordinator_yield()) method that was observed to cause the | |
4215 // problem, we are using a parameter (CMSCoordinatorYieldSleepCount) | |
4216 // which is by default non-zero. For the other seven methods that | |
4217 // also perform the yield operation, we are using a different
4218 // parameter (CMSYieldSleepCount) which is by default zero. This way we | |
4219 // can enable the sleeping for those methods too, if necessary. | |
4220 // See 6442774. | |
4221 // | |
4222 // We really need to reconsider the synchronization between the GC | |
4223 // thread and the yield-requesting threads in the future and we | |
4224 // should really use wait/notify, which is the recommended | |
4225 // way of doing this type of interaction. Additionally, we should | |
4226 // consolidate the eight methods that do the yield operation, which
4227 // are almost identical, into one for better maintainability and
4228 // readability. See 6445193. | |
4229 // | |
4230 // Tony 2006.06.29 | |
4231 for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount && | |
4232 ConcurrentMarkSweepThread::should_yield() &&
4233 !CMSCollector::foregroundGCIsActive(); ++i) {
0 | 4234 os::sleep(Thread::current(), 1, false); |
4235 ConcurrentMarkSweepThread::acknowledge_yield_request(); | |
4236 } | |
4237 | |
4238 ConcurrentMarkSweepThread::synchronize(true); | |
4239 _bit_map_lock->lock_without_safepoint_check(); | |
4240 _collector->startTimer(); | |
4241 } | |
4242 | |
4243 bool CMSCollector::do_marking_mt(bool asynch) { | |
1284 | 4244 assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition"); |
0 | 4245 // In the future this would be determined ergonomically, based |
4246 // on #cpu's, # active mutator threads (and load), and mutation rate. | |
1284 | 4247 int num_workers = ConcGCThreads; |
0 | 4248 |
4249 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace(); | |
4250 CompactibleFreeListSpace* perm_space = _permGen->cmsSpace(); | |
4251 | |
4252 CMSConcMarkingTask tsk(this,
4253 cms_space,
4254 perm_space,
4255 asynch,
4256 conc_workers(),
4257 task_queues());
0 | 4258 |
4259 // Since the actual number of workers we get may be different | |
4260 // from the number we requested above, do we need to do anything different | |
4261 // below? In particular, maybe we need to subclass the SequentialSubTasksDone
4262 // class?? XXX | |
4263 cms_space ->initialize_sequential_subtasks_for_marking(num_workers); | |
4264 perm_space->initialize_sequential_subtasks_for_marking(num_workers); | |
4265 | |
4266 // Refs discovery is already non-atomic. | |
4267 assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic"); | |
4268 assert(ref_processor()->discovery_is_mt(), "Discovery should be MT");
4269 DEBUG_ONLY(RememberKlassesChecker cmx(should_unload_classes());)
0 | 4270 conc_workers()->start_task(&tsk); |
4271 while (tsk.yielded()) { | |
4272 tsk.coordinator_yield(); | |
4273 conc_workers()->continue_task(&tsk); | |
4274 } | |
4275 // If the task was aborted, _restart_addr will be non-NULL | |
4276 assert(tsk.completed() || _restart_addr != NULL, "Inconsistency"); | |
4277 while (_restart_addr != NULL) { | |
4278 // XXX For now we do not make use of ABORTED state and have not | |
4279 // yet implemented the right abort semantics (even in the original | |
4280 // single-threaded CMS case). That needs some more investigation | |
4281 // and is deferred for now; see CR# TBF. 07252005YSR. XXX | |
4282 assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency"); | |
4283 // If _restart_addr is non-NULL, a marking stack overflow | |
605 | 4284 // occurred; we need to do a fresh marking iteration from the |
0 | 4285 // indicated restart address. |
4286 if (_foregroundGCIsActive && asynch) { | |
4287 // We may be running into repeated stack overflows, having | |
4288 // reached the limit of the stack size, while making very | |
4289 // slow forward progress. It may be best to bail out and | |
4290 // let the foreground collector do its job. | |
4291 // Clear _restart_addr, so that foreground GC | |
4292 // works from scratch. This avoids the headache of | |
4293 // a "rescan" which would otherwise be needed because | |
4294 // of the dirty mod union table & card table. | |
4295 _restart_addr = NULL; | |
4296 return false; | |
4297 } | |
4298 // Adjust the task to restart from _restart_addr | |
4299 tsk.reset(_restart_addr); | |
4300 cms_space ->initialize_sequential_subtasks_for_marking(num_workers, | |
4301 _restart_addr); | |
4302 perm_space->initialize_sequential_subtasks_for_marking(num_workers, | |
4303 _restart_addr); | |
4304 _restart_addr = NULL; | |
4305 // Get the workers going again | |
4306 conc_workers()->start_task(&tsk); | |
4307 while (tsk.yielded()) { | |
4308 tsk.coordinator_yield(); | |
4309 conc_workers()->continue_task(&tsk); | |
4310 } | |
4311 } | |
4312 assert(tsk.completed(), "Inconsistency"); | |
4313 assert(tsk.result() == true, "Inconsistency"); | |
4314 return true; | |
4315 } | |
4316 | |
4317 bool CMSCollector::do_marking_st(bool asynch) { | |
4318 ResourceMark rm; | |
4319 HandleMark hm; | |
4320 | |
4321 // Temporarily make refs discovery single threaded (non-MT)
4322 ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
0 | 4323 MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap, |
4324 &_markStack, &_revisitStack, CMSYield && asynch); | |
4325 // the last argument to iterate indicates whether the iteration | |
4326 // should be incremental with periodic yields. | |
4327 _markBitMap.iterate(&markFromRootsClosure); | |
4328 // If _restart_addr is non-NULL, a marking stack overflow | |
605 | 4329 // occurred; we need to do a fresh iteration from the |
0 | 4330 // indicated restart address. |
4331 while (_restart_addr != NULL) { | |
4332 if (_foregroundGCIsActive && asynch) { | |
4333 // We may be running into repeated stack overflows, having | |
4334 // reached the limit of the stack size, while making very | |
4335 // slow forward progress. It may be best to bail out and | |
4336 // let the foreground collector do its job. | |
4337 // Clear _restart_addr, so that foreground GC | |
4338 // works from scratch. This avoids the headache of | |
4339 // a "rescan" which would otherwise be needed because | |
4340 // of the dirty mod union table & card table. | |
4341 _restart_addr = NULL; | |
4342 return false; // indicating failure to complete marking | |
4343 } | |
4344 // Deal with stack overflow: | |
4345 // we restart marking from _restart_addr | |
4346 HeapWord* ra = _restart_addr; | |
4347 markFromRootsClosure.reset(ra); | |
4348 _restart_addr = NULL; | |
4349 _markBitMap.iterate(&markFromRootsClosure, ra, _span.end()); | |
4350 } | |
4351 return true; | |
4352 } | |
4353 | |
4354 void CMSCollector::preclean() { | |
4355 check_correct_thread_executing(); | |
4356 assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread"); | |
4357 verify_work_stacks_empty(); | |
4358 verify_overflow_empty(); | |
4359 _abort_preclean = false; | |
4360 if (CMSPrecleaningEnabled) { | |
4361 _eden_chunk_index = 0; | |
4362 size_t used = get_eden_used(); | |
4363 size_t capacity = get_eden_capacity(); | |
4364 // Don't start sampling unless we will get sufficiently | |
4365 // many samples. | |
4366 if (used < (capacity/(CMSScheduleRemarkSamplingRatio * 100) | |
4367 * CMSScheduleRemarkEdenPenetration)) { | |
4368 _start_sampling = true; | |
4369 } else { | |
4370 _start_sampling = false; | |
4371 } | |
4372 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); | |
4373 CMSPhaseAccounting pa(this, "preclean", !PrintGCDetails); | |
4374 preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1); | |
4375 } | |
4376 CMSTokenSync x(true); // is cms thread | |
4377 if (CMSPrecleaningEnabled) { | |
4378 sample_eden(); | |
4379 _collectorState = AbortablePreclean; | |
4380 } else { | |
4381 _collectorState = FinalMarking; | |
4382 } | |
4383 verify_work_stacks_empty(); | |
4384 verify_overflow_empty(); | |
4385 } | |
4386 | |
4387 // Try and schedule the remark such that young gen | |
4388 // occupancy is CMSScheduleRemarkEdenPenetration %. | |
4389 void CMSCollector::abortable_preclean() { | |
4390 check_correct_thread_executing(); | |
4391 assert(CMSPrecleaningEnabled, "Inconsistent control state"); | |
4392 assert(_collectorState == AbortablePreclean, "Inconsistent control state"); | |
4393 | |
4394 // If Eden's current occupancy is below this threshold, | |
4395 // immediately schedule the remark; else preclean | |
4396 // past the next scavenge in an effort to | |
4397 // schedule the pause as described above. By choosing
4398 // CMSScheduleRemarkEdenSizeThreshold >= max eden size | |
4399 // we will never do an actual abortable preclean cycle. | |
4400 if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) { | |
4401 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); | |
4402 CMSPhaseAccounting pa(this, "abortable-preclean", !PrintGCDetails); | |
4403 // We need more smarts in the abortable preclean | |
4404 // loop below to deal with cases where allocation | |
4405 // in young gen is very very slow, and our precleaning | |
4406 // is running a losing race against a horde of | |
4407 // mutators intent on flooding us with CMS updates | |
4408 // (dirty cards). | |
4409 // One, admittedly dumb, strategy is to give up | |
4410 // after a certain number of abortable precleaning loops | |
4411 // or after a certain maximum time. We want to make | |
4412 // this smarter in the next iteration. | |
4413 // XXX FIX ME!!! YSR | |
4414 size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0; | |
4415 while (!(should_abort_preclean() || | |
4416 ConcurrentMarkSweepThread::should_terminate())) { | |
4417 workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2); | |
4418 cumworkdone += workdone; | |
4419 loops++; | |
4420 // Voluntarily terminate abortable preclean phase if we have | |
4421 // been at it for too long. | |
4422 if ((CMSMaxAbortablePrecleanLoops != 0) && | |
4423 loops >= CMSMaxAbortablePrecleanLoops) { | |
4424 if (PrintGCDetails) { | |
4425 gclog_or_tty->print(" CMS: abort preclean due to loops "); | |
4426 } | |
4427 break; | |
4428 } | |
4429 if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) { | |
4430 if (PrintGCDetails) { | |
4431 gclog_or_tty->print(" CMS: abort preclean due to time "); | |
4432 } | |
4433 break; | |
4434 } | |
4435 // If we are doing little work each iteration, we should | |
4436 // take a short break. | |
4437 if (workdone < CMSAbortablePrecleanMinWorkPerIteration) { | |
4438 // Sleep for some time, waiting for work to accumulate | |
4439 stopTimer(); | |
4440 cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis); | |
4441 startTimer(); | |
4442 waited++; | |
4443 } | |
4444 } | |
4445 if (PrintCMSStatistics > 0) { | |
4446 gclog_or_tty->print(" [%d iterations, %d waits, %d cards)] ", | |
4447 loops, waited, cumworkdone); | |
4448 } | |
4449 } | |
4450 CMSTokenSync x(true); // is cms thread | |
4451 if (_collectorState != Idling) { | |
4452 assert(_collectorState == AbortablePreclean, | |
4453 "Spontaneous state transition?"); | |
4454 _collectorState = FinalMarking; | |
4455 } // Else, a foreground collection completed this CMS cycle. | |
4456 return; | |
4457 } | |
4458 | |
4459 // Respond to an Eden sampling opportunity | |
4460 void CMSCollector::sample_eden() { | |
4461 // Make sure a young gc cannot sneak in between our | |
4462 // reading and recording of a sample. | |
4463 assert(Thread::current()->is_ConcurrentGC_thread(), | |
4464 "Only the cms thread may collect Eden samples"); | |
4465 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), | |
4466 "Should collect samples while holding CMS token"); | |
4467 if (!_start_sampling) { | |
4468 return; | |
4469 } | |
4470 if (_eden_chunk_array) { | |
4471 if (_eden_chunk_index < _eden_chunk_capacity) { | |
4472 _eden_chunk_array[_eden_chunk_index] = *_top_addr; // take sample | |
4473 assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr, | |
4474 "Unexpected state of Eden"); | |
4475 // We'd like to check that what we just sampled is an oop-start address; | |
4476 // however, we cannot do that here since the object may not yet have been | |
4477 // initialized. So we'll instead do the check when we _use_ this sample | |
4478 // later. | |
4479 if (_eden_chunk_index == 0 || | |
4480 (pointer_delta(_eden_chunk_array[_eden_chunk_index], | |
4481 _eden_chunk_array[_eden_chunk_index-1]) | |
4482 >= CMSSamplingGrain)) { | |
4483 _eden_chunk_index++; // commit sample | |
4484 } | |
4485 } | |
4486 } | |
4487 if ((_collectorState == AbortablePreclean) && !_abort_preclean) { | |
4488 size_t used = get_eden_used(); | |
4489 size_t capacity = get_eden_capacity(); | |
4490 assert(used <= capacity, "Unexpected state of Eden"); | |
4491 if (used > (capacity/100 * CMSScheduleRemarkEdenPenetration)) { | |
4492 _abort_preclean = true; | |
4493 } | |
4494 } | |
4495 } | |
4496 | |
4497 | |
4498 size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) { | |
4499 assert(_collectorState == Precleaning || | |
4500 _collectorState == AbortablePreclean, "incorrect state"); | |
4501 ResourceMark rm; | |
4502 HandleMark hm; | |
4503
4504 // Precleaning is currently not MT but the reference processor
4505 // may be set for MT. Disable it temporarily here.
4506 ReferenceProcessor* rp = ref_processor();
4507 ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
4508
0 | 4509 // Do one pass of scrubbing the discovered reference lists |
4510 // to remove any reference objects with strongly-reachable | |
4511 // referents. | |
4512 if (clean_refs) { | |
4513 CMSPrecleanRefsYieldClosure yield_cl(this); | |
4514 assert(rp->span().equals(_span), "Spans should be equal"); | |
4515 CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap, | |
935 | 4516 &_markStack, &_revisitStack, |
4517 true /* preclean */); | |
0 | 4518 CMSDrainMarkingStackClosure complete_trace(this, |
4519 _span, &_markBitMap, &_markStack,
4520 &keep_alive, true /* preclean */);
0 | 4521 |
4522 // We don't want this step to interfere with a young | |
4523 // collection because we don't want to take CPU | |
4524 // or memory bandwidth away from the young GC threads | |
4525 // (which may be as many as there are CPUs). | |
4526 // Note that we don't need to protect ourselves from | |
4527 // interference with mutators because they can't | |
4528 // manipulate the discovered reference lists nor affect | |
4529 // the computed reachability of the referents, the | |
4530 // only properties manipulated by the precleaning | |
4531 // of these reference lists. | |
4532 stopTimer(); | |
4533 CMSTokenSyncWithLocks x(true /* is cms thread */, | |
4534 bitMapLock()); | |
4535 startTimer(); | |
4536 sample_eden(); | |
935 | 4537 |
0 | 4538 // The following will yield to allow foreground |
4539 // collection to proceed promptly. XXX YSR: | |
4540 // The code in this method may need further | |
4541 // tweaking for better performance and some restructuring | |
4542 // for cleaner interfaces. | |
4543 rp->preclean_discovered_references( | |
4544 rp->is_alive_non_header(), &keep_alive, &complete_trace, | |
4545 &yield_cl, should_unload_classes()); |
0 | 4546 } |
4547 | |
4548 if (clean_survivor) { // preclean the active survivor space(s) | |
4549 assert(_young_gen->kind() == Generation::DefNew || | |
4550 _young_gen->kind() == Generation::ParNew || | |
4551 _young_gen->kind() == Generation::ASParNew, | |
4552 "incorrect type for cast"); | |
4553 DefNewGeneration* dng = (DefNewGeneration*)_young_gen; | |
4554 PushAndMarkClosure pam_cl(this, _span, ref_processor(), | |
4555 &_markBitMap, &_modUnionTable, | |
4556 &_markStack, &_revisitStack, | |
4557 true /* precleaning phase */); | |
4558 stopTimer(); | |
4559 CMSTokenSyncWithLocks ts(true /* is cms thread */, | |
4560 bitMapLock()); | |
4561 startTimer(); | |
4562 unsigned int before_count = | |
4563 GenCollectedHeap::heap()->total_collections(); | |
4564 SurvivorSpacePrecleanClosure | |
4565 sss_cl(this, _span, &_markBitMap, &_markStack, | |
4566 &pam_cl, before_count, CMSYield); | |
4567 DEBUG_ONLY(RememberKlassesChecker mx(should_unload_classes());) |
0 | 4568 dng->from()->object_iterate_careful(&sss_cl); |
4569 dng->to()->object_iterate_careful(&sss_cl); | |
4570 } | |
4571 MarkRefsIntoAndScanClosure | |
4572 mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable, | |
4573 &_markStack, &_revisitStack, this, CMSYield, | |
4574 true /* precleaning phase */); | |
4575 // CAUTION: The following closure has persistent state that may need to | |
4576 // be reset upon a decrease in the sequence of addresses it | |
4577 // processes. | |
4578 ScanMarkedObjectsAgainCarefullyClosure | |
4579 smoac_cl(this, _span, | |
4580 &_markBitMap, &_markStack, &_revisitStack, &mrias_cl, CMSYield); | |
4581 | |
4582 // Preclean dirty cards in ModUnionTable and CardTable using | |
4583 // appropriate convergence criterion; | |
4584 // repeat CMSPrecleanIter times unless we find that | |
4585 // we are losing. | |
4586 assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large"); | |
4587 assert(CMSPrecleanNumerator < CMSPrecleanDenominator, | |
4588 "Bad convergence multiplier"); | |
4589 assert(CMSPrecleanThreshold >= 100, | |
4590 "Unreasonably low CMSPrecleanThreshold"); | |
4591 | |
4592 size_t numIter, cumNumCards, lastNumCards, curNumCards; | |
4593 for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0; | |
4594 numIter < CMSPrecleanIter; | |
4595 numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) { | |
4596 curNumCards = preclean_mod_union_table(_cmsGen, &smoac_cl); | |
4597 if (CMSPermGenPrecleaningEnabled) { | |
4598 curNumCards += preclean_mod_union_table(_permGen, &smoac_cl); | |
4599 } | |
4600 if (Verbose && PrintGCDetails) { | |
4601 gclog_or_tty->print(" (modUnionTable: %d cards)", curNumCards); | |
4602 } | |
4603 // Either there are very few dirty cards, so re-mark | |
4604 // pause will be small anyway, or our pre-cleaning isn't | |
4605 // that much faster than the rate at which cards are being | |
4606 // dirtied, so we might as well stop and re-mark since | |
4607 // precleaning won't improve our re-mark time by much. | |
4608 if (curNumCards <= CMSPrecleanThreshold || | |
4609 (numIter > 0 && | |
4610 (curNumCards * CMSPrecleanDenominator > | |
4611 lastNumCards * CMSPrecleanNumerator))) { | |
4612 numIter++; | |
4613 cumNumCards += curNumCards; | |
4614 break; | |
4615 } | |
4616 } | |
4617 curNumCards = preclean_card_table(_cmsGen, &smoac_cl); | |
4618 if (CMSPermGenPrecleaningEnabled) { | |
4619 curNumCards += preclean_card_table(_permGen, &smoac_cl); | |
4620 } | |
4621 cumNumCards += curNumCards; | |
4622 if (PrintGCDetails && PrintCMSStatistics != 0) { | |
4623 gclog_or_tty->print_cr(" (cardTable: %d cards, re-scanned %d cards, %d iterations)", | |
4624 curNumCards, cumNumCards, numIter); | |
4625 } | |
4626 return cumNumCards; // as a measure of useful work done | |
4627 } | |
4628 | |
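// A worked example of the convergence test in the loop above, with values
// chosen purely for illustration: suppose CMSPrecleanNumerator = 2 and
// CMSPrecleanDenominator = 3. After the first pass, the loop stops as soon
// as
//   curNumCards * 3 > lastNumCards * 2,  i.e.  curNumCards > (2/3) * lastNumCards.
// So if one pass scrubs 9000 dirty cards and the next finds 7000, then
// 7000 * 3 = 21000 > 9000 * 2 = 18000 and precleaning stops: the mutators
// are re-dirtying cards almost as fast as we clean them, so the remainder
// is better left to the remark pause. The loop also stops whenever
// curNumCards <= CMSPrecleanThreshold, since a small residue makes the
// remark pause short anyway.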
4629 // PRECLEANING NOTES: | |
4630 // Precleaning involves: | |
4631 // . reading the bits of the modUnionTable and clearing the set bits. | |
4632 // . For the cards corresponding to the set bits, we scan the | |
4633 // objects on those cards. This means we need the free_list_lock | |
4634 // so that we can safely iterate over the CMS space when scanning | |
4635 // for oops. | |
4636 // . When we scan the objects, we'll be both reading and setting | |
4637 // marks in the marking bit map, so we'll need the marking bit map. | |
4638 // . For protecting _collector_state transitions, we take the CGC_lock. | |
4639 // Note that any races in the reading of card table entries by the
4640 // CMS thread on the one hand and the clearing of those entries by the | |
4641 // VM thread or the setting of those entries by the mutator threads on the | |
4642 // other are quite benign. However, for efficiency it makes sense to keep | |
4643 // the VM thread from racing with the CMS thread while the latter is | |
4644 // recording dirty card info to the modUnionTable. We therefore also use the
4645 // CGC_lock to protect the reading of the card table and the mod union | |
4646 // table by the CMS thread.
4647 // . We run concurrently with mutator updates, so scanning | |
4648 // needs to be done carefully -- we should not try to scan | |
4649 // potentially uninitialized objects. | |
4650 // | |
4651 // Locking strategy: While holding the CGC_lock, we scan over and | |
4652 // reset a maximal dirty range of the mod union / card tables, then lock | |
4653 // the free_list_lock and bitmap lock to do a full marking, then | |
4654 // release these locks; and repeat the cycle. This allows for a | |
4655 // certain amount of fairness in the sharing of these locks between | |
4656 // the CMS collector on the one hand, and the VM thread and the | |
4657 // mutators on the other. | |
4658 | |
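// Illustrative aside, not from the HotSpot sources: the lock-cycling
// strategy described above, reduced to a stand-alone sketch with std::mutex.
// All names are hypothetical; the real locks are the CGC_lock, the space's
// free_list_lock and the marking bitMapLock.

#include <cstddef>
#include <mutex>

struct DirtyRange {
  std::size_t begin = 0, end = 0;
  bool is_empty() const { return begin >= end; }
};

static void preclean_lock_cycle(std::mutex& table_lock,   // CGC_lock role
                                std::mutex& heap_locks,   // freelist + bitmap
                                DirtyRange (*take_dirty_range)(),
                                void (*scan_objects_on)(DirtyRange)) {
  while (true) {
    DirtyRange dirty;
    {
      std::lock_guard<std::mutex> g(table_lock);
      dirty = take_dirty_range();   // read and reset a maximal dirty run
    }
    if (dirty.is_empty()) break;    // nothing left to preclean
    {
      std::lock_guard<std::mutex> g(heap_locks);
      scan_objects_on(dirty);       // mark through the objects on that run
    }
    // Both locks are dropped between iterations, giving the VM thread and
    // the mutators a fair chance at them while precleaning is in progress.
  }
}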
4659 // NOTE: preclean_mod_union_table() and preclean_card_table() | |
4660 // further below are largely identical; if you need to modify | |
4661 // one of these methods, please check the other method too. | |
4662 | |
4663 size_t CMSCollector::preclean_mod_union_table( | |
4664 ConcurrentMarkSweepGeneration* gen, | |
4665 ScanMarkedObjectsAgainCarefullyClosure* cl) { | |
4666 verify_work_stacks_empty(); | |
4667 verify_overflow_empty(); | |
4668 | |
935 | 4669 // Turn off checking for this method but turn it back on |
4670 // selectively. There are yield points in this method | |
4671 // but it is difficult to turn the checking off just around | |
4672 // the yield points. It is simpler to selectively turn | |
4673 // it on. | |
4674 DEBUG_ONLY(RememberKlassesChecker mux(false);) | |
4675 | |
0 | 4676 // strategy: starting with the first card, accumulate contiguous |
4677 // ranges of dirty cards; clear these cards, then scan the region | |
4678 // covered by these cards. | |
4679 | |
4680 // Since all of the MUT is committed ahead, we can just use | |
4681 // that, in case the generations expand while we are precleaning. | |
4682 // It might also be fine to just use the committed part of the | |
4683 // generation, but we might potentially miss cards when the | |
4684 // generation is rapidly expanding while we are in the midst | |
4685 // of precleaning. | |
4686 HeapWord* startAddr = gen->reserved().start(); | |
4687 HeapWord* endAddr = gen->reserved().end(); | |
4688 | |
4689 cl->setFreelistLock(gen->freelistLock()); // needed for yielding | |
4690 | |
4691 size_t numDirtyCards, cumNumDirtyCards; | |
4692 HeapWord *nextAddr, *lastAddr; | |
4693 for (cumNumDirtyCards = numDirtyCards = 0, | |
4694 nextAddr = lastAddr = startAddr; | |
4695 nextAddr < endAddr; | |
4696 nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) { | |
4697 | |
4698 ResourceMark rm; | |
4699 HandleMark hm; | |
4700 | |
4701 MemRegion dirtyRegion; | |
4702 { | |
4703 stopTimer(); | |
935 | 4704 // Potential yield point |
0 | 4705 CMSTokenSync ts(true); |
4706 startTimer(); | |
4707 sample_eden(); | |
4708 // Get dirty region starting at nextOffset (inclusive), | |
4709 // simultaneously clearing it. | |
4710 dirtyRegion = | |
4711 _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr); | |
4712 assert(dirtyRegion.start() >= nextAddr, | |
4713 "returned region inconsistent?"); | |
4714 } | |
4715 // Remember where the next search should begin. | |
4716 // The returned region (if non-empty) is a right open interval, | |
4717 // so lastOffset is obtained from the right end of that | |
4718 // interval. | |
4719 lastAddr = dirtyRegion.end(); | |
4720 // Should do something more transparent and less hacky XXX | |
4721 numDirtyCards = | |
4722 _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size()); | |
4723 | |
4724 // We'll scan the cards in the dirty region (with periodic | |
4725 // yields for foreground GC as needed). | |
4726 if (!dirtyRegion.is_empty()) { | |
4727 assert(numDirtyCards > 0, "consistency check"); | |
4728 HeapWord* stop_point = NULL; | |
4729 stopTimer();
4730 // Potential yield point
4731 CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
4732 bitMapLock());
4733 startTimer();
0 | 4734 { |
4735 verify_work_stacks_empty(); | |
4736 verify_overflow_empty(); | |
4737 sample_eden(); | |
4738 DEBUG_ONLY(RememberKlassesChecker mx(should_unload_classes());) |
0 | 4739 stop_point = |
4740 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl); | |
4741 } | |
4742 if (stop_point != NULL) { | |
4743 // The careful iteration stopped early either because it found an | |
4744 // uninitialized object, or because we were in the midst of an | |
4745 // "abortable preclean", which should now be aborted. Redirty | |
4746 // the bits corresponding to the partially-scanned or unscanned | |
4747 // cards. We'll either restart at the next block boundary or | |
4748 // abort the preclean. | |
4749 assert((CMSPermGenPrecleaningEnabled && (gen == _permGen)) || | |
4750 (_collectorState == AbortablePreclean && should_abort_preclean()), | |
4751 "Unparsable objects should only be in perm gen."); | |
4752 _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end())); | |
4753 if (should_abort_preclean()) { | |
4754 break; // out of preclean loop | |
4755 } else { | |
4756 // Compute the next address at which preclean should pick up; | |
4757 // might need bitMapLock in order to read P-bits. | |
4758 lastAddr = next_card_start_after_block(stop_point); | |
4759 } | |
4760 } | |
4761 } else { | |
4762 assert(lastAddr == endAddr, "consistency check"); | |
4763 assert(numDirtyCards == 0, "consistency check"); | |
4764 break; | |
4765 } | |
4766 } | |
4767 verify_work_stacks_empty(); | |
4768 verify_overflow_empty(); | |
4769 return cumNumDirtyCards; | |
4770 } | |
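// ---------------------------------------------------------------------
// Illustrative sketch, not part of this file: the preclean loop above,
// reduced to its control flow. "DirtyTracker" and "scan_carefully" are
// hypothetical stand-ins for the mod union table / card table and
// object_iterate_careful_m(); only the shape of the loop mirrors the
// real code.
// ---------------------------------------------------------------------
#include <cstddef>

struct Range {
  char* start;
  char* end;
  bool is_empty() const { return start >= end; }
};

struct DirtyTracker {
  virtual Range get_and_clear(char* lo, char* hi) = 0;  // next dirty run in [lo, hi), cleared
  virtual void  mark_range(Range r) = 0;                // re-dirty a range after an early stop
  virtual ~DirtyTracker() { }
};

// Returns the cumulative number of dirty bytes processed, the analogue of
// cumNumDirtyCards above.
size_t preclean_loop(DirtyTracker& dt, char* lo, char* hi,
                     char* (*scan_carefully)(Range r),  // NULL result == scanned to completion
                     bool (*should_abort)()) {
  size_t total = 0;
  char* next = lo;
  while (next < hi) {
    Range dirty = dt.get_and_clear(next, hi);  // under the CMS token in the real code
    if (dirty.is_empty()) break;               // no dirty cards remain
    next = dirty.end;
    total += (size_t)(dirty.end - dirty.start);
    char* stop = scan_carefully(dirty);        // under freelist + bitmap locks above
    if (stop != NULL) {
      Range tail = { stop, dirty.end };
      dt.mark_range(tail);                     // redirty the unscanned tail
      if (should_abort()) break;               // abortable preclean cut short
      next = stop;                             // real code: next card boundary after the block
    }
  }
  return total;
}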
4771 | |
4772 // NOTE: preclean_mod_union_table() above and preclean_card_table() | |
4773 // below are largely identical; if you need to modify | |
4774 // one of these methods, please check the other method too. | |
4775 | |
4776 size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen, | |
4777 ScanMarkedObjectsAgainCarefullyClosure* cl) { | |
4778 // strategy: it's similar to preclean_mod_union_table above, in that |
4779 // we accumulate contiguous ranges of dirty cards, mark these cards | |
4780 // precleaned, then scan the region covered by these cards. | |
4781 HeapWord* endAddr = (HeapWord*)(gen->_virtual_space.high()); | |
4782 HeapWord* startAddr = (HeapWord*)(gen->_virtual_space.low()); | |
4783 | |
4784 cl->setFreelistLock(gen->freelistLock()); // needed for yielding | |
4785 | |
4786 size_t numDirtyCards, cumNumDirtyCards; | |
4787 HeapWord *lastAddr, *nextAddr; | |
4788 | |
4789 for (cumNumDirtyCards = numDirtyCards = 0, | |
4790 nextAddr = lastAddr = startAddr; | |
4791 nextAddr < endAddr; | |
4792 nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) { | |
4793 | |
4794 ResourceMark rm; | |
4795 HandleMark hm; | |
4796 | |
4797 MemRegion dirtyRegion; | |
4798 { | |
4799 // See comments in "Precleaning notes" above on why we | |
4800 // do this locking. XXX Could the locking overheads be | |
4801 // too high when dirty cards are sparse? [I don't think so.] | |
4802 stopTimer(); | |
4803 CMSTokenSync x(true); // is cms thread | |
4804 startTimer(); | |
4805 sample_eden(); | |
4806 // Get and clear dirty region from card table | |
342 | 4807 dirtyRegion = _ct->ct_bs()->dirty_card_range_after_reset( |
4808 MemRegion(nextAddr, endAddr), |
4809 true, |
4810 CardTableModRefBS::precleaned_card_val()); |
4811 |
0 | 4812 assert(dirtyRegion.start() >= nextAddr, |
4813 "returned region inconsistent?"); | |
4814 } | |
4815 lastAddr = dirtyRegion.end(); | |
4816 numDirtyCards = | |
4817 dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words; | |
4818 | |
4819 if (!dirtyRegion.is_empty()) { | |
4820 stopTimer(); | |
4821 CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock()); | |
4822 startTimer(); | |
4823 sample_eden(); | |
4824 verify_work_stacks_empty(); | |
4825 verify_overflow_empty(); | |
4826 DEBUG_ONLY(RememberKlassesChecker mx(should_unload_classes());) |
0 | 4827 HeapWord* stop_point = |
4828 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl); | |
4829 if (stop_point != NULL) { | |
4830 // The careful iteration stopped early because it found an | |
4831 // uninitialized object. Redirty the bits corresponding to the | |
4832 // partially-scanned or unscanned cards, and start again at the | |
4833 // next block boundary. | |
4834 assert(CMSPermGenPrecleaningEnabled || | |
4835 (_collectorState == AbortablePreclean && should_abort_preclean()), | |
4836 "Unparsable objects should only be in perm gen."); | |
4837 _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end())); | |
4838 if (should_abort_preclean()) { | |
4839 break; // out of preclean loop | |
4840 } else { | |
4841 // Compute the next address at which preclean should pick up. | |
4842 lastAddr = next_card_start_after_block(stop_point); | |
4843 } | |
4844 } | |
4845 } else { | |
4846 break; | |
4847 } | |
4848 } | |
4849 verify_work_stacks_empty(); | |
4850 verify_overflow_empty(); | |
4851 return cumNumDirtyCards; | |
4852 } | |
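// ---------------------------------------------------------------------
// Illustrative sketch, not part of this file: why numDirtyCards above is
// dirtyRegion.word_size() / card_size_in_words. Each card summarizes a
// fixed 512-byte window of the heap, so a card-aligned region maps to a
// whole number of card entries. The enum values below are invented for
// the sketch; only the clean / dirty / precleaned distinction mirrors
// CardTableModRefBS.
// ---------------------------------------------------------------------
#include <cstddef>

enum IllusCardValue { illus_clean, illus_dirty, illus_precleaned };

const size_t kBytesPerCard = 512;                  // HotSpot's card size
const size_t kWordsPerCard = kBytesPerCard / sizeof(void*);

// Number of cards covering word_size heap words (region assumed
// card-aligned, as the preclean code arranges).
size_t cards_for(size_t word_size) {
  return word_size / kWordsPerCard;
}

// Toy analogue of dirty_card_range_after_reset(): find the next dirty run
// at or after 'from' and rewrite it to precleaned while accumulating it.
// Returns the start index; *run_len == 0 means no dirty card was found.
size_t find_and_mark_precleaned(IllusCardValue* cards, size_t n,
                                size_t from, size_t* run_len) {
  size_t i = from;
  while (i < n && cards[i] != illus_dirty) i++;    // skip non-dirty cards
  size_t start = i;
  while (i < n && cards[i] == illus_dirty) {
    cards[i++] = illus_precleaned;                 // reset as we accumulate
  }
  *run_len = i - start;
  return start;
}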
4853 | |
4854 void CMSCollector::checkpointRootsFinal(bool asynch, | |
4855 bool clear_all_soft_refs, bool init_mark_was_synchronous) { | |
4856 assert(_collectorState == FinalMarking, "incorrect state transition?"); | |
4857 check_correct_thread_executing(); | |
4858 // world is stopped at this checkpoint | |
4859 assert(SafepointSynchronize::is_at_safepoint(), | |
4860 "world should be stopped"); | |
3356 | 4861 TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause()); |
4862 |
0 | 4863 verify_work_stacks_empty(); |
4864 verify_overflow_empty(); | |
4865 | |
4866 SpecializationStats::clear(); | |
4867 if (PrintGCDetails) { | |
4868 gclog_or_tty->print("[YG occupancy: "SIZE_FORMAT" K ("SIZE_FORMAT" K)]", | |
4869 _young_gen->used() / K, | |
4870 _young_gen->capacity() / K); | |
4871 } | |
4872 if (asynch) { | |
4873 if (CMSScavengeBeforeRemark) { | |
4874 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
4875 // Temporarily set the flag to false; GCH->do_collection() |
4876 // expects it to be false and will set it to true |
4877 FlagSetting fl(gch->_is_gc_active, false); | |
4878 NOT_PRODUCT(TraceTime t("Scavenge-Before-Remark", | |
4879 PrintGCDetails && Verbose, true, gclog_or_tty);) | |
4880 int level = _cmsGen->level() - 1; | |
4881 if (level >= 0) { | |
4882 gch->do_collection(true, // full (i.e. force, see below) | |
4883 false, // !clear_all_soft_refs | |
4884 0, // size | |
4885 false, // is_tlab | |
4886 level // max_level | |
4887 ); | |
4888 } | |
4889 } | |
4890 FreelistLocker x(this); | |
4891 MutexLockerEx y(bitMapLock(), | |
4892 Mutex::_no_safepoint_check_flag); | |
4893 assert(!init_mark_was_synchronous, "but that's impossible!"); | |
4894 checkpointRootsFinalWork(asynch, clear_all_soft_refs, false); | |
4895 } else { | |
4896 // already have all the locks | |
4897 checkpointRootsFinalWork(asynch, clear_all_soft_refs, | |
4898 init_mark_was_synchronous); | |
4899 } | |
4900 verify_work_stacks_empty(); | |
4901 verify_overflow_empty(); | |
4902 SpecializationStats::print(); | |
4903 } | |
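// ---------------------------------------------------------------------
// Illustrative sketch, not part of this file: the FlagSetting object in
// the scavenge-before-remark block above is a save/set/restore RAII
// guard. A minimal equivalent (the real HotSpot declaration may differ):
// ---------------------------------------------------------------------
class ScopedFlag {
  bool* _flag;
  bool  _saved;
 public:
  ScopedFlag(bool& flag, bool new_value) : _flag(&flag), _saved(flag) {
    flag = new_value;                  // takes effect for the whole scope
  }
  ~ScopedFlag() { *_flag = _saved; }   // restored even on early return
};

// Usage mirroring the code above: do_collection() expects _is_gc_active
// to be false on entry, so the flag is cleared just for the nested
// scavenge and restored afterwards:
//   { ScopedFlag fl(gc_active, false);  /* nested collection here */ }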
4904 | |
4905 void CMSCollector::checkpointRootsFinalWork(bool asynch, | |
4906 bool clear_all_soft_refs, bool init_mark_was_synchronous) { | |
4907 | |
4908 NOT_PRODUCT(TraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, gclog_or_tty);) | |
4909 | |
4910 assert(haveFreelistLocks(), "must have free list locks"); | |
4911 assert_lock_strong(bitMapLock()); | |
4912 | |
4913 if (UseAdaptiveSizePolicy) { | |
4914 size_policy()->checkpoint_roots_final_begin(); | |
4915 } | |
4916 | |
4917 ResourceMark rm; | |
4918 HandleMark hm; | |
4919 | |
4920 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
4921 | |
4922 if (should_unload_classes()) { |
0 | 4923 CodeCache::gc_prologue(); |
4924 } | |
4925 assert(haveFreelistLocks(), "must have free list locks"); | |
4926 assert_lock_strong(bitMapLock()); | |
4927 | |
4928 DEBUG_ONLY(RememberKlassesChecker fmx(should_unload_classes());) |
0 | 4929 if (!init_mark_was_synchronous) { |
4930 // We might assume that we need not fill TLAB's when | |
4931 // CMSScavengeBeforeRemark is set, because we may have just done | |
4932 // a scavenge which would have filled all TLAB's -- and besides | |
4933 // Eden would be empty. This however may not always be the case -- | |
4934 // for instance although we asked for a scavenge, it may not have | |
4935 // happened because of a JNI critical section. We probably need | |
4936 // a policy for deciding whether we can in that case wait until | |
4937 // the critical section releases and then do the remark following | |
4938 // the scavenge, and skip it here. In the absence of that policy, | |
4939 // or of an indication of whether the scavenge did indeed occur, | |
4940 // we cannot rely on TLAB's having been filled and must do | |
4941 // so here just in case a scavenge did not happen. | |
4942 gch->ensure_parsability(false); // fill TLAB's, but no need to retire them | |
4943 // Update the saved marks which may affect the root scans. | |
4944 gch->save_marks(); | |
4945 | |
4946 { | |
4947 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;) | |
4948 | |
4949 // Note on the role of the mod union table: | |
4950 // Since the marker in "markFromRoots" marks concurrently with | |
4951 // mutators, it is possible for some reachable objects not to have been | |
4952 // scanned. For instance, an only reference to an object A was | |
4953 // placed in object B after the marker scanned B. Unless B is rescanned, | |
4954 // A would be collected. Such updates to references in marked objects | |
4955 // are detected via the mod union table which is the set of all cards | |
4956 // dirtied since the first checkpoint in this GC cycle and prior to | |
4957 // the most recent young generation GC, minus those cleaned up by the | |
4958 // concurrent precleaning. | |
4959 if (CMSParallelRemarkEnabled && CollectedHeap::use_parallel_gc_threads()) { |
0 | 4960 TraceTime t("Rescan (parallel) ", PrintGCDetails, false, gclog_or_tty); |
4961 do_remark_parallel(); | |
4962 } else { | |
4963 TraceTime t("Rescan (non-parallel) ", PrintGCDetails, false, | |
4964 gclog_or_tty); | |
4965 do_remark_non_parallel(); | |
4966 } | |
4967 } | |
4968 } else { | |
4969 assert(!asynch, "Can't have init_mark_was_synchronous in asynch mode"); | |
4970 // The initial mark was stop-world, so there's no rescanning to | |
4971 // do; go straight on to the next step below. | |
4972 } | |
4973 verify_work_stacks_empty(); | |
4974 verify_overflow_empty(); | |
4975 | |
4976 { | |
4977 NOT_PRODUCT(TraceTime ts("refProcessingWork", PrintGCDetails, false, gclog_or_tty);) | |
4978 refProcessingWork(asynch, clear_all_soft_refs); | |
4979 } | |
4980 verify_work_stacks_empty(); | |
4981 verify_overflow_empty(); | |
4982 | |
4983 if (should_unload_classes()) { |
0 | 4984 CodeCache::gc_epilogue(); |
4985 } | |
4986 JvmtiExport::gc_epilogue(); |
0 | 4987 |
4988 // If we encountered any (marking stack / work queue) overflow | |
4989 // events during the current CMS cycle, take appropriate | |
4990 // remedial measures, where possible, so as to try and avoid | |
4991 // recurrence of that condition. | |
4992 assert(_markStack.isEmpty(), "No grey objects"); | |
4993 size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw + | |
4994 _ser_kac_ovflw + _ser_kac_preclean_ovflw; |
0 | 4995 if (ser_ovflw > 0) { |
4996 if (PrintCMSStatistics != 0) { | |
4997 gclog_or_tty->print_cr("Marking stack overflow (benign) " | |
4998 "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT |
4999 ", kac_preclean="SIZE_FORMAT")", |
0 | 5000 _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw, |
5001 _ser_kac_ovflw, _ser_kac_preclean_ovflw); |
0 | 5002 } |
5003 _markStack.expand(); | |
5004 _ser_pmc_remark_ovflw = 0; | |
5005 _ser_pmc_preclean_ovflw = 0; | |
5006 _ser_kac_preclean_ovflw = 0; |
0 | 5007 _ser_kac_ovflw = 0; |
5008 } | |
5009 if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) { | |
5010 if (PrintCMSStatistics != 0) { | |
5011 gclog_or_tty->print_cr("Work queue overflow (benign) " | |
5012 "(pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")", | |
5013 _par_pmc_remark_ovflw, _par_kac_ovflw); | |
5014 } | |
5015 _par_pmc_remark_ovflw = 0; | |
5016 _par_kac_ovflw = 0; | |
5017 } | |
5018 if (PrintCMSStatistics != 0) { | |
5019 if (_markStack._hit_limit > 0) { | |
5020 gclog_or_tty->print_cr(" (benign) Hit max stack size limit ("SIZE_FORMAT")", | |
5021 _markStack._hit_limit); | |
5022 } | |
5023 if (_markStack._failed_double > 0) { | |
5024 gclog_or_tty->print_cr(" (benign) Failed stack doubling ("SIZE_FORMAT")," | |
5025 " current capacity "SIZE_FORMAT, | |
5026 _markStack._failed_double, | |
5027 _markStack.capacity()); | |
5028 } | |
5029 } | |
5030 _markStack._hit_limit = 0; | |
5031 _markStack._failed_double = 0; | |
5032 | |
935 | 5033 // Check that all the klasses have been revisited |
5034 assert(_revisitStack.isEmpty(), "Not all klasses revisited"); | |
5035 | |
0 | 5036 if ((VerifyAfterGC || VerifyDuringGC) && |
5037 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { | |
5038 verify_after_remark(); | |
5039 } | |
5040 | |
5041 // Change under the freelistLocks. | |
5042 _collectorState = Sweeping; | |
5043 // Call isAllClear() under bitMapLock | |
5044 assert(_modUnionTable.isAllClear(), "Should be clear by end of the" | |
5045 " final marking"); | |
5046 if (UseAdaptiveSizePolicy) { | |
5047 size_policy()->checkpoint_roots_final_end(gch->gc_cause()); | |
5048 } | |
5049 } | |
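// ---------------------------------------------------------------------
// Illustrative sketch, not part of this file: the mod union table comment
// above describes the classic concurrent-marking race. The toy below
// makes it concrete; the single bool stands in for "the card covering b
// is dirty", not for the real table layout.
// ---------------------------------------------------------------------
#include <cstddef>

struct Obj { Obj* ref; bool marked; };

// Concurrent marker scans b before the mutator stores into it;
// at this point b->ref is still NULL, so nothing further is marked.
void marker_scans(Obj* b) {
  b->marked = true;
  if (b->ref != NULL) b->ref->marked = true;
}

// Mutator installs the only reference to a *after* b was scanned; the
// write barrier records the update by dirtying b's card.
void mutator_stores(Obj* b, Obj* a, bool* card_for_b) {
  b->ref = a;
  *card_for_b = true;     // without this, a would be missed and swept
}

// Remark rescans exactly the objects whose cards are dirty, finding a.
void remark_rescans(Obj* b, bool* card_for_b) {
  if (*card_for_b) {
    if (b->ref != NULL) b->ref->marked = true;
    *card_for_b = false;
  }
}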
5050 | |
5051 // Parallel remark task | |
5052 class CMSParRemarkTask: public AbstractGangTask { | |
5053 CMSCollector* _collector; | |
5054 int _n_workers; | |
5055 CompactibleFreeListSpace* _cms_space; | |
5056 CompactibleFreeListSpace* _perm_space; | |
5057 | |
5058 // The per-thread work queues, available here for stealing. | |
5059 OopTaskQueueSet* _task_queues; | |
5060 ParallelTaskTerminator _term; | |
5061 | |
5062 public: | |
5063 CMSParRemarkTask(CMSCollector* collector, | |
5064 CompactibleFreeListSpace* cms_space, | |
5065 CompactibleFreeListSpace* perm_space, | |
5066 int n_workers, FlexibleWorkGang* workers, |
0 | 5067 OopTaskQueueSet* task_queues): |
5068 AbstractGangTask("Rescan roots and grey objects in parallel"), | |
5069 _collector(collector), | |
5070 _cms_space(cms_space), _perm_space(perm_space), | |
5071 _n_workers(n_workers), | |
5072 _task_queues(task_queues), | |
5073 _term(n_workers, task_queues) { } |
0 | 5074 |
5075 OopTaskQueueSet* task_queues() { return _task_queues; } | |
5076 | |
5077 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); } | |
5078 | |
5079 ParallelTaskTerminator* terminator() { return &_term; } | |
5080 int n_workers() { return _n_workers; } |
0 | 5081 |
5082 void work(int i); | |
5083 | |
5084 private: | |
5085 // Work method in support of parallel rescan ... of young gen spaces | |
5086 void do_young_space_rescan(int i, Par_MarkRefsIntoAndScanClosure* cl, | |
5087 ContiguousSpace* space, | |
5088 HeapWord** chunk_array, size_t chunk_top); | |
5089 | |
5090 // ... of dirty cards in old space | |
5091 void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i, | |
5092 Par_MarkRefsIntoAndScanClosure* cl); | |
5093 | |
5094 // ... work stealing for the above | |
5095 void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed); | |
5096 }; | |
5097 | |
1833 | 5098 // work_queue(i) is passed to the closure |
5099 // Par_MarkRefsIntoAndScanClosure. The "i" parameter |
5100 // also is passed to do_dirty_card_rescan_tasks() and to |
5101 // do_work_steal() to select the i-th task_queue. |
5102 |
0 | 5103 void CMSParRemarkTask::work(int i) { |
5104 elapsedTimer _timer; | |
5105 ResourceMark rm; | |
5106 HandleMark hm; | |
5107 | |
5108 // ---------- rescan from roots -------------- | |
5109 _timer.start(); | |
5110 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
5111 Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector, | |
5112 _collector->_span, _collector->ref_processor(), | |
5113 &(_collector->_markBitMap), | |
5114 work_queue(i), &(_collector->_revisitStack)); | |
5115 | |
5116 // Rescan young gen roots first since these are likely | |
5117 // coarsely partitioned and may, on that account, constitute | |
5118 // the critical path; thus, it's best to start off that | |
5119 // work first. | |
5120 // ---------- young gen roots -------------- | |
5121 { | |
5122 DefNewGeneration* dng = _collector->_young_gen->as_DefNewGeneration(); | |
5123 EdenSpace* eden_space = dng->eden(); | |
5124 ContiguousSpace* from_space = dng->from(); | |
5125 ContiguousSpace* to_space = dng->to(); | |
5126 | |
5127 HeapWord** eca = _collector->_eden_chunk_array; | |
5128 size_t ect = _collector->_eden_chunk_index; | |
5129 HeapWord** sca = _collector->_survivor_chunk_array; | |
5130 size_t sct = _collector->_survivor_chunk_index; | |
5131 | |
5132 assert(ect <= _collector->_eden_chunk_capacity, "out of bounds"); | |
5133 assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds"); | |
5134 | |
5135 do_young_space_rescan(i, &par_mrias_cl, to_space, NULL, 0); | |
5136 do_young_space_rescan(i, &par_mrias_cl, from_space, sca, sct); | |
5137 do_young_space_rescan(i, &par_mrias_cl, eden_space, eca, ect); | |
5138 | |
5139 _timer.stop(); | |
5140 if (PrintCMSStatistics != 0) { | |
5141 gclog_or_tty->print_cr( | |
5142 "Finished young gen rescan work in %dth thread: %3.3f sec", | |
5143 i, _timer.seconds()); | |
5144 } | |
5145 } | |
5146 | |
5147 // ---------- remaining roots -------------- | |
5148 _timer.reset(); | |
5149 _timer.start(); | |
5150 gch->gen_process_strong_roots(_collector->_cmsGen->level(), | |
5151 false, // yg was scanned above | |
989 | 5152 false, // this is parallel code |
0 | 5153 true, // collecting perm gen |
5154 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()), |
5155 &par_mrias_cl, |
5156 true, // walk all of code cache if (so & SO_CodeCache) |
5157 NULL); |
5158 assert(_collector->should_unload_classes() |
5159 || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_CodeCache), |
5160 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops"); |
0 | 5161 _timer.stop(); |
5162 if (PrintCMSStatistics != 0) { | |
5163 gclog_or_tty->print_cr( | |
5164 "Finished remaining root rescan work in %dth thread: %3.3f sec", | |
5165 i, _timer.seconds()); | |
5166 } | |
5167 | |
5168 // ---------- rescan dirty cards ------------ | |
5169 _timer.reset(); | |
5170 _timer.start(); | |
5171 | |
5172 // Do the rescan tasks for each of the two spaces | |
5173 // (cms_space and perm_space) in turn. | |
5174 // "i" is passed to select the "i-th" task_queue |
0 | 5175 do_dirty_card_rescan_tasks(_cms_space, i, &par_mrias_cl); |
5176 do_dirty_card_rescan_tasks(_perm_space, i, &par_mrias_cl); | |
5177 _timer.stop(); | |
5178 if (PrintCMSStatistics != 0) { | |
5179 gclog_or_tty->print_cr( | |
5180 "Finished dirty card rescan work in %dth thread: %3.3f sec", | |
5181 i, _timer.seconds()); | |
5182 } | |
5183 | |
5184 // ---------- steal work from other threads ... | |
5185 // ---------- ... and drain overflow list. | |
5186 _timer.reset(); | |
5187 _timer.start(); | |
5188 do_work_steal(i, &par_mrias_cl, _collector->hash_seed(i)); | |
5189 _timer.stop(); | |
5190 if (PrintCMSStatistics != 0) { | |
5191 gclog_or_tty->print_cr( | |
5192 "Finished work stealing in %dth thread: %3.3f sec", | |
5193 i, _timer.seconds()); | |
5194 } | |
5195 } | |
5196 | |
5197 // Note that parameter "i" is not used. |
0 | 5198 void |
5199 CMSParRemarkTask::do_young_space_rescan(int i, | |
5200 Par_MarkRefsIntoAndScanClosure* cl, ContiguousSpace* space, | |
5201 HeapWord** chunk_array, size_t chunk_top) { | |
5202 // Until all tasks completed: | |
5203 // . claim an unclaimed task | |
5204 // . compute region boundaries corresponding to task claimed | |
5205 // using chunk_array | |
5206 // . par_oop_iterate(cl) over that region | |
5207 | |
5208 ResourceMark rm; | |
5209 HandleMark hm; | |
5210 | |
5211 SequentialSubTasksDone* pst = space->par_seq_tasks(); | |
5212 assert(pst->valid(), "Uninitialized use?"); | |
5213 | |
5214 int nth_task = 0; | |
5215 int n_tasks = pst->n_tasks(); | |
5216 | |
5217 HeapWord *start, *end; | |
5218 while (!pst->is_task_claimed(/* reference */ nth_task)) { | |
5219 // We claimed task # nth_task; compute its boundaries. | |
5220 if (chunk_top == 0) { // no samples were taken | |
5221 assert(nth_task == 0 && n_tasks == 1, "Can have only 1 EdenSpace task"); | |
5222 start = space->bottom(); | |
5223 end = space->top(); | |
5224 } else if (nth_task == 0) { | |
5225 start = space->bottom(); | |
5226 end = chunk_array[nth_task]; | |
5227 } else if (nth_task < (jint)chunk_top) { | |
5228 assert(nth_task >= 1, "Control point invariant"); | |
5229 start = chunk_array[nth_task - 1]; | |
5230 end = chunk_array[nth_task]; | |
5231 } else { | |
5232 assert(nth_task == (jint)chunk_top, "Control point invariant"); | |
5233 start = chunk_array[chunk_top - 1]; | |
5234 end = space->top(); | |
5235 } | |
5236 MemRegion mr(start, end); | |
5237 // Verify that mr is in space | |
5238 assert(mr.is_empty() || space->used_region().contains(mr), | |
5239 "Should be in space"); | |
5240 // Verify that "start" is an object boundary | |
5241 assert(mr.is_empty() || oop(mr.start())->is_oop(), | |
5242 "Should be an oop"); | |
5243 space->par_oop_iterate(mr, cl); | |
5244 } | |
5245 pst->all_tasks_completed(); | |
5246 } | |
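// ---------------------------------------------------------------------
// Illustrative sketch, not part of this file: the boundary selection
// above as a pure function. chunk[0..top) are sampled addresses splitting
// [bottom, space_top) into top + 1 tasks; task n gets a half-open range.
// ---------------------------------------------------------------------
#include <cassert>
#include <cstddef>

struct TaskSpan { char* start; char* end; };

TaskSpan task_bounds(size_t n, char* const* chunk, size_t top,
                     char* bottom, char* space_top) {
  TaskSpan s;
  if (top == 0) {                 // no samples taken: a single task
    assert(n == 0);
    s.start = bottom;  s.end = space_top;
  } else if (n == 0) {            // first task starts at the space bottom
    s.start = bottom;  s.end = chunk[0];
  } else if (n < top) {           // middle tasks lie between two samples
    s.start = chunk[n - 1];  s.end = chunk[n];
  } else {                        // last task runs to the space top
    assert(n == top);
    s.start = chunk[top - 1];  s.end = space_top;
  }
  return s;                       // may be empty if the space shrank
}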
5247 | |
5248 void | |
5249 CMSParRemarkTask::do_dirty_card_rescan_tasks( | |
5250 CompactibleFreeListSpace* sp, int i, | |
5251 Par_MarkRefsIntoAndScanClosure* cl) { | |
5252 // Until all tasks completed: | |
5253 // . claim an unclaimed task | |
5254 // . compute region boundaries corresponding to task claimed | |
5255 // . transfer dirty bits ct->mut for that region | |
5256 // . apply rescanclosure to dirty mut bits for that region | |
5257 | |
5258 ResourceMark rm; | |
5259 HandleMark hm; | |
5260 | |
5261 OopTaskQueue* work_q = work_queue(i); | |
5262 ModUnionClosure modUnionClosure(&(_collector->_modUnionTable)); | |
5263 // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! | |
5264 // CAUTION: This closure has state that persists across calls to | |
5265 // the work method dirty_range_iterate_clear() in that it has | |
5266 // embedded in it a (subtype of) UpwardsObjectClosure. The |
5267 // use of that state in the embedded UpwardsObjectClosure instance |
5268 // assumes that the cards are always iterated (even if in parallel | |
5269 // by several threads) in monotonically increasing order per each | |
5270 // thread. This is true of the implementation below which picks | |
5271 // card ranges (chunks) in monotonically increasing order globally | |
5272 // and, a-fortiori, in monotonically increasing order per thread | |
5273 // (the latter order being a subsequence of the former). | |
5274 // If the work code below is ever reorganized into a more chaotic | |
5275 // work-partitioning form than the current "sequential tasks" | |
5276 // paradigm, the use of that persistent state will have to be | |
5277 // revisited and modified appropriately. See also related | |
5278 // bug 4756801 work on which should examine this code to make | |
5279 // sure that the changes there do not run counter to the | |
5280 // assumptions made here and necessary for correctness and | |
5281 // efficiency. Note also that this code might yield inefficient | |
5282 // behaviour in the case of very large objects that span one or | |
5283 // more work chunks. Such objects would potentially be scanned | |
5284 // several times redundantly. Work on 4756801 should try and | |
5285 // address that performance anomaly if at all possible. XXX | |
5286 MemRegion full_span = _collector->_span; | |
5287 CMSBitMap* bm = &(_collector->_markBitMap); // shared | |
5288 CMSMarkStack* rs = &(_collector->_revisitStack); // shared | |
5289 MarkFromDirtyCardsClosure | |
5290 greyRescanClosure(_collector, full_span, // entire span of interest | |
5291 sp, bm, work_q, rs, cl); | |
5292 | |
5293 SequentialSubTasksDone* pst = sp->conc_par_seq_tasks(); | |
5294 assert(pst->valid(), "Uninitialized use?"); | |
5295 int nth_task = 0; | |
5296 const int alignment = CardTableModRefBS::card_size * BitsPerWord; | |
5297 MemRegion span = sp->used_region(); | |
5298 HeapWord* start_addr = span.start(); | |
5299 HeapWord* end_addr = (HeapWord*)round_to((intptr_t)span.end(), | |
5300 alignment); | |
5301 const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units | |
5302 assert((HeapWord*)round_to((intptr_t)start_addr, alignment) == | |
5303 start_addr, "Check alignment"); | |
5304 assert((size_t)round_to((intptr_t)chunk_size, alignment) == | |
5305 chunk_size, "Check alignment"); | |
5306 | |
5307 while (!pst->is_task_claimed(/* reference */ nth_task)) { | |
5308 // Having claimed the nth_task, compute corresponding mem-region, | |
5309 // which is a-fortiori aligned correctly (i.e. at a MUT boundary). |
5310 // The alignment restriction ensures that we do not need any | |
5311 // synchronization with other gang-workers while setting or | |
5312 // clearing bits in this chunk of the MUT. |
5313 MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size, | |
5314 start_addr + (nth_task+1)*chunk_size); | |
5315 // The last chunk's end might be way beyond end of the | |
5316 // used region. In that case pull back appropriately. | |
5317 if (this_span.end() > end_addr) { | |
5318 this_span.set_end(end_addr); | |
5319 assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)"); | |
5320 } | |
5321 // Iterate over the dirty cards covering this chunk, marking them | |
5322 // precleaned, and setting the corresponding bits in the mod union | |
5323 // table. Since we have been careful to partition at Card and MUT-word | |
5324 // boundaries no synchronization is needed between parallel threads. | |
5325 _collector->_ct->ct_bs()->dirty_card_iterate(this_span, | |
5326 &modUnionClosure); | |
5327 | |
5328 // Having transferred these marks into the modUnionTable, | |
5329 // rescan the marked objects on the dirty cards in the modUnionTable. | |
5330 // Even if this is at a synchronous collection, the initial marking | |
5331 // may have been done during an asynchronous collection so there | |
5332 // may be dirty bits in the mod-union table. | |
5333 _collector->_modUnionTable.dirty_range_iterate_clear( | |
5334 this_span, &greyRescanClosure); | |
5335 _collector->_modUnionTable.verifyNoOneBitsInRange( | |
5336 this_span.start(), | |
5337 this_span.end()); | |
5338 } | |
5339 pst->all_tasks_completed(); // declare that I am done |
5340 } | |
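// ---------------------------------------------------------------------
// Illustrative sketch, not part of this file: the chunk geometry used
// above. Chunks are carved at card_size * BitsPerWord boundaries so each
// worker owns whole MUT words and needs no bit-level synchronization;
// round_up below is a stand-in for HotSpot's round_to().
// ---------------------------------------------------------------------
#include <stdint.h>

const uintptr_t kCardSize    = 512;                       // bytes per card
const uintptr_t kBitsPerWord = 8 * sizeof(uintptr_t);
const uintptr_t kAlignment   = kCardSize * kBitsPerWord;  // one MUT word of cards

uintptr_t round_up(uintptr_t p, uintptr_t align) {
  return (p + align - 1) & ~(align - 1);   // align must be a power of two
}

// Half-open byte range of chunk n, clipped to the used region [start, end);
// start and chunk_bytes are assumed kAlignment-aligned, as asserted above.
void chunk_span(uintptr_t n, uintptr_t chunk_bytes,
                uintptr_t start, uintptr_t end,
                uintptr_t* lo, uintptr_t* hi) {
  *lo = start + n * chunk_bytes;
  *hi = *lo + chunk_bytes;
  if (*hi > end) *hi = end;   // the last chunk may extend past the used region
}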
5341 | |
5342 // . see if we can share work_queues with ParNew? XXX | |
5343 void | |
5344 CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, | |
5345 int* seed) { | |
5346 OopTaskQueue* work_q = work_queue(i); | |
5347 NOT_PRODUCT(int num_steals = 0;) | |
5348 oop obj_to_scan; | |
5349 CMSBitMap* bm = &(_collector->_markBitMap); | |
5350 | |
5351 while (true) { | |
5352 // Completely finish any left over work from (an) earlier round(s) | |
5353 cl->trim_queue(0); | |
679 | 5354 size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4, |
5355 (size_t)ParGCDesiredObjsFromOverflowList); |
0 | 5356 // Now check if there's any work in the overflow list |
1833 | 5357 // Passing ParallelGCThreads as the third parameter, no_of_gc_threads, |
5358 // only affects the number of attempts made to get work from the |
5359 // overflow list and does not affect the number of workers. Just |
5360 // pass ParallelGCThreads so this behavior is unchanged. |
0 | 5361 if (_collector->par_take_from_overflow_list(num_from_overflow_list, |
5362 work_q, |
5363 ParallelGCThreads)) { |
5363 ParallelGCThreads)) { |
0 | 5364 // found something in global overflow list; |
5365 // not yet ready to go stealing work from others. | |
5366 // We'd like to assert(work_q->size() != 0, ...) | |
5367 // because we just took work from the overflow list, | |
5368 // but of course we can't since all of that could have | |
5369 // been already stolen from us. | |
5370 // "He giveth and He taketh away." | |
5371 continue; | |
5372 } | |
5373 // Verify that we have no work before we resort to stealing | |
5374 assert(work_q->size() == 0, "Have work, shouldn't steal"); | |
5375 // Try to steal from other queues that have work | |
5376 if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) { | |
5377 NOT_PRODUCT(num_steals++;) | |
5378 assert(obj_to_scan->is_oop(), "Oops, not an oop!"); | |
5379 assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?"); | |
5380 // Do scanning work | |
5381 obj_to_scan->oop_iterate(cl); | |
5382 // Loop around, finish this work, and try to steal some more | |
5383 } else if (terminator()->offer_termination()) { | |
5384 break; // nirvana from the infinite cycle | |
5385 } | |
5386 } | |
5387 NOT_PRODUCT( | |
5388 if (PrintCMSStatistics != 0) { | |
5389 gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals); | |
5390 } | |
5391 ) | |
5392 assert(work_q->size() == 0 && _collector->overflow_list_is_empty(), | |
5393 "Else our work is not yet done"); | |
5394 } | |
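// ---------------------------------------------------------------------
// Illustrative sketch, not part of this file: the steal loop above follows
// a fixed priority order -- drain your own queue, then the shared overflow
// list, then other workers' queues, and only then offer termination. The
// interface below is an abstraction invented for the sketch.
// ---------------------------------------------------------------------
struct WorkSource {
  virtual void drain_local()    = 0;  // finish everything already queued locally
  virtual bool take_overflow()  = 0;  // move a batch from the global overflow list
  virtual bool steal_and_scan() = 0;  // grab one object from a victim and scan it
  virtual bool all_idle()       = 0;  // terminator consensus: every worker is done
  virtual ~WorkSource() { }
};

void steal_loop(WorkSource& w) {
  while (true) {
    w.drain_local();
    if (w.take_overflow())  continue;  // got work: not ready to steal yet
    if (w.steal_and_scan()) continue;  // scanning may enqueue more local work
    if (w.all_idle())       break;     // "nirvana from the infinite cycle"
  }                                    // else: someone is still busy, retry
}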
5395 | |
5396 // Return a thread-local PLAB recording array, as appropriate. | |
5397 void* CMSCollector::get_data_recorder(int thr_num) { | |
5398 if (_survivor_plab_array != NULL && | |
5399 (CMSPLABRecordAlways || | |
5400 (_collectorState > Marking && _collectorState < FinalMarking))) { | |
5401 assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds"); | |
5402 ChunkArray* ca = &_survivor_plab_array[thr_num]; | |
5403 ca->reset(); // clear it so that fresh data is recorded | |
5404 return (void*) ca; | |
5405 } else { | |
5406 return NULL; | |
5407 } | |
5408 } | |
5409 | |
5410 // Reset all the thread-local PLAB recording arrays | |
5411 void CMSCollector::reset_survivor_plab_arrays() { | |
5412 for (uint i = 0; i < ParallelGCThreads; i++) { | |
5413 _survivor_plab_array[i].reset(); | |
5414 } | |
5415 } | |
5416 | |
5417 // Merge the per-thread plab arrays into the global survivor chunk | |
5418 // array which will provide the partitioning of the survivor space | |
5419 // for CMS rescan. | |
1833 | 5420 void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv, |
5421 int no_of_gc_threads) { |
0 | 5422 assert(_survivor_plab_array != NULL, "Error"); |
5423 assert(_survivor_chunk_array != NULL, "Error"); | |
5424 assert(_collectorState == FinalMarking, "Error"); | |
5425 for (int j = 0; j < no_of_gc_threads; j++) { |
0 | 5426 _cursor[j] = 0; |
5427 } | |
5428 HeapWord* top = surv->top(); | |
5429 size_t i; | |
5430 for (i = 0; i < _survivor_chunk_capacity; i++) { // all sca entries | |
5431 HeapWord* min_val = top; // Higher than any PLAB address | |
5432 uint min_tid = 0; // position of min_val this round | |
5433 for (int j = 0; j < no_of_gc_threads; j++) { |
0 | 5434 ChunkArray* cur_sca = &_survivor_plab_array[j]; |
5435 if (_cursor[j] == cur_sca->end()) { | |
5436 continue; | |
5437 } | |
5438 assert(_cursor[j] < cur_sca->end(), "ctl pt invariant"); | |
5439 HeapWord* cur_val = cur_sca->nth(_cursor[j]); | |
5440 assert(surv->used_region().contains(cur_val), "Out of bounds value"); | |
5441 if (cur_val < min_val) { | |
5442 min_tid = j; | |
5443 min_val = cur_val; | |
5444 } else { | |
5445 assert(cur_val < top, "All recorded addresses should be less"); | |
5446 } | |
5447 } | |
5448 // At this point min_val and min_tid are respectively | |
5449 // the least address in _survivor_plab_array[j]->nth(_cursor[j]) | |
5450 // and the thread (j) that witnesses that address. | |
5451 // We record this address in the _survivor_chunk_array[i] | |
5452 // and increment _cursor[min_tid] prior to the next round i. | |
5453 if (min_val == top) { | |
5454 break; | |
5455 } | |
5456 _survivor_chunk_array[i] = min_val; | |
5457 _cursor[min_tid]++; | |
5458 } | |
5459 // We are all done; record the size of the _survivor_chunk_array | |
5460 _survivor_chunk_index = i; // exclusive: [0, i) | |
5461 if (PrintCMSStatistics > 0) { | |
5462 gclog_or_tty->print(" (Survivor:" SIZE_FORMAT "chunks) ", i); | |
5463 } | |
5464 // Verify that we used up all the recorded entries | |
5465 #ifdef ASSERT | |
5466 size_t total = 0; | |
5467 for (int j = 0; j < no_of_gc_threads; j++) { |
0 | 5468 assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant"); |
5469 total += _cursor[j]; | |
5470 } | |
5471 assert(total == _survivor_chunk_index, "Ctl Pt Invariant"); | |
5472 // Check that the merged array is in sorted order | |
5473 if (total > 0) { | |
5474 for (size_t i = 0; i < total - 1; i++) { | |
5475 if (PrintCMSStatistics > 0) { | |
5476 gclog_or_tty->print(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ", | |
5477 i, _survivor_chunk_array[i]); | |
5478 } | |
5479 assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1], | |
5480 "Not sorted"); | |
5481 } | |
5482 } | |
5483 #endif // ASSERT | |
5484 } | |
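// ---------------------------------------------------------------------
// Illustrative sketch, not part of this file: the merge above is a k-way
// merge of per-thread sorted arrays with one cursor per source and a
// linear min-scan per round (k is the GC thread count, small enough that
// no heap is warranted). Standalone version over vectors:
// ---------------------------------------------------------------------
#include <cstddef>
#include <vector>

std::vector<char*> merge_sorted(const std::vector<std::vector<char*> >& src) {
  size_t k = src.size();
  std::vector<size_t> cursor(k, 0);        // next unconsumed entry per source
  std::vector<char*> out;
  while (true) {
    size_t min_tid = k;                    // k means "no candidate this round"
    char* min_val = NULL;
    for (size_t j = 0; j < k; j++) {
      if (cursor[j] == src[j].size()) continue;  // source j is exhausted
      char* v = src[j][cursor[j]];
      if (min_tid == k || v < min_val) { min_tid = j; min_val = v; }
    }
    if (min_tid == k) break;               // all sources exhausted
    out.push_back(min_val);
    cursor[min_tid]++;                     // advance only the winning source
  }
  return out;   // globally sorted, like _survivor_chunk_array above
}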
5485 | |
5486 // Set up the space's par_seq_tasks structure for work claiming | |
5487 // for parallel rescan of young gen. | |
5488 // See ParRescanTask where this is currently used. | |
5489 void | |
5490 CMSCollector:: | |
5491 initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) { | |
5492 assert(n_threads > 0, "Unexpected n_threads argument"); | |
5493 DefNewGeneration* dng = (DefNewGeneration*)_young_gen; | |
5494 | |
5495 // Eden space | |
5496 { | |
5497 SequentialSubTasksDone* pst = dng->eden()->par_seq_tasks(); | |
5498 assert(!pst->valid(), "Clobbering existing data?"); | |
5499 // Each valid entry in [0, _eden_chunk_index) represents a task. | |
5500 size_t n_tasks = _eden_chunk_index + 1; | |
5501 assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error"); | |
1833 | 5502 // Sets the condition for completion of the subtask (how many threads |
5503 // need to finish in order to be done). |
5504 pst->set_n_threads(n_threads); |
0 | 5505 pst->set_n_tasks((int)n_tasks); |
5506 } | |
5507 | |
5508 // Merge the survivor plab arrays into _survivor_chunk_array | |
5509 if (_survivor_plab_array != NULL) { | |
5510 merge_survivor_plab_arrays(dng->from(), n_threads); |
0 | 5511 } else { |
5512 assert(_survivor_chunk_index == 0, "Error"); | |
5513 } | |
5514 | |
5515 // To space | |
5516 { | |
5517 SequentialSubTasksDone* pst = dng->to()->par_seq_tasks(); | |
5518 assert(!pst->valid(), "Clobbering existing data?"); | |
1833 | 5519 // Sets the condition for completion of the subtask (how many threads |
5520 // need to finish in order to be done). |
5521 pst->set_n_threads(n_threads); |
0 | 5522 pst->set_n_tasks(1); |
5523 assert(pst->valid(), "Error"); | |
5524 } | |
5525 | |
5526 // From space | |
5527 { | |
5528 SequentialSubTasksDone* pst = dng->from()->par_seq_tasks(); | |
5529 assert(!pst->valid(), "Clobbering existing data?"); | |
5530 size_t n_tasks = _survivor_chunk_index + 1; | |
5531 assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error"); | |
1833 | 5532 // Sets the condition for completion of the subtask (how many threads |
5533 // need to finish in order to be done). |
5534 pst->set_n_threads(n_threads); |
0 | 5535 pst->set_n_tasks((int)n_tasks); |
5536 assert(pst->valid(), "Error"); | |
5537 } | |
5538 } | |
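// ---------------------------------------------------------------------
// Illustrative sketch, not part of this file: the SequentialSubTasksDone
// protocol configured above, reduced to a claim counter. Workers claim
// task indices with fetch-and-add; an index at or past n_tasks means
// "nothing left". C++11 atomics stand in for HotSpot's own Atomic
// wrappers.
// ---------------------------------------------------------------------
#include <atomic>

class SeqSubTasks {
  std::atomic<int> _next;   // next unclaimed task index
  int _n_tasks;             // e.g. _eden_chunk_index + 1 above
 public:
  explicit SeqSubTasks(int n_tasks) : _next(0), _n_tasks(n_tasks) { }
  // Mirrors the inverted sense of is_task_claimed() in the loops above:
  // returns false when *task holds a freshly claimed task.
  bool is_task_claimed(int* task) {
    *task = _next.fetch_add(1, std::memory_order_relaxed);
    return *task >= _n_tasks;
  }
};

// Worker loop shape, as in do_young_space_rescan():
//   int nth;
//   while (!pst.is_task_claimed(&nth)) { /* process task nth */ }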
5539 | |
5540 // Parallel version of remark | |
5541 void CMSCollector::do_remark_parallel() { | |
5542 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
5543 FlexibleWorkGang* workers = gch->workers(); |
0 | 5544 assert(workers != NULL, "Need parallel worker threads."); |
5545 int n_workers = workers->total_workers(); | |
5546 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace(); | |
5547 CompactibleFreeListSpace* perm_space = _permGen->cmsSpace(); | |
5548 | |
5549 CMSParRemarkTask tsk(this, | |
5550 cms_space, perm_space, | |
5551 n_workers, workers, task_queues()); | |
5552 | |
5553 // Set up for parallel process_strong_roots work. | |
5554 gch->set_par_threads(n_workers); | |
5555 // We won't be iterating over the cards in the card table updating | |
5556 // the younger_gen cards, so we shouldn't call the following else | |
5557 // the verification code as well as subsequent younger_refs_iterate | |
5558 // code would get confused. XXX | |
5559 // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel | |
5560 | |
5561 // The young gen rescan work will not be done as part of | |
5562 // process_strong_roots (which currently doesn't know how to |
5563 // parallelize such a scan), but rather will be broken up into | |
5564 // a set of parallel tasks (via the sampling that the [abortable] | |
5565 // preclean phase did of EdenSpace, plus the [two] tasks of | |
5566 // scanning the [two] survivor spaces). Further fine-grain |
5567 // parallelization of the scanning of the survivor spaces | |
5568 // themselves, and of precleaning of the younger gen itself | |
5569 // is deferred to the future. | |
5570 initialize_sequential_subtasks_for_young_gen_rescan(n_workers); | |
5571 | |
5572 // The dirty card rescan work is broken up into a "sequence" | |
5573 // of parallel tasks (per constituent space) that are dynamically | |
5574 // claimed by the parallel threads. | |
5575 cms_space->initialize_sequential_subtasks_for_rescan(n_workers); | |
5576 perm_space->initialize_sequential_subtasks_for_rescan(n_workers); | |
5577 | |
5578 // It turns out that even when we're using 1 thread, doing the work in a | |
5579 // separate thread causes wide variance in run times. We can't help this | |
5580 // in the multi-threaded case, but we special-case n=1 here to get | |
5581 // repeatable measurements of the 1-thread overhead of the parallel code. | |
5582 if (n_workers > 1) { | |
2369 | 5583 // Make refs discovery MT-safe, if it isn't already: it may not |
5584 // necessarily be so, since it's possible that we are doing |
5585 // ST marking. |
5586 ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true); |
989 | 5587 GenCollectedHeap::StrongRootsScope srs(gch); |
0 | 5588 workers->run_task(&tsk); |
5589 } else { |
5590 GenCollectedHeap::StrongRootsScope srs(gch); |
0 | 5591 tsk.work(0); |
5592 } | |
5593 gch->set_par_threads(0); // 0 ==> non-parallel. | |
5594 // restore, single-threaded for now, any preserved marks | |
5595 // as a result of work_q overflow | |
5596 restore_preserved_marks_if_any(); | |
5597 } | |
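// ---------------------------------------------------------------------
// Illustrative sketch, not part of this file: the n_workers > 1 split
// above keeps single-threaded runs measurable by executing the task body
// inline rather than handing it to a worker thread. Shape of that
// dispatch, with GangTask / Gang as invented stand-ins:
// ---------------------------------------------------------------------
struct GangTask { virtual void work(int worker_id) = 0; virtual ~GangTask() { } };
struct Gang     { virtual void run_task(GangTask* t) = 0; virtual ~Gang() { } };

void run_possibly_parallel(Gang* workers, GangTask& tsk, int n_workers) {
  if (n_workers > 1) {
    workers->run_task(&tsk);  // fan out to the gang: adds hand-off overhead
  } else {
    tsk.work(0);              // run inline: repeatable one-thread timings
  }
}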
5598 | |
5599 // Non-parallel version of remark | |
5600 void CMSCollector::do_remark_non_parallel() { | |
5601 ResourceMark rm; | |
5602 HandleMark hm; | |
5603 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
5604 MarkRefsIntoAndScanClosure | |
5605 mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable, | |
5606 &_markStack, &_revisitStack, this, | |
5607 false /* should_yield */, false /* not precleaning */); | |
5608 MarkFromDirtyCardsClosure | |
5609 markFromDirtyCardsClosure(this, _span, | |
5610 NULL, // space is set further below | |
5611 &_markBitMap, &_markStack, &_revisitStack, | |
5612 &mrias_cl); | |
5613 { | |
5614 TraceTime t("grey object rescan", PrintGCDetails, false, gclog_or_tty); | |
342 | 5615 // Iterate over the dirty cards, setting the corresponding bits in the |
5616 // mod union table. |
0 | 5617 { |
5618 ModUnionClosure modUnionClosure(&_modUnionTable); | |
5619 _ct->ct_bs()->dirty_card_iterate( | |
5620 _cmsGen->used_region(), | |
5621 &modUnionClosure); | |
5622 _ct->ct_bs()->dirty_card_iterate( | |
5623 _permGen->used_region(), | |
5624 &modUnionClosure); | |
5625 } | |
5626 // Having transferred these marks into the modUnionTable, we just need | |
5627 // to rescan the marked objects on the dirty cards in the modUnionTable. | |
5628 // The initial marking may have been done during an asynchronous | |
5629 // collection so there may be dirty bits in the mod-union table. | |
5630 const int alignment = | |
5631 CardTableModRefBS::card_size * BitsPerWord; | |
5632 { | |
5633 // ... First handle dirty cards in CMS gen | |
5634 markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace()); | |
5635 MemRegion ur = _cmsGen->used_region(); | |
5636 HeapWord* lb = ur.start(); | |
5637 HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment); | |
5638 MemRegion cms_span(lb, ub); | |
5639 _modUnionTable.dirty_range_iterate_clear(cms_span, | |
5640 &markFromDirtyCardsClosure); | |
5641 verify_work_stacks_empty(); | |
5642 if (PrintCMSStatistics != 0) { | |
5643 gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in cms gen) ", | |
5644 markFromDirtyCardsClosure.num_dirty_cards()); | |
5645 } | |
5646 } | |
5647 { | |
5648 // .. and then repeat for dirty cards in perm gen | |
5649 markFromDirtyCardsClosure.set_space(_permGen->cmsSpace()); | |
5650 MemRegion ur = _permGen->used_region(); | |
5651 HeapWord* lb = ur.start(); | |
5652 HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment); | |
5653 MemRegion perm_span(lb, ub); | |
5654 _modUnionTable.dirty_range_iterate_clear(perm_span, | |
5655 &markFromDirtyCardsClosure); | |
5656 verify_work_stacks_empty(); | |
5657 if (PrintCMSStatistics != 0) { | |
5658 gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in perm gen) ", | |
5659 markFromDirtyCardsClosure.num_dirty_cards()); | |
5660 } | |
5661 } | |
5662 } | |
5663 if (VerifyDuringGC && | |
5664 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { | |
5665 HandleMark hm; // Discard invalid handles created during verification | |
5666 Universe::verify(true); | |
5667 } | |
5668 { | |
5669 TraceTime t("root rescan", PrintGCDetails, false, gclog_or_tty); | |
5670 | |
5671 verify_work_stacks_empty(); | |
5672 | |
5673 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. | |
5674 GenCollectedHeap::StrongRootsScope srs(gch); |
0 | 5675 gch->gen_process_strong_roots(_cmsGen->level(), |
5676 true, // younger gens as roots | |
5677 false, // use the local StrongRootsScope |
0 | 5678 true, // collecting perm gen |
5679 SharedHeap::ScanningOption(roots_scanning_options()), | |
989 | 5680 &mrias_cl, |
5681 true, // walk code active on stacks |
5682 NULL); |
5683 assert(should_unload_classes() |
5684 || (roots_scanning_options() & SharedHeap::SO_CodeCache), |
5685 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops"); |
0 | 5686 } |
5687 verify_work_stacks_empty(); | |
5688 // Restore evacuated mark words, if any, used for overflow list links | |
5689 if (!CMSOverflowEarlyRestoration) { | |
5690 restore_preserved_marks_if_any(); | |
5691 } | |
5692 verify_overflow_empty(); | |
5693 } | |
5694 | |
5695 //////////////////////////////////////////////////////// | |
5696 // Parallel Reference Processing Task Proxy Class | |
5697 //////////////////////////////////////////////////////// | |
5698 class CMSRefProcTaskProxy: public AbstractGangTaskWOopQueues { |
0 | 5699 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask; |
5700 CMSCollector* _collector; | |
5701 CMSBitMap* _mark_bit_map; | |
5702 const MemRegion _span; |
0 | 5703 ProcessTask& _task; |
5704 | |
5705 public: | |
5706 CMSRefProcTaskProxy(ProcessTask& task, | |
5707 CMSCollector* collector, | |
5708 const MemRegion& span, | |
5709 CMSBitMap* mark_bit_map, | |
5710 AbstractWorkGang* workers, |
0 | 5711 OopTaskQueueSet* task_queues): |
2369 | 5712 // XXX Should superclass AGTWOQ also know about AWG since it knows |
5713 // about the task_queues used by the AWG? Then it could initialize |
5714 // the terminator() object. See 6984287. The set_for_termination() |
5715 // below is a temporary band-aid for the regression in 6984287. |
1833 | 5716 AbstractGangTaskWOopQueues("Process referents by policy in parallel", |
5717 task_queues), |
0 | 5718 _task(task), |
1833 | 5719 _collector(collector), _span(span), _mark_bit_map(mark_bit_map) |
2369 | 5720 { |
5721 assert(_collector->_span.equals(_span) && !_span.is_empty(), |
5722 "Inconsistency in _span"); |
5723 set_for_termination(workers->active_workers()); |
5724 } |
0 | 5725 |
5726 OopTaskQueueSet* task_queues() { return queues(); } |
0 | 5727 |
5728 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); } | |
5729 | |
5730 void do_work_steal(int i, | |
5731 CMSParDrainMarkingStackClosure* drain, | |
5732 CMSParKeepAliveClosure* keep_alive, | |
5733 int* seed); | |
5734 | |
5735 virtual void work(int i); | |
5736 }; | |
5737 | |
5738 void CMSRefProcTaskProxy::work(int i) { | |
5739 assert(_collector->_span.equals(_span), "Inconsistency in _span"); |
0 | 5740 CMSParKeepAliveClosure par_keep_alive(_collector, _span, |
935 | 5741 _mark_bit_map, |
5742 &_collector->_revisitStack, | |
5743 work_queue(i)); | |
0 | 5744 CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span, |
935 | 5745 _mark_bit_map, |
5746 &_collector->_revisitStack, | |
5747 work_queue(i)); | |
5748 CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map); |
0 | 5749 _task.work(i, is_alive_closure, par_keep_alive, par_drain_stack); |
5750 if (_task.marks_oops_alive()) { | |
5751 do_work_steal(i, &par_drain_stack, &par_keep_alive, | |
5752 _collector->hash_seed(i)); | |
5753 } | |
5754 assert(work_queue(i)->size() == 0, "work_queue should be empty"); | |
5755 assert(_collector->_overflow_list == NULL, "non-empty _overflow_list"); | |
5756 } | |
5757 | |
5758 class CMSRefEnqueueTaskProxy: public AbstractGangTask { | |
5759 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask; | |
5760 EnqueueTask& _task; | |
5761 | |
5762 public: | |
5763 CMSRefEnqueueTaskProxy(EnqueueTask& task) | |
5764 : AbstractGangTask("Enqueue reference objects in parallel"), | |
5765 _task(task) | |
5766 { } | |
5767 | |
5768 virtual void work(int i) | |
5769 { | |
5770 _task.work(i); | |
5771 } | |
5772 }; | |
5773 | |
5774 CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector, | |
935 | 5775 MemRegion span, CMSBitMap* bit_map, CMSMarkStack* revisit_stack, |
5776 OopTaskQueue* work_queue): | |
5777 Par_KlassRememberingOopClosure(collector, NULL, revisit_stack), | |
0 | 5778 _span(span), |
5779 _bit_map(bit_map), | |
5780 _work_queue(work_queue), | |
935 | 5781 _mark_and_push(collector, span, bit_map, revisit_stack, work_queue), |
0 | 5782 _low_water_mark(MIN2((uint)(work_queue->max_elems()/4), |
5783 (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))) | |
5784 { } | |
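// [Annotator note, not in the original source] The low-water mark above
// bounds how far the queue is drained before control returns to the
// caller: with the flag defaults of this era (CMSWorkQueueDrainThreshold
// = 10) and ParallelGCThreads = 4, the mark is MIN2(max_elems/4, 40),
// i.e. 40 entries for any queue with a capacity of 160 or more.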
5785 | |
5786 // . see if we can share work_queues with ParNew? XXX | |
5787 void CMSRefProcTaskProxy::do_work_steal(int i, | |
5788 CMSParDrainMarkingStackClosure* drain, | |
5789 CMSParKeepAliveClosure* keep_alive, | |
5790 int* seed) { | |
5791 OopTaskQueue* work_q = work_queue(i); | |
5792 NOT_PRODUCT(int num_steals = 0;) | |
5793 oop obj_to_scan; | |
5794 | |
5795 while (true) { | |
5796 // Completely finish any left over work from (an) earlier round(s) | |
5797 drain->trim_queue(0); | |
5798 size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4, |
5799 (size_t)ParGCDesiredObjsFromOverflowList); |
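// [Annotator note, not in the original source] A worked example of the
// refill bound above: with an empty local queue of capacity 16K entries,
// (16384 - 0)/4 = 4096, so the MIN2 is decided by
// ParGCDesiredObjsFromOverflowList (20 by default in this vintage of
// HotSpot); refills are kept deliberately small so that other workers
// can still find shareable work on the overflow list.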
0 | 5800 // Now check if there's any work in the overflow list |
5801 // Passing ParallelGCThreads as the third parameter, no_of_gc_threads, |
5802 // only affects the number of attempts made to get work from the |
5803 // overflow list and does not affect the number of workers. Just |
5804 // pass ParallelGCThreads so this behavior is unchanged. |
0 | 5805 if (_collector->par_take_from_overflow_list(num_from_overflow_list, |
5806 work_q, |
5807 ParallelGCThreads)) { |
0 | 5808 // Found something in global overflow list; |
5809 // not yet ready to go stealing work from others. | |
5810 // We'd like to assert(work_q->size() != 0, ...) | |
5811 // because we just took work from the overflow list, | |
5812 // but of course we can't, since all of that might have | |
5813 // been already stolen from us. | |
5814 continue; | |
5815 } | |
5816 // Verify that we have no work before we resort to stealing | |
5817 assert(work_q->size() == 0, "Have work, shouldn't steal"); | |
5818 // Try to steal from other queues that have work | |
5819 if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) { | |
5820 NOT_PRODUCT(num_steals++;) | |
5821 assert(obj_to_scan->is_oop(), "Oops, not an oop!"); | |
5822 assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?"); | |
5823 // Do scanning work | |
5824 obj_to_scan->oop_iterate(keep_alive); | |
5825 // Loop around, finish this work, and try to steal some more | |
5826 } else if (terminator()->offer_termination()) { | |
5827 break; // nirvana from the infinite cycle | |
5828 } | |
5829 } | |
5830 NOT_PRODUCT( | |
5831 if (PrintCMSStatistics != 0) { | |
5832 gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals); | |
5833 } | |
5834 ) | |
5835 } | |
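// [Annotator note, not in the original source] The loop above implements
// the usual three-level work-finding protocol for parallel reference
// processing:
//   1. drain the local queue completely (trim_queue(0));
//   2. refill in small batches from the global overflow list;
//   3. steal from a peer's queue, chosen via the per-thread seed;
// and only when all three come up empty does the worker offer
// termination, which succeeds once every worker has done the same.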
5836 | |
5837 void CMSRefProcTaskExecutor::execute(ProcessTask& task) | |
5838 { | |
5839 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
5840 FlexibleWorkGang* workers = gch->workers(); |
0 | 5841 assert(workers != NULL, "Need parallel worker threads."); |
5842 CMSRefProcTaskProxy rp_task(task, &_collector, | |
5843 _collector.ref_processor()->span(), | |
5844 _collector.markBitMap(), | |
5845 workers, _collector.task_queues()); |
0 | 5846 workers->run_task(&rp_task); |
5847 } | |
5848 | |
5849 void CMSRefProcTaskExecutor::execute(EnqueueTask& task) | |
5850 { | |
5851 | |
5852 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
5853 FlexibleWorkGang* workers = gch->workers(); |
0 | 5854 assert(workers != NULL, "Need parallel worker threads."); |
5855 CMSRefEnqueueTaskProxy enq_task(task); | |
5856 workers->run_task(&enq_task); | |
5857 } | |
5858 | |
5859 void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) { | |
5860 | |
5861 ResourceMark rm; | |
5862 HandleMark hm; | |
5863 | |
5864 ReferenceProcessor* rp = ref_processor(); | |
5865 assert(rp->span().equals(_span), "Spans should be equal"); | |
5866 assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete"); |
5867 // Process weak references. |
5868 rp->setup_policy(clear_all_soft_refs); |
5869 verify_work_stacks_empty(); |
5870 |
0 | 5871 CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap, |
935 | 5872 &_markStack, &_revisitStack, |
5873 false /* !preclean */); | |
0 | 5874 CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this, |
5875 _span, &_markBitMap, &_markStack, | |
5876 &cmsKeepAliveClosure, false /* !preclean */); |
0 | 5877 { |
5878 TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty); | |
5879 if (rp->processing_is_mt()) { | |
5880 // Set the degree of MT here. If the discovery is done MT, there |
5881 // may have been a different number of threads doing the discovery |
5882 // and a different number of discovered lists may have Ref objects. |
5883 // That is OK as long as the Reference lists are balanced (see |
5884 // balance_all_queues() and balance_queues()). |
5885 |
5886 rp->set_active_mt_degree(ParallelGCThreads); |
0 | 5887 CMSRefProcTaskExecutor task_executor(*this); |
5888 rp->process_discovered_references(&_is_alive_closure, |
0 | 5889 &cmsKeepAliveClosure, |
5890 &cmsDrainMarkingStackClosure, | |
5891 &task_executor); | |
5892 } else { | |
5893 rp->process_discovered_references(&_is_alive_closure, |
0 | 5894 &cmsKeepAliveClosure, |
5895 &cmsDrainMarkingStackClosure, | |
5896 NULL); | |
5897 } | |
5898 verify_work_stacks_empty(); | |
5899 } | |
5900 | |
5901 if (should_unload_classes()) { |
0 | 5902 { |
5903 TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty); | |
5904 | |
5905 // Follow SystemDictionary roots and unload classes | |
5906 bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure); | |
5907 | |
5908 // Follow CodeCache roots and unload any methods marked for unloading | |
5909 CodeCache::do_unloading(&_is_alive_closure, | |
5910 &cmsKeepAliveClosure, | |
5911 purged_class); | |
5912 | |
5913 cmsDrainMarkingStackClosure.do_void(); | |
5914 verify_work_stacks_empty(); | |
5915 | |
5916 // Update subklass/sibling/implementor links in KlassKlass descendants | |
5917 assert(!_revisitStack.isEmpty(), "revisit stack should not be empty"); | |
5918 oop k; | |
5919 while ((k = _revisitStack.pop()) != NULL) { | |
5920 ((Klass*)(oopDesc*)k)->follow_weak_klass_links( | |
5921 &_is_alive_closure, | |
5922 &cmsKeepAliveClosure); | |
5923 } | |
5924 assert(!ClassUnloading || | |
5925 (_markStack.isEmpty() && overflow_list_is_empty()), | |
5926 "Should not have found new reachable objects"); | |
5927 assert(_revisitStack.isEmpty(), "revisit stack should have been drained"); | |
5928 cmsDrainMarkingStackClosure.do_void(); | |
5929 verify_work_stacks_empty(); | |
5930 } | |
5931 | |
5932 { | |
5933 TraceTime t("scrub symbol table", PrintGCDetails, false, gclog_or_tty); |
5934 // Clean up unreferenced symbols in symbol table. |
5935 SymbolTable::unlink(); |
0 | 5936 } |
5937 } | |
5938 | |
5939 if (should_unload_classes() || !JavaObjectsInPerm) { |
5940 TraceTime t("scrub string table", PrintGCDetails, false, gclog_or_tty); |
5941 // Now clean up stale oops in StringTable |
5942 StringTable::unlink(&_is_alive_closure); |
5943 } |
5944 |
0 | 5945 verify_work_stacks_empty(); |
5946 // Restore any preserved marks as a result of mark stack or | |
5947 // work queue overflow | |
5948 restore_preserved_marks_if_any(); // done single-threaded for now | |
5949 | |
5950 rp->set_enqueuing_is_done(true); | |
5951 if (rp->processing_is_mt()) { | |
5952 rp->balance_all_queues(); |
0 | 5953 CMSRefProcTaskExecutor task_executor(*this); |
5954 rp->enqueue_discovered_references(&task_executor); | |
5955 } else { | |
5956 rp->enqueue_discovered_references(NULL); | |
5957 } | |
5958 rp->verify_no_references_recorded(); | |
5959 assert(!rp->discovery_enabled(), "should have been disabled"); | |
5960 } | |
5961 | |
5962 #ifndef PRODUCT | |
5963 void CMSCollector::check_correct_thread_executing() { | |
5964 Thread* t = Thread::current(); | |
5965 // Only the VM thread or the CMS thread should be here. | |
5966 assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(), | |
5967 "Unexpected thread type"); | |
5968 // If this is the vm thread, the foreground process | |
5969 // should not be waiting. Note that _foregroundGCIsActive is | |
5970 // true while the foreground collector is waiting. | |
5971 if (_foregroundGCShouldWait) { | |
5972 // We cannot be the VM thread | |
5973 assert(t->is_ConcurrentGC_thread(), | |
5974 "Should be CMS thread"); | |
5975 } else { | |
5976 // We can be the CMS thread only if we are in a stop-world | |
5977 // phase of CMS collection. | |
5978 if (t->is_ConcurrentGC_thread()) { | |
5979 assert(_collectorState == InitialMarking || | |
5980 _collectorState == FinalMarking, | |
5981 "Should be a stop-world phase"); | |
5982 // The CMS thread should be holding the CMS_token. | |
5983 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), | |
5984 "Potential interference with concurrently " | |
5985 "executing VM thread"); | |
5986 } | |
5987 } | |
5988 } | |
5989 #endif | |
5990 | |
5991 void CMSCollector::sweep(bool asynch) { | |
5992 assert(_collectorState == Sweeping, "just checking"); | |
5993 check_correct_thread_executing(); | |
5994 verify_work_stacks_empty(); | |
5995 verify_overflow_empty(); | |
5996 increment_sweep_count(); |
5997 TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause()); |
5998 |
5999 _inter_sweep_timer.stop(); |
6000 _inter_sweep_estimate.sample(_inter_sweep_timer.seconds()); |
0 | 6001 size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free()); |
6002 | |
6003 // PermGen verification support: If perm gen sweeping is disabled in | |
6004 // this cycle, we preserve the perm gen object "deadness" information | |
6005 // in the perm_gen_verify_bit_map. In order to do that we traverse | |
6006 // all blocks in perm gen and mark all dead objects. | |
6007 if (verifying() && !should_unload_classes()) { |
0 | 6008 assert(perm_gen_verify_bit_map()->sizeInBits() != 0, |
6009 "Should have already been allocated"); | |
6010 MarkDeadObjectsClosure mdo(this, _permGen->cmsSpace(), | |
6011 markBitMap(), perm_gen_verify_bit_map()); | |
6012 if (asynch) { |
6013 CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(), |
6014 bitMapLock()); |
6015 _permGen->cmsSpace()->blk_iterate(&mdo); |
6016 } else { |
6017 // In the case of synchronous sweep, we already have |
6018 // the requisite locks/tokens. |
6019 _permGen->cmsSpace()->blk_iterate(&mdo); |
6020 } |
0 | 6021 } |
6022 | |
6023 assert(!_intra_sweep_timer.is_active(), "Should not be active"); |
6024 _intra_sweep_timer.reset(); |
6025 _intra_sweep_timer.start(); |
0 | 6026 if (asynch) { |
6027 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); | |
6028 CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails); | |
6029 // First sweep the old gen then the perm gen | |
6030 { | |
6031 CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(), | |
6032 bitMapLock()); | |
6033 sweepWork(_cmsGen, asynch); | |
6034 } | |
6035 | |
6036 // Now repeat for perm gen | |
6037 if (should_unload_classes()) { |
0 | 6038 CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(), |
6039 bitMapLock()); | |
6040 sweepWork(_permGen, asynch); | |
6041 } | |
6042 | |
6043 // Update Universe::_heap_*_at_gc figures. | |
6044 // We need all the free list locks to make the abstract state | |
6045 // transition from Sweeping to Resetting. See detailed note | |
6046 // further below. | |
6047 { | |
6048 CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(), | |
6049 _permGen->freelistLock()); | |
6050 // Update heap occupancy information which is used as | |
6051 // input to soft ref clearing policy at the next gc. | |
6052 Universe::update_heap_info_at_gc(); | |
6053 _collectorState = Resizing; | |
6054 } | |
6055 } else { | |
6056 // already have needed locks | |
6057 sweepWork(_cmsGen, asynch); | |
6058 | |
6059 if (should_unload_classes()) { |
0 | 6060 sweepWork(_permGen, asynch); |
6061 } | |
6062 // Update heap occupancy information which is used as | |
6063 // input to soft ref clearing policy at the next gc. | |
6064 Universe::update_heap_info_at_gc(); | |
6065 _collectorState = Resizing; | |
6066 } | |
6067 verify_work_stacks_empty(); | |
6068 verify_overflow_empty(); | |
6069 | |
6070 _intra_sweep_timer.stop(); |
6071 _intra_sweep_estimate.sample(_intra_sweep_timer.seconds()); |
6072 |
6073 _inter_sweep_timer.reset(); |
6074 _inter_sweep_timer.start(); |
0 | 6075 |
6076 update_time_of_last_gc(os::javaTimeMillis()); | |
6077 | |
6078 // NOTE on abstract state transitions: | |
6079 // Mutators allocate-live and/or mark the mod-union table dirty | |
6080 // based on the state of the collection. The former is done in | |
6081 // the interval [Marking, Sweeping] and the latter in the interval | |
6082 // [Marking, Sweeping). Thus the transitions into the Marking state | |
6083 // and out of the Sweeping state must be synchronously visible | |
6084 // globally to the mutators. | |
6085 // The transition into the Marking state happens with the world | |
6086 // stopped so the mutators will globally see it. Sweeping is | |
6087 // done asynchronously by the background collector so the transition | |
6088 // from the Sweeping state to the Resizing state must be done | |
6089 // under the freelistLock (as is the check for whether to | |
6090 // allocate-live and whether to dirty the mod-union table). | |
6091 assert(_collectorState == Resizing, "Change of collector state to" | |
6092 " Resizing must be done under the freelistLocks (plural)"); | |
6093 | |
6094 // Now that sweeping has been completed, we clear |
6095 // the incremental_collection_failed flag, |
0 | 6096 // thus inviting a younger gen collection to promote into |
6097 // this generation. If such a promotion may still fail, | |
6098 // the flag will be set again when a young collection is | |
6099 // attempted. | |
6100 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
6101 gch->clear_incremental_collection_failed(); // Worth retrying as fresh space may have been freed up |
0 | 6102 gch->update_full_collections_completed(_collection_count_start); |
6103 } | |
6104 | |
6105 // FIX ME!!! Looks like this belongs in CFLSpace, with | |
6106 // CMSGen merely delegating to it. | |
6107 void ConcurrentMarkSweepGeneration::setNearLargestChunk() { | |
6108 double nearLargestPercent = FLSLargestBlockCoalesceProximity; |
0 | 6109 HeapWord* minAddr = _cmsSpace->bottom(); |
6110 HeapWord* largestAddr = | |
6111 (HeapWord*) _cmsSpace->dictionary()->findLargestDict(); | |
6112 if (largestAddr == NULL) { |
0 | 6113 // The dictionary appears to be empty. In this case |
6114 // try to coalesce at the end of the heap. | |
6115 largestAddr = _cmsSpace->end(); | |
6116 } | |
6117 size_t largestOffset = pointer_delta(largestAddr, minAddr); | |
6118 size_t nearLargestOffset = | |
6119 (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize; | |
6120 if (PrintFLSStatistics != 0) { |
6121 gclog_or_tty->print_cr( |
6122 "CMS: Large Block: " PTR_FORMAT ";" |
6123 " Proximity: " PTR_FORMAT " -> " PTR_FORMAT, |
6124 largestAddr, |
6125 _cmsSpace->nearLargestChunk(), minAddr + nearLargestOffset); |
6126 } |
0 | 6127 _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset); |
6128 } | |
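// [Annotator note, not in the original source] A worked example of the
// computation above, assuming the default FLSLargestBlockCoalesceProximity
// of 0.99: if the largest dictionary block starts 1000000 heap words past
// bottom(), sweeping coalesces aggressively from roughly word
// 990000 - MinChunkSize onward, i.e. only in the last ~1% of the span
// leading up to (and beyond) that block.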
6129 | |
6130 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) { | |
6131 return addr >= _cmsSpace->nearLargestChunk(); | |
6132 } | |
6133 | |
6134 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() { | |
6135 return _cmsSpace->find_chunk_at_end(); | |
6136 } | |
6137 | |
6138 void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level, | |
6139 bool full) { | |
6140 // The next lower level has been collected. Gather any statistics | |
6141 // that are of interest at this point. | |
6142 if (!full && (current_level + 1) == level()) { | |
6143 // Gather statistics on the young generation collection. | |
6144 collector()->stats().record_gc0_end(used()); | |
6145 } | |
6146 } | |
6147 | |
6148 CMSAdaptiveSizePolicy* ConcurrentMarkSweepGeneration::size_policy() { | |
6149 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
6150 assert(gch->kind() == CollectedHeap::GenCollectedHeap, | |
6151 "Wrong type of heap"); | |
6152 CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*) | |
6153 gch->gen_policy()->size_policy(); | |
6154 assert(sp->is_gc_cms_adaptive_size_policy(), | |
6155 "Wrong type of size policy"); | |
6156 return sp; | |
6157 } | |
6158 | |
6159 void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() { | |
6160 if (PrintGCDetails && Verbose) { | |
6161 gclog_or_tty->print("Rotate from %d ", _debug_collection_type); | |
6162 } | |
6163 _debug_collection_type = (CollectionTypes) (_debug_collection_type + 1); | |
6164 _debug_collection_type = | |
6165 (CollectionTypes) (_debug_collection_type % Unknown_collection_type); | |
6166 if (PrintGCDetails && Verbose) { | |
6167 gclog_or_tty->print_cr("to %d ", _debug_collection_type); | |
6168 } | |
6169 } | |
6170 | |
6171 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen, | |
6172 bool asynch) { | |
6173 // We iterate over the space(s) underlying this generation, | |
6174 // checking the mark bit map to see if the bits corresponding | |
6175 // to specific blocks are marked or not. Blocks that are | |
6176 // marked are live and are not swept up. All remaining blocks | |
6177 // are swept up, with coalescing on-the-fly as we sweep up | |
6178 // contiguous free and/or garbage blocks: | |
6179 // We need to ensure that the sweeper synchronizes with allocators | |
6180 // and stop-the-world collectors. In particular, the following | |
6181 // locks are used: | |
6182 // . CMS token: if this is held, a stop the world collection cannot occur | |
6183 // . freelistLock: if this is held no allocation can occur from this | |
6184 // generation by another thread | |
6185 // . bitMapLock: if this is held, no other thread can access or update |
6186 // the marking bit map |
6187 | |
6188 // Note that we need to hold the freelistLock if we use | |
6189 // block iterate below; else the iterator might go awry if | |
6190 // a mutator (or promotion) causes block contents to change | |
6191 // (for instance if the allocator divvies up a block). | |
6192 // If we hold the free list lock, for all practical purposes | |
6193 // young generation GC's can't occur (they'll usually need to | |
6194 // promote), so we might as well prevent all young generation | |
6195 // GC's while we do a sweeping step. For the same reason, we might | |
6196 // as well take the bit map lock for the entire duration | |
6197 | |
6198 // check that we hold the requisite locks | |
6199 assert(have_cms_token(), "Should hold cms token"); | |
6200 assert( (asynch && ConcurrentMarkSweepThread::cms_thread_has_cms_token()) | |
6201 || (!asynch && ConcurrentMarkSweepThread::vm_thread_has_cms_token()), | |
6202 "Should possess CMS token to sweep"); | |
6203 assert_lock_strong(gen->freelistLock()); | |
6204 assert_lock_strong(bitMapLock()); | |
6205 | |
6206 assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context"); |
6207 assert(_intra_sweep_timer.is_active(), "Was switched on in an outer context"); |
6208 gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()), |
6209 _inter_sweep_estimate.padded_average(), |
6210 _intra_sweep_estimate.padded_average()); |
0 | 6211 gen->setNearLargestChunk(); |
6212 | |
6213 { | |
6214 SweepClosure sweepClosure(this, gen, &_markBitMap, | |
6215 CMSYield && asynch); | |
6216 gen->cmsSpace()->blk_iterate_careful(&sweepClosure); | |
6217 // We need to free-up/coalesce garbage/blocks from a | |
6218 // co-terminal free run. This is done in the SweepClosure | |
6219 // destructor; so, do not remove this scope, else the | |
6220 // end-of-sweep-census below will be off by a little bit. | |
6221 } | |
6222 gen->cmsSpace()->sweep_completed(); | |
6223 gen->cmsSpace()->endSweepFLCensus(sweep_count()); |
6224 if (should_unload_classes()) { // unloaded classes this cycle, |
6225 _concurrent_cycles_since_last_unload = 0; // ... reset count |
6226 } else { // did not unload classes, |
6227 _concurrent_cycles_since_last_unload++; // ... increment count |
6228 } |
0 | 6229 } |
6230 | |
6231 // Reset CMS data structures (for now just the marking bit map) | |
6232 // preparatory for the next cycle. | |
6233 void CMSCollector::reset(bool asynch) { | |
6234 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
6235 CMSAdaptiveSizePolicy* sp = size_policy(); | |
6236 AdaptiveSizePolicyOutput(sp, gch->total_collections()); | |
6237 if (asynch) { | |
6238 CMSTokenSyncWithLocks ts(true, bitMapLock()); | |
6239 | |
6240 // If the state is not "Resetting", the foreground thread | |
6241 // has done a collection and the resetting. | |
6242 if (_collectorState != Resetting) { | |
6243 assert(_collectorState == Idling, "The state should only change" | |
6244 " because the foreground collector has finished the collection"); | |
6245 return; | |
6246 } | |
6247 | |
6248 // Clear the mark bitmap (no grey objects to start with) | |
6249 // for the next cycle. | |
6250 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); | |
6251 CMSPhaseAccounting cmspa(this, "reset", !PrintGCDetails); | |
6252 | |
6253 HeapWord* curAddr = _markBitMap.startWord(); | |
6254 while (curAddr < _markBitMap.endWord()) { | |
6255 size_t remaining = pointer_delta(_markBitMap.endWord(), curAddr); | |
6256 MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining)); | |
6257 _markBitMap.clear_large_range(chunk); | |
6258 if (ConcurrentMarkSweepThread::should_yield() && | |
6259 !foregroundGCIsActive() && | |
6260 CMSYield) { | |
6261 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), | |
6262 "CMS thread should hold CMS token"); | |
6263 assert_lock_strong(bitMapLock()); | |
6264 bitMapLock()->unlock(); | |
6265 ConcurrentMarkSweepThread::desynchronize(true); | |
6266 ConcurrentMarkSweepThread::acknowledge_yield_request(); | |
6267 stopTimer(); | |
6268 if (PrintCMSStatistics != 0) { | |
6269 incrementYields(); | |
6270 } | |
6271 icms_wait(); | |
6272 | |
6273 // See the comment in coordinator_yield() | |
6274 for (unsigned i = 0; i < CMSYieldSleepCount && | |
6275 ConcurrentMarkSweepThread::should_yield() && |
6276 !CMSCollector::foregroundGCIsActive(); ++i) { |
0 | 6277 os::sleep(Thread::current(), 1, false); |
6278 ConcurrentMarkSweepThread::acknowledge_yield_request(); | |
6279 } | |
6280 | |
6281 ConcurrentMarkSweepThread::synchronize(true); | |
6282 bitMapLock()->lock_without_safepoint_check(); | |
6283 startTimer(); | |
6284 } | |
6285 curAddr = chunk.end(); | |
6286 } | |
6287 // A successful mostly concurrent collection has been done. |
6288 // Because only the full (i.e., concurrent mode failure) collections |
6289 // are being measured for gc overhead limits, clean the "near" flag |
6290 // and count. |
6291 sp->reset_gc_overhead_limit_count(); |
0 | 6292 _collectorState = Idling; |
6293 } else { | |
6294 // already have the lock | |
6295 assert(_collectorState == Resetting, "just checking"); | |
6296 assert_lock_strong(bitMapLock()); | |
6297 _markBitMap.clear_all(); | |
6298 _collectorState = Idling; | |
6299 } | |
6300 | |
6301 // Stop incremental mode after a cycle completes, so that any future cycles | |
6302 // are triggered by allocation. | |
6303 stop_icms(); | |
6304 | |
6305 NOT_PRODUCT( | |
6306 if (RotateCMSCollectionTypes) { | |
6307 _cmsGen->rotate_debug_collection_type(); | |
6308 } | |
6309 ) | |
6310 } | |
6311 | |
6312 void CMSCollector::do_CMS_operation(CMS_op_type op) { | |
6313 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); | |
6314 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); | |
6315 TraceTime t("GC", PrintGC, !PrintGCDetails, gclog_or_tty); | |
6316 TraceCollectorStats tcs(counters()); | |
6317 | |
6318 switch (op) { | |
6319 case CMS_op_checkpointRootsInitial: { | |
6320 SvcGCMarker sgcm(SvcGCMarker::OTHER); |
0 | 6321 checkpointRootsInitial(true); // asynch |
6322 if (PrintGC) { | |
6323 _cmsGen->printOccupancy("initial-mark"); | |
6324 } | |
6325 break; | |
6326 } | |
6327 case CMS_op_checkpointRootsFinal: { | |
6328 SvcGCMarker sgcm(SvcGCMarker::OTHER); |
0 | 6329 checkpointRootsFinal(true, // asynch |
6330 false, // !clear_all_soft_refs | |
6331 false); // !init_mark_was_synchronous | |
6332 if (PrintGC) { | |
6333 _cmsGen->printOccupancy("remark"); | |
6334 } | |
6335 break; | |
6336 } | |
6337 default: | |
6338 fatal("No such CMS_op"); | |
6339 } | |
6340 } | |
6341 | |
6342 #ifndef PRODUCT | |
6343 size_t const CMSCollector::skip_header_HeapWords() { | |
6344 return FreeChunk::header_size(); | |
6345 } | |
6346 | |
6347 // Try and collect here conditions that should hold when | |
6348 // CMS thread is exiting. The idea is that the foreground GC | |
6349 // thread should not be blocked if it wants to terminate | |
6350 // the CMS thread and yet continue to run the VM for a while | |
6351 // after that. | |
6352 void CMSCollector::verify_ok_to_terminate() const { | |
6353 assert(Thread::current()->is_ConcurrentGC_thread(), | |
6354 "should be called by CMS thread"); | |
6355 assert(!_foregroundGCShouldWait, "should be false"); | |
6356 // We could check here that all the various low-level locks | |
6357 // are not held by the CMS thread, but that is overkill; see | |
6358 // also CMSThread::verify_ok_to_terminate() where the CGC_lock | |
6359 // is checked. | |
6360 } | |
6361 #endif | |
6362 | |
6363 size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const { | |
6364 assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1), |
6365 "missing Printezis mark?"); |
0 | 6366 HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2); |
6367 size_t size = pointer_delta(nextOneAddr + 1, addr); | |
6368 assert(size == CompactibleFreeListSpace::adjustObjectSize(size), | |
6369 "alignment problem"); | |
6370 assert(size >= 3, "Necessary for Printezis marks to work"); | |
6371 return size; | |
6372 } | |
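// [Annotator note, not in the original source] The Printezis-bit encoding
// implied by the asserts above: for a live object whose header is not yet
// parsable, the marker sets the bits for both 'addr' and 'addr + 1' and
// also marks the object's last word, so the size can be recovered as
//
//   bits:  1 1 0 ... 0 1
//          ^addr        ^nextOneAddr (last word)
//   size = pointer_delta(nextOneAddr + 1, addr)  // hence size >= 3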
6373 | |
6374 // A variant of the above (block_size_using_printezis_bits()) except | |
6375 // that we return 0 if the P-bits are not yet set. | |
6376 size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const { | |
6377 if (_markBitMap.isMarked(addr + 1)) { |
6378 assert(_markBitMap.isMarked(addr), "P-bit can be set only for marked objects"); |
0 | 6379 HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2); |
6380 size_t size = pointer_delta(nextOneAddr + 1, addr); | |
6381 assert(size == CompactibleFreeListSpace::adjustObjectSize(size), | |
6382 "alignment problem"); | |
6383 assert(size >= 3, "Necessary for Printezis marks to work"); | |
6384 return size; | |
6385 } |
6386 return 0; |
0 | 6387 } |
6388 | |
6389 HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const { | |
6390 size_t sz = 0; | |
6391 oop p = (oop)addr; | |
187 | 6392 if (p->klass_or_null() != NULL && p->is_parsable()) { |
0 | 6393 sz = CompactibleFreeListSpace::adjustObjectSize(p->size()); |
6394 } else { | |
6395 sz = block_size_using_printezis_bits(addr); | |
6396 } | |
6397 assert(sz > 0, "size must be nonzero"); | |
6398 HeapWord* next_block = addr + sz; | |
6399 HeapWord* next_card = (HeapWord*)round_to((uintptr_t)next_block, | |
6400 CardTableModRefBS::card_size); | |
6401 assert(round_down((uintptr_t)addr, CardTableModRefBS::card_size) < | |
6402 round_down((uintptr_t)next_card, CardTableModRefBS::card_size), | |
6403 "must be different cards"); | |
6404 return next_card; | |
6405 } | |
6406 | |
6407 | |
6408 // CMS Bit Map Wrapper ///////////////////////////////////////// | |
6409 | |
6410 // Construct a CMS bit map infrastructure, but don't create the | |
6411 // bit vector itself. That is done by a separate call CMSBitMap::allocate() | |
6412 // further below. | |
6413 CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name): | |
6414 _bm(), |
0 | 6415 _shifter(shifter), |
6416 _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true) : NULL) | |
6417 { | |
6418 _bmStartWord = 0; | |
6419 _bmWordSize = 0; | |
6420 } | |
6421 | |
6422 bool CMSBitMap::allocate(MemRegion mr) { | |
6423 _bmStartWord = mr.start(); | |
6424 _bmWordSize = mr.word_size(); | |
6425 ReservedSpace brs(ReservedSpace::allocation_align_size_up( | |
6426 (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1)); | |
6427 if (!brs.is_reserved()) { | |
6428 warning("CMS bit map allocation failure"); | |
6429 return false; | |
6430 } | |
6431 // For now we'll just commit all of the bit map up front. |
6432 // Later on we'll try to be more parsimonious with swap. | |
6433 if (!_virtual_space.initialize(brs, brs.size())) { | |
6434 warning("CMS bit map backing store failure"); | |
6435 return false; | |
6436 } | |
6437 assert(_virtual_space.committed_size() == brs.size(), | |
6438 "didn't reserve backing store for all of CMS bit map?"); | |
6439 _bm.set_map((BitMap::bm_word_t*)_virtual_space.low()); |
0 | 6440 assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >= |
6441 _bmWordSize, "inconsistency in bit map sizing"); | |
6442 _bm.set_size(_bmWordSize >> _shifter); | |
6443 | |
6444 // bm.clear(); // can we rely on getting zero'd memory? verify below | |
6445 assert(isAllClear(), | |
6446 "Expected zero'd memory from ReservedSpace constructor"); | |
6447 assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()), | |
6448 "consistency check"); | |
6449 return true; | |
6450 } | |
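// [Annotator note, not in the original source] Sizing arithmetic for the
// allocation above: one bit covers 2^_shifter heap words, so the backing
// store needs (_bmWordSize >> (_shifter + LogBitsPerByte)) bytes. For
// example, with _shifter == 0 on a 64-bit VM, a 512 MB generation is 64M
// heap words and thus needs 64M bits, i.e. an 8 MB reservation.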
6451 | |
6452 void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) { | |
6453 HeapWord *next_addr, *end_addr, *last_addr; | |
6454 assert_locked(); | |
6455 assert(covers(mr), "out-of-range error"); | |
6456 // XXX assert that start and end are appropriately aligned | |
6457 for (next_addr = mr.start(), end_addr = mr.end(); | |
6458 next_addr < end_addr; next_addr = last_addr) { | |
6459 MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr); | |
6460 last_addr = dirty_region.end(); | |
6461 if (!dirty_region.is_empty()) { | |
6462 cl->do_MemRegion(dirty_region); | |
6463 } else { | |
6464 assert(last_addr == end_addr, "program logic"); | |
6465 return; | |
6466 } | |
6467 } | |
6468 } | |
6469 | |
6470 #ifndef PRODUCT | |
6471 void CMSBitMap::assert_locked() const { | |
6472 CMSLockVerifier::assert_locked(lock()); | |
6473 } | |
6474 | |
6475 bool CMSBitMap::covers(MemRegion mr) const { | |
6476 // assert(_bm.map() == _virtual_space.low(), "map inconsistency"); | |
6477 assert((size_t)_bm.size() == (_bmWordSize >> _shifter), | |
6478 "size inconsistency"); | |
6479 return (mr.start() >= _bmStartWord) && | |
6480 (mr.end() <= endWord()); | |
6481 } | |
6482 | |
6483 bool CMSBitMap::covers(HeapWord* start, size_t size) const { | |
6484 return (start >= _bmStartWord && (start + size) <= endWord()); | |
6485 } | |
6486 | |
6487 void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) { | |
6488 // verify that there are no 1 bits in the interval [left, right) | |
6489 FalseBitMapClosure falseBitMapClosure; | |
6490 iterate(&falseBitMapClosure, left, right); | |
6491 } | |
6492 | |
6493 void CMSBitMap::region_invariant(MemRegion mr) | |
6494 { | |
6495 assert_locked(); | |
6496 // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize)); | |
6497 assert(!mr.is_empty(), "unexpected empty region"); | |
6498 assert(covers(mr), "mr should be covered by bit map"); | |
6499 // convert address range into offset range | |
6500 size_t start_ofs = heapWordToOffset(mr.start()); | |
6501 // Make sure that end() is appropriately aligned | |
6502 assert(mr.end() == (HeapWord*)round_to((intptr_t)mr.end(), | |
6503 (1 << (_shifter+LogHeapWordSize))), | |
6504 "Misaligned mr.end()"); | |
6505 size_t end_ofs = heapWordToOffset(mr.end()); | |
6506 assert(end_ofs > start_ofs, "Should mark at least one bit"); | |
6507 } | |
6508 | |
6509 #endif | |
6510 | |
6511 bool CMSMarkStack::allocate(size_t size) { | |
6512 // allocate a stack of the requisite depth | |
6513 ReservedSpace rs(ReservedSpace::allocation_align_size_up( | |
6514 size * sizeof(oop))); | |
6515 if (!rs.is_reserved()) { | |
6516 warning("CMSMarkStack allocation failure"); | |
6517 return false; | |
6518 } | |
6519 if (!_virtual_space.initialize(rs, rs.size())) { | |
6520 warning("CMSMarkStack backing store failure"); | |
6521 return false; | |
6522 } | |
6523 assert(_virtual_space.committed_size() == rs.size(), | |
6524 "didn't reserve backing store for all of CMS stack?"); | |
6525 _base = (oop*)(_virtual_space.low()); | |
6526 _index = 0; | |
6527 _capacity = size; | |
6528 NOT_PRODUCT(_max_depth = 0); | |
6529 return true; | |
6530 } | |
6531 | |
6532 // XXX FIX ME !!! In the MT case we come in here holding a | |
6533 // leaf lock. For printing we need to take a further lock | |
6534 // which has lower rank. We need to recalibrate the two |
6535 // lock-ranks involved in order to be able to print the |
6536 // messages below. (Or defer the printing to the caller. | |
6537 // For now we take the expedient path of just disabling the | |
6538 // messages for the problematic case.) | |
6539 void CMSMarkStack::expand() { | |
1284 | 6540 assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted"); |
6541 if (_capacity == MarkStackSizeMax) { | |
0 | 6542 if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) { |
6543 // We print a warning message only once per CMS cycle. | |
6544 gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit"); | |
6545 } | |
6546 return; | |
6547 } | |
6548 // Double capacity if possible | |
1284 | 6549 size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax); |
0 | 6550 // Do not give up existing stack until we have managed to |
6551 // get the double capacity that we desired. | |
6552 ReservedSpace rs(ReservedSpace::allocation_align_size_up( | |
6553 new_capacity * sizeof(oop))); | |
6554 if (rs.is_reserved()) { | |
6555 // Release the backing store associated with old stack | |
6556 _virtual_space.release(); | |
6557 // Reinitialize virtual space for new stack | |
6558 if (!_virtual_space.initialize(rs, rs.size())) { | |
6559 fatal("Not enough swap for expanded marking stack"); | |
6560 } | |
6561 _base = (oop*)(_virtual_space.low()); | |
6562 _index = 0; | |
6563 _capacity = new_capacity; | |
6564 } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) { | |
6565 // Failed to double capacity, continue; | |
6566 // we print a detail message only once per CMS cycle. | |
6567 gclog_or_tty->print(" (benign) Failed to expand marking stack from "SIZE_FORMAT"K to " | |
6568 SIZE_FORMAT"K", | |
6569 _capacity / K, new_capacity / K); | |
6570 } | |
6571 } | |
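// [Annotator note, not in the original source] Expansion policy illustrated:
// capacity grows C -> 2C -> 4C ... up to MarkStackSizeMax, and the new
// backing store is reserved before the old one is released, so a failed
// reservation leaves the existing stack intact. Note that a successful
// expand resets _index to 0, i.e. contents are not copied; callers are
// expected to have drained the stack (preserving any overflowed entries
// elsewhere) before expanding it.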
6572 | |
6573 | |
6574 // Closures | |
6575 // XXX: there seems to be a lot of code duplication here; | |
6576 // should refactor and consolidate common code. | |
6577 | |
6578 // This closure is used to mark refs into the CMS generation in | |
6579 // the CMS bit map. Called at the first checkpoint. This closure | |
6580 // assumes that we do not need to re-mark dirty cards; if the CMS | |
6581 // generation on which this is used is not an oldest (modulo perm gen) | |
6582 // generation then this will lose younger_gen cards! | |
6583 | |
6584 MarkRefsIntoClosure::MarkRefsIntoClosure( | |
6585 MemRegion span, CMSBitMap* bitMap): |
0 | 6586 _span(span), |
6587 _bitMap(bitMap) |
0 | 6588 { |
6589 assert(_ref_processor == NULL, "deliberately left NULL"); | |
6590 assert(_bitMap->covers(_span), "_bitMap/_span mismatch"); | |
6591 } | |
6592 | |
6593 void MarkRefsIntoClosure::do_oop(oop obj) { |
0 | 6594 // if p points into _span, then mark corresponding bit in _markBitMap |
6595 assert(obj->is_oop(), "expected an oop"); |
6596 HeapWord* addr = (HeapWord*)obj; |
6597 if (_span.contains(addr)) { |
6598 // this should be made more efficient |
6599 _bitMap->mark(addr); |
6600 } |
6601 } |
6602 |
6603 void MarkRefsIntoClosure::do_oop(oop* p) { MarkRefsIntoClosure::do_oop_work(p); } |
6604 void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); } |
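// [Annotator note, not in the original source] The paired overloads above
// are the closure-dispatch idiom used throughout this file: heap fields
// are narrowOop with compressed oops enabled and full-width oop* otherwise,
// and both forward to a do_oop_work() helper that is templatized over the
// field type.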
0 | 6605 |
6606 // A variant of the above, used for CMS marking verification. | |
6607 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure( | |
6608 MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm): |
0 | 6609 _span(span), |
6610 _verification_bm(verification_bm), | |
6611 _cms_bm(cms_bm) |
6612 { |
0 | 6613 assert(_ref_processor == NULL, "deliberately left NULL"); |
6614 assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch"); | |
6615 } | |
6616 | |
6617 void MarkRefsIntoVerifyClosure::do_oop(oop obj) { |
0 | 6618 // if p points into _span, then mark corresponding bit in _markBitMap |
6619 assert(obj->is_oop(), "expected an oop"); |
6620 HeapWord* addr = (HeapWord*)obj; |
6621 if (_span.contains(addr)) { |
6622 _verification_bm->mark(addr); |
6623 if (!_cms_bm->isMarked(addr)) { |
6624 oop(addr)->print(); |
6625 gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", addr); |
6626 fatal("... aborting"); |
6627 } |
6628 } |
6629 } |
6630 |
6631 void MarkRefsIntoVerifyClosure::do_oop(oop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); } |
6632 void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); } |
0 | 6633 |
6634 ////////////////////////////////////////////////// | |
6635 // MarkRefsIntoAndScanClosure | |
6636 ////////////////////////////////////////////////// | |
6637 | |
6638 MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span, | |
6639 ReferenceProcessor* rp, | |
6640 CMSBitMap* bit_map, | |
6641 CMSBitMap* mod_union_table, | |
6642 CMSMarkStack* mark_stack, | |
6643 CMSMarkStack* revisit_stack, | |
6644 CMSCollector* collector, | |
6645 bool should_yield, | |
6646 bool concurrent_precleaning): | |
6647 _collector(collector), | |
6648 _span(span), | |
6649 _bit_map(bit_map), | |
6650 _mark_stack(mark_stack), | |
6651 _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table, | |
6652 mark_stack, revisit_stack, concurrent_precleaning), | |
6653 _yield(should_yield), | |
6654 _concurrent_precleaning(concurrent_precleaning), | |
6655 _freelistLock(NULL) | |
6656 { | |
6657 _ref_processor = rp; | |
6658 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL"); | |
6659 } | |
6660 | |
6661 // This closure is used to mark refs into the CMS generation at the | |
6662 // second (final) checkpoint, and to scan and transitively follow | |
6663 // the unmarked oops. It is also used during the concurrent precleaning | |
6664 // phase while scanning objects on dirty cards in the CMS generation. | |
6665 // The marks are made in the marking bit map and the marking stack is | |
6666 // used for keeping the (newly) grey objects during the scan. | |
6667 // The parallel version (Par_...) appears further below. | |
6668 void MarkRefsIntoAndScanClosure::do_oop(oop obj) { |
6669 if (obj != NULL) { |
6670 assert(obj->is_oop(), "expected an oop"); |
6671 HeapWord* addr = (HeapWord*)obj; |
6672 assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)"); |
6673 assert(_collector->overflow_list_is_empty(), |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6674 "overflow list should be empty"); |
0 | 6675 if (_span.contains(addr) && |
6676 !_bit_map->isMarked(addr)) { | |
6677 // mark bit map (object is now grey) | |
6678 _bit_map->mark(addr); | |
6679 // push on marking stack (stack should be empty), and drain the | |
6680 // stack by applying this closure to the oops in the oops popped | |
6681 // from the stack (i.e. blacken the grey objects) | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
6682 bool res = _mark_stack->push(obj); |
0 | 6683 assert(res, "Should have space to push on empty stack"); |
6684 do { | |
6685 oop new_oop = _mark_stack->pop(); | |
6686 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop"); | |
6687 assert(new_oop->is_parsable(), "Found unparsable oop"); | |
6688 assert(_bit_map->isMarked((HeapWord*)new_oop), | |
6689 "only grey objects on this stack"); | |
6690 // iterate over the oops in this oop, marking and pushing | |
6691 // the ones in CMS heap (i.e. in _span). | |
6692 new_oop->oop_iterate(&_pushAndMarkClosure); | |
6693 // check if it's time to yield | |
6694 do_yield_check(); | |
6695 } while (!_mark_stack->isEmpty() || | |
6696 (!_concurrent_precleaning && take_from_overflow_list())); | |
6697 // if marking stack is empty, and we are not doing this | |
6698 // during precleaning, then check the overflow list | |
6699 } | |
6700 assert(_mark_stack->isEmpty(), "post-condition (eager drainage)"); | |
6701 assert(_collector->overflow_list_is_empty(), | |
6702 "overflow list was drained above"); | |
6703 // We could restore evacuated mark words, if any, used for | |
6704 // overflow list links here because the overflow list is | |
6705 // provably empty here. That would reduce the maximum | |
6706 // size requirements for preserved_{oop,mark}_stack. | |
6707 // But we'll just postpone it until we are all done | |
6708 // so we can just stream through. | |
6709 if (!_concurrent_precleaning && CMSOverflowEarlyRestoration) { | |
6710 _collector->restore_preserved_marks_if_any(); | |
6711 assert(_collector->no_preserved_marks(), "No preserved marks"); | |
6712 } | |
6713 assert(!CMSOverflowEarlyRestoration || _collector->no_preserved_marks(), | |
6714 "All preserved marks should have been restored above"); | |
6715 } | |
6716 } | |
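
// ---------------------------------------------------------------------------
// [Editorial sketch, not part of the VM sources.] The drain loop above is an
// instance of tri-color marking: white = unmarked, grey = marked but not yet
// scanned (on the mark stack), black = marked and scanned. A minimal
// standalone model of that invariant; the Node type and field names below are
// illustrative assumptions, not HotSpot APIs.

#include <stack>
#include <vector>

namespace cms_sketch {

struct Node {
  bool marked = false;             // set when the node turns grey
  std::vector<Node*> refs;         // outgoing references
};

void mark_and_drain(Node* root) {
  if (root == nullptr || root->marked) return;
  root->marked = true;             // white -> grey
  std::stack<Node*> grey;
  grey.push(root);
  while (!grey.empty()) {          // empty stack <=> no grey nodes left
    Node* n = grey.top();
    grey.pop();                    // scanning n blackens it
    for (Node* ref : n->refs) {
      if (ref != nullptr && !ref->marked) {
        ref->marked = true;        // newly grey: push for later scanning
        grey.push(ref);
      }
    }
  }
}

} // namespace cms_sketch
// ---------------------------------------------------------------------------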

void MarkRefsIntoAndScanClosure::do_oop(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }

void MarkRefsIntoAndScanClosure::do_yield_work() {
  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "CMS thread should hold CMS token");
  assert_lock_strong(_freelistLock);
  assert_lock_strong(_bit_map->lock());
  // relinquish the free_list_lock and bitMapLock()
  DEBUG_ONLY(RememberKlassesChecker mux(false);)
  _bit_map->lock()->unlock();
  _freelistLock->unlock();
  ConcurrentMarkSweepThread::desynchronize(true);
  ConcurrentMarkSweepThread::acknowledge_yield_request();
  _collector->stopTimer();
  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
  if (PrintCMSStatistics != 0) {
    _collector->incrementYields();
  }
  _collector->icms_wait();

  // See the comment in coordinator_yield()
  for (unsigned i = 0;
       i < CMSYieldSleepCount &&
       ConcurrentMarkSweepThread::should_yield() &&
       !CMSCollector::foregroundGCIsActive();
       ++i) {
    os::sleep(Thread::current(), 1, false);
    ConcurrentMarkSweepThread::acknowledge_yield_request();
  }

  ConcurrentMarkSweepThread::synchronize(true);
  _freelistLock->lock_without_safepoint_check();
  _bit_map->lock()->lock_without_safepoint_check();
  _collector->startTimer();
}
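
// ---------------------------------------------------------------------------
// [Editorial sketch, not part of the VM sources.] The unlock/sleep/relock
// sequence above (and the "constructor/destructor idiom" XXX comment in
// MarkFromRootsClosure::do_yield_work further below) suggests a scoped helper.
// An illustrative standalone version, with std::mutex standing in for the VM's
// bit-map and free-list locks; the caller is assumed to hold the lock.

#include <chrono>
#include <mutex>
#include <thread>

namespace cms_sketch {

class ScopedYield {
  std::mutex& _lock;
 public:
  explicit ScopedYield(std::mutex& lock) : _lock(lock) {
    _lock.unlock();   // relinquish the (held) lock before yielding
    std::this_thread::sleep_for(std::chrono::milliseconds(1));
  }
  ~ScopedYield() {
    _lock.lock();     // reacquire on scope exit, as the epilogue above does
  }
};

} // namespace cms_sketch
// ---------------------------------------------------------------------------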

///////////////////////////////////////////////////////////
// Par_MarkRefsIntoAndScanClosure: a parallel version of
// MarkRefsIntoAndScanClosure
///////////////////////////////////////////////////////////
Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
  CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
  CMSBitMap* bit_map, OopTaskQueue* work_queue, CMSMarkStack* revisit_stack):
  _span(span),
  _bit_map(bit_map),
  _work_queue(work_queue),
  _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
                       (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))),
  _par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue,
                          revisit_stack)
{
  _ref_processor = rp;
  assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
}
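
// ---------------------------------------------------------------------------
// [Editorial sketch, not part of the VM sources.] _low_water_mark above is the
// smaller of a quarter of the queue's capacity and a per-thread drain
// threshold. Standalone form of that arithmetic; the sample values (capacity
// 16384, threshold 10, 4 GC threads) are illustrative assumptions.

#include <algorithm>

namespace cms_sketch {

inline unsigned low_water_mark(unsigned max_elems,
                               unsigned drain_threshold,
                               unsigned gc_threads) {
  return std::min(max_elems / 4, drain_threshold * gc_threads);
}

// e.g. low_water_mark(16384, 10, 4) == std::min(4096u, 40u) == 40,
// so each worker trims its queue back to 40 entries after a push.

} // namespace cms_sketch
// ---------------------------------------------------------------------------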

// This closure is used to mark refs into the CMS generation at the
// second (final) checkpoint, and to scan and transitively follow
// the unmarked oops. The marks are made in the marking bit map and
// the work_queue is used for keeping the (newly) grey objects during
// the scan phase whence they are also available for stealing by parallel
// threads. Since the marking bit map is shared, updates are
// synchronized (via CAS).
void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) {
  if (obj != NULL) {
    // Ignore mark word because this could be an already marked oop
    // that may be chained at the end of the overflow list.
    assert(obj->is_oop(true), "expected an oop");
    HeapWord* addr = (HeapWord*)obj;
    if (_span.contains(addr) &&
        !_bit_map->isMarked(addr)) {
      // mark bit map (object will become grey):
      // It is possible for several threads to be
      // trying to "claim" this object concurrently;
      // the unique thread that succeeds in marking the
      // object first will do the subsequent push on
      // to the work queue (or overflow list).
      if (_bit_map->par_mark(addr)) {
        // push on work_queue (which may not be empty), and trim the
        // queue to an appropriate length by applying this closure to
        // the oops in the oops popped from the stack (i.e. blacken the
        // grey objects)
        bool res = _work_queue->push(obj);
        assert(res, "Low water mark should be less than capacity?");
        trim_queue(_low_water_mark);
      } // Else, another thread claimed the object
    }
  }
}
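
// ---------------------------------------------------------------------------
// [Editorial sketch, not part of the VM sources.] par_mark() above lets exactly
// one of several racing threads "claim" an object. A standalone model of that
// claim over a word-granular atomic bitmap: fetch_or returns the previous
// word, so the caller learns whether its bit was already set.

#include <atomic>
#include <cstdint>
#include <vector>

namespace cms_sketch {

class AtomicBitmap {
  std::vector<std::atomic<uint64_t>> _words;
 public:
  explicit AtomicBitmap(size_t bits) : _words((bits + 63) / 64) {}
  // Returns true iff this call flipped the bit from 0 to 1, i.e. this
  // thread won the race and must do the push; losers simply return.
  bool par_mark(size_t bit) {
    const uint64_t mask = uint64_t(1) << (bit & 63);
    const uint64_t old  =
        _words[bit >> 6].fetch_or(mask, std::memory_order_acq_rel);
    return (old & mask) == 0;
  }
};

} // namespace cms_sketch
// ---------------------------------------------------------------------------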

void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p)       { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
void Par_MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }

// This closure is used to rescan the marked objects on the dirty cards
// in the mod union table and the card table proper.
size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
  oop p, MemRegion mr) {

  size_t size = 0;
  HeapWord* addr = (HeapWord*)p;
  DEBUG_ONLY(_collector->verify_work_stacks_empty();)
  assert(_span.contains(addr), "we are scanning the CMS generation");
  // check if it's time to yield
  if (do_yield_check()) {
    // We yielded for some foreground stop-world work,
    // and we have been asked to abort this ongoing preclean cycle.
    return 0;
  }
  if (_bitMap->isMarked(addr)) {
    // it's marked; is it potentially uninitialized?
    if (p->klass_or_null() != NULL) {
      // If is_conc_safe is false, the object may be undergoing
      // change by the VM outside a safepoint.  Don't try to
      // scan it, but rather leave it for the remark phase.
      if (CMSPermGenPrecleaningEnabled &&
          (!p->is_conc_safe() || !p->is_parsable())) {
        // Signal precleaning to redirty the card since
        // the klass pointer is already installed.
        assert(size == 0, "Initial value");
      } else {
        assert(p->is_parsable(), "must be parsable.");
        // an initialized object; ignore mark word in verification below
        // since we are running concurrent with mutators
        assert(p->is_oop(true), "should be an oop");
        if (p->is_objArray()) {
          // objArrays are precisely marked; restrict scanning
          // to dirty cards only.
          size = CompactibleFreeListSpace::adjustObjectSize(
                   p->oop_iterate(_scanningClosure, mr));
        } else {
          // A non-array may have been imprecisely marked; we need
          // to scan object in its entirety.
          size = CompactibleFreeListSpace::adjustObjectSize(
                   p->oop_iterate(_scanningClosure));
        }
        #ifdef DEBUG
          size_t direct_size =
            CompactibleFreeListSpace::adjustObjectSize(p->size());
          assert(size == direct_size, "Inconsistency in size");
          assert(size >= 3, "Necessary for Printezis marks to work");
          if (!_bitMap->isMarked(addr+1)) {
            _bitMap->verifyNoOneBitsInRange(addr+2, addr+size);
          } else {
            _bitMap->verifyNoOneBitsInRange(addr+2, addr+size-1);
            assert(_bitMap->isMarked(addr+size-1),
                   "inconsistent Printezis mark");
          }
        #endif // DEBUG
      }
    } else {
      // an uninitialized object
      assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
      HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
      size = pointer_delta(nextOneAddr + 1, addr);
      assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
             "alignment problem");
      // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
      // will dirty the card when the klass pointer is installed in the
      // object (signalling the completion of initialization).
    }
  } else {
    // Either a not yet marked object or an uninitialized object
    if (p->klass_or_null() == NULL || !p->is_parsable()) {
      // An uninitialized object, skip to the next card, since
      // we may not be able to read its P-bits yet.
      assert(size == 0, "Initial value");
    } else {
      // An object not (yet) reached by marking: we merely need to
      // compute its size so as to go look at the next block.
      assert(p->is_oop(true), "should be an oop");
      size = CompactibleFreeListSpace::adjustObjectSize(p->size());
    }
  }
  DEBUG_ONLY(_collector->verify_work_stacks_empty();)
  return size;
}
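
// ---------------------------------------------------------------------------
// [Editorial sketch, not part of the VM sources.] For a block that is not yet
// parsable, the code above reads its size from the bit map alone: marks at
// addr and addr+1 say "allocated but uninitialized", and a further mark flags
// the last word of the block (the "Printezis marks"). A standalone model over
// a plain bit vector, mirroring size = pointer_delta(nextOneAddr + 1, addr):

#include <cstddef>
#include <vector>

namespace cms_sketch {

// Given marks at bits[start] and bits[start+1], the next set bit at or after
// start+2 marks the block's last word, so the size is (last - start + 1) words.
size_t printezis_block_size(const std::vector<bool>& bits, size_t start) {
  size_t last = start + 2;
  while (last < bits.size() && !bits[last]) {
    ++last;
  }
  return last - start + 1;
}

} // namespace cms_sketch
// ---------------------------------------------------------------------------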

void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "CMS thread should hold CMS token");
  assert_lock_strong(_freelistLock);
  assert_lock_strong(_bitMap->lock());
  DEBUG_ONLY(RememberKlassesChecker mux(false);)
  // relinquish the free_list_lock and bitMapLock()
  _bitMap->lock()->unlock();
  _freelistLock->unlock();
  ConcurrentMarkSweepThread::desynchronize(true);
  ConcurrentMarkSweepThread::acknowledge_yield_request();
  _collector->stopTimer();
  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
  if (PrintCMSStatistics != 0) {
    _collector->incrementYields();
  }
  _collector->icms_wait();

  // See the comment in coordinator_yield()
  for (unsigned i = 0; i < CMSYieldSleepCount &&
                       ConcurrentMarkSweepThread::should_yield() &&
                       !CMSCollector::foregroundGCIsActive(); ++i) {
    os::sleep(Thread::current(), 1, false);
    ConcurrentMarkSweepThread::acknowledge_yield_request();
  }

  ConcurrentMarkSweepThread::synchronize(true);
  _freelistLock->lock_without_safepoint_check();
  _bitMap->lock()->lock_without_safepoint_check();
  _collector->startTimer();
}


//////////////////////////////////////////////////////////////////
// SurvivorSpacePrecleanClosure
//////////////////////////////////////////////////////////////////
// This (single-threaded) closure is used to preclean the oops in
// the survivor spaces.
size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {

  HeapWord* addr = (HeapWord*)p;
  DEBUG_ONLY(_collector->verify_work_stacks_empty();)
  assert(!_span.contains(addr), "we are scanning the survivor spaces");
  assert(p->klass_or_null() != NULL, "object should be initialized");
  assert(p->is_parsable(), "must be parsable.");
  // an initialized object; ignore mark word in verification below
  // since we are running concurrent with mutators
  assert(p->is_oop(true), "should be an oop");
  // Note that we do not yield while we iterate over
  // the interior oops of p, pushing the relevant ones
  // on our marking stack.
  size_t size = p->oop_iterate(_scanning_closure);
  do_yield_check();
  // Observe that below, we do not abandon the preclean
  // phase as soon as we should; rather we empty the
  // marking stack before returning. This is to satisfy
  // some existing assertions. In general, it may be a
  // good idea to abort immediately and complete the marking
  // from the grey objects at a later time.
  while (!_mark_stack->isEmpty()) {
    oop new_oop = _mark_stack->pop();
    assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
    assert(new_oop->is_parsable(), "Found unparsable oop");
    assert(_bit_map->isMarked((HeapWord*)new_oop),
           "only grey objects on this stack");
    // iterate over the oops in this oop, marking and pushing
    // the ones in CMS heap (i.e. in _span).
    new_oop->oop_iterate(_scanning_closure);
    // check if it's time to yield
    do_yield_check();
  }
  unsigned int after_count =
    GenCollectedHeap::heap()->total_collections();
  bool abort = (_before_count != after_count) ||
               _collector->should_abort_preclean();
  return abort ? 0 : size;
}

void SurvivorSpacePrecleanClosure::do_yield_work() {
  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "CMS thread should hold CMS token");
  assert_lock_strong(_bit_map->lock());
  DEBUG_ONLY(RememberKlassesChecker smx(false);)
  // Relinquish the bit map lock
  _bit_map->lock()->unlock();
  ConcurrentMarkSweepThread::desynchronize(true);
  ConcurrentMarkSweepThread::acknowledge_yield_request();
  _collector->stopTimer();
  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
  if (PrintCMSStatistics != 0) {
    _collector->incrementYields();
  }
  _collector->icms_wait();

  // See the comment in coordinator_yield()
  for (unsigned i = 0; i < CMSYieldSleepCount &&
                       ConcurrentMarkSweepThread::should_yield() &&
                       !CMSCollector::foregroundGCIsActive(); ++i) {
    os::sleep(Thread::current(), 1, false);
    ConcurrentMarkSweepThread::acknowledge_yield_request();
  }

  ConcurrentMarkSweepThread::synchronize(true);
  _bit_map->lock()->lock_without_safepoint_check();
  _collector->startTimer();
}

// This closure is used to rescan the marked objects on the dirty cards
// in the mod union table and the card table proper. In the parallel
// case, although the bitMap is shared, we do a single read so the
// isMarked() query is "safe".
bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
  // Ignore mark word because we are running concurrent with mutators
  assert(p->is_oop_or_null(true), "expected an oop or null");
  HeapWord* addr = (HeapWord*)p;
  assert(_span.contains(addr), "we are scanning the CMS generation");
  bool is_obj_array = false;
  #ifdef DEBUG
    if (!_parallel) {
      assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
      assert(_collector->overflow_list_is_empty(),
             "overflow list should be empty");
    }
  #endif // DEBUG
  if (_bit_map->isMarked(addr)) {
    // Obj arrays are precisely marked, non-arrays are not;
    // so we scan objArrays precisely and non-arrays in their
    // entirety.
    if (p->is_objArray()) {
      is_obj_array = true;
      if (_parallel) {
        p->oop_iterate(_par_scan_closure, mr);
      } else {
        p->oop_iterate(_scan_closure, mr);
      }
    } else {
      if (_parallel) {
        p->oop_iterate(_par_scan_closure);
      } else {
        p->oop_iterate(_scan_closure);
      }
    }
  }
  #ifdef DEBUG
    if (!_parallel) {
      assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
      assert(_collector->overflow_list_is_empty(),
             "overflow list should be empty");
    }
  #endif // DEBUG
  return is_obj_array;
}

MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
                        MemRegion span,
                        CMSBitMap* bitMap, CMSMarkStack*  markStack,
                        CMSMarkStack*  revisitStack,
                        bool should_yield, bool verifying):
  _collector(collector),
  _span(span),
  _bitMap(bitMap),
  _mut(&collector->_modUnionTable),
  _markStack(markStack),
  _revisitStack(revisitStack),
  _yield(should_yield),
  _skipBits(0)
{
  assert(_markStack->isEmpty(), "stack should be empty");
  _finger = _bitMap->startWord();
  _threshold = _finger;
  assert(_collector->_restart_addr == NULL, "Sanity check");
  assert(_span.contains(_finger), "Out of bounds _finger?");
  DEBUG_ONLY(_verifying = verifying;)
}

void MarkFromRootsClosure::reset(HeapWord* addr) {
  assert(_markStack->isEmpty(), "would cause duplicates on stack");
  assert(_span.contains(addr), "Out of bounds _finger?");
  _finger = addr;
  _threshold = (HeapWord*)round_to(
                 (intptr_t)_finger, CardTableModRefBS::card_size);
}
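
// ---------------------------------------------------------------------------
// [Editorial sketch, not part of the VM sources.] round_to() above keeps
// _threshold aligned up to the next card boundary. With the usual 512-byte
// cards (an assumption of this sketch), the power-of-two alignment works out
// as follows:

#include <cassert>
#include <cstdint>

namespace cms_sketch {

inline uintptr_t round_to_card(uintptr_t addr, uintptr_t card_size = 512) {
  assert((card_size & (card_size - 1)) == 0 && "card size must be a power of two");
  return (addr + card_size - 1) & ~(card_size - 1);
}

// e.g. round_to_card(0x1234) == 0x1400; card-aligned inputs are unchanged.

} // namespace cms_sketch
// ---------------------------------------------------------------------------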

// Should revisit to see if this should be restructured for
// greater efficiency.
bool MarkFromRootsClosure::do_bit(size_t offset) {
  if (_skipBits > 0) {
    _skipBits--;
    return true;
  }
  // convert offset into a HeapWord*
  HeapWord* addr = _bitMap->startWord() + offset;
  assert(_bitMap->endWord() && addr < _bitMap->endWord(),
         "address out of range");
  assert(_bitMap->isMarked(addr), "tautology");
  if (_bitMap->isMarked(addr+1)) {
    // this is an allocated but not yet initialized object
    assert(_skipBits == 0, "tautology");
    _skipBits = 2;  // skip next two marked bits ("Printezis-marks")
    oop p = oop(addr);
    if (p->klass_or_null() == NULL || !p->is_parsable()) {
      DEBUG_ONLY(if (!_verifying) {)
        // We re-dirty the cards on which this object lies and increase
        // the _threshold so that we'll come back to scan this object
        // during the preclean or remark phase. (CMSCleanOnEnter)
        if (CMSCleanOnEnter) {
          size_t sz = _collector->block_size_using_printezis_bits(addr);
          HeapWord* end_card_addr = (HeapWord*)round_to(
                                      (intptr_t)(addr+sz), CardTableModRefBS::card_size);
          MemRegion redirty_range = MemRegion(addr, end_card_addr);
          assert(!redirty_range.is_empty(), "Arithmetical tautology");
          // Bump _threshold to end_card_addr; note that
          // _threshold cannot possibly exceed end_card_addr, anyhow.
          // This prevents future clearing of the card as the scan proceeds
          // to the right.
          assert(_threshold <= end_card_addr,
                 "Because we are just scanning into this object");
          if (_threshold < end_card_addr) {
            _threshold = end_card_addr;
          }
          if (p->klass_or_null() != NULL) {
            // Redirty the range of cards...
            _mut->mark_range(redirty_range);
          } // ...else the setting of klass will dirty the card anyway.
        }
      DEBUG_ONLY(})
      return true;
    }
  }
  scanOopsInOop(addr);
  return true;
}

// We take a break if we've been at this for a while,
// so as to avoid monopolizing the locks involved.
void MarkFromRootsClosure::do_yield_work() {
  // First give up the locks, then yield, then re-lock
  // We should probably use a constructor/destructor idiom to
  // do this unlock/lock or modify the MutexUnlocker class to
  // serve our purpose. XXX
  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "CMS thread should hold CMS token");
  assert_lock_strong(_bitMap->lock());
  DEBUG_ONLY(RememberKlassesChecker mux(false);)
  _bitMap->lock()->unlock();
  ConcurrentMarkSweepThread::desynchronize(true);
  ConcurrentMarkSweepThread::acknowledge_yield_request();
  _collector->stopTimer();
  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
  if (PrintCMSStatistics != 0) {
    _collector->incrementYields();
  }
  _collector->icms_wait();

  // See the comment in coordinator_yield()
  for (unsigned i = 0; i < CMSYieldSleepCount &&
                       ConcurrentMarkSweepThread::should_yield() &&
                       !CMSCollector::foregroundGCIsActive(); ++i) {
    os::sleep(Thread::current(), 1, false);
    ConcurrentMarkSweepThread::acknowledge_yield_request();
  }

  ConcurrentMarkSweepThread::synchronize(true);
  _bitMap->lock()->lock_without_safepoint_check();
  _collector->startTimer();
}

void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
  assert(_bitMap->isMarked(ptr), "expected bit to be set");
  assert(_markStack->isEmpty(),
         "should drain stack to limit stack usage");
  // convert ptr to an oop preparatory to scanning
  oop obj = oop(ptr);
  // Ignore mark word in verification below, since we
  // may be running concurrent with mutators.
  assert(obj->is_oop(true), "should be an oop");
  assert(_finger <= ptr, "_finger runneth ahead");
  // advance the finger to right end of this object
  _finger = ptr + obj->size();
  assert(_finger > ptr, "we just incremented it above");
  // On large heaps, it may take us some time to get through
  // the marking phase (especially if running iCMS). During
  // this time it's possible that a lot of mutations have
  // accumulated in the card table and the mod union table --
  // these mutation records are redundant until we have
  // actually traced into the corresponding card.
  // Here, we check whether advancing the finger would make
  // us cross into a new card, and if so clear corresponding
  // cards in the MUT (preclean them in the card-table in the
  // future).

  DEBUG_ONLY(if (!_verifying) {)
    // The clean-on-enter optimization is disabled by default,
    // until we fix 6178663.
    if (CMSCleanOnEnter && (_finger > _threshold)) {
      // [_threshold, _finger) represents the interval
      // of cards to be cleared in MUT (or precleaned in card table).
      // The set of cards to be cleared is all those that overlap
      // with the interval [_threshold, _finger); note that
      // _threshold is always kept card-aligned but _finger isn't
      // always card-aligned.
      HeapWord* old_threshold = _threshold;
      assert(old_threshold == (HeapWord*)round_to(
               (intptr_t)old_threshold, CardTableModRefBS::card_size),
             "_threshold should always be card-aligned");
      _threshold = (HeapWord*)round_to(
                     (intptr_t)_finger, CardTableModRefBS::card_size);
      MemRegion mr(old_threshold, _threshold);
      assert(!mr.is_empty(), "Control point invariant");
      assert(_span.contains(mr), "Should clear within span");
      // XXX When _finger crosses from old gen into perm gen
      // we may be doing unnecessary cleaning; do better in the
      // future by detecting that condition and clearing fewer
      // MUT/CT entries.
      _mut->clear_range(mr);
    }
  DEBUG_ONLY(})
  // Note: the finger doesn't advance while we drain
  // the stack below.
  PushOrMarkClosure pushOrMarkClosure(_collector,
                                      _span, _bitMap, _markStack,
                                      _revisitStack,
                                      _finger, this);
  bool res = _markStack->push(obj);
  assert(res, "Empty non-zero size stack should have space for single push");
  while (!_markStack->isEmpty()) {
    oop new_oop = _markStack->pop();
    // Skip verifying header mark word below because we are
    // running concurrent with mutators.
    assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
    // now scan this oop's oops
    new_oop->oop_iterate(&pushOrMarkClosure);
    do_yield_check();
  }
  assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
}
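
// ---------------------------------------------------------------------------
// [Editorial sketch, not part of the VM sources.] The interval cleared above
// is [old_threshold, new_threshold), where the new threshold is the finger
// rounded up to a card boundary. Standalone form of just that bookkeeping,
// with an illustrative 512-byte card:

#include <cstdint>
#include <utility>

namespace cms_sketch {

constexpr uintptr_t kCard = 512;

inline uintptr_t align_up_to_card(uintptr_t a) {
  return (a + kCard - 1) & ~(kCard - 1);
}

// Returns the half-open byte interval of mod-union-table cards to clear when
// the finger has advanced past the current (card-aligned) threshold.
inline std::pair<uintptr_t, uintptr_t>
cards_to_clear(uintptr_t threshold, uintptr_t finger) {
  if (finger <= threshold) {
    return {threshold, threshold};   // finger still on this card: clear nothing
  }
  return {threshold, align_up_to_card(finger)};
}

} // namespace cms_sketch
// ---------------------------------------------------------------------------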

Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
                        CMSCollector* collector, MemRegion span,
                        CMSBitMap* bit_map,
                        OopTaskQueue* work_queue,
                        CMSMarkStack*  overflow_stack,
                        CMSMarkStack*  revisit_stack,
                        bool should_yield):
  _collector(collector),
  _whole_span(collector->_span),
  _span(span),
  _bit_map(bit_map),
  _mut(&collector->_modUnionTable),
  _work_queue(work_queue),
  _overflow_stack(overflow_stack),
  _revisit_stack(revisit_stack),
  _yield(should_yield),
  _skip_bits(0),
  _task(task)
{
  assert(_work_queue->size() == 0, "work_queue should be empty");
  _finger = span.start();
  _threshold = _finger;     // XXX Defer clear-on-enter optimization for now
  assert(_span.contains(_finger), "Out of bounds _finger?");
}

// Should revisit to see if this should be restructured for
// greater efficiency.
bool Par_MarkFromRootsClosure::do_bit(size_t offset) {
  if (_skip_bits > 0) {
    _skip_bits--;
    return true;
  }
  // convert offset into a HeapWord*
  HeapWord* addr = _bit_map->startWord() + offset;
  assert(_bit_map->endWord() && addr < _bit_map->endWord(),
         "address out of range");
  assert(_bit_map->isMarked(addr), "tautology");
  if (_bit_map->isMarked(addr+1)) {
    // this is an allocated object that might not yet be initialized
    assert(_skip_bits == 0, "tautology");
    _skip_bits = 2;  // skip next two marked bits ("Printezis-marks")
    oop p = oop(addr);
    if (p->klass_or_null() == NULL || !p->is_parsable()) {
      // in the case of Clean-on-Enter optimization, redirty card
      // and avoid clearing card by increasing the threshold.
      return true;
    }
  }
  scan_oops_in_oop(addr);
  return true;
}

void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
  assert(_bit_map->isMarked(ptr), "expected bit to be set");
  // Should we assert that our work queue is empty or
  // below some drain limit?
  assert(_work_queue->size() == 0,
         "should drain stack to limit stack usage");
  // convert ptr to an oop preparatory to scanning
  oop obj = oop(ptr);
  // Ignore mark word in verification below, since we
  // may be running concurrent with mutators.
  assert(obj->is_oop(true), "should be an oop");
  assert(_finger <= ptr, "_finger runneth ahead");
  // advance the finger to right end of this object
  _finger = ptr + obj->size();
  assert(_finger > ptr, "we just incremented it above");
  // On large heaps, it may take us some time to get through
  // the marking phase (especially if running iCMS). During
  // this time it's possible that a lot of mutations have
  // accumulated in the card table and the mod union table --
  // these mutation records are redundant until we have
  // actually traced into the corresponding card.
  // Here, we check whether advancing the finger would make
  // us cross into a new card, and if so clear corresponding
  // cards in the MUT (preclean them in the card-table in the
  // future).

  // The clean-on-enter optimization is disabled by default,
  // until we fix 6178663.
  if (CMSCleanOnEnter && (_finger > _threshold)) {
    // [_threshold, _finger) represents the interval
    // of cards to be cleared in MUT (or precleaned in card table).
    // The set of cards to be cleared is all those that overlap
    // with the interval [_threshold, _finger); note that
    // _threshold is always kept card-aligned but _finger isn't
    // always card-aligned.
    HeapWord* old_threshold = _threshold;
    assert(old_threshold == (HeapWord*)round_to(
             (intptr_t)old_threshold, CardTableModRefBS::card_size),
           "_threshold should always be card-aligned");
    _threshold = (HeapWord*)round_to(
                   (intptr_t)_finger, CardTableModRefBS::card_size);
    MemRegion mr(old_threshold, _threshold);
    assert(!mr.is_empty(), "Control point invariant");
    assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
    // XXX When _finger crosses from old gen into perm gen
    // we may be doing unnecessary cleaning; do better in the
    // future by detecting that condition and clearing fewer
    // MUT/CT entries.
    _mut->clear_range(mr);
  }

  // Note: the local finger doesn't advance while we drain
  // the stack below, but the global finger sure can and will.
  HeapWord** gfa = _task->global_finger_addr();
  Par_PushOrMarkClosure pushOrMarkClosure(_collector,
                                          _span, _bit_map,
                                          _work_queue,
                                          _overflow_stack,
                                          _revisit_stack,
                                          _finger,
                                          gfa, this);
  bool res = _work_queue->push(obj);   // overflow could occur here
  assert(res, "Will hold once we use workqueues");
  while (true) {
    oop new_oop;
    if (!_work_queue->pop_local(new_oop)) {
      // We emptied our work_queue; check if there's stuff that can
      // be gotten from the overflow stack.
      if (CMSConcMarkingTask::get_work_from_overflow_stack(
            _overflow_stack, _work_queue)) {
        do_yield_check();
        continue;
      } else {  // done
        break;
      }
    }
    // Skip verifying header mark word below because we are
    // running concurrent with mutators.
    assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
    // now scan this oop's oops
    new_oop->oop_iterate(&pushOrMarkClosure);
    do_yield_check();
  }
  assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
}
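
// ---------------------------------------------------------------------------
// [Editorial sketch, not part of the VM sources.] The drain loop above prefers
// the thread-local work queue and refills it from the shared overflow stack
// only when the queue runs dry. A standalone single-threaded model of that
// control flow; std::deque and std::vector stand in for OopTaskQueue and
// CMSMarkStack.

#include <deque>
#include <vector>

namespace cms_sketch {

template <typename T, typename ScanFn>
void drain_with_overflow(std::deque<T>& local,
                         std::vector<T>& overflow,
                         ScanFn scan) {
  while (true) {
    if (local.empty()) {
      if (overflow.empty()) {
        break;                          // both sources empty: done
      }
      local.push_back(overflow.back()); // refill from the shared overflow
      overflow.pop_back();
      continue;
    }
    T item = local.back();
    local.pop_back();
    scan(item);                         // may push new work onto 'local'
  }
}

} // namespace cms_sketch
// ---------------------------------------------------------------------------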

// Yield in response to a request from VM Thread or
// from mutators.
void Par_MarkFromRootsClosure::do_yield_work() {
  assert(_task != NULL, "sanity");
  _task->yield();
}

// A variant of the above used for verifying CMS marking work.
MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
                        MemRegion span,
                        CMSBitMap* verification_bm, CMSBitMap* cms_bm,
                        CMSMarkStack*  mark_stack):
  _collector(collector),
  _span(span),
  _verification_bm(verification_bm),
  _cms_bm(cms_bm),
  _mark_stack(mark_stack),
  _pam_verify_closure(collector, span, verification_bm, cms_bm,
                      mark_stack)
{
  assert(_mark_stack->isEmpty(), "stack should be empty");
  _finger = _verification_bm->startWord();
  assert(_collector->_restart_addr == NULL, "Sanity check");
  assert(_span.contains(_finger), "Out of bounds _finger?");
}

void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
  assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
  assert(_span.contains(addr), "Out of bounds _finger?");
  _finger = addr;
}

// Should revisit to see if this should be restructured for
// greater efficiency.
bool MarkFromRootsVerifyClosure::do_bit(size_t offset) {
  // convert offset into a HeapWord*
  HeapWord* addr = _verification_bm->startWord() + offset;
  assert(_verification_bm->endWord() && addr < _verification_bm->endWord(),
         "address out of range");
  assert(_verification_bm->isMarked(addr), "tautology");
  assert(_cms_bm->isMarked(addr), "tautology");

  assert(_mark_stack->isEmpty(),
         "should drain stack to limit stack usage");
  // convert addr to an oop preparatory to scanning
  oop obj = oop(addr);
  assert(obj->is_oop(), "should be an oop");
  assert(_finger <= addr, "_finger runneth ahead");
  // advance the finger to right end of this object
  _finger = addr + obj->size();
  assert(_finger > addr, "we just incremented it above");
  // Note: the finger doesn't advance while we drain
  // the stack below.
  bool res = _mark_stack->push(obj);
  assert(res, "Empty non-zero size stack should have space for single push");
  while (!_mark_stack->isEmpty()) {
    oop new_oop = _mark_stack->pop();
    assert(new_oop->is_oop(), "Oops! expected to pop an oop");
    // now scan this oop's oops
    new_oop->oop_iterate(&_pam_verify_closure);
  }
  assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
  return true;
}

PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
  CMSCollector* collector, MemRegion span,
  CMSBitMap* verification_bm, CMSBitMap* cms_bm,
  CMSMarkStack*  mark_stack):
  OopClosure(collector->ref_processor()),
  _collector(collector),
  _span(span),
  _verification_bm(verification_bm),
  _cms_bm(cms_bm),
  _mark_stack(mark_stack)
{ }

void PushAndMarkVerifyClosure::do_oop(oop* p)       { PushAndMarkVerifyClosure::do_oop_work(p); }
void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }

// Upon stack overflow, we discard (part of) the stack,
// remembering the least address amongst those discarded
// in CMSCollector's _restart_addr.
void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
  // Remember the least grey address discarded
  HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
  _collector->lower_restart_addr(ra);
  _mark_stack->reset();  // discard stack contents
  _mark_stack->expand(); // expand the stack if possible
}

void PushAndMarkVerifyClosure::do_oop(oop obj) {
  assert(obj->is_oop_or_null(), "expected an oop or NULL");
  HeapWord* addr = (HeapWord*)obj;
  if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
    // Oop lies in _span and isn't yet grey or black
    _verification_bm->mark(addr);            // now grey
    if (!_cms_bm->isMarked(addr)) {
      oop(addr)->print();
      gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)",
                             addr);
      fatal("... aborting");
    }

    if (!_mark_stack->push(obj)) { // stack overflow
      if (PrintCMSStatistics != 0) {
        gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
                               SIZE_FORMAT, _mark_stack->capacity());
      }
      assert(_mark_stack->isFull(), "Else push should have succeeded");
      handle_stack_overflow(addr);
    }
    // anything including and to the right of _finger
    // will be scanned as we iterate over the remainder of the
    // bit map
  }
}

PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
                     MemRegion span,
                     CMSBitMap* bitMap, CMSMarkStack*  markStack,
                     CMSMarkStack*  revisitStack,
                     HeapWord* finger, MarkFromRootsClosure* parent) :
  KlassRememberingOopClosure(collector, collector->ref_processor(), revisitStack),
  _span(span),
  _bitMap(bitMap),
  _markStack(markStack),
  _finger(finger),
  _parent(parent)
{ }

Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
                     MemRegion span,
                     CMSBitMap* bit_map,
                     OopTaskQueue* work_queue,
                     CMSMarkStack*  overflow_stack,
                     CMSMarkStack*  revisit_stack,
                     HeapWord* finger,
                     HeapWord** global_finger_addr,
                     Par_MarkFromRootsClosure* parent) :
  Par_KlassRememberingOopClosure(collector,
                                 collector->ref_processor(),
                                 revisit_stack),
  _whole_span(collector->_span),
  _span(span),
  _bit_map(bit_map),
  _work_queue(work_queue),
  _overflow_stack(overflow_stack),
  _finger(finger),
  _global_finger_addr(global_finger_addr),
  _parent(parent)
{ }

// Assumes thread-safe access by callers, who are
// responsible for mutual exclusion.
void CMSCollector::lower_restart_addr(HeapWord* low) {
  assert(_span.contains(low), "Out of bounds addr");
  if (_restart_addr == NULL) {
    _restart_addr = low;
  } else {
    _restart_addr = MIN2(_restart_addr, low);
  }
}

// Upon stack overflow, we discard (part of) the stack,
// remembering the least address amongst those discarded
// in CMSCollector's _restart_addr.
void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
  // Remember the least grey address discarded
  HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
  _collector->lower_restart_addr(ra);
  _markStack->reset();  // discard stack contents
  _markStack->expand(); // expand the stack if possible
}

// Upon stack overflow, we discard (part of) the stack,
// remembering the least address amongst those discarded
// in CMSCollector's _restart_addr.
void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
  // We need to do this under a mutex to prevent other
  // workers from interfering with the work done below.
  MutexLockerEx ml(_overflow_stack->par_lock(),
                   Mutex::_no_safepoint_check_flag);
  // Remember the least grey address discarded
  HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
  _collector->lower_restart_addr(ra);
  _overflow_stack->reset();  // discard stack contents
  _overflow_stack->expand(); // expand the stack if possible
}
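
// ---------------------------------------------------------------------------
// [Editorial sketch, not part of the VM sources.] On overflow the closures
// above throw grey entries away but record the smallest discarded address, so
// marking can later restart the bit-map scan from there and re-discover
// everything that was dropped. Standalone model of that recovery bookkeeping:

#include <algorithm>
#include <cstdint>
#include <vector>

namespace cms_sketch {

struct MarkStackModel {
  std::vector<uintptr_t> entries;          // addresses of grey objects
  uintptr_t restart_addr = UINTPTR_MAX;    // least address ever discarded

  void handle_overflow() {
    if (entries.empty()) return;
    uintptr_t least = *std::min_element(entries.begin(), entries.end());
    restart_addr = std::min(restart_addr, least);  // lower_restart_addr()
    entries.clear();                  // discard; a later pass resumes at
  }                                   // restart_addr and re-greys the rest
};

} // namespace cms_sketch
// ---------------------------------------------------------------------------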

void PushOrMarkClosure::do_oop(oop obj) {
  // Ignore mark word because we are running concurrent with mutators.
  assert(obj->is_oop_or_null(true), "expected an oop or NULL");
  HeapWord* addr = (HeapWord*)obj;
  if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
    // Oop lies in _span and isn't yet grey or black
    _bitMap->mark(addr);            // now grey
    if (addr < _finger) {
      // the bit map iteration has already either passed, or
      // sampled, this bit in the bit map; we'll need to
      // use the marking stack to scan this oop's oops.
      bool simulate_overflow = false;
      NOT_PRODUCT(
        if (CMSMarkStackOverflowALot &&
            _collector->simulate_overflow()) {
          // simulate a stack overflow
          simulate_overflow = true;
        }
      )
      if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
        if (PrintCMSStatistics != 0) {
          gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
                                 SIZE_FORMAT, _markStack->capacity());
        }
        assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
        handle_stack_overflow(addr);
      }
    }
    // anything including and to the right of _finger
    // will be scanned as we iterate over the remainder of the
    // bit map
    do_yield_check();
  }
}
7596 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
7597 void PushOrMarkClosure::do_oop(oop* p) { PushOrMarkClosure::do_oop_work(p); } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
7598 void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
7599 |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
7600 void Par_PushOrMarkClosure::do_oop(oop obj) { |
0 | 7601 // Ignore mark word because we are running concurrent with mutators. |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
7602 assert(obj->is_oop_or_null(true), "expected an oop or NULL"); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
7603 HeapWord* addr = (HeapWord*)obj; |
0 | 7604 if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) { |
7605 // Oop lies in _span and isn't yet grey or black | |
7606 // We read the global_finger (volatile read) strictly after marking oop | |
7607 bool res = _bit_map->par_mark(addr); // now grey | |
7608 volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr; | |
7609 // Should we push this marked oop on our stack? | |
7610 // -- if someone else marked it, nothing to do | |
7611 // -- if target oop is above global finger nothing to do | |
7612 // -- if target oop is in chunk and above local finger | |
7613 // then nothing to do | |
7614 // -- else push on work queue | |
7615 if ( !res // someone else marked it, they will deal with it | |
7616 || (addr >= *gfa) // will be scanned in a later task | |
7617 || (_span.contains(addr) && addr >= _finger)) { // later in this chunk | |
7618 return; | |
7619 } | |
7620 // the bit map iteration has already either passed, or | |
7621 // sampled, this bit in the bit map; we'll need to | |
7622 // use the marking stack to scan this oop's oops. | |
7623 bool simulate_overflow = false; | |
7624 NOT_PRODUCT( | |
7625 if (CMSMarkStackOverflowALot && | |
7626 _collector->simulate_overflow()) { | |
7627 // simulate a stack overflow | |
7628 simulate_overflow = true; | |
7629 } | |
7630 ) | |
7631 if (simulate_overflow || | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
7632 !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) { |
0 | 7633 // stack overflow |
7634 if (PrintCMSStatistics != 0) { | |
7635 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at " | |
7636 SIZE_FORMAT, _overflow_stack->capacity()); | |
7637 } | |
7638 // We cannot assert that the overflow stack is full because | |
7639 // it may have been emptied since. | |
7640 assert(simulate_overflow || | |
7641 _work_queue->size() == _work_queue->max_elems(), | |
7642 "Else push should have succeeded"); | |
7643 handle_stack_overflow(addr); | |
7644 } | |
7645 do_yield_check(); | |
7646 } | |
7647 } | |
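
// ---------------------------------------------------------------------------
// [Editorial sketch, not part of the VM sources.] The three early-return tests
// above reduce to one predicate: a newly claimed oop is pushed only if the
// bit-map iteration has already passed it, both globally and within this
// worker's chunk. Standalone form of that decision:

#include <cstdint>

namespace cms_sketch {

// claimed:     this thread won the par_mark() race
// in_my_chunk: the oop lies in this worker's assigned span
inline bool should_push(bool claimed, bool in_my_chunk,
                        uintptr_t addr,
                        uintptr_t global_finger,
                        uintptr_t local_finger) {
  if (!claimed)              return false;  // another worker will handle it
  if (addr >= global_finger) return false;  // a later task will scan it
  if (in_my_chunk && addr >= local_finger) {
    return false;                           // later in this very chunk
  }
  return true;                              // already passed: must push
}

} // namespace cms_sketch
// ---------------------------------------------------------------------------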

void Par_PushOrMarkClosure::do_oop(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }

KlassRememberingOopClosure::KlassRememberingOopClosure(CMSCollector* collector,
                                             ReferenceProcessor* rp,
                                             CMSMarkStack* revisit_stack) :
  OopClosure(rp),
  _collector(collector),
  _revisit_stack(revisit_stack),
  _should_remember_klasses(collector->should_unload_classes()) {}

PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
                                       MemRegion span,
                                       ReferenceProcessor* rp,
                                       CMSBitMap* bit_map,
                                       CMSBitMap* mod_union_table,
                                       CMSMarkStack*  mark_stack,
                                       CMSMarkStack*  revisit_stack,
                                       bool           concurrent_precleaning):
  KlassRememberingOopClosure(collector, rp, revisit_stack),
  _span(span),
  _bit_map(bit_map),
  _mod_union_table(mod_union_table),
  _mark_stack(mark_stack),
  _concurrent_precleaning(concurrent_precleaning)
{
  assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
}
7677 | |
7678 // Grey object rescan during pre-cleaning and second checkpoint phases -- | |
7679 // the non-parallel version (the parallel version appears further below.) | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
7680 void PushAndMarkClosure::do_oop(oop obj) { |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
143
diff
changeset
|
7681 // Ignore mark word verification. If during concurrent precleaning, |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
143
diff
changeset
|
7682 // the object monitor may be locked. If during the checkpoint |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
143
diff
changeset
|
7683 // phases, the object may already have been reached by a different |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
143
diff
changeset
|
7684 // path and may be at the end of the global overflow list (so |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
143
diff
changeset
|
7685 // the mark word may be NULL). |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
143
diff
changeset
|
7686 assert(obj->is_oop_or_null(true /* ignore mark word */), |
0 | 7687 "expected an oop or NULL"); |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
7688 HeapWord* addr = (HeapWord*)obj; |
0 | 7689 // Check if oop points into the CMS generation |
7690 // and is not marked | |
7691 if (_span.contains(addr) && !_bit_map->isMarked(addr)) { | |
7692 // a white object ... | |
7693 _bit_map->mark(addr); // ... now grey | |
7694 // push on the marking stack (grey set) | |
7695 bool simulate_overflow = false; | |
7696 NOT_PRODUCT( | |
7697 if (CMSMarkStackOverflowALot && | |
7698 _collector->simulate_overflow()) { | |
7699 // simulate a stack overflow | |
7700 simulate_overflow = true; | |
7701 } | |
7702 ) | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
7703 if (simulate_overflow || !_mark_stack->push(obj)) { |
0 | 7704 if (_concurrent_precleaning) { |
283 | 7705 // During precleaning we can just dirty the appropriate card(s) |
0 | 7706 // in the mod union table, thus ensuring that the object remains |
283 | 7707 // in the grey set and continue. In the case of object arrays |
7708 // we need to dirty all of the cards that the object spans, |
7709 // since the rescan of object arrays will be limited to the |
7710 // dirty cards. |
7711 // Note that no one can be interfering with us in this action |
7712 // of dirtying the mod union table, so no locking or atomics |
7713 // are required. |
7714 if (obj->is_objArray()) { |
7715 size_t sz = obj->size(); |
7716 HeapWord* end_card_addr = (HeapWord*)round_to( |
7717 (intptr_t)(addr+sz), CardTableModRefBS::card_size); |
7718 MemRegion redirty_range = MemRegion(addr, end_card_addr); |
7719 assert(!redirty_range.is_empty(), "Arithmetical tautology"); |
7720 _mod_union_table->mark_range(redirty_range); |
7721 } else { |
7722 _mod_union_table->mark(addr); |
7723 } |
0 | 7724 _collector->_ser_pmc_preclean_ovflw++; |
7725 } else { | |
7726 // During the remark phase, we need to remember this oop | |
7727 // in the overflow list. | |
113 | 7728 _collector->push_on_overflow_list(obj); |
0 | 7729 _collector->_ser_pmc_remark_ovflw++; |
7730 } | |
7731 } | |
7732 } | |
7733 } | |
7734 | |
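The card rounding done for overflowed object arrays above is worth seeing in isolation. Below is a minimal standalone sketch of the same arithmetic, assuming 512-byte cards (the usual CardTableModRefBS default; an assumption here, not taken from this file) and working in plain bytes, with round_to_card() standing in for the VM's round_to().

// Sketch only: models the redirty-range computation with plain integers.
#include <cassert>
#include <cstdint>
#include <cstdio>

static const uintptr_t card_size = 512; // assumed default card size, in bytes

// Round an address up to the next card boundary (what round_to() does above).
static uintptr_t round_to_card(uintptr_t addr) {
  return (addr + card_size - 1) & ~(card_size - 1);
}

int main() {
  uintptr_t obj_start = 0x10237; // object base address (made-up example)
  uintptr_t obj_bytes = 1400;    // an array spanning several cards
  uintptr_t end_card_addr = round_to_card(obj_start + obj_bytes);
  // Dirtying [obj_start, end_card_addr) covers every card the array touches,
  // so a rescan that is limited to dirty cards cannot miss any element.
  assert(end_card_addr > obj_start); // the "arithmetical tautology" above
  printf("dirty cards from %#lx to %#lx\n",
         (unsigned long)obj_start, (unsigned long)end_card_addr);
  return 0;
}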
7735 Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector, | |
7736 MemRegion span, | |
7737 ReferenceProcessor* rp, | |
7738 CMSBitMap* bit_map, | |
7739 OopTaskQueue* work_queue, | |
7740 CMSMarkStack* revisit_stack): | |
935 | 7741 Par_KlassRememberingOopClosure(collector, rp, revisit_stack), |
0 | 7742 _span(span), |
7743 _bit_map(bit_map), | |
935 | 7744 _work_queue(work_queue) |
0 | 7745 { |
7746 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL"); | |
7747 } | |
7748 | |
113 | 7749 void PushAndMarkClosure::do_oop(oop* p) { PushAndMarkClosure::do_oop_work(p); } |
7750 void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); } |
7751 |
0 | 7752 // Grey object rescan during second checkpoint phase -- |
7753 // the parallel version. | |
113 | 7754 void Par_PushAndMarkClosure::do_oop(oop obj) { |
0 | 7755 // In the assert below, we ignore the mark word because |
7756 // this oop may point to an already visited object that is | |
7757 // on the overflow stack (in which case the mark word has | |
7758 // been hijacked for chaining into the overflow stack -- | |
7759 // if this is the last object in the overflow stack then | |
7760 // its mark word will be NULL). Because this object may | |
7761 // have been subsequently popped off the global overflow | |
7762 // stack, and the mark word possibly restored to the prototypical | |
7763 // value, by the time we get to examine this failing assert in |
7764 // the debugger, is_oop_or_null(false) may subsequently start | |
7765 // to hold. | |
113 | 7766 assert(obj->is_oop_or_null(true), |
0 | 7767 "expected an oop or NULL"); |
113 | 7768 HeapWord* addr = (HeapWord*)obj; |
0 | 7769 // Check if oop points into the CMS generation |
7770 // and is not marked | |
7771 if (_span.contains(addr) && !_bit_map->isMarked(addr)) { | |
7772 // a white object ... | |
7773 // If we manage to "claim" the object, by being the | |
7774 // first thread to mark it, then we push it on our | |
7775 // marking stack | |
7776 if (_bit_map->par_mark(addr)) { // ... now grey | |
7777 // push on work queue (grey set) | |
7778 bool simulate_overflow = false; | |
7779 NOT_PRODUCT( | |
7780 if (CMSMarkStackOverflowALot && | |
7781 _collector->par_simulate_overflow()) { | |
7782 // simulate a stack overflow | |
7783 simulate_overflow = true; | |
7784 } | |
7785 ) | |
113 | 7786 if (simulate_overflow || !_work_queue->push(obj)) { |
7787 _collector->par_push_on_overflow_list(obj); |
0 | 7788 _collector->_par_pmc_remark_ovflw++; // imprecise OK: no need to CAS |
7789 } | |
7790 } // Else, some other thread got there first | |
7791 } | |
7792 } | |
7793 | |
113 | 7794 void Par_PushAndMarkClosure::do_oop(oop* p) { Par_PushAndMarkClosure::do_oop_work(p); } |
7795 void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); } |
7796 |
941 | 7797 void PushAndMarkClosure::remember_mdo(DataLayout* v) { |
7798 // TBD | |
7799 } | |
7800 | |
7801 void Par_PushAndMarkClosure::remember_mdo(DataLayout* v) { | |
7802 // TBD | |
7803 } | |
7804 | |
0 | 7805 void CMSPrecleanRefsYieldClosure::do_yield_work() { |
935 | 7806 DEBUG_ONLY(RememberKlassesChecker mux(false);) |
0 | 7807 Mutex* bml = _collector->bitMapLock(); |
7808 assert_lock_strong(bml); | |
7809 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), | |
7810 "CMS thread should hold CMS token"); | |
7811 | |
7812 bml->unlock(); | |
7813 ConcurrentMarkSweepThread::desynchronize(true); | |
7814 | |
7815 ConcurrentMarkSweepThread::acknowledge_yield_request(); | |
7816 | |
7817 _collector->stopTimer(); | |
7818 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr()); | |
7819 if (PrintCMSStatistics != 0) { | |
7820 _collector->incrementYields(); | |
7821 } | |
7822 _collector->icms_wait(); | |
7823 | |
7824 // See the comment in coordinator_yield() | |
7825 for (unsigned i = 0; i < CMSYieldSleepCount && | |
7826 ConcurrentMarkSweepThread::should_yield() && | |
7827 !CMSCollector::foregroundGCIsActive(); ++i) { | |
7828 os::sleep(Thread::current(), 1, false); | |
7829 ConcurrentMarkSweepThread::acknowledge_yield_request(); | |
7830 } | |
7831 | |
7832 ConcurrentMarkSweepThread::synchronize(true); | |
7833 bml->lock(); | |
7834 | |
7835 _collector->startTimer(); | |
7836 } | |
7837 | |
7838 bool CMSPrecleanRefsYieldClosure::should_return() { | |
7839 if (ConcurrentMarkSweepThread::should_yield()) { | |
7840 do_yield_work(); | |
7841 } | |
7842 return _collector->foregroundGCIsActive(); | |
7843 } | |
7844 | |
7845 void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) { | |
7846 assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0, | |
7847 "mr should be aligned to start at a card boundary"); | |
7848 // We'd like to assert: | |
7849 // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0, | |
7850 // "mr should be a range of cards"); | |
7851 // However, that would be too strong in one case -- the last | |
7852 // partition ends at _unallocated_block which, in general, can be | |
7853 // an arbitrary boundary, not necessarily card aligned. | |
7854 if (PrintCMSStatistics != 0) { | |
7855 _num_dirty_cards += | |
7856 mr.word_size()/CardTableModRefBS::card_size_in_words; | |
7857 } | |
7858 _space->object_iterate_mem(mr, &_scan_cl); | |
7859 } | |
7860 | |
7861 SweepClosure::SweepClosure(CMSCollector* collector, | |
7862 ConcurrentMarkSweepGeneration* g, | |
7863 CMSBitMap* bitMap, bool should_yield) : | |
7864 _collector(collector), | |
7865 _g(g), | |
7866 _sp(g->cmsSpace()), | |
7867 _limit(_sp->sweep_limit()), | |
7868 _freelistLock(_sp->freelistLock()), | |
7869 _bitMap(bitMap), | |
7870 _yield(should_yield), | |
7871 _inFreeRange(false), // No free range at beginning of sweep | |
7872 _freeRangeInFreeLists(false), // No free range at beginning of sweep | |
7873 _lastFreeRangeCoalesced(false), | |
7874 _freeFinger(g->used_region().start()) | |
7875 { | |
7876 NOT_PRODUCT( | |
7877 _numObjectsFreed = 0; | |
7878 _numWordsFreed = 0; | |
7879 _numObjectsLive = 0; | |
7880 _numWordsLive = 0; | |
7881 _numObjectsAlreadyFree = 0; | |
7882 _numWordsAlreadyFree = 0; | |
7883 _last_fc = NULL; | |
7884 | |
7885 _sp->initializeIndexedFreeListArrayReturnedBytes(); | |
7886 _sp->dictionary()->initializeDictReturnedBytes(); | |
7887 ) | |
7888 assert(_limit >= _sp->bottom() && _limit <= _sp->end(), | |
7889 "sweep _limit out of bounds"); | |
7890 if (CMSTraceSweeper) { | |
3405 | 7891 gclog_or_tty->print_cr("\n====================\nStarting new sweep with limit " PTR_FORMAT, |
7892 _limit); |
7893 } |
7894 } |
7895 |
7896 void SweepClosure::print_on(outputStream* st) const { |
7897 tty->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")", |
7898 _sp->bottom(), _sp->end()); |
7899 tty->print_cr("_limit = " PTR_FORMAT, _limit); |
7900 tty->print_cr("_freeFinger = " PTR_FORMAT, _freeFinger); |
7901 NOT_PRODUCT(tty->print_cr("_last_fc = " PTR_FORMAT, _last_fc);) |
7902 tty->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d", |
7903 _inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced); |
7904 } |
7905 |
7906 #ifndef PRODUCT |
7907 // Assertion checking only: no useful work in product mode -- |
7908 // however, if any of the flags below become product flags, |
7909 // you may need to review this code to see if it needs to be |
7910 // enabled in product mode. |
0 | 7911 SweepClosure::~SweepClosure() { |
7912 assert_lock_strong(_freelistLock); |
2132 | 7913 assert(_limit >= _sp->bottom() && _limit <= _sp->end(), |
7914 "sweep _limit out of bounds"); |
0 | 7915 if (inFreeRange()) { |
3405 | 7916 warning("inFreeRange() should have been reset; dumping state of SweepClosure"); |
7917 print(); |
7918 ShouldNotReachHere(); |
7919 } |
7920 if (Verbose && PrintGC) { |
7921 gclog_or_tty->print("Collected "SIZE_FORMAT" objects, " SIZE_FORMAT " bytes", |
7922 _numObjectsFreed, _numWordsFreed*sizeof(HeapWord)); |
7923 gclog_or_tty->print_cr("\nLive "SIZE_FORMAT" objects, " |
7924 SIZE_FORMAT" bytes " |
7925 "Already free "SIZE_FORMAT" objects, "SIZE_FORMAT" bytes", |
7926 _numObjectsLive, _numWordsLive*sizeof(HeapWord), |
7927 _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord)); |
7928 size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) |
7929 * sizeof(HeapWord); |
7930 gclog_or_tty->print_cr("Total sweep: "SIZE_FORMAT" bytes", totalBytes); |
7931 |
7932 if (PrintCMSStatistics && CMSVerifyReturnedBytes) { |
7933 size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes(); |
7934 size_t dictReturnedBytes = _sp->dictionary()->sumDictReturnedBytes(); |
7935 size_t returnedBytes = indexListReturnedBytes + dictReturnedBytes; |
7936 gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returnedBytes); |
7937 gclog_or_tty->print(" Indexed List Returned "SIZE_FORMAT" bytes", |
7938 indexListReturnedBytes); |
7939 gclog_or_tty->print_cr(" Dictionary Returned "SIZE_FORMAT" bytes", |
7940 dictReturnedBytes); |
7941 } |
7942 } |
0 | 7943 if (CMSTraceSweeper) { |
3405 | 7944 gclog_or_tty->print_cr("end of sweep with _limit = " PTR_FORMAT "\n================", |
7945 _limit); |
7946 } |
7947 } |
7948 #endif // PRODUCT |
0 | 7949 |
7950 void SweepClosure::initialize_free_range(HeapWord* freeFinger, | |
7951 bool freeRangeInFreeLists) { | |
7952 if (CMSTraceSweeper) { | |
2132 | 7953 gclog_or_tty->print("---- Start free range at 0x%x with free block (%d)\n", |
7954 freeFinger, freeRangeInFreeLists); |
0 | 7955 } |
7956 assert(!inFreeRange(), "Trampling existing free range"); | |
7957 set_inFreeRange(true); | |
7958 set_lastFreeRangeCoalesced(false); | |
7959 | |
7960 set_freeFinger(freeFinger); | |
7961 set_freeRangeInFreeLists(freeRangeInFreeLists); | |
7962 if (CMSTestInFreeList) { | |
7963 if (freeRangeInFreeLists) { | |
7964 FreeChunk* fc = (FreeChunk*) freeFinger; | |
7965 assert(fc->isFree(), "A chunk on the free list should be free."); | |
7966 assert(fc->size() > 0, "Free range should have a size"); | |
7967 assert(_sp->verifyChunkInFreeLists(fc), "Chunk is not in free lists"); | |
7968 } | |
7969 } | |
7970 } | |
7971 | |
7972 // Note that the sweeper runs concurrently with mutators. Thus, | |
7973 // it is possible for direct allocation in this generation to happen | |
7974 // in the middle of the sweep. Note that the sweeper also coalesces | |
7975 // contiguous free blocks. Thus, unless the sweeper and the allocator | |
7976 // synchronize appropriately, freshly allocated blocks may get swept up. |
7977 // This is accomplished by the sweeper locking the free lists while | |
7978 // it is sweeping. Thus blocks that are determined to be free are | |
7979 // indeed free. There is however one additional complication: | |
7980 // blocks that have been allocated since the final checkpoint and | |
7981 // mark, will not have been marked and so would be treated as | |
7982 // unreachable and swept up. To prevent this, the allocator marks | |
7983 // the bit map when allocating during the sweep phase. This leads, | |
7984 // however, to a further complication -- objects may have been allocated | |
7985 // but not yet initialized -- in the sense that the header isn't yet | |
7986 // installed. The sweeper cannot then determine the size of the block |
7987 // in order to skip over it. To deal with this case, we use a technique | |
7988 // (due to Printezis) to encode such uninitialized block sizes in the | |
7989 // bit map. Since the bit map uses a bit per every HeapWord, but the | |
7990 // CMS generation has a minimum object size of 3 HeapWords, it follows | |
7991 // that "normal marks" won't be adjacent in the bit map (there will | |
7992 // always be at least two 0 bits between successive 1 bits). We make use | |
7993 // of these "unused" bits to represent uninitialized blocks -- the bit | |
7994 // corresponding to the start of the uninitialized object and the next | |
7995 // bit are both set. Finally, a 1 bit marks the end of the object that | |
7996 // started with the two consecutive 1 bits to indicate its potentially | |
7997 // uninitialized state. | |
7998 | |
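The encoding just described is easy to model with an ordinary bit vector. The sketch below is illustrative only, not VM code: bit indices stand in for HeapWord offsets, and the decode step mirrors the size computation that do_live_chunk() performs further down.

// Toy model of Printezis marks: bits k and k+1 both set means "object at k
// whose header is not yet parsable"; one more bit at the object's last word
// records where it ends, so the size is recoverable without the header.
#include <cassert>
#include <cstddef>
#include <vector>

typedef std::vector<bool> BitMap; // stands in for the CMS marking bit map

// Encode: uninitialized block at word offset k, 'size' words long (size >= 3).
static void mark_uninitialized(BitMap& bm, size_t k, size_t size) {
  assert(size >= 3);        // min object size; keeps normal marks non-adjacent
  bm[k]            = true;  // the usual "start of object" mark ...
  bm[k + 1]        = true;  // ... plus the adjacent bit: header not installed
  bm[k + size - 1] = true;  // end marker at the last word of the block
}

// Decode: given start k with bm[k+1] also set, recover the block size.
static size_t printezis_size(const BitMap& bm, size_t k) {
  size_t end = k + 2;
  while (!bm[end]) end++;   // first set bit at or after k+2 is the end marker
  return end + 1 - k;       // same arithmetic as do_live_chunk() below
}

int main() {
  BitMap bm(64, false);
  mark_uninitialized(bm, 10, 7); // 7-word block at offset 10
  assert(printezis_size(bm, 10) == 7);
  return 0;
}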
7999 size_t SweepClosure::do_blk_careful(HeapWord* addr) { | |
8000 FreeChunk* fc = (FreeChunk*)addr; | |
8001 size_t res; | |
8002 | |
1720 | 8003 // Check if we are done sweeping. Below we check "addr >= _limit" rather |
8004 // than "addr == _limit" because although _limit was a block boundary when |
8005 // we started the sweep, it may no longer be one because heap expansion |
8006 // may have caused us to coalesce the block ending at the address _limit |
8007 // with a newly expanded chunk (this happens when _limit was set to the |
3405 | 8008 // previous _end of the space), so we may have stepped past _limit: |
8009 // see the following Zeno-like trail of CRs 6977970, 7008136, 7042740. |
2132 | 8010 if (addr >= _limit) { // we have swept up to or past the limit: finish up |
0 | 8011 assert(_limit >= _sp->bottom() && _limit <= _sp->end(), |
8012 "sweep _limit out of bounds"); | |
1720 | 8013 assert(addr < _sp->end(), "addr out of bounds"); |
3405 | 8014 // Flush any free range we might be holding as a single |
2132 | 8015 // coalesced chunk to the appropriate free list. |
8016 if (inFreeRange()) { |
3405 | 8017 assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit, |
8018 err_msg("freeFinger() " PTR_FORMAT" is out-of-bounds", freeFinger())); |
2132 | 8019 flush_cur_free_chunk(freeFinger(), |
8020 pointer_delta(addr, freeFinger())); |
8021 if (CMSTraceSweeper) { |
8022 gclog_or_tty->print("Sweep: last chunk: "); |
8023 gclog_or_tty->print("put_free_blk 0x%x ("SIZE_FORMAT") " |
8024 "[coalesced:"SIZE_FORMAT"]\n", |
8025 freeFinger(), pointer_delta(addr, freeFinger()), |
8026 lastFreeRangeCoalesced()); |
8027 } |
8028 } |
8029 |
8030 // help the iterator loop finish |
1720 | 8031 return pointer_delta(_sp->end(), addr); |
8032 } |
2132 | 8033 |
1720 | 8034 assert(addr < _limit, "sweep invariant"); |
0 | 8035 // check if we should yield |
8036 do_yield_check(addr); | |
8037 if (fc->isFree()) { | |
8038 // Chunk that is already free | |
8039 res = fc->size(); | |
2132 | 8040 do_already_free_chunk(fc); |
0 | 8041 debug_only(_sp->verifyFreeLists()); |
3405 | 8042 // If we flush the chunk at hand in lookahead_and_flush() |
8043 // and it's coalesced with a preceding chunk, then the |
8044 // process of "mangling" the payload of the coalesced block |
8045 // will cause erasure of the size information from the |
8046 // (erstwhile) header of all the coalesced blocks but the |
8047 // first, so the first disjunct in the assert will not hold |
8048 // in that specific case (in which case the second disjunct |
8049 // will hold). |
8050 assert(res == fc->size() || ((HeapWord*)fc) + res >= _limit, |
8051 "Otherwise the size info doesn't change at this step"); |
0 | 8052 NOT_PRODUCT( |
8053 _numObjectsAlreadyFree++; | |
8054 _numWordsAlreadyFree += res; | |
8055 ) | |
8056 NOT_PRODUCT(_last_fc = fc;) | |
8057 } else if (!_bitMap->isMarked(addr)) { | |
8058 // Chunk is fresh garbage | |
2132 | 8059 res = do_garbage_chunk(fc); |
0 | 8060 debug_only(_sp->verifyFreeLists()); |
8061 NOT_PRODUCT( | |
8062 _numObjectsFreed++; | |
8063 _numWordsFreed += res; | |
8064 ) | |
8065 } else { | |
8066 // Chunk that is alive. | |
2132 | 8067 res = do_live_chunk(fc); |
0 | 8068 debug_only(_sp->verifyFreeLists()); |
8069 NOT_PRODUCT( | |
8070 _numObjectsLive++; | |
8071 _numWordsLive += res; | |
8072 ) | |
8073 } | |
8074 return res; | |
8075 } | |
8076 | |
8077 // For the smart allocation, record following | |
8078 // split deaths - a free chunk is removed from its free list because | |
8079 // it is being split into two or more chunks. | |
8080 // split birth - a free chunk is being added to its free list because | |
8081 // a larger free chunk has been split and resulted in this free chunk. | |
8082 // coal death - a free chunk is being removed from its free list because | |
8083 // it is being coalesced into a large free chunk. | |
8084 // coal birth - a free chunk is being added to its free list because | |
8085 // it was created when two or more free chunks were coalesced into |
8086 // this free chunk. | |
8087 // | |
8088 // These statistics are used to determine the desired number of free | |
8089 // chunks of a given size. The desired number is chosen to be relative | |
8090 // to the end of a CMS sweep. The desired number at the end of a sweep | |
8091 // is the | |
8092 // count-at-end-of-previous-sweep (an amount that was enough) | |
8093 // - count-at-beginning-of-current-sweep (the excess) | |
8094 // + split-births (gains in this size during interval) | |
8095 // - split-deaths (demands on this size during interval) | |
8096 // where the interval is from the end of one sweep to the end of the | |
8097 // next. | |
8098 // | |
8099 // When sweeping the sweeper maintains an accumulated chunk which is | |
8100 // the chunk that is made up of chunks that have been coalesced. That | |
8101 // will be termed the left-hand chunk. A new chunk of garbage that | |
8102 // is being considered for coalescing will be referred to as the | |
8103 // right-hand chunk. | |
8104 // | |
8105 // When making a decision on whether to coalesce a right-hand chunk with | |
8106 // the current left-hand chunk, the current count vs. the desired count | |
8107 // of the left-hand chunk is considered. Also if the right-hand chunk | |
8108 // is near the large chunk at the end of the heap (see | |
8109 // ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the | |
8110 // left-hand chunk is coalesced. | |
8111 // | |
8112 // When making a decision about whether to split a chunk, the desired count | |
8113 // vs. the current count of the candidate to be split is also considered. | |
8114 // If the candidate is underpopulated (currently fewer chunks than desired) | |
8115 // a chunk of an overpopulated (currently more chunks than desired) size may | |
8116 // be chosen. The "hint" associated with a free list, if non-null, points | |
8117 // to a free list which may be overpopulated. | |
8118 // | |
8119 | |
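To make the bookkeeping concrete, here is the desired-count formula above applied to one chunk size with invented numbers (a sketch, not data from any run):

#include <cstdio>

int main() {
  // All quantities are for one chunk size, over one sweep-to-sweep interval.
  int count_end_prev_sweep  = 120; // was enough at the end of the last sweep
  int count_begin_cur_sweep = 30;  // still on the list now, i.e. the excess
  int split_births          = 45;  // gained by splitting larger chunks
  int split_deaths          = 60;  // consumed by being split up
  int desired = count_end_prev_sweep
              - count_begin_cur_sweep
              + split_births
              - split_deaths;      // 120 - 30 + 45 - 60 = 75
  printf("desired free chunks of this size: %d\n", desired);
  return 0;
}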
2132 | 8120 void SweepClosure::do_already_free_chunk(FreeChunk* fc) { |
3405 | 8121 const size_t size = fc->size(); |
0 | 8122 // Chunks that cannot be coalesced are not in the |
8123 // free lists. | |
8124 if (CMSTestInFreeList && !fc->cantCoalesce()) { | |
8125 assert(_sp->verifyChunkInFreeLists(fc), | |
8126 "free chunk should be in free lists"); | |
8127 } | |
8128 // a chunk that is already free, should not have been | |
8129 // marked in the bit map | |
3405 | 8130 HeapWord* const addr = (HeapWord*) fc; |
0 | 8131 assert(!_bitMap->isMarked(addr), "free chunk should be unmarked"); |
8132 // Verify that the bit map has no bits marked between | |
8133 // addr and purported end of this block. | |
8134 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size); | |
8135 | |
2132 | 8136 // Some chunks cannot be coalesced under any circumstances. |
0 | 8137 // See the definition of cantCoalesce(). |
8138 if (!fc->cantCoalesce()) { | |
8139 // This chunk can potentially be coalesced. | |
8140 if (_sp->adaptive_freelists()) { | |
8141 // All the work is done in | |
2132 | 8142 do_post_free_or_garbage_chunk(fc, size); |
0 | 8143 } else { // Not adaptive free lists |
8144 // this is a free chunk that can potentially be coalesced by the sweeper; | |
8145 if (!inFreeRange()) { | |
8146 // if the next chunk is a free block that can't be coalesced | |
8147 // it doesn't make sense to remove this chunk from the free lists | |
8148 FreeChunk* nextChunk = (FreeChunk*)(addr + size); | |
2136 | 8149 assert((HeapWord*)nextChunk <= _sp->end(), "Chunk size out of bounds?"); |
8150 if ((HeapWord*)nextChunk < _sp->end() && // There is another free chunk to the right ... |
8151 nextChunk->isFree() && // ... which is free... |
8152 nextChunk->cantCoalesce()) { // ... but can't be coalesced |
0 | 8153 // nothing to do |
8154 } else { | |
8155 // Potentially the start of a new free range: | |
8156 // Don't eagerly remove it from the free lists. | |
8157 // No need to remove it if it will just be put | |
8158 // back again. (Also from a pragmatic point of view | |
8159 // if it is a free block in a region that is beyond | |
8160 // any allocated blocks, an assertion will fail) | |
8161 // Remember the start of a free run. | |
8162 initialize_free_range(addr, true); | |
8163 // end - can coalesce with next chunk | |
8164 } | |
8165 } else { | |
8166 // the midst of a free range, we are coalescing | |
3405 | 8167 print_free_block_coalesced(fc); |
0 | 8168 if (CMSTraceSweeper) { |
8169 gclog_or_tty->print(" -- pick up free block 0x%x (%d)\n", fc, size); | |
8170 } | |
8171 // remove it from the free lists | |
8172 _sp->removeFreeChunkFromFreeLists(fc); | |
8173 set_lastFreeRangeCoalesced(true); | |
8174 // If the chunk is being coalesced and the current free range is | |
8175 // in the free lists, remove the current free range so that it | |
8176 // will be returned to the free lists in its entirety - all | |
8177 // the coalesced pieces included. | |
8178 if (freeRangeInFreeLists()) { | |
8179 FreeChunk* ffc = (FreeChunk*) freeFinger(); | |
8180 assert(ffc->size() == pointer_delta(addr, freeFinger()), | |
8181 "Size of free range is inconsistent with chunk size."); | |
8182 if (CMSTestInFreeList) { | |
8183 assert(_sp->verifyChunkInFreeLists(ffc), | |
8184 "free range is not in free lists"); | |
8185 } | |
8186 _sp->removeFreeChunkFromFreeLists(ffc); | |
8187 set_freeRangeInFreeLists(false); | |
8188 } | |
8189 } | |
8190 } | |
3405 | 8191 // Note that if the chunk is not coalescable (the else arm |
8192 // below), we unconditionally flush, without needing to do |
8193 // a "lookahead," as we do below. |
8194 if (inFreeRange()) lookahead_and_flush(fc, size); |
0 | 8195 } else { |
8196 // Code path common to both original and adaptive free lists. | |
8197 | |
8198 // can't coalesce with previous block; this should be treated |
8199 // as the end of a free run if any | |
8200 if (inFreeRange()) { | |
8201 // we kicked some butt; time to pick up the garbage | |
2132 | 8202 assert(freeFinger() < addr, "freeFinger points too high"); |
8203 flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger())); |
0 | 8204 } |
8205 // else, nothing to do, just continue | |
8206 } | |
8207 } | |
8208 | |
2132 | 8209 size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) { |
0 | 8210 // This is a chunk of garbage. It is not in any free list. |
8211 // Add it to a free list or let it possibly be coalesced into | |
8212 // a larger chunk. | |
3405 | 8213 HeapWord* const addr = (HeapWord*) fc; |
8214 const size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()); |
0 | 8215 |
8216 if (_sp->adaptive_freelists()) { | |
8217 // Verify that the bit map has no bits marked between | |
8218 // addr and purported end of just dead object. | |
8219 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size); | |
8220 | |
2132 | 8221 do_post_free_or_garbage_chunk(fc, size); |
0 | 8222 } else { |
8223 if (!inFreeRange()) { | |
8224 // start of a new free range | |
8225 assert(size > 0, "A free range should have a size"); | |
8226 initialize_free_range(addr, false); | |
8227 } else { | |
8228 // this will be swept up when we hit the end of the | |
8229 // free range | |
8230 if (CMSTraceSweeper) { | |
8231 gclog_or_tty->print(" -- pick up garbage 0x%x (%d) \n", fc, size); | |
8232 } | |
8233 // If the chunk is being coalesced and the current free range is | |
8234 // in the free lists, remove the current free range so that it | |
8235 // will be returned to the free lists in its entirety - all | |
8236 // the coalesced pieces included. | |
8237 if (freeRangeInFreeLists()) { | |
8238 FreeChunk* ffc = (FreeChunk*)freeFinger(); | |
8239 assert(ffc->size() == pointer_delta(addr, freeFinger()), | |
8240 "Size of free range is inconsistent with chunk size."); | |
8241 if (CMSTestInFreeList) { | |
8242 assert(_sp->verifyChunkInFreeLists(ffc), | |
8243 "free range is not in free lists"); | |
8244 } | |
8245 _sp->removeFreeChunkFromFreeLists(ffc); | |
8246 set_freeRangeInFreeLists(false); | |
8247 } | |
8248 set_lastFreeRangeCoalesced(true); | |
8249 } | |
8250 // this will be swept up when we hit the end of the free range | |
8251 | |
8252 // Verify that the bit map has no bits marked between | |
8253 // addr and purported end of just dead object. | |
8254 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size); | |
8255 } | |
3405 | 8256 assert(_limit >= addr + size, |
8257 "A freshly garbage chunk can't possibly straddle over _limit"); |
8258 if (inFreeRange()) lookahead_and_flush(fc, size); |
0 | 8259 return size; |
8260 } | |
8261 | |
2132 | 8262 size_t SweepClosure::do_live_chunk(FreeChunk* fc) { |
0 | 8263 HeapWord* addr = (HeapWord*) fc; |
8264 // The sweeper has just found a live object. Return any accumulated | |
8265 // left hand chunk to the free lists. | |
8266 if (inFreeRange()) { | |
2132 | 8267 assert(freeFinger() < addr, "freeFinger points too high"); |
8268 flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger())); |
8269 } |
8270 |
8271 // This object is live: we'd normally expect this to be |
0 | 8272 // an oop, and like to assert the following: |
8273 // assert(oop(addr)->is_oop(), "live block should be an oop"); | |
8274 // However, as we commented above, this may be an object whose | |
8275 // header hasn't yet been initialized. | |
8276 size_t size; | |
8277 assert(_bitMap->isMarked(addr), "Tautology for this control point"); | |
8278 if (_bitMap->isMarked(addr + 1)) { | |
8279 // Determine the size from the bit map, rather than trying to | |
8280 // compute it from the object header. | |
8281 HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2); | |
8282 size = pointer_delta(nextOneAddr + 1, addr); | |
8283 assert(size == CompactibleFreeListSpace::adjustObjectSize(size), | |
8284 "alignment problem"); | |
8285 | |
2132 | 8286 #ifdef DEBUG |
187 | 8287 if (oop(addr)->klass_or_null() != NULL && |
94 | 8288 ( !_collector->should_unload_classes() |
518 | 8289 || (oop(addr)->is_parsable()) && |
8290 oop(addr)->is_conc_safe())) { |
0 | 8291 // Ignore mark word because we are running concurrent with mutators |
8292 assert(oop(addr)->is_oop(true), "live block should be an oop"); | |
518 | 8293 // is_conc_safe is checked before performing this assertion |
8294 // because an object that is not is_conc_safe may yet have |
8295 // the return from size() correct. |
0 | 8296 assert(size == |
8297 CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()), | |
8298 "P-mark and computed size do not agree"); | |
8299 } | |
2132 | 8300 #endif |
0 | 8301 |
8302 } else { | |
8303 // This should be an initialized object that's alive. | |
187 | 8304 assert(oop(addr)->klass_or_null() != NULL && |
94 | 8305 (!_collector->should_unload_classes() |
0 | 8306 || oop(addr)->is_parsable()), |
8307 "Should be an initialized object"); | |
3405 | 8308 // Note that there are objects used during class redefinition, |
8309 // e.g. merge_cp in VM_RedefineClasses::merge_cp_and_rewrite(), |
518 | 8310 // which are discarded with their is_conc_safe state still |
8311 // false. These objects may be floating garbage so may be |
8312 // seen here. If they are floating garbage their size |
8313 // should be attainable from their klass. Do note that |
8314 // is_conc_safe() is true for oop(addr). |
0 | 8315 // Ignore mark word because we are running concurrent with mutators |
8316 assert(oop(addr)->is_oop(true), "live block should be an oop"); | |
8317 // Verify that the bit map has no bits marked between | |
8318 // addr and purported end of this block. | |
8319 size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()); | |
8320 assert(size >= 3, "Necessary for Printezis marks to work"); | |
8321 assert(!_bitMap->isMarked(addr+1), "Tautology for this control point"); | |
8322 DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);) | |
8323 } | |
8324 return size; | |
8325 } | |
8326 | |
2132 | 8327 void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc, |
8328 size_t chunkSize) { |
8329 // do_post_free_or_garbage_chunk() should only be called in the case |
8330 // of the adaptive free list allocator. |
3405 | 8331 const bool fcInFreeLists = fc->isFree(); |
0 | 8332 assert(_sp->adaptive_freelists(), "Should only be used in this case."); |
8333 assert((HeapWord*)fc <= _limit, "sweep invariant"); | |
8334 if (CMSTestInFreeList && fcInFreeLists) { | |
2132 | 8335 assert(_sp->verifyChunkInFreeLists(fc), "free chunk is not in free lists"); |
8336 } |
0 | 8337 |
8338 if (CMSTraceSweeper) { | |
8339 gclog_or_tty->print_cr(" -- pick up another chunk at 0x%x (%d)", fc, chunkSize); | |
8340 } | |
8341 | |
3405 | 8342 HeapWord* const fc_addr = (HeapWord*) fc; |
0 | 8343 |
8344 bool coalesce; | |
3405 | 8345 const size_t left = pointer_delta(fc_addr, freeFinger()); |
8346 const size_t right = chunkSize; |
0 | 8347 switch (FLSCoalescePolicy) { |
8348 // numeric value forms a coalition aggressiveness metric | |
8349 case 0: { // never coalesce | |
8350 coalesce = false; | |
8351 break; | |
8352 } | |
8353 case 1: { // coalesce if left & right chunks on overpopulated lists | |
8354 coalesce = _sp->coalOverPopulated(left) && | |
8355 _sp->coalOverPopulated(right); | |
8356 break; | |
8357 } | |
8358 case 2: { // coalesce if left chunk on overpopulated list (default) | |
8359 coalesce = _sp->coalOverPopulated(left); | |
8360 break; | |
8361 } | |
8362 case 3: { // coalesce if left OR right chunk on overpopulated list | |
8363 coalesce = _sp->coalOverPopulated(left) || | |
8364 _sp->coalOverPopulated(right); | |
8365 break; | |
8366 } | |
8367 case 4: { // always coalesce | |
8368 coalesce = true; | |
8369 break; | |
8370 } | |
8371 default: | |
8372 ShouldNotReachHere(); | |
8373 } | |
8374 | |
8375 // Should the current free range be coalesced? | |
8376 // If the chunk is in a free range and either we decided to coalesce above | |
8377 // or the chunk is near the large block at the end of the heap | |
8378 // (isNearLargestChunk() returns true), then coalesce this chunk. | |
3405 | 8379 const bool doCoalesce = inFreeRange() |
8380 && (coalesce || _g->isNearLargestChunk(fc_addr)); |
0 | 8381 if (doCoalesce) { |
8382 // Coalesce the current free range on the left with the new | |
8383 // chunk on the right. If either is on a free list, | |
8384 // it must be removed from the list and stashed in the closure. | |
8385 if (freeRangeInFreeLists()) { | |
3405 | 8386 FreeChunk* const ffc = (FreeChunk*)freeFinger(); |
8387 assert(ffc->size() == pointer_delta(fc_addr, freeFinger()), |
0 | 8388 "Size of free range is inconsistent with chunk size."); |
8389 if (CMSTestInFreeList) { | |
8390 assert(_sp->verifyChunkInFreeLists(ffc), | |
8391 "Chunk is not in free lists"); | |
8392 } | |
8393 _sp->coalDeath(ffc->size()); | |
8394 _sp->removeFreeChunkFromFreeLists(ffc); | |
8395 set_freeRangeInFreeLists(false); | |
8396 } | |
8397 if (fcInFreeLists) { | |
8398 _sp->coalDeath(chunkSize); | |
8399 assert(fc->size() == chunkSize, | |
8400 "The chunk has the wrong size or is not in the free lists"); | |
8401 _sp->removeFreeChunkFromFreeLists(fc); | |
8402 } | |
8403 set_lastFreeRangeCoalesced(true); | |
3405 | 8404 print_free_block_coalesced(fc); |
0 | 8405 } else { // not in a free range and/or should not coalesce |
8406 // Return the current free range and start a new one. | |
8407 if (inFreeRange()) { | |
8408 // In a free range but cannot coalesce with the right hand chunk. | |
8409 // Put the current free range into the free lists. | |
2132
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1994
diff
changeset
|
8410 flush_cur_free_chunk(freeFinger(), |
3405
8cbcd406c42e
7042740: CMS: assert(n> q) failed: Looping at: ... blockOffsetTable.cpp:557
ysr
parents:
3356
diff
changeset
|
8411 pointer_delta(fc_addr, freeFinger())); |
0 | 8412 } |
8413 // Set up for new free range. Pass along whether the right hand | |
8414 // chunk is in the free lists. | |
8415 initialize_free_range((HeapWord*)fc, fcInFreeLists); | |
8416 } | |
8417 } | |
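The switch above maps the numeric FLSCoalescePolicy value to a coalescing decision of increasing aggressiveness. A minimal standalone sketch of the same mapping (hypothetical helper, not part of HotSpot; the two booleans stand in for _sp->coalOverPopulated(left) and _sp->coalOverPopulated(right)):

// Editor's illustrative sketch -- not part of the original file.
static bool should_coalesce_sketch(int policy, bool left_overpop, bool right_overpop) {
  switch (policy) {
    case 0:  return false;                           // never coalesce
    case 1:  return left_overpop && right_overpop;   // both lists overpopulated
    case 2:  return left_overpop;                    // left only (the default)
    case 3:  return left_overpop || right_overpop;   // either list
    case 4:  return true;                            // always coalesce
    default: return false;                           // unreachable by contract
  }
}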

// Lookahead flush:
// If we are tracking a free range, and this is the last chunk that
// we'll look at because its end crosses past _limit, we'll preemptively
// flush it along with any free range we may be holding on to. Note that
// this can be the case only for an already free or freshly garbage
// chunk. If this block is an object, it can never straddle
// over _limit. The "straddling" occurs when _limit is set at
// the previous end of the space when this cycle started, and
// a subsequent heap expansion caused the previously co-terminal
// free block to be coalesced with the newly expanded portion,
// thus rendering _limit a non-block-boundary making it dangerous
// for the sweeper to step over and examine.
void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
  assert(inFreeRange(), "Should only be called if currently in a free range.");
  HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
  assert(_sp->used_region().contains(eob - 1),
         err_msg("eob = " PTR_FORMAT " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
                 " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
                 _limit, _sp->bottom(), _sp->end(), fc, chunk_size));
  if (eob >= _limit) {
    assert(eob == _limit || fc->isFree(), "Only a free chunk should allow us to cross over the limit");
    if (CMSTraceSweeper) {
      gclog_or_tty->print_cr("_limit " PTR_FORMAT " reached or crossed by block "
                             "[" PTR_FORMAT "," PTR_FORMAT ") in space "
                             "[" PTR_FORMAT "," PTR_FORMAT ")",
                             _limit, fc, eob, _sp->bottom(), _sp->end());
    }
    // Return the storage we are tracking back into the free lists.
    if (CMSTraceSweeper) {
      gclog_or_tty->print_cr("Flushing ... ");
    }
    assert(freeFinger() < eob, "Error");
    flush_cur_free_chunk( freeFinger(), pointer_delta(eob, freeFinger()));
  }
}

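The decision reduces to plain pointer arithmetic: compute the end of the block and compare it against the sweep limit. A self-contained sketch under those assumptions (hypothetical names; chunk sizes are in words, as in the original):

// Editor's illustrative sketch -- not part of the original file.
#include <cstddef>

typedef size_t HeapWordSketch;  // stand-in for HotSpot's HeapWord

static bool crosses_or_reaches_limit(const HeapWordSketch* chunk,
                                     size_t chunk_size_words,
                                     const HeapWordSketch* limit) {
  const HeapWordSketch* eob = chunk + chunk_size_words;  // end of block
  return eob >= limit;  // true => flush the tracked free range preemptively
}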
void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
  assert(inFreeRange(), "Should only be called if currently in a free range.");
  assert(size > 0,
         "A zero sized chunk cannot be added to the free lists.");
  if (!freeRangeInFreeLists()) {
    if (CMSTestInFreeList) {
      FreeChunk* fc = (FreeChunk*) chunk;
      fc->setSize(size);
      assert(!_sp->verifyChunkInFreeLists(fc),
             "chunk should not be in free lists yet");
    }
    if (CMSTraceSweeper) {
      // Use pointer/size formats; "0x%x (%d)" truncates on 64-bit platforms.
      gclog_or_tty->print_cr(" -- add free block " PTR_FORMAT " (" SIZE_FORMAT ") to free lists",
                             chunk, size);
    }
    // A new free range is going to be starting.  The current
    // free range has not been added to the free lists yet or
    // was removed so add it back.
    // If the current free range was coalesced, then the death
    // of the free range was recorded.  Record a birth now.
    if (lastFreeRangeCoalesced()) {
      _sp->coalBirth(size);
    }
    _sp->addChunkAndRepairOffsetTable(chunk, size,
            lastFreeRangeCoalesced());
  } else if (CMSTraceSweeper) {
    gclog_or_tty->print_cr("Already in free list: nothing to flush");
  }
  set_inFreeRange(false);
  set_freeRangeInFreeLists(false);
}

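Around coalescing, the free-list census stays balanced by pairing deaths and births: each chunk pulled off a free list for merging records a coalescing death, and the merged range records a birth when it is flushed back. A toy model of that accounting (hypothetical names, not the HotSpot API):

// Editor's illustrative sketch -- not part of the original file.
#include <cstddef>

struct CoalStats {
  long births;  // coalesced chunks returned to the free lists
  long deaths;  // chunks consumed by coalescing
};

// Merge two adjacent free chunks, keeping the statistics balanced.
static size_t coalesce_pair(CoalStats& s, size_t left_words, size_t right_words) {
  s.deaths += 2;                        // both inputs leave the free lists
  size_t combined = left_words + right_words;
  s.births += 1;                        // the merged chunk is (re)born on flush
  return combined;
}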
// We take a break if we've been at this for a while,
// so as to avoid monopolizing the locks involved.
void SweepClosure::do_yield_work(HeapWord* addr) {
  // Return current free chunk being used for coalescing (if any)
  // to the appropriate freelist.  After yielding, the next
  // free block encountered will start a coalescing range of
  // free blocks.  If the next free block is adjacent to the
  // chunk just flushed, they will need to wait for the next
  // sweep to be coalesced.
  if (inFreeRange()) {
    flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
  }

  // First give up the locks, then yield, then re-lock.
  // We should probably use a constructor/destructor idiom to
  // do this unlock/lock or modify the MutexUnlocker class to
  // serve our purpose. XXX
  assert_lock_strong(_bitMap->lock());
  assert_lock_strong(_freelistLock);
  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "CMS thread should hold CMS token");
  _bitMap->lock()->unlock();
  _freelistLock->unlock();
  ConcurrentMarkSweepThread::desynchronize(true);
  ConcurrentMarkSweepThread::acknowledge_yield_request();
  _collector->stopTimer();
  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
  if (PrintCMSStatistics != 0) {
    _collector->incrementYields();
  }
  _collector->icms_wait();

  // See the comment in coordinator_yield()
  for (unsigned i = 0; i < CMSYieldSleepCount &&
                       ConcurrentMarkSweepThread::should_yield() &&
                       !CMSCollector::foregroundGCIsActive(); ++i) {
    os::sleep(Thread::current(), 1, false);
    ConcurrentMarkSweepThread::acknowledge_yield_request();
  }

  ConcurrentMarkSweepThread::synchronize(true);
  _freelistLock->lock();
  _bitMap->lock()->lock_without_safepoint_check();
  _collector->startTimer();
}

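The XXX comment above suggests a constructor/destructor (RAII) idiom for the unlock/relock pair. A minimal sketch of such a guard, using std::mutex purely as a stand-in for the VM's Mutex class:

// Editor's illustrative sketch -- not part of the original file.
#include <mutex>

// Inverse of std::lock_guard: releases a held lock on construction and
// reacquires it on destruction, bracketing a yield point.
class MutexUnlockerSketch {
  std::mutex& _m;
 public:
  explicit MutexUnlockerSketch(std::mutex& m) : _m(m) { _m.unlock(); }
  ~MutexUnlockerSketch() { _m.lock(); }
  MutexUnlockerSketch(const MutexUnlockerSketch&) = delete;
  MutexUnlockerSketch& operator=(const MutexUnlockerSketch&) = delete;
};

// Usage: with 'lk' held, yield without holding it:
//   { MutexUnlockerSketch u(lk);  /* sleep / yield here */ }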
#ifndef PRODUCT
// This is actually very useful in a product build if it can
// be called from the debugger.  Compile it into the product
// as needed.
bool debug_verifyChunkInFreeLists(FreeChunk* fc) {
  return debug_cms_space->verifyChunkInFreeLists(fc);
}
#endif

void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
  if (CMSTraceSweeper) {
    gclog_or_tty->print_cr("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",
                           fc, fc->size());
  }
}

// CMSIsAliveClosure
bool CMSIsAliveClosure::do_object_b(oop obj) {
  HeapWord* addr = (HeapWord*)obj;
  return addr != NULL &&
         (!_span.contains(addr) || _bit_map->isMarked(addr));
}

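The predicate treats everything outside the CMS span as live and consults the mark bitmap only inside it. A self-contained sketch of the same rule (hypothetical types standing in for MemRegion and CMSBitMap):

// Editor's illustrative sketch -- not part of the original file.
#include <set>

struct SpanSketch {
  const char* lo;
  const char* hi;
  bool contains(const char* p) const { return lo <= p && p < hi; }
};

// Live iff non-null and either outside the collected span, or marked within it.
static bool is_alive_sketch(const SpanSketch& span,
                            const std::set<const char*>& marked,
                            const char* addr) {
  if (addr == nullptr) return false;
  return !span.contains(addr) || marked.count(addr) != 0;
}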
CMSKeepAliveClosure::CMSKeepAliveClosure( CMSCollector* collector,
                                          MemRegion span,
                                          CMSBitMap* bit_map, CMSMarkStack* mark_stack,
                                          CMSMarkStack* revisit_stack, bool cpc):
  KlassRememberingOopClosure(collector, NULL, revisit_stack),
  _span(span),
  _bit_map(bit_map),
  _mark_stack(mark_stack),
  _concurrent_precleaning(cpc) {
  assert(!_span.is_empty(), "Empty span could spell trouble");
}


// CMSKeepAliveClosure: the serial version
void CMSKeepAliveClosure::do_oop(oop obj) {
  HeapWord* addr = (HeapWord*)obj;
  if (_span.contains(addr) &&
      !_bit_map->isMarked(addr)) {
    _bit_map->mark(addr);
    bool simulate_overflow = false;
    NOT_PRODUCT(
      if (CMSMarkStackOverflowALot &&
          _collector->simulate_overflow()) {
        // simulate a stack overflow
        simulate_overflow = true;
      }
    )
    if (simulate_overflow || !_mark_stack->push(obj)) {
      if (_concurrent_precleaning) {
        // We dirty the overflown object and let the remark
        // phase deal with it.
        assert(_collector->overflow_list_is_empty(), "Error");
        // In the case of object arrays, we need to dirty all of
        // the cards that the object spans. No locking or atomics
        // are needed since no one else can be mutating the mod union
        // table.
        if (obj->is_objArray()) {
          size_t sz = obj->size();
          HeapWord* end_card_addr =
            (HeapWord*)round_to((intptr_t)(addr+sz), CardTableModRefBS::card_size);
          MemRegion redirty_range = MemRegion(addr, end_card_addr);
          assert(!redirty_range.is_empty(), "Arithmetical tautology");
          _collector->_modUnionTable.mark_range(redirty_range);
        } else {
          _collector->_modUnionTable.mark(addr);
        }
        _collector->_ser_kac_preclean_ovflw++;
      } else {
        _collector->push_on_overflow_list(obj);
        _collector->_ser_kac_ovflw++;
      }
    }
  }
}

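The end_card_addr computation rounds the object's end address up to a card boundary so that every card the array spans is re-examined at remark. A short sketch of that rounding (assuming a 512-byte card, HotSpot's usual default; addresses are illustrative):

// Editor's illustrative sketch -- not part of the original file.
#include <cstdint>

static const uintptr_t kCardSizeSketch = 512;  // assumed card size in bytes

// Round 'end' up to the next card boundary (a no-op if already aligned).
static uintptr_t round_to_card(uintptr_t end) {
  return (end + kCardSizeSketch - 1) & ~(kCardSizeSketch - 1);
}

// Usage: an object spanning [0x10000, 0x10234) dirties cards up to
// round_to_card(0x10234) == 0x10400, i.e. the redirty range covers the
// whole array even though it ends mid-card.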
void CMSKeepAliveClosure::do_oop(oop* p)       { CMSKeepAliveClosure::do_oop_work(p); }
void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }

// CMSParKeepAliveClosure: a parallel version of the above.
// The work queues are private to each closure (thread),
// but (may be) available for stealing by other threads.
void CMSParKeepAliveClosure::do_oop(oop obj) {
  HeapWord* addr = (HeapWord*)obj;
  if (_span.contains(addr) &&
      !_bit_map->isMarked(addr)) {
    // In general, during recursive tracing, several threads
    // may be concurrently getting here; the first one to
    // "tag" it, claims it.
    if (_bit_map->par_mark(addr)) {
      bool res = _work_queue->push(obj);
      assert(res, "Low water mark should be much less than capacity");
      // Do a recursive trim in the hope that this will keep
      // stack usage lower, but leave some oops for potential stealers
      trim_queue(_low_water_mark);
    } // Else, another thread got there first
  }
}

void CMSParKeepAliveClosure::do_oop(oop* p)       { CMSParKeepAliveClosure::do_oop_work(p); }
void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }

void CMSParKeepAliveClosure::trim_queue(uint max) {
  while (_work_queue->size() > max) {
    oop new_oop;
    if (_work_queue->pop_local(new_oop)) {
      assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
      assert(_bit_map->isMarked((HeapWord*)new_oop),
             "no white objects on this stack!");
      assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
      // iterate over the oops in this oop, marking and pushing
      // the ones in CMS heap (i.e. in _span).
      new_oop->oop_iterate(&_mark_and_push);
    }
  }
}

CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure(
                                                  CMSCollector* collector,
                                                  MemRegion span, CMSBitMap* bit_map,
                                                  CMSMarkStack* revisit_stack,
                                                  OopTaskQueue* work_queue):
  Par_KlassRememberingOopClosure(collector, NULL, revisit_stack),
  _span(span),
  _bit_map(bit_map),
  _work_queue(work_queue) { }

void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
  HeapWord* addr = (HeapWord*)obj;
  if (_span.contains(addr) &&
      !_bit_map->isMarked(addr)) {
    if (_bit_map->par_mark(addr)) {
      bool simulate_overflow = false;
      NOT_PRODUCT(
        if (CMSMarkStackOverflowALot &&
            _collector->par_simulate_overflow()) {
          // simulate a stack overflow
          simulate_overflow = true;
        }
      )
      if (simulate_overflow || !_work_queue->push(obj)) {
        _collector->par_push_on_overflow_list(obj);
        _collector->_par_kac_ovflw++;
      }
    } // Else another thread got there already
  }
}

void CMSInnerParMarkAndPushClosure::do_oop(oop* p)       { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }

//////////////////////////////////////////////////////////////////
// CMSExpansionCause                /////////////////////////////
//////////////////////////////////////////////////////////////////
const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
  switch (cause) {
    case _no_expansion:
      return "No expansion";
    case _satisfy_free_ratio:
      return "Free ratio";
    case _satisfy_promotion:
      return "Satisfy promotion";
    case _satisfy_allocation:
      return "allocation";
    case _allocate_par_lab:
      return "Par LAB";
    case _allocate_par_spooling_space:
      return "Par Spooling Space";
    case _adaptive_size_policy:
      return "Ergonomics";
    default:
      return "unknown";
  }
}

void CMSDrainMarkingStackClosure::do_void() {
  // the max number to take from overflow list at a time
  const size_t num = _mark_stack->capacity()/4;
  assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(),
         "Overflow list should be NULL during concurrent phases");
  while (!_mark_stack->isEmpty() ||
         // if stack is empty, check the overflow list
         _collector->take_from_overflow_list(num, _mark_stack)) {
    oop obj = _mark_stack->pop();
    HeapWord* addr = (HeapWord*)obj;
    assert(_span.contains(addr), "Should be within span");
    assert(_bit_map->isMarked(addr), "Should be marked");
    assert(obj->is_oop(), "Should be an oop");
    obj->oop_iterate(_keep_alive);
  }
}

void CMSParDrainMarkingStackClosure::do_void() {
  // drain queue
  trim_queue(0);
}

// Trim our work_queue so its length is below max at return
void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
  while (_work_queue->size() > max) {
    oop new_oop;
    if (_work_queue->pop_local(new_oop)) {
      assert(new_oop->is_oop(), "Expected an oop");
      assert(_bit_map->isMarked((HeapWord*)new_oop),
             "no white objects on this stack!");
      assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
      // iterate over the oops in this oop, marking and pushing
      // the ones in CMS heap (i.e. in _span).
      new_oop->oop_iterate(&_mark_and_push);
    }
  }
}

////////////////////////////////////////////////////////////////////
// Support for Marking Stack Overflow list handling and related code
////////////////////////////////////////////////////////////////////
// Much of the following code is similar in shape and spirit to the
// code used in ParNewGC. We should try and share that code
// as much as possible in the future.

#ifndef PRODUCT
// Debugging support for CMSStackOverflowALot

// It's OK to call this multi-threaded; the worst thing
// that can happen is that we'll get a bunch of closely
// spaced simulated overflows, but that's OK, in fact
// probably good as it would exercise the overflow code
// under contention.
bool CMSCollector::simulate_overflow() {
  if (_overflow_counter-- <= 0) { // just being defensive
    _overflow_counter = CMSMarkStackOverflowInterval;
    return true;
  } else {
    return false;
  }
}

bool CMSCollector::par_simulate_overflow() {
  return simulate_overflow();
}
#endif

// Single-threaded
bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
  assert(stack->isEmpty(), "Expected precondition");
  assert(stack->capacity() > num, "Shouldn't bite more than can chew");
  size_t i = num;
  oop cur = _overflow_list;
  const markOop proto = markOopDesc::prototype();
  NOT_PRODUCT(ssize_t n = 0;)
  for (oop next; i > 0 && cur != NULL; cur = next, i--) {
    next = oop(cur->mark());
    cur->set_mark(proto);   // until proven otherwise
    assert(cur->is_oop(), "Should be an oop");
    bool res = stack->push(cur);
    assert(res, "Bit off more than can chew?");
    NOT_PRODUCT(n++;)
  }
  _overflow_list = cur;
#ifndef PRODUCT
  assert(_num_par_pushes >= n, "Too many pops?");
  _num_par_pushes -= n;
#endif
  return !stack->isEmpty();
}

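The overflow list is an intrusive singly linked list threaded through each object's mark word; taking a prefix means unlinking nodes and restoring each header to its prototype. A hedged sketch of the same idea, with an explicit field standing in for the mark word:

// Editor's illustrative sketch -- not part of the original file.
#include <cstddef>

struct ObjSketch { ObjSketch* mark = nullptr; };  // 'mark' plays the mark word

// Pop up to 'num' objects off an intrusive list threaded through the mark
// word, restoring each mark to its prototype (nullptr here) as we go.
static size_t take_prefix(ObjSketch*& head, size_t num,
                          ObjSketch* out[], size_t out_cap) {
  size_t taken = 0;
  ObjSketch* cur = head;
  while (taken < num && taken < out_cap && cur != nullptr) {
    ObjSketch* next = cur->mark;
    cur->mark = nullptr;       // "set_mark(proto)": restore a sane header
    out[taken++] = cur;
    cur = next;
  }
  head = cur;                  // what we did not take stays on the list
  return taken;
}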
#define BUSY (oop(0x1aff1aff))
// (MT-safe) Get a prefix of at most "num" from the list.
// The overflow list is chained through the mark word of
// each object in the list. We fetch the entire list,
// break off a prefix of the right size and return the
// remainder. If other threads try to take objects from
// the overflow list at that time, they will wait for
// some time to see if data becomes available. If (and
// only if) another thread places one or more object(s)
// on the global list before we have returned the suffix
// to the global list, we will walk down our local list
// to find its end and append the global list to
// our suffix before returning it. This suffix walk can
// prove to be expensive (quadratic in the amount of traffic)
// when there are many objects in the overflow list and
// there is much producer-consumer contention on the list.
// *NOTE*: The overflow list manipulation code here and
// in ParNewGeneration:: are very similar in shape,
// except that in the ParNew case we use the old (from/eden)
// copy of the object to thread the list via its klass word.
// Because of the common code, if you make any changes in
// the code below, please check the ParNew version to see if
// similar changes might be needed.
// CR 6797058 has been filed to consolidate the common code.
bool CMSCollector::par_take_from_overflow_list(size_t num,
                                               OopTaskQueue* work_q,
                                               int no_of_gc_threads) {
  assert(work_q->size() == 0, "First empty local work queue");
  assert(num < work_q->max_elems(), "Can't bite more than we can chew");
  if (_overflow_list == NULL) {
    return false;
  }
  // Grab the entire list; we'll put back a suffix
  oop prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
  Thread* tid = Thread::current();
  // Before "no_of_gc_threads" was introduced CMSOverflowSpinCount was
  // set to ParallelGCThreads.
  size_t CMSOverflowSpinCount = (size_t) no_of_gc_threads; // was ParallelGCThreads;
  size_t sleep_time_millis = MAX2((size_t)1, num/100);
  // If the list is busy, we spin for a short while,
  // sleeping between attempts to get the list.
  for (size_t spin = 0; prefix == BUSY && spin < CMSOverflowSpinCount; spin++) {
    os::sleep(tid, sleep_time_millis, false);
    if (_overflow_list == NULL) {
      // Nothing left to take
      return false;
    } else if (_overflow_list != BUSY) {
      // Try and grab the prefix
      prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
    }
  }
  // If the list was found to be empty, or we spun long
  // enough, we give up and return empty-handed. If we leave
  // the list in the BUSY state below, it must be the case that
  // some other thread holds the overflow list and will set it
  // to a non-BUSY state in the future.
  if (prefix == NULL || prefix == BUSY) {
    // Nothing to take or waited long enough
    if (prefix == NULL) {
      // Write back the NULL in case we overwrote it with BUSY above
      // and it is still the same value.
      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
    }
    return false;
  }
  assert(prefix != NULL && prefix != BUSY, "Error");
  size_t i = num;
  oop cur = prefix;
  // Walk down the first "num" objects, unless we reach the end.
  for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--);
  if (cur->mark() == NULL) {
    // We have "num" or fewer elements in the list, so there
    // is nothing to return to the global list.
    // Write back the NULL in lieu of the BUSY we wrote
    // above, if it is still the same value.
    if (_overflow_list == BUSY) {
      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
    }
  } else {
    // Chop off the suffix and return it to the global list.
    assert(cur->mark() != BUSY, "Error");
    oop suffix_head = cur->mark(); // suffix will be put back on global list
    cur->set_mark(NULL);           // break off suffix
    // It's possible that the list is still in the empty(busy) state
    // we left it in a short while ago; in that case we may be
    // able to place back the suffix without incurring the cost
    // of a walk down the list.
    oop observed_overflow_list = _overflow_list;
    oop cur_overflow_list = observed_overflow_list;
    bool attached = false;
    while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
      observed_overflow_list =
        (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
      if (cur_overflow_list == observed_overflow_list) {
        attached = true;
        break;
      } else cur_overflow_list = observed_overflow_list;
    }
    if (!attached) {
      // Too bad, someone else sneaked in (at least) an element; we'll need
      // to do a splice. Find tail of suffix so we can prepend suffix to global
      // list.
      for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
      oop suffix_tail = cur;
      assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
             "Tautology");
      observed_overflow_list = _overflow_list;
      do {
        cur_overflow_list = observed_overflow_list;
        if (cur_overflow_list != BUSY) {
          // Do the splice ...
          suffix_tail->set_mark(markOop(cur_overflow_list));
        } else { // cur_overflow_list == BUSY
          suffix_tail->set_mark(NULL);
        }
        // ... and try to place spliced list back on overflow_list ...
        observed_overflow_list =
          (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
      } while (cur_overflow_list != observed_overflow_list);
      // ... until we have succeeded in doing so.
    }
  }

  // Push the prefix elements on work_q
  assert(prefix != NULL, "control point invariant");
  const markOop proto = markOopDesc::prototype();
  oop next;
  NOT_PRODUCT(ssize_t n = 0;)
  for (cur = prefix; cur != NULL; cur = next) {
    next = oop(cur->mark());
    cur->set_mark(proto);   // until proven otherwise
    assert(cur->is_oop(), "Should be an oop");
    bool res = work_q->push(cur);
    assert(res, "Bit off more than we can chew?");
    NOT_PRODUCT(n++;)
  }
#ifndef PRODUCT
  assert(_num_par_pushes >= n, "Too many pops?");
  Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
#endif
  return true;
}

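The heart of the protocol is the BUSY sentinel: a taker atomically swaps BUSY into the list head, works on the detached list, and writes the remainder back. A compilable miniature under those assumptions, with std::atomic standing in for Atomic::xchg_ptr/cmpxchg_ptr and a sentinel node for BUSY:

// Editor's illustrative sketch -- not part of the original file.
#include <atomic>

struct Node { Node* next = nullptr; };            // link plays the mark word
static Node busy_sentinel;                        // stands in for BUSY
static std::atomic<Node*> overflow_list_sketch{nullptr};

// Detach the whole list by swapping BUSY in. A nullptr result means the
// list was empty or another taker currently holds it.
static Node* grab_list() {
  Node* head = overflow_list_sketch.exchange(&busy_sentinel);
  if (head == nullptr) {
    // List was empty: undo the BUSY we just wrote, if it is still there.
    Node* expected = &busy_sentinel;
    overflow_list_sketch.compare_exchange_strong(expected, nullptr);
    return nullptr;
  }
  if (head == &busy_sentinel) {
    return nullptr;  // another taker holds the list and will write back
  }
  return head;
}

// Put a suffix back, assuming the list is (still) BUSY from our grab;
// the real code walks to the suffix tail and splices when this CAS fails.
static void put_back(Node* suffix) {
  Node* expected = &busy_sentinel;
  overflow_list_sketch.compare_exchange_strong(expected, suffix);
}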
// Single-threaded
void CMSCollector::push_on_overflow_list(oop p) {
  NOT_PRODUCT(_num_par_pushes++;)
  assert(p->is_oop(), "Not an oop");
  preserve_mark_if_necessary(p);
  p->set_mark((markOop)_overflow_list);
  _overflow_list = p;
}

// Multi-threaded; use CAS to prepend to overflow list
void CMSCollector::par_push_on_overflow_list(oop p) {
  NOT_PRODUCT(Atomic::inc_ptr(&_num_par_pushes);)
  assert(p->is_oop(), "Not an oop");
  par_preserve_mark_if_necessary(p);
  oop observed_overflow_list = _overflow_list;
  oop cur_overflow_list;
  do {
    cur_overflow_list = observed_overflow_list;
    if (cur_overflow_list != BUSY) {
      p->set_mark(markOop(cur_overflow_list));
    } else {
      p->set_mark(NULL);
    }
    observed_overflow_list =
      (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
  } while (cur_overflow_list != observed_overflow_list);
}
#undef BUSY

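The parallel prepend is a classic lock-free push: link the new node to the observed head, then CAS the head, retrying on interference; a BUSY head is never linked through. A sketch reusing the Node, busy_sentinel, and overflow_list_sketch definitions from the sketch above:

// Editor's illustrative sketch -- not part of the original file.
static void par_push(Node* p) {
  Node* observed = overflow_list_sketch.load();
  do {
    // Never link through the BUSY sentinel; start a fresh list instead.
    p->next = (observed == &busy_sentinel) ? nullptr : observed;
    // compare_exchange_weak reloads 'observed' on failure, so the loop
    // re-links against the freshly observed head before retrying.
  } while (!overflow_list_sketch.compare_exchange_weak(observed, p));
}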
// Single threaded
// General Note on GrowableArray: pushes may silently fail
// because we are (temporarily) out of C-heap for expanding
// the stack. The problem is quite ubiquitous and affects
// a lot of code in the JVM. The prudent thing for GrowableArray
// to do (for now) is to exit with an error. However, that may
// be too draconian in some cases because the caller may be
// able to recover without much harm. For such cases, we
// should probably introduce a "soft_push" method which returns
// an indication of success or failure with the assumption that
// the caller may be able to recover from a failure; code in
// the VM can then be changed, incrementally, to deal with such
// failures where possible, thus, incrementally hardening the VM
// in such low resource situations.
void CMSCollector::preserve_mark_work(oop p, markOop m) {
  _preserved_oop_stack.push(p);
  _preserved_mark_stack.push(m);
  assert(m == p->mark(), "Mark word changed");
  assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
         "bijection");
}

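The "soft_push" the note above proposes would report allocation failure to the caller instead of aborting the VM. A hypothetical sketch of that contract (not an actual GrowableArray API; assumes a trivially copyable element type):

// Editor's illustrative sketch -- not part of the original file.
#include <new>
#include <cstring>

template <typename T>  // T assumed trivially copyable
class SoftStackSketch {
  T*     _data = nullptr;
  size_t _len  = 0;
  size_t _cap  = 0;
 public:
  // Returns false instead of exiting when expansion cannot be satisfied,
  // leaving the stack unchanged so the caller can attempt recovery.
  bool soft_push(const T& v) {
    if (_len == _cap) {
      size_t new_cap = _cap ? _cap * 2 : 8;
      T* bigger = new (std::nothrow) T[new_cap];  // may fail under pressure
      if (bigger == nullptr) return false;        // caller decides what to do
      if (_len > 0) memcpy(bigger, _data, _len * sizeof(T));
      delete[] _data;
      _data = bigger;
      _cap  = new_cap;
    }
    _data[_len++] = v;
    return true;
  }
};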
// Single threaded
void CMSCollector::preserve_mark_if_necessary(oop p) {
  markOop m = p->mark();
  if (m->must_be_preserved(p)) {
    preserve_mark_work(p, m);
  }
}

void CMSCollector::par_preserve_mark_if_necessary(oop p) {
  markOop m = p->mark();
  if (m->must_be_preserved(p)) {
    MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
    // Even though we read the mark word without holding
    // the lock, we are assured that it will not change
    // because we "own" this oop, so no other thread can
    // be trying to push it on the overflow list; see
    // the assertion in preserve_mark_work() that checks
    // that m == p->mark().
    preserve_mark_work(p, m);
  }
}

// We should be able to do this multi-threaded,
// a chunk of stack being a task (this is
// correct because each oop only ever appears
// once in the overflow list). However, it's
// not very easy to completely overlap this with
// other operations, so will generally not be done
// until all work's been completed. Because we
// expect the preserved oop stack (set) to be small,
// it's probably fine to do this single-threaded.
// We can explore cleverer concurrent/overlapped/parallel
// processing of preserved marks if we feel the
// need for this in the future. Stack overflow should
// be so rare in practice and, when it happens, its
// effect on performance so great that this will
// likely just be in the noise anyway.
void CMSCollector::restore_preserved_marks_if_any() {
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");
  assert(Thread::current()->is_ConcurrentGC_thread() ||
         Thread::current()->is_VM_thread(),
         "should be single-threaded");
  assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
         "bijection");

  while (!_preserved_oop_stack.is_empty()) {
    oop p = _preserved_oop_stack.pop();
    assert(p->is_oop(), "Should be an oop");
    assert(_span.contains(p), "oop should be in _span");
    assert(p->mark() == markOopDesc::prototype(),
           "Set when taken from overflow list");
    markOop m = _preserved_mark_stack.pop();
    p->set_mark(m);
  }
  assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(),
         "stacks were cleared above");
}

#ifndef PRODUCT
bool CMSCollector::no_preserved_marks() const {
  return _preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty();
}
#endif

CMSAdaptiveSizePolicy* ASConcurrentMarkSweepGeneration::cms_size_policy() const
{
  GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
  CMSAdaptiveSizePolicy* size_policy =
    (CMSAdaptiveSizePolicy*) gch->gen_policy()->size_policy();
  assert(size_policy->is_gc_cms_adaptive_size_policy(),
         "Wrong type for size policy");
  return size_policy;
}

void ASConcurrentMarkSweepGeneration::resize(size_t cur_promo_size,
                                             size_t desired_promo_size) {
  if (cur_promo_size < desired_promo_size) {
    size_t expand_bytes = desired_promo_size - cur_promo_size;
    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
        "Expanding tenured generation by " SIZE_FORMAT " (bytes)",
        expand_bytes);
    }
    expand(expand_bytes,
           MinHeapDeltaBytes,
           CMSExpansionCause::_adaptive_size_policy);
  } else if (desired_promo_size < cur_promo_size) {
    size_t shrink_bytes = cur_promo_size - desired_promo_size;
    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
        "Shrinking tenured generation by " SIZE_FORMAT " (bytes)",
        shrink_bytes);
    }
    shrink(shrink_bytes);
  }
}

CMSGCAdaptivePolicyCounters* ASConcurrentMarkSweepGeneration::gc_adaptive_policy_counters() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  CMSGCAdaptivePolicyCounters* counters =
    (CMSGCAdaptivePolicyCounters*) gch->collector_policy()->counters();
  assert(counters->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
         "Wrong kind of counters");
  return counters;
}


void ASConcurrentMarkSweepGeneration::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
    CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
    assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
           "Wrong gc statistics type");
    counters->update_counters(gc_stats_l);
  }
}

void ASConcurrentMarkSweepGeneration::update_counters(size_t used) {
  if (UsePerfData) {
    _space_counters->update_used(used);
    _space_counters->update_capacity();
    _gen_counters->update_all();

    CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
    assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
           "Wrong gc statistics type");
    counters->update_counters(gc_stats_l);
  }
}

// The desired expansion delta is computed so that:
// . desired free percentage or greater is used
void ASConcurrentMarkSweepGeneration::compute_new_size() {
  assert_locked_or_safepoint(Heap_lock);

  GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();

  // If incremental collection failed, we just want to expand
  // to the limit.
  if (incremental_collection_failed()) {
    clear_incremental_collection_failed();
    grow_to_reserved();
    return;
  }

  assert(UseAdaptiveSizePolicy, "Should be using adaptive sizing");

  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
         "Wrong type of heap");
  int prev_level = level() - 1;
  assert(prev_level >= 0, "The cms generation is the lowest generation");
  Generation* prev_gen = gch->get_gen(prev_level);
  assert(prev_gen->kind() == Generation::ASParNew,
         "Wrong type of young generation");
  ParNewGeneration* younger_gen = (ParNewGeneration*) prev_gen;
  size_t cur_eden = younger_gen->eden()->capacity();
  CMSAdaptiveSizePolicy* size_policy = cms_size_policy();
  size_t cur_promo = free();
  size_policy->compute_tenured_generation_free_space(cur_promo,
                                                     max_available(),
                                                     cur_eden);
  resize(cur_promo, size_policy->promo_size());

  // Record the new size of the space in the cms generation
  // that is available for promotions.  This is temporary.
  // It should be the desired promo size.
  size_policy->avg_cms_promo()->sample(free());
  size_policy->avg_old_live()->sample(used());

  if (UsePerfData) {
    CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
    counters->update_cms_capacity_counter(capacity());
  }
}

void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
  assert_locked_or_safepoint(Heap_lock);
  assert_lock_strong(freelistLock());
  HeapWord* old_end = _cmsSpace->end();
  HeapWord* unallocated_start = _cmsSpace->unallocated_block();
  assert(old_end >= unallocated_start, "Miscalculation of unallocated_start");
  FreeChunk* chunk_at_end = find_chunk_at_end();
  if (chunk_at_end == NULL) {
    // No room to shrink
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("No room to shrink: old_end " PTR_FORMAT
        " unallocated_start " PTR_FORMAT
        " chunk_at_end " PTR_FORMAT,
        old_end, unallocated_start, chunk_at_end);
    }
    return;
  } else {

    // Find the chunk at the end of the space and determine
    // how much it can be shrunk.
    size_t shrinkable_size_in_bytes = chunk_at_end->size();
    size_t aligned_shrinkable_size_in_bytes =
      align_size_down(shrinkable_size_in_bytes, os::vm_page_size());
    assert(unallocated_start <= chunk_at_end->end(),
           "Inconsistent chunk at end of space");
    size_t bytes = MIN2(desired_bytes, aligned_shrinkable_size_in_bytes);
    size_t word_size_before = heap_word_size(_virtual_space.committed_size());

    // Shrink the underlying space
    _virtual_space.shrink_by(bytes);
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("ConcurrentMarkSweepGeneration::shrink_by:"
        " desired_bytes " SIZE_FORMAT
        " shrinkable_size_in_bytes " SIZE_FORMAT
        " aligned_shrinkable_size_in_bytes " SIZE_FORMAT
        " bytes " SIZE_FORMAT,
        desired_bytes, shrinkable_size_in_bytes,
        aligned_shrinkable_size_in_bytes, bytes);
      gclog_or_tty->print_cr(" old_end " SIZE_FORMAT
        " unallocated_start " SIZE_FORMAT,
        old_end, unallocated_start);
    }

    // If the space did shrink (shrinking is not guaranteed),
    // shrink the chunk at the end by the appropriate amount.
    if (((HeapWord*)_virtual_space.high()) < old_end) {
      size_t new_word_size =
        heap_word_size(_virtual_space.committed_size());

      // Have to remove the chunk from the dictionary because it is changing
      // size and might be someplace elsewhere in the dictionary.

      // Get the chunk at end, shrink it, and put it
      // back.
      _cmsSpace->removeChunkFromDictionary(chunk_at_end);
      size_t word_size_change = word_size_before - new_word_size;
      size_t chunk_at_end_old_size = chunk_at_end->size();
      assert(chunk_at_end_old_size >= word_size_change,
             "Shrink is too large");
      chunk_at_end->setSize(chunk_at_end_old_size -
                            word_size_change);
      _cmsSpace->freed((HeapWord*) chunk_at_end->end(),
                       word_size_change);

      _cmsSpace->returnChunkToDictionary(chunk_at_end);

      MemRegion mr(_cmsSpace->bottom(), new_word_size);
      _bts->resize(new_word_size);  // resize the block offset shared array
      Universe::heap()->barrier_set()->resize_covered_region(mr);
      _cmsSpace->assert_locked();
      _cmsSpace->set_end((HeapWord*)_virtual_space.high());

      NOT_PRODUCT(_cmsSpace->dictionary()->verify());

      // update the space and generation capacity counters
      if (UsePerfData) {
        _space_counters->update_capacity();
        _gen_counters->update_all();
      }

      if (Verbose && PrintGCDetails) {
        size_t new_mem_size = _virtual_space.committed_size();
        size_t old_mem_size = new_mem_size + bytes;
        gclog_or_tty->print_cr("Shrinking %s from %ldK by %ldK to %ldK",
                      name(), old_mem_size/K, bytes/K, new_mem_size/K);
      }
    }

    assert(_cmsSpace->unallocated_block() <= _cmsSpace->end(),
           "Inconsistency at end of space");
    assert(chunk_at_end->end() == _cmsSpace->end(),
           "Shrinking is inconsistent");
    return;
  }
}

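The shrinkable amount is the tail chunk's size rounded down to a whole page, capped by what was requested; only page-granular memory can be uncommitted. A short sketch of that arithmetic (assuming a 4 KiB page, purely illustrative):

// Editor's illustrative sketch -- not part of the original file.
#include <cstddef>
#include <algorithm>

static const size_t kPageSizeSketch = 4096;  // assumed os::vm_page_size()

// Bytes that can actually be uncommitted at the end of the space.
static size_t shrinkable_bytes(size_t tail_chunk_bytes, size_t desired_bytes) {
  size_t aligned = tail_chunk_bytes & ~(kPageSizeSketch - 1);  // align_size_down
  return std::min(desired_bytes, aligned);
}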
// Transfer some number of overflown objects to usual marking
// stack. Return true if some objects were transferred.
bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
  size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4,
                    (size_t)ParGCDesiredObjsFromOverflowList);

  bool res = _collector->take_from_overflow_list(num, _mark_stack);
  assert(_collector->overflow_list_is_empty() || res,
         "If list is not empty, we should have taken something");
  assert(!res || !_mark_stack->isEmpty(),
         "If we took something, it should now be on our stack");
  return res;
}

size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
  size_t res = _sp->block_size_no_stall(addr, _collector);
  if (_sp->block_is_obj(addr)) {
    if (_live_bit_map->isMarked(addr)) {
      // It can't have been dead in a previous cycle
      guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
    } else {
      _dead_bit_map->mark(addr);  // mark the dead object
    }
  }
  // Could be 0, if the block size could not be computed without stalling.
  return res;
}

TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause): TraceMemoryManagerStats() {

  switch (phase) {
    case CMSCollector::InitialMarking:
      initialize(true  /* fullGC */,
                 cause /* cause of the GC */,
                 true  /* recordGCBeginTime */,
                 true  /* recordPreGCUsage */,
                 false /* recordPeakUsage */,
                 false /* recordPostGCUsage */,
                 true  /* recordAccumulatedGCTime */,
                 false /* recordGCEndTime */,
                 false /* countCollection */);
      break;

    case CMSCollector::FinalMarking:
      initialize(true  /* fullGC */,
                 cause /* cause of the GC */,
                 false /* recordGCBeginTime */,
                 false /* recordPreGCUsage */,
                 false /* recordPeakUsage */,
                 false /* recordPostGCUsage */,
                 true  /* recordAccumulatedGCTime */,
                 false /* recordGCEndTime */,
                 false /* countCollection */);
      break;

    case CMSCollector::Sweeping:
      initialize(true  /* fullGC */,
                 cause /* cause of the GC */,
                 false /* recordGCBeginTime */,
                 false /* recordPreGCUsage */,
                 true  /* recordPeakUsage */,
                 true  /* recordPostGCUsage */,
                 false /* recordAccumulatedGCTime */,
                 true  /* recordGCEndTime */,
                 true  /* countCollection */);
      break;

    default:
      ShouldNotReachHere();
  }
}
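// ---------------------------------------------------------------------------
// Editor's note: a standalone sketch (hypothetical PhaseTracer type, not the
// VM's TraceMemoryManagerStats API) of the RAII pattern above. Each
// stop-the-world CMS phase puts a tracer on the stack, and the flags decide
// which piece of the bookkeeping that phase contributes, so InitialMarking,
// FinalMarking, and Sweeping together report one logical full collection.

#include <cstdio>

class PhaseTracer {
 public:
  PhaseTracer(bool record_begin, bool record_end, bool count_collection)
      : _record_end(record_end), _count(count_collection) {
    if (record_begin) std::printf("GC begin recorded\n");
  }
  ~PhaseTracer() {                      // fires when the phase's scope exits
    if (_record_end) std::printf("GC end recorded\n");
    if (_count)      std::printf("collection counted\n");
  }
 private:
  bool _record_end;
  bool _count;
};

void initial_mark() { PhaseTracer t(true,  false, false); /* pause work */ }
void final_mark()   { PhaseTracer t(false, false, false); /* pause work */ }
void sweep()        { PhaseTracer t(false, true,  true);  /* sweep work */ }

int main() {
  initial_mark();   // contributes the begin timestamp and pre-GC usage
  final_mark();     // contributes only accumulated pause time
  sweep();          // contributes the end timestamp, peak usage, and the count
  return 0;
}
// ---------------------------------------------------------------------------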
// Used when bailing out of CMS on a concurrent mode failure.
TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(GCCause::Cause cause): TraceMemoryManagerStats() {
  initialize(true  /* fullGC */,
             cause /* cause of the GC */,
             true  /* recordGCBeginTime */,
             true  /* recordPreGCUsage */,
             true  /* recordPeakUsage */,
             true  /* recordPostGCUsage */,
             true  /* recordAccumulatedGCTime */,
             true  /* recordGCEndTime */,
             true  /* countCollection */);
}
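// ---------------------------------------------------------------------------
// Editor's note: unlike the per-phase constructor above, this bail-out
// variant sets every flag because a concurrent mode failure collapses the
// whole cycle into a single foreground collection, so one tracer has to do
// all of the bookkeeping itself. A hypothetical call-site sketch (names are
// illustrative, not quoted from this file):
//
//   {
//     TraceCMSMemoryManagerStats tms(cause);  // all flags true
//     do_foreground_collection();             // the bail-out full GC
//   }
//
// The tracer's destructor then records the end time, post-GC usage, and the
// collection count when the scope closes.
// ---------------------------------------------------------------------------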