annotate src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp @ 187:790e66e5fbac
6687581: Make CMS work with compressed oops
Summary: Make FreeChunk read markword instead of LSB in _klass pointer to indicate that it's a FreeChunk for compressed oops.
Reviewed-by: ysr, jmasa
author | coleenp |
---|---|
date | Mon, 09 Jun 2008 11:51:19 -0400 |
parents | b5489bb705c9 |
children | d1605aabd0a1 6aae2f9d0294 |
rev | line source |
---|---|
0 | 1 /* |
2 * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. | |
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, | |
20 * CA 95054 USA or visit www.sun.com if you need additional information or | |
21 * have any questions. | |
22 * | |
23 */ | |
24 | |
25 # include "incls/_precompiled.incl" | |
26 # include "incls/_concurrentMarkSweepGeneration.cpp.incl" | |
27 | |
28 // statics | |
29 CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL; | |
30 bool CMSCollector::_full_gc_requested = false; | |
31 | |
32 ////////////////////////////////////////////////////////////////// | |
33 // In support of CMS/VM thread synchronization | |
34 ////////////////////////////////////////////////////////////////// | |
35 // We split use of the CGC_lock into 2 "levels". | |
36 // The low-level locking is of the usual CGC_lock monitor. We introduce | |
37 // a higher level "token" (hereafter "CMS token") built on top of the | |
38 // low level monitor (hereafter "CGC lock"). | |
39 // The token-passing protocol gives priority to the VM thread. The | |
40 // CMS-lock doesn't provide any fairness guarantees, but clients | |
41 // should ensure that it is only held for very short, bounded | |
42 // durations. | |
43 // | |
44 // When either of the CMS thread or the VM thread is involved in | |
45 // collection operations during which it does not want the other | |
46 // thread to interfere, it obtains the CMS token. | |
47 // | |
48 // If either thread tries to get the token while the other has | |
49 // it, that thread waits. However, if the VM thread and CMS thread | |
50 // both want the token, then the VM thread gets priority while the | |
51 // CMS thread waits. This ensures, for instance, that the "concurrent" | |
52 // phases of the CMS thread's work do not block out the VM thread | |
53 // for long periods of time as the CMS thread continues to hog | |
54 // the token. (See bug 4616232). | |
55 // | |
56 // The baton-passing functions are, however, controlled by the | |
57 // flags _foregroundGCShouldWait and _foregroundGCIsActive, | |
58 // and here the low-level CMS lock, not the high level token, | |
59 // ensures mutual exclusion. | |
60 // | |
61 // Two important conditions that we have to satisfy: | |
62 // 1. if a thread does a low-level wait on the CMS lock, then it | |
63 // relinquishes the CMS token if it were holding that token | |
64 // when it acquired the low-level CMS lock. | |
65 // 2. any low-level notifications on the low-level lock | |
66 // should only be sent when a thread has relinquished the token. | |
67 // | |
68 // In the absence of either property, we'd have potential deadlock. | |
69 // | |
70 // We protect each of the CMS (concurrent and sequential) phases | |
71 // with the CMS _token_, not the CMS _lock_. | |
72 // | |
73 // The only code protected by CMS lock is the token acquisition code | |
74 // itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the | |
75 // baton-passing code. | |
76 // | |
77 // Unfortunately, I couldn't come up with a good abstraction to factor and | |
78 // hide the naked CGC_lock manipulation in the baton-passing code | |
79 // further below. That's something we should try to do. Also, the proof | |
80 // of correctness of this 2-level locking scheme is far from obvious, | |
81 // and potentially quite slippery. We have an uneasy suspicion, for instance, | |
82 // that there may be a theoretical possibility of delay/starvation in the | |
83 // low-level lock/wait/notify scheme used for the baton-passing because of | |
84 // potential interference with the priority scheme embodied in the | |
85 // CMS-token-passing protocol. See related comments at a CGC_lock->wait() | |
86 // invocation further below and marked with "XXX 20011219YSR". | |
87 // Indeed, as we note elsewhere, this may become yet more slippery | |
88 // in the presence of multiple CMS and/or multiple VM threads. XXX | |
89 | |
90 class CMSTokenSync: public StackObj { | |
91 private: | |
92 bool _is_cms_thread; | |
93 public: | |
94 CMSTokenSync(bool is_cms_thread): | |
95 _is_cms_thread(is_cms_thread) { | |
96 assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(), | |
97 "Incorrect argument to constructor"); | |
98 ConcurrentMarkSweepThread::synchronize(_is_cms_thread); | |
99 } | |
100 | |
101 ~CMSTokenSync() { | |
102 assert(_is_cms_thread ? | |
103 ConcurrentMarkSweepThread::cms_thread_has_cms_token() : | |
104 ConcurrentMarkSweepThread::vm_thread_has_cms_token(), | |
105 "Incorrect state"); | |
106 ConcurrentMarkSweepThread::desynchronize(_is_cms_thread); | |
107 } | |
108 }; | |
109 | |
110 // Convenience class that does a CMSTokenSync, and then acquires | |
111 // up to three locks. | |
112 class CMSTokenSyncWithLocks: public CMSTokenSync { | |
113 private: | |
114 // Note: locks are acquired in textual declaration order | |
115 // and released in the opposite order | |
116 MutexLockerEx _locker1, _locker2, _locker3; | |
117 public: | |
118 CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1, | |
119 Mutex* mutex2 = NULL, Mutex* mutex3 = NULL): | |
120 CMSTokenSync(is_cms_thread), | |
121 _locker1(mutex1, Mutex::_no_safepoint_check_flag), | |
122 _locker2(mutex2, Mutex::_no_safepoint_check_flag), | |
123 _locker3(mutex3, Mutex::_no_safepoint_check_flag) | |
124 { } | |
125 }; | |
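The two classes above package the token protocol as RAII guards: acquisition in the constructor, release in the destructor, so a phase cannot exit a scope without giving the token back. Below is a minimal standalone sketch of the same scoped-token idiom; the mutex-guarded flag and the names `TokenSyncSketch` and `cms_phase_work` are illustrative stand-ins for `ConcurrentMarkSweepThread::[de]synchronize()`, not HotSpot APIs, and the demo is single-threaded.

```cpp
#include <cassert>
#include <mutex>

std::mutex cgc_lock;      // stands in for the low-level CGC_lock
bool token_held = false;  // stands in for the CMS token

struct TokenSyncSketch {
  TokenSyncSketch() {     // ctor plays the role of synchronize()
    std::lock_guard<std::mutex> g(cgc_lock);
    assert(!token_held && "single-threaded demo: token must be free");
    token_held = true;
  }
  ~TokenSyncSketch() {    // dtor plays the role of desynchronize()
    std::lock_guard<std::mutex> g(cgc_lock);
    token_held = false;
  }
};

void cms_phase_work() {
  TokenSyncSketch sync;   // token held for the whole scope
  // ... work that must exclude the other thread would go here ...
}                         // token released automatically on scope exit

int main() { cms_phase_work(); }
```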
126 | |
127 | |
128 // Wrapper class to temporarily disable icms during a foreground cms collection. | |
129 class ICMSDisabler: public StackObj { | |
130 public: | |
131 // The ctor disables icms and wakes up the thread so it notices the change; | |
132 // the dtor re-enables icms. Note that the CMSCollector methods will check | |
133 // CMSIncrementalMode. | |
134 ICMSDisabler() { CMSCollector::disable_icms(); CMSCollector::start_icms(); } | |
135 ~ICMSDisabler() { CMSCollector::enable_icms(); } | |
136 }; | |
137 | |
138 ////////////////////////////////////////////////////////////////// | |
139 // Concurrent Mark-Sweep Generation ///////////////////////////// | |
140 ////////////////////////////////////////////////////////////////// | |
141 | |
142 NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;) | |
143 | |
144 // This struct contains per-thread things necessary to support parallel | |
145 // young-gen collection. | |
146 class CMSParGCThreadState: public CHeapObj { | |
147 public: | |
148 CFLS_LAB lab; | |
149 PromotionInfo promo; | |
150 | |
151 // Constructor. | |
152 CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) { | |
153 promo.setSpace(cfls); | |
154 } | |
155 }; | |
156 | |
157 ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration( | |
158 ReservedSpace rs, size_t initial_byte_size, int level, | |
159 CardTableRS* ct, bool use_adaptive_freelists, | |
160 FreeBlockDictionary::DictionaryChoice dictionaryChoice) : | |
161 CardGeneration(rs, initial_byte_size, level, ct), | |
162 _dilatation_factor(((double)MinChunkSize)/((double)(oopDesc::header_size()))), | |
163 _debug_collection_type(Concurrent_collection_type) | |
164 { | |
165 HeapWord* bottom = (HeapWord*) _virtual_space.low(); | |
166 HeapWord* end = (HeapWord*) _virtual_space.high(); | |
167 | |
168 _direct_allocated_words = 0; | |
169 NOT_PRODUCT( | |
170 _numObjectsPromoted = 0; | |
171 _numWordsPromoted = 0; | |
172 _numObjectsAllocated = 0; | |
173 _numWordsAllocated = 0; | |
174 ) | |
175 | |
176 _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end), | |
177 use_adaptive_freelists, | |
178 dictionaryChoice); | |
179 NOT_PRODUCT(debug_cms_space = _cmsSpace;) | |
180 if (_cmsSpace == NULL) { | |
181 vm_exit_during_initialization( | |
182 "CompactibleFreeListSpace allocation failure"); | |
183 } | |
184 _cmsSpace->_gen = this; | |
185 | |
186 _gc_stats = new CMSGCStats(); | |
187 | |
188 // Verify the assumption that FreeChunk::_prev and OopDesc::_klass | |
189 // offsets match. The ability to tell free chunks from objects | |
190 // depends on this property. | |
191 debug_only( | |
192 FreeChunk* junk = NULL; | |
187 | 193 assert(UseCompressedOops || |
194 junk->prev_addr() == (void*)(oop(junk)->klass_addr()), | |
0 | 195 "Offset of FreeChunk::_prev within FreeChunk must match" |
196 " that of OopDesc::_klass within OopDesc"); | |
197 ) | |
198 if (ParallelGCThreads > 0) { | |
199 typedef CMSParGCThreadState* CMSParGCThreadStatePtr; | |
200 _par_gc_thread_states = | |
201 NEW_C_HEAP_ARRAY(CMSParGCThreadStatePtr, ParallelGCThreads); | |
202 if (_par_gc_thread_states == NULL) { | |
203 vm_exit_during_initialization("Could not allocate par gc structs"); | |
204 } | |
205 for (uint i = 0; i < ParallelGCThreads; i++) { | |
206 _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace()); | |
207 if (_par_gc_thread_states[i] == NULL) { | |
208 vm_exit_during_initialization("Could not allocate par gc structs"); | |
209 } | |
210 } | |
211 } else { | |
212 _par_gc_thread_states = NULL; | |
213 } | |
214 _incremental_collection_failed = false; | |
215 // The "dilatation_factor" is the expansion that can occur on | |
216 // account of the fact that the minimum object size in the CMS | |
217 // generation may be larger than that in, say, a contiguous young | |
218 // generation. | |
219 // Ideally, in the calculation below, we'd compute the dilatation | |
220 // factor as: MinChunkSize/(promoting_gen's min object size) | |
221 // Since we do not have such a general query interface for the | |
222 // promoting generation, we'll instead just use the minimum | |
223 // object size (which today is a header's worth of space); | |
224 // note that all arithmetic is in units of HeapWords. | |
225 assert(MinChunkSize >= oopDesc::header_size(), "just checking"); | |
226 assert(_dilatation_factor >= 1.0, "from previous assert"); | |
227 } | |
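As an illustrative example (the numbers are assumed, not taken from any particular configuration): if oopDesc::header_size() were 2 HeapWords and MinChunkSize 3 HeapWords, _dilatation_factor would be 3/2 = 1.5, meaning promoted data could need up to 50% more space in the CMS generation than its footprint in the promoting generation.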
228 | |
94 | 229 |
230 // The field "_initiating_occupancy" represents the occupancy percentage | |
231 // at which we trigger a new collection cycle. Unless explicitly specified | |
232 // via CMSInitiating[Perm]OccupancyFraction (argument "io" below), it | |
233 // is calculated by: | |
234 // | |
235 // Let "f" be MinHeapFreeRatio in | |
236 // | |
237 // _initiating_occupancy = 100-f + | |
238 // f * (CMSTrigger[Perm]Ratio/100) | |
239 // where CMSTrigger[Perm]Ratio is the argument "tr" below. | |
240 // | |
241 // That is, if we assume the heap is at its desired maximum occupancy at the | |
242 // end of a collection, we let CMSTrigger[Perm]Ratio of the (purported) free | |
243 // space be allocated before initiating a new collection cycle. | |
244 // | |
245 void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, intx tr) { | |
246 assert(io <= 100 && tr >= 0 && tr <= 100, "Check the arguments"); | |
247 if (io >= 0) { | |
248 _initiating_occupancy = (double)io / 100.0; | |
249 } else { | |
250 _initiating_occupancy = ((100 - MinHeapFreeRatio) + | |
251 (double)(tr * MinHeapFreeRatio) / 100.0) | |
252 / 100.0; | |
253 } | |
254 } | |
255 | |
256 | |
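A worked example of the computed path (illustrative values): with MinHeapFreeRatio f = 40 and CMSTriggerRatio tr = 80, _initiating_occupancy = ((100 - 40) + 40 * 80 / 100) / 100 = (60 + 32) / 100 = 0.92, i.e. a new cycle is initiated once the generation reaches 92% occupancy.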
0 | 257 void ConcurrentMarkSweepGeneration::ref_processor_init() { |
258 assert(collector() != NULL, "no collector"); | |
259 collector()->ref_processor_init(); | |
260 } | |
261 | |
262 void CMSCollector::ref_processor_init() { | |
263 if (_ref_processor == NULL) { | |
264 // Allocate and initialize a reference processor | |
265 _ref_processor = ReferenceProcessor::create_ref_processor( | |
266 _span, // span | |
267 _cmsGen->refs_discovery_is_atomic(), // atomic_discovery | |
268 _cmsGen->refs_discovery_is_mt(), // mt_discovery | |
269 &_is_alive_closure, | |
270 ParallelGCThreads, | |
271 ParallelRefProcEnabled); | |
272 // Initialize the _ref_processor field of CMSGen | |
273 _cmsGen->set_ref_processor(_ref_processor); | |
274 | |
275 // Allocate a dummy ref processor for perm gen. | |
276 ReferenceProcessor* rp2 = new ReferenceProcessor(); | |
277 if (rp2 == NULL) { | |
278 vm_exit_during_initialization("Could not allocate ReferenceProcessor object"); | |
279 } | |
280 _permGen->set_ref_processor(rp2); | |
281 } | |
282 } | |
283 | |
284 CMSAdaptiveSizePolicy* CMSCollector::size_policy() { | |
285 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
286 assert(gch->kind() == CollectedHeap::GenCollectedHeap, | |
287 "Wrong type of heap"); | |
288 CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*) | |
289 gch->gen_policy()->size_policy(); | |
290 assert(sp->is_gc_cms_adaptive_size_policy(), | |
291 "Wrong type of size policy"); | |
292 return sp; | |
293 } | |
294 | |
295 CMSGCAdaptivePolicyCounters* CMSCollector::gc_adaptive_policy_counters() { | |
296 CMSGCAdaptivePolicyCounters* results = | |
297 (CMSGCAdaptivePolicyCounters*) collector_policy()->counters(); | |
298 assert( | |
299 results->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind, | |
300 "Wrong gc policy counter kind"); | |
301 return results; | |
302 } | |
303 | |
304 | |
305 void ConcurrentMarkSweepGeneration::initialize_performance_counters() { | |
306 | |
307 const char* gen_name = "old"; | |
308 | |
309 // Generation Counters - generation 1, 1 subspace | |
310 _gen_counters = new GenerationCounters(gen_name, 1, 1, &_virtual_space); | |
311 | |
312 _space_counters = new GSpaceCounters(gen_name, 0, | |
313 _virtual_space.reserved_size(), | |
314 this, _gen_counters); | |
315 } | |
316 | |
317 CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha): | |
318 _cms_gen(cms_gen) | |
319 { | |
320 assert(alpha <= 100, "bad value"); | |
321 _saved_alpha = alpha; | |
322 | |
323 // Initialize the alphas to the bootstrap value of 100. | |
324 _gc0_alpha = _cms_alpha = 100; | |
325 | |
326 _cms_begin_time.update(); | |
327 _cms_end_time.update(); | |
328 | |
329 _gc0_duration = 0.0; | |
330 _gc0_period = 0.0; | |
331 _gc0_promoted = 0; | |
332 | |
333 _cms_duration = 0.0; | |
334 _cms_period = 0.0; | |
335 _cms_allocated = 0; | |
336 | |
337 _cms_used_at_gc0_begin = 0; | |
338 _cms_used_at_gc0_end = 0; | |
339 _allow_duty_cycle_reduction = false; | |
340 _valid_bits = 0; | |
341 _icms_duty_cycle = CMSIncrementalDutyCycle; | |
342 } | |
343 | |
344 // If promotion failure handling is on use | |
345 // the padded average size of the promotion for each | |
346 // young generation collection. | |
347 double CMSStats::time_until_cms_gen_full() const { | |
348 size_t cms_free = _cms_gen->cmsSpace()->free(); | |
349 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
350 size_t expected_promotion = gch->get_gen(0)->capacity(); | |
351 if (HandlePromotionFailure) { | |
352 expected_promotion = MIN2( | |
353 (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average(), | |
354 expected_promotion); | |
355 } | |
356 if (cms_free > expected_promotion) { | |
357 // Start a cms collection if there isn't enough space to promote | |
358 // for the next minor collection. Use the padded average as | |
359 // a safety factor. | |
360 cms_free -= expected_promotion; | |
361 | |
362 // Adjust by the safety factor. | |
363 double cms_free_dbl = (double)cms_free; | |
364 cms_free_dbl = cms_free_dbl * (100.0 - CMSIncrementalSafetyFactor) / 100.0; | |
365 | |
366 if (PrintGCDetails && Verbose) { | |
367 gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free " | |
368 SIZE_FORMAT " expected_promotion " SIZE_FORMAT, | |
369 cms_free, expected_promotion); | |
370 gclog_or_tty->print_cr(" cms_free_dbl %f cms_consumption_rate %f", | |
371 cms_free_dbl, cms_consumption_rate() + 1.0); | |
372 } | |
373 // Add 1 in case the consumption rate goes to zero. | |
374 return cms_free_dbl / (cms_consumption_rate() + 1.0); | |
375 } | |
376 return 0.0; | |
377 } | |
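For illustration (assumed figures): with 110 MB free, 20 MB of expected promotion, and CMSIncrementalSafetyFactor = 10, cms_free becomes 90 MB and is scaled down to 81 MB; at a consumption rate of 8 MB/s the method returns 81 / (8 + 1) = 9 seconds until the generation is expected to fill.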
378 | |
379 // Compare the duration of the cms collection to the | |
380 // time remaining before the cms generation is empty. | |
381 // Note that the time from the start of the cms collection | |
382 // to the start of the cms sweep (less than the total | |
383 // duration of the cms collection) can be used. This | |
384 // has been tried and some applications experienced | |
385 // promotion failures early in execution. This was | |
386 // possibly because the averages were not accurate | |
387 // enough at the beginning. | |
388 double CMSStats::time_until_cms_start() const { | |
389 // We add "gc0_period" to the "work" calculation | |
390 // below because this query is done (mostly) at the | |
391 // end of a scavenge, so we need to conservatively | |
392 // account for that much possible delay | |
393 // in the query so as to avoid concurrent mode failures | |
394 // due to starting the collection just a wee bit too | |
395 // late. | |
396 double work = cms_duration() + gc0_period(); | |
397 double deadline = time_until_cms_gen_full(); | |
398 if (work > deadline) { | |
399 if (Verbose && PrintGCDetails) { | |
400 gclog_or_tty->print( | |
401 " CMSCollector: collect because of anticipated promotion " | |
402 "before full %3.7f + %3.7f > %3.7f ", cms_duration(), | |
403 gc0_period(), time_until_cms_gen_full()); | |
404 } | |
405 return 0.0; | |
406 } | |
407 return deadline - work; | |
408 } | |
409 | |
410 // Return a duty cycle based on old_duty_cycle and new_duty_cycle, limiting the | |
411 // amount of change to prevent wild oscillation. | |
412 unsigned int CMSStats::icms_damped_duty_cycle(unsigned int old_duty_cycle, | |
413 unsigned int new_duty_cycle) { | |
414 assert(old_duty_cycle <= 100, "bad input value"); | |
415 assert(new_duty_cycle <= 100, "bad input value"); | |
416 | |
417 // Note: use subtraction with caution since it may underflow (values are | |
418 // unsigned). Addition is safe since we're in the range 0-100. | |
419 unsigned int damped_duty_cycle = new_duty_cycle; | |
420 if (new_duty_cycle < old_duty_cycle) { | |
421 const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 5U); | |
422 if (new_duty_cycle + largest_delta < old_duty_cycle) { | |
423 damped_duty_cycle = old_duty_cycle - largest_delta; | |
424 } | |
425 } else if (new_duty_cycle > old_duty_cycle) { | |
426 const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 15U); | |
427 if (new_duty_cycle > old_duty_cycle + largest_delta) { | |
428 damped_duty_cycle = MIN2(old_duty_cycle + largest_delta, 100U); | |
429 } | |
430 } | |
431 assert(damped_duty_cycle <= 100, "invalid duty cycle computed"); | |
432 | |
433 if (CMSTraceIncrementalPacing) { | |
434 gclog_or_tty->print(" [icms_damped_duty_cycle(%d,%d) = %d] ", | |
435 old_duty_cycle, new_duty_cycle, damped_duty_cycle); | |
436 } | |
437 return damped_duty_cycle; | |
438 } | |
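The same damping outside HotSpot, as a self-contained sketch (the helper name `damped` and the `main` driver are illustrative, and MAX2/MIN2 are replaced by their `<algorithm>` equivalents):

```cpp
#include <algorithm>
#include <cstdio>

// Mirrors icms_damped_duty_cycle(): cap how far the duty cycle may
// fall (a quarter of the old value, at least 5) or rise (a quarter of
// the old value, at least 15) in a single adjustment.
unsigned damped(unsigned old_dc, unsigned new_dc) {
  unsigned result = new_dc;
  if (new_dc < old_dc) {
    unsigned delta = std::max(old_dc / 4, 5u);
    if (new_dc + delta < old_dc) result = old_dc - delta;
  } else if (new_dc > old_dc) {
    unsigned delta = std::max(old_dc / 4, 15u);
    if (new_dc > old_dc + delta) result = std::min(old_dc + delta, 100u);
  }
  return result;
}

int main() {
  std::printf("%u\n", damped(80, 10));  // 60: fall capped at 20
  std::printf("%u\n", damped(20, 90));  // 35: rise capped at 15
}
```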
439 | |
440 unsigned int CMSStats::icms_update_duty_cycle_impl() { | |
441 assert(CMSIncrementalPacing && valid(), | |
442 "should be handled in icms_update_duty_cycle()"); | |
443 | |
444 double cms_time_so_far = cms_timer().seconds(); | |
445 double scaled_duration = cms_duration_per_mb() * _cms_used_at_gc0_end / M; | |
446 double scaled_duration_remaining = fabsd(scaled_duration - cms_time_so_far); | |
447 | |
448 // Avoid division by 0. | |
449 double time_until_full = MAX2(time_until_cms_gen_full(), 0.01); | |
450 double duty_cycle_dbl = 100.0 * scaled_duration_remaining / time_until_full; | |
451 | |
452 unsigned int new_duty_cycle = MIN2((unsigned int)duty_cycle_dbl, 100U); | |
453 if (new_duty_cycle > _icms_duty_cycle) { | |
454 // Avoid very small duty cycles (1 or 2); 0 is allowed. | |
455 if (new_duty_cycle > 2) { | |
456 _icms_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle, | |
457 new_duty_cycle); | |
458 } | |
459 } else if (_allow_duty_cycle_reduction) { | |
460 // The duty cycle is reduced only once per cms cycle (see record_cms_end()). | |
461 new_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle, new_duty_cycle); | |
462 // Respect the minimum duty cycle. | |
463 unsigned int min_duty_cycle = (unsigned int)CMSIncrementalDutyCycleMin; | |
464 _icms_duty_cycle = MAX2(new_duty_cycle, min_duty_cycle); | |
465 } | |
466 | |
467 if (PrintGCDetails || CMSTraceIncrementalPacing) { | |
468 gclog_or_tty->print(" icms_dc=%d ", _icms_duty_cycle); | |
469 } | |
470 | |
471 _allow_duty_cycle_reduction = false; | |
472 return _icms_duty_cycle; | |
473 } | |
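Illustrative numbers for the computation above: with cms_duration_per_mb() = 0.02 s/MB and 500 MB used at the end of gc0, scaled_duration is 10 s; if 4 s of CMS time have elapsed, scaled_duration_remaining is 6 s, and with time_until_full = 30 s the raw duty cycle is 100 * 6 / 30 = 20 percent before damping and clamping are applied.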
474 | |
475 #ifndef PRODUCT | |
476 void CMSStats::print_on(outputStream *st) const { | |
477 st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha); | |
478 st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT, | |
479 gc0_duration(), gc0_period(), gc0_promoted()); | |
480 st->print(",cms_dur=%g,cms_dur_per_mb=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT, | |
481 cms_duration(), cms_duration_per_mb(), | |
482 cms_period(), cms_allocated()); | |
483 st->print(",cms_since_beg=%g,cms_since_end=%g", | |
484 cms_time_since_begin(), cms_time_since_end()); | |
485 st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT, | |
486 _cms_used_at_gc0_begin, _cms_used_at_gc0_end); | |
487 if (CMSIncrementalMode) { | |
488 st->print(",dc=%d", icms_duty_cycle()); | |
489 } | |
490 | |
491 if (valid()) { | |
492 st->print(",promo_rate=%g,cms_alloc_rate=%g", | |
493 promotion_rate(), cms_allocation_rate()); | |
494 st->print(",cms_consumption_rate=%g,time_until_full=%g", | |
495 cms_consumption_rate(), time_until_cms_gen_full()); | |
496 } | |
497 st->print(" "); | |
498 } | |
499 #endif // #ifndef PRODUCT | |
500 | |
501 CMSCollector::CollectorState CMSCollector::_collectorState = | |
502 CMSCollector::Idling; | |
503 bool CMSCollector::_foregroundGCIsActive = false; | |
504 bool CMSCollector::_foregroundGCShouldWait = false; | |
505 | |
506 CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen, | |
507 ConcurrentMarkSweepGeneration* permGen, | |
508 CardTableRS* ct, | |
509 ConcurrentMarkSweepPolicy* cp): | |
510 _cmsGen(cmsGen), | |
511 _permGen(permGen), | |
512 _ct(ct), | |
513 _ref_processor(NULL), // will be set later | |
514 _conc_workers(NULL), // may be set later | |
515 _abort_preclean(false), | |
516 _start_sampling(false), | |
517 _between_prologue_and_epilogue(false), | |
518 _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"), | |
519 _perm_gen_verify_bit_map(0, -1 /* no mutex */, "No_lock"), | |
520 _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize), | |
521 -1 /* lock-free */, "No_lock" /* dummy */), | |
522 _modUnionClosure(&_modUnionTable), | |
523 _modUnionClosurePar(&_modUnionTable), | |
143 | 524 // Adjust my span to cover old (cms) gen and perm gen |
525 _span(cmsGen->reserved()._union(permGen->reserved())), | |
526 // Construct the is_alive_closure with _span & markBitMap | |
527 _is_alive_closure(_span, &_markBitMap), | |
0 | 528 _restart_addr(NULL), |
529 _overflow_list(NULL), | |
530 _preserved_oop_stack(NULL), | |
531 _preserved_mark_stack(NULL), | |
532 _stats(cmsGen), | |
533 _eden_chunk_array(NULL), // may be set in ctor body | |
534 _eden_chunk_capacity(0), // -- ditto -- | |
535 _eden_chunk_index(0), // -- ditto -- | |
536 _survivor_plab_array(NULL), // -- ditto -- | |
537 _survivor_chunk_array(NULL), // -- ditto -- | |
538 _survivor_chunk_capacity(0), // -- ditto -- | |
539 _survivor_chunk_index(0), // -- ditto -- | |
540 _ser_pmc_preclean_ovflw(0), | |
541 _ser_pmc_remark_ovflw(0), | |
542 _par_pmc_remark_ovflw(0), | |
543 _ser_kac_ovflw(0), | |
544 _par_kac_ovflw(0), | |
545 #ifndef PRODUCT | |
546 _num_par_pushes(0), | |
547 #endif | |
548 _collection_count_start(0), | |
549 _verifying(false), | |
550 _icms_start_limit(NULL), | |
551 _icms_stop_limit(NULL), | |
552 _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"), | |
553 _completed_initialization(false), | |
554 _collector_policy(cp), | |
94 | 555 _should_unload_classes(false), |
556 _concurrent_cycles_since_last_unload(0), | |
0 | 557 _sweep_estimate(CMS_SweepWeight, CMS_SweepPadding) |
558 { | |
559 if (ExplicitGCInvokesConcurrentAndUnloadsClasses) { | |
560 ExplicitGCInvokesConcurrent = true; | |
561 } | |
562 // Now expand the span and allocate the collection support structures | |
563 // (MUT, marking bit map etc.) to cover both generations subject to | |
564 // collection. | |
565 | |
566 // First check that _permGen is adjacent to _cmsGen and above it. | |
567 assert( _cmsGen->reserved().word_size() > 0 | |
568 && _permGen->reserved().word_size() > 0, | |
569 "generations should not be of zero size"); | |
570 assert(_cmsGen->reserved().intersection(_permGen->reserved()).is_empty(), | |
571 "_cmsGen and _permGen should not overlap"); | |
572 assert(_cmsGen->reserved().end() == _permGen->reserved().start(), | |
573 "_cmsGen->end() different from _permGen->start()"); | |
574 | |
575 // For use by dirty card to oop closures. | |
576 _cmsGen->cmsSpace()->set_collector(this); | |
577 _permGen->cmsSpace()->set_collector(this); | |
578 | |
579 // Allocate MUT and marking bit map | |
580 { | |
581 MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag); | |
582 if (!_markBitMap.allocate(_span)) { | |
583 warning("Failed to allocate CMS Bit Map"); | |
584 return; | |
585 } | |
586 assert(_markBitMap.covers(_span), "_markBitMap inconsistency?"); | |
587 } | |
588 { | |
589 _modUnionTable.allocate(_span); | |
590 assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?"); | |
591 } | |
592 | |
593 if (!_markStack.allocate(CMSMarkStackSize)) { | |
594 warning("Failed to allocate CMS Marking Stack"); | |
595 return; | |
596 } | |
597 if (!_revisitStack.allocate(CMSRevisitStackSize)) { | |
598 warning("Failed to allocate CMS Revisit Stack"); | |
599 return; | |
600 } | |
601 | |
602 // Support for multi-threaded concurrent phases | |
603 if (ParallelGCThreads > 0 && CMSConcurrentMTEnabled) { | |
604 if (FLAG_IS_DEFAULT(ParallelCMSThreads)) { | |
605 // just for now | |
606 FLAG_SET_DEFAULT(ParallelCMSThreads, (ParallelGCThreads + 3)/4); | |
607 } | |
608 if (ParallelCMSThreads > 1) { | |
609 _conc_workers = new YieldingFlexibleWorkGang("Parallel CMS Threads", | |
610 ParallelCMSThreads, true); | |
611 if (_conc_workers == NULL) { | |
612 warning("GC/CMS: _conc_workers allocation failure: " | |
613 "forcing -CMSConcurrentMTEnabled"); | |
614 CMSConcurrentMTEnabled = false; | |
615 } | |
616 } else { | |
617 CMSConcurrentMTEnabled = false; | |
618 } | |
619 } | |
620 if (!CMSConcurrentMTEnabled) { | |
621 ParallelCMSThreads = 0; | |
622 } else { | |
623 // Turn off CMSCleanOnEnter optimization temporarily for | |
624 // the MT case where it's not fixed yet; see 6178663. | |
625 CMSCleanOnEnter = false; | |
626 } | |
627 assert((_conc_workers != NULL) == (ParallelCMSThreads > 1), | |
628 "Inconsistency"); | |
629 | |
630 // Parallel task queues; these are shared for the | |
631 // concurrent and stop-world phases of CMS, but | |
632 // are not shared with parallel scavenge (ParNew). | |
633 { | |
634 uint i; | |
635 uint num_queues = (uint) MAX2(ParallelGCThreads, ParallelCMSThreads); | |
636 | |
637 if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled | |
638 || ParallelRefProcEnabled) | |
639 && num_queues > 0) { | |
640 _task_queues = new OopTaskQueueSet(num_queues); | |
641 if (_task_queues == NULL) { | |
642 warning("task_queues allocation failure."); | |
643 return; | |
644 } | |
645 _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues); | |
646 if (_hash_seed == NULL) { | |
647 warning("_hash_seed array allocation failure"); | |
648 return; | |
649 } | |
650 | |
651 // XXX use a global constant instead of 64! | |
652 typedef struct OopTaskQueuePadded { | |
653 OopTaskQueue work_queue; | |
654 char pad[64 - sizeof(OopTaskQueue)]; // prevent false sharing | |
655 } OopTaskQueuePadded; | |
656 | |
657 for (i = 0; i < num_queues; i++) { | |
658 OopTaskQueuePadded *q_padded = new OopTaskQueuePadded(); | |
659 if (q_padded == NULL) { | |
660 warning("work_queue allocation failure."); | |
661 return; | |
662 } | |
663 _task_queues->register_queue(i, &q_padded->work_queue); | |
664 } | |
665 for (i = 0; i < num_queues; i++) { | |
666 _task_queues->queue(i)->initialize(); | |
667 _hash_seed[i] = 17; // copied from ParNew | |
668 } | |
669 } | |
670 } | |
671 | |
94 | 672 _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio); |
673 _permGen->init_initiating_occupancy(CMSInitiatingPermOccupancyFraction, CMSTriggerPermRatio); | |
674 | |
0 | 675 // Clip CMSBootstrapOccupancy between 0 and 100. |
94 | 676 _bootstrap_occupancy = ((double)MIN2((uintx)100, MAX2((uintx)0, CMSBootstrapOccupancy))) |
0 | 677 /(double)100; |
678 | |
679 _full_gcs_since_conc_gc = 0; | |
680 | |
681 // Now tell CMS generations the identity of their collector | |
682 ConcurrentMarkSweepGeneration::set_collector(this); | |
683 | |
684 // Create & start a CMS thread for this CMS collector | |
685 _cmsThread = ConcurrentMarkSweepThread::start(this); | |
686 assert(cmsThread() != NULL, "CMS Thread should have been created"); | |
687 assert(cmsThread()->collector() == this, | |
688 "CMS Thread should refer to this gen"); | |
689 assert(CGC_lock != NULL, "Where's the CGC_lock?"); | |
690 | |
691 // Support for parallelizing young gen rescan | |
692 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
693 _young_gen = gch->prev_gen(_cmsGen); | |
694 if (gch->supports_inline_contig_alloc()) { | |
695 _top_addr = gch->top_addr(); | |
696 _end_addr = gch->end_addr(); | |
697 assert(_young_gen != NULL, "no _young_gen"); | |
698 _eden_chunk_index = 0; | |
699 _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain; | |
700 _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity); | |
701 if (_eden_chunk_array == NULL) { | |
702 _eden_chunk_capacity = 0; | |
703 warning("GC/CMS: _eden_chunk_array allocation failure"); | |
704 } | |
705 } | |
706 assert(_eden_chunk_array != NULL || _eden_chunk_capacity == 0, "Error"); | |
707 | |
708 // Support for parallelizing survivor space rescan | |
709 if (CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) { | |
710 size_t max_plab_samples = MaxNewSize/((SurvivorRatio+2)*MinTLABSize); | |
711 _survivor_plab_array = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads); | |
712 _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples); | |
713 _cursor = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads); | |
714 if (_survivor_plab_array == NULL || _survivor_chunk_array == NULL | |
715 || _cursor == NULL) { | |
716 warning("Failed to allocate survivor plab/chunk array"); | |
717 if (_survivor_plab_array != NULL) { | |
718 FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array); | |
719 _survivor_plab_array = NULL; | |
720 } | |
721 if (_survivor_chunk_array != NULL) { | |
722 FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array); | |
723 _survivor_chunk_array = NULL; | |
724 } | |
725 if (_cursor != NULL) { | |
726 FREE_C_HEAP_ARRAY(size_t, _cursor); | |
727 _cursor = NULL; | |
728 } | |
729 } else { | |
730 _survivor_chunk_capacity = 2*max_plab_samples; | |
731 for (uint i = 0; i < ParallelGCThreads; i++) { | |
732 HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples); | |
733 if (vec == NULL) { | |
734 warning("Failed to allocate survivor plab array"); | |
735 for (int j = i; j > 0; j--) { | |
736 FREE_C_HEAP_ARRAY(HeapWord*, _survivor_plab_array[j-1].array()); | |
737 } | |
738 FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array); | |
739 FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array); | |
740 _survivor_plab_array = NULL; | |
741 _survivor_chunk_array = NULL; | |
742 _survivor_chunk_capacity = 0; | |
743 break; | |
744 } else { | |
745 ChunkArray* cur = | |
746 ::new (&_survivor_plab_array[i]) ChunkArray(vec, | |
747 max_plab_samples); | |
748 assert(cur->end() == 0, "Should be 0"); | |
749 assert(cur->array() == vec, "Should be vec"); | |
750 assert(cur->capacity() == max_plab_samples, "Error"); | |
751 } | |
752 } | |
753 } | |
754 } | |
755 assert( ( _survivor_plab_array != NULL | |
756 && _survivor_chunk_array != NULL) | |
757 || ( _survivor_chunk_capacity == 0 | |
758 && _survivor_chunk_index == 0), | |
759 "Error"); | |
760 | |
761 // Choose what strong roots should be scanned depending on verification options | |
762 // and perm gen collection mode. | |
763 if (!CMSClassUnloadingEnabled) { | |
764 // If class unloading is disabled we want to include all classes into the root set. | |
765 add_root_scanning_option(SharedHeap::SO_AllClasses); | |
766 } else { | |
767 add_root_scanning_option(SharedHeap::SO_SystemClasses); | |
768 } | |
769 | |
770 NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;) | |
771 _gc_counters = new CollectorCounters("CMS", 1); | |
772 _completed_initialization = true; | |
773 _sweep_timer.start(); // start of time | |
774 } | |
775 | |
776 const char* ConcurrentMarkSweepGeneration::name() const { | |
777 return "concurrent mark-sweep generation"; | |
778 } | |
779 void ConcurrentMarkSweepGeneration::update_counters() { | |
780 if (UsePerfData) { | |
781 _space_counters->update_all(); | |
782 _gen_counters->update_all(); | |
783 } | |
784 } | |
785 | |
786 // this is an optimized version of update_counters(). it takes the | |
787 // used value as a parameter rather than computing it. | |
788 // | |
789 void ConcurrentMarkSweepGeneration::update_counters(size_t used) { | |
790 if (UsePerfData) { | |
791 _space_counters->update_used(used); | |
792 _space_counters->update_capacity(); | |
793 _gen_counters->update_all(); | |
794 } | |
795 } | |
796 | |
797 void ConcurrentMarkSweepGeneration::print() const { | |
798 Generation::print(); | |
799 cmsSpace()->print(); | |
800 } | |
801 | |
802 #ifndef PRODUCT | |
803 void ConcurrentMarkSweepGeneration::print_statistics() { | |
804 cmsSpace()->printFLCensus(0); | |
805 } | |
806 #endif | |
807 | |
808 void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) { | |
809 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
810 if (PrintGCDetails) { | |
811 if (Verbose) { | |
812 gclog_or_tty->print(" [%d %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]", | |
813 level(), short_name(), s, used(), capacity()); | |
814 } else { | |
815 gclog_or_tty->print(" [%d %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]", | |
816 level(), short_name(), s, used() / K, capacity() / K); | |
817 } | |
818 } | |
819 if (Verbose) { | |
820 gclog_or_tty->print(" "SIZE_FORMAT"("SIZE_FORMAT")", | |
821 gch->used(), gch->capacity()); | |
822 } else { | |
823 gclog_or_tty->print(" "SIZE_FORMAT"K("SIZE_FORMAT"K)", | |
824 gch->used() / K, gch->capacity() / K); | |
825 } | |
826 } | |
827 | |
828 size_t | |
829 ConcurrentMarkSweepGeneration::contiguous_available() const { | |
830 // dld proposes an improvement in precision here. If the committed | |
831 // part of the space ends in a free block we should add that to | |
832 // uncommitted size in the calculation below. Will make this | |
833 // change later, staying with the approximation below for the | |
834 // time being. -- ysr. | |
835 return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc()); | |
836 } | |
837 | |
838 size_t | |
839 ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const { | |
840 return _cmsSpace->max_alloc_in_words() * HeapWordSize; | |
841 } | |
842 | |
843 size_t ConcurrentMarkSweepGeneration::max_available() const { | |
844 return free() + _virtual_space.uncommitted_size(); | |
845 } | |
846 | |
847 bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe( | |
848 size_t max_promotion_in_bytes, | |
849 bool younger_handles_promotion_failure) const { | |
850 | |
851 // This is the most conservative test. Full promotion is | |
852 // guaranteed if this is used. The multiplicative factor is to | |
853 // account for the worst case "dilatation". | |
854 double adjusted_max_promo_bytes = _dilatation_factor * max_promotion_in_bytes; | |
855 if (adjusted_max_promo_bytes > (double)max_uintx) { // larger than size_t | |
856 adjusted_max_promo_bytes = (double)max_uintx; | |
857 } | |
858 bool result = (max_contiguous_available() >= (size_t)adjusted_max_promo_bytes); | |
859 | |
860 if (younger_handles_promotion_failure && !result) { | |
861 // Full promotion is not guaranteed because fragmentation | |
862 // of the cms generation can prevent the full promotion. | |
863 result = (max_available() >= (size_t)adjusted_max_promo_bytes); | |
864 | |
865 if (!result) { | |
866 // With promotion failure handling the test for the ability | |
867 // to support the promotion does not have to be guaranteed. | |
868 // Use an average of the amount promoted. | |
869 result = max_available() >= (size_t) | |
870 gc_stats()->avg_promoted()->padded_average(); | |
871 if (PrintGC && Verbose && result) { | |
872 gclog_or_tty->print_cr( | |
873 "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe" | |
874 " max_available: " SIZE_FORMAT | |
875 " avg_promoted: " SIZE_FORMAT, | |
876 max_available(), (size_t) | |
877 gc_stats()->avg_promoted()->padded_average()); | |
878 } | |
879 } else { | |
880 if (PrintGC && Verbose) { | |
881 gclog_or_tty->print_cr( | |
882 "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe" | |
883 " max_available: " SIZE_FORMAT | |
884 " adj_max_promo_bytes: " SIZE_FORMAT, | |
885 max_available(), (size_t)adjusted_max_promo_bytes); | |
886 } | |
887 } | |
888 } else { | |
889 if (PrintGC && Verbose) { | |
890 gclog_or_tty->print_cr( | |
891 "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe" | |
892 " contiguous_available: " SIZE_FORMAT | |
893 " adj_max_promo_bytes: " SIZE_FORMAT, | |
894 max_contiguous_available(), (size_t)adjusted_max_promo_bytes); | |
895 } | |
896 } | |
897 return result; | |
898 } | |
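For example (values assumed): with _dilatation_factor = 1.5 and max_promotion_in_bytes = 40 MB, the adjusted requirement is 60 MB. The attempt is safe outright if 60 MB are contiguously available; with promotion-failure handling it may still pass if total free space covers the 60 MB, or, failing that, the padded average of recent promotions.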
899 | |
900 CompactibleSpace* | |
901 ConcurrentMarkSweepGeneration::first_compaction_space() const { | |
902 return _cmsSpace; | |
903 } | |
904 | |
905 void ConcurrentMarkSweepGeneration::reset_after_compaction() { | |
906 // Clear the promotion information. These pointers can be adjusted | |
907 // along with all the other pointers into the heap but | |
908 // compaction is expected to be a rare event with | |
909 // a heap using cms so don't do it without seeing the need. | |
910 if (ParallelGCThreads > 0) { | |
911 for (uint i = 0; i < ParallelGCThreads; i++) { | |
912 _par_gc_thread_states[i]->promo.reset(); | |
913 } | |
914 } | |
915 } | |
916 | |
917 void ConcurrentMarkSweepGeneration::space_iterate(SpaceClosure* blk, bool usedOnly) { | |
918 blk->do_space(_cmsSpace); | |
919 } | |
920 | |
921 void ConcurrentMarkSweepGeneration::compute_new_size() { | |
922 assert_locked_or_safepoint(Heap_lock); | |
923 | |
924 // If incremental collection failed, we just want to expand | |
925 // to the limit. | |
926 if (incremental_collection_failed()) { | |
927 clear_incremental_collection_failed(); | |
928 grow_to_reserved(); | |
929 return; | |
930 } | |
931 | |
932 size_t expand_bytes = 0; | |
933 double free_percentage = ((double) free()) / capacity(); | |
934 double desired_free_percentage = (double) MinHeapFreeRatio / 100; | |
935 double maximum_free_percentage = (double) MaxHeapFreeRatio / 100; | |
936 | |
937 // compute expansion delta needed for reaching desired free percentage | |
938 if (free_percentage < desired_free_percentage) { | |
939 size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage)); | |
940 assert(desired_capacity >= capacity(), "invalid expansion size"); | |
941 expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes); | |
942 } | |
943 if (expand_bytes > 0) { | |
944 if (PrintGCDetails && Verbose) { | |
945 size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage)); | |
946 gclog_or_tty->print_cr("\nFrom compute_new_size: "); | |
947 gclog_or_tty->print_cr(" Free fraction %f", free_percentage); | |
948 gclog_or_tty->print_cr(" Desired free fraction %f", | |
949 desired_free_percentage); | |
950 gclog_or_tty->print_cr(" Maximum free fraction %f", | |
951 maximum_free_percentage); | |
952 gclog_or_tty->print_cr(" Capacity "SIZE_FORMAT, capacity()/1000); | |
953 gclog_or_tty->print_cr(" Desired capacity "SIZE_FORMAT, | |
954 desired_capacity/1000); | |
955 int prev_level = level() - 1; | |
956 if (prev_level >= 0) { | |
957 size_t prev_size = 0; | |
958 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
959 Generation* prev_gen = gch->_gens[prev_level]; | |
960 prev_size = prev_gen->capacity(); | |
961 gclog_or_tty->print_cr(" Younger gen size "SIZE_FORMAT, | |
962 prev_size/1000); | |
963 } | |
964 gclog_or_tty->print_cr(" unsafe_max_alloc_nogc "SIZE_FORMAT, | |
965 unsafe_max_alloc_nogc()/1000); | |
966 gclog_or_tty->print_cr(" contiguous available "SIZE_FORMAT, | |
967 contiguous_available()/1000); | |
968 gclog_or_tty->print_cr(" Expand by "SIZE_FORMAT" (bytes)", | |
969 expand_bytes); | |
970 } | |
971 // safe if expansion fails | |
972 expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio); | |
973 if (PrintGCDetails && Verbose) { | |
974 gclog_or_tty->print_cr(" Expanded free fraction %f", | |
975 ((double) free()) / capacity()); | |
976 } | |
977 } | |
978 } | |
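A worked example of the sizing above (illustrative values): with used() = 600 MB and MinHeapFreeRatio = 40, desired_capacity = 600 / (1 - 0.40) = 1000 MB; at a current capacity of 800 MB the generation expands by MAX2(200 MB, MinHeapDeltaBytes).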
979 | |
980 Mutex* ConcurrentMarkSweepGeneration::freelistLock() const { | |
981 return cmsSpace()->freelistLock(); | |
982 } | |
983 | |
984 HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size, | |
985 bool tlab) { | |
986 CMSSynchronousYieldRequest yr; | |
987 MutexLockerEx x(freelistLock(), | |
988 Mutex::_no_safepoint_check_flag); | |
989 return have_lock_and_allocate(size, tlab); | |
990 } | |
991 | |
992 HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size, | |
993 bool tlab) { | |
994 assert_lock_strong(freelistLock()); | |
995 size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size); | |
996 HeapWord* res = cmsSpace()->allocate(adjustedSize); | |
997 // Allocate the object live (grey) if the background collector has | |
998 // started marking. This is necessary because the marker may | |
999 // have passed this address and consequently this object will | |
1000 // not otherwise be greyed and would be incorrectly swept up. | |
1001 // Note that if this object contains references, the writing | |
1002 // of those references will dirty the card containing this object | |
1003 // allowing the object to be blackened (and its references scanned) | |
1004 // either during a preclean phase or at the final checkpoint. | |
1005 if (res != NULL) { | |
1006 collector()->direct_allocated(res, adjustedSize); | |
1007 _direct_allocated_words += adjustedSize; | |
1008 // allocation counters | |
1009 NOT_PRODUCT( | |
1010 _numObjectsAllocated++; | |
1011 _numWordsAllocated += (int)adjustedSize; | |
1012 ) | |
1013 } | |
1014 return res; | |
1015 } | |
1016 | |
1017 // In the case of direct allocation by mutators in a generation that | |
1018 // is being concurrently collected, the object must be allocated | |
1019 // live (grey) if the background collector has started marking. | |
1020 // This is necessary because the marker may | |
1021 // have passed this address and consequently this object will | |
1022 // not otherwise be greyed and would be incorrectly swept up. | |
1023 // Note that if this object contains references, the writing | |
1024 // of those references will dirty the card containing this object | |
1025 // allowing the object to be blackened (and its references scanned) | |
1026 // either during a preclean phase or at the final checkpoint. | |
1027 void CMSCollector::direct_allocated(HeapWord* start, size_t size) { | |
1028 assert(_markBitMap.covers(start, size), "Out of bounds"); | |
1029 if (_collectorState >= Marking) { | |
1030 MutexLockerEx y(_markBitMap.lock(), | |
1031 Mutex::_no_safepoint_check_flag); | |
1032 // [see comments preceding SweepClosure::do_blk() below for details] | |
1033 // 1. need to mark the object as live so it isn't collected | |
1034 // 2. need to mark the 2nd bit to indicate the object may be uninitialized | |
1035 // 3. need to mark the end of the object so sweeper can skip over it | |
1036 // if it's uninitialized when the sweeper reaches it. | |
1037 _markBitMap.mark(start); // object is live | |
1038 _markBitMap.mark(start + 1); // object is potentially uninitialized? | |
1039 _markBitMap.mark(start + size - 1); | |
1040 // mark end of object | |
1041 } | |
1042 // check that oop looks uninitialized | |
187 | 1043 assert(oop(start)->klass_or_null() == NULL, "_klass should be NULL"); |
0 | 1044 } |
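To illustrate the three-mark convention (addresses assumed): for a 16-word direct allocation at address a during marking, the bits for a, a + 1 and a + 15 are set. A sweeper that sees both a and a + 1 marked knows the block may still be uninitialized and skips to the mark at a + 15 to find its end, instead of trusting a size read from the possibly unwritten header.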
1045 | |
1046 void CMSCollector::promoted(bool par, HeapWord* start, | |
1047 bool is_obj_array, size_t obj_size) { | |
1048 assert(_markBitMap.covers(start), "Out of bounds"); | |
1049 // See comment in direct_allocated() about when objects should | |
1050 // be allocated live. | |
1051 if (_collectorState >= Marking) { | |
1052 // we already hold the marking bit map lock, taken in | |
1053 // the prologue | |
1054 if (par) { | |
1055 _markBitMap.par_mark(start); | |
1056 } else { | |
1057 _markBitMap.mark(start); | |
1058 } | |
1059 // We don't need to mark the object as uninitialized (as | |
1060 // in direct_allocated above) because this is being done with the | |
1061 // world stopped and the object will be initialized by the | |
1062 // time the sweeper gets to look at it. | |
1063 assert(SafepointSynchronize::is_at_safepoint(), | |
1064 "expect promotion only at safepoints"); | |
1065 | |
1066 if (_collectorState < Sweeping) { | |
1067 // Mark the appropriate cards in the modUnionTable, so that | |
1068 // this object gets scanned before the sweep. If this is | |
1069 // not done, CMS generation references in the object might | |
1070 // not get marked. | |
1071 // For the case of arrays, which are otherwise precisely | |
1072 // marked, we need to dirty the entire array, not just its head. | |
1073 if (is_obj_array) { | |
1074 // The [par_]mark_range() method expects mr.end() below to | |
1075 // be aligned to the granularity of a bit's representation | |
1076 // in the heap. In the case of the MUT below, that's a | |
1077 // card size. | |
1078 MemRegion mr(start, | |
1079 (HeapWord*)round_to((intptr_t)(start + obj_size), | |
1080 CardTableModRefBS::card_size /* bytes */)); | |
1081 if (par) { | |
1082 _modUnionTable.par_mark_range(mr); | |
1083 } else { | |
1084 _modUnionTable.mark_range(mr); | |
1085 } | |
1086 } else { // not an obj array; we can just mark the head | |
1087 if (par) { | |
1088 _modUnionTable.par_mark(start); | |
1089 } else { | |
1090 _modUnionTable.mark(start); | |
1091 } | |
1092 } | |
1093 } | |
1094 } | |
1095 } | |
1096 | |
1097 static inline size_t percent_of_space(Space* space, HeapWord* addr) | |
1098 { | |
1099 size_t delta = pointer_delta(addr, space->bottom()); | |
1100 return (size_t)(delta * 100.0 / (space->capacity() / HeapWordSize)); | |
1101 } | |
1102 | |
1103 void CMSCollector::icms_update_allocation_limits() | |
1104 { | |
1105 Generation* gen0 = GenCollectedHeap::heap()->get_gen(0); | |
1106 EdenSpace* eden = gen0->as_DefNewGeneration()->eden(); | |
1107 | |
1108 const unsigned int duty_cycle = stats().icms_update_duty_cycle(); | |
1109 if (CMSTraceIncrementalPacing) { | |
1110 stats().print(); | |
1111 } | |
1112 | |
1113 assert(duty_cycle <= 100, "invalid duty cycle"); | |
1114 if (duty_cycle != 0) { | |
1115 // The duty_cycle is a percentage between 0 and 100; convert to words and | |
1116 // then compute the offset from the endpoints of the space. | |
1117 size_t free_words = eden->free() / HeapWordSize; | |
1118 double free_words_dbl = (double)free_words; | |
1119 size_t duty_cycle_words = (size_t)(free_words_dbl * duty_cycle / 100.0); | |
1120 size_t offset_words = (free_words - duty_cycle_words) / 2; | |
1121 | |
1122 _icms_start_limit = eden->top() + offset_words; | |
1123 _icms_stop_limit = eden->end() - offset_words; | |
1124 | |
1125 // The limits may be adjusted (shifted to the right) by | |
1126 // CMSIncrementalOffset, to allow the application more mutator time after a | |
1127 // young gen gc (when all mutators were stopped) and before CMS starts and | |
1128 // takes away one or more cpus. | |
1129 if (CMSIncrementalOffset != 0) { | |
1130 double adjustment_dbl = free_words_dbl * CMSIncrementalOffset / 100.0; | |
1131 size_t adjustment = (size_t)adjustment_dbl; | |
1132 HeapWord* tmp_stop = _icms_stop_limit + adjustment; | |
1133 if (tmp_stop > _icms_stop_limit && tmp_stop < eden->end()) { | |
1134 _icms_start_limit += adjustment; | |
1135 _icms_stop_limit = tmp_stop; | |
1136 } | |
1137 } | |
1138 } | |
1139 if (duty_cycle == 0 || (_icms_start_limit == _icms_stop_limit)) { | |
1140 _icms_start_limit = _icms_stop_limit = eden->end(); | |
1141 } | |
1142 | |
1143 // Install the new start limit. | |
1144 eden->set_soft_end(_icms_start_limit); | |
1145 | |
1146 if (CMSTraceIncrementalMode) { | |
1147 gclog_or_tty->print(" icms alloc limits: " | |
1148 PTR_FORMAT "," PTR_FORMAT | |
1149 " (" SIZE_FORMAT "%%," SIZE_FORMAT "%%) ", | |
1150 _icms_start_limit, _icms_stop_limit, | |
1151 percent_of_space(eden, _icms_start_limit), | |
1152 percent_of_space(eden, _icms_stop_limit)); | |
1153 if (Verbose) { | |
1154 gclog_or_tty->print("eden: "); | |
1155 eden->print_on(gclog_or_tty); | |
1156 } | |
1157 } | |
1158 } | |
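Illustrative numbers: with 1000 free words in eden and a duty cycle of 20, duty_cycle_words = 200 and offset_words = (1000 - 200) / 2 = 400, so the limits land at eden->top() + 400 and eden->end() - 400; a CMSIncrementalOffset of 10 would shift both limits right by 1000 * 10 / 100 = 100 words, provided the shifted stop limit stays below eden->end().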
1159 | |
1160 // Any changes here should try to maintain the invariant | |
1161 // that if this method is called with _icms_start_limit | |
1162 // and _icms_stop_limit both NULL, then it should return NULL | |
1163 // and not notify the icms thread. | |
1164 HeapWord* | |
1165 CMSCollector::allocation_limit_reached(Space* space, HeapWord* top, | |
1166 size_t word_size) | |
1167 { | |
1168 // A start_limit equal to end() means the duty cycle is 0, so treat that as a | |
1169 // nop. | |
1170 if (CMSIncrementalMode && _icms_start_limit != space->end()) { | |
1171 if (top <= _icms_start_limit) { | |
1172 if (CMSTraceIncrementalMode) { | |
1173 space->print_on(gclog_or_tty); | |
1174 gclog_or_tty->stamp(); | |
1175 gclog_or_tty->print_cr(" start limit top=" PTR_FORMAT | |
1176 ", new limit=" PTR_FORMAT | |
1177 " (" SIZE_FORMAT "%%)", | |
1178 top, _icms_stop_limit, | |
1179 percent_of_space(space, _icms_stop_limit)); | |
1180 } | |
1181 ConcurrentMarkSweepThread::start_icms(); | |
1182 assert(top < _icms_stop_limit, "Tautology"); | |
1183 if (word_size < pointer_delta(_icms_stop_limit, top)) { | |
1184 return _icms_stop_limit; | |
1185 } | |
1186 | |
1187 // The allocation will cross both the _start and _stop limits, so do the | |
1188 // stop notification also and return end(). | |
1189 if (CMSTraceIncrementalMode) { | |
1190 space->print_on(gclog_or_tty); | |
1191 gclog_or_tty->stamp(); | |
1192 gclog_or_tty->print_cr(" +stop limit top=" PTR_FORMAT | |
1193 ", new limit=" PTR_FORMAT | |
1194 " (" SIZE_FORMAT "%%)", | |
1195 top, space->end(), | |
1196 percent_of_space(space, space->end())); | |
1197 } | |
1198 ConcurrentMarkSweepThread::stop_icms(); | |
1199 return space->end(); | |
1200 } | |
1201 | |
1202 if (top <= _icms_stop_limit) { | |
1203 if (CMSTraceIncrementalMode) { | |
1204 space->print_on(gclog_or_tty); | |
1205 gclog_or_tty->stamp(); | |
1206 gclog_or_tty->print_cr(" stop limit top=" PTR_FORMAT | |
1207 ", new limit=" PTR_FORMAT | |
1208 " (" SIZE_FORMAT "%%)", | |
1209 top, space->end(), | |
1210 percent_of_space(space, space->end())); | |
1211 } | |
1212 ConcurrentMarkSweepThread::stop_icms(); | |
1213 return space->end(); | |
1214 } | |
1215 | |
1216 if (CMSTraceIncrementalMode) { | |
1217 space->print_on(gclog_or_tty); | |
1218 gclog_or_tty->stamp(); | |
1219 gclog_or_tty->print_cr(" end limit top=" PTR_FORMAT | |
1220 ", new limit=" PTR_FORMAT, | |
1221 top, NULL); | |
1222 } | |
1223 } | |
1224 | |
1225 return NULL; | |
1226 } | |
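// A minimal sketch of the caller's side of this contract (not the actual
// allocator code): a space using soft limits consults
// allocation_limit_reached() when allocation crosses soft_end(), and either
// adopts the returned limit or, on NULL, reverts to the hard end:
//
//   HeapWord* new_limit = allocation_limit_reached(space, space->top(), sz);
//   space->set_soft_end(new_limit != NULL ? new_limit : space->end());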
1227 | |
113 | 1228 oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) { |
0 | 1229 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in"); |
1230 // allocate, copy and if necessary update promoinfo -- | |
1231 // delegate to underlying space. | |
1232 assert_lock_strong(freelistLock()); | |
1233 | |
1234 #ifndef PRODUCT | |
1235 if (Universe::heap()->promotion_should_fail()) { | |
1236 return NULL; | |
1237 } | |
1238 #endif // #ifndef PRODUCT | |
1239 | |
113 | 1240 oop res = _cmsSpace->promote(obj, obj_size); |
0 | 1241 if (res == NULL) { |
1242 // expand and retry | |
1243 size_t s = _cmsSpace->expansionSpaceRequired(obj_size); // HeapWords | |
1244 expand(s*HeapWordSize, MinHeapDeltaBytes, | |
1245 CMSExpansionCause::_satisfy_promotion); | |
1246 // Since there's currently no next generation, we don't try to promote | |
1247 // into a more senior generation. | |
1248 assert(next_gen() == NULL, "assumption, based upon which no attempt " | |
1249 "is made to pass on a possibly failing " | |
1250 "promotion to next generation"); | |
113 | 1251 res = _cmsSpace->promote(obj, obj_size); |
0 | 1252 } |
1253 if (res != NULL) { | |
1254 // See comment in allocate() about when objects should | |
1255 // be allocated live. | |
1256 assert(obj->is_oop(), "Will dereference klass pointer below"); | |
1257 collector()->promoted(false, // Not parallel | |
1258 (HeapWord*)res, obj->is_objArray(), obj_size); | |
1259 // promotion counters | |
1260 NOT_PRODUCT( | |
1261 _numObjectsPromoted++; | |
1262 _numWordsPromoted += | |
1263 (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size())); | |
1264 ) | |
1265 } | |
1266 return res; | |
1267 } | |
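// The expand-and-retry idiom above, reduced to a sketch (Alloc and Expand
// stand in for _cmsSpace->promote() and expand() respectively):
//
//   oop res = Alloc(obj_size);
//   if (res == NULL) {
//     Expand(expansion_required_for(obj_size));  // may itself fail quietly
//     res = Alloc(obj_size);                     // retried exactly once
//   }
//   return res;  // NULL here reports the promotion failure to the caller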
1268 | |
1269 | |
1270 HeapWord* | |
1271 ConcurrentMarkSweepGeneration::allocation_limit_reached(Space* space, | |
1272 HeapWord* top, | |
1273 size_t word_sz) | |
1274 { | |
1275 return collector()->allocation_limit_reached(space, top, word_sz); | |
1276 } | |
1277 | |
1278 // Things to support parallel young-gen collection. | |
1279 oop | |
1280 ConcurrentMarkSweepGeneration::par_promote(int thread_num, | |
1281 oop old, markOop m, | |
1282 size_t word_sz) { | |
1283 #ifndef PRODUCT | |
1284 if (Universe::heap()->promotion_should_fail()) { | |
1285 return NULL; | |
1286 } | |
1287 #endif // #ifndef PRODUCT | |
1288 | |
1289 CMSParGCThreadState* ps = _par_gc_thread_states[thread_num]; | |
1290 PromotionInfo* promoInfo = &ps->promo; | |
1291 // if we are tracking promotions, then first ensure space for | |
1292 // promotion (including spooling space for saving header if necessary). | |
1293 // then allocate and copy, then track promoted info if needed. | |
1294 // When tracking (see PromotionInfo::track()), the mark word may | |
1295 // be displaced and in this case restoration of the mark word | |
1296 // occurs in the (oop_since_save_marks_)iterate phase. | |
1297 if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) { | |
1298 // Out of space for allocating spooling buffers; | |
1299 // try expanding and allocating spooling buffers. | |
1300 if (!expand_and_ensure_spooling_space(promoInfo)) { | |
1301 return NULL; | |
1302 } | |
1303 } | |
1304 assert(promoInfo->has_spooling_space(), "Control point invariant"); | |
1305 HeapWord* obj_ptr = ps->lab.alloc(word_sz); | |
1306 if (obj_ptr == NULL) { | |
1307 obj_ptr = expand_and_par_lab_allocate(ps, word_sz); | |
1308 if (obj_ptr == NULL) { | |
1309 return NULL; | |
1310 } | |
1311 } | |
1312 oop obj = oop(obj_ptr); | |
187 | 1313 assert(obj->klass_or_null() == NULL, "Object should be uninitialized here."); |
0 | 1314 // Otherwise, copy the object. Here we must be careful to insert the |
1315 // klass pointer last, since this marks the block as an allocated object. | |
187 | 1316 // Except with compressed oops it's the mark word. |
0 | 1317 HeapWord* old_ptr = (HeapWord*)old; |
1318 if (word_sz > (size_t)oopDesc::header_size()) { | |
1319 Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(), | |
1320 obj_ptr + oopDesc::header_size(), | |
1321 word_sz - oopDesc::header_size()); | |
1322 } | |
187 | 1323 |
1324 if (UseCompressedOops) { | |
1325 // Copy gap missed by (aligned) header size calculation above | |
1326 obj->set_klass_gap(old->klass_gap()); | |
1327 } | |
1328 | |
0 | 1329 // Restore the mark word copied above. |
1330 obj->set_mark(m); | |
187 | 1331 |
0 | 1332 // Now we can track the promoted object, if necessary. We take care |
1333 // to delay the transition from uninitialized to full object | |
1334 // (i.e., insertion of klass pointer) until after, so that it | |
1335 // atomically becomes a promoted object. | |
1336 if (promoInfo->tracking()) { | |
1337 promoInfo->track((PromotedObject*)obj, old->klass()); | |
1338 } | |
187 | 1339 |
1340 // Finally, install the klass pointer (this should be volatile). | |
0 | 1341 obj->set_klass(old->klass()); |
1342 | |
1343 assert(old->is_oop(), "Will dereference klass ptr below"); | |
1344 collector()->promoted(true, // parallel | |
1345 obj_ptr, old->is_objArray(), word_sz); | |
1346 | |
1347 NOT_PRODUCT( | |
1348 Atomic::inc(&_numObjectsPromoted); | |
1349 Atomic::add((jint)CompactibleFreeListSpace::adjustObjectSize(obj->size()), | |
1350 &_numWordsPromoted); | |
1351 ) | |
1352 | |
1353 return obj; | |
1354 } | |
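// The essential ordering in par_promote() above, as a sketch with
// simplified helper names (a concurrent observer may examine the block at
// any point, so the klass pointer -- or mark word, with compressed oops --
// must be published last):
//
//   copy_body(old, obj);           // header still says "not yet an object"
//   obj->set_mark(m);              // restore the (possibly displaced) mark
//   track_promotion_if_needed();   // bookkeeping while still unpublished
//   obj->set_klass(old->klass());  // publish: block atomically becomes an oop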
1355 | |
1356 void | |
1357 ConcurrentMarkSweepGeneration:: | |
1358 par_promote_alloc_undo(int thread_num, | |
1359 HeapWord* obj, size_t word_sz) { | |
1360 // CMS does not support promotion undo. | |
1361 ShouldNotReachHere(); | |
1362 } | |
1363 | |
1364 void | |
1365 ConcurrentMarkSweepGeneration:: | |
1366 par_promote_alloc_done(int thread_num) { | |
1367 CMSParGCThreadState* ps = _par_gc_thread_states[thread_num]; | |
1368 ps->lab.retire(); | |
1369 #if CFLS_LAB_REFILL_STATS | |
1370 if (thread_num == 0) { | |
1371 _cmsSpace->print_par_alloc_stats(); | |
1372 } | |
1373 #endif | |
1374 } | |
1375 | |
1376 void | |
1377 ConcurrentMarkSweepGeneration:: | |
1378 par_oop_since_save_marks_iterate_done(int thread_num) { | |
1379 CMSParGCThreadState* ps = _par_gc_thread_states[thread_num]; | |
1380 ParScanWithoutBarrierClosure* dummy_cl = NULL; | |
1381 ps->promo.promoted_oops_iterate_nv(dummy_cl); | |
1382 } | |
1383 | |
1384 // XXXPERM | |
1385 bool ConcurrentMarkSweepGeneration::should_collect(bool full, | |
1386 size_t size, | |
1387 bool tlab) | |
1388 { | |
1389 // We allow a STW collection only if a full | |
1390 // collection was requested. | |
1391 return full || should_allocate(size, tlab); // FIX ME !!! | |
1392 // This and promotion failure handling are connected at the | |
1393 // hip and should be fixed by untying them. | |
1394 } | |
1395 | |
1396 bool CMSCollector::shouldConcurrentCollect() { | |
1397 if (_full_gc_requested) { | |
1398 assert(ExplicitGCInvokesConcurrent, "Unexpected state"); | |
1399 if (Verbose && PrintGCDetails) { | |
1400 gclog_or_tty->print_cr("CMSCollector: collect because of explicit " | |
1401 " gc request"); | |
1402 } | |
1403 return true; | |
1404 } | |
1405 | |
1406 // For debugging purposes, change the type of collection. | |
1407 // If the rotation is not on the concurrent collection | |
1408 // type, don't start a concurrent collection. | |
1409 NOT_PRODUCT( | |
1410 if (RotateCMSCollectionTypes && | |
1411 (_cmsGen->debug_collection_type() != | |
1412 ConcurrentMarkSweepGeneration::Concurrent_collection_type)) { | |
1413 assert(_cmsGen->debug_collection_type() != | |
1414 ConcurrentMarkSweepGeneration::Unknown_collection_type, | |
1415 "Bad cms collection type"); | |
1416 return false; | |
1417 } | |
1418 ) | |
1419 | |
1420 FreelistLocker x(this); | |
1421 // ------------------------------------------------------------------ | |
1422 // Print out lots of information which affects the initiation of | |
1423 // a collection. | |
1424 if (PrintCMSInitiationStatistics && stats().valid()) { | |
1425 gclog_or_tty->print("CMSCollector shouldConcurrentCollect: "); | |
1426 gclog_or_tty->stamp(); | |
1427 gclog_or_tty->print_cr(""); | |
1428 stats().print_on(gclog_or_tty); | |
1429 gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f", | |
1430 stats().time_until_cms_gen_full()); | |
1431 gclog_or_tty->print_cr("free="SIZE_FORMAT, _cmsGen->free()); | |
1432 gclog_or_tty->print_cr("contiguous_available="SIZE_FORMAT, | |
1433 _cmsGen->contiguous_available()); | |
1434 gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate()); | |
1435 gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate()); | |
1436 gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy()); | |
94 | 1437 gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy()); |
1438 gclog_or_tty->print_cr("initiatingPermOccupancy=%3.7f", _permGen->initiating_occupancy()); |
0 | 1439 } |
1440 // ------------------------------------------------------------------ | |
1441 | |
1442 // If the estimated time to complete a cms collection (cms_duration()) | |
1443 // is less than the estimated time remaining until the cms generation | |
1444 // is full, start a collection. | |
1445 if (!UseCMSInitiatingOccupancyOnly) { | |
1446 if (stats().valid()) { | |
1447 if (stats().time_until_cms_start() == 0.0) { | |
1448 return true; | |
1449 } | |
1450 } else { | |
1451 // We want to conservatively collect somewhat early in order | |
1452 // to try to "bootstrap" our CMS/promotion statistics; | |
1453 // this branch will not fire after the first successful CMS | |
1454 // collection because the stats should then be valid. | |
1455 if (_cmsGen->occupancy() >= _bootstrap_occupancy) { | |
1456 if (Verbose && PrintGCDetails) { | |
1457 gclog_or_tty->print_cr( | |
1458 " CMSCollector: collect for bootstrapping statistics:" | |
1459 " occupancy = %f, boot occupancy = %f", _cmsGen->occupancy(), | |
1460 _bootstrap_occupancy); | |
1461 } | |
1462 return true; | |
1463 } | |
1464 } | |
1465 } | |
1466 | |
1467 // Otherwise, we start a collection cycle if either the perm gen or | |
1468 // old gen want a collection cycle started. Each may use | |
1469 // an appropriate criterion for making this decision. | |
1470 // XXX We need to make sure that the gen expansion | |
94 | 1471 // criterion dovetails well with this. XXX NEED TO FIX THIS |
1472 if (_cmsGen->should_concurrent_collect()) { |
0 | 1473 if (Verbose && PrintGCDetails) { |
1474 gclog_or_tty->print_cr("CMS old gen initiated"); | |
1475 } | |
1476 return true; | |
1477 } | |
1478 | |
94 | 1479 // We start a collection if we believe an incremental collection may fail; |
1480 // this is not likely to be productive in practice because it's probably too |
1481 // late anyway. |
1482 GenCollectedHeap* gch = GenCollectedHeap::heap(); |
1483 assert(gch->collector_policy()->is_two_generation_policy(), |
1484 "You may want to check the correctness of the following"); |
1485 if (gch->incremental_collection_will_fail()) { |
1486 if (PrintGCDetails && Verbose) { |
1487 gclog_or_tty->print("CMSCollector: collect because incremental collection will fail "); |
0 | 1488 } |
1489 return true; | |
1490 } | |
1491 | |
94 | 1492 if (CMSClassUnloadingEnabled && _permGen->should_concurrent_collect()) { |
1493 bool res = update_should_unload_classes(); |
1494 if (res) { |
1495 if (Verbose && PrintGCDetails) { |
1496 gclog_or_tty->print_cr("CMS perm gen initiated"); |
1497 } |
1498 return true; |
1499 } |
1500 } |
0 | 1501 return false; |
1502 } | |
1503 | |
1504 // Clear _expansion_cause fields of constituent generations | |
1505 void CMSCollector::clear_expansion_cause() { | |
1506 _cmsGen->clear_expansion_cause(); | |
1507 _permGen->clear_expansion_cause(); | |
1508 } | |
1509 | |
94 | 1510 // We should be conservative in starting a collection cycle. To |
1511 // start too eagerly runs the risk of collecting too often in the |
1512 // extreme. To collect too rarely falls back on full collections, |
1513 // which works, even if not optimum in terms of concurrent work. |
1514 // As a work around for too eagerly collecting, use the flag |
1515 // UseCMSInitiatingOccupancyOnly. This also has the advantage of |
1516 // giving the user an easily understandable way of controlling the |
1517 // collections. |
1518 // We want to start a new collection cycle if any of the following |
1519 // conditions hold: |
1520 // . our current occupancy exceeds the configured initiating occupancy |
1521 // for this generation, or |
1522 // . we recently needed to expand this space and have not, since that |
1523 // expansion, done a collection of this generation, or |
1524 // . the underlying space believes that it may be a good idea to initiate |
1525 // a concurrent collection (this may be based on criteria such as the |
1526 // following: the space uses linear allocation and linear allocation is |
1527 // going to fail, or there is believed to be excessive fragmentation in |
1528 // the generation, etc... or ... |
1529 // [.(currently done by CMSCollector::shouldConcurrentCollect() only for |
1530 // the case of the old generation, not the perm generation; see CR 6543076): |
1531 // we may be approaching a point at which allocation requests may fail because |
1532 // we will be out of sufficient free space given allocation rate estimates.] |
1533 bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const { |
1534 |
0 | 1535 assert_lock_strong(freelistLock()); |
94 | 1536 if (occupancy() > initiating_occupancy()) { |
0 | 1537 if (PrintGCDetails && Verbose) { |
1538 gclog_or_tty->print(" %s: collect because of occupancy %f / %f ", | |
94 | 1539 short_name(), occupancy(), initiating_occupancy()); |
0 | 1540 } |
1541 return true; | |
1542 } | |
1543 if (UseCMSInitiatingOccupancyOnly) { | |
1544 return false; | |
1545 } | |
1546 if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) { | |
1547 if (PrintGCDetails && Verbose) { | |
1548 gclog_or_tty->print(" %s: collect because expanded for allocation ", | |
1549 short_name()); | |
1550 } | |
1551 return true; | |
1552 } | |
94 | 1553 if (_cmsSpace->should_concurrent_collect()) { |
0 | 1554 if (PrintGCDetails && Verbose) { |
94 | 1555 gclog_or_tty->print(" %s: collect because cmsSpace says so ", |
0 | 1556 short_name()); |
1557 } | |
1558 return true; | |
1559 } | |
1560 return false; | |
1561 } | |
1562 | |
1563 void ConcurrentMarkSweepGeneration::collect(bool full, | |
1564 bool clear_all_soft_refs, | |
1565 size_t size, | |
1566 bool tlab) | |
1567 { | |
1568 collector()->collect(full, clear_all_soft_refs, size, tlab); | |
1569 } | |
1570 | |
1571 void CMSCollector::collect(bool full, | |
1572 bool clear_all_soft_refs, | |
1573 size_t size, | |
1574 bool tlab) | |
1575 { | |
1576 if (!UseCMSCollectionPassing && _collectorState > Idling) { | |
1577 // For debugging purposes skip the collection if the state | |
1578 // is not currently idle | |
1579 if (TraceCMSState) { | |
1580 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " skipped full:%d CMS state %d", | |
1581 Thread::current(), full, _collectorState); | |
1582 } | |
1583 return; | |
1584 } | |
1585 | |
1586 // The following "if" branch is present for defensive reasons. | |
1587 // In the current uses of this interface, it can be replaced with: | |
1588 // assert(!GC_locker::is_active(), "Can't be called otherwise"); | |
1589 // But I am not placing that assert here to allow future | |
1590 // generality in invoking this interface. | |
1591 if (GC_locker::is_active()) { | |
1592 // A consistency test for GC_locker | |
1593 assert(GC_locker::needs_gc(), "Should have been set already"); | |
1594 // Skip this foreground collection, instead | |
1595 // expanding the heap if necessary. | |
1596 // Need the free list locks for the call to free() in compute_new_size() | |
1597 compute_new_size(); | |
1598 return; | |
1599 } | |
1600 acquire_control_and_collect(full, clear_all_soft_refs); | |
1601 _full_gcs_since_conc_gc++; | |
1602 | |
1603 } | |
1604 | |
1605 void CMSCollector::request_full_gc(unsigned int full_gc_count) { | |
1606 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
1607 unsigned int gc_count = gch->total_full_collections(); | |
1608 if (gc_count == full_gc_count) { | |
1609 MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag); | |
1610 _full_gc_requested = true; | |
1611 CGC_lock->notify(); // nudge CMS thread | |
1612 } | |
1613 } | |
1614 | |
1615 | |
1616 // The foreground and background collectors need to coordinate in order | |
1617 // to make sure that they do not mutually interfere with CMS collections. | |
1618 // When a background collection is active, | |
1619 // the foreground collector may need to take over (preempt) and | |
1620 // synchronously complete an ongoing collection. Depending on the | |
1621 // frequency of the background collections and the heap usage | |
1622 // of the application, this preemption can be seldom or frequent. | |
1623 // There are only certain | |
1624 // points in the background collection that the "collection-baton" | |
1625 // can be passed to the foreground collector. | |
1626 // | |
1627 // The foreground collector will wait for the baton before | |
1628 // starting any part of the collection. The foreground collector | |
1629 // will only wait at one location. | |
1630 // | |
1631 // The background collector will yield the baton before starting a new | |
1632 // phase of the collection (e.g., before initial marking, marking from roots, | |
1633 // precleaning, final re-mark, sweep etc.) This is normally done at the head | |
1634 // of the loop which switches the phases. The background collector does some | |
1635 // of the phases (initial mark, final re-mark) with the world stopped. | |
1636 // Because of locking involved in stopping the world, | |
1637 // the foreground collector should not block waiting for the background | |
1638 // collector when it is doing a stop-the-world phase. The background | |
1639 // collector will yield the baton at an additional point just before | |
1640 // it enters a stop-the-world phase. Once the world is stopped, the | |
1641 // background collector checks the phase of the collection. If the | |
1642 // phase has not changed, it proceeds with the collection. If the | |
1643 // phase has changed, it skips that phase of the collection. See | |
1644 // the comments on the use of the Heap_lock in collect_in_background(). | |
1645 // | |
1646 // Variable used in baton passing. | |
1647 // _foregroundGCIsActive - Set to true by the foreground collector when | |
1648 // it wants the baton. The foreground clears it when it has finished | |
1649 // the collection. | |
1650 // _foregroundGCShouldWait - Set to true by the background collector | |
1651 // when it is running. The foreground collector waits while | |
1652 // _foregroundGCShouldWait is true. | |
1653 // CGC_lock - monitor used to protect access to the above variables | |
1654 // and to notify the foreground and background collectors. | |
1655 // _collectorState - current state of the CMS collection. | |
1656 // | |
1657 // The foreground collector | |
1658 // acquires the CGC_lock | |
1659 // sets _foregroundGCIsActive | |
1660 // waits on the CGC_lock for _foregroundGCShouldWait to be false | |
1661 // various locks acquired in preparation for the collection | |
1662 // are released so as not to block the background collector | |
1663 // that is in the midst of a collection | |
1664 // proceeds with the collection | |
1665 // clears _foregroundGCIsActive | |
1666 // returns | |
1667 // | |
1668 // The background collector in a loop iterating on the phases of the | |
1669 // collection | |
1670 // acquires the CGC_lock | |
1671 // sets _foregroundGCShouldWait | |
1672 // if _foregroundGCIsActive is set | |
1673 // clears _foregroundGCShouldWait, notifies _CGC_lock | |
1674 // waits on _CGC_lock for _foregroundGCIsActive to become false | |
1675 // and exits the loop. | |
1676 // otherwise | |
1677 // proceed with that phase of the collection | |
1678 // if the phase is a stop-the-world phase, | |
1679 // yield the baton once more just before enqueueing | |
1680 // the stop-world CMS operation (executed by the VM thread). | |
1681 // returns after all phases of the collection are done | |
1682 // | |
1683 | |
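// The protocol above, condensed into a sketch (CMS token passing and
// safepoint checks omitted; acquire_control_and_collect() and
// collect_in_background() below contain the real sequences):
//
//   // foreground collector (VM thread)
//   _foregroundGCIsActive = true;
//   { MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
//     while (_foregroundGCShouldWait) {
//       CGC_lock->wait(Mutex::_no_safepoint_check_flag);  // await the baton
//     }
//   }
//   // ... perform the collection, then clear _foregroundGCIsActive ...
//
//   // background collector (CMS thread), at each phase boundary
//   { MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
//     if (_foregroundGCIsActive) {
//       _foregroundGCShouldWait = false;  // yield the baton
//       CGC_lock->notify();
//       // then wait for _foregroundGCIsActive to clear and abandon the cycle
//     }
//   }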
1684 void CMSCollector::acquire_control_and_collect(bool full, | |
1685 bool clear_all_soft_refs) { | |
1686 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); | |
1687 assert(!Thread::current()->is_ConcurrentGC_thread(), | |
1688 "shouldn't try to acquire control from self!"); | |
1689 | |
1690 // Start the protocol for acquiring control of the | |
1691 // collection from the background collector (aka CMS thread). | |
1692 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(), | |
1693 "VM thread should have CMS token"); | |
1694 // Remember the possibly interrupted state of an ongoing | |
1695 // concurrent collection | |
1696 CollectorState first_state = _collectorState; | |
1697 | |
1698 // Signal to a possibly ongoing concurrent collection that | |
1699 // we want to do a foreground collection. | |
1700 _foregroundGCIsActive = true; | |
1701 | |
1702 // Disable incremental mode during a foreground collection. | |
1703 ICMSDisabler icms_disabler; | |
1704 | |
1705 // Release locks and wait for a notify from the background collector; | |
1706 // releasing the locks is only necessary for phases which | |
1707 // yield, to improve the granularity of the collection. | |
1708 assert_lock_strong(bitMapLock()); | |
1709 // We need to lock the Free list lock for the space that we are | |
1710 // currently collecting. | |
1711 assert(haveFreelistLocks(), "Must be holding free list locks"); | |
1712 bitMapLock()->unlock(); | |
1713 releaseFreelistLocks(); | |
1714 { | |
1715 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); | |
1716 if (_foregroundGCShouldWait) { | |
1717 // We are going to be waiting for action for the CMS thread; | |
1718 // it had better not be gone (for instance at shutdown)! | |
1719 assert(ConcurrentMarkSweepThread::cmst() != NULL, | |
1720 "CMS thread must be running"); | |
1721 // Wait here until the background collector gives us the go-ahead | |
1722 ConcurrentMarkSweepThread::clear_CMS_flag( | |
1723 ConcurrentMarkSweepThread::CMS_vm_has_token); // release token | |
1724 // Get a possibly blocked CMS thread going: | |
1725 // Note that we set _foregroundGCIsActive true above, | |
1726 // without protection of the CGC_lock. | |
1727 CGC_lock->notify(); | |
1728 assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(), | |
1729 "Possible deadlock"); | |
1730 while (_foregroundGCShouldWait) { | |
1731 // wait for notification | |
1732 CGC_lock->wait(Mutex::_no_safepoint_check_flag); | |
1733 // Possibility of delay/starvation here, since CMS token does | |
1734 // not know to give priority to the VM thread? Actually, I think | |
1735 // there wouldn't be any delay/starvation, but the proof of | |
1736 // that "fact" (?) appears non-trivial. XXX 20011219YSR | |
1737 } | |
1738 ConcurrentMarkSweepThread::set_CMS_flag( | |
1739 ConcurrentMarkSweepThread::CMS_vm_has_token); | |
1740 } | |
1741 } | |
1742 // The CMS_token is already held. Get back the other locks. | |
1743 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(), | |
1744 "VM thread should have CMS token"); | |
1745 getFreelistLocks(); | |
1746 bitMapLock()->lock_without_safepoint_check(); | |
1747 if (TraceCMSState) { | |
1748 gclog_or_tty->print_cr("CMS foreground collector has asked for control " | |
1749 INTPTR_FORMAT " with first state %d", Thread::current(), first_state); | |
1750 gclog_or_tty->print_cr(" gets control with state %d", _collectorState); | |
1751 } | |
1752 | |
1753 // Check if we need to do a compaction, or if not, whether | |
1754 // we need to start the mark-sweep from scratch. | |
1755 bool should_compact = false; | |
1756 bool should_start_over = false; | |
1757 decide_foreground_collection_type(clear_all_soft_refs, | |
1758 &should_compact, &should_start_over); | |
1759 | |
1760 NOT_PRODUCT( | |
1761 if (RotateCMSCollectionTypes) { | |
1762 if (_cmsGen->debug_collection_type() == | |
1763 ConcurrentMarkSweepGeneration::MSC_foreground_collection_type) { | |
1764 should_compact = true; | |
1765 } else if (_cmsGen->debug_collection_type() == | |
1766 ConcurrentMarkSweepGeneration::MS_foreground_collection_type) { | |
1767 should_compact = false; | |
1768 } | |
1769 } | |
1770 ) | |
1771 | |
1772 if (PrintGCDetails && first_state > Idling) { | |
1773 GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause(); | |
1774 if (GCCause::is_user_requested_gc(cause) || | |
1775 GCCause::is_serviceability_requested_gc(cause)) { | |
1776 gclog_or_tty->print(" (concurrent mode interrupted)"); | |
1777 } else { | |
1778 gclog_or_tty->print(" (concurrent mode failure)"); | |
1779 } | |
1780 } | |
1781 | |
1782 if (should_compact) { | |
1783 // If the collection is being acquired from the background | |
1784 // collector, there may be references on the discovered | |
1785 // references lists that have NULL referents (being those | |
1786 // that were concurrently cleared by a mutator) or | |
1787 // that are no longer active (having been enqueued concurrently | |
1788 // by the mutator). | |
1789 // Scrub the list of those references because Mark-Sweep-Compact | |
1790 // code assumes referents are not NULL and that all discovered | |
1791 // Reference objects are active. | |
1792 ref_processor()->clean_up_discovered_references(); | |
1793 | |
1794 do_compaction_work(clear_all_soft_refs); | |
1795 | |
1796 // Has the GC time limit been exceeded? | |
1797 check_gc_time_limit(); | |
1798 | |
1799 } else { | |
1800 do_mark_sweep_work(clear_all_soft_refs, first_state, | |
1801 should_start_over); | |
1802 } | |
1803 // Reset the expansion cause, now that we just completed | |
1804 // a collection cycle. | |
1805 clear_expansion_cause(); | |
1806 _foregroundGCIsActive = false; | |
1807 return; | |
1808 } | |
1809 | |
1810 void CMSCollector::check_gc_time_limit() { | |
1811 | |
1812 // Ignore explicit GC's. Exiting here does not set the flag and | |
1813 // does not reset the count. Updating of the averages for system | |
1814 // GC's is still controlled by UseAdaptiveSizePolicyWithSystemGC. | |
1815 GCCause::Cause gc_cause = GenCollectedHeap::heap()->gc_cause(); | |
1816 if (GCCause::is_user_requested_gc(gc_cause) || | |
1817 GCCause::is_serviceability_requested_gc(gc_cause)) { | |
1818 return; | |
1819 } | |
1820 | |
1821 // Calculate the fraction of the CMS generation that was freed during | |
1822 // the last collection. | |
1823 // Only consider the STW compacting cost for now. | |
1824 // | |
1825 // Note that the gc time limit test only works for the collections | |
1826 // of the young gen + tenured gen and not for collections of the | |
1827 // permanent gen. That is because the calculation of the space | |
1828 // freed by the collection is the free space in the young gen + | |
1829 // tenured gen. | |
1830 | |
1831 double fraction_free = | |
1832 ((double)_cmsGen->free())/((double)_cmsGen->max_capacity()); | |
1833 if ((100.0 * size_policy()->compacting_gc_cost()) > | |
1834 ((double) GCTimeLimit) && | |
1835 ((fraction_free * 100) < GCHeapFreeLimit)) { | |
1836 size_policy()->inc_gc_time_limit_count(); | |
1837 if (UseGCOverheadLimit && | |
1838 (size_policy()->gc_time_limit_count() > | |
1839 AdaptiveSizePolicyGCTimeLimitThreshold)) { | |
1840 size_policy()->set_gc_time_limit_exceeded(true); | |
1841 // Avoid consecutive OOM due to the gc time limit by resetting | |
1842 // the counter. | |
1843 size_policy()->reset_gc_time_limit_count(); | |
1844 if (PrintGCDetails) { | |
1845 gclog_or_tty->print_cr(" GC is exceeding overhead limit " | |
1846 "of %d%%", GCTimeLimit); | |
1847 } | |
1848 } else { | |
1849 if (PrintGCDetails) { | |
1850 gclog_or_tty->print_cr(" GC would exceed overhead limit " | |
1851 "of %d%%", GCTimeLimit); | |
1852 } | |
1853 } | |
1854 } else { | |
1855 size_policy()->reset_gc_time_limit_count(); | |
1856 } | |
1857 } | |
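// A worked example of the test above, with assumed numbers: with a recent
// compacting gc cost of 0.99, GCTimeLimit = 98 and GCHeapFreeLimit = 2, a
// collection leaving only 1% of the CMS generation free satisfies both
//   100.0 * 0.99 = 99 > 98   and   1 < 2,
// so the limit count is incremented; once it exceeds
// AdaptiveSizePolicyGCTimeLimitThreshold (and UseGCOverheadLimit is on),
// gc_time_limit_exceeded is set and the policy reports the condition as an
// OutOfMemoryError rather than retrying yet another full collection.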
1858 | |
1859 // Resize the perm generation and the tenured generation | |
1860 // after obtaining the free list locks for the | |
1861 // two generations. | |
1862 void CMSCollector::compute_new_size() { | |
1863 assert_locked_or_safepoint(Heap_lock); | |
1864 FreelistLocker z(this); | |
1865 _permGen->compute_new_size(); | |
1866 _cmsGen->compute_new_size(); | |
1867 } | |
1868 | |
1869 // A work method used by foreground collection to determine | |
1870 // what type of collection (compacting or not, continuing or fresh) | |
1871 // it should do. | |
1872 // NOTE: the intent is to make UseCMSCompactAtFullCollection | |
1873 // and CMSCompactWhenClearAllSoftRefs the default in the future | |
1874 // and do away with the flags after a suitable period. | |
1875 void CMSCollector::decide_foreground_collection_type( | |
1876 bool clear_all_soft_refs, bool* should_compact, | |
1877 bool* should_start_over) { | |
1878 // Normally, we'll compact only if the UseCMSCompactAtFullCollection | |
1879 // flag is set, and we have either requested a System.gc() or | |
1880 // the number of full gc's since the last concurrent cycle | |
1881 // has exceeded the threshold set by CMSFullGCsBeforeCompaction, | |
1882 // or if an incremental collection has failed | |
1883 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
1884 assert(gch->collector_policy()->is_two_generation_policy(), | |
1885 "You may want to check the correctness of the following"); | |
1886 // Inform cms gen if this was due to partial collection failing. | |
1887 // The CMS gen may use this fact to determine its expansion policy. | |
1888 if (gch->incremental_collection_will_fail()) { | |
1889 assert(!_cmsGen->incremental_collection_failed(), | |
1890 "Should have been noticed, reacted to and cleared"); | |
1891 _cmsGen->set_incremental_collection_failed(); | |
1892 } | |
1893 *should_compact = | |
1894 UseCMSCompactAtFullCollection && | |
1895 ((_full_gcs_since_conc_gc >= CMSFullGCsBeforeCompaction) || | |
1896 GCCause::is_user_requested_gc(gch->gc_cause()) || | |
1897 gch->incremental_collection_will_fail()); | |
1898 *should_start_over = false; | |
1899 if (clear_all_soft_refs && !*should_compact) { | |
1900 // We are about to do a last ditch collection attempt | |
1901 // so it would normally make sense to do a compaction | |
1902 // to reclaim as much space as possible. | |
1903 if (CMSCompactWhenClearAllSoftRefs) { | |
1904 // Default: The rationale is that in this case either | |
1905 // we are past the final marking phase, in which case | |
1906 // we'd have to start over, or so little has been done | |
1907 // that there's little point in saving that work. Compaction | |
1908 // appears to be the sensible choice in either case. | |
1909 *should_compact = true; | |
1910 } else { | |
1911 // We have been asked to clear all soft refs, but not to | |
1912 // compact. Make sure that we aren't past the final checkpoint | |
1913 // phase, for that is where we process soft refs. If we are already | |
1914 // past that phase, we'll need to redo the refs discovery phase and | |
1915 // if necessary clear soft refs that weren't previously | |
1916 // cleared. We do so by remembering the phase in which | |
1917 // we came in, and if we are past the refs processing | |
1918 // phase, we'll choose to just redo the mark-sweep | |
1919 // collection from scratch. | |
1920 if (_collectorState > FinalMarking) { | |
1921 // We are past the refs processing phase; | |
1922 // start over and do a fresh synchronous CMS cycle | |
1923 _collectorState = Resetting; // skip to reset to start new cycle | |
1924 reset(false /* == !asynch */); | |
1925 *should_start_over = true; | |
1926 } // else we can continue a possibly ongoing current cycle | |
1927 } | |
1928 } | |
1929 } | |
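// The outcome above, summarized as a sketch (the expressions paraphrase
// the code rather than quoting it):
//
//   *should_compact    = UseCMSCompactAtFullCollection &&
//                        (_full_gcs_since_conc_gc >= CMSFullGCsBeforeCompaction
//                         || user_requested_gc
//                         || incremental_collection_will_fail);
//   *should_start_over = !*should_compact && clear_all_soft_refs &&
//                        !CMSCompactWhenClearAllSoftRefs &&
//                        _collectorState > FinalMarking;
//   // clear_all_soft_refs with CMSCompactWhenClearAllSoftRefs set instead
//   // forces *should_compact = true.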
1930 | |
1931 // A work method used by the foreground collector to do | |
1932 // a mark-sweep-compact. | |
1933 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) { | |
1934 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
1935 TraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, gclog_or_tty); | |
1936 if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) { | |
1937 gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d " | |
1938 "collections passed to foreground collector", _full_gcs_since_conc_gc); | |
1939 } | |
1940 | |
1941 // Sample collection interval time and reset for collection pause. | |
1942 if (UseAdaptiveSizePolicy) { | |
1943 size_policy()->msc_collection_begin(); | |
1944 } | |
1945 | |
1946 // Temporarily widen the span of the weak reference processing to | |
1947 // the entire heap. | |
1948 MemRegion new_span(GenCollectedHeap::heap()->reserved_region()); | |
1949 ReferenceProcessorSpanMutator x(ref_processor(), new_span); | |
1950 | |
1951 // Temporarily, clear the "is_alive_non_header" field of the | |
1952 // reference processor. | |
1953 ReferenceProcessorIsAliveMutator y(ref_processor(), NULL); | |
1954 | |
1955 // Temporarily make reference _processing_ single threaded (non-MT). | |
1956 ReferenceProcessorMTProcMutator z(ref_processor(), false); | |
1957 | |
1958 // Temporarily make refs discovery atomic | |
1959 ReferenceProcessorAtomicMutator w(ref_processor(), true); | |
1960 | |
1961 ref_processor()->set_enqueuing_is_done(false); | |
1962 ref_processor()->enable_discovery(); | |
1963 // If an asynchronous collection finishes, the _modUnionTable is | |
1964 // all clear. If we are assuming the collection from an asynchronous | |
1965 // collection, clear the _modUnionTable. | |
1966 assert(_collectorState != Idling || _modUnionTable.isAllClear(), | |
1967 "_modUnionTable should be clear if the baton was not passed"); | |
1968 _modUnionTable.clear_all(); | |
1969 | |
1970 // We must adjust the allocation statistics being maintained | |
1971 // in the free list space. We do so by reading and clearing | |
1972 // the sweep timer and updating the block flux rate estimates below. | |
1973 assert(_sweep_timer.is_active(), "We should never see the timer inactive"); | |
1974 _sweep_timer.stop(); | |
1975 // Note that we do not use this sample to update the _sweep_estimate. | |
1976 _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_sweep_timer.seconds()), | |
1977 _sweep_estimate.padded_average()); | |
1978 | |
1979 GenMarkSweep::invoke_at_safepoint(_cmsGen->level(), | |
1980 ref_processor(), clear_all_soft_refs); | |
1981 #ifdef ASSERT | |
1982 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace(); | |
1983 size_t free_size = cms_space->free(); | |
1984 assert(free_size == | |
1985 pointer_delta(cms_space->end(), cms_space->compaction_top()) | |
1986 * HeapWordSize, | |
1987 "All the free space should be compacted into one chunk at top"); | |
1988 assert(cms_space->dictionary()->totalChunkSize( | |
1989 debug_only(cms_space->freelistLock())) == 0 || | |
1990 cms_space->totalSizeInIndexedFreeLists() == 0, | |
1991 "All the free space should be in a single chunk"); | |
1992 size_t num = cms_space->totalCount(); | |
1993 assert((free_size == 0 && num == 0) || | |
1994 (free_size > 0 && (num == 1 || num == 2)), | |
1995 "There should be at most 2 free chunks after compaction"); | |
1996 #endif // ASSERT | |
1997 _collectorState = Resetting; | |
1998 assert(_restart_addr == NULL, | |
1999 "Should have been NULL'd before baton was passed"); | |
2000 reset(false /* == !asynch */); | |
2001 _cmsGen->reset_after_compaction(); | |
94 | 2002 _concurrent_cycles_since_last_unload = 0; |
2003 |
2004 if (verifying() && !should_unload_classes()) { |
0 | 2005 perm_gen_verify_bit_map()->clear_all(); |
2006 } | |
2007 | |
2008 // Clear any data recorded in the PLAB chunk arrays. | |
2009 if (_survivor_plab_array != NULL) { | |
2010 reset_survivor_plab_arrays(); | |
2011 } | |
2012 | |
2013 // Adjust the per-size allocation stats for the next epoch. | |
2014 _cmsGen->cmsSpace()->endSweepFLCensus(sweepCount() /* fake */); | |
2015 // Restart the "sweep timer" for next epoch. | |
2016 _sweep_timer.reset(); | |
2017 _sweep_timer.start(); | |
2018 | |
2019 // Sample collection pause time and reset for collection interval. | |
2020 if (UseAdaptiveSizePolicy) { | |
2021 size_policy()->msc_collection_end(gch->gc_cause()); | |
2022 } | |
2023 | |
2024 // For a mark-sweep-compact, compute_new_size() will be called | |
2025 // in the heap's do_collection() method. | |
2026 } | |
2027 | |
2028 // A work method used by the foreground collector to do | |
2029 // a mark-sweep, after taking over from a possibly on-going | |
2030 // concurrent mark-sweep collection. | |
2031 void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs, | |
2032 CollectorState first_state, bool should_start_over) { | |
2033 if (PrintGC && Verbose) { | |
2034 gclog_or_tty->print_cr("Pass concurrent collection to foreground " | |
2035 "collector with count %d", | |
2036 _full_gcs_since_conc_gc); | |
2037 } | |
2038 switch (_collectorState) { | |
2039 case Idling: | |
2040 if (first_state == Idling || should_start_over) { | |
2041 // The background GC was not active, or should be | |
2042 // restarted from scratch; start the cycle. | |
2043 _collectorState = InitialMarking; | |
2044 } | |
2045 // If first_state was not Idling, then a background GC | |
2046 // was in progress and has now finished. No need to do it | |
2047 // again. Leave the state as Idling. | |
2048 break; | |
2049 case Precleaning: | |
2050 // In the foreground case don't do the precleaning since | |
2051 // it is not done concurrently and there is extra work | |
2052 // required. | |
2053 _collectorState = FinalMarking; | |
2054 } | |
2055 if (PrintGCDetails && | |
2056 (_collectorState > Idling || | |
2057 !GCCause::is_user_requested_gc(GenCollectedHeap::heap()->gc_cause()))) { | |
2058 gclog_or_tty->print(" (concurrent mode failure)"); | |
2059 } | |
2060 collect_in_foreground(clear_all_soft_refs); | |
2061 | |
2062 // For a mark-sweep, compute_new_size() will be called | |
2063 // in the heap's do_collection() method. | |
2064 } | |
2065 | |
2066 | |
2067 void CMSCollector::getFreelistLocks() const { | |
2068 // Get locks for all free lists in all generations that this | |
2069 // collector is responsible for | |
2070 _cmsGen->freelistLock()->lock_without_safepoint_check(); | |
2071 _permGen->freelistLock()->lock_without_safepoint_check(); | |
2072 } | |
2073 | |
2074 void CMSCollector::releaseFreelistLocks() const { | |
2075 // Release locks for all free lists in all generations that this | |
2076 // collector is responsible for | |
2077 _cmsGen->freelistLock()->unlock(); | |
2078 _permGen->freelistLock()->unlock(); | |
2079 } | |
2080 | |
2081 bool CMSCollector::haveFreelistLocks() const { | |
2082 // Check locks for all free lists in all generations that this | |
2083 // collector is responsible for | |
2084 assert_lock_strong(_cmsGen->freelistLock()); | |
2085 assert_lock_strong(_permGen->freelistLock()); | |
2086 PRODUCT_ONLY(ShouldNotReachHere()); | |
2087 return true; | |
2088 } | |
2089 | |
2090 // A utility class that is used by the CMS collector to | |
2091 // temporarily "release" the foreground collector from its | |
2092 // usual obligation to wait for the background collector to | |
2093 // complete an ongoing phase before proceeding. | |
2094 class ReleaseForegroundGC: public StackObj { | |
2095 private: | |
2096 CMSCollector* _c; | |
2097 public: | |
2098 ReleaseForegroundGC(CMSCollector* c) : _c(c) { | |
2099 assert(_c->_foregroundGCShouldWait, "Else should not need to call"); | |
2100 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); | |
2101 // allow a potentially blocked foreground collector to proceed | |
2102 _c->_foregroundGCShouldWait = false; | |
2103 if (_c->_foregroundGCIsActive) { | |
2104 CGC_lock->notify(); | |
2105 } | |
2106 assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(), | |
2107 "Possible deadlock"); | |
2108 } | |
2109 | |
2110 ~ReleaseForegroundGC() { | |
2111 assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?"); | |
2112 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); | |
2113 _c->_foregroundGCShouldWait = true; | |
2114 } | |
2115 }; | |
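// Typical use, as in collect_in_background() below: bracket a stop-the-world
// VM operation so that a pending foreground collection is not blocked while
// the background collector has the world stopped.
//
//   {
//     ReleaseForegroundGC x(this);       // clears _foregroundGCShouldWait
//     VM_CMS_Initial_Mark initial_mark_op(this);
//     VMThread::execute(&initial_mark_op);
//   }                                    // destructor re-asserts ShouldWait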
2116 | |
2117 // There are separate collect_in_background and collect_in_foreground because of | |
2118 // the different locking requirements of the background collector and the | |
2119 // foreground collector. There was originally an attempt to share | |
2120 // one "collect" method between the background collector and the foreground | |
2121 // collector, but the amount of if-then-else required made it cleaner to have | |
2122 // separate methods. | |
2123 void CMSCollector::collect_in_background(bool clear_all_soft_refs) { | |
2124 assert(Thread::current()->is_ConcurrentGC_thread(), | |
2125 "A CMS asynchronous collection is only allowed on a CMS thread."); | |
2126 | |
2127 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
2128 { | |
2129 bool safepoint_check = Mutex::_no_safepoint_check_flag; | |
2130 MutexLockerEx hl(Heap_lock, safepoint_check); | |
94 | 2131 FreelistLocker fll(this); |
0 | 2132 MutexLockerEx x(CGC_lock, safepoint_check); |
2133 if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) { | |
2134 // The foreground collector is active or we're | |
2135 // not using asynchronous collections. Skip this | |
2136 // background collection. | |
2137 assert(!_foregroundGCShouldWait, "Should be clear"); | |
2138 return; | |
2139 } else { | |
2140 assert(_collectorState == Idling, "Should be idling before start."); | |
2141 _collectorState = InitialMarking; | |
2142 // Reset the expansion cause, now that we are about to begin | |
2143 // a new cycle. | |
2144 clear_expansion_cause(); | |
2145 } | |
94 | 2146 // Decide if we want to enable class unloading as part of the |
2147 // ensuing concurrent GC cycle. |
2148 update_should_unload_classes(); |
0 | 2149 _full_gc_requested = false; // acks all outstanding full gc requests |
2150 // Signal that we are about to start a collection | |
2151 gch->increment_total_full_collections(); // ... starting a collection cycle | |
2152 _collection_count_start = gch->total_full_collections(); | |
2153 } | |
2154 | |
2155 // Used for PrintGC | |
2156 size_t prev_used; | |
2157 if (PrintGC && Verbose) { | |
2158 prev_used = _cmsGen->used(); // XXXPERM | |
2159 } | |
2160 | |
2161 // The change of the collection state is normally done at this level; | |
2162 // the exceptions are phases that are executed while the world is | |
2163 // stopped. For those phases the change of state is done while the | |
2164 // world is stopped. For baton passing purposes this allows the | |
2165 // background collector to finish the phase and change state atomically. | |
2166 // The foreground collector cannot wait on a phase that is done | |
2167 // while the world is stopped because the foreground collector already | |
2168 // has the world stopped and would deadlock. | |
2169 while (_collectorState != Idling) { | |
2170 if (TraceCMSState) { | |
2171 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d", | |
2172 Thread::current(), _collectorState); | |
2173 } | |
2174 // The foreground collector | |
2175 // holds the Heap_lock throughout its collection. | |
2176 // holds the CMS token (but not the lock) | |
2177 // except while it is waiting for the background collector to yield. | |
2178 // | |
2179 // The foreground collector should be blocked (not for long) | |
2180 // if the background collector is about to start a phase | |
2181 // executed with world stopped. If the background | |
2182 // collector has already started such a phase, the | |
2183 // foreground collector is blocked waiting for the | |
2184 // Heap_lock. The stop-world phases (InitialMarking and FinalMarking) | |
2185 // are executed in the VM thread. | |
2186 // | |
2187 // The locking order is | |
2188 // PendingListLock (PLL) -- if applicable (FinalMarking) | |
2189 // Heap_lock (both this & PLL locked in VM_CMS_Operation::prologue()) | |
2190 // CMS token (claimed in | |
2191 // stop_world_and_do() --> | |
2192 // safepoint_synchronize() --> | |
2193 // CMSThread::synchronize()) | |
2194 | |
2195 { | |
2196 // Check if the FG collector wants us to yield. | |
2197 CMSTokenSync x(true); // is cms thread | |
2198 if (waitForForegroundGC()) { | |
2199 // We yielded to a foreground GC, nothing more to be | |
2200 // done this round. | |
2201 assert(_foregroundGCShouldWait == false, "We set it to false in " | |
2202 "waitForForegroundGC()"); | |
2203 if (TraceCMSState) { | |
2204 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT | |
2205 " exiting collection CMS state %d", | |
2206 Thread::current(), _collectorState); | |
2207 } | |
2208 return; | |
2209 } else { | |
2210 // The background collector can run but check to see if the | |
2211 // foreground collector has done a collection while the | |
2212 // background collector was waiting to get the CGC_lock | |
2213 // above. If yes, break so that _foregroundGCShouldWait | |
2214 // is cleared before returning. | |
2215 if (_collectorState == Idling) { | |
2216 break; | |
2217 } | |
2218 } | |
2219 } | |
2220 | |
2221 assert(_foregroundGCShouldWait, "Foreground collector, if active, " | |
2222 "should be waiting"); | |
2223 | |
2224 switch (_collectorState) { | |
2225 case InitialMarking: | |
2226 { | |
2227 ReleaseForegroundGC x(this); | |
2228 stats().record_cms_begin(); | |
2229 | |
2230 VM_CMS_Initial_Mark initial_mark_op(this); | |
2231 VMThread::execute(&initial_mark_op); | |
2232 } | |
2233 // The collector state may be any legal state at this point | |
2234 // since the background collector may have yielded to the | |
2235 // foreground collector. | |
2236 break; | |
2237 case Marking: | |
2238 // initial marking in checkpointRootsInitialWork has been completed | |
2239 if (markFromRoots(true)) { // we were successful | |
2240 assert(_collectorState == Precleaning, "Collector state should " | |
2241 "have changed"); | |
2242 } else { | |
2243 assert(_foregroundGCIsActive, "Internal state inconsistency"); | |
2244 } | |
2245 break; | |
2246 case Precleaning: | |
2247 if (UseAdaptiveSizePolicy) { | |
2248 size_policy()->concurrent_precleaning_begin(); | |
2249 } | |
2250 // marking from roots in markFromRoots has been completed | |
2251 preclean(); | |
2252 if (UseAdaptiveSizePolicy) { | |
2253 size_policy()->concurrent_precleaning_end(); | |
2254 } | |
2255 assert(_collectorState == AbortablePreclean || | |
2256 _collectorState == FinalMarking, | |
2257 "Collector state should have changed"); | |
2258 break; | |
2259 case AbortablePreclean: | |
2260 if (UseAdaptiveSizePolicy) { | |
2261 size_policy()->concurrent_phases_resume(); | |
2262 } | |
2263 abortable_preclean(); | |
2264 if (UseAdaptiveSizePolicy) { | |
2265 size_policy()->concurrent_precleaning_end(); | |
2266 } | |
2267 assert(_collectorState == FinalMarking, "Collector state should " | |
2268 "have changed"); | |
2269 break; | |
2270 case FinalMarking: | |
2271 { | |
2272 ReleaseForegroundGC x(this); | |
2273 | |
2274 VM_CMS_Final_Remark final_remark_op(this); | |
2275 VMThread::execute(&final_remark_op); | |
2276 } | |
2277 assert(_foregroundGCShouldWait, "block post-condition"); | |
2278 break; | |
2279 case Sweeping: | |
2280 if (UseAdaptiveSizePolicy) { | |
2281 size_policy()->concurrent_sweeping_begin(); | |
2282 } | |
2283 // final marking in checkpointRootsFinal has been completed | |
2284 sweep(true); | |
2285 assert(_collectorState == Resizing, "Collector state change " | |
2286 "to Resizing must be done under the free_list_lock"); | |
2287 _full_gcs_since_conc_gc = 0; | |
2288 | |
2289 // Stop the timers for adaptive size policy for the concurrent phases | |
2290 if (UseAdaptiveSizePolicy) { | |
2291 size_policy()->concurrent_sweeping_end(); | |
2292 size_policy()->concurrent_phases_end(gch->gc_cause(), | |
2293 gch->prev_gen(_cmsGen)->capacity(), | |
2294 _cmsGen->free()); | |
2295 } | |
2296 | |
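// NOTE: no break above -- deliberate fall-through into Resizing.
// sweep(true) has already moved _collectorState to Resizing (see the
// assert above), unless a foreground collection intervenes; the
// Resizing case below re-checks the state before acting.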
2297 case Resizing: { | |
2298 // Sweeping has been completed... | |
2299 // At this point the background collection has completed. | |
2300 // Don't move the call to compute_new_size() down | |
2301 // into code that might be executed if the background | |
2302 // collection was preempted. | |
2303 { | |
2304 ReleaseForegroundGC x(this); // unblock FG collection | |
2305 MutexLockerEx y(Heap_lock, Mutex::_no_safepoint_check_flag); | |
2306 CMSTokenSync z(true); // not strictly needed. | |
2307 if (_collectorState == Resizing) { | |
2308 compute_new_size(); | |
2309 _collectorState = Resetting; | |
2310 } else { | |
2311 assert(_collectorState == Idling, "The state should only change" | |
2312 " because the foreground collector has finished the collection"); | |
2313 } | |
2314 } | |
2315 break; | |
2316 } | |
2317 case Resetting: | |
2318 // CMS heap resizing has been completed | |
2319 reset(true); | |
2320 assert(_collectorState == Idling, "Collector state should " | |
2321 "have changed"); | |
2322 stats().record_cms_end(); | |
2323 // Don't move the concurrent_phases_end() and compute_new_size() | |
2324 // calls to here because a preempted background collection | |
2325 // has its state set to "Resetting". | |
2326 break; | |
2327 case Idling: | |
2328 default: | |
2329 ShouldNotReachHere(); | |
2330 break; | |
2331 } | |
2332 if (TraceCMSState) { | |
2333 gclog_or_tty->print_cr(" Thread " INTPTR_FORMAT " done - next CMS state %d", | |
2334 Thread::current(), _collectorState); | |
2335 } | |
2336 assert(_foregroundGCShouldWait, "block post-condition"); | |
2337 } | |
2338 | |
2339 // Should this be in gc_epilogue? | |
2340 collector_policy()->counters()->update_counters(); | |
2341 | |
2342 { | |
2343 // Clear _foregroundGCShouldWait and, in the event that the | |
2344 // foreground collector is waiting, notify it, before | |
2345 // returning. | |
2346 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); | |
2347 _foregroundGCShouldWait = false; | |
2348 if (_foregroundGCIsActive) { | |
2349 CGC_lock->notify(); | |
2350 } | |
2351 assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(), | |
2352 "Possible deadlock"); | |
2353 } | |
2354 if (TraceCMSState) { | |
2355 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT | |
2356 " exiting collection CMS state %d", | |
2357 Thread::current(), _collectorState); | |
2358 } | |
2359 if (PrintGC && Verbose) { | |
2360 _cmsGen->print_heap_change(prev_used); | |
2361 } | |
2362 } | |
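// For reference, the state sequence an unpreempted background cycle
// steps through in the loop above is:
//
//   Idling -> InitialMarking -> Marking -> Precleaning
//     -> [AbortablePreclean] -> FinalMarking -> Sweeping
//     -> Resizing -> Resetting -> Idling
//
// At any yield point a foreground collection may take over and drive
// the state back to Idling itself, which is why each case re-asserts
// or re-checks the state it expects.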
2363 | |
2364 void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) { | |
2365 assert(_foregroundGCIsActive && !_foregroundGCShouldWait, | |
2366 "Foreground collector should be waiting, not executing"); | |
2367 assert(Thread::current()->is_VM_thread(), "A foreground collection " | |
2368 "may only be done by the VM Thread with the world stopped"); | |
2369 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(), | |
2370 "VM thread should have CMS token"); | |
2371 | |
2372 NOT_PRODUCT(TraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose, | |
2373 true, gclog_or_tty);) | |
2374 if (UseAdaptiveSizePolicy) { | |
2375 size_policy()->ms_collection_begin(); | |
2376 } | |
2377 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact); | |
2378 | |
2379 HandleMark hm; // Discard invalid handles created during verification | |
2380 | |
2381 if (VerifyBeforeGC && | |
2382 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { | |
2383 Universe::verify(true); | |
2384 } | |
2385 | |
2386 bool init_mark_was_synchronous = false; // until proven otherwise | |
2387 while (_collectorState != Idling) { | |
2388 if (TraceCMSState) { | |
2389 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d", | |
2390 Thread::current(), _collectorState); | |
2391 } | |
2392 switch (_collectorState) { | |
2393 case InitialMarking: | |
2394 init_mark_was_synchronous = true; // fact to be exploited in re-mark | |
2395 checkpointRootsInitial(false); | |
2396 assert(_collectorState == Marking, "Collector state should have changed" | |
2397 " within checkpointRootsInitial()"); | |
2398 break; | |
2399 case Marking: | |
2400 // initial marking in checkpointRootsInitialWork has been completed | |
2401 if (VerifyDuringGC && | |
2402 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { | |
2403 gclog_or_tty->print("Verify before initial mark: "); | |
2404 Universe::verify(true); | |
2405 } | |
2406 { | |
2407 bool res = markFromRoots(false); | |
2408 assert(res && _collectorState == FinalMarking, "Collector state should " | |
2409 "have changed"); | |
2410 break; | |
2411 } | |
2412 case FinalMarking: | |
2413 if (VerifyDuringGC && | |
2414 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { | |
2415 gclog_or_tty->print("Verify before re-mark: "); | |
2416 Universe::verify(true); | |
2417 } | |
2418 checkpointRootsFinal(false, clear_all_soft_refs, | |
2419 init_mark_was_synchronous); | |
2420 assert(_collectorState == Sweeping, "Collector state should not " | |
2421 "have changed within checkpointRootsFinal()"); | |
2422 break; | |
2423 case Sweeping: | |
2424 // final marking in checkpointRootsFinal has been completed | |
2425 if (VerifyDuringGC && | |
2426 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { | |
2427 gclog_or_tty->print("Verify before sweep: "); | |
2428 Universe::verify(true); | |
2429 } | |
2430 sweep(false); | |
2431 assert(_collectorState == Resizing, "Incorrect state"); | |
2432 break; | |
2433 case Resizing: { | |
2434 // Sweeping has been completed; the actual resize in this case | |
2435 // is done separately; nothing to be done in this state. | |
2436 _collectorState = Resetting; | |
2437 break; | |
2438 } | |
2439 case Resetting: | |
2440 // The heap has been resized. | |
2441 if (VerifyDuringGC && | |
2442 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { | |
2443 gclog_or_tty->print("Verify before reset: "); | |
2444 Universe::verify(true); | |
2445 } | |
2446 reset(false); | |
2447 assert(_collectorState == Idling, "Collector state should " | |
2448 "have changed"); | |
2449 break; | |
2450 case Precleaning: | |
2451 case AbortablePreclean: | |
2452 // Elide the preclean phase | |
2453 _collectorState = FinalMarking; | |
2454 break; | |
2455 default: | |
2456 ShouldNotReachHere(); | |
2457 } | |
2458 if (TraceCMSState) { | |
2459 gclog_or_tty->print_cr(" Thread " INTPTR_FORMAT " done - next CMS state %d", | |
2460 Thread::current(), _collectorState); | |
2461 } | |
2462 } | |
2463 | |
2464 if (UseAdaptiveSizePolicy) { | |
2465 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
2466 size_policy()->ms_collection_end(gch->gc_cause()); | |
2467 } | |
2468 | |
2469 if (VerifyAfterGC && | |
2470 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { | |
2471 Universe::verify(true); | |
2472 } | |
2473 if (TraceCMSState) { | |
2474 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT | |
2475 " exiting collection CMS state %d", | |
2476 Thread::current(), _collectorState); | |
2477 } | |
2478 } | |
2479 | |
2480 bool CMSCollector::waitForForegroundGC() { | |
2481 bool res = false; | |
2482 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), | |
2483 "CMS thread should have CMS token"); | |
2484 // Block the foreground collector until the | |
2485 // background collector decides whether to | |
2486 // yield. | |
2487 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); | |
2488 _foregroundGCShouldWait = true; | |
2489 if (_foregroundGCIsActive) { | |
2490 // The background collector yields to the | |
2491 // foreground collector and returns a value | |
2492 // indicating that it has yielded. The foreground | |
2493 // collector can proceed. | |
2494 res = true; | |
2495 _foregroundGCShouldWait = false; | |
2496 ConcurrentMarkSweepThread::clear_CMS_flag( | |
2497 ConcurrentMarkSweepThread::CMS_cms_has_token); | |
2498 ConcurrentMarkSweepThread::set_CMS_flag( | |
2499 ConcurrentMarkSweepThread::CMS_cms_wants_token); | |
2500 // Get a possibly blocked foreground thread going | |
2501 CGC_lock->notify(); | |
2502 if (TraceCMSState) { | |
2503 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d", | |
2504 Thread::current(), _collectorState); | |
2505 } | |
2506 while (_foregroundGCIsActive) { | |
2507 CGC_lock->wait(Mutex::_no_safepoint_check_flag); | |
2508 } | |
2509 ConcurrentMarkSweepThread::set_CMS_flag( | |
2510 ConcurrentMarkSweepThread::CMS_cms_has_token); | |
2511 ConcurrentMarkSweepThread::clear_CMS_flag( | |
2512 ConcurrentMarkSweepThread::CMS_cms_wants_token); | |
2513 } | |
2514 if (TraceCMSState) { | |
2515 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d", | |
2516 Thread::current(), _collectorState); | |
2517 } | |
2518 return res; | |
2519 } | |
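// For contrast, the foreground side of this handshake (a sketch
// condensed from acquire_control_and_collect(), earlier in this file;
// not the literal code) blocks on the same lock/flag pair:
//
//   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
//   _foregroundGCIsActive = true;
//   while (_foregroundGCShouldWait) {
//     CGC_lock->wait(Mutex::_no_safepoint_check_flag);
//   }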
2520 | |
2521 // Because of the need to lock the free lists and other structures in | |
2522 // the collector, common to all the generations that the collector is | |
2523 // collecting, we need the gc_prologues of individual CMS generations | |
2524 // to delegate to their collector. It may have been simpler had the | |
2525 // current infrastructure allowed one to call a prologue on a | |
2526 // collector. In the absence of that we have the generation's | |
2527 // prologue delegate to the collector, which delegates back | |
2528 // some "local" work to a worker method in the individual generations | |
2529 // that it's responsible for collecting, while itself doing any | |
2530 // work common to all generations it's responsible for. A similar | |
2531 // comment applies to the gc_epilogue()'s. | |
2532 // The role of the variable _between_prologue_and_epilogue is to | |
2533 // enforce the invocation protocol. | |
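// Schematically, for the CMS old gen and perm gen:
//
//   GenCollectedHeap
//     -> ConcurrentMarkSweepGeneration::gc_prologue(full)   // public, below
//          -> CMSCollector::gc_prologue(full)               // runs once; later
//               -> _cmsGen->gc_prologue_work(full, ...)     //   delegations
//               -> _permGen->gc_prologue_work(full, ...)    //   return early
//
// and symmetrically for gc_epilogue()/gc_epilogue_work().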
2534 void CMSCollector::gc_prologue(bool full) { | |
2535 // Call gc_prologue_work() for each CMSGen and PermGen that | |
2536 // we are responsible for. | |
2537 | |
2538 // The following locking discipline assumes that we are only called | |
2539 // when the world is stopped. | |
2540 assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption"); | |
2541 | |
2542 // The CMSCollector prologue must call the gc_prologues for the | |
2543 // "generations" (including PermGen if any) that it's responsible | |
2544 // for. | |
2545 | |
2546 assert( Thread::current()->is_VM_thread() | |
2547 || ( CMSScavengeBeforeRemark | |
2548 && Thread::current()->is_ConcurrentGC_thread()), | |
2549 "Incorrect thread type for prologue execution"); | |
2550 | |
2551 if (_between_prologue_and_epilogue) { | |
2552 // We have already been invoked; this is a gc_prologue delegation | |
2553 // from yet another CMS generation that we are responsible for, just | |
2554 // ignore it since all relevant work has already been done. | |
2555 return; | |
2556 } | |
2557 | |
2558 // set a bit saying prologue has been called; cleared in epilogue | |
2559 _between_prologue_and_epilogue = true; | |
2560 // Claim locks for common data structures, then call gc_prologue_work() | |
2561 // for each CMSGen and PermGen that we are responsible for. | |
2562 | |
2563 getFreelistLocks(); // gets free list locks on constituent spaces | |
2564 bitMapLock()->lock_without_safepoint_check(); | |
2565 | |
2566 // Should call gc_prologue_work() for all cms gens we are responsible for | |
2567 bool registerClosure = _collectorState >= Marking | |
2568 && _collectorState < Sweeping; | |
2569 ModUnionClosure* muc = ParallelGCThreads > 0 ? &_modUnionClosurePar | |
2570 : &_modUnionClosure; | |
2571 _cmsGen->gc_prologue_work(full, registerClosure, muc); | |
2572 _permGen->gc_prologue_work(full, registerClosure, muc); | |
2573 | |
2574 if (!full) { | |
2575 stats().record_gc0_begin(); | |
2576 } | |
2577 } | |
2578 | |
2579 void ConcurrentMarkSweepGeneration::gc_prologue(bool full) { | |
2580 // Delegate to CMScollector which knows how to coordinate between | |
2581 // this and any other CMS generations that it is responsible for | |
2582 // collecting. | |
2583 collector()->gc_prologue(full); | |
2584 } | |
2585 | |
2586 // This is a "private" interface for use by this generation's CMSCollector. | |
2587 // Not to be called directly by any other entity (for instance, | |
2588 // GenCollectedHeap, which calls the "public" gc_prologue method above). | |
2589 void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full, | |
2590 bool registerClosure, ModUnionClosure* modUnionClosure) { | |
2591 assert(!incremental_collection_failed(), "Shouldn't be set yet"); | |
2592 assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL, | |
2593 "Should be NULL"); | |
2594 if (registerClosure) { | |
2595 cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure); | |
2596 } | |
2597 cmsSpace()->gc_prologue(); | |
2598 // Clear stat counters | |
2599 NOT_PRODUCT( | |
2600 assert(_numObjectsPromoted == 0, "check"); | |
2601 assert(_numWordsPromoted == 0, "check"); | |
2602 if (Verbose && PrintGC) { | |
2603 gclog_or_tty->print("Allocated "SIZE_FORMAT" objects, " | |
2604 SIZE_FORMAT" bytes concurrently", | |
2605 _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord)); | |
2606 } | |
2607 _numObjectsAllocated = 0; | |
2608 _numWordsAllocated = 0; | |
2609 ) | |
2610 } | |
2611 | |
2612 void CMSCollector::gc_epilogue(bool full) { | |
2613 // The following locking discipline assumes that we are only called | |
2614 // when the world is stopped. | |
2615 assert(SafepointSynchronize::is_at_safepoint(), | |
2616 "world is stopped assumption"); | |
2617 | |
2618 // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks | |
2619 // if linear allocation blocks need to be appropriately marked to allow the | |
2620 // blocks to be parsable. We also check here whether we need to nudge the | |
2621 // CMS collector thread to start a new cycle (if it's not already active). | |
2622 assert( Thread::current()->is_VM_thread() | |
2623 || ( CMSScavengeBeforeRemark | |
2624 && Thread::current()->is_ConcurrentGC_thread()), | |
2625 "Incorrect thread type for epilogue execution"); | |
2626 | |
2627 if (!_between_prologue_and_epilogue) { | |
2628 // We have already been invoked; this is a gc_epilogue delegation | |
2629 // from yet another CMS generation that we are responsible for, just | |
2630 // ignore it since all relevant work has already been done. | |
2631 return; | |
2632 } | |
2633 assert(haveFreelistLocks(), "must have freelist locks"); | |
2634 assert_lock_strong(bitMapLock()); | |
2635 | |
2636 _cmsGen->gc_epilogue_work(full); | |
2637 _permGen->gc_epilogue_work(full); | |
2638 | |
2639 if (_collectorState == AbortablePreclean || _collectorState == Precleaning) { | |
2640 // in case sampling was not already enabled, enable it | |
2641 _start_sampling = true; | |
2642 } | |
2643 // reset _eden_chunk_array so sampling starts afresh | |
2644 _eden_chunk_index = 0; | |
2645 | |
2646 size_t cms_used = _cmsGen->cmsSpace()->used(); | |
2647 size_t perm_used = _permGen->cmsSpace()->used(); | |
2648 | |
2649 // update performance counters - this uses a special version of | |
2650 // update_counters() that allows the utilization to be passed as a | |
2651 // parameter, avoiding multiple calls to used(). | |
2652 // | |
2653 _cmsGen->update_counters(cms_used); | |
2654 _permGen->update_counters(perm_used); | |
2655 | |
2656 if (CMSIncrementalMode) { | |
2657 icms_update_allocation_limits(); | |
2658 } | |
2659 | |
2660 bitMapLock()->unlock(); | |
2661 releaseFreelistLocks(); | |
2662 | |
2663 _between_prologue_and_epilogue = false; // ready for next cycle | |
2664 } | |
2665 | |
2666 void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) { | |
2667 collector()->gc_epilogue(full); | |
2668 | |
2669 // Also reset promotion tracking in par gc thread states. | |
2670 if (ParallelGCThreads > 0) { | |
2671 for (uint i = 0; i < ParallelGCThreads; i++) { | |
2672 _par_gc_thread_states[i]->promo.stopTrackingPromotions(); | |
2673 } | |
2674 } | |
2675 } | |
2676 | |
2677 void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) { | |
2678 assert(!incremental_collection_failed(), "Should have been cleared"); | |
2679 cmsSpace()->setPreconsumptionDirtyCardClosure(NULL); | |
2680 cmsSpace()->gc_epilogue(); | |
2681 // Print stat counters | |
2682 NOT_PRODUCT( | |
2683 assert(_numObjectsAllocated == 0, "check"); | |
2684 assert(_numWordsAllocated == 0, "check"); | |
2685 if (Verbose && PrintGC) { | |
2686 gclog_or_tty->print("Promoted "SIZE_FORMAT" objects, " | |
2687 SIZE_FORMAT" bytes", | |
2688 _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord)); | |
2689 } | |
2690 _numObjectsPromoted = 0; | |
2691 _numWordsPromoted = 0; | |
2692 ) | |
2693 | |
2694 if (PrintGC && Verbose) { | |
2695 // The call down the chain in contiguous_available() needs the freelistLock, | |
2696 // so print this out before releasing the freelistLock. | |
2697 gclog_or_tty->print(" Contiguous available "SIZE_FORMAT" bytes ", | |
2698 contiguous_available()); | |
2699 } | |
2700 } | |
2701 | |
2702 #ifndef PRODUCT | |
2703 bool CMSCollector::have_cms_token() { | |
2704 Thread* thr = Thread::current(); | |
2705 if (thr->is_VM_thread()) { | |
2706 return ConcurrentMarkSweepThread::vm_thread_has_cms_token(); | |
2707 } else if (thr->is_ConcurrentGC_thread()) { | |
2708 return ConcurrentMarkSweepThread::cms_thread_has_cms_token(); | |
2709 } else if (thr->is_GC_task_thread()) { | |
2710 return ConcurrentMarkSweepThread::vm_thread_has_cms_token() && | |
2711 ParGCRareEvent_lock->owned_by_self(); | |
2712 } | |
2713 return false; | |
2714 } | |
2715 #endif | |
2716 | |
2717 // Check reachability of the given heap address in CMS generation, | |
2718 // treating all other generations as roots. | |
2719 bool CMSCollector::is_cms_reachable(HeapWord* addr) { | |
2720 // We could "guarantee" below, rather than assert, but I'll | |
2721 // leave these as "asserts" so that an adventurous debugger | |
2722 // could try this in the product build provided some subset of | |
2723 // the conditions were met, provided they were interested in the | |
2724 // results and knew that the computation below wouldn't interfere | |
2725 // with other concurrent computations mutating the structures | |
2726 // being read or written. | |
2727 assert(SafepointSynchronize::is_at_safepoint(), | |
2728 "Else mutations in object graph will make answer suspect"); | |
2729 assert(have_cms_token(), "Should hold cms token"); | |
2730 assert(haveFreelistLocks(), "must hold free list locks"); | |
2731 assert_lock_strong(bitMapLock()); | |
2732 | |
2733 // Clear the marking bit map array before starting, but, just | |
2734 // for kicks, first report if the given address is already marked | |
2735 gclog_or_tty->print_cr("Start: Address " INTPTR_FORMAT " is%s marked", addr, | |
2736 _markBitMap.isMarked(addr) ? "" : " not"); | |
2737 | |
2738 if (verify_after_remark()) { | |
2739 MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag); | |
2740 bool result = verification_mark_bm()->isMarked(addr); | |
2741 gclog_or_tty->print_cr("TransitiveMark: Address " INTPTR_FORMAT " %s marked", addr, | |
2742 result ? "IS" : "is NOT"); | |
2743 return result; | |
2744 } else { | |
2745 gclog_or_tty->print_cr("Could not compute result"); | |
2746 return false; | |
2747 } | |
2748 } | |
2749 | |
2750 //////////////////////////////////////////////////////// | |
2751 // CMS Verification Support | |
2752 //////////////////////////////////////////////////////// | |
2753 // Following the remark phase, the following invariant | |
2754 // should hold -- each object in the CMS heap which is | |
2755 // marked in the verification_mark_bm() should also be marked in markBitMap(). | |
2756 | |
2757 class VerifyMarkedClosure: public BitMapClosure { | |
2758 CMSBitMap* _marks; | |
2759 bool _failed; | |
2760 | |
2761 public: | |
2762 VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {} | |
2763 | |
2764 void do_bit(size_t offset) { | |
2765 HeapWord* addr = _marks->offsetToHeapWord(offset); | |
2766 if (!_marks->isMarked(addr)) { | |
2767 oop(addr)->print(); | |
2768 gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr); | |
2769 _failed = true; | |
2770 } | |
2771 } | |
2772 | |
2773 bool failed() { return _failed; } | |
2774 }; | |
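// Typical use (as in verify_after_remark_work_1/2 below): construct the
// closure over the CMS marks and iterate it over the freshly computed
// verification marks, so every verification mark is checked against the
// CMS mark bit map:
//
//   VerifyMarkedClosure vcl(markBitMap());
//   verification_mark_bm()->iterate(&vcl);
//   if (vcl.failed()) { /* report the unmarked objects and abort */ }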
2775 | |
2776 bool CMSCollector::verify_after_remark() { | |
2777 gclog_or_tty->print(" [Verifying CMS Marking... "); | |
2778 MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag); | |
2779 static bool init = false; | |
2780 | |
2781 assert(SafepointSynchronize::is_at_safepoint(), | |
2782 "Else mutations in object graph will make answer suspect"); | |
2783 assert(have_cms_token(), | |
2784 "Else there may be mutual interference in use of " | |
2785 " verification data structures"); | |
2786 assert(_collectorState > Marking && _collectorState <= Sweeping, | |
2787 "Else marking info checked here may be obsolete"); | |
2788 assert(haveFreelistLocks(), "must hold free list locks"); | |
2789 assert_lock_strong(bitMapLock()); | |
2790 | |
2791 | |
2792 // Allocate marking bit map if not already allocated | |
2793 if (!init) { // first time | |
2794 if (!verification_mark_bm()->allocate(_span)) { | |
2795 return false; | |
2796 } | |
2797 init = true; | |
2798 } | |
2799 | |
2800 assert(verification_mark_stack()->isEmpty(), "Should be empty"); | |
2801 | |
2802 // Turn off refs discovery -- so we will be tracing through refs. | |
2803 // This is as intended, because by this time | |
2804 // GC must already have cleared any refs that need to be cleared, | |
2805 // and traced those that need to be marked; moreover, | |
2806 // the marking done here is not going to interfere in any | |
2807 // way with the marking information used by GC. | |
2808 NoRefDiscovery no_discovery(ref_processor()); | |
2809 | |
2810 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;) | |
2811 | |
2812 // Clear any marks from a previous round | |
2813 verification_mark_bm()->clear_all(); | |
2814 assert(verification_mark_stack()->isEmpty(), "markStack should be empty"); | |
2815 assert(overflow_list_is_empty(), "overflow list should be empty"); | |
2816 | |
2817 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
2818 gch->ensure_parsability(false); // fill TLABs, but no need to retire them | |
2819 // Update the saved marks which may affect the root scans. | |
2820 gch->save_marks(); | |
2821 | |
2822 if (CMSRemarkVerifyVariant == 1) { | |
2823 // In this first variant of verification, we complete | |
2824 // all marking, then check if the new marks-vector is | |
2825 // a subset of the CMS marks-vector. | |
2826 verify_after_remark_work_1(); | |
2827 } else if (CMSRemarkVerifyVariant == 2) { | |
2828 // In this second variant of verification, we flag an error | |
2829 // (i.e. an object reachable in the new marks-vector not reachable | |
2830 // in the CMS marks-vector) immediately, also indicating the | |
2831 // identity of an object (A) that references the unmarked object (B) -- | |
2832 // presumably, a mutation to A failed to be picked up by preclean/remark? | |
2833 verify_after_remark_work_2(); | |
2834 } else { | |
2835 warning("Unrecognized value %d for CMSRemarkVerifyVariant", | |
2836 CMSRemarkVerifyVariant); | |
2837 } | |
2838 gclog_or_tty->print(" done] "); | |
2839 return true; | |
2840 } | |
2841 | |
2842 void CMSCollector::verify_after_remark_work_1() { | |
2843 ResourceMark rm; | |
2844 HandleMark hm; | |
2845 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
2846 | |
2847 // Mark from roots one level into CMS | |
2848 MarkRefsIntoClosure notOlder(_span, verification_mark_bm(), true /* nmethods */); | |
2849 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. | |
2850 | |
2851 gch->gen_process_strong_roots(_cmsGen->level(), | |
2852 true, // younger gens are roots | |
2853 true, // collecting perm gen | |
2854 SharedHeap::ScanningOption(roots_scanning_options()), | |
2855 NULL, ¬Older); | |
2856 | |
2857 // Now mark from the roots | |
2858 assert(_revisitStack.isEmpty(), "Should be empty"); | |
2859 MarkFromRootsClosure markFromRootsClosure(this, _span, | |
2860 verification_mark_bm(), verification_mark_stack(), &_revisitStack, | |
2861 false /* don't yield */, true /* verifying */); | |
2862 assert(_restart_addr == NULL, "Expected pre-condition"); | |
2863 verification_mark_bm()->iterate(&markFromRootsClosure); | |
2864 while (_restart_addr != NULL) { | |
2865 // Deal with stack overflow: by restarting at the indicated | |
2866 // address. | |
2867 HeapWord* ra = _restart_addr; | |
2868 markFromRootsClosure.reset(ra); | |
2869 _restart_addr = NULL; | |
2870 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end()); | |
2871 } | |
2872 assert(verification_mark_stack()->isEmpty(), "Should have been drained"); | |
2873 verify_work_stacks_empty(); | |
2874 // Should reset the revisit stack above, since no class tree | |
2875 // surgery is forthcoming. | |
2876 _revisitStack.reset(); // throwing away all contents | |
2877 | |
2878 // Marking completed -- now verify that each bit marked in | |
2879 // verification_mark_bm() is also marked in markBitMap(); flag all | |
2880 // errors by printing corresponding objects. | |
2881 VerifyMarkedClosure vcl(markBitMap()); | |
2882 verification_mark_bm()->iterate(&vcl); | |
2883 if (vcl.failed()) { | |
2884 gclog_or_tty->print("Verification failed"); | |
2885 Universe::heap()->print(); | |
2886 fatal(" ... aborting"); | |
2887 } | |
2888 } | |
2889 | |
2890 void CMSCollector::verify_after_remark_work_2() { | |
2891 ResourceMark rm; | |
2892 HandleMark hm; | |
2893 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
2894 | |
2895 // Mark from roots one level into CMS | |
2896 MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(), | |
2897 markBitMap(), true /* nmethods */); | |
2898 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. | |
2899 gch->gen_process_strong_roots(_cmsGen->level(), | |
2900 true, // younger gens are roots | |
2901 true, // collecting perm gen | |
2902 SharedHeap::ScanningOption(roots_scanning_options()), | |
2903 NULL, ¬Older); | |
2904 | |
2905 // Now mark from the roots | |
2906 assert(_revisitStack.isEmpty(), "Should be empty"); | |
2907 MarkFromRootsVerifyClosure markFromRootsClosure(this, _span, | |
2908 verification_mark_bm(), markBitMap(), verification_mark_stack()); | |
2909 assert(_restart_addr == NULL, "Expected pre-condition"); | |
2910 verification_mark_bm()->iterate(&markFromRootsClosure); | |
2911 while (_restart_addr != NULL) { | |
2912 // Deal with stack overflow: by restarting at the indicated | |
2913 // address. | |
2914 HeapWord* ra = _restart_addr; | |
2915 markFromRootsClosure.reset(ra); | |
2916 _restart_addr = NULL; | |
2917 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end()); | |
2918 } | |
2919 assert(verification_mark_stack()->isEmpty(), "Should have been drained"); | |
2920 verify_work_stacks_empty(); | |
2921 // Should reset the revisit stack above, since no class tree | |
2922 // surgery is forthcoming. | |
2923 _revisitStack.reset(); // throwing away all contents | |
2924 | |
2925 // Marking completed -- now verify that each bit marked in | |
2926 // verification_mark_bm() is also marked in markBitMap(); flag all | |
2927 // errors by printing corresponding objects. | |
2928 VerifyMarkedClosure vcl(markBitMap()); | |
2929 verification_mark_bm()->iterate(&vcl); | |
2930 assert(!vcl.failed(), "Else verification above should not have succeeded"); | |
2931 } | |
2932 | |
2933 void ConcurrentMarkSweepGeneration::save_marks() { | |
2934 // delegate to CMS space | |
2935 cmsSpace()->save_marks(); | |
2936 for (uint i = 0; i < ParallelGCThreads; i++) { | |
2937 _par_gc_thread_states[i]->promo.startTrackingPromotions(); | |
2938 } | |
2939 } | |
2940 | |
2941 bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() { | |
2942 return cmsSpace()->no_allocs_since_save_marks(); | |
2943 } | |
2944 | |
2945 #define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \ | |
2946 \ | |
2947 void ConcurrentMarkSweepGeneration:: \ | |
2948 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \ | |
2949 cl->set_generation(this); \ | |
2950 cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl); \ | |
2951 cl->reset_generation(); \ | |
2952 save_marks(); \ | |
2953 } | |
2954 | |
2955 ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN) | |
2956 | |
2957 void | |
2958 ConcurrentMarkSweepGeneration::object_iterate_since_last_GC(ObjectClosure* blk) | |
2959 { | |
2960 // Not currently implemented; need to do the following. -- ysr. | |
2961 // dld -- I think that is used for some sort of allocation profiler. So it | |
2962 // really means the objects allocated by the mutator since the last | |
2963 // GC. We could potentially implement this cheaply by recording only | |
2964 // the direct allocations in a side data structure. | |
2965 // | |
2966 // I think we probably ought not to be required to support these | |
2967 // iterations at any arbitrary point; I think there ought to be some | |
2968 // call to enable/disable allocation profiling in a generation/space, | |
2969 // and the iterator ought to return the objects allocated in the | |
2970 // gen/space since the enable call, or the last iterator call (which | |
2971 // will probably be at a GC.) That way, for gens like CM&S that would | |
2972 // require some extra data structure to support this, we only pay the | |
2973 // cost when it's in use... | |
2974 cmsSpace()->object_iterate_since_last_GC(blk); | |
2975 } | |
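// Purely as illustration of the "side data structure" idea above (no
// such structure exists here; the names are hypothetical): direct
// allocations could be logged and the log drained by the iterator --
//
//   GrowableArray<HeapWord*>* _mutator_allocs;  // hypothetical log
//   void note_direct_alloc(HeapWord* p) { _mutator_allocs->append(p); }
//
// -- with the log reset at each GC or at each iterator call.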
2976 | |
2977 void | |
2978 ConcurrentMarkSweepGeneration::younger_refs_iterate(OopsInGenClosure* cl) { | |
2979 cl->set_generation(this); | |
2980 younger_refs_in_space_iterate(_cmsSpace, cl); | |
2981 cl->reset_generation(); | |
2982 } | |
2983 | |
2984 void | |
2985 ConcurrentMarkSweepGeneration::oop_iterate(MemRegion mr, OopClosure* cl) { | |
2986 if (freelistLock()->owned_by_self()) { | |
2987 Generation::oop_iterate(mr, cl); | |
2988 } else { | |
2989 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag); | |
2990 Generation::oop_iterate(mr, cl); | |
2991 } | |
2992 } | |
2993 | |
2994 void | |
2995 ConcurrentMarkSweepGeneration::oop_iterate(OopClosure* cl) { | |
2996 if (freelistLock()->owned_by_self()) { | |
2997 Generation::oop_iterate(cl); | |
2998 } else { | |
2999 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag); | |
3000 Generation::oop_iterate(cl); | |
3001 } | |
3002 } | |
3003 | |
3004 void | |
3005 ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) { | |
3006 if (freelistLock()->owned_by_self()) { | |
3007 Generation::object_iterate(cl); | |
3008 } else { | |
3009 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag); | |
3010 Generation::object_iterate(cl); | |
3011 } | |
3012 } | |
3013 | |
3014 void | |
3015 ConcurrentMarkSweepGeneration::pre_adjust_pointers() { | |
3016 } | |
3017 | |
3018 void | |
3019 ConcurrentMarkSweepGeneration::post_compact() { | |
3020 } | |
3021 | |
3022 void | |
3023 ConcurrentMarkSweepGeneration::prepare_for_verify() { | |
3024 // Fix the linear allocation blocks to look like free blocks. | |
3025 | |
3026 // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those | |
3027 // are not called when the heap is verified during universe initialization and | |
3028 // at vm shutdown. | |
3029 if (freelistLock()->owned_by_self()) { | |
3030 cmsSpace()->prepare_for_verify(); | |
3031 } else { | |
3032 MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag); | |
3033 cmsSpace()->prepare_for_verify(); | |
3034 } | |
3035 } | |
3036 | |
3037 void | |
3038 ConcurrentMarkSweepGeneration::verify(bool allow_dirty /* ignored */) { | |
3039 // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those | |
3040 // are not called when the heap is verified during universe initialization and | |
3041 // at vm shutdown. | |
3042 if (freelistLock()->owned_by_self()) { | |
3043 cmsSpace()->verify(false /* ignored */); | |
3044 } else { | |
3045 MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag); | |
3046 cmsSpace()->verify(false /* ignored */); | |
3047 } | |
3048 } | |
3049 | |
3050 void CMSCollector::verify(bool allow_dirty /* ignored */) { | |
3051 _cmsGen->verify(allow_dirty); | |
3052 _permGen->verify(allow_dirty); | |
3053 } | |
3054 | |
3055 #ifndef PRODUCT | |
3056 bool CMSCollector::overflow_list_is_empty() const { | |
3057 assert(_num_par_pushes >= 0, "Inconsistency"); | |
3058 if (_overflow_list == NULL) { | |
3059 assert(_num_par_pushes == 0, "Inconsistency"); | |
3060 } | |
3061 return _overflow_list == NULL; | |
3062 } | |
3063 | |
3064 // The methods verify_work_stacks_empty() and verify_overflow_empty() | |
3065 // merely consolidate assertion checks that appear to occur together frequently. | |
3066 void CMSCollector::verify_work_stacks_empty() const { | |
3067 assert(_markStack.isEmpty(), "Marking stack should be empty"); | |
3068 assert(overflow_list_is_empty(), "Overflow list should be empty"); | |
3069 } | |
3070 | |
3071 void CMSCollector::verify_overflow_empty() const { | |
3072 assert(overflow_list_is_empty(), "Overflow list should be empty"); | |
3073 assert(no_preserved_marks(), "No preserved marks"); | |
3074 } | |
3075 #endif // PRODUCT | |
3076 | |
3077 // Decide if we want to enable class unloading as part of the | |
3078 // ensuing concurrent GC cycle. We will collect the perm gen and | |
3079 // unload classes if it's the case that: | |
3080 // (1) an explicit gc request has been made and the flag | |
3081 // ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR | |
3082 // (2) (a) class unloading is enabled at the command line, and | |
3083 // (b) (i) perm gen threshold has been crossed, or | |
3084 // (ii) old gen is getting really full, or | |
3085 // (iii) the previous N CMS collections did not collect the | |
3086 // perm gen | |
3087 // NOTE: Provided there is no change in the state of the heap between | |
3088 // calls to this method, it should have idempotent results. Moreover, | |
3089 // its results should be monotonically increasing (i.e. going from 0 to 1, | |
3090 // but not 1 to 0) between successive calls between which the heap was | |
3091 // not collected. For the implementation below, it must thus rely on | |
3092 // the property that concurrent_cycles_since_last_unload() | |
3093 // will not decrease unless a collection cycle happened and that | |
3094 // _permGen->should_concurrent_collect() and _cmsGen->is_too_full() are | |
3095 // themselves also monotonic in that sense. See check_monotonicity() | |
3096 // below. | |
3097 bool CMSCollector::update_should_unload_classes() { | |
3098 _should_unload_classes = false; | |
3099 // Condition 1 above | |
3100 if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) { | |
3101 _should_unload_classes = true; | |
3102 } else if (CMSClassUnloadingEnabled) { // Condition 2.a above | |
3103 // Disjuncts 2.b.(i,ii,iii) above | |
3104 _should_unload_classes = (concurrent_cycles_since_last_unload() >= | |
3105 CMSClassUnloadingMaxInterval) | |
3106 || _permGen->should_concurrent_collect() | |
3107 || _cmsGen->is_too_full(); | |
3108 } | |
3109 return _should_unload_classes; | |
3110 } | |
3111 | |
3112 bool ConcurrentMarkSweepGeneration::is_too_full() const { | |
3113 bool res = should_concurrent_collect(); | |
3114 res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0); | |
3115 return res; | |
3116 } | |
3117 | |
0 | 3118 void CMSCollector::setup_cms_unloading_and_verification_state() { |
3119 const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC | |
3120 || VerifyBeforeExit; | |
3121 const int rso = SharedHeap::SO_Symbols | SharedHeap::SO_Strings | |
3122 | SharedHeap::SO_CodeCache; | |
3123 | |
3124 if (should_unload_classes()) { // Should unload classes this cycle |
0 | 3125 remove_root_scanning_option(rso); // Shrink the root set appropriately |
3126 set_verifying(should_verify); // Set verification state for this cycle | |
3127 return; // Nothing else needs to be done at this time | |
3128 } | |
3129 | |
3130 // Not unloading classes this cycle | |
3131 assert(!should_unload_classes(), "Inconsistency!"); | |
3132 if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) { |
0 | 3133 // We were not verifying, or we _were_ unloading classes in the last cycle, |
3134 // AND some verification options are enabled this cycle; in this case, | |
3135 // we must make sure that the deadness map is allocated if not already so, | |
3136 // and cleared (if already allocated previously -- | |
3137 // CMSBitMap::sizeInBits() is used to determine if it's allocated). | |
3138 if (perm_gen_verify_bit_map()->sizeInBits() == 0) { | |
3139 if (!perm_gen_verify_bit_map()->allocate(_permGen->reserved())) { | |
3140 warning("Failed to allocate permanent generation verification CMS Bit Map;\n" | |
3141 "permanent generation verification disabled"); | |
3142 return; // Note that we leave verification disabled, so we'll retry this | |
3143 // allocation next cycle. We _could_ remember this failure | |
3144 // and skip further attempts and permanently disable verification | |
3145 // attempts if that is considered more desirable. | |
3146 } | |
3147 assert(perm_gen_verify_bit_map()->covers(_permGen->reserved()), | |
3148 "_perm_gen_ver_bit_map inconsistency?"); | |
3149 } else { | |
3150 perm_gen_verify_bit_map()->clear_all(); | |
3151 } | |
3152 // Include symbols, strings and code cache elements to prevent their resurrection. | |
3153 add_root_scanning_option(rso); | |
3154 set_verifying(true); | |
3155 } else if (verifying() && !should_verify) { | |
3156 // We were verifying, but some verification flags got disabled. | |
3157 set_verifying(false); | |
3158 // Exclude symbols, strings and code cache elements from root scanning to | |
3159 // reduce IM and RM pauses. | |
3160 remove_root_scanning_option(rso); | |
3161 } | |
3162 } | |
3163 | |
3164 | |
3165 #ifndef PRODUCT | |
3166 HeapWord* CMSCollector::block_start(const void* p) const { | |
3167 const HeapWord* addr = (HeapWord*)p; | |
3168 if (_span.contains(p)) { | |
3169 if (_cmsGen->cmsSpace()->is_in_reserved(addr)) { | |
3170 return _cmsGen->cmsSpace()->block_start(p); | |
3171 } else { | |
3172 assert(_permGen->cmsSpace()->is_in_reserved(addr), | |
3173 "Inconsistent _span?"); | |
3174 return _permGen->cmsSpace()->block_start(p); | |
3175 } | |
3176 } | |
3177 return NULL; | |
3178 } | |
3179 #endif | |
3180 | |
3181 HeapWord* | |
3182 ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size, | |
3183 bool tlab, | |
3184 bool parallel) { | |
3185 assert(!tlab, "Can't deal with TLAB allocation"); | |
3186 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag); | |
3187 expand(word_size*HeapWordSize, MinHeapDeltaBytes, | |
3188 CMSExpansionCause::_satisfy_allocation); | |
3189 if (GCExpandToAllocateDelayMillis > 0) { | |
3190 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false); | |
3191 } | |
3192 return have_lock_and_allocate(word_size, tlab); |
0 | 3193 } |
3194 | |
3195 // YSR: All of this generation expansion/shrinking stuff is an exact copy of | |
3196 // OneContigSpaceCardGeneration, which makes me wonder if we should move this | |
3197 // to CardGeneration and share it... | |
3198 void ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes, | |
3199 CMSExpansionCause::Cause cause) | |
3200 { | |
3201 assert_locked_or_safepoint(Heap_lock); | |
3202 | |
3203 size_t aligned_bytes = ReservedSpace::page_align_size_up(bytes); | |
3204 size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes); | |
3205 bool success = false; | |
3206 if (aligned_expand_bytes > aligned_bytes) { | |
3207 success = grow_by(aligned_expand_bytes); | |
3208 } | |
3209 if (!success) { | |
3210 success = grow_by(aligned_bytes); | |
3211 } | |
3212 if (!success) { | |
3213 size_t remaining_bytes = _virtual_space.uncommitted_size(); | |
3214 if (remaining_bytes > 0) { | |
3215 success = grow_by(remaining_bytes); | |
3216 } | |
3217 } | |
3218 if (GC_locker::is_active()) { | |
3219 if (PrintGC && Verbose) { | |
3220 gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead"); | |
3221 } | |
3222 } | |
3223 // remember why we expanded; this information is used | |
3224 // by shouldConcurrentCollect() when making decisions on whether to start | |
3225 // a new CMS cycle. | |
3226 if (success) { | |
3227 set_expansion_cause(cause); | |
3228 if (PrintGCDetails && Verbose) { | |
3229 gclog_or_tty->print_cr("Expanded CMS gen for %s", | |
3230 CMSExpansionCause::to_string(cause)); | |
3231 } | |
3232 } | |
3233 } | |
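// To summarize the fallback order above: first try the expansion hint
// (when it exceeds the request), then the exact page-aligned request,
// and finally whatever uncommitted space remains in the virtual space.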
3234 | |
3235 HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) { | |
3236 HeapWord* res = NULL; | |
3237 MutexLocker x(ParGCRareEvent_lock); | |
3238 while (true) { | |
3239 // Expansion by some other thread might make alloc OK now: | |
3240 res = ps->lab.alloc(word_sz); | |
3241 if (res != NULL) return res; | |
3242 // If there's not enough expansion space available, give up. | |
3243 if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) { | |
3244 return NULL; | |
3245 } | |
3246 // Otherwise, we try expansion. | |
3247 expand(word_sz*HeapWordSize, MinHeapDeltaBytes, | |
3248 CMSExpansionCause::_allocate_par_lab); | |
3249 // Now go around the loop and try alloc again; | |
3250 // A competing par_promote might beat us to the expansion space, | |
3251 // so we may go around the loop again if promotion fails again. | |
3252 if (GCExpandToAllocateDelayMillis > 0) { | |
3253 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false); | |
3254 } | |
3255 } | |
3256 } | |
3257 | |
3258 | |
3259 bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space( | |
3260 PromotionInfo* promo) { | |
3261 MutexLocker x(ParGCRareEvent_lock); | |
3262 size_t refill_size_bytes = promo->refillSize() * HeapWordSize; | |
3263 while (true) { | |
3264 // Expansion by some other thread might make alloc OK now: | |
3265 if (promo->ensure_spooling_space()) { | |
3266 assert(promo->has_spooling_space(), | |
3267 "Post-condition of successful ensure_spooling_space()"); | |
3268 return true; | |
3269 } | |
3270 // If there's not enough expansion space available, give up. | |
3271 if (_virtual_space.uncommitted_size() < refill_size_bytes) { | |
3272 return false; | |
3273 } | |
3274 // Otherwise, we try expansion. | |
3275 expand(refill_size_bytes, MinHeapDeltaBytes, | |
3276 CMSExpansionCause::_allocate_par_spooling_space); | |
3277 // Now go around the loop and try alloc again; | |
3278 // A competing allocation might beat us to the expansion space, | |
3279 // so we may go around the loop again if allocation fails again. | |
3280 if (GCExpandToAllocateDelayMillis > 0) { | |
3281 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false); | |
3282 } | |
3283 } | |
3284 } | |
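// Both expand_and_par_lab_allocate() and expand_and_ensure_spooling_space()
// above follow the same retry discipline under ParGCRareEvent_lock:
//
//   loop {
//     retry the allocation (another thread may have expanded);
//     if uncommitted space cannot cover the request, fail;
//     expand(), optionally sleep, and go around again;
//   }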
3285 | |
3286 | |
3287 | |
3288 void ConcurrentMarkSweepGeneration::shrink(size_t bytes) { | |
3289 assert_locked_or_safepoint(Heap_lock); | |
3290 size_t size = ReservedSpace::page_align_size_down(bytes); | |
3291 if (size > 0) { | |
3292 shrink_by(size); | |
3293 } | |
3294 } | |
3295 | |
3296 bool ConcurrentMarkSweepGeneration::grow_by(size_t bytes) { | |
3297 assert_locked_or_safepoint(Heap_lock); | |
3298 bool result = _virtual_space.expand_by(bytes); | |
3299 if (result) { | |
3300 HeapWord* old_end = _cmsSpace->end(); | |
3301 size_t new_word_size = | |
3302 heap_word_size(_virtual_space.committed_size()); | |
3303 MemRegion mr(_cmsSpace->bottom(), new_word_size); | |
3304 _bts->resize(new_word_size); // resize the block offset shared array | |
3305 Universe::heap()->barrier_set()->resize_covered_region(mr); | |
3306 // Hmmmm... why doesn't CFLS::set_end verify locking? | |
3307 // This is quite ugly; FIX ME XXX | |
3308 _cmsSpace->assert_locked(); | |
3309 _cmsSpace->set_end((HeapWord*)_virtual_space.high()); | |
3310 | |
3311 // update the space and generation capacity counters | |
3312 if (UsePerfData) { | |
3313 _space_counters->update_capacity(); | |
3314 _gen_counters->update_all(); | |
3315 } | |
3316 | |
3317 if (Verbose && PrintGC) { | |
3318 size_t new_mem_size = _virtual_space.committed_size(); | |
3319 size_t old_mem_size = new_mem_size - bytes; | |
3320 gclog_or_tty->print_cr("Expanding %s from "SIZE_FORMAT"K by "SIZE_FORMAT"K to "SIZE_FORMAT"K", | |
3321 name(), old_mem_size/K, bytes/K, new_mem_size/K); | |
3322 } | |
3323 } | |
3324 return result; | |
3325 } | |
3326 | |
3327 bool ConcurrentMarkSweepGeneration::grow_to_reserved() { | |
3328 assert_locked_or_safepoint(Heap_lock); | |
3329 bool success = true; | |
3330 const size_t remaining_bytes = _virtual_space.uncommitted_size(); | |
3331 if (remaining_bytes > 0) { | |
3332 success = grow_by(remaining_bytes); | |
3333 DEBUG_ONLY(if (!success) warning("grow to reserved failed");) | |
3334 } | |
3335 return success; | |
3336 } | |
3337 | |
3338 void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) { | |
3339 assert_locked_or_safepoint(Heap_lock); | |
3340 assert_lock_strong(freelistLock()); | |
3341 // XXX Fix when compaction is implemented. | |
3342 warning("Shrinking of CMS not yet implemented"); | |
3343 return; | |
3344 } | |
3345 | |
3346 | |
3347 // Simple ctor/dtor wrapper for accounting & timer chores around concurrent | |
3348 // phases. | |
3349 class CMSPhaseAccounting: public StackObj { | |
3350 public: | |
3351 CMSPhaseAccounting(CMSCollector *collector, | |
3352 const char *phase, | |
3353 bool print_cr = true); | |
3354 ~CMSPhaseAccounting(); | |
3355 | |
3356 private: | |
3357 CMSCollector *_collector; | |
3358 const char *_phase; | |
3359 elapsedTimer _wallclock; | |
3360 bool _print_cr; | |
3361 | |
3362 public: | |
3363 // Not MT-safe; so do not pass around these StackObj's | |
3364 // where they may be accessed by other threads. | |
3365 jlong wallclock_millis() { | |
3366 assert(_wallclock.is_active(), "Wall clock should not stop"); | |
3367 _wallclock.stop(); // to record time | |
3368 jlong ret = _wallclock.milliseconds(); | |
3369 _wallclock.start(); // restart | |
3370 return ret; | |
3371 } | |
3372 }; | |
3373 | |
3374 CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector, | |
3375 const char *phase, | |
3376 bool print_cr) : | |
3377 _collector(collector), _phase(phase), _print_cr(print_cr) { | |
3378 | |
3379 if (PrintCMSStatistics != 0) { | |
3380 _collector->resetYields(); | |
3381 } | |
3382 if (PrintGCDetails && PrintGCTimeStamps) { | |
3383 gclog_or_tty->date_stamp(PrintGCDateStamps); | |
3384 gclog_or_tty->stamp(); | |
3385 gclog_or_tty->print_cr(": [%s-concurrent-%s-start]", | |
3386 _collector->cmsGen()->short_name(), _phase); | |
3387 } | |
3388 _collector->resetTimer(); | |
3389 _wallclock.start(); | |
3390 _collector->startTimer(); | |
3391 } | |
3392 | |
3393 CMSPhaseAccounting::~CMSPhaseAccounting() { | |
3394 assert(_wallclock.is_active(), "Wall clock should not have stopped"); | |
3395 _collector->stopTimer(); | |
3396 _wallclock.stop(); | |
3397 if (PrintGCDetails) { | |
3398 gclog_or_tty->date_stamp(PrintGCDateStamps); | |
3399 if (PrintGCTimeStamps) { | |
3400 gclog_or_tty->stamp(); | |
3401 gclog_or_tty->print(": "); | |
3402 } | |
3403 gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]", | |
3404 _collector->cmsGen()->short_name(), | |
3405 _phase, _collector->timerValue(), _wallclock.seconds()); | |
3406 if (_print_cr) { | |
3407 gclog_or_tty->print_cr(""); | |
3408 } | |
3409 if (PrintCMSStatistics != 0) { | |
3410 gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase, | |
3411 _collector->yields()); | |
3412 } | |
3413 } | |
3414 } | |
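// Typical use is as a stack-allocated guard bracketing a concurrent
// phase; the concurrent phases later in this file use it in essentially
// this form (sketch):
//
//   {
//     CMSPhaseAccounting pa(this, "mark");  // prints [...-mark-start], starts timers
//     // ... do the concurrent marking work, yielding as needed ...
//   } // destructor stops the timers and prints [CMS-concurrent-mark: .../...]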
3415 | |
3416 // CMS work | |
3417 | |
3418 // Checkpoint the roots into this generation from outside | |
3419 // this generation. [Note this initial checkpoint need only | |
3420 // be approximate -- we'll do a catch up phase subsequently.] | |
3421 void CMSCollector::checkpointRootsInitial(bool asynch) { | |
3422 assert(_collectorState == InitialMarking, "Wrong collector state"); | |
3423 check_correct_thread_executing(); | |
3424 ReferenceProcessor* rp = ref_processor(); | |
3425 SpecializationStats::clear(); | |
3426 assert(_restart_addr == NULL, "Control point invariant"); | |
3427 if (asynch) { | |
3428 // acquire locks for subsequent manipulations | |
3429 MutexLockerEx x(bitMapLock(), | |
3430 Mutex::_no_safepoint_check_flag); | |
3431 checkpointRootsInitialWork(asynch); | |
3432 rp->verify_no_references_recorded(); | |
3433 rp->enable_discovery(); // enable ("weak") refs discovery | |
3434 _collectorState = Marking; | |
3435 } else { | |
3436 // (Weak) Refs discovery: this is controlled from genCollectedHeap::do_collection | |
3437 // which recognizes that we are a CMS generation and doesn't try to turn on | |
3438 // discovery; verify that it isn't meddling. | |
3439 assert(!rp->discovery_is_atomic(), | |
3440 "incorrect setting of discovery predicate"); | |
3441 assert(!rp->discovery_enabled(), "genCollectedHeap shouldn't control " | |
3442 "ref discovery for this generation kind"); | |
3443 // already have locks | |
3444 checkpointRootsInitialWork(asynch); | |
3445 rp->enable_discovery(); // now enable ("weak") refs discovery | |
3446 _collectorState = Marking; | |
3447 } | |
3448 SpecializationStats::print(); | |
3449 } | |
3450 | |
3451 void CMSCollector::checkpointRootsInitialWork(bool asynch) { | |
3452 assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped"); | |
3453 assert(_collectorState == InitialMarking, "just checking"); | |
3454 | |
3455 // If there has not been a GC[n-1] since last GC[n] cycle completed, | |
3456 // precede our marking with a collection of all | |
3457 // younger generations to keep floating garbage to a minimum. | |
3458 // XXX: we won't do this for now -- it's an optimization to be done later. | |
3459 | |
3460 // already have locks | |
3461 assert_lock_strong(bitMapLock()); | |
3462 assert(_markBitMap.isAllClear(), "was reset at end of previous cycle"); | |
3463 | |
3464 // Setup the verification and class unloading state for this | |
3465 // CMS collection cycle. | |
3466 setup_cms_unloading_and_verification_state(); | |
3467 | |
3468 NOT_PRODUCT(TraceTime t("\ncheckpointRootsInitialWork", | |
3469 PrintGCDetails && Verbose, true, gclog_or_tty);) | |
3470 if (UseAdaptiveSizePolicy) { | |
3471 size_policy()->checkpoint_roots_initial_begin(); | |
3472 } | |
3473 | |
3474 // Reset all the PLAB chunk arrays if necessary. | |
3475 if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) { | |
3476 reset_survivor_plab_arrays(); | |
3477 } | |
3478 | |
3479 ResourceMark rm; | |
3480 HandleMark hm; | |
3481 | |
3482 FalseClosure falseClosure; | |
3483 // In the case of a synchronous collection, we will elide the | |
3484 // remark step, so it's important to catch all the nmethod oops | |
3485 // in this step; hence the last argument to the constructor below. | |
3486 MarkRefsIntoClosure notOlder(_span, &_markBitMap, !asynch /* nmethods */); | |
3487 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
3488 | |
3489 verify_work_stacks_empty(); | |
3490 verify_overflow_empty(); | |
3491 | |
3492 gch->ensure_parsability(false); // fill TLABs, but no need to retire them | |
3493 // Update the saved marks which may affect the root scans. | |
3494 gch->save_marks(); | |
3495 | |
3496 // weak reference processing has not started yet. | |
3497 ref_processor()->set_enqueuing_is_done(false); | |
3498 | |
3499 { | |
3500 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;) | |
3501 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. | |
3502 gch->gen_process_strong_roots(_cmsGen->level(), | |
3503 true, // younger gens are roots | |
3504 true, // collecting perm gen | |
3505 SharedHeap::ScanningOption(roots_scanning_options()), | |
3506 NULL, ¬Older); | |
3507 } | |
3508 | |
3509 // Clear mod-union table; it will be dirtied in the prologue of | |
3510 // CMS generation per each younger generation collection. | |
3511 | |
3512 assert(_modUnionTable.isAllClear(), | |
3513 "Was cleared in most recent final checkpoint phase" | |
3514 " or no bits are set in the gc_prologue before the start of the next " | |
3515 "subsequent marking phase."); | |
3516 | |
3517 // Temporarily disabled, since pre/post-consumption closures don't | |
3518 // care about precleaned cards | |
3519 #if 0 | |
3520 { | |
3521 MemRegion mr = MemRegion((HeapWord*)_virtual_space.low(), | |
3522 (HeapWord*)_virtual_space.high()); | |
3523 _ct->ct_bs()->preclean_dirty_cards(mr); | |
3524 } | |
3525 #endif | |
3526 | |
3527 // Save the end of the used_region of the constituent generations | |
3528 // to be used to limit the extent of sweep in each generation. | |
3529 save_sweep_limits(); | |
3530 if (UseAdaptiveSizePolicy) { | |
3531 size_policy()->checkpoint_roots_initial_end(gch->gc_cause()); | |
3532 } | |
3533 verify_overflow_empty(); | |
3534 } | |
3535 | |
3536 bool CMSCollector::markFromRoots(bool asynch) { | |
3537 // we might be tempted to assert that: | |
3538 // assert(asynch == !SafepointSynchronize::is_at_safepoint(), | |
3539 // "inconsistent argument?"); | |
3540 // However that wouldn't be right, because it's possible that | |
3541 // a safepoint is indeed in progress as a younger generation | |
3542 // stop-the-world GC happens even as we mark in this generation. | |
3543 assert(_collectorState == Marking, "inconsistent state?"); | |
3544 check_correct_thread_executing(); | |
3545 verify_overflow_empty(); | |
3546 | |
3547 bool res; | |
3548 if (asynch) { | |
3549 | |
3550 // Start the timers for adaptive size policy for the concurrent phases | |
3551 // Do it here so that the foreground MS can use the concurrent | |
3552 // timer since a foreground MS might have the sweep done concurrently | |
3553 // or STW. | |
3554 if (UseAdaptiveSizePolicy) { | |
3555 size_policy()->concurrent_marking_begin(); | |
3556 } | |
3557 | |
3558 // Weak ref discovery note: We may be discovering weak | |
3559 // refs in this generation concurrently (but interleaved) with | |
3560 // weak ref discovery by a younger generation collector. | |
3561 | |
3562 CMSTokenSyncWithLocks ts(true, bitMapLock()); | |
3563 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); | |
3564 CMSPhaseAccounting pa(this, "mark", !PrintGCDetails); | |
3565 res = markFromRootsWork(asynch); | |
3566 if (res) { | |
3567 _collectorState = Precleaning; | |
3568 } else { // We failed and a foreground collection wants to take over | |
3569 assert(_foregroundGCIsActive, "internal state inconsistency"); | |
3570 assert(_restart_addr == NULL, "foreground will restart from scratch"); | |
3571 if (PrintGCDetails) { | |
3572 gclog_or_tty->print_cr("bailing out to foreground collection"); | |
3573 } | |
3574 } | |
3575 if (UseAdaptiveSizePolicy) { | |
3576 size_policy()->concurrent_marking_end(); | |
3577 } | |
3578 } else { | |
3579 assert(SafepointSynchronize::is_at_safepoint(), | |
3580 "inconsistent with asynch == false"); | |
3581 if (UseAdaptiveSizePolicy) { | |
3582 size_policy()->ms_collection_marking_begin(); | |
3583 } | |
3584 // already have locks | |
3585 res = markFromRootsWork(asynch); | |
3586 _collectorState = FinalMarking; | |
3587 if (UseAdaptiveSizePolicy) { | |
3588 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
3589 size_policy()->ms_collection_marking_end(gch->gc_cause()); | |
3590 } | |
3591 } | |
3592 verify_overflow_empty(); | |
3593 return res; | |
3594 } | |
3595 | |
3596 bool CMSCollector::markFromRootsWork(bool asynch) { | |
3597 // iterate over marked bits in bit map, doing a full scan and mark | |
3598 // from these roots using the following algorithm: | |
3599 // . if oop is to the right of the current scan pointer, | |
3600 // mark corresponding bit (we'll process it later) | |
3601 // . else (oop is to left of current scan pointer) | |
3602 // push oop on marking stack | |
3603 // . drain the marking stack | |
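// For instance (an illustrative restatement of the rules above): with
// the scan pointer at F, a newly discovered unmarked object at A > F
// just gets its bit set -- the scan will reach and trace A in due
// course; an unmarked object at A < F has already been passed over, so
// it is pushed on the marking stack and traced when the stack drains.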
3604 | |
3605 // Note that when we do a marking step we need to hold the | |
3606 // bit map lock -- recall that direct allocation (by mutators) | |
3607 // and promotion (by younger generation collectors) is also | |
3608 // marking the bit map. [the so-called allocate live policy.] | |
3609 // Because the implementation of bit map marking is not | |
3610 // robust wrt simultaneous marking of bits in the same word, | |
3611 // we need to make sure that there is no such interference | |
3612 // between concurrent such updates. | |
3613 | |
3614 // already have locks | |
3615 assert_lock_strong(bitMapLock()); | |
3616 | |
3617 // Clear the revisit stack, just in case there are any | |
3618 // obsolete contents from a short-circuited previous CMS cycle. | |
3619 _revisitStack.reset(); | |
3620 verify_work_stacks_empty(); | |
3621 verify_overflow_empty(); | |
3622 assert(_revisitStack.isEmpty(), "tabula rasa"); | |
3623 | |
3624 bool result = false; | |
3625 if (CMSConcurrentMTEnabled && ParallelCMSThreads > 0) { | |
3626 result = do_marking_mt(asynch); | |
3627 } else { | |
3628 result = do_marking_st(asynch); | |
3629 } | |
3630 return result; | |
3631 } | |
3632 | |
3633 // Forward decl | |
3634 class CMSConcMarkingTask; | |
3635 | |
3636 class CMSConcMarkingTerminator: public ParallelTaskTerminator { | |
3637 CMSCollector* _collector; | |
3638 CMSConcMarkingTask* _task; | |
3639 bool _yield; | |
3640 protected: | |
3641 virtual void yield(); | |
3642 public: | |
3643 // "n_threads" is the number of threads to be terminated. | |
3644 // "queue_set" is a set of work queues of other threads. | |
3645 // "collector" is the CMS collector associated with this task terminator. | |
3646 // "yield" indicates whether we need the gang as a whole to yield. | |
3647 CMSConcMarkingTerminator(int n_threads, TaskQueueSetSuper* queue_set, | |
3648 CMSCollector* collector, bool yield) : | |
3649 ParallelTaskTerminator(n_threads, queue_set), | |
3650 _collector(collector), | |
3651 _yield(yield) { } | |
3652 | |
3653 void set_task(CMSConcMarkingTask* task) { | |
3654 _task = task; | |
3655 } | |
3656 }; | |
3657 | |
3658 // MT Concurrent Marking Task | |
3659 class CMSConcMarkingTask: public YieldingFlexibleGangTask { | |
3660 CMSCollector* _collector; | |
3661 YieldingFlexibleWorkGang* _workers; // the whole gang | |
3662 int _n_workers; // requested/desired # workers | |
3663 bool _asynch; | |
3664 bool _result; | |
3665 CompactibleFreeListSpace* _cms_space; | |
3666 CompactibleFreeListSpace* _perm_space; | |
3667 HeapWord* _global_finger; | |
3668 | |
3669 // Exposed here for yielding support | |
3670 Mutex* const _bit_map_lock; | |
3671 | |
3672 // The per thread work queues, available here for stealing | |
3673 OopTaskQueueSet* _task_queues; | |
3674 CMSConcMarkingTerminator _term; | |
3675 | |
3676 public: | |
3677 CMSConcMarkingTask(CMSCollector* collector, | |
3678 CompactibleFreeListSpace* cms_space, | |
3679 CompactibleFreeListSpace* perm_space, | |
3680 bool asynch, int n_workers, | |
3681 YieldingFlexibleWorkGang* workers, | |
3682 OopTaskQueueSet* task_queues): | |
3683 YieldingFlexibleGangTask("Concurrent marking done multi-threaded"), | |
3684 _collector(collector), | |
3685 _cms_space(cms_space), | |
3686 _perm_space(perm_space), | |
3687 _asynch(asynch), _n_workers(n_workers), _result(true), | |
3688 _workers(workers), _task_queues(task_queues), | |
3689 _term(n_workers, task_queues, _collector, asynch), | |
3690 _bit_map_lock(collector->bitMapLock()) | |
3691 { | |
3692 assert(n_workers <= workers->total_workers(), | |
3693 "Else termination won't work correctly today"); // XXX FIX ME! | |
3694 _requested_size = n_workers; | |
3695 _term.set_task(this); | |
3696 assert(_cms_space->bottom() < _perm_space->bottom(), | |
3697 "Finger incorrectly initialized below"); | |
3698 _global_finger = _cms_space->bottom(); | |
3699 } | |
3700 | |
3701 | |
3702 OopTaskQueueSet* task_queues() { return _task_queues; } | |
3703 | |
3704 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); } | |
3705 | |
3706 HeapWord** global_finger_addr() { return &_global_finger; } | |
3707 | |
3708 CMSConcMarkingTerminator* terminator() { return &_term; } | |
3709 | |
3710 void work(int i); | |
3711 | |
3712 virtual void coordinator_yield(); // stuff done by coordinator | |
3713 bool result() { return _result; } | |
3714 | |
3715 void reset(HeapWord* ra) { | |
3716 _term.reset_for_reuse(); | |
3717 } | |
3718 | |
3719 static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk, | |
3720 OopTaskQueue* work_q); | |
3721 | |
3722 private: | |
3723 void do_scan_and_mark(int i, CompactibleFreeListSpace* sp); | |
3724 void do_work_steal(int i); | |
3725 void bump_global_finger(HeapWord* f); | |
3726 }; | |
3727 | |
3728 void CMSConcMarkingTerminator::yield() { | |
3729 if (ConcurrentMarkSweepThread::should_yield() && | |
3730 !_collector->foregroundGCIsActive() && | |
3731 _yield) { | |
3732 _task->yield(); | |
3733 } else { | |
3734 ParallelTaskTerminator::yield(); | |
3735 } | |
3736 } | |
3737 | |
3738 //////////////////////////////////////////////////////////////// | |
3739 // Concurrent Marking Algorithm Sketch | |
3740 //////////////////////////////////////////////////////////////// | |
3741 // Until all tasks exhausted (both spaces): | |
3742 // -- claim next available chunk | |
3743 // -- bump global finger via CAS | |
3744 // -- find first object that starts in this chunk | |
3745 // and start scanning bitmap from that position | |
3746 // -- scan marked objects for oops | |
3747 // -- CAS-mark target, and if successful: | |
3748 // . if target oop is above global finger (volatile read) | |
3749 // nothing to do | |
3750 // . if target oop is in chunk and above local finger | |
3751 // then nothing to do | |
3752 // . else push on work-queue | |
3753 // -- Deal with possible overflow issues: | |
3754 // . local work-queue overflow causes stuff to be pushed on | |
3755 // global (common) overflow queue | |
3756 // . always first empty local work queue | |
3757 // . then get a batch of oops from global work queue if any | |
3758 // . then do work stealing | |
3759 // -- When all tasks claimed (both spaces) | |
3760 // and local work queue empty, | |
3761 // then in a loop do: | |
3762 // . check global overflow stack; steal a batch of oops and trace | |
3763 // . try to steal from other threads if GOS is empty | |
3764 // . if neither is available, offer termination | |
3765 // -- Terminate and return result | |
3766 // | |
3767 void CMSConcMarkingTask::work(int i) { | |
3768 elapsedTimer _timer; | |
3769 ResourceMark rm; | |
3770 HandleMark hm; | |
3771 | |
3772 DEBUG_ONLY(_collector->verify_overflow_empty();) | |
3773 | |
3774 // Before we begin work, our work queue should be empty | |
3775 assert(work_queue(i)->size() == 0, "Expected to be empty"); | |
3776 // Scan the bitmap covering _cms_space, tracing through grey objects. | |
3777 _timer.start(); | |
3778 do_scan_and_mark(i, _cms_space); | |
3779 _timer.stop(); | |
3780 if (PrintCMSStatistics != 0) { | |
3781 gclog_or_tty->print_cr("Finished cms space scanning in %dth thread: %3.3f sec", | |
3782 i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers | |
3783 } | |
3784 | |
3785 // ... do the same for the _perm_space | |
3786 _timer.reset(); | |
3787 _timer.start(); | |
3788 do_scan_and_mark(i, _perm_space); | |
3789 _timer.stop(); | |
3790 if (PrintCMSStatistics != 0) { | |
3791 gclog_or_tty->print_cr("Finished perm space scanning in %dth thread: %3.3f sec", | |
3792 i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers | |
3793 } | |
3794 | |
3795 // ... do work stealing | |
3796 _timer.reset(); | |
3797 _timer.start(); | |
3798 do_work_steal(i); | |
3799 _timer.stop(); | |
3800 if (PrintCMSStatistics != 0) { | |
3801 gclog_or_tty->print_cr("Finished work stealing in %dth thread: %3.3f sec", | |
3802 i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers | |
3803 } | |
3804 assert(_collector->_markStack.isEmpty(), "Should have been emptied"); | |
3805 assert(work_queue(i)->size() == 0, "Should have been emptied"); | |
3806 // Note that under the current task protocol, the | |
3807 // following assertion is true even if the spaces | |
3808 // expanded since the completion of the concurrent | |
3809 // marking. XXX This will likely change under a strict | |
3810 // ABORT semantics. | |
3811 assert(_global_finger > _cms_space->end() && | |
3812 _global_finger >= _perm_space->end(), | |
3813 "All tasks have been completed"); | |
3814 DEBUG_ONLY(_collector->verify_overflow_empty();) | |
3815 } | |
3816 | |
3817 void CMSConcMarkingTask::bump_global_finger(HeapWord* f) { | |
3818 HeapWord* read = _global_finger; | |
3819 HeapWord* cur = read; | |
3820 while (f > read) { | |
3821 cur = read; | |
3822 read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur); | |
3823 if (cur == read) { | |
3824 // our cas succeeded | |
3825 assert(_global_finger >= f, "protocol consistency"); | |
3826 break; | |
3827 } | |
3828 } | |
3829 } | |
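// An illustrative trace of the CAS loop above: if _global_finger
// publishes 0x1000 and two workers race to bump it to 0x2000 and
// 0x3000, each cmpxchg installs its value only if the published word
// is still the one it last read; a loser re-reads and retries only
// while its own target still exceeds the published value. The finger
// therefore advances monotonically to 0x3000 under any interleaving.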
3830 | |
3831 // This is really inefficient, and should be redone by | |
3832 // using (not yet available) block-read and -write interfaces to the | |
3833 // stack and the work_queue. XXX FIX ME !!! | |
3834 bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk, | |
3835 OopTaskQueue* work_q) { | |
3836 // Fast lock-free check | |
3837 if (ovflw_stk->length() == 0) { | |
3838 return false; | |
3839 } | |
3840 assert(work_q->size() == 0, "Shouldn't steal"); | |
3841 MutexLockerEx ml(ovflw_stk->par_lock(), | |
3842 Mutex::_no_safepoint_check_flag); | |
3843 // Grab up to 1/4 the size of the work queue | |
3844 size_t num = MIN2((size_t)work_q->max_elems()/4, | |
3845 (size_t)ParGCDesiredObjsFromOverflowList); | |
3846 num = MIN2(num, ovflw_stk->length()); | |
3847 for (int i = (int) num; i > 0; i--) { | |
3848 oop cur = ovflw_stk->pop(); | |
3849 assert(cur != NULL, "Counted wrong?"); | |
3850 work_q->push(cur); | |
3851 } | |
3852 return num > 0; | |
3853 } | |
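// Batch sizing above, by way of example (illustrative numbers): with
// work_q->max_elems() == 16K and ParGCDesiredObjsFromOverflowList ==
// 20, we pop min(16K/4, 20, ovflw_stk->length()) oops -- enough to
// restart useful work without draining the shared stack that other
// workers may be refilling from as well.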
3854 | |
3855 void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) { | |
3856 SequentialSubTasksDone* pst = sp->conc_par_seq_tasks(); | |
3857 int n_tasks = pst->n_tasks(); | |
3858 // We allow that there may be no tasks to do here because | |
3859 // we are restarting after a stack overflow. | |
3860 assert(pst->valid() || n_tasks == 0, "Uninitialized use?"); | |
3861 int nth_task = 0; | |
3862 | |
3863 HeapWord* start = sp->bottom(); | |
3864 size_t chunk_size = sp->marking_task_size(); | |
3865 while (!pst->is_task_claimed(/* reference */ nth_task)) { | |
3866 // Having claimed the nth task in this space, | |
3867 // compute the chunk that it corresponds to: | |
3868 MemRegion span = MemRegion(start + nth_task*chunk_size, | |
3869 start + (nth_task+1)*chunk_size); | |
3870 // Try and bump the global finger via a CAS; | |
3871 // note that we need to do the global finger bump | |
3872 // _before_ taking the intersection below, because | |
3873 // the task corresponding to that region will be | |
3874 // deemed done even if the used_region() expands | |
3875 // because of allocation -- as it almost certainly will | |
3876 // during start-up while the threads yield in the | |
3877 // closure below. | |
3878 HeapWord* finger = span.end(); | |
3879 bump_global_finger(finger); // atomically | |
3880 // There are null tasks here corresponding to chunks | |
3881 // beyond the "top" address of the space. | |
3882 span = span.intersection(sp->used_region()); | |
3883 if (!span.is_empty()) { // Non-null task | |
3884 // We want to skip the first object because | |
3885 // the protocol is to scan any object in its entirety | |
3886 // that _starts_ in this span; a fortiori, any | |
3887 // object starting in an earlier span is scanned | |
3888 // as part of an earlier claimed task. | |
3889 // Below we use the "careful" version of block_start | |
3890 // so we do not try to navigate uninitialized objects. | |
3891 HeapWord* prev_obj = sp->block_start_careful(span.start()); | |
3892 // Below we use a variant of block_size that uses the | |
3893 // Printezis bits to avoid waiting for allocated | |
3894 // objects to become initialized/parsable. | |
3895 while (prev_obj < span.start()) { | |
3896 size_t sz = sp->block_size_no_stall(prev_obj, _collector); | |
3897 if (sz > 0) { | |
3898 prev_obj += sz; | |
3899 } else { | |
3900 // In this case we may end up doing a bit of redundant | |
3901 // scanning, but that appears unavoidable, short of | |
3902 // locking the free list locks; see bug 6324141. | |
3903 break; | |
3904 } | |
3905 } | |
3906 if (prev_obj < span.end()) { | |
3907 MemRegion my_span = MemRegion(prev_obj, span.end()); | |
3908 // Do the marking work within a non-empty span -- | |
3909 // the last argument to the constructor indicates whether the | |
3910 // iteration should be incremental with periodic yields. | |
3911 Par_MarkFromRootsClosure cl(this, _collector, my_span, | |
3912 &_collector->_markBitMap, | |
3913 work_queue(i), | |
3914 &_collector->_markStack, | |
3915 &_collector->_revisitStack, | |
3916 _asynch); | |
3917 _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end()); | |
3918 } // else nothing to do for this task | |
3919 } // else nothing to do for this task | |
3920 } | |
3921 // We'd be tempted to assert here that since there are no | |
3922 // more tasks left to claim in this space, the global_finger | |
3923 // must exceed space->top() and a fortiori space->end(). However, | |
3924 // that would not quite be correct because the bumping of | |
3925 // global_finger occurs strictly after the claiming of a task, | |
3926 // so by the time we reach here the global finger may not yet | |
3927 // have been bumped up by the thread that claimed the last | |
3928 // task. | |
3929 pst->all_tasks_completed(); | |
3930 } | |
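// The claiming arithmetic above, as a sketch: with B == sp->bottom()
// and C == sp->marking_task_size(), task n covers
//
//   MemRegion span(B + n*C, B + (n+1)*C);
//   span = span.intersection(sp->used_region()); // may become empty
//
// A chunk lying wholly above top() intersects to the empty region and
// is a no-op, though its finger bump is still performed first.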
3931 | |
3932 class Par_ConcMarkingClosure: public OopClosure { | |
113 | 3933 private: |
0 | 3934 CMSCollector* _collector; |
3935 MemRegion _span; | |
3936 CMSBitMap* _bit_map; | |
3937 CMSMarkStack* _overflow_stack; | |
3938 CMSMarkStack* _revisit_stack; // XXXXXX Check proper use | |
3939 OopTaskQueue* _work_queue; | |
113 | 3940 protected: |
3941 DO_OOP_WORK_DEFN |
0 | 3942 public: |
3943 Par_ConcMarkingClosure(CMSCollector* collector, OopTaskQueue* work_queue, | |
3944 CMSBitMap* bit_map, CMSMarkStack* overflow_stack): | |
3945 _collector(collector), | |
3946 _span(_collector->_span), | |
3947 _work_queue(work_queue), | |
3948 _bit_map(bit_map), | |
3949 _overflow_stack(overflow_stack) { } // need to initialize revisit stack etc. | |
113 | 3950 virtual void do_oop(oop* p); |
3951 virtual void do_oop(narrowOop* p); |
0 | 3952 void trim_queue(size_t max); |
3953 void handle_stack_overflow(HeapWord* lost); | |
3954 }; | |
3955 | |
3956 // Grey object rescan during work stealing phase -- | |
3957 // the salient assumption here is that stolen oops must | |
3958 // always be initialized, so we do not need to check for | |
3959 // uninitialized objects before scanning here. | |
113 | 3960 void Par_ConcMarkingClosure::do_oop(oop obj) { |
3961 assert(obj->is_oop_or_null(), "expected an oop or NULL"); |
3962 HeapWord* addr = (HeapWord*)obj; |
0 | 3963 // Check if oop points into the CMS generation |
3964 // and is not marked | |
3965 if (_span.contains(addr) && !_bit_map->isMarked(addr)) { | |
3966 // a white object ... | |
3967 // If we manage to "claim" the object, by being the | |
3968 // first thread to mark it, then we push it on our | |
3969 // marking stack | |
3970 if (_bit_map->par_mark(addr)) { // ... now grey | |
3971 // push on work queue (grey set) | |
3972 bool simulate_overflow = false; | |
3973 NOT_PRODUCT( | |
3974 if (CMSMarkStackOverflowALot && | |
3975 _collector->simulate_overflow()) { | |
3976 // simulate a stack overflow | |
3977 simulate_overflow = true; | |
3978 } | |
3979 ) | |
3980 if (simulate_overflow || | |
113 | 3981 !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) { |
0 | 3982 // stack overflow |
3983 if (PrintCMSStatistics != 0) { | |
3984 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at " | |
3985 SIZE_FORMAT, _overflow_stack->capacity()); | |
3986 } | |
3987 // We cannot assert that the overflow stack is full because | |
3988 // it may have been emptied since. | |
3989 assert(simulate_overflow || | |
3990 _work_queue->size() == _work_queue->max_elems(), | |
3991 "Else push should have succeeded"); | |
3992 handle_stack_overflow(addr); | |
3993 } | |
3994 } // Else, some other thread got there first | |
3995 } | |
3996 } | |
3997 | |
113 | 3998 void Par_ConcMarkingClosure::do_oop(oop* p) { Par_ConcMarkingClosure::do_oop_work(p); } |
3999 void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); } |
4000 |
0 | 4001 void Par_ConcMarkingClosure::trim_queue(size_t max) { |
4002 while (_work_queue->size() > max) { | |
4003 oop new_oop; | |
4004 if (_work_queue->pop_local(new_oop)) { | |
4005 assert(new_oop->is_oop(), "Should be an oop"); | |
4006 assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object"); | |
4007 assert(_span.contains((HeapWord*)new_oop), "Not in span"); | |
4008 assert(new_oop->is_parsable(), "Should be parsable"); | |
4009 new_oop->oop_iterate(this); // do_oop() above | |
4010 } | |
4011 } | |
4012 } | |
4013 | |
4014 // Upon stack overflow, we discard (part of) the stack, | |
4015 // remembering the least address amongst those discarded | |
4016 // in CMSCollector's _restart_addr. | |
4017 void Par_ConcMarkingClosure::handle_stack_overflow(HeapWord* lost) { | |
4018 // We need to do this under a mutex to prevent other | |
4019 // workers from interfering with the expansion below. | |
4020 MutexLockerEx ml(_overflow_stack->par_lock(), | |
4021 Mutex::_no_safepoint_check_flag); | |
4022 // Remember the least grey address discarded | |
4023 HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost); | |
4024 _collector->lower_restart_addr(ra); | |
4025 _overflow_stack->reset(); // discard stack contents | |
4026 _overflow_stack->expand(); // expand the stack if possible | |
4027 } | |
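// After the discard above, the marking driver (do_marking_mt() below,
// and analogously do_marking_st() for the single-threaded case)
// notices the non-NULL _restart_addr once the task winds down, and
// re-runs the bitmap iteration from that address to recover the
// tracing work that was lost with the discarded stack contents.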
4028 | |
4029 | |
4030 void CMSConcMarkingTask::do_work_steal(int i) { | |
4031 OopTaskQueue* work_q = work_queue(i); | |
4032 oop obj_to_scan; | |
4033 CMSBitMap* bm = &(_collector->_markBitMap); | |
4034 CMSMarkStack* ovflw = &(_collector->_markStack); | |
4035 int* seed = _collector->hash_seed(i); | |
4036 Par_ConcMarkingClosure cl(_collector, work_q, bm, ovflw); | |
4037 while (true) { | |
4038 cl.trim_queue(0); | |
4039 assert(work_q->size() == 0, "Should have been emptied above"); | |
4040 if (get_work_from_overflow_stack(ovflw, work_q)) { | |
4041 // Can't assert below because the work obtained from the | |
4042 // overflow stack may already have been stolen from us. | |
4043 // assert(work_q->size() > 0, "Work from overflow stack"); | |
4044 continue; | |
4045 } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) { | |
4046 assert(obj_to_scan->is_oop(), "Should be an oop"); | |
4047 assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object"); | |
4048 obj_to_scan->oop_iterate(&cl); | |
4049 } else if (terminator()->offer_termination()) { | |
4050 assert(work_q->size() == 0, "Impossible!"); | |
4051 break; | |
4052 } | |
4053 } | |
4054 } | |
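// The loop above realizes the termination protocol of the
// "Concurrent Marking Algorithm Sketch" earlier in this file: drain
// the local queue, then refill it from the global overflow stack,
// then steal from peers, and only when all three come up empty offer
// termination to the terminator.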
4055 | |
4056 // This is run by the CMS (coordinator) thread. | |
4057 void CMSConcMarkingTask::coordinator_yield() { | |
4058 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), | |
4059 "CMS thread should hold CMS token"); | |
4060 | |
4061 // First give up the locks, then yield, then re-lock | |
4062 // We should probably use a constructor/destructor idiom to | |
4063 // do this unlock/lock or modify the MutexUnlocker class to | |
4064 // serve our purpose. XXX | |
4065 assert_lock_strong(_bit_map_lock); | |
4066 _bit_map_lock->unlock(); | |
4067 ConcurrentMarkSweepThread::desynchronize(true); | |
4068 ConcurrentMarkSweepThread::acknowledge_yield_request(); | |
4069 _collector->stopTimer(); | |
4070 if (PrintCMSStatistics != 0) { | |
4071 _collector->incrementYields(); | |
4072 } | |
4073 _collector->icms_wait(); | |
4074 | |
4075 // It is possible for whichever thread initiated the yield request | |
4076 // not to get a chance to wake up and take the bitmap lock between | |
4077 // this thread releasing it and reacquiring it. So, while the | |
4078 // should_yield() flag is on, let's sleep for a bit to give the | |
4079 // other thread a chance to wake up. The limit imposed on the number | |
4080 // of iterations is defensive, to avoid any unforeseen circumstances | |
4081 // putting us into an infinite loop. Since it's always been this | |
4082 // (coordinator_yield()) method that was observed to cause the | |
4083 // problem, we are using a parameter (CMSCoordinatorYieldSleepCount) | |
4084 // which is by default non-zero. For the other seven methods that | |
4085 // also perform the yield operation, we are using a different | |
4086 // parameter (CMSYieldSleepCount) which is by default zero. This way we | |
4087 // can enable the sleeping for those methods too, if necessary. | |
4088 // See 6442774. | |
4089 // | |
4090 // We really need to reconsider the synchronization between the GC | |
4091 // thread and the yield-requesting threads in the future and we | |
4092 // should really use wait/notify, which is the recommended | |
4093 // way of doing this type of interaction. Additionally, we should | |
4094 // consolidate the eight methods that do the yield operation -- they | |
4095 // are almost identical -- into one, for better maintainability and | |
4096 // readability. See 6445193. | |
4097 // | |
4098 // Tony 2006.06.29 | |
4099 for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount && | |
113 | 4100 ConcurrentMarkSweepThread::should_yield() && |
4101 !CMSCollector::foregroundGCIsActive(); ++i) { |
0 | 4102 os::sleep(Thread::current(), 1, false); |
4103 ConcurrentMarkSweepThread::acknowledge_yield_request(); | |
4104 } | |
4105 | |
4106 ConcurrentMarkSweepThread::synchronize(true); | |
4107 _bit_map_lock->lock_without_safepoint_check(); | |
4108 _collector->startTimer(); | |
4109 } | |
4110 | |
4111 bool CMSCollector::do_marking_mt(bool asynch) { | |
4112 assert(ParallelCMSThreads > 0 && conc_workers() != NULL, "precondition"); | |
4113 // In the future this would be determined ergonomically, based | |
4114 // on #cpu's, # active mutator threads (and load), and mutation rate. | |
4115 int num_workers = ParallelCMSThreads; | |
4116 | |
4117 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace(); | |
4118 CompactibleFreeListSpace* perm_space = _permGen->cmsSpace(); | |
4119 | |
4120 CMSConcMarkingTask tsk(this, cms_space, perm_space, | |
4121 asynch, num_workers /* number requested XXX */, | |
4122 conc_workers(), task_queues()); | |
4123 | |
4124 // Since the actual number of workers we get may be different | |
4125 // from the number we requested above, do we need to do anything different | |
4126 // below? In particular, maybe we need to subclass the SequentialSubTasksDone | |
4127 // class?? XXX | |
4128 cms_space ->initialize_sequential_subtasks_for_marking(num_workers); | |
4129 perm_space->initialize_sequential_subtasks_for_marking(num_workers); | |
4130 | |
4131 // Refs discovery is already non-atomic. | |
4132 assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic"); | |
4133 // Mutate the Refs discovery so it is MT during the | |
4134 // multi-threaded marking phase. | |
4135 ReferenceProcessorMTMutator mt(ref_processor(), num_workers > 1); | |
4136 | |
4137 conc_workers()->start_task(&tsk); | |
4138 while (tsk.yielded()) { | |
4139 tsk.coordinator_yield(); | |
4140 conc_workers()->continue_task(&tsk); | |
4141 } | |
4142 // If the task was aborted, _restart_addr will be non-NULL | |
4143 assert(tsk.completed() || _restart_addr != NULL, "Inconsistency"); | |
4144 while (_restart_addr != NULL) { | |
4145 // XXX For now we do not make use of ABORTED state and have not | |
4146 // yet implemented the right abort semantics (even in the original | |
4147 // single-threaded CMS case). That needs some more investigation | |
4148 // and is deferred for now; see CR# TBF. 07252005YSR. XXX | |
4149 assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency"); | |
4150 // If _restart_addr is non-NULL, a marking stack overflow | |
4151 // occurred; we need to do a fresh marking iteration from the | |
4152 // indicated restart address. | |
4153 if (_foregroundGCIsActive && asynch) { | |
4154 // We may be running into repeated stack overflows, having | |
4155 // reached the limit of the stack size, while making very | |
4156 // slow forward progress. It may be best to bail out and | |
4157 // let the foreground collector do its job. | |
4158 // Clear _restart_addr, so that foreground GC | |
4159 // works from scratch. This avoids the headache of | |
4160 // a "rescan" which would otherwise be needed because | |
4161 // of the dirty mod union table & card table. | |
4162 _restart_addr = NULL; | |
4163 return false; | |
4164 } | |
4165 // Adjust the task to restart from _restart_addr | |
4166 tsk.reset(_restart_addr); | |
4167 cms_space ->initialize_sequential_subtasks_for_marking(num_workers, | |
4168 _restart_addr); | |
4169 perm_space->initialize_sequential_subtasks_for_marking(num_workers, | |
4170 _restart_addr); | |
4171 _restart_addr = NULL; | |
4172 // Get the workers going again | |
4173 conc_workers()->start_task(&tsk); | |
4174 while (tsk.yielded()) { | |
4175 tsk.coordinator_yield(); | |
4176 conc_workers()->continue_task(&tsk); | |
4177 } | |
4178 } | |
4179 assert(tsk.completed(), "Inconsistency"); | |
4180 assert(tsk.result() == true, "Inconsistency"); | |
4181 return true; | |
4182 } | |
4183 | |
4184 bool CMSCollector::do_marking_st(bool asynch) { | |
4185 ResourceMark rm; | |
4186 HandleMark hm; | |
4187 | |
4188 MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap, | |
4189 &_markStack, &_revisitStack, CMSYield && asynch); | |
4190 // the last argument to iterate indicates whether the iteration | |
4191 // should be incremental with periodic yields. | |
4192 _markBitMap.iterate(&markFromRootsClosure); | |
4193 // If _restart_addr is non-NULL, a marking stack overflow | |
4194 // occurred; we need to do a fresh iteration from the | |
4195 // indicated restart address. | |
4196 while (_restart_addr != NULL) { | |
4197 if (_foregroundGCIsActive && asynch) { | |
4198 // We may be running into repeated stack overflows, having | |
4199 // reached the limit of the stack size, while making very | |
4200 // slow forward progress. It may be best to bail out and | |
4201 // let the foreground collector do its job. | |
4202 // Clear _restart_addr, so that foreground GC | |
4203 // works from scratch. This avoids the headache of | |
4204 // a "rescan" which would otherwise be needed because | |
4205 // of the dirty mod union table & card table. | |
4206 _restart_addr = NULL; | |
4207 return false; // indicating failure to complete marking | |
4208 } | |
4209 // Deal with stack overflow: | |
4210 // we restart marking from _restart_addr | |
4211 HeapWord* ra = _restart_addr; | |
4212 markFromRootsClosure.reset(ra); | |
4213 _restart_addr = NULL; | |
4214 _markBitMap.iterate(&markFromRootsClosure, ra, _span.end()); | |
4215 } | |
4216 return true; | |
4217 } | |
4218 | |
4219 void CMSCollector::preclean() { | |
4220 check_correct_thread_executing(); | |
4221 assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread"); | |
4222 verify_work_stacks_empty(); | |
4223 verify_overflow_empty(); | |
4224 _abort_preclean = false; | |
4225 if (CMSPrecleaningEnabled) { | |
4226 _eden_chunk_index = 0; | |
4227 size_t used = get_eden_used(); | |
4228 size_t capacity = get_eden_capacity(); | |
4229 // Don't start sampling unless we will get sufficiently | |
4230 // many samples. | |
4231 if (used < (capacity/(CMSScheduleRemarkSamplingRatio * 100) | |
4232 * CMSScheduleRemarkEdenPenetration)) { | |
4233 _start_sampling = true; | |
4234 } else { | |
4235 _start_sampling = false; | |
4236 } | |
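// A worked instance of the test above (with illustrative flag
// values): for CMSScheduleRemarkSamplingRatio == 5 and
// CMSScheduleRemarkEdenPenetration == 50, the threshold is
// capacity/(5*100)*50 == capacity/10, so sampling starts only while
// eden is under 10% full; any fuller and too few samples would
// accumulate before the remark is scheduled.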
4237 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); | |
4238 CMSPhaseAccounting pa(this, "preclean", !PrintGCDetails); | |
4239 preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1); | |
4240 } | |
4241 CMSTokenSync x(true); // is cms thread | |
4242 if (CMSPrecleaningEnabled) { | |
4243 sample_eden(); | |
4244 _collectorState = AbortablePreclean; | |
4245 } else { | |
4246 _collectorState = FinalMarking; | |
4247 } | |
4248 verify_work_stacks_empty(); | |
4249 verify_overflow_empty(); | |
4250 } | |
4251 | |
4252 // Try and schedule the remark such that young gen | |
4253 // occupancy is CMSScheduleRemarkEdenPenetration %. | |
4254 void CMSCollector::abortable_preclean() { | |
4255 check_correct_thread_executing(); | |
4256 assert(CMSPrecleaningEnabled, "Inconsistent control state"); | |
4257 assert(_collectorState == AbortablePreclean, "Inconsistent control state"); | |
4258 | |
4259 // If Eden's current occupancy is below this threshold, | |
4260 // immediately schedule the remark; else preclean | |
4261 // past the next scavenge in an effort to | |
4262 // schedule the pause as described above. By choosing | |
4263 // CMSScheduleRemarkEdenSizeThreshold >= max eden size | |
4264 // we will never do an actual abortable preclean cycle. | |
4265 if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) { | |
4266 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); | |
4267 CMSPhaseAccounting pa(this, "abortable-preclean", !PrintGCDetails); | |
4268 // We need more smarts in the abortable preclean | |
4269 // loop below to deal with cases where allocation | |
4270 // in young gen is very very slow, and our precleaning | |
4271 // is running a losing race against a horde of | |
4272 // mutators intent on flooding us with CMS updates | |
4273 // (dirty cards). | |
4274 // One, admittedly dumb, strategy is to give up | |
4275 // after a certain number of abortable precleaning loops | |
4276 // or after a certain maximum time. We want to make | |
4277 // this smarter in the next iteration. | |
4278 // XXX FIX ME!!! YSR | |
4279 size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0; | |
4280 while (!(should_abort_preclean() || | |
4281 ConcurrentMarkSweepThread::should_terminate())) { | |
4282 workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2); | |
4283 cumworkdone += workdone; | |
4284 loops++; | |
4285 // Voluntarily terminate abortable preclean phase if we have | |
4286 // been at it for too long. | |
4287 if ((CMSMaxAbortablePrecleanLoops != 0) && | |
4288 loops >= CMSMaxAbortablePrecleanLoops) { | |
4289 if (PrintGCDetails) { | |
4290 gclog_or_tty->print(" CMS: abort preclean due to loops "); | |
4291 } | |
4292 break; | |
4293 } | |
4294 if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) { | |
4295 if (PrintGCDetails) { | |
4296 gclog_or_tty->print(" CMS: abort preclean due to time "); | |
4297 } | |
4298 break; | |
4299 } | |
4300 // If we are doing little work each iteration, we should | |
4301 // take a short break. | |
4302 if (workdone < CMSAbortablePrecleanMinWorkPerIteration) { | |
4303 // Sleep for some time, waiting for work to accumulate | |
4304 stopTimer(); | |
4305 cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis); | |
4306 startTimer(); | |
4307 waited++; | |
4308 } | |
4309 } | |
4310 if (PrintCMSStatistics > 0) { | |
4311 gclog_or_tty->print(" [%d iterations, %d waits, %d cards] ", | |
4312 loops, waited, cumworkdone); | |
4313 } | |
4314 } | |
4315 CMSTokenSync x(true); // is cms thread | |
4316 if (_collectorState != Idling) { | |
4317 assert(_collectorState == AbortablePreclean, | |
4318 "Spontaneous state transition?"); | |
4319 _collectorState = FinalMarking; | |
4320 } // Else, a foreground collection completed this CMS cycle. | |
4321 return; | |
4322 } | |
4323 | |
4324 // Respond to an Eden sampling opportunity | |
4325 void CMSCollector::sample_eden() { | |
4326 // Make sure a young gc cannot sneak in between our | |
4327 // reading and recording of a sample. | |
4328 assert(Thread::current()->is_ConcurrentGC_thread(), | |
4329 "Only the cms thread may collect Eden samples"); | |
4330 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), | |
4331 "Should collect samples while holding CMS token"); | |
4332 if (!_start_sampling) { | |
4333 return; | |
4334 } | |
4335 if (_eden_chunk_array) { | |
4336 if (_eden_chunk_index < _eden_chunk_capacity) { | |
4337 _eden_chunk_array[_eden_chunk_index] = *_top_addr; // take sample | |
4338 assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr, | |
4339 "Unexpected state of Eden"); | |
4340 // We'd like to check that what we just sampled is an oop-start address; | |
4341 // however, we cannot do that here since the object may not yet have been | |
4342 // initialized. So we'll instead do the check when we _use_ this sample | |
4343 // later. | |
4344 if (_eden_chunk_index == 0 || | |
4345 (pointer_delta(_eden_chunk_array[_eden_chunk_index], | |
4346 _eden_chunk_array[_eden_chunk_index-1]) | |
4347 >= CMSSamplingGrain)) { | |
4348 _eden_chunk_index++; // commit sample | |
4349 } | |
4350 } | |
4351 } | |
4352 if ((_collectorState == AbortablePreclean) && !_abort_preclean) { | |
4353 size_t used = get_eden_used(); | |
4354 size_t capacity = get_eden_capacity(); | |
4355 assert(used <= capacity, "Unexpected state of Eden"); | |
4356 if (used > (capacity/100 * CMSScheduleRemarkEdenPenetration)) { | |
4357 _abort_preclean = true; | |
4358 } | |
4359 } | |
4360 } | |
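// The effect of the grain test above (a sketch): a sample is
// committed only once eden's top has advanced at least
// CMSSamplingGrain words past the previously committed sample, so the
// chunk array records a bounded set of roughly evenly spaced eden
// addresses (used later, e.g., when partitioning eden for rescan).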
4361 | |
4362 | |
4363 size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) { | |
4364 assert(_collectorState == Precleaning || | |
4365 _collectorState == AbortablePreclean, "incorrect state"); | |
4366 ResourceMark rm; | |
4367 HandleMark hm; | |
4368 // Do one pass of scrubbing the discovered reference lists | |
4369 // to remove any reference objects with strongly-reachable | |
4370 // referents. | |
4371 if (clean_refs) { | |
4372 ReferenceProcessor* rp = ref_processor(); | |
4373 CMSPrecleanRefsYieldClosure yield_cl(this); | |
4374 assert(rp->span().equals(_span), "Spans should be equal"); | |
4375 CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap, | |
4376 &_markStack); | |
4377 CMSDrainMarkingStackClosure complete_trace(this, | |
4378 _span, &_markBitMap, &_markStack, | |
4379 &keep_alive); | |
4380 | |
4381 // We don't want this step to interfere with a young | |
4382 // collection because we don't want to take CPU | |
4383 // or memory bandwidth away from the young GC threads | |
4384 // (which may be as many as there are CPUs). | |
4385 // Note that we don't need to protect ourselves from | |
4386 // interference with mutators because they can't | |
4387 // manipulate the discovered reference lists nor affect | |
4388 // the computed reachability of the referents, the | |
4389 // only properties manipulated by the precleaning | |
4390 // of these reference lists. | |
4391 stopTimer(); | |
4392 CMSTokenSyncWithLocks x(true /* is cms thread */, | |
4393 bitMapLock()); | |
4394 startTimer(); | |
4395 sample_eden(); | |
4396 // The following will yield to allow foreground | |
4397 // collection to proceed promptly. XXX YSR: | |
4398 // The code in this method may need further | |
4399 // tweaking for better performance and some restructuring | |
4400 // for cleaner interfaces. | |
4401 rp->preclean_discovered_references( | |
4402 rp->is_alive_non_header(), &keep_alive, &complete_trace, | |
4403 &yield_cl); | |
4404 } | |
4405 | |
4406 if (clean_survivor) { // preclean the active survivor space(s) | |
4407 assert(_young_gen->kind() == Generation::DefNew || | |
4408 _young_gen->kind() == Generation::ParNew || | |
4409 _young_gen->kind() == Generation::ASParNew, | |
4410 "incorrect type for cast"); | |
4411 DefNewGeneration* dng = (DefNewGeneration*)_young_gen; | |
4412 PushAndMarkClosure pam_cl(this, _span, ref_processor(), | |
4413 &_markBitMap, &_modUnionTable, | |
4414 &_markStack, &_revisitStack, | |
4415 true /* precleaning phase */); | |
4416 stopTimer(); | |
4417 CMSTokenSyncWithLocks ts(true /* is cms thread */, | |
4418 bitMapLock()); | |
4419 startTimer(); | |
4420 unsigned int before_count = | |
4421 GenCollectedHeap::heap()->total_collections(); | |
4422 SurvivorSpacePrecleanClosure | |
4423 sss_cl(this, _span, &_markBitMap, &_markStack, | |
4424 &pam_cl, before_count, CMSYield); | |
4425 dng->from()->object_iterate_careful(&sss_cl); | |
4426 dng->to()->object_iterate_careful(&sss_cl); | |
4427 } | |
4428 MarkRefsIntoAndScanClosure | |
4429 mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable, | |
4430 &_markStack, &_revisitStack, this, CMSYield, | |
4431 true /* precleaning phase */); | |
4432 // CAUTION: The following closure has persistent state that may need to | |
4433 // be reset upon a decrease in the sequence of addresses it | |
4434 // processes. | |
4435 ScanMarkedObjectsAgainCarefullyClosure | |
4436 smoac_cl(this, _span, | |
4437 &_markBitMap, &_markStack, &_revisitStack, &mrias_cl, CMSYield); | |
4438 | |
4439 // Preclean dirty cards in ModUnionTable and CardTable using | |
4440 // appropriate convergence criterion; | |
4441 // repeat CMSPrecleanIter times unless we find that | |
4442 // we are losing. | |
4443 assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large"); | |
4444 assert(CMSPrecleanNumerator < CMSPrecleanDenominator, | |
4445 "Bad convergence multiplier"); | |
4446 assert(CMSPrecleanThreshold >= 100, | |
4447 "Unreasonably low CMSPrecleanThreshold"); | |
4448 | |
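// The "losing" test in the loop below, by way of example (with
// illustrative flag values): for CMSPrecleanNumerator == 2 and
// CMSPrecleanDenominator == 3, iteration stops early once
//   curNumCards * 3 > lastNumCards * 2,
// i.e. once a pass fails to shrink the dirty-card count below 2/3 of
// the previous pass's count (or once very few cards remain at all).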
4449 size_t numIter, cumNumCards, lastNumCards, curNumCards; | |
4450 for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0; | |
4451 numIter < CMSPrecleanIter; | |
4452 numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) { | |
4453 curNumCards = preclean_mod_union_table(_cmsGen, &smoac_cl); | |
4454 if (CMSPermGenPrecleaningEnabled) { | |
4455 curNumCards += preclean_mod_union_table(_permGen, &smoac_cl); | |
4456 } | |
4457 if (Verbose && PrintGCDetails) { | |
4458 gclog_or_tty->print(" (modUnionTable: %d cards)", curNumCards); | |
4459 } | |
4460 // Either there are very few dirty cards, so re-mark | |
4461 // pause will be small anyway, or our pre-cleaning isn't | |
4462 // that much faster than the rate at which cards are being | |
4463 // dirtied, so we might as well stop and re-mark since | |
4464 // precleaning won't improve our re-mark time by much. | |
4465 if (curNumCards <= CMSPrecleanThreshold || | |
4466 (numIter > 0 && | |
4467 (curNumCards * CMSPrecleanDenominator > | |
4468 lastNumCards * CMSPrecleanNumerator))) { | |
4469 numIter++; | |
4470 cumNumCards += curNumCards; | |
4471 break; | |
4472 } | |
4473 } | |
4474 curNumCards = preclean_card_table(_cmsGen, &smoac_cl); | |
4475 if (CMSPermGenPrecleaningEnabled) { | |
4476 curNumCards += preclean_card_table(_permGen, &smoac_cl); | |
4477 } | |
4478 cumNumCards += curNumCards; | |
4479 if (PrintGCDetails && PrintCMSStatistics != 0) { | |
4480 gclog_or_tty->print_cr(" (cardTable: %d cards, re-scanned %d cards, %d iterations)", | |
4481 curNumCards, cumNumCards, numIter); | |
4482 } | |
4483 return cumNumCards; // as a measure of useful work done | |
4484 } | |
4485 | |
4486 // PRECLEANING NOTES: | |
4487 // Precleaning involves: | |
4488 // . reading the bits of the modUnionTable and clearing the set bits. | |
4489 // . For the cards corresponding to the set bits, we scan the | |
4490 // objects on those cards. This means we need the free_list_lock | |
4491 // so that we can safely iterate over the CMS space when scanning | |
4492 // for oops. | |
4493 // . When we scan the objects, we'll be both reading and setting | |
4494 // marks in the marking bit map, so we'll need the marking bit map. | |
4495 // . For protecting _collector_state transitions, we take the CGC_lock. | |
4496 // Note that any races in the reading of card table entries by the | |
4497 // CMS thread on the one hand and the clearing of those entries by the | |
4498 // VM thread or the setting of those entries by the mutator threads on the | |
4499 // other are quite benign. However, for efficiency it makes sense to keep | |
4500 // the VM thread from racing with the CMS thread while the latter is | |
4501 // recording dirty card info to the modUnionTable. We therefore also use the | |
4502 // CGC_lock to protect the reading of the card table and the mod union | |
4503 // table by the CMS thread. | |
4504 // . We run concurrently with mutator updates, so scanning | |
4505 // needs to be done carefully -- we should not try to scan | |
4506 // potentially uninitialized objects. | |
4507 // | |
4508 // Locking strategy: While holding the CGC_lock, we scan over and | |
4509 // reset a maximal dirty range of the mod union / card tables, then lock | |
4510 // the free_list_lock and bitmap lock to do a full marking, then | |
4511 // release these locks; and repeat the cycle. This allows for a | |
4512 // certain amount of fairness in the sharing of these locks between | |
4513 // the CMS collector on the one hand, and the VM thread and the | |
4514 // mutators on the other. | |
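// In outline, each iteration of the precleaning loops below follows
// the strategy just described (a sketch using the actual helpers):
//
//   { CMSTokenSync x(true);                    // CGC_lock via token
//     dirtyRegion = <get-and-reset a maximal dirty range>; }
//   { CMSTokenSyncWithLocks ts(true, freelistLock(), bitMapLock());
//     <scan objects on those cards, setting marks in the bit map>; }
//   // all locks dropped; repeat, yielding to foreground GC as needed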
4515 | |
4516 // NOTE: preclean_mod_union_table() and preclean_card_table() | |
4517 // further below are largely identical; if you need to modify | |
4518 // one of these methods, please check the other method too. | |
4519 | |
4520 size_t CMSCollector::preclean_mod_union_table( | |
4521 ConcurrentMarkSweepGeneration* gen, | |
4522 ScanMarkedObjectsAgainCarefullyClosure* cl) { | |
4523 verify_work_stacks_empty(); | |
4524 verify_overflow_empty(); | |
4525 | |
4526 // strategy: starting with the first card, accumulate contiguous | |
4527 // ranges of dirty cards; clear these cards, then scan the region | |
4528 // covered by these cards. | |
4529 | |
4530 // Since all of the MUT is committed ahead, we can just use | |
4531 // that, in case the generations expand while we are precleaning. | |
4532 // It might also be fine to just use the committed part of the | |
4533 // generation, but we might potentially miss cards when the | |
4534 // generation is rapidly expanding while we are in the midst | |
4535 // of precleaning. | |
4536 HeapWord* startAddr = gen->reserved().start(); | |
4537 HeapWord* endAddr = gen->reserved().end(); | |
4538 | |
4539 cl->setFreelistLock(gen->freelistLock()); // needed for yielding | |
4540 | |
4541 size_t numDirtyCards, cumNumDirtyCards; | |
4542 HeapWord *nextAddr, *lastAddr; | |
4543 for (cumNumDirtyCards = numDirtyCards = 0, | |
4544 nextAddr = lastAddr = startAddr; | |
4545 nextAddr < endAddr; | |
4546 nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) { | |
4547 | |
4548 ResourceMark rm; | |
4549 HandleMark hm; | |
4550 | |
4551 MemRegion dirtyRegion; | |
4552 { | |
4553 stopTimer(); | |
4554 CMSTokenSync ts(true); | |
4555 startTimer(); | |
4556 sample_eden(); | |
4557 // Get dirty region starting at nextOffset (inclusive), | |
4558 // simultaneously clearing it. | |
4559 dirtyRegion = | |
4560 _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr); | |
4561 assert(dirtyRegion.start() >= nextAddr, | |
4562 "returned region inconsistent?"); | |
4563 } | |
4564 // Remember where the next search should begin. | |
4565 // The returned region (if non-empty) is a right open interval, | |
4566 // so lastOffset is obtained from the right end of that | |
4567 // interval. | |
4568 lastAddr = dirtyRegion.end(); | |
4569 // Should do something more transparent and less hacky XXX | |
4570 numDirtyCards = | |
4571 _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size()); | |
4572 | |
4573 // We'll scan the cards in the dirty region (with periodic | |
4574 // yields for foreground GC as needed). | |
4575 if (!dirtyRegion.is_empty()) { | |
4576 assert(numDirtyCards > 0, "consistency check"); | |
4577 HeapWord* stop_point = NULL; | |
4578 { | |
4579 stopTimer(); | |
4580 CMSTokenSyncWithLocks ts(true, gen->freelistLock(), | |
4581 bitMapLock()); | |
4582 startTimer(); | |
4583 verify_work_stacks_empty(); | |
4584 verify_overflow_empty(); | |
4585 sample_eden(); | |
4586 stop_point = | |
4587 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl); | |
4588 } | |
4589 if (stop_point != NULL) { | |
4590 // The careful iteration stopped early either because it found an | |
4591 // uninitialized object, or because we were in the midst of an | |
4592 // "abortable preclean", which should now be aborted. Redirty | |
4593 // the bits corresponding to the partially-scanned or unscanned | |
4594 // cards. We'll either restart at the next block boundary or | |
4595 // abort the preclean. | |
4596 assert((CMSPermGenPrecleaningEnabled && (gen == _permGen)) || | |
4597 (_collectorState == AbortablePreclean && should_abort_preclean()), | |
4598 "Unparsable objects should only be in perm gen."); | |
4599 | |
4600 stopTimer(); | |
4601 CMSTokenSyncWithLocks ts(true, bitMapLock()); | |
4602 startTimer(); | |
4603 _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end())); | |
4604 if (should_abort_preclean()) { | |
4605 break; // out of preclean loop | |
4606 } else { | |
4607 // Compute the next address at which preclean should pick up; | |
4608 // might need bitMapLock in order to read P-bits. | |
4609 lastAddr = next_card_start_after_block(stop_point); | |
4610 } | |
4611 } | |
4612 } else { | |
4613 assert(lastAddr == endAddr, "consistency check"); | |
4614 assert(numDirtyCards == 0, "consistency check"); | |
4615 break; | |
4616 } | |
4617 } | |
4618 verify_work_stacks_empty(); | |
4619 verify_overflow_empty(); | |
4620 return cumNumDirtyCards; | |
4621 } | |
4622 | |
4623 // NOTE: preclean_mod_union_table() above and preclean_card_table() | |
4624 // below are largely identical; if you need to modify | |
4625 // one of these methods, please check the other method too. | |
4626 | |
4627 size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen, | |
4628 ScanMarkedObjectsAgainCarefullyClosure* cl) { | |
4629 // strategy: it's similar to preclean_mod_union_table above, in that | |
4630 // we accumulate contiguous ranges of dirty cards, mark these cards | |
4631 // precleaned, then scan the region covered by these cards. | |
4632 HeapWord* endAddr = (HeapWord*)(gen->_virtual_space.high()); | |
4633 HeapWord* startAddr = (HeapWord*)(gen->_virtual_space.low()); | |
4634 | |
4635 cl->setFreelistLock(gen->freelistLock()); // needed for yielding | |
4636 | |
4637 size_t numDirtyCards, cumNumDirtyCards; | |
4638 HeapWord *lastAddr, *nextAddr; | |
4639 | |
4640 for (cumNumDirtyCards = numDirtyCards = 0, | |
4641 nextAddr = lastAddr = startAddr; | |
4642 nextAddr < endAddr; | |
4643 nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) { | |
4644 | |
4645 ResourceMark rm; | |
4646 HandleMark hm; | |
4647 | |
4648 MemRegion dirtyRegion; | |
4649 { | |
4650 // See comments in "Precleaning notes" above on why we | |
4651 // do this locking. XXX Could the locking overheads be | |
4652 // too high when dirty cards are sparse? [I don't think so.] | |
4653 stopTimer(); | |
4654 CMSTokenSync x(true); // is cms thread | |
4655 startTimer(); | |
4656 sample_eden(); | |
4657 // Get and clear dirty region from card table | |
4658 dirtyRegion = _ct->ct_bs()->dirty_card_range_after_preclean( | |
4659 MemRegion(nextAddr, endAddr)); | |
4660 assert(dirtyRegion.start() >= nextAddr, | |
4661 "returned region inconsistent?"); | |
4662 } | |
4663 lastAddr = dirtyRegion.end(); | |
4664 numDirtyCards = | |
4665 dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words; | |
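// Worked example (illustrative only; these are the usual HotSpot
// defaults, not values read from this file): with 512-byte cards and
// 8-byte HeapWords, card_size_in_words == 64, so a 64 KB dirty region
// spans 8192 words, i.e. 8192 / 64 == 128 dirty cards.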
4666 | |
4667 if (!dirtyRegion.is_empty()) { | |
4668 stopTimer(); | |
4669 CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock()); | |
4670 startTimer(); | |
4671 sample_eden(); | |
4672 verify_work_stacks_empty(); | |
4673 verify_overflow_empty(); | |
4674 HeapWord* stop_point = | |
4675 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl); | |
4676 if (stop_point != NULL) { | |
4677 // The careful iteration stopped early because it found an | |
4678 // uninitialized object. Redirty the bits corresponding to the | |
4679 // partially-scanned or unscanned cards, and start again at the | |
4680 // next block boundary. | |
4681 assert(CMSPermGenPrecleaningEnabled || | |
4682 (_collectorState == AbortablePreclean && should_abort_preclean()), | |
4683 "Unparsable objects should only be in perm gen."); | |
4684 _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end())); | |
4685 if (should_abort_preclean()) { | |
4686 break; // out of preclean loop | |
4687 } else { | |
4688 // Compute the next address at which preclean should pick up. | |
4689 lastAddr = next_card_start_after_block(stop_point); | |
4690 } | |
4691 } | |
4692 } else { | |
4693 break; | |
4694 } | |
4695 } | |
4696 verify_work_stacks_empty(); | |
4697 verify_overflow_empty(); | |
4698 return cumNumDirtyCards; | |
4699 } | |
4700 | |
4701 void CMSCollector::checkpointRootsFinal(bool asynch, | |
4702 bool clear_all_soft_refs, bool init_mark_was_synchronous) { | |
4703 assert(_collectorState == FinalMarking, "incorrect state transition?"); | |
4704 check_correct_thread_executing(); | |
4705 // world is stopped at this checkpoint | |
4706 assert(SafepointSynchronize::is_at_safepoint(), | |
4707 "world should be stopped"); | |
4708 verify_work_stacks_empty(); | |
4709 verify_overflow_empty(); | |
4710 | |
4711 SpecializationStats::clear(); | |
4712 if (PrintGCDetails) { | |
4713 gclog_or_tty->print("[YG occupancy: "SIZE_FORMAT" K ("SIZE_FORMAT" K)]", | |
4714 _young_gen->used() / K, | |
4715 _young_gen->capacity() / K); | |
4716 } | |
4717 if (asynch) { | |
4718 if (CMSScavengeBeforeRemark) { | |
4719 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
4720 // Temporarily set the flag to false: GCH->do_collection() expects | |
4721 // it to be false on entry and will set it to true itself. | |
4722 FlagSetting fl(gch->_is_gc_active, false); | |
4723 NOT_PRODUCT(TraceTime t("Scavenge-Before-Remark", | |
4724 PrintGCDetails && Verbose, true, gclog_or_tty);) | |
4725 int level = _cmsGen->level() - 1; | |
4726 if (level >= 0) { | |
4727 gch->do_collection(true, // full (i.e. force, see below) | |
4728 false, // !clear_all_soft_refs | |
4729 0, // size | |
4730 false, // is_tlab | |
4731 level // max_level | |
4732 ); | |
4733 } | |
4734 } | |
4735 FreelistLocker x(this); | |
4736 MutexLockerEx y(bitMapLock(), | |
4737 Mutex::_no_safepoint_check_flag); | |
4738 assert(!init_mark_was_synchronous, "but that's impossible!"); | |
4739 checkpointRootsFinalWork(asynch, clear_all_soft_refs, false); | |
4740 } else { | |
4741 // already have all the locks | |
4742 checkpointRootsFinalWork(asynch, clear_all_soft_refs, | |
4743 init_mark_was_synchronous); | |
4744 } | |
4745 verify_work_stacks_empty(); | |
4746 verify_overflow_empty(); | |
4747 SpecializationStats::print(); | |
4748 } | |
4749 | |
4750 void CMSCollector::checkpointRootsFinalWork(bool asynch, | |
4751 bool clear_all_soft_refs, bool init_mark_was_synchronous) { | |
4752 | |
4753 NOT_PRODUCT(TraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, gclog_or_tty);) | |
4754 | |
4755 assert(haveFreelistLocks(), "must have free list locks"); | |
4756 assert_lock_strong(bitMapLock()); | |
4757 | |
4758 if (UseAdaptiveSizePolicy) { | |
4759 size_policy()->checkpoint_roots_final_begin(); | |
4760 } | |
4761 | |
4762 ResourceMark rm; | |
4763 HandleMark hm; | |
4764 | |
4765 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
4766 | |
94 | 4767 if (should_unload_classes()) { |
0 | 4768 CodeCache::gc_prologue(); |
4769 } | |
4770 assert(haveFreelistLocks(), "must have free list locks"); | |
4771 assert_lock_strong(bitMapLock()); | |
4772 | |
4773 if (!init_mark_was_synchronous) { | |
4774 // We might assume that we need not fill TLAB's when | |
4775 // CMSScavengeBeforeRemark is set, because we may have just done | |
4776 // a scavenge which would have filled all TLAB's -- and besides | |
4777 // Eden would be empty. This however may not always be the case -- | |
4778 // for instance although we asked for a scavenge, it may not have | |
4779 // happened because of a JNI critical section. We probably need | |
4780 // a policy for deciding whether we can in that case wait until | |
4781 // the critical section releases and then do the remark following | |
4782 // the scavenge, and skip it here. In the absence of that policy, | |
4783 // or of an indication of whether the scavenge did indeed occur, | |
4784 // we cannot rely on TLAB's having been filled and must do | |
4785 // so here just in case a scavenge did not happen. | |
4786 gch->ensure_parsability(false); // fill TLAB's, but no need to retire them | |
4787 // Update the saved marks which may affect the root scans. | |
4788 gch->save_marks(); | |
4789 | |
4790 { | |
4791 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;) | |
4792 | |
4793 // Note on the role of the mod union table: | |
4794 // Since the marker in "markFromRoots" marks concurrently with | |
4795 // mutators, it is possible for some reachable objects not to have been | |
4796 // scanned. For instance, the only reference to an object A may be | |
4797 // stored into object B after the marker has scanned B. Unless B is | |
4798 // rescanned, A would be collected. Such updates to references in marked objects | |
4799 // are detected via the mod union table which is the set of all cards | |
4800 // dirtied since the first checkpoint in this GC cycle and prior to | |
4801 // the most recent young generation GC, minus those cleaned up by the | |
4802 // concurrent precleaning. | |
4803 if (CMSParallelRemarkEnabled && ParallelGCThreads > 0) { | |
4804 TraceTime t("Rescan (parallel) ", PrintGCDetails, false, gclog_or_tty); | |
4805 do_remark_parallel(); | |
4806 } else { | |
4807 TraceTime t("Rescan (non-parallel) ", PrintGCDetails, false, | |
4808 gclog_or_tty); | |
4809 do_remark_non_parallel(); | |
4810 } | |
4811 } | |
4812 } else { | |
4813 assert(!asynch, "Can't have init_mark_was_synchronous in asynch mode"); | |
4814 // The initial mark was stop-world, so there's no rescanning to | |
4815 // do; go straight on to the next step below. | |
4816 } | |
4817 verify_work_stacks_empty(); | |
4818 verify_overflow_empty(); | |
4819 | |
4820 { | |
4821 NOT_PRODUCT(TraceTime ts("refProcessingWork", PrintGCDetails, false, gclog_or_tty);) | |
4822 refProcessingWork(asynch, clear_all_soft_refs); | |
4823 } | |
4824 verify_work_stacks_empty(); | |
4825 verify_overflow_empty(); | |
4826 | |
94 | 4827 if (should_unload_classes()) { |
0 | 4828 CodeCache::gc_epilogue(); |
4829 } | |
4830 | |
4831 // If we encountered any (marking stack / work queue) overflow | |
4832 // events during the current CMS cycle, take appropriate | |
4833 // remedial measures, where possible, so as to try and avoid | |
4834 // recurrence of that condition. | |
4835 assert(_markStack.isEmpty(), "No grey objects"); | |
4836 size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw + | |
4837 _ser_kac_ovflw; | |
4838 if (ser_ovflw > 0) { | |
4839 if (PrintCMSStatistics != 0) { | |
4840 gclog_or_tty->print_cr("Marking stack overflow (benign) " | |
4841 "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")", | |
4842 _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw, | |
4843 _ser_kac_ovflw); | |
4844 } | |
4845 _markStack.expand(); | |
4846 _ser_pmc_remark_ovflw = 0; | |
4847 _ser_pmc_preclean_ovflw = 0; | |
4848 _ser_kac_ovflw = 0; | |
4849 } | |
4850 if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) { | |
4851 if (PrintCMSStatistics != 0) { | |
4852 gclog_or_tty->print_cr("Work queue overflow (benign) " | |
4853 "(pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")", | |
4854 _par_pmc_remark_ovflw, _par_kac_ovflw); | |
4855 } | |
4856 _par_pmc_remark_ovflw = 0; | |
4857 _par_kac_ovflw = 0; | |
4858 } | |
4859 if (PrintCMSStatistics != 0) { | |
4860 if (_markStack._hit_limit > 0) { | |
4861 gclog_or_tty->print_cr(" (benign) Hit max stack size limit ("SIZE_FORMAT")", | |
4862 _markStack._hit_limit); | |
4863 } | |
4864 if (_markStack._failed_double > 0) { | |
4865 gclog_or_tty->print_cr(" (benign) Failed stack doubling ("SIZE_FORMAT")," | |
4866 " current capacity "SIZE_FORMAT, | |
4867 _markStack._failed_double, | |
4868 _markStack.capacity()); | |
4869 } | |
4870 } | |
4871 _markStack._hit_limit = 0; | |
4872 _markStack._failed_double = 0; | |
4873 | |
4874 if ((VerifyAfterGC || VerifyDuringGC) && | |
4875 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { | |
4876 verify_after_remark(); | |
4877 } | |
4878 | |
4879 // Change under the freelistLocks. | |
4880 _collectorState = Sweeping; | |
4881 // Call isAllClear() under bitMapLock | |
4882 assert(_modUnionTable.isAllClear(), "Should be clear by end of the" | |
4883 " final marking"); | |
4884 if (UseAdaptiveSizePolicy) { | |
4885 size_policy()->checkpoint_roots_final_end(gch->gc_cause()); | |
4886 } | |
4887 } | |
4888 | |
4889 // Parallel remark task | |
4890 class CMSParRemarkTask: public AbstractGangTask { | |
4891 CMSCollector* _collector; | |
4892 WorkGang* _workers; | |
4893 int _n_workers; | |
4894 CompactibleFreeListSpace* _cms_space; | |
4895 CompactibleFreeListSpace* _perm_space; | |
4896 | |
4897 // The per-thread work queues, available here for stealing. | |
4898 OopTaskQueueSet* _task_queues; | |
4899 ParallelTaskTerminator _term; | |
4900 | |
4901 public: | |
4902 CMSParRemarkTask(CMSCollector* collector, | |
4903 CompactibleFreeListSpace* cms_space, | |
4904 CompactibleFreeListSpace* perm_space, | |
4905 int n_workers, WorkGang* workers, | |
4906 OopTaskQueueSet* task_queues): | |
4907 AbstractGangTask("Rescan roots and grey objects in parallel"), | |
4908 _collector(collector), | |
4909 _cms_space(cms_space), _perm_space(perm_space), | |
4910 _n_workers(n_workers), | |
4911 _workers(workers), | |
4912 _task_queues(task_queues), | |
4913 _term(workers->total_workers(), task_queues) { } | |
4914 | |
4915 OopTaskQueueSet* task_queues() { return _task_queues; } | |
4916 | |
4917 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); } | |
4918 | |
4919 ParallelTaskTerminator* terminator() { return &_term; } | |
4920 | |
4921 void work(int i); | |
4922 | |
4923 private: | |
4924 // Work method in support of parallel rescan ... of young gen spaces | |
4925 void do_young_space_rescan(int i, Par_MarkRefsIntoAndScanClosure* cl, | |
4926 ContiguousSpace* space, | |
4927 HeapWord** chunk_array, size_t chunk_top); | |
4928 | |
4929 // ... of dirty cards in old space | |
4930 void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i, | |
4931 Par_MarkRefsIntoAndScanClosure* cl); | |
4932 | |
4933 // ... work stealing for the above | |
4934 void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed); | |
4935 }; | |
4936 | |
4937 void CMSParRemarkTask::work(int i) { | |
4938 elapsedTimer _timer; | |
4939 ResourceMark rm; | |
4940 HandleMark hm; | |
4941 | |
4942 // ---------- rescan from roots -------------- | |
4943 _timer.start(); | |
4944 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
4945 Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector, | |
4946 _collector->_span, _collector->ref_processor(), | |
4947 &(_collector->_markBitMap), | |
4948 work_queue(i), &(_collector->_revisitStack)); | |
4949 | |
4950 // Rescan young gen roots first since these are likely | |
4951 // coarsely partitioned and may, on that account, constitute | |
4952 // the critical path; thus, it's best to start off that | |
4953 // work first. | |
4954 // ---------- young gen roots -------------- | |
4955 { | |
4956 DefNewGeneration* dng = _collector->_young_gen->as_DefNewGeneration(); | |
4957 EdenSpace* eden_space = dng->eden(); | |
4958 ContiguousSpace* from_space = dng->from(); | |
4959 ContiguousSpace* to_space = dng->to(); | |
4960 | |
4961 HeapWord** eca = _collector->_eden_chunk_array; | |
4962 size_t ect = _collector->_eden_chunk_index; | |
4963 HeapWord** sca = _collector->_survivor_chunk_array; | |
4964 size_t sct = _collector->_survivor_chunk_index; | |
4965 | |
4966 assert(ect <= _collector->_eden_chunk_capacity, "out of bounds"); | |
4967 assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds"); | |
4968 | |
4969 do_young_space_rescan(i, &par_mrias_cl, to_space, NULL, 0); | |
4970 do_young_space_rescan(i, &par_mrias_cl, from_space, sca, sct); | |
4971 do_young_space_rescan(i, &par_mrias_cl, eden_space, eca, ect); | |
4972 | |
4973 _timer.stop(); | |
4974 if (PrintCMSStatistics != 0) { | |
4975 gclog_or_tty->print_cr( | |
4976 "Finished young gen rescan work in %dth thread: %3.3f sec", | |
4977 i, _timer.seconds()); | |
4978 } | |
4979 } | |
4980 | |
4981 // ---------- remaining roots -------------- | |
4982 _timer.reset(); | |
4983 _timer.start(); | |
4984 gch->gen_process_strong_roots(_collector->_cmsGen->level(), | |
4985 false, // yg was scanned above | |
4986 true, // collecting perm gen | |
4987 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()), | |
4988 NULL, &par_mrias_cl); | |
4989 _timer.stop(); | |
4990 if (PrintCMSStatistics != 0) { | |
4991 gclog_or_tty->print_cr( | |
4992 "Finished remaining root rescan work in %dth thread: %3.3f sec", | |
4993 i, _timer.seconds()); | |
4994 } | |
4995 | |
4996 // ---------- rescan dirty cards ------------ | |
4997 _timer.reset(); | |
4998 _timer.start(); | |
4999 | |
5000 // Do the rescan tasks for each of the two spaces | |
5001 // (cms_space and perm_space) in turn. | |
5002 do_dirty_card_rescan_tasks(_cms_space, i, &par_mrias_cl); | |
5003 do_dirty_card_rescan_tasks(_perm_space, i, &par_mrias_cl); | |
5004 _timer.stop(); | |
5005 if (PrintCMSStatistics != 0) { | |
5006 gclog_or_tty->print_cr( | |
5007 "Finished dirty card rescan work in %dth thread: %3.3f sec", | |
5008 i, _timer.seconds()); | |
5009 } | |
5010 | |
5011 // ---------- steal work from other threads ... | |
5012 // ---------- ... and drain overflow list. | |
5013 _timer.reset(); | |
5014 _timer.start(); | |
5015 do_work_steal(i, &par_mrias_cl, _collector->hash_seed(i)); | |
5016 _timer.stop(); | |
5017 if (PrintCMSStatistics != 0) { | |
5018 gclog_or_tty->print_cr( | |
5019 "Finished work stealing in %dth thread: %3.3f sec", | |
5020 i, _timer.seconds()); | |
5021 } | |
5022 } | |
5023 | |
5024 void | |
5025 CMSParRemarkTask::do_young_space_rescan(int i, | |
5026 Par_MarkRefsIntoAndScanClosure* cl, ContiguousSpace* space, | |
5027 HeapWord** chunk_array, size_t chunk_top) { | |
5028 // Until all tasks completed: | |
5029 // . claim an unclaimed task | |
5030 // . compute region boundaries corresponding to task claimed | |
5031 // using chunk_array | |
5032 // . par_oop_iterate(cl) over that region | |
5033 | |
5034 ResourceMark rm; | |
5035 HandleMark hm; | |
5036 | |
5037 SequentialSubTasksDone* pst = space->par_seq_tasks(); | |
5038 assert(pst->valid(), "Uninitialized use?"); | |
5039 | |
5040 int nth_task = 0; | |
5041 int n_tasks = pst->n_tasks(); | |
5042 | |
5043 HeapWord *start, *end; | |
5044 while (!pst->is_task_claimed(/* reference */ nth_task)) { | |
5045 // We claimed task # nth_task; compute its boundaries. | |
5046 if (chunk_top == 0) { // no samples were taken | |
5047 assert(nth_task == 0 && n_tasks == 1, "Can have only 1 EdenSpace task"); | |
5048 start = space->bottom(); | |
5049 end = space->top(); | |
5050 } else if (nth_task == 0) { | |
5051 start = space->bottom(); | |
5052 end = chunk_array[nth_task]; | |
5053 } else if (nth_task < (jint)chunk_top) { | |
5054 assert(nth_task >= 1, "Control point invariant"); | |
5055 start = chunk_array[nth_task - 1]; | |
5056 end = chunk_array[nth_task]; | |
5057 } else { | |
5058 assert(nth_task == (jint)chunk_top, "Control point invariant"); | |
5059 start = chunk_array[chunk_top - 1]; | |
5060 end = space->top(); | |
5061 } | |
5062 MemRegion mr(start, end); | |
5063 // Verify that mr is in space | |
5064 assert(mr.is_empty() || space->used_region().contains(mr), | |
5065 "Should be in space"); | |
5066 // Verify that "start" is an object boundary | |
5067 assert(mr.is_empty() || oop(mr.start())->is_oop(), | |
5068 "Should be an oop"); | |
5069 space->par_oop_iterate(mr, cl); | |
5070 } | |
5071 pst->all_tasks_completed(); | |
5072 } | |
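// The boundary computation in the loop above is a pure function of the
// sampled chunk array; the following stand-alone restatement is a
// hypothetical helper added for illustration only (it is not part of
// the original file and nothing here calls it):
static void task_bounds(HeapWord* bottom, HeapWord* top,
                        HeapWord** chunk_array, size_t chunk_top,
                        size_t k, HeapWord** start, HeapWord** end) {
  if (chunk_top == 0) {                  // no samples: one task, whole space
    *start = bottom;                     *end = top;
  } else if (k == 0) {                   // first task: bottom to first sample
    *start = bottom;                     *end = chunk_array[0];
  } else if (k < chunk_top) {            // middle tasks: between two samples
    *start = chunk_array[k - 1];         *end = chunk_array[k];
  } else {                               // last task (k == chunk_top): tail
    *start = chunk_array[chunk_top - 1]; *end = top;
  }
}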
5073 | |
5074 void | |
5075 CMSParRemarkTask::do_dirty_card_rescan_tasks( | |
5076 CompactibleFreeListSpace* sp, int i, | |
5077 Par_MarkRefsIntoAndScanClosure* cl) { | |
5078 // Until all tasks completed: | |
5079 // . claim an unclaimed task | |
5080 // . compute region boundaries corresponding to task claimed | |
5081 // . transfer dirty bits ct->mut for that region | |
5082 // . apply rescanclosure to dirty mut bits for that region | |
5083 | |
5084 ResourceMark rm; | |
5085 HandleMark hm; | |
5086 | |
5087 OopTaskQueue* work_q = work_queue(i); | |
5088 ModUnionClosure modUnionClosure(&(_collector->_modUnionTable)); | |
5089 // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! | |
5090 // CAUTION: This closure has state that persists across calls to | |
5091 // the work method dirty_range_iterate_clear() in that it has | |
5092 // embedded in it a (subtype of) UpwardsObjectClosure. The | |
5093 // use of that state in the embedded UpwardsObjectClosure instance | |
5094 // assumes that the cards are always iterated (even if in parallel | |
5095 // by several threads) in monotonically increasing order per each | |
5096 // thread. This is true of the implementation below which picks | |
5097 // card ranges (chunks) in monotonically increasing order globally | |
5098 // and, a-fortiori, in monotonically increasing order per thread | |
5099 // (the latter order being a subsequence of the former). | |
5100 // If the work code below is ever reorganized into a more chaotic | |
5101 // work-partitioning form than the current "sequential tasks" | |
5102 // paradigm, the use of that persistent state will have to be | |
5103 // revisited and modified appropriately. See also related | |
5104 // bug 4756801 work on which should examine this code to make | |
5105 // sure that the changes there do not run counter to the | |
5106 // assumptions made here and necessary for correctness and | |
5107 // efficiency. Note also that this code might yield inefficient | |
5108 // behaviour in the case of very large objects that span one or | |
5109 // more work chunks. Such objects would potentially be scanned | |
5110 // several times redundantly. Work on 4756801 should try and | |
5111 // address that performance anomaly if at all possible. XXX | |
5112 MemRegion full_span = _collector->_span; | |
5113 CMSBitMap* bm = &(_collector->_markBitMap); // shared | |
5114 CMSMarkStack* rs = &(_collector->_revisitStack); // shared | |
5115 MarkFromDirtyCardsClosure | |
5116 greyRescanClosure(_collector, full_span, // entire span of interest | |
5117 sp, bm, work_q, rs, cl); | |
5118 | |
5119 SequentialSubTasksDone* pst = sp->conc_par_seq_tasks(); | |
5120 assert(pst->valid(), "Uninitialized use?"); | |
5121 int nth_task = 0; | |
5122 const int alignment = CardTableModRefBS::card_size * BitsPerWord; | |
5123 MemRegion span = sp->used_region(); | |
5124 HeapWord* start_addr = span.start(); | |
5125 HeapWord* end_addr = (HeapWord*)round_to((intptr_t)span.end(), | |
5126 alignment); | |
5127 const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units | |
5128 assert((HeapWord*)round_to((intptr_t)start_addr, alignment) == | |
5129 start_addr, "Check alignment"); | |
5130 assert((size_t)round_to((intptr_t)chunk_size, alignment) == | |
5131 chunk_size, "Check alignment"); | |
5132 | |
5133 while (!pst->is_task_claimed(/* reference */ nth_task)) { | |
5134 // Having claimed the nth_task, compute corresponding mem-region, | |
5135 // which is a-fortiori aligned correctly (i.e. at a MUT boundary). | |
5136 // The alignment restriction ensures that we do not need any | |
5137 // synchronization with other gang-workers while setting or | |
5138 // clearing bits in this chunk of the MUT. | |
5139 MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size, | |
5140 start_addr + (nth_task+1)*chunk_size); | |
5141 // The last chunk's end might be way beyond end of the | |
5142 // used region. In that case pull back appropriately. | |
5143 if (this_span.end() > end_addr) { | |
5144 this_span.set_end(end_addr); | |
5145 assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)"); | |
5146 } | |
5147 // Iterate over the dirty cards covering this chunk, marking them | |
5148 // precleaned, and setting the corresponding bits in the mod union | |
5149 // table. Since we have been careful to partition at Card and MUT-word | |
5150 // boundaries no synchronization is needed between parallel threads. | |
5151 _collector->_ct->ct_bs()->dirty_card_iterate(this_span, | |
5152 &modUnionClosure); | |
5153 | |
5154 // Having transferred these marks into the modUnionTable, | |
5155 // rescan the marked objects on the dirty cards in the modUnionTable. | |
5156 // Even if this is at a synchronous collection, the initial marking | |
5157 // may have been done during an asynchronous collection so there | |
5158 // may be dirty bits in the mod-union table. | |
5159 _collector->_modUnionTable.dirty_range_iterate_clear( | |
5160 this_span, &greyRescanClosure); | |
5161 _collector->_modUnionTable.verifyNoOneBitsInRange( | |
5162 this_span.start(), | |
5163 this_span.end()); | |
5164 } | |
5165 pst->all_tasks_completed(); // declare that i am done | |
5166 } | |
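// Sizing note for the chunk alignment used above (illustrative, using
// the usual HotSpot defaults rather than values from this file): with
// card_size == 512 bytes and BitsPerWord == 64, alignment == 32768, so
// each claimed chunk covers a whole number of mod-union-table words and
// no two workers ever set or clear bits in the same MUT word.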
5167 | |
5168 // . see if we can share work_queues with ParNew? XXX | |
5169 void | |
5170 CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, | |
5171 int* seed) { | |
5172 OopTaskQueue* work_q = work_queue(i); | |
5173 NOT_PRODUCT(int num_steals = 0;) | |
5174 oop obj_to_scan; | |
5175 CMSBitMap* bm = &(_collector->_markBitMap); | |
5176 size_t num_from_overflow_list = | |
5177 MIN2((size_t)work_q->max_elems()/4, | |
5178 (size_t)ParGCDesiredObjsFromOverflowList); | |
5179 | |
5180 while (true) { | |
5181 // Completely finish any left over work from (an) earlier round(s) | |
5182 cl->trim_queue(0); | |
5183 // Now check if there's any work in the overflow list | |
5184 if (_collector->par_take_from_overflow_list(num_from_overflow_list, | |
5185 work_q)) { | |
5186 // found something in global overflow list; | |
5187 // not yet ready to go stealing work from others. | |
5188 // We'd like to assert(work_q->size() != 0, ...) | |
5189 // because we just took work from the overflow list, | |
5190 // but of course we can't since all of that could have | |
5191 // been already stolen from us. | |
5192 // "He giveth and He taketh away." | |
5193 continue; | |
5194 } | |
5195 // Verify that we have no work before we resort to stealing | |
5196 assert(work_q->size() == 0, "Have work, shouldn't steal"); | |
5197 // Try to steal from other queues that have work | |
5198 if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) { | |
5199 NOT_PRODUCT(num_steals++;) | |
5200 assert(obj_to_scan->is_oop(), "Oops, not an oop!"); | |
5201 assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?"); | |
5202 // Do scanning work | |
5203 obj_to_scan->oop_iterate(cl); | |
5204 // Loop around, finish this work, and try to steal some more | |
5205 } else if (terminator()->offer_termination()) { | |
5206 break; // nirvana from the infinite cycle | |
5207 } | |
5208 } | |
5209 NOT_PRODUCT( | |
5210 if (PrintCMSStatistics != 0) { | |
5211 gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals); | |
5212 } | |
5213 ) | |
5214 assert(work_q->size() == 0 && _collector->overflow_list_is_empty(), | |
5215 "Else our work is not yet done"); | |
5216 } | |
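// The loop above is an instance of the usual drain/overflow/steal/
// terminate pattern; schematically (pseudocode, not from this file):
//
//   while (true) {
//     drain own work queue completely;
//     if (took work from the global overflow list) continue;
//     if (stole a task from a random victim) { scan it; continue; }
//     if (terminator->offer_termination()) break;  // all queues empty
//   }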
5217 | |
5218 // Return a thread-local PLAB recording array, as appropriate. | |
5219 void* CMSCollector::get_data_recorder(int thr_num) { | |
5220 if (_survivor_plab_array != NULL && | |
5221 (CMSPLABRecordAlways || | |
5222 (_collectorState > Marking && _collectorState < FinalMarking))) { | |
5223 assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds"); | |
5224 ChunkArray* ca = &_survivor_plab_array[thr_num]; | |
5225 ca->reset(); // clear it so that fresh data is recorded | |
5226 return (void*) ca; | |
5227 } else { | |
5228 return NULL; | |
5229 } | |
5230 } | |
5231 | |
5232 // Reset all the thread-local PLAB recording arrays | |
5233 void CMSCollector::reset_survivor_plab_arrays() { | |
5234 for (uint i = 0; i < ParallelGCThreads; i++) { | |
5235 _survivor_plab_array[i].reset(); | |
5236 } | |
5237 } | |
5238 | |
5239 // Merge the per-thread plab arrays into the global survivor chunk | |
5240 // array which will provide the partitioning of the survivor space | |
5241 // for CMS rescan. | |
5242 void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv) { | |
5243 assert(_survivor_plab_array != NULL, "Error"); | |
5244 assert(_survivor_chunk_array != NULL, "Error"); | |
5245 assert(_collectorState == FinalMarking, "Error"); | |
5246 for (uint j = 0; j < ParallelGCThreads; j++) { | |
5247 _cursor[j] = 0; | |
5248 } | |
5249 HeapWord* top = surv->top(); | |
5250 size_t i; | |
5251 for (i = 0; i < _survivor_chunk_capacity; i++) { // all sca entries | |
5252 HeapWord* min_val = top; // Higher than any PLAB address | |
5253 uint min_tid = 0; // position of min_val this round | |
5254 for (uint j = 0; j < ParallelGCThreads; j++) { | |
5255 ChunkArray* cur_sca = &_survivor_plab_array[j]; | |
5256 if (_cursor[j] == cur_sca->end()) { | |
5257 continue; | |
5258 } | |
5259 assert(_cursor[j] < cur_sca->end(), "ctl pt invariant"); | |
5260 HeapWord* cur_val = cur_sca->nth(_cursor[j]); | |
5261 assert(surv->used_region().contains(cur_val), "Out of bounds value"); | |
5262 if (cur_val < min_val) { | |
5263 min_tid = j; | |
5264 min_val = cur_val; | |
5265 } else { | |
5266 assert(cur_val < top, "All recorded addresses should be less"); | |
5267 } | |
5268 } | |
5269 // At this point min_val and min_tid are respectively | |
5270 // the least address in _survivor_plab_array[j]->nth(_cursor[j]) | |
5271 // and the thread (j) that witnesses that address. | |
5272 // We record this address in the _survivor_chunk_array[i] | |
5273 // and increment _cursor[min_tid] prior to the next round i. | |
5274 if (min_val == top) { | |
5275 break; | |
5276 } | |
5277 _survivor_chunk_array[i] = min_val; | |
5278 _cursor[min_tid]++; | |
5279 } | |
5280 // We are all done; record the size of the _survivor_chunk_array | |
5281 _survivor_chunk_index = i; // exclusive: [0, i) | |
5282 if (PrintCMSStatistics > 0) { | |
5283 gclog_or_tty->print(" (Survivor: " SIZE_FORMAT " chunks) ", i); | |
5284 } | |
5285 // Verify that we used up all the recorded entries | |
5286 #ifdef ASSERT | |
5287 size_t total = 0; | |
5288 for (uint j = 0; j < ParallelGCThreads; j++) { | |
5289 assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant"); | |
5290 total += _cursor[j]; | |
5291 } | |
5292 assert(total == _survivor_chunk_index, "Ctl Pt Invariant"); | |
5293 // Check that the merged array is in sorted order | |
5294 if (total > 0) { | |
5295 for (size_t i = 0; i < total - 1; i++) { | |
5296 if (PrintCMSStatistics > 0) { | |
5297 gclog_or_tty->print(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ", | |
5298 i, _survivor_chunk_array[i]); | |
5299 } | |
5300 assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1], | |
5301 "Not sorted"); | |
5302 } | |
5303 } | |
5304 #endif // ASSERT | |
5305 } | |
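// The merge above is a cursor-based k-way merge of per-thread sorted
// arrays. A self-contained sketch over plain arrays (hypothetical
// helper for illustration; not part of the original file):
static size_t kway_merge(HeapWord** arr[], const size_t len[],
                         size_t cur[],         // zero-initialized by caller
                         uint nthreads, HeapWord* out[],
                         HeapWord* sentinel) { // exceeds every input value
  size_t merged = 0;
  while (true) {
    HeapWord* min_val = sentinel;
    uint min_tid = 0;
    // Find the smallest unconsumed address across all threads' arrays.
    for (uint j = 0; j < nthreads; j++) {
      if (cur[j] < len[j] && arr[j][cur[j]] < min_val) {
        min_val = arr[j][cur[j]];
        min_tid = j;
      }
    }
    if (min_val == sentinel) break;  // every per-thread array exhausted
    out[merged++] = min_val;         // emit, then advance the winner
    cur[min_tid]++;
  }
  return merged;
}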
5306 | |
5307 // Set up the space's par_seq_tasks structure for work claiming | |
5308 // for parallel rescan of young gen. | |
5309 // See ParRescanTask where this is currently used. | |
5310 void | |
5311 CMSCollector:: | |
5312 initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) { | |
5313 assert(n_threads > 0, "Unexpected n_threads argument"); | |
5314 DefNewGeneration* dng = (DefNewGeneration*)_young_gen; | |
5315 | |
5316 // Eden space | |
5317 { | |
5318 SequentialSubTasksDone* pst = dng->eden()->par_seq_tasks(); | |
5319 assert(!pst->valid(), "Clobbering existing data?"); | |
5320 // Each valid entry in [0, _eden_chunk_index) represents a task. | |
5321 size_t n_tasks = _eden_chunk_index + 1; | |
5322 assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error"); | |
5323 pst->set_par_threads(n_threads); | |
5324 pst->set_n_tasks((int)n_tasks); | |
5325 } | |
5326 | |
5327 // Merge the survivor plab arrays into _survivor_chunk_array | |
5328 if (_survivor_plab_array != NULL) { | |
5329 merge_survivor_plab_arrays(dng->from()); | |
5330 } else { | |
5331 assert(_survivor_chunk_index == 0, "Error"); | |
5332 } | |
5333 | |
5334 // To space | |
5335 { | |
5336 SequentialSubTasksDone* pst = dng->to()->par_seq_tasks(); | |
5337 assert(!pst->valid(), "Clobbering existing data?"); | |
5338 pst->set_par_threads(n_threads); | |
5339 pst->set_n_tasks(1); | |
5340 assert(pst->valid(), "Error"); | |
5341 } | |
5342 | |
5343 // From space | |
5344 { | |
5345 SequentialSubTasksDone* pst = dng->from()->par_seq_tasks(); | |
5346 assert(!pst->valid(), "Clobbering existing data?"); | |
5347 size_t n_tasks = _survivor_chunk_index + 1; | |
5348 assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error"); | |
5349 pst->set_par_threads(n_threads); | |
5350 pst->set_n_tasks((int)n_tasks); | |
5351 assert(pst->valid(), "Error"); | |
5352 } | |
5353 } | |
5354 | |
5355 // Parallel version of remark | |
5356 void CMSCollector::do_remark_parallel() { | |
5357 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
5358 WorkGang* workers = gch->workers(); | |
5359 assert(workers != NULL, "Need parallel worker threads."); | |
5360 int n_workers = workers->total_workers(); | |
5361 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace(); | |
5362 CompactibleFreeListSpace* perm_space = _permGen->cmsSpace(); | |
5363 | |
5364 CMSParRemarkTask tsk(this, | |
5365 cms_space, perm_space, | |
5366 n_workers, workers, task_queues()); | |
5367 | |
5368 // Set up for parallel process_strong_roots work. | |
5369 gch->set_par_threads(n_workers); | |
5370 gch->change_strong_roots_parity(); | |
5371 // We won't be iterating over the cards in the card table updating | |
5372 // the younger_gen cards, so we shouldn't call the following else | |
5373 // the verification code as well as subsequent younger_refs_iterate | |
5374 // code would get confused. XXX | |
5375 // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel | |
5376 | |
5377 // The young gen rescan work will not be done as part of | |
5378 // process_strong_roots (which currently doesn't know how to | |
5379 // parallelize such a scan), but rather will be broken up into | |
5380 // a set of parallel tasks (via the sampling that the [abortable] | |
5381 // preclean phase did of EdenSpace, plus the [two] tasks of | |
5382 // scanning the [two] survivor spaces). Further fine-grained | |
5383 // parallelization of the scanning of the survivor spaces | |
5384 // themselves, and of precleaning of the younger gen itself | |
5385 // is deferred to the future. | |
5386 initialize_sequential_subtasks_for_young_gen_rescan(n_workers); | |
5387 | |
5388 // The dirty card rescan work is broken up into a "sequence" | |
5389 // of parallel tasks (per constituent space) that are dynamically | |
5390 // claimed by the parallel threads. | |
5391 cms_space->initialize_sequential_subtasks_for_rescan(n_workers); | |
5392 perm_space->initialize_sequential_subtasks_for_rescan(n_workers); | |
5393 | |
5394 // It turns out that even when we're using 1 thread, doing the work in a | |
5395 // separate thread causes wide variance in run times. We can't help this | |
5396 // in the multi-threaded case, but we special-case n=1 here to get | |
5397 // repeatable measurements of the 1-thread overhead of the parallel code. | |
5398 if (n_workers > 1) { | |
5399 // Make refs discovery MT-safe | |
5400 ReferenceProcessorMTMutator mt(ref_processor(), true); | |
5401 workers->run_task(&tsk); | |
5402 } else { | |
5403 tsk.work(0); | |
5404 } | |
5405 gch->set_par_threads(0); // 0 ==> non-parallel. | |
5406 // restore, single-threaded for now, any preserved marks | |
5407 // as a result of work_q overflow | |
5408 restore_preserved_marks_if_any(); | |
5409 } | |
5410 | |
5411 // Non-parallel version of remark | |
5412 void CMSCollector::do_remark_non_parallel() { | |
5413 ResourceMark rm; | |
5414 HandleMark hm; | |
5415 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
5416 MarkRefsIntoAndScanClosure | |
5417 mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable, | |
5418 &_markStack, &_revisitStack, this, | |
5419 false /* should_yield */, false /* not precleaning */); | |
5420 MarkFromDirtyCardsClosure | |
5421 markFromDirtyCardsClosure(this, _span, | |
5422 NULL, // space is set further below | |
5423 &_markBitMap, &_markStack, &_revisitStack, | |
5424 &mrias_cl); | |
5425 { | |
5426 TraceTime t("grey object rescan", PrintGCDetails, false, gclog_or_tty); | |
5427 // Iterate over the dirty cards, marking them precleaned, and | |
5428 // setting the corresponding bits in the mod union table. | |
5429 { | |
5430 ModUnionClosure modUnionClosure(&_modUnionTable); | |
5431 _ct->ct_bs()->dirty_card_iterate( | |
5432 _cmsGen->used_region(), | |
5433 &modUnionClosure); | |
5434 _ct->ct_bs()->dirty_card_iterate( | |
5435 _permGen->used_region(), | |
5436 &modUnionClosure); | |
5437 } | |
5438 // Having transferred these marks into the modUnionTable, we just need | |
5439 // to rescan the marked objects on the dirty cards in the modUnionTable. | |
5440 // The initial marking may have been done during an asynchronous | |
5441 // collection so there may be dirty bits in the mod-union table. | |
5442 const int alignment = | |
5443 CardTableModRefBS::card_size * BitsPerWord; | |
5444 { | |
5445 // ... First handle dirty cards in CMS gen | |
5446 markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace()); | |
5447 MemRegion ur = _cmsGen->used_region(); | |
5448 HeapWord* lb = ur.start(); | |
5449 HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment); | |
5450 MemRegion cms_span(lb, ub); | |
5451 _modUnionTable.dirty_range_iterate_clear(cms_span, | |
5452 &markFromDirtyCardsClosure); | |
5453 verify_work_stacks_empty(); | |
5454 if (PrintCMSStatistics != 0) { | |
5455 gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in cms gen) ", | |
5456 markFromDirtyCardsClosure.num_dirty_cards()); | |
5457 } | |
5458 } | |
5459 { | |
5460 // .. and then repeat for dirty cards in perm gen | |
5461 markFromDirtyCardsClosure.set_space(_permGen->cmsSpace()); | |
5462 MemRegion ur = _permGen->used_region(); | |
5463 HeapWord* lb = ur.start(); | |
5464 HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment); | |
5465 MemRegion perm_span(lb, ub); | |
5466 _modUnionTable.dirty_range_iterate_clear(perm_span, | |
5467 &markFromDirtyCardsClosure); | |
5468 verify_work_stacks_empty(); | |
5469 if (PrintCMSStatistics != 0) { | |
5470 gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in perm gen) ", | |
5471 markFromDirtyCardsClosure.num_dirty_cards()); | |
5472 } | |
5473 } | |
5474 } | |
5475 if (VerifyDuringGC && | |
5476 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { | |
5477 HandleMark hm; // Discard invalid handles created during verification | |
5478 Universe::verify(true); | |
5479 } | |
5480 { | |
5481 TraceTime t("root rescan", PrintGCDetails, false, gclog_or_tty); | |
5482 | |
5483 verify_work_stacks_empty(); | |
5484 | |
5485 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. | |
5486 gch->gen_process_strong_roots(_cmsGen->level(), | |
5487 true, // younger gens as roots | |
5488 true, // collecting perm gen | |
5489 SharedHeap::ScanningOption(roots_scanning_options()), | |
5490 NULL, &mrias_cl); | |
5491 } | |
5492 verify_work_stacks_empty(); | |
5493 // Restore evacuated mark words, if any, used for overflow list links | |
5494 if (!CMSOverflowEarlyRestoration) { | |
5495 restore_preserved_marks_if_any(); | |
5496 } | |
5497 verify_overflow_empty(); | |
5498 } | |
5499 | |
5500 //////////////////////////////////////////////////////// | |
5501 // Parallel Reference Processing Task Proxy Class | |
5502 //////////////////////////////////////////////////////// | |
5503 class CMSRefProcTaskProxy: public AbstractGangTask { | |
5504 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask; | |
5505 CMSCollector* _collector; | |
5506 CMSBitMap* _mark_bit_map; | |
143 | 5507 const MemRegion _span; |
0 | 5508 OopTaskQueueSet* _task_queues; |
5509 ParallelTaskTerminator _term; | |
5510 ProcessTask& _task; | |
5511 | |
5512 public: | |
5513 CMSRefProcTaskProxy(ProcessTask& task, | |
5514 CMSCollector* collector, | |
5515 const MemRegion& span, | |
5516 CMSBitMap* mark_bit_map, | |
5517 int total_workers, | |
5518 OopTaskQueueSet* task_queues): | |
5519 AbstractGangTask("Process referents by policy in parallel"), | |
5520 _task(task), | |
5521 _collector(collector), _span(span), _mark_bit_map(mark_bit_map), | |
5522 _task_queues(task_queues), | |
5523 _term(total_workers, task_queues) | |
143 | 5524 { |
5525   assert(_collector->_span.equals(_span) && !_span.is_empty(), | |
5526          "Inconsistency in _span"); | |
5527 } | |
0 | 5528 |
5529 OopTaskQueueSet* task_queues() { return _task_queues; } | |
5530 | |
5531 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); } | |
5532 | |
5533 ParallelTaskTerminator* terminator() { return &_term; } | |
5534 | |
5535 void do_work_steal(int i, | |
5536 CMSParDrainMarkingStackClosure* drain, | |
5537 CMSParKeepAliveClosure* keep_alive, | |
5538 int* seed); | |
5539 | |
5540 virtual void work(int i); | |
5541 }; | |
5542 | |
5543 void CMSRefProcTaskProxy::work(int i) { | |
143 | 5544 assert(_collector->_span.equals(_span), "Inconsistency in _span"); |
0 | 5545 CMSParKeepAliveClosure par_keep_alive(_collector, _span, |
5546 _mark_bit_map, work_queue(i)); | |
5547 CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span, | |
5548 _mark_bit_map, work_queue(i)); | |
143 | 5549 CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map); |
0 | 5550 _task.work(i, is_alive_closure, par_keep_alive, par_drain_stack); |
5551 if (_task.marks_oops_alive()) { | |
5552 do_work_steal(i, &par_drain_stack, &par_keep_alive, | |
5553 _collector->hash_seed(i)); | |
5554 } | |
5555 assert(work_queue(i)->size() == 0, "work_queue should be empty"); | |
5556 assert(_collector->_overflow_list == NULL, "non-empty _overflow_list"); | |
5557 } | |
5558 | |
5559 class CMSRefEnqueueTaskProxy: public AbstractGangTask { | |
5560 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask; | |
5561 EnqueueTask& _task; | |
5562 | |
5563 public: | |
5564 CMSRefEnqueueTaskProxy(EnqueueTask& task) | |
5565 : AbstractGangTask("Enqueue reference objects in parallel"), | |
5566 _task(task) | |
5567 { } | |
5568 | |
5569 virtual void work(int i) | |
5570 { | |
5571 _task.work(i); | |
5572 } | |
5573 }; | |
5574 | |
5575 CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector, | |
5576 MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue): | |
5577 _collector(collector), | |
5578 _span(span), | |
5579 _bit_map(bit_map), | |
5580 _work_queue(work_queue), | |
5581 _mark_and_push(collector, span, bit_map, work_queue), | |
5582 _low_water_mark(MIN2((uint)(work_queue->max_elems()/4), | |
5583 (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))) | |
5584 { } | |
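// Worked example for the low water mark (assumed illustrative values,
// not from this file: max_elems == 16384, CMSWorkQueueDrainThreshold ==
// 10, ParallelGCThreads == 4): MIN2(16384/4, 10*4) == 40, so the queue
// is drained once it holds more than 40 entries.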
5585 | |
5586 // . see if we can share work_queues with ParNew? XXX | |
5587 void CMSRefProcTaskProxy::do_work_steal(int i, | |
5588 CMSParDrainMarkingStackClosure* drain, | |
5589 CMSParKeepAliveClosure* keep_alive, | |
5590 int* seed) { | |
5591 OopTaskQueue* work_q = work_queue(i); | |
5592 NOT_PRODUCT(int num_steals = 0;) | |
5593 oop obj_to_scan; | |
5594 size_t num_from_overflow_list = | |
5595 MIN2((size_t)work_q->max_elems()/4, | |
5596 (size_t)ParGCDesiredObjsFromOverflowList); | |
5597 | |
5598 while (true) { | |
5599 // Completely finish any left over work from (an) earlier round(s) | |
5600 drain->trim_queue(0); | |
5601 // Now check if there's any work in the overflow list | |
5602 if (_collector->par_take_from_overflow_list(num_from_overflow_list, | |
5603 work_q)) { | |
5604 // Found something in global overflow list; | |
5605 // not yet ready to go stealing work from others. | |
5606 // We'd like to assert(work_q->size() != 0, ...) | |
5607 // because we just took work from the overflow list, | |
5608 // but of course we can't, since all of that might have | |
5609 // been already stolen from us. | |
5610 continue; | |
5611 } | |
5612 // Verify that we have no work before we resort to stealing | |
5613 assert(work_q->size() == 0, "Have work, shouldn't steal"); | |
5614 // Try to steal from other queues that have work | |
5615 if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) { | |
5616 NOT_PRODUCT(num_steals++;) | |
5617 assert(obj_to_scan->is_oop(), "Oops, not an oop!"); | |
5618 assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?"); | |
5619 // Do scanning work | |
5620 obj_to_scan->oop_iterate(keep_alive); | |
5621 // Loop around, finish this work, and try to steal some more | |
5622 } else if (terminator()->offer_termination()) { | |
5623 break; // nirvana from the infinite cycle | |
5624 } | |
5625 } | |
5626 NOT_PRODUCT( | |
5627 if (PrintCMSStatistics != 0) { | |
5628 gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals); | |
5629 } | |
5630 ) | |
5631 } | |
5632 | |
5633 void CMSRefProcTaskExecutor::execute(ProcessTask& task) | |
5634 { | |
5635 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
5636 WorkGang* workers = gch->workers(); | |
5637 assert(workers != NULL, "Need parallel worker threads."); | |
5638 int n_workers = workers->total_workers(); | |
5639 CMSRefProcTaskProxy rp_task(task, &_collector, | |
5640 _collector.ref_processor()->span(), | |
5641 _collector.markBitMap(), | |
5642 n_workers, _collector.task_queues()); | |
5643 workers->run_task(&rp_task); | |
5644 } | |
5645 | |
5646 void CMSRefProcTaskExecutor::execute(EnqueueTask& task) | |
5647 { | |
5648 | |
5649 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
5650 WorkGang* workers = gch->workers(); | |
5651 assert(workers != NULL, "Need parallel worker threads."); | |
5652 CMSRefEnqueueTaskProxy enq_task(task); | |
5653 workers->run_task(&enq_task); | |
5654 } | |
5655 | |
5656 void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) { | |
5657 | |
5658 ResourceMark rm; | |
5659 HandleMark hm; | |
5660 ReferencePolicy* soft_ref_policy; | |
5661 | |
5662 assert(!ref_processor()->enqueuing_is_done(), "Enqueuing should not be complete"); | |
5663 // Process weak references. | |
5664 if (clear_all_soft_refs) { | |
5665 soft_ref_policy = new AlwaysClearPolicy(); | |
5666 } else { | |
5667 #ifdef COMPILER2 | |
5668 soft_ref_policy = new LRUMaxHeapPolicy(); | |
5669 #else | |
5670 soft_ref_policy = new LRUCurrentHeapPolicy(); | |
5671 #endif // COMPILER2 | |
5672 } | |
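// (Both LRU policies clear a soft reference only if it has gone
// unreferenced longer than SoftRefLRUPolicyMSPerMB milliseconds per
// megabyte of free heap; roughly, they differ in whether "free" is
// measured against the maximum or the current heap size.)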
5673 verify_work_stacks_empty(); | |
5674 | |
5675 ReferenceProcessor* rp = ref_processor(); | |
5676 assert(rp->span().equals(_span), "Spans should be equal"); | |
5677 CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap, | |
5678 &_markStack); | |
5679 CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this, | |
5680 _span, &_markBitMap, &_markStack, | |
5681 &cmsKeepAliveClosure); | |
5682 { | |
5683 TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty); | |
5684 if (rp->processing_is_mt()) { | |
5685 CMSRefProcTaskExecutor task_executor(*this); | |
5686 rp->process_discovered_references(soft_ref_policy, | |
5687 &_is_alive_closure, | |
5688 &cmsKeepAliveClosure, | |
5689 &cmsDrainMarkingStackClosure, | |
5690 &task_executor); | |
5691 } else { | |
5692 rp->process_discovered_references(soft_ref_policy, | |
5693 &_is_alive_closure, | |
5694 &cmsKeepAliveClosure, | |
5695 &cmsDrainMarkingStackClosure, | |
5696 NULL); | |
5697 } | |
5698 verify_work_stacks_empty(); | |
5699 } | |
5700 | |
94 | 5701 if (should_unload_classes()) { |
0 | 5702 { |
5703 TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty); | |
5704 | |
5705 // Follow SystemDictionary roots and unload classes | |
5706 bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure); | |
5707 | |
5708 // Follow CodeCache roots and unload any methods marked for unloading | |
5709 CodeCache::do_unloading(&_is_alive_closure, | |
5710 &cmsKeepAliveClosure, | |
5711 purged_class); | |
5712 | |
5713 cmsDrainMarkingStackClosure.do_void(); | |
5714 verify_work_stacks_empty(); | |
5715 | |
5716 // Update subklass/sibling/implementor links in KlassKlass descendants | |
5717 assert(!_revisitStack.isEmpty(), "revisit stack should not be empty"); | |
5718 oop k; | |
5719 while ((k = _revisitStack.pop()) != NULL) { | |
5720 ((Klass*)(oopDesc*)k)->follow_weak_klass_links( | |
5721 &_is_alive_closure, | |
5722 &cmsKeepAliveClosure); | |
5723 } | |
5724 assert(!ClassUnloading || | |
5725 (_markStack.isEmpty() && overflow_list_is_empty()), | |
5726 "Should not have found new reachable objects"); | |
5727 assert(_revisitStack.isEmpty(), "revisit stack should have been drained"); | |
5728 cmsDrainMarkingStackClosure.do_void(); | |
5729 verify_work_stacks_empty(); | |
5730 } | |
5731 | |
5732 { | |
5733 TraceTime t("scrub symbol & string tables", PrintGCDetails, false, gclog_or_tty); | |
5734 // Now clean up stale oops in SymbolTable and StringTable | |
5735 SymbolTable::unlink(&_is_alive_closure); | |
5736 StringTable::unlink(&_is_alive_closure); | |
5737 } | |
5738 } | |
5739 | |
5740 verify_work_stacks_empty(); | |
5741 // Restore any preserved marks as a result of mark stack or | |
5742 // work queue overflow | |
5743 restore_preserved_marks_if_any(); // done single-threaded for now | |
5744 | |
5745 rp->set_enqueuing_is_done(true); | |
5746 if (rp->processing_is_mt()) { | |
5747 CMSRefProcTaskExecutor task_executor(*this); | |
5748 rp->enqueue_discovered_references(&task_executor); | |
5749 } else { | |
5750 rp->enqueue_discovered_references(NULL); | |
5751 } | |
5752 rp->verify_no_references_recorded(); | |
5753 assert(!rp->discovery_enabled(), "should have been disabled"); | |
5754 | |
5755 // JVMTI object tagging is based on JNI weak refs. If any of these | |
5756 // refs were cleared then JVMTI needs to update its maps and | |
5757 // maybe post ObjectFrees to agents. | |
5758 JvmtiExport::cms_ref_processing_epilogue(); | |
5759 } | |
5760 | |
5761 #ifndef PRODUCT | |
5762 void CMSCollector::check_correct_thread_executing() { | |
5763 Thread* t = Thread::current(); | |
5764 // Only the VM thread or the CMS thread should be here. | |
5765 assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(), | |
5766 "Unexpected thread type"); | |
5767 // If this is the vm thread, the foreground process | |
5768 // should not be waiting. Note that _foregroundGCIsActive is | |
5769 // true while the foreground collector is waiting. | |
5770 if (_foregroundGCShouldWait) { | |
5771 // We cannot be the VM thread | |
5772 assert(t->is_ConcurrentGC_thread(), | |
5773 "Should be CMS thread"); | |
5774 } else { | |
5775 // We can be the CMS thread only if we are in a stop-world | |
5776 // phase of CMS collection. | |
5777 if (t->is_ConcurrentGC_thread()) { | |
5778 assert(_collectorState == InitialMarking || | |
5779 _collectorState == FinalMarking, | |
5780 "Should be a stop-world phase"); | |
5781 // The CMS thread should be holding the CMS_token. | |
5782 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), | |
5783 "Potential interference with concurrently " | |
5784 "executing VM thread"); | |
5785 } | |
5786 } | |
5787 } | |
5788 #endif | |
5789 | |
5790 void CMSCollector::sweep(bool asynch) { | |
5791 assert(_collectorState == Sweeping, "just checking"); | |
5792 check_correct_thread_executing(); | |
5793 verify_work_stacks_empty(); | |
5794 verify_overflow_empty(); | |
5795 incrementSweepCount(); | |
5796 _sweep_timer.stop(); | |
5797 _sweep_estimate.sample(_sweep_timer.seconds()); | |
5798 size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free()); | |
5799 | |
5800 // PermGen verification support: If perm gen sweeping is disabled in | |
5801 // this cycle, we preserve the perm gen object "deadness" information | |
5802 // in the perm_gen_verify_bit_map. In order to do that we traverse | |
5803 // all blocks in perm gen and mark all dead objects. | |
94 | 5804 if (verifying() && !should_unload_classes()) { |
0 | 5805 assert(perm_gen_verify_bit_map()->sizeInBits() != 0, |
5806 "Should have already been allocated"); | |
5807 MarkDeadObjectsClosure mdo(this, _permGen->cmsSpace(), | |
5808 markBitMap(), perm_gen_verify_bit_map()); | |
7 | 5809     if (asynch) { |
5810       CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(), | |
5811                                bitMapLock()); | |
5812       _permGen->cmsSpace()->blk_iterate(&mdo); | |
5813     } else { | |
5814       // In the case of synchronous sweep, we already have | |
5815       // the requisite locks/tokens. | |
5816       _permGen->cmsSpace()->blk_iterate(&mdo); | |
5817     } | |
0 | 5818 } |
5819 | |
5820 if (asynch) { | |
5821 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); | |
5822 CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails); | |
5823 // First sweep the old gen then the perm gen | |
5824 { | |
5825 CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(), | |
5826 bitMapLock()); | |
5827 sweepWork(_cmsGen, asynch); | |
5828 } | |
5829 | |
5830 // Now repeat for perm gen | |
94 | 5831 if (should_unload_classes()) { |
0 | 5832 CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(), |
5833 bitMapLock()); | |
5834 sweepWork(_permGen, asynch); | |
5835 } | |
5836 | |
5837 // Update Universe::_heap_*_at_gc figures. | |
5838 // We need all the free list locks to make the abstract state | |
5839 // transition from Sweeping to Resetting. See detailed note | |
5840 // further below. | |
5841 { | |
5842 CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(), | |
5843 _permGen->freelistLock()); | |
5844 // Update heap occupancy information which is used as | |
5845 // input to soft ref clearing policy at the next gc. | |
5846 Universe::update_heap_info_at_gc(); | |
5847 _collectorState = Resizing; | |
5848 } | |
5849 } else { | |
5850 // already have needed locks | |
5851 sweepWork(_cmsGen, asynch); | |
5852 | |
94 | 5853 if (should_unload_classes()) { |
0 | 5854 sweepWork(_permGen, asynch); |
5855 } | |
5856 // Update heap occupancy information which is used as | |
5857 // input to soft ref clearing policy at the next gc. | |
5858 Universe::update_heap_info_at_gc(); | |
5859 _collectorState = Resizing; | |
5860 } | |
5861 verify_work_stacks_empty(); | |
5862 verify_overflow_empty(); | |
5863 | |
5864 _sweep_timer.reset(); | |
5865 _sweep_timer.start(); | |
5866 | |
5867 update_time_of_last_gc(os::javaTimeMillis()); | |
5868 | |
5869 // NOTE on abstract state transitions: | |
5870 // Mutators allocate-live and/or mark the mod-union table dirty | |
5871 // based on the state of the collection. The former is done in | |
5872 // the interval [Marking, Sweeping] and the latter in the interval | |
5873 // [Marking, Sweeping). Thus the transitions into the Marking state | |
5874 // and out of the Sweeping state must be synchronously visible | |
5875 // globally to the mutators. | |
5876 // The transition into the Marking state happens with the world | |
5877 // stopped so the mutators will globally see it. Sweeping is | |
5878 // done asynchronously by the background collector so the transition | |
5879 // from the Sweeping state to the Resizing state must be done | |
5880 // under the freelistLock (as is the check for whether to | |
5881 // allocate-live and whether to dirty the mod-union table). | |
5882 assert(_collectorState == Resizing, "Change of collector state to" | |
5883 " Resizing must be done under the freelistLocks (plural)"); | |
5884 | |
5885 // Now that sweeping has been completed, if the GCH's | |
5886 // incremental_collection_will_fail flag is set, clear it, | |
5887 // thus inviting a younger gen collection to promote into | |
5888 // this generation. If such a promotion may still fail, | |
5889 // the flag will be set again when a young collection is | |
5890 // attempted. | |
5891 // I think the incremental_collection_will_fail flag's use | |
5892 // is specific to a two-generation collection policy, so I'll | |
5893 // assert that that's the configuration we are operating within. | |
5894 // The use of the flag can and should be generalized appropriately | |
5895 // in the future to deal with a general n-generation system. | |
5896 | |
5897 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
5898 assert(gch->collector_policy()->is_two_generation_policy(), | |
5899 "Resetting of incremental_collection_will_fail flag" | |
5900 " may be incorrect otherwise"); | |
5901 gch->clear_incremental_collection_will_fail(); | |
5902 gch->update_full_collections_completed(_collection_count_start); | |
5903 } | |
5904 | |
5905 // FIX ME!!! Looks like this belongs in CFLSpace, with | |
5906 // CMSGen merely delegating to it. | |
5907 void ConcurrentMarkSweepGeneration::setNearLargestChunk() { | |
5908 double nearLargestPercent = 0.999; | |
5909 HeapWord* minAddr = _cmsSpace->bottom(); | |
5910 HeapWord* largestAddr = | |
5911 (HeapWord*) _cmsSpace->dictionary()->findLargestDict(); | |
5912 if (largestAddr == 0) { | |
5913 // The dictionary appears to be empty. In this case | |
5914 // try to coalesce at the end of the heap. | |
5915 largestAddr = _cmsSpace->end(); | |
5916 } | |
5917 size_t largestOffset = pointer_delta(largestAddr, minAddr); | |
5918 size_t nearLargestOffset = | |
5919 (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize; | |
5920 _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset); | |
5921 } | |
5922 | |
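As a worked example of the setNearLargestChunk() arithmetic above, here is a minimal standalone C++ sketch. All names are editorial stand-ins (HeapWordAddr and MinChunkSizeWords approximate the VM's HeapWord* and MinChunkSize), not HotSpot code:

#include <cassert>
#include <cstddef>

typedef unsigned long HeapWordAddr;   // addresses measured in heap words (stand-in)
const size_t MinChunkSizeWords = 4;   // assumed minimal chunk size

// Threshold 99.9% of the way toward the largest chunk, backed off by one
// minimal chunk so a free block can still be formed at the boundary.
HeapWordAddr near_largest_chunk(HeapWordAddr min_addr, HeapWordAddr largest_addr) {
  size_t largest_offset = largest_addr - min_addr;
  size_t near_offset = (size_t)((double)largest_offset * 0.999) - MinChunkSizeWords;
  return min_addr + near_offset;
}

int main() {
  // For a 1,000,000-word span: 999,000 - 4 = 998,996.
  assert(near_largest_chunk(0, 1000000) == 998996);
  return 0;
}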
5923 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) { | |
5924 return addr >= _cmsSpace->nearLargestChunk(); | |
5925 } | |
5926 | |
5927 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() { | |
5928 return _cmsSpace->find_chunk_at_end(); | |
5929 } | |
5930 | |
5931 void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level, | |
5932 bool full) { | |
5933 // The next lower level has been collected. Gather any statistics | |
5934 // that are of interest at this point. | |
5935 if (!full && (current_level + 1) == level()) { | |
5936 // Gather statistics on the young generation collection. | |
5937 collector()->stats().record_gc0_end(used()); | |
5938 } | |
5939 } | |
5940 | |
5941 CMSAdaptiveSizePolicy* ConcurrentMarkSweepGeneration::size_policy() { | |
5942 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
5943 assert(gch->kind() == CollectedHeap::GenCollectedHeap, | |
5944 "Wrong type of heap"); | |
5945 CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*) | |
5946 gch->gen_policy()->size_policy(); | |
5947 assert(sp->is_gc_cms_adaptive_size_policy(), | |
5948 "Wrong type of size policy"); | |
5949 return sp; | |
5950 } | |
5951 | |
5952 void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() { | |
5953 if (PrintGCDetails && Verbose) { | |
5954 gclog_or_tty->print("Rotate from %d ", _debug_collection_type); | |
5955 } | |
5956 _debug_collection_type = (CollectionTypes) (_debug_collection_type + 1); | |
5957 _debug_collection_type = | |
5958 (CollectionTypes) (_debug_collection_type % Unknown_collection_type); | |
5959 if (PrintGCDetails && Verbose) { | |
5960 gclog_or_tty->print_cr("to %d ", _debug_collection_type); | |
5961 } | |
5962 } | |
5963 | |
5964 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen, | |
5965 bool asynch) { | |
5966 // We iterate over the space(s) underlying this generation, | |
5967 // checking the mark bit map to see if the bits corresponding | |
5968 // to specific blocks are marked or not. Blocks that are | |
5969 // marked are live and are not swept up. All remaining blocks | |
5970 // are swept up, with coalescing on-the-fly as we sweep up | |
5971 // contiguous free and/or garbage blocks: | |
5972 // We need to ensure that the sweeper synchronizes with allocators | |
5973 // and stop-the-world collectors. In particular, the following | |
5974 // locks are used: | |
5975 // . CMS token: if this is held, a stop the world collection cannot occur | |
5976 // . freelistLock: if this is held no allocation can occur from this | |
5977 // generation by another thread | |
5978 // . bitMapLock: if this is held, no other thread can access or update | |
5979 // | |
5980 | |
5981 // Note that we need to hold the freelistLock if we use | |
5982 // block iterate below; else the iterator might go awry if | |
5983 // a mutator (or promotion) causes block contents to change | |
5984 // (for instance if the allocator divvies up a block). | |
5985 // If we hold the free list lock, for all practical purposes | |
5986 // young generation GC's can't occur (they'll usually need to | |
5987 // promote), so we might as well prevent all young generation | |
5988 // GC's while we do a sweeping step. For the same reason, we might | |
5989 // as well take the bit map lock for the entire duration | |
5990 | |
5991 // check that we hold the requisite locks | |
5992 assert(have_cms_token(), "Should hold cms token"); | |
5993 assert( (asynch && ConcurrentMarkSweepThread::cms_thread_has_cms_token()) | |
5994 || (!asynch && ConcurrentMarkSweepThread::vm_thread_has_cms_token()), | |
5995 "Should possess CMS token to sweep"); | |
5996 assert_lock_strong(gen->freelistLock()); | |
5997 assert_lock_strong(bitMapLock()); | |
5998 | |
5999 assert(!_sweep_timer.is_active(), "Was switched off in an outer context"); | |
6000 gen->cmsSpace()->beginSweepFLCensus((float)(_sweep_timer.seconds()), | |
6001 _sweep_estimate.padded_average()); | |
6002 gen->setNearLargestChunk(); | |
6003 | |
6004 { | |
6005 SweepClosure sweepClosure(this, gen, &_markBitMap, | |
6006 CMSYield && asynch); | |
6007 gen->cmsSpace()->blk_iterate_careful(&sweepClosure); | |
6008 // We need to free-up/coalesce garbage/blocks from a | |
6009 // co-terminal free run. This is done in the SweepClosure | |
6010 // destructor; so, do not remove this scope, else the | |
6011 // end-of-sweep-census below will be off by a little bit. | |
6012 } | |
6013 gen->cmsSpace()->sweep_completed(); | |
6014 gen->cmsSpace()->endSweepFLCensus(sweepCount()); | |
94 | 6015 if (should_unload_classes()) { // unloaded classes this cycle, |
6016 _concurrent_cycles_since_last_unload = 0; // ... reset count |
6017 } else { // did not unload classes, |
6018 _concurrent_cycles_since_last_unload++; // ... increment count |
6019 } |
0 | 6020 } |
6021 | |
6022 // Reset CMS data structures (for now just the marking bit map) | |
6023 // preparatory for the next cycle. | |
6024 void CMSCollector::reset(bool asynch) { | |
6025 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
6026 CMSAdaptiveSizePolicy* sp = size_policy(); | |
6027 AdaptiveSizePolicyOutput(sp, gch->total_collections()); | |
6028 if (asynch) { | |
6029 CMSTokenSyncWithLocks ts(true, bitMapLock()); | |
6030 | |
6031 // If the state is not "Resetting", the foreground thread | |
6032 // has done a collection and the resetting. | |
6033 if (_collectorState != Resetting) { | |
6034 assert(_collectorState == Idling, "The state should only change" | |
6035 " because the foreground collector has finished the collection"); | |
6036 return; | |
6037 } | |
6038 | |
6039 // Clear the mark bitmap (no grey objects to start with) | |
6040 // for the next cycle. | |
6041 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); | |
6042 CMSPhaseAccounting cmspa(this, "reset", !PrintGCDetails); | |
6043 | |
6044 HeapWord* curAddr = _markBitMap.startWord(); | |
6045 while (curAddr < _markBitMap.endWord()) { | |
6046 size_t remaining = pointer_delta(_markBitMap.endWord(), curAddr); | |
6047 MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining)); | |
6048 _markBitMap.clear_large_range(chunk); | |
6049 if (ConcurrentMarkSweepThread::should_yield() && | |
6050 !foregroundGCIsActive() && | |
6051 CMSYield) { | |
6052 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), | |
6053 "CMS thread should hold CMS token"); | |
6054 assert_lock_strong(bitMapLock()); | |
6055 bitMapLock()->unlock(); | |
6056 ConcurrentMarkSweepThread::desynchronize(true); | |
6057 ConcurrentMarkSweepThread::acknowledge_yield_request(); | |
6058 stopTimer(); | |
6059 if (PrintCMSStatistics != 0) { | |
6060 incrementYields(); | |
6061 } | |
6062 icms_wait(); | |
6063 | |
6064 // See the comment in coordinator_yield() | |
6065 for (unsigned i = 0; i < CMSYieldSleepCount && | |
113 | 6066 ConcurrentMarkSweepThread::should_yield() && |
6067 !CMSCollector::foregroundGCIsActive(); ++i) { |
0 | 6068 os::sleep(Thread::current(), 1, false); |
6069 ConcurrentMarkSweepThread::acknowledge_yield_request(); | |
6070 } | |
6071 | |
6072 ConcurrentMarkSweepThread::synchronize(true); | |
6073 bitMapLock()->lock_without_safepoint_check(); | |
6074 startTimer(); | |
6075 } | |
6076 curAddr = chunk.end(); | |
6077 } | |
6078 _collectorState = Idling; | |
6079 } else { | |
6080 // already have the lock | |
6081 assert(_collectorState == Resetting, "just checking"); | |
6082 assert_lock_strong(bitMapLock()); | |
6083 _markBitMap.clear_all(); | |
6084 _collectorState = Idling; | |
6085 } | |
6086 | |
6087 // Stop incremental mode after a cycle completes, so that any future cycles | |
6088 // are triggered by allocation. | |
6089 stop_icms(); | |
6090 | |
6091 NOT_PRODUCT( | |
6092 if (RotateCMSCollectionTypes) { | |
6093 _cmsGen->rotate_debug_collection_type(); | |
6094 } | |
6095 ) | |
6096 } | |
6097 | |
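The clearing loop in reset() above works through the bit map one CMSBitMapYieldQuantum at a time so the bitMapLock never stays held for a long stretch. A sketch of that pattern under simplified assumptions (std::vector<bool> stands in for the VM bit map; the lock handoff is elided to a comment):

#include <algorithm>
#include <cstddef>
#include <vector>

void clear_in_chunks(std::vector<bool>& bits, size_t quantum) {
  size_t cur = 0;
  while (cur < bits.size()) {
    size_t len = std::min(quantum, bits.size() - cur);
    std::fill(bits.begin() + cur, bits.begin() + cur + len, false);
    cur += len;
    // yield point: the real code unlocks bitMapLock here, sleeps briefly
    // if a foreground GC wants in, then relocks before the next chunk
  }
}

int main() {
  std::vector<bool> bits(1000, true);
  clear_in_chunks(bits, 128);  // ~8 chunks, so ~8 potential yield points
  return 0;
}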
6098 void CMSCollector::do_CMS_operation(CMS_op_type op) { | |
6099 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); | |
6100 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); | |
6101 TraceTime t("GC", PrintGC, !PrintGCDetails, gclog_or_tty); | |
6102 TraceCollectorStats tcs(counters()); | |
6103 | |
6104 switch (op) { | |
6105 case CMS_op_checkpointRootsInitial: { | |
6106 checkpointRootsInitial(true); // asynch | |
6107 if (PrintGC) { | |
6108 _cmsGen->printOccupancy("initial-mark"); | |
6109 } | |
6110 break; | |
6111 } | |
6112 case CMS_op_checkpointRootsFinal: { | |
6113 checkpointRootsFinal(true, // asynch | |
6114 false, // !clear_all_soft_refs | |
6115 false); // !init_mark_was_synchronous | |
6116 if (PrintGC) { | |
6117 _cmsGen->printOccupancy("remark"); | |
6118 } | |
6119 break; | |
6120 } | |
6121 default: | |
6122 fatal("No such CMS_op"); | |
6123 } | |
6124 } | |
6125 | |
6126 #ifndef PRODUCT | |
6127 size_t const CMSCollector::skip_header_HeapWords() { | |
6128 return FreeChunk::header_size(); | |
6129 } | |
6130 | |
6131 // Try and collect here conditions that should hold when | |
6132 // CMS thread is exiting. The idea is that the foreground GC | |
6133 // thread should not be blocked if it wants to terminate | |
6134 // the CMS thread and yet continue to run the VM for a while | |
6135 // after that. | |
6136 void CMSCollector::verify_ok_to_terminate() const { | |
6137 assert(Thread::current()->is_ConcurrentGC_thread(), | |
6138 "should be called by CMS thread"); | |
6139 assert(!_foregroundGCShouldWait, "should be false"); | |
6140 // We could check here that all the various low-level locks | |
6141 // are not held by the CMS thread, but that is overkill; see | |
6142 // also CMSThread::verify_ok_to_terminate() where the CGC_lock | |
6143 // is checked. | |
6144 } | |
6145 #endif | |
6146 | |
6147 size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const { | |
6148 assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1), | |
6149 "missing Printezis mark?"); | |
6150 HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2); | |
6151 size_t size = pointer_delta(nextOneAddr + 1, addr); | |
6152 assert(size == CompactibleFreeListSpace::adjustObjectSize(size), | |
6153 "alignment problem"); | |
6154 assert(size >= 3, "Necessary for Printezis marks to work"); | |
6155 return size; | |
6156 } | |
6157 | |
6158 // A variant of the above (block_size_using_printezis_bits()) except | |
6159 // that we return 0 if the P-bits are not yet set. | |
6160 size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const { | |
6161 if (_markBitMap.isMarked(addr)) { | |
6162 assert(_markBitMap.isMarked(addr + 1), "Missing Printezis bit?"); | |
6163 HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2); | |
6164 size_t size = pointer_delta(nextOneAddr + 1, addr); | |
6165 assert(size == CompactibleFreeListSpace::adjustObjectSize(size), | |
6166 "alignment problem"); | |
6167 assert(size >= 3, "Necessary for Printezis marks to work"); | |
6168 return size; | |
6169 } else { | |
6170 assert(!_markBitMap.isMarked(addr + 1), "Bit map inconsistency?"); | |
6171 return 0; | |
6172 } | |
6173 } | |
6174 | |
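To make the P-bit ("Printezis mark") size encoding used by the two routines above concrete, here is a hypothetical sketch with a plain bool array standing in for the mark bit map. For a block starting at index addr whose header is not yet readable, bits are set at addr, addr+1, and at the block's last word; the size is recovered as (next set bit at or after addr+2) + 1 - addr, matching the pointer_delta(nextOneAddr + 1, addr) computation above:

#include <cassert>
#include <cstddef>

size_t block_size_from_p_bits(const bool* bm, size_t addr, size_t limit) {
  assert(bm[addr] && bm[addr + 1]);     // both P-bits must be set
  size_t next = addr + 2;               // scan for the end-of-block bit
  while (next < limit && !bm[next]) next++;
  return next + 1 - addr;               // size includes the last word
}

int main() {
  bool bm[16] = {};
  bm[4] = bm[5] = bm[9] = true;         // block at 4, last word at 9
  assert(block_size_from_p_bits(bm, 4, 16) == 6);  // size >= 3 always holds
  return 0;
}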
6175 HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const { | |
6176 size_t sz = 0; | |
6177 oop p = (oop)addr; | |
187 | 6178 if (p->klass_or_null() != NULL && p->is_parsable()) { |
0 | 6179 sz = CompactibleFreeListSpace::adjustObjectSize(p->size()); |
6180 } else { | |
6181 sz = block_size_using_printezis_bits(addr); | |
6182 } | |
6183 assert(sz > 0, "size must be nonzero"); | |
6184 HeapWord* next_block = addr + sz; | |
6185 HeapWord* next_card = (HeapWord*)round_to((uintptr_t)next_block, | |
6186 CardTableModRefBS::card_size); | |
6187 assert(round_down((uintptr_t)addr, CardTableModRefBS::card_size) < | |
6188 round_down((uintptr_t)next_card, CardTableModRefBS::card_size), | |
6189 "must be different cards"); | |
6190 return next_card; | |
6191 } | |
6192 | |
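A minimal sketch of the card arithmetic in next_card_start_after_block() above, assuming 512-byte cards (the usual CardTableModRefBS::card_size; treated as an assumption here, not read from the VM):

#include <cassert>
#include <cstdint>

const uintptr_t card_size = 512;  // assumed; must be a power of two

uintptr_t round_up_to_card(uintptr_t p)   { return (p + card_size - 1) & ~(card_size - 1); }
uintptr_t round_down_to_card(uintptr_t p) { return p & ~(card_size - 1); }

int main() {
  // The word just past a block is rounded up to the next card start;
  // the assertion above requires it to land on a *different* card.
  uintptr_t addr       = 0x1234;
  uintptr_t next_card  = round_up_to_card(addr);            // 0x1400
  assert(round_down_to_card(addr) < round_down_to_card(next_card));
  return 0;
}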
6193 | |
6194 // CMS Bit Map Wrapper ///////////////////////////////////////// | |
6195 | |
6196 // Construct a CMS bit map infrastructure, but don't create the | |
6197 // bit vector itself. That is done by a separate call CMSBitMap::allocate() | |
6198 // further below. | |
6199 CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name): | |
6200 _bm(NULL,0), | |
6201 _shifter(shifter), | |
6202 _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true) : NULL) | |
6203 { | |
6204 _bmStartWord = 0; | |
6205 _bmWordSize = 0; | |
6206 } | |
6207 | |
6208 bool CMSBitMap::allocate(MemRegion mr) { | |
6209 _bmStartWord = mr.start(); | |
6210 _bmWordSize = mr.word_size(); | |
6211 ReservedSpace brs(ReservedSpace::allocation_align_size_up( | |
6212 (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1)); | |
6213 if (!brs.is_reserved()) { | |
6214 warning("CMS bit map allocation failure"); | |
6215 return false; | |
6216 } | |
6217 // For now we'll just commit all of the bit map up front. |
6218 // Later on we'll try to be more parsimonious with swap. | |
6219 if (!_virtual_space.initialize(brs, brs.size())) { | |
6220 warning("CMS bit map backing store failure"); | |
6221 return false; | |
6222 } | |
6223 assert(_virtual_space.committed_size() == brs.size(), | |
6224 "didn't reserve backing store for all of CMS bit map?"); | |
6225 _bm.set_map((uintptr_t*)_virtual_space.low()); | |
6226 assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >= | |
6227 _bmWordSize, "inconsistency in bit map sizing"); | |
6228 _bm.set_size(_bmWordSize >> _shifter); | |
6229 | |
6230 // bm.clear(); // can we rely on getting zero'd memory? verify below | |
6231 assert(isAllClear(), | |
6232 "Expected zero'd memory from ReservedSpace constructor"); | |
6233 assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()), | |
6234 "consistency check"); | |
6235 return true; | |
6236 } | |
6237 | |
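The sizing arithmetic in CMSBitMap::allocate() above can be read off directly: one bit covers (1 << shifter) heap words, so the backing store needs word_size >> (shifter + log2(bits-per-byte)) bytes, plus one byte for a possible partial byte at the end (the "+ 1" above). A small sketch, with names as stand-ins:

#include <cassert>
#include <cstddef>

const int LogBitsPerByte = 3;  // 8 bits per byte

size_t bitmap_bytes(size_t heap_word_size, int shifter) {
  return (heap_word_size >> (shifter + LogBitsPerByte)) + 1;
}

int main() {
  // A 2^30-word space with shifter 0 (one bit per word) needs 2^27 bytes + 1.
  assert(bitmap_bytes(1UL << 30, 0) == (1UL << 27) + 1);
  return 0;
}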
6238 void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) { | |
6239 HeapWord *next_addr, *end_addr, *last_addr; | |
6240 assert_locked(); | |
6241 assert(covers(mr), "out-of-range error"); | |
6242 // XXX assert that start and end are appropriately aligned | |
6243 for (next_addr = mr.start(), end_addr = mr.end(); | |
6244 next_addr < end_addr; next_addr = last_addr) { | |
6245 MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr); | |
6246 last_addr = dirty_region.end(); | |
6247 if (!dirty_region.is_empty()) { | |
6248 cl->do_MemRegion(dirty_region); | |
6249 } else { | |
6250 assert(last_addr == end_addr, "program logic"); | |
6251 return; | |
6252 } | |
6253 } | |
6254 } | |
6255 | |
6256 #ifndef PRODUCT | |
6257 void CMSBitMap::assert_locked() const { | |
6258 CMSLockVerifier::assert_locked(lock()); | |
6259 } | |
6260 | |
6261 bool CMSBitMap::covers(MemRegion mr) const { | |
6262 // assert(_bm.map() == _virtual_space.low(), "map inconsistency"); | |
6263 assert((size_t)_bm.size() == (_bmWordSize >> _shifter), | |
6264 "size inconsistency"); | |
6265 return (mr.start() >= _bmStartWord) && | |
6266 (mr.end() <= endWord()); | |
6267 } | |
6268 | |
6269 bool CMSBitMap::covers(HeapWord* start, size_t size) const { | |
6270 return (start >= _bmStartWord && (start + size) <= endWord()); | |
6271 } | |
6272 | |
6273 void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) { | |
6274 // verify that there are no 1 bits in the interval [left, right) | |
6275 FalseBitMapClosure falseBitMapClosure; | |
6276 iterate(&falseBitMapClosure, left, right); | |
6277 } | |
6278 | |
6279 void CMSBitMap::region_invariant(MemRegion mr) | |
6280 { | |
6281 assert_locked(); | |
6282 // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize)); | |
6283 assert(!mr.is_empty(), "unexpected empty region"); | |
6284 assert(covers(mr), "mr should be covered by bit map"); | |
6285 // convert address range into offset range | |
6286 size_t start_ofs = heapWordToOffset(mr.start()); | |
6287 // Make sure that end() is appropriately aligned | |
6288 assert(mr.end() == (HeapWord*)round_to((intptr_t)mr.end(), | |
6289 (1 << (_shifter+LogHeapWordSize))), | |
6290 "Misaligned mr.end()"); | |
6291 size_t end_ofs = heapWordToOffset(mr.end()); | |
6292 assert(end_ofs > start_ofs, "Should mark at least one bit"); | |
6293 } | |
6294 | |
6295 #endif | |
6296 | |
6297 bool CMSMarkStack::allocate(size_t size) { | |
6298 // allocate a stack of the requisite depth | |
6299 ReservedSpace rs(ReservedSpace::allocation_align_size_up( | |
6300 size * sizeof(oop))); | |
6301 if (!rs.is_reserved()) { | |
6302 warning("CMSMarkStack allocation failure"); | |
6303 return false; | |
6304 } | |
6305 if (!_virtual_space.initialize(rs, rs.size())) { | |
6306 warning("CMSMarkStack backing store failure"); | |
6307 return false; | |
6308 } | |
6309 assert(_virtual_space.committed_size() == rs.size(), | |
6310 "didn't reserve backing store for all of CMS stack?"); | |
6311 _base = (oop*)(_virtual_space.low()); | |
6312 _index = 0; | |
6313 _capacity = size; | |
6314 NOT_PRODUCT(_max_depth = 0); | |
6315 return true; | |
6316 } | |
6317 | |
6318 // XXX FIX ME !!! In the MT case we come in here holding a | |
6319 // leaf lock. For printing we need to take a further lock | |
6320 // which has lower rank. We need to recalibrate the two |
6321 // lock-ranks involved in order to be able to print the |
6322 // messages below. (Or defer the printing to the caller. | |
6323 // For now we take the expedient path of just disabling the | |
6324 // messages for the problematic case.) | |
6325 void CMSMarkStack::expand() { | |
6326 assert(_capacity <= CMSMarkStackSizeMax, "stack bigger than permitted"); | |
6327 if (_capacity == CMSMarkStackSizeMax) { | |
6328 if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) { | |
6329 // We print a warning message only once per CMS cycle. | |
6330 gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit"); | |
6331 } | |
6332 return; | |
6333 } | |
6334 // Double capacity if possible | |
6335 size_t new_capacity = MIN2(_capacity*2, CMSMarkStackSizeMax); | |
6336 // Do not give up existing stack until we have managed to | |
6337 // get the double capacity that we desired. | |
6338 ReservedSpace rs(ReservedSpace::allocation_align_size_up( | |
6339 new_capacity * sizeof(oop))); | |
6340 if (rs.is_reserved()) { | |
6341 // Release the backing store associated with old stack | |
6342 _virtual_space.release(); | |
6343 // Reinitialize virtual space for new stack | |
6344 if (!_virtual_space.initialize(rs, rs.size())) { | |
6345 fatal("Not enough swap for expanded marking stack"); | |
6346 } | |
6347 _base = (oop*)(_virtual_space.low()); | |
6348 _index = 0; | |
6349 _capacity = new_capacity; | |
6350 } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) { | |
6351 // Failed to double capacity, continue; | |
6352 // we print a detail message only once per CMS cycle. | |
6353 gclog_or_tty->print(" (benign) Failed to expand marking stack from "SIZE_FORMAT"K to " | |
6354 SIZE_FORMAT"K", | |
6355 _capacity / K, new_capacity / K); | |
6356 } | |
6357 } | |
6358 | |
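A sketch of the expansion policy in CMSMarkStack::expand() above: double the capacity, but never beyond the hard maximum, and leave the capacity alone once the limit is reached (the caller then copes via the overflow machinery). CMSMarkStackSizeMax here is a plain parameter standing in for the VM flag of that name:

#include <algorithm>
#include <cassert>
#include <cstddef>

size_t next_capacity(size_t capacity, size_t CMSMarkStackSizeMax) {
  if (capacity == CMSMarkStackSizeMax) {
    return capacity;  // at the limit; overflow handling takes over
  }
  return std::min(capacity * 2, CMSMarkStackSizeMax);
}

int main() {
  assert(next_capacity(64, 1024) == 128);     // normal doubling
  assert(next_capacity(768, 1024) == 1024);   // clamped at the maximum
  assert(next_capacity(1024, 1024) == 1024);  // already at the limit
  return 0;
}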
6359 | |
6360 // Closures | |
6361 // XXX: there seems to be a lot of code duplication here; | |
6362 // should refactor and consolidate common code. | |
6363 | |
6364 // This closure is used to mark refs into the CMS generation in | |
6365 // the CMS bit map. Called at the first checkpoint. This closure | |
6366 // assumes that we do not need to re-mark dirty cards; if the CMS | |
6367 // generation on which this is used is not an oldest (modulo perm gen) | |
6368 // generation then this will lose younger_gen cards! | |
6369 | |
6370 MarkRefsIntoClosure::MarkRefsIntoClosure( | |
6371 MemRegion span, CMSBitMap* bitMap, bool should_do_nmethods): | |
6372 _span(span), | |
6373 _bitMap(bitMap), | |
6374 _should_do_nmethods(should_do_nmethods) | |
6375 { | |
6376 assert(_ref_processor == NULL, "deliberately left NULL"); | |
6377 assert(_bitMap->covers(_span), "_bitMap/_span mismatch"); | |
6378 } | |
6379 | |
113 | 6380 void MarkRefsIntoClosure::do_oop(oop obj) {
0 | 6381 // if p points into _span, then mark corresponding bit in _markBitMap |
113 | 6382 assert(obj->is_oop(), "expected an oop"); |
6383 HeapWord* addr = (HeapWord*)obj; |
6384 if (_span.contains(addr)) { |
6385 // this should be made more efficient |
6386 _bitMap->mark(addr); |
6387 } |
6388 } |
6389 |
6390 void MarkRefsIntoClosure::do_oop(oop* p) { MarkRefsIntoClosure::do_oop_work(p); } |
6391 void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); } |
0 | 6392 |
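The paired do_oop(oop*) / do_oop(narrowOop*) overloads above (introduced with compressed oops) both funnel into one templated do_oop_work so the same marking logic handles full-width and compressed references. A minimal sketch of that dispatch pattern; the types and the decode step are toy stand-ins, not the VM's:

#include <cstdint>
#include <cstdio>

typedef void*    oop_t;        // full-width reference (stand-in)
typedef uint32_t narrowOop_t;  // compressed reference (stand-in)

static oop_t decode(narrowOop_t v) { return (oop_t)(uintptr_t)(v << 3); } // toy decode
static oop_t decode(oop_t v)       { return v; }                          // already wide

struct MarkClosure {
  template <class T> void do_oop_work(T* p) {
    oop_t obj = decode(*p);           // width-specific load and decode
    std::printf("mark %p\n", obj);    // shared marking logic goes here
  }
  void do_oop(oop_t* p)       { do_oop_work(p); }
  void do_oop(narrowOop_t* p) { do_oop_work(p); }
};

int main() {
  oop_t wide = (oop_t)0x1000;
  narrowOop_t narrow = 0x200;         // decodes to the same 0x1000
  MarkClosure c;
  c.do_oop(&wide);
  c.do_oop(&narrow);
  return 0;
}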
6393 // A variant of the above, used for CMS marking verification. | |
6394 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure( | |
6395 MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm, | |
6396 bool should_do_nmethods): | |
6397 _span(span), | |
6398 _verification_bm(verification_bm), | |
6399 _cms_bm(cms_bm), | |
6400 _should_do_nmethods(should_do_nmethods) { | |
6401 assert(_ref_processor == NULL, "deliberately left NULL"); | |
6402 assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch"); | |
6403 } | |
6404 | |
113 | 6405 void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
0 | 6406 // if p points into _span, then mark corresponding bit in _markBitMap |
113 | 6407 assert(obj->is_oop(), "expected an oop"); |
6408 HeapWord* addr = (HeapWord*)obj; |
6409 if (_span.contains(addr)) { |
6410 _verification_bm->mark(addr); |
6411 if (!_cms_bm->isMarked(addr)) { |
6412 oop(addr)->print(); |
6413 gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", addr); |
6414 fatal("... aborting"); |
6415 } |
6416 } |
6417 } |
6418 |
6419 void MarkRefsIntoVerifyClosure::do_oop(oop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6420 void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
0 | 6421 |
6422 ////////////////////////////////////////////////// | |
6423 // MarkRefsIntoAndScanClosure | |
6424 ////////////////////////////////////////////////// | |
6425 | |
6426 MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span, | |
6427 ReferenceProcessor* rp, | |
6428 CMSBitMap* bit_map, | |
6429 CMSBitMap* mod_union_table, | |
6430 CMSMarkStack* mark_stack, | |
6431 CMSMarkStack* revisit_stack, | |
6432 CMSCollector* collector, | |
6433 bool should_yield, | |
6434 bool concurrent_precleaning): | |
6435 _collector(collector), | |
6436 _span(span), | |
6437 _bit_map(bit_map), | |
6438 _mark_stack(mark_stack), | |
6439 _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table, | |
6440 mark_stack, revisit_stack, concurrent_precleaning), | |
6441 _yield(should_yield), | |
6442 _concurrent_precleaning(concurrent_precleaning), | |
6443 _freelistLock(NULL) | |
6444 { | |
6445 _ref_processor = rp; | |
6446 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL"); | |
6447 } | |
6448 | |
6449 // This closure is used to mark refs into the CMS generation at the | |
6450 // second (final) checkpoint, and to scan and transitively follow | |
6451 // the unmarked oops. It is also used during the concurrent precleaning | |
6452 // phase while scanning objects on dirty cards in the CMS generation. | |
6453 // The marks are made in the marking bit map and the marking stack is | |
6454 // used for keeping the (newly) grey objects during the scan. | |
6455 // The parallel version (Par_...) appears further below. | |
113 | 6456 void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
6457 if (obj != NULL) { |
6458 assert(obj->is_oop(), "expected an oop"); |
6459 HeapWord* addr = (HeapWord*)obj; |
6460 assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)"); |
6461 assert(_collector->overflow_list_is_empty(), |
6462 "overflow list should be empty"); |
0 | 6463 if (_span.contains(addr) && |
6464 !_bit_map->isMarked(addr)) { | |
6465 // mark bit map (object is now grey) | |
6466 _bit_map->mark(addr); | |
6467 // push on marking stack (stack should be empty), and drain the | |
6468 // stack by applying this closure to the oops in the oops popped | |
6469 // from the stack (i.e. blacken the grey objects) | |
113 | 6470 bool res = _mark_stack->push(obj); |
0 | 6471 assert(res, "Should have space to push on empty stack"); |
6472 do { | |
6473 oop new_oop = _mark_stack->pop(); | |
6474 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop"); | |
6475 assert(new_oop->is_parsable(), "Found unparsable oop"); | |
6476 assert(_bit_map->isMarked((HeapWord*)new_oop), | |
6477 "only grey objects on this stack"); | |
6478 // iterate over the oops in this oop, marking and pushing | |
6479 // the ones in CMS heap (i.e. in _span). | |
6480 new_oop->oop_iterate(&_pushAndMarkClosure); | |
6481 // check if it's time to yield | |
6482 do_yield_check(); | |
6483 } while (!_mark_stack->isEmpty() || | |
6484 (!_concurrent_precleaning && take_from_overflow_list())); | |
6485 // if marking stack is empty, and we are not doing this | |
6486 // during precleaning, then check the overflow list | |
6487 } | |
6488 assert(_mark_stack->isEmpty(), "post-condition (eager drainage)"); | |
6489 assert(_collector->overflow_list_is_empty(), | |
6490 "overflow list was drained above"); | |
6491 // We could restore evacuated mark words, if any, used for | |
6492 // overflow list links here because the overflow list is | |
6493 // provably empty here. That would reduce the maximum | |
6494 // size requirements for preserved_{oop,mark}_stack. | |
6495 // But we'll just postpone it until we are all done | |
6496 // so we can just stream through. | |
6497 if (!_concurrent_precleaning && CMSOverflowEarlyRestoration) { | |
6498 _collector->restore_preserved_marks_if_any(); | |
6499 assert(_collector->no_preserved_marks(), "No preserved marks"); | |
6500 } | |
6501 assert(!CMSOverflowEarlyRestoration || _collector->no_preserved_marks(), | |
6502 "All preserved marks should have been restored above"); | |
6503 } | |
6504 } | |
6505 | |
113 | 6506 void MarkRefsIntoAndScanClosure::do_oop(oop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6507 void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6508 |
0 | 6509 void MarkRefsIntoAndScanClosure::do_yield_work() { |
6510 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), | |
6511 "CMS thread should hold CMS token"); | |
6512 assert_lock_strong(_freelistLock); | |
6513 assert_lock_strong(_bit_map->lock()); | |
6514 // relinquish the free_list_lock and bitMaplock() | |
6515 _bit_map->lock()->unlock(); | |
6516 _freelistLock->unlock(); | |
6517 ConcurrentMarkSweepThread::desynchronize(true); | |
6518 ConcurrentMarkSweepThread::acknowledge_yield_request(); | |
6519 _collector->stopTimer(); | |
6520 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr()); | |
6521 if (PrintCMSStatistics != 0) { | |
6522 _collector->incrementYields(); | |
6523 } | |
6524 _collector->icms_wait(); | |
6525 | |
6526 // See the comment in coordinator_yield() | |
113 | 6527 for (unsigned i = 0; |
6528 i < CMSYieldSleepCount && |
6529 ConcurrentMarkSweepThread::should_yield() && |
6530 !CMSCollector::foregroundGCIsActive(); |
6531 ++i) { |
0 | 6532 os::sleep(Thread::current(), 1, false); |
6533 ConcurrentMarkSweepThread::acknowledge_yield_request(); | |
6534 } | |
6535 | |
6536 ConcurrentMarkSweepThread::synchronize(true); | |
6537 _freelistLock->lock_without_safepoint_check(); | |
6538 _bit_map->lock()->lock_without_safepoint_check(); | |
6539 _collector->startTimer(); | |
6540 } | |
6541 | |
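The do_yield_work() methods above (and their siblings further below) all follow the same handshake: release the held locks innermost-first, let the foreground thread run for a moment, then reacquire in the original order and resume timing. A simplified single-purpose sketch, with std::mutex standing in for the VM's rank-checked Mutex class and the CMS token/timer bookkeeping elided:

#include <chrono>
#include <mutex>
#include <thread>

// Caller must hold both locks, acquired as freelist_lock then bit_map_lock.
void yield_briefly(std::mutex& freelist_lock, std::mutex& bit_map_lock) {
  bit_map_lock.unlock();    // release innermost lock first
  freelist_lock.unlock();
  std::this_thread::sleep_for(std::chrono::milliseconds(1));  // let others run
  freelist_lock.lock();     // reacquire in the original acquisition order
  bit_map_lock.lock();
}

int main() {
  std::mutex fl, bm;
  fl.lock(); bm.lock();     // simulate the sweep-time lock state
  yield_briefly(fl, bm);
  bm.unlock(); fl.unlock();
  return 0;
}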
6542 /////////////////////////////////////////////////////////// | |
6543 // Par_MarkRefsIntoAndScanClosure: a parallel version of | |
6544 // MarkRefsIntoAndScanClosure | |
6545 /////////////////////////////////////////////////////////// | |
6546 Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure( | |
6547 CMSCollector* collector, MemRegion span, ReferenceProcessor* rp, | |
6548 CMSBitMap* bit_map, OopTaskQueue* work_queue, CMSMarkStack* revisit_stack): | |
6549 _span(span), | |
6550 _bit_map(bit_map), | |
6551 _work_queue(work_queue), | |
6552 _low_water_mark(MIN2((uint)(work_queue->max_elems()/4), | |
6553 (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))), | |
6554 _par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue, | |
6555 revisit_stack) | |
6556 { | |
6557 _ref_processor = rp; | |
6558 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL"); | |
6559 } | |
6560 | |
6561 // This closure is used to mark refs into the CMS generation at the | |
6562 // second (final) checkpoint, and to scan and transitively follow | |
6563 // the unmarked oops. The marks are made in the marking bit map and | |
6564 // the work_queue is used for keeping the (newly) grey objects during | |
6565 // the scan phase whence they are also available for stealing by parallel | |
6566 // threads. Since the marking bit map is shared, updates are | |
6567 // synchronized (via CAS). | |
113 | 6568 void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) {
6569 if (obj != NULL) { |
0 | 6570 // Ignore mark word because this could be an already marked oop |
6571 // that may be chained at the end of the overflow list. | |
113 | 6572 assert(obj->is_oop(), "expected an oop"); |
6573 HeapWord* addr = (HeapWord*)obj; |
0 | 6574 if (_span.contains(addr) && |
6575 !_bit_map->isMarked(addr)) { | |
6576 // mark bit map (object will become grey): | |
6577 // It is possible for several threads to be | |
6578 // trying to "claim" this object concurrently; | |
6579 // the unique thread that succeeds in marking the | |
6580 // object first will do the subsequent push on | |
6581 // to the work queue (or overflow list). | |
6582 if (_bit_map->par_mark(addr)) { | |
6583 // push on work_queue (which may not be empty), and trim the | |
6584 // queue to an appropriate length by applying this closure to | |
6585 // the oops in the oops popped from the stack (i.e. blacken the | |
6586 // grey objects) | |
113 | 6587 bool res = _work_queue->push(obj); |
0 | 6588 assert(res, "Low water mark should be less than capacity?"); |
6589 trim_queue(_low_water_mark); | |
6590 } // Else, another thread claimed the object | |
6591 } | |
6592 } | |
6593 } | |
6594 | |
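The "claim" protocol in the parallel do_oop above hinges on par_mark(): several GC workers may race to mark the same object, exactly one CAS succeeds, and only that winner pushes the object on its work queue. A sketch of the idea, with std::atomic standing in for the VM's CAS-based bit map update:

#include <atomic>
#include <cstdio>

std::atomic<bool> mark_bit(false);  // one bit of the shared marking bit map

bool par_mark() {
  bool expected = false;            // succeed only if currently unmarked
  return mark_bit.compare_exchange_strong(expected, true);
}

int main() {
  if (par_mark())  std::printf("this thread claimed the object\n");
  if (!par_mark()) std::printf("a second attempt loses the race\n");
  return 0;
}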
113 | 6595 void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
6596 void Par_MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
6597 |
0 | 6598 // This closure is used to rescan the marked objects on the dirty cards |
6599 // in the mod union table and the card table proper. | |
6600 size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m( | |
6601 oop p, MemRegion mr) { | |
6602 | |
6603 size_t size = 0; | |
6604 HeapWord* addr = (HeapWord*)p; | |
6605 DEBUG_ONLY(_collector->verify_work_stacks_empty();) | |
6606 assert(_span.contains(addr), "we are scanning the CMS generation"); | |
6607 // check if it's time to yield | |
6608 if (do_yield_check()) { | |
6609 // We yielded for some foreground stop-world work, | |
6610 // and we have been asked to abort this ongoing preclean cycle. | |
6611 return 0; | |
6612 } | |
6613 if (_bitMap->isMarked(addr)) { | |
6614 // it's marked; is it potentially uninitialized? | |
187 | 6615 if (p->klass_or_null() != NULL) { |
0 | 6616 if (CMSPermGenPrecleaningEnabled && !p->is_parsable()) { |
6617 // Signal precleaning to redirty the card since | |
6618 // the klass pointer is already installed. | |
6619 assert(size == 0, "Initial value"); | |
6620 } else { | |
6621 assert(p->is_parsable(), "must be parsable."); | |
6622 // an initialized object; ignore mark word in verification below | |
6623 // since we are running concurrent with mutators | |
6624 assert(p->is_oop(true), "should be an oop"); | |
6625 if (p->is_objArray()) { | |
6626 // objArrays are precisely marked; restrict scanning | |
6627 // to dirty cards only. | |
187 | 6628 size = CompactibleFreeListSpace::adjustObjectSize( |
6629 p->oop_iterate(_scanningClosure, mr)); | |
0 | 6630 } else { |
6631 // A non-array may have been imprecisely marked; we need | |
6632 // to scan object in its entirety. | |
6633 size = CompactibleFreeListSpace::adjustObjectSize( | |
6634 p->oop_iterate(_scanningClosure)); | |
6635 } | |
6636 #ifdef DEBUG | |
6637 size_t direct_size = | |
6638 CompactibleFreeListSpace::adjustObjectSize(p->size()); | |
6639 assert(size == direct_size, "Inconsistency in size"); | |
6640 assert(size >= 3, "Necessary for Printezis marks to work"); | |
6641 if (!_bitMap->isMarked(addr+1)) { | |
6642 _bitMap->verifyNoOneBitsInRange(addr+2, addr+size); | |
6643 } else { | |
6644 _bitMap->verifyNoOneBitsInRange(addr+2, addr+size-1); | |
6645 assert(_bitMap->isMarked(addr+size-1), | |
6646 "inconsistent Printezis mark"); | |
6647 } | |
6648 #endif // DEBUG | |
6649 } | |
6650 } else { | |
6651 // an uninitialized object |
6652 assert(_bitMap->isMarked(addr+1), "missing Printezis mark?"); | |
6653 HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2); | |
6654 size = pointer_delta(nextOneAddr + 1, addr); | |
6655 assert(size == CompactibleFreeListSpace::adjustObjectSize(size), | |
6656 "alignment problem"); | |
6657 // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass() | |
6658 // will dirty the card when the klass pointer is installed in the | |
6659 // object (signalling the completion of initialization). | |
6660 } | |
6661 } else { | |
6662 // Either a not yet marked object or an uninitialized object | |
187 | 6663 if (p->klass_or_null() == NULL || !p->is_parsable()) { |
0 | 6664 // An uninitialized object, skip to the next card, since |
6665 // we may not be able to read its P-bits yet. | |
6666 assert(size == 0, "Initial value"); | |
6667 } else { | |
6668 // An object not (yet) reached by marking: we merely need to | |
6669 // compute its size so as to go look at the next block. | |
6670 assert(p->is_oop(true), "should be an oop"); | |
6671 size = CompactibleFreeListSpace::adjustObjectSize(p->size()); | |
6672 } | |
6673 } | |
6674 DEBUG_ONLY(_collector->verify_work_stacks_empty();) | |
6675 return size; | |
6676 } | |
6677 | |
6678 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() { | |
6679 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), | |
6680 "CMS thread should hold CMS token"); | |
6681 assert_lock_strong(_freelistLock); | |
6682 assert_lock_strong(_bitMap->lock()); | |
6683 // relinquish the free_list_lock and bitMaplock() | |
6684 _bitMap->lock()->unlock(); | |
6685 _freelistLock->unlock(); | |
6686 ConcurrentMarkSweepThread::desynchronize(true); | |
6687 ConcurrentMarkSweepThread::acknowledge_yield_request(); | |
6688 _collector->stopTimer(); | |
6689 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr()); | |
6690 if (PrintCMSStatistics != 0) { | |
6691 _collector->incrementYields(); | |
6692 } | |
6693 _collector->icms_wait(); | |
6694 | |
6695 // See the comment in coordinator_yield() | |
6696 for (unsigned i = 0; i < CMSYieldSleepCount && | |
113 | 6697 ConcurrentMarkSweepThread::should_yield() && |
6698 !CMSCollector::foregroundGCIsActive(); ++i) { |
0 | 6699 os::sleep(Thread::current(), 1, false); |
6700 ConcurrentMarkSweepThread::acknowledge_yield_request(); | |
6701 } | |
6702 | |
6703 ConcurrentMarkSweepThread::synchronize(true); | |
6704 _freelistLock->lock_without_safepoint_check(); | |
6705 _bitMap->lock()->lock_without_safepoint_check(); | |
6706 _collector->startTimer(); | |
6707 } | |
6708 | |
6709 | |
6710 ////////////////////////////////////////////////////////////////// | |
6711 // SurvivorSpacePrecleanClosure | |
6712 ////////////////////////////////////////////////////////////////// | |
6713 // This (single-threaded) closure is used to preclean the oops in | |
6714 // the survivor spaces. | |
6715 size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) { | |
6716 | |
6717 HeapWord* addr = (HeapWord*)p; | |
6718 DEBUG_ONLY(_collector->verify_work_stacks_empty();) | |
6719 assert(!_span.contains(addr), "we are scanning the survivor spaces"); | |
187 | 6720 assert(p->klass_or_null() != NULL, "object should be initialized"); |
0 | 6721 assert(p->is_parsable(), "must be parsable."); |
6722 // an initialized object; ignore mark word in verification below | |
6723 // since we are running concurrent with mutators | |
6724 assert(p->is_oop(true), "should be an oop"); | |
6725 // Note that we do not yield while we iterate over | |
6726 // the interior oops of p, pushing the relevant ones | |
6727 // on our marking stack. | |
6728 size_t size = p->oop_iterate(_scanning_closure); | |
6729 do_yield_check(); | |
6730 // Observe that below, we do not abandon the preclean | |
6731 // phase as soon as we should; rather we empty the | |
6732 // marking stack before returning. This is to satisfy | |
6733 // some existing assertions. In general, it may be a | |
6734 // good idea to abort immediately and complete the marking | |
6735 // from the grey objects at a later time. | |
6736 while (!_mark_stack->isEmpty()) { | |
6737 oop new_oop = _mark_stack->pop(); | |
6738 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop"); | |
6739 assert(new_oop->is_parsable(), "Found unparsable oop"); | |
6740 assert(_bit_map->isMarked((HeapWord*)new_oop), | |
6741 "only grey objects on this stack"); | |
6742 // iterate over the oops in this oop, marking and pushing | |
6743 // the ones in CMS heap (i.e. in _span). | |
6744 new_oop->oop_iterate(_scanning_closure); | |
6745 // check if it's time to yield | |
6746 do_yield_check(); | |
6747 } | |
6748 unsigned int after_count = | |
6749 GenCollectedHeap::heap()->total_collections(); | |
6750 bool abort = (_before_count != after_count) || | |
6751 _collector->should_abort_preclean(); | |
6752 return abort ? 0 : size; | |
6753 } | |
6754 | |
6755 void SurvivorSpacePrecleanClosure::do_yield_work() { | |
6756 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), | |
6757 "CMS thread should hold CMS token"); | |
6758 assert_lock_strong(_bit_map->lock()); | |
6759 // Relinquish the bit map lock | |
6760 _bit_map->lock()->unlock(); | |
6761 ConcurrentMarkSweepThread::desynchronize(true); | |
6762 ConcurrentMarkSweepThread::acknowledge_yield_request(); | |
6763 _collector->stopTimer(); | |
6764 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr()); | |
6765 if (PrintCMSStatistics != 0) { | |
6766 _collector->incrementYields(); | |
6767 } | |
6768 _collector->icms_wait(); | |
6769 | |
6770 // See the comment in coordinator_yield() | |
6771 for (unsigned i = 0; i < CMSYieldSleepCount && | |
6772 ConcurrentMarkSweepThread::should_yield() && | |
6773 !CMSCollector::foregroundGCIsActive(); ++i) { | |
6774 os::sleep(Thread::current(), 1, false); | |
6775 ConcurrentMarkSweepThread::acknowledge_yield_request(); | |
6776 } | |
6777 | |
6778 ConcurrentMarkSweepThread::synchronize(true); | |
6779 _bit_map->lock()->lock_without_safepoint_check(); | |
6780 _collector->startTimer(); | |
6781 } | |
6782 | |
6783 // This closure is used to rescan the marked objects on the dirty cards | |
6784 // in the mod union table and the card table proper. In the parallel | |
6785 // case, although the bitMap is shared, we do a single read so the | |
6786 // isMarked() query is "safe". | |
6787 bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) { | |
6788 // Ignore mark word because we are running concurrent with mutators | |
6789 assert(p->is_oop_or_null(true), "expected an oop or null"); | |
6790 HeapWord* addr = (HeapWord*)p; | |
6791 assert(_span.contains(addr), "we are scanning the CMS generation"); | |
6792 bool is_obj_array = false; | |
6793 #ifdef DEBUG | |
6794 if (!_parallel) { | |
6795 assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)"); | |
6796 assert(_collector->overflow_list_is_empty(), | |
6797 "overflow list should be empty"); | |
6798 | |
6799 } | |
6800 #endif // DEBUG | |
6801 if (_bit_map->isMarked(addr)) { | |
6802 // Obj arrays are precisely marked, non-arrays are not; | |
6803 // so we scan objArrays precisely and non-arrays in their | |
6804 // entirety. | |
6805 if (p->is_objArray()) { | |
6806 is_obj_array = true; | |
6807 if (_parallel) { | |
6808 p->oop_iterate(_par_scan_closure, mr); | |
6809 } else { | |
6810 p->oop_iterate(_scan_closure, mr); | |
6811 } | |
6812 } else { | |
6813 if (_parallel) { | |
6814 p->oop_iterate(_par_scan_closure); | |
6815 } else { | |
6816 p->oop_iterate(_scan_closure); | |
6817 } | |
6818 } | |
6819 } | |
6820 #ifdef DEBUG | |
6821 if (!_parallel) { | |
6822 assert(_mark_stack->isEmpty(), "post-condition (eager drainage)"); | |
6823 assert(_collector->overflow_list_is_empty(), | |
6824 "overflow list should be empty"); | |
6825 | |
6826 } | |
6827 #endif // DEBUG | |
6828 return is_obj_array; | |
6829 } | |
6830 | |
6831 MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector, | |
6832 MemRegion span, | |
6833 CMSBitMap* bitMap, CMSMarkStack* markStack, | |
6834 CMSMarkStack* revisitStack, | |
6835 bool should_yield, bool verifying): | |
6836 _collector(collector), | |
6837 _span(span), | |
6838 _bitMap(bitMap), | |
6839 _mut(&collector->_modUnionTable), | |
6840 _markStack(markStack), | |
6841 _revisitStack(revisitStack), | |
6842 _yield(should_yield), | |
6843 _skipBits(0) | |
6844 { | |
6845 assert(_markStack->isEmpty(), "stack should be empty"); | |
6846 _finger = _bitMap->startWord(); | |
6847 _threshold = _finger; | |
6848 assert(_collector->_restart_addr == NULL, "Sanity check"); | |
6849 assert(_span.contains(_finger), "Out of bounds _finger?"); | |
6850 DEBUG_ONLY(_verifying = verifying;) | |
6851 } | |
6852 | |
6853 void MarkFromRootsClosure::reset(HeapWord* addr) { | |
6854 assert(_markStack->isEmpty(), "would cause duplicates on stack"); | |
6855 assert(_span.contains(addr), "Out of bounds _finger?"); | |
6856 _finger = addr; | |
6857 _threshold = (HeapWord*)round_to( | |
6858 (intptr_t)_finger, CardTableModRefBS::card_size); | |
6859 } | |
6860 | |
6861 // Should revisit to see if this should be restructured for | |
6862 // greater efficiency. | |
6863 void MarkFromRootsClosure::do_bit(size_t offset) { | |
6864 if (_skipBits > 0) { | |
6865 _skipBits--; | |
6866 return; | |
6867 } | |
6868 // convert offset into a HeapWord* | |
6869 HeapWord* addr = _bitMap->startWord() + offset; | |
6870 assert(_bitMap->endWord() && addr < _bitMap->endWord(), | |
6871 "address out of range"); | |
6872 assert(_bitMap->isMarked(addr), "tautology"); | |
6873 if (_bitMap->isMarked(addr+1)) { | |
6874 // this is an allocated but not yet initialized object | |
6875 assert(_skipBits == 0, "tautology"); | |
6876 _skipBits = 2; // skip next two marked bits ("Printezis-marks") | |
6877 oop p = oop(addr); | |
187 | 6878 if (p->klass_or_null() == NULL || !p->is_parsable()) { |
0 | 6879 DEBUG_ONLY(if (!_verifying) {) |
6880 // We re-dirty the cards on which this object lies and increase | |
6881 // the _threshold so that we'll come back to scan this object | |
6882 // during the preclean or remark phase. (CMSCleanOnEnter) | |
6883 if (CMSCleanOnEnter) { | |
6884 size_t sz = _collector->block_size_using_printezis_bits(addr); | |
6885 HeapWord* start_card_addr = (HeapWord*)round_down( | |
6886 (intptr_t)addr, CardTableModRefBS::card_size); | |
6887 HeapWord* end_card_addr = (HeapWord*)round_to( | |
6888 (intptr_t)(addr+sz), CardTableModRefBS::card_size); | |
6889 MemRegion redirty_range = MemRegion(start_card_addr, end_card_addr); | |
6890 assert(!redirty_range.is_empty(), "Arithmetical tautology"); | |
6891 // Bump _threshold to end_card_addr; note that | |
6892 // _threshold cannot possibly exceed end_card_addr, anyhow. | |
6893 // This prevents future clearing of the card as the scan proceeds | |
6894 // to the right. | |
6895 assert(_threshold <= end_card_addr, | |
6896 "Because we are just scanning into this object"); | |
6897 if (_threshold < end_card_addr) { | |
6898 _threshold = end_card_addr; | |
6899 } | |
187 | 6900 if (p->klass_or_null() != NULL) { |
0 | 6901 // Redirty the range of cards... |
6902 _mut->mark_range(redirty_range); | |
6903 } // ...else the setting of klass will dirty the card anyway. | |
6904 } | |
6905 DEBUG_ONLY(}) | |
6906 return; | |
6907 } | |
6908 } | |
6909 scanOopsInOop(addr); | |
6910 } | |
6911 | |
6912 // We take a break if we've been at this for a while, | |
6913 // so as to avoid monopolizing the locks involved. | |
6914 void MarkFromRootsClosure::do_yield_work() { | |
6915 // First give up the locks, then yield, then re-lock | |
6916 // We should probably use a constructor/destructor idiom to | |
6917 // do this unlock/lock or modify the MutexUnlocker class to | |
6918 // serve our purpose. XXX | |
6919 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), | |
6920 "CMS thread should hold CMS token"); | |
6921 assert_lock_strong(_bitMap->lock()); | |
6922 _bitMap->lock()->unlock(); | |
6923 ConcurrentMarkSweepThread::desynchronize(true); | |
6924 ConcurrentMarkSweepThread::acknowledge_yield_request(); | |
6925 _collector->stopTimer(); | |
6926 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr()); | |
6927 if (PrintCMSStatistics != 0) { | |
6928 _collector->incrementYields(); | |
6929 } | |
6930 _collector->icms_wait(); | |
6931 | |
6932 // See the comment in coordinator_yield() | |
6933 for (unsigned i = 0; i < CMSYieldSleepCount && | |
6934 ConcurrentMarkSweepThread::should_yield() && | |
6935 !CMSCollector::foregroundGCIsActive(); ++i) { | |
6936 os::sleep(Thread::current(), 1, false); | |
6937 ConcurrentMarkSweepThread::acknowledge_yield_request(); | |
6938 } | |
6939 | |
6940 ConcurrentMarkSweepThread::synchronize(true); | |
6941 _bitMap->lock()->lock_without_safepoint_check(); | |
6942 _collector->startTimer(); | |
6943 } | |
6944 | |
6945 void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) { | |
6946 assert(_bitMap->isMarked(ptr), "expected bit to be set"); | |
6947 assert(_markStack->isEmpty(), | |
6948 "should drain stack to limit stack usage"); | |
6949 // convert ptr to an oop preparatory to scanning | |
113 | 6950 oop obj = oop(ptr); |
0 | 6951 // Ignore mark word in verification below, since we |
6952 // may be running concurrent with mutators. | |
113 | 6953 assert(obj->is_oop(true), "should be an oop"); |
0 | 6954 assert(_finger <= ptr, "_finger runneth ahead"); |
6955 // advance the finger to right end of this object | |
113 | 6956 _finger = ptr + obj->size(); |
0 | 6957 assert(_finger > ptr, "we just incremented it above"); |
6958 // On large heaps, it may take us some time to get through | |
6959 // the marking phase (especially if running iCMS). During | |
6960 // this time it's possible that a lot of mutations have | |
6961 // accumulated in the card table and the mod union table -- | |
6962 // these mutation records are redundant until we have | |
6963 // actually traced into the corresponding card. | |
6964 // Here, we check whether advancing the finger would make | |
6965 // us cross into a new card, and if so clear corresponding | |
6966 // cards in the MUT (preclean them in the card-table in the | |
6967 // future). | |
6968 | |
6969 DEBUG_ONLY(if (!_verifying) {) | |
6970 // The clean-on-enter optimization is disabled by default, | |
6971 // until we fix 6178663. | |
6972 if (CMSCleanOnEnter && (_finger > _threshold)) { | |
6973 // [_threshold, _finger) represents the interval | |
6974 // of cards to be cleared in MUT (or precleaned in card table). | |
6975 // The set of cards to be cleared is all those that overlap | |
6976 // with the interval [_threshold, _finger); note that | |
6977 // _threshold is always kept card-aligned but _finger isn't | |
6978 // always card-aligned. | |
6979 HeapWord* old_threshold = _threshold; | |
6980 assert(old_threshold == (HeapWord*)round_to( | |
6981 (intptr_t)old_threshold, CardTableModRefBS::card_size), | |
6982 "_threshold should always be card-aligned"); | |
6983 _threshold = (HeapWord*)round_to( | |
6984 (intptr_t)_finger, CardTableModRefBS::card_size); | |
6985 MemRegion mr(old_threshold, _threshold); | |
6986 assert(!mr.is_empty(), "Control point invariant"); | |
6987 assert(_span.contains(mr), "Should clear within span"); | |
6988 // XXX When _finger crosses from old gen into perm gen | |
6989 // we may be doing unnecessary cleaning; do better in the | |
6990 // future by detecting that condition and clearing fewer | |
6991 // MUT/CT entries. | |
6992 _mut->clear_range(mr); | |
6993 } | |
6994 DEBUG_ONLY(}) | |
6995 | |
6996 // Note: the finger doesn't advance while we drain | |
6997 // the stack below. | |
6998 PushOrMarkClosure pushOrMarkClosure(_collector, | |
6999 _span, _bitMap, _markStack, | |
7000 _revisitStack, | |
7001 _finger, this); | |
113 | 7002 bool res = _markStack->push(obj); |
0 | 7003 assert(res, "Empty non-zero size stack should have space for single push"); |
7004 while (!_markStack->isEmpty()) { | |
7005 oop new_oop = _markStack->pop(); | |
7006 // Skip verifying header mark word below because we are | |
7007 // running concurrent with mutators. | |
7008 assert(new_oop->is_oop(true), "Oops! expected to pop an oop"); | |
7009 // now scan this oop's oops | |
7010 new_oop->oop_iterate(&pushOrMarkClosure); | |
7011 do_yield_check(); | |
7012 } | |
7013 assert(_markStack->isEmpty(), "tautology, emphasizing post-condition"); | |
7014 } | |
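Editor's aside: a minimal, self-contained sketch of the card-threshold arithmetic used in scanOopsInOop above. It assumes a 512-byte card (the usual CardTableModRefBS::card_size) and models round_to as a power-of-two align-up; round_to_card, threshold and finger are hypothetical stand-ins, not names from this file.

#include <cassert>
#include <cstdint>

const uintptr_t card_size = 512;  // bytes; an assumption for this example

// Align p up to the next card boundary (card_size is a power of two).
uintptr_t round_to_card(uintptr_t p) {
  return (p + card_size - 1) & ~(card_size - 1);
}

int main() {
  uintptr_t threshold = 0x1000;  // card-aligned, as the assert above demands
  uintptr_t finger    = 0x1234;  // end of the just-scanned object
  uintptr_t new_threshold = round_to_card(finger);
  assert(new_threshold == 0x1400);
  // The MUT cards to clear cover [old_threshold, new_threshold), i.e.
  // [0x1000, 0x1400): exactly the cards the finger has fully crossed.
  assert(threshold < new_threshold);
  return 0;
}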
7015 | |
7016 Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task, | |
7017 CMSCollector* collector, MemRegion span, | |
7018 CMSBitMap* bit_map, | |
7019 OopTaskQueue* work_queue, | |
7020 CMSMarkStack* overflow_stack, | |
7021 CMSMarkStack* revisit_stack, | |
7022 bool should_yield): | |
7023 _collector(collector), | |
7024 _whole_span(collector->_span), | |
7025 _span(span), | |
7026 _bit_map(bit_map), | |
7027 _mut(&collector->_modUnionTable), | |
7028 _work_queue(work_queue), | |
7029 _overflow_stack(overflow_stack), | |
7030 _revisit_stack(revisit_stack), | |
7031 _yield(should_yield), | |
7032 _skip_bits(0), | |
7033 _task(task) | |
7034 { | |
7035 assert(_work_queue->size() == 0, "work_queue should be empty"); | |
7036 _finger = span.start(); | |
7037 _threshold = _finger; // XXX Defer clear-on-enter optimization for now | |
7038 assert(_span.contains(_finger), "Out of bounds _finger?"); | |
7039 } | |
7040 | |
7041 // Should revisit to see if this should be restructured for | |
7042 // greater efficiency. | |
7043 void Par_MarkFromRootsClosure::do_bit(size_t offset) { | |
7044 if (_skip_bits > 0) { | |
7045 _skip_bits--; | |
7046 return; | |
7047 } | |
7048 // convert offset into a HeapWord* | |
7049 HeapWord* addr = _bit_map->startWord() + offset; | |
7050 assert(_bit_map->endWord() && addr < _bit_map->endWord(), | |
7051 "address out of range"); | |
7052 assert(_bit_map->isMarked(addr), "tautology"); | |
7053 if (_bit_map->isMarked(addr+1)) { | |
7054 // this is an allocated object that might not yet be initialized | |
7055 assert(_skip_bits == 0, "tautology"); | |
7056 _skip_bits = 2; // skip next two marked bits ("Printezis-marks") | |
7057 oop p = oop(addr); | |
187 | 7058 if (p->klass_or_null() == NULL || !p->is_parsable()) { |
0 | 7059 // in the case of Clean-on-Enter optimization, redirty card |
7060 // and avoid clearing card by increasing the threshold. | |
7061 return; | |
7062 } | |
7063 } | |
7064 scan_oops_in_oop(addr); | |
7065 } | |
7066 | |
7067 void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) { | |
7068 assert(_bit_map->isMarked(ptr), "expected bit to be set"); | |
7069 // Should we assert that our work queue is empty or | |
7070 // below some drain limit? | |
7071 assert(_work_queue->size() == 0, | |
7072 "should drain stack to limit stack usage"); | |
7073 // convert ptr to an oop preparatory to scanning | |
113 | 7074 oop obj = oop(ptr); |
0 | 7075 // Ignore mark word in verification below, since we |
7076 // may be running concurrent with mutators. | |
113 | 7077 assert(obj->is_oop(true), "should be an oop"); |
0 | 7078 assert(_finger <= ptr, "_finger runneth ahead"); |
7079 // advance the finger to right end of this object | |
113 | 7080 _finger = ptr + obj->size(); |
0 | 7081 assert(_finger > ptr, "we just incremented it above"); |
7082 // On large heaps, it may take us some time to get through | |
7083 // the marking phase (especially if running iCMS). During | |
7084 // this time it's possible that a lot of mutations have | |
7085 // accumulated in the card table and the mod union table -- | |
7086 // these mutation records are redundant until we have | |
7087 // actually traced into the corresponding card. | |
7088 // Here, we check whether advancing the finger would make | |
7089 // us cross into a new card, and if so clear corresponding | |
7090 // cards in the MUT (preclean them in the card-table in the | |
7091 // future). | |
7092 | |
7093 // The clean-on-enter optimization is disabled by default, | |
7094 // until we fix 6178663. | |
7095 if (CMSCleanOnEnter && (_finger > _threshold)) { | |
7096 // [_threshold, _finger) represents the interval | |
7097 // of cards to be cleared in MUT (or precleaned in card table). | |
7098 // The set of cards to be cleared is all those that overlap | |
7099 // with the interval [_threshold, _finger); note that | |
7100 // _threshold is always kept card-aligned but _finger isn't | |
7101 // always card-aligned. | |
7102 HeapWord* old_threshold = _threshold; | |
7103 assert(old_threshold == (HeapWord*)round_to( | |
7104 (intptr_t)old_threshold, CardTableModRefBS::card_size), | |
7105 "_threshold should always be card-aligned"); | |
7106 _threshold = (HeapWord*)round_to( | |
7107 (intptr_t)_finger, CardTableModRefBS::card_size); | |
7108 MemRegion mr(old_threshold, _threshold); | |
7109 assert(!mr.is_empty(), "Control point invariant"); | |
7110 assert(_span.contains(mr), "Should clear within span"); // _whole_span ?? | |
7111 // XXX When _finger crosses from old gen into perm gen | |
7112 // we may be doing unnecessary cleaning; do better in the | |
7113 // future by detecting that condition and clearing fewer | |
7114 // MUT/CT entries. | |
7115 _mut->clear_range(mr); | |
7116 } | |
7117 | |
7118 // Note: the local finger doesn't advance while we drain | |
7119 // the stack below, but the global finger sure can and will. | |
7120 HeapWord** gfa = _task->global_finger_addr(); | |
7121 Par_PushOrMarkClosure pushOrMarkClosure(_collector, | |
7122 _span, _bit_map, | |
7123 _work_queue, | |
7124 _overflow_stack, | |
7125 _revisit_stack, | |
7126 _finger, | |
7127 gfa, this); | |
113 | 7128 bool res = _work_queue->push(obj); // overflow could occur here |
0 | 7129 assert(res, "Will hold once we use workqueues"); |
7130 while (true) { | |
7131 oop new_oop; | |
7132 if (!_work_queue->pop_local(new_oop)) { | |
7133 // We emptied our work_queue; check if there's stuff that can | |
7134 // be gotten from the overflow stack. | |
7135 if (CMSConcMarkingTask::get_work_from_overflow_stack( | |
7136 _overflow_stack, _work_queue)) { | |
7137 do_yield_check(); | |
7138 continue; | |
7139 } else { // done | |
7140 break; | |
7141 } | |
7142 } | |
7143 // Skip verifying header mark word below because we are | |
7144 // running concurrent with mutators. | |
7145 assert(new_oop->is_oop(true), "Oops! expected to pop an oop"); | |
7146 // now scan this oop's oops | |
7147 new_oop->oop_iterate(&pushOrMarkClosure); | |
7148 do_yield_check(); | |
7149 } | |
7150 assert(_work_queue->size() == 0, "tautology, emphasizing post-condition"); | |
7151 } | |
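Editor's aside: the drain loop above in miniature. The refill helper is a hypothetical stand-in for CMSConcMarkingTask::get_work_from_overflow_stack, which in the real code moves a batch of entries under the overflow stack's lock rather than one at a time.

#include <cassert>
#include <deque>
#include <vector>

// Pull one entry from the shared overflow stack into the local queue.
bool refill(std::deque<int>& local, std::vector<int>& overflow) {
  if (overflow.empty()) return false;
  local.push_back(overflow.back());
  overflow.pop_back();
  return true;
}

int main() {
  std::deque<int> local = {1, 2};    // our work queue
  std::vector<int> overflow = {3};   // the shared overflow stack
  int scanned = 0;
  while (true) {
    if (local.empty()) {
      if (refill(local, overflow)) continue;  // got more work
      break;                                  // both empty: done
    }
    local.pop_front();  // "scan" one object's oops
    scanned++;
  }
  assert(scanned == 3 && overflow.empty());
  return 0;
}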
7152 | |
7153 // Yield in response to a request from VM Thread or | |
7154 // from mutators. | |
7155 void Par_MarkFromRootsClosure::do_yield_work() { | |
7156 assert(_task != NULL, "sanity"); | |
7157 _task->yield(); | |
7158 } | |
7159 | |
7160 // A variant of the above used for verifying CMS marking work. | |
7161 MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector, | |
7162 MemRegion span, | |
7163 CMSBitMap* verification_bm, CMSBitMap* cms_bm, | |
7164 CMSMarkStack* mark_stack): | |
7165 _collector(collector), | |
7166 _span(span), | |
7167 _verification_bm(verification_bm), | |
7168 _cms_bm(cms_bm), | |
7169 _mark_stack(mark_stack), | |
7170 _pam_verify_closure(collector, span, verification_bm, cms_bm, | |
7171 mark_stack) | |
7172 { | |
7173 assert(_mark_stack->isEmpty(), "stack should be empty"); | |
7174 _finger = _verification_bm->startWord(); | |
7175 assert(_collector->_restart_addr == NULL, "Sanity check"); | |
7176 assert(_span.contains(_finger), "Out of bounds _finger?"); | |
7177 } | |
7178 | |
7179 void MarkFromRootsVerifyClosure::reset(HeapWord* addr) { | |
7180 assert(_mark_stack->isEmpty(), "would cause duplicates on stack"); | |
7181 assert(_span.contains(addr), "Out of bounds _finger?"); | |
7182 _finger = addr; | |
7183 } | |
7184 | |
7185 // Should revisit to see if this should be restructured for | |
7186 // greater efficiency. | |
7187 void MarkFromRootsVerifyClosure::do_bit(size_t offset) { | |
7188 // convert offset into a HeapWord* | |
7189 HeapWord* addr = _verification_bm->startWord() + offset; | |
7190 assert(_verification_bm->endWord() && addr < _verification_bm->endWord(), | |
7191 "address out of range"); | |
7192 assert(_verification_bm->isMarked(addr), "tautology"); | |
7193 assert(_cms_bm->isMarked(addr), "tautology"); | |
7194 | |
7195 assert(_mark_stack->isEmpty(), | |
7196 "should drain stack to limit stack usage"); | |
7197 // convert addr to an oop preparatory to scanning | |
113 | 7198 oop obj = oop(addr); |
7199 assert(obj->is_oop(), "should be an oop"); |
0 | 7200 assert(_finger <= addr, "_finger runneth ahead"); |
7201 // advance the finger to right end of this object | |
113 | 7202 _finger = addr + obj->size(); |
0 | 7203 assert(_finger > addr, "we just incremented it above"); |
7204 // Note: the finger doesn't advance while we drain | |
7205 // the stack below. | |
113 | 7206 bool res = _mark_stack->push(obj); |
0 | 7207 assert(res, "Empty non-zero size stack should have space for single push"); |
7208 while (!_mark_stack->isEmpty()) { | |
7209 oop new_oop = _mark_stack->pop(); | |
7210 assert(new_oop->is_oop(), "Oops! expected to pop an oop"); | |
7211 // now scan this oop's oops | |
7212 new_oop->oop_iterate(&_pam_verify_closure); | |
7213 } | |
7214 assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition"); | |
7215 } | |
7216 | |
7217 PushAndMarkVerifyClosure::PushAndMarkVerifyClosure( | |
7218 CMSCollector* collector, MemRegion span, | |
7219 CMSBitMap* verification_bm, CMSBitMap* cms_bm, | |
7220 CMSMarkStack* mark_stack): | |
7221 OopClosure(collector->ref_processor()), | |
7222 _collector(collector), | |
7223 _span(span), | |
7224 _verification_bm(verification_bm), | |
7225 _cms_bm(cms_bm), | |
7226 _mark_stack(mark_stack) | |
7227 { } | |
7228 | |
113 | 7229 void PushAndMarkVerifyClosure::do_oop(oop* p) { PushAndMarkVerifyClosure::do_oop_work(p); } |
7230 void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); } |
0 | 7231 |
7232 // Upon stack overflow, we discard (part of) the stack, | |
7233 // remembering the least address amongst those discarded | |
7234 // in CMSCollector's _restart_address. | |
7235 void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) { | |
7236 // Remember the least grey address discarded | |
7237 HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost); | |
7238 _collector->lower_restart_addr(ra); | |
7239 _mark_stack->reset(); // discard stack contents | |
7240 _mark_stack->expand(); // expand the stack if possible | |
7241 } | |
7242 | |
113 | 7243 void PushAndMarkVerifyClosure::do_oop(oop obj) { |
7244 assert(obj->is_oop_or_null(), "expected an oop or NULL"); |
7245 HeapWord* addr = (HeapWord*)obj; |
0 | 7246 if (_span.contains(addr) && !_verification_bm->isMarked(addr)) { |
7247 // Oop lies in _span and isn't yet grey or black | |
7248 _verification_bm->mark(addr); // now grey | |
7249 if (!_cms_bm->isMarked(addr)) { | |
7250 oop(addr)->print(); | |
113 | 7251 gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", |
7252 addr); |
0 | 7253 fatal("... aborting"); |
7254 } | |
7255 | |
113 | 7256 if (!_mark_stack->push(obj)) { // stack overflow |
0 | 7257 if (PrintCMSStatistics != 0) { |
7258 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at " | |
7259 SIZE_FORMAT, _mark_stack->capacity()); | |
7260 } | |
7261 assert(_mark_stack->isFull(), "Else push should have succeeded"); | |
7262 handle_stack_overflow(addr); | |
7263 } | |
7264 // anything including and to the right of _finger | |
7265 // will be scanned as we iterate over the remainder of the | |
7266 // bit map | |
7267 } | |
7268 } | |
7269 | |
7270 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector, | |
7271 MemRegion span, | |
7272 CMSBitMap* bitMap, CMSMarkStack* markStack, | |
7273 CMSMarkStack* revisitStack, | |
7274 HeapWord* finger, MarkFromRootsClosure* parent) : | |
7275 OopClosure(collector->ref_processor()), | |
7276 _collector(collector), | |
7277 _span(span), | |
7278 _bitMap(bitMap), | |
7279 _markStack(markStack), | |
7280 _revisitStack(revisitStack), | |
7281 _finger(finger), | |
7282 _parent(parent), | |
94 | 7283 _should_remember_klasses(collector->should_unload_classes()) |
0 | 7284 { } |
7285 | |
7286 Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector, | |
7287 MemRegion span, | |
7288 CMSBitMap* bit_map, | |
7289 OopTaskQueue* work_queue, | |
7290 CMSMarkStack* overflow_stack, | |
7291 CMSMarkStack* revisit_stack, | |
7292 HeapWord* finger, | |
7293 HeapWord** global_finger_addr, | |
7294 Par_MarkFromRootsClosure* parent) : | |
7295 OopClosure(collector->ref_processor()), | |
7296 _collector(collector), | |
7297 _whole_span(collector->_span), | |
7298 _span(span), | |
7299 _bit_map(bit_map), | |
7300 _work_queue(work_queue), | |
7301 _overflow_stack(overflow_stack), | |
7302 _revisit_stack(revisit_stack), | |
7303 _finger(finger), | |
7304 _global_finger_addr(global_finger_addr), | |
7305 _parent(parent), | |
94 | 7306 _should_remember_klasses(collector->should_unload_classes()) |
0 | 7307 { } |
7308 | |
7309 void CMSCollector::lower_restart_addr(HeapWord* low) { | |
7310 assert(_span.contains(low), "Out of bounds addr"); | |
7311 if (_restart_addr == NULL) { | |
7312 _restart_addr = low; | |
7313 } else { | |
7314 _restart_addr = MIN2(_restart_addr, low); | |
7315 } | |
7316 } | |
7317 | |
7318 // Upon stack overflow, we discard (part of) the stack, | |
7319 // remembering the least address amongst those discarded | |
7320 // in CMSCollector's _restart_address. | |
7321 void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) { | |
7322 // Remember the least grey address discarded | |
7323 HeapWord* ra = (HeapWord*)_markStack->least_value(lost); | |
7324 _collector->lower_restart_addr(ra); | |
7325 _markStack->reset(); // discard stack contents | |
7326 _markStack->expand(); // expand the stack if possible | |
7327 } | |
7328 | |
7329 // Upon stack overflow, we discard (part of) the stack, | |
7330 // remembering the least address amongst those discarded | |
7331 // in CMSCollector's _restart_address. | |
7332 void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) { | |
7333 // We need to do this under a mutex to prevent other | |
7334 // workers from interfering with the expansion below. | |
7335 MutexLockerEx ml(_overflow_stack->par_lock(), | |
7336 Mutex::_no_safepoint_check_flag); | |
7337 // Remember the least grey address discarded | |
7338 HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost); | |
7339 _collector->lower_restart_addr(ra); | |
7340 _overflow_stack->reset(); // discard stack contents | |
7341 _overflow_stack->expand(); // expand the stack if possible | |
7342 } | |
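Editor's aside: a toy model of the overflow/restart protocol shared by the handle_stack_overflow variants above. ToyCollector and the char-array "heap" are hypothetical; std::min stands in for MIN2. Each overflow records the least grey address it discarded, and the collector keeps the minimum so a later pass can rescan from there.

#include <algorithm>
#include <cassert>

struct ToyCollector {
  const char* restart_addr = nullptr;  // nullptr: no restart pending
  // Mirrors CMSCollector::lower_restart_addr above.
  void lower_restart_addr(const char* low) {
    restart_addr = (restart_addr == nullptr) ? low
                                             : std::min(restart_addr, low);
  }
};

int main() {
  char heap[100];
  ToyCollector c;
  c.lower_restart_addr(&heap[40]);      // one overflow discards from 40 up
  c.lower_restart_addr(&heap[15]);      // another discards from 15 up
  assert(c.restart_addr == &heap[15]);  // rescan must resume at the minimum
  return 0;
}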
7343 | |
113 | 7344 void PushOrMarkClosure::do_oop(oop obj) { |
0 | 7345 // Ignore mark word because we are running concurrent with mutators. |
113 | 7346 assert(obj->is_oop_or_null(true), "expected an oop or NULL"); |
7347 HeapWord* addr = (HeapWord*)obj; |
0 | 7348 if (_span.contains(addr) && !_bitMap->isMarked(addr)) { |
7349 // Oop lies in _span and isn't yet grey or black | |
7350 _bitMap->mark(addr); // now grey | |
7351 if (addr < _finger) { | |
7352 // the bit map iteration has already either passed, or | |
7353 // sampled, this bit in the bit map; we'll need to | |
7354 // use the marking stack to scan this oop's oops. | |
7355 bool simulate_overflow = false; | |
7356 NOT_PRODUCT( | |
7357 if (CMSMarkStackOverflowALot && | |
7358 _collector->simulate_overflow()) { | |
7359 // simulate a stack overflow | |
7360 simulate_overflow = true; | |
7361 } | |
7362 ) | |
113 | 7363 if (simulate_overflow || !_markStack->push(obj)) { // stack overflow |
0 | 7364 if (PrintCMSStatistics != 0) { |
7365 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at " | |
7366 SIZE_FORMAT, _markStack->capacity()); | |
7367 } | |
7368 assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded"); | |
7369 handle_stack_overflow(addr); | |
7370 } | |
7371 } | |
7372 // anything including and to the right of _finger | |
7373 // will be scanned as we iterate over the remainder of the | |
7374 // bit map | |
7375 do_yield_check(); | |
7376 } | |
7377 } | |
7378 | |
113 | 7379 void PushOrMarkClosure::do_oop(oop* p) { PushOrMarkClosure::do_oop_work(p); } |
7380 void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); } |
7381 |
7382 void Par_PushOrMarkClosure::do_oop(oop obj) { |
0 | 7383 // Ignore mark word because we are running concurrent with mutators. |
113 | 7384 assert(obj->is_oop_or_null(true), "expected an oop or NULL"); |
7385 HeapWord* addr = (HeapWord*)obj; |
0 | 7386 if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) { |
7387 // Oop lies in _span and isn't yet grey or black | |
7388 // We read the global_finger (volatile read) strictly after marking oop | |
7389 bool res = _bit_map->par_mark(addr); // now grey | |
7390 volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr; | |
7391 // Should we push this marked oop on our stack? | |
7392 // -- if someone else marked it, nothing to do | |
7393 // -- if target oop is above global finger nothing to do | |
7394 // -- if target oop is in chunk and above local finger | |
7395 // then nothing to do | |
7396 // -- else push on work queue | |
7397 if ( !res // someone else marked it, they will deal with it | |
7398 || (addr >= *gfa) // will be scanned in a later task | |
7399 || (_span.contains(addr) && addr >= _finger)) { // later in this chunk | |
7400 return; | |
7401 } | |
7402 // the bit map iteration has already either passed, or | |
7403 // sampled, this bit in the bit map; we'll need to | |
7404 // use the marking stack to scan this oop's oops. | |
7405 bool simulate_overflow = false; | |
7406 NOT_PRODUCT( | |
7407 if (CMSMarkStackOverflowALot && | |
7408 _collector->simulate_overflow()) { | |
7409 // simulate a stack overflow | |
7410 simulate_overflow = true; | |
7411 } | |
7412 ) | |
7413 if (simulate_overflow || | |
113 | 7414 !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) { |
0 | 7415 // stack overflow |
7416 if (PrintCMSStatistics != 0) { | |
7417 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at " | |
7418 SIZE_FORMAT, _overflow_stack->capacity()); | |
7419 } | |
7420 // We cannot assert that the overflow stack is full because | |
7421 // it may have been emptied since. | |
7422 assert(simulate_overflow || | |
7423 _work_queue->size() == _work_queue->max_elems(), | |
7424 "Else push should have succeeded"); | |
7425 handle_stack_overflow(addr); | |
7426 } | |
7427 do_yield_check(); | |
7428 } | |
7429 } | |
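Editor's aside: the push/skip decision in Par_PushOrMarkClosure::do_oop above, restated as a standalone predicate over plain pointers. All names here are hypothetical stand-ins for the closure's fields; 'we_marked_it' models winning the par_mark() race.

#include <cassert>

// Returns true iff the oop at 'addr' must go on the work queue.
bool should_push(bool we_marked_it,
                 const char* addr,
                 const char* global_finger,
                 const char* span_lo, const char* span_hi,  // our chunk
                 const char* local_finger) {
  if (!we_marked_it)         return false;  // another worker deals with it
  if (addr >= global_finger) return false;  // scanned in a later task
  bool in_our_chunk = (span_lo <= addr && addr < span_hi);
  if (in_our_chunk && addr >= local_finger)
    return false;                           // reached later in this chunk
  return true;  // iteration already passed it: queue it explicitly
}

int main() {
  char h[100];
  // Marked by us and behind both fingers: must be pushed.
  assert( should_push(true,  &h[5],  &h[80], &h[0], &h[50], &h[20]));
  // Lost the marking race: nothing to do.
  assert(!should_push(false, &h[5],  &h[80], &h[0], &h[50], &h[20]));
  // Above the global finger: some later task scans it.
  assert(!should_push(true,  &h[90], &h[80], &h[0], &h[50], &h[20]));
  // In our chunk, at or above our local finger: we get there ourselves.
  assert(!should_push(true,  &h[30], &h[80], &h[0], &h[50], &h[20]));
  return 0;
}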
7430 | |
113 | 7431 void Par_PushOrMarkClosure::do_oop(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); } |
7432 void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); } |
0 | 7433 |
7434 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector, | |
7435 MemRegion span, | |
7436 ReferenceProcessor* rp, | |
7437 CMSBitMap* bit_map, | |
7438 CMSBitMap* mod_union_table, | |
7439 CMSMarkStack* mark_stack, | |
7440 CMSMarkStack* revisit_stack, | |
7441 bool concurrent_precleaning): | |
7442 OopClosure(rp), | |
7443 _collector(collector), | |
7444 _span(span), | |
7445 _bit_map(bit_map), | |
7446 _mod_union_table(mod_union_table), | |
7447 _mark_stack(mark_stack), | |
7448 _revisit_stack(revisit_stack), | |
7449 _concurrent_precleaning(concurrent_precleaning), | |
94 | 7450 _should_remember_klasses(collector->should_unload_classes()) |
0 | 7451 { |
7452 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL"); | |
7453 } | |
7454 | |
7455 // Grey object rescan during pre-cleaning and second checkpoint phases -- | |
7456 // the non-parallel version (the parallel version appears further below.) | |
113 | 7457 void PushAndMarkClosure::do_oop(oop obj) { |
7458 // If _concurrent_precleaning, ignore mark word verification |
7459 assert(obj->is_oop_or_null(_concurrent_precleaning), |
0 | 7460 "expected an oop or NULL"); |
113 | 7461 HeapWord* addr = (HeapWord*)obj; |
0 | 7462 // Check if oop points into the CMS generation |
7463 // and is not marked | |
7464 if (_span.contains(addr) && !_bit_map->isMarked(addr)) { | |
7465 // a white object ... | |
7466 _bit_map->mark(addr); // ... now grey | |
7467 // push on the marking stack (grey set) | |
7468 bool simulate_overflow = false; | |
7469 NOT_PRODUCT( | |
7470 if (CMSMarkStackOverflowALot && | |
7471 _collector->simulate_overflow()) { | |
7472 // simulate a stack overflow | |
7473 simulate_overflow = true; | |
7474 } | |
7475 ) | |
113 | 7476 if (simulate_overflow || !_mark_stack->push(obj)) { |
0 | 7477 if (_concurrent_precleaning) { |
7478 // During precleaning we can just dirty the appropriate card | |
7479 // in the mod union table, thus ensuring that the object remains | |
7480 // in the grey set and continue. Note that no one can be interfering | |
7481 // with us in this action of dirtying the mod union table, so | |
7482 // no locking is required. | |
7483 _mod_union_table->mark(addr); | |
7484 _collector->_ser_pmc_preclean_ovflw++; | |
7485 } else { | |
7486 // During the remark phase, we need to remember this oop | |
7487 // in the overflow list. | |
113 | 7488 _collector->push_on_overflow_list(obj); |
0 | 7489 _collector->_ser_pmc_remark_ovflw++; |
7490 } | |
7491 } | |
7492 } | |
7493 } | |
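Editor's aside: a toy of the two overflow fallbacks in PushAndMarkClosure::do_oop above. ToyState, on_push_failed and the 64-words-per-card figure are assumptions for illustration only: during precleaning, dirtying the object's mod-union card keeps it grey without locking; at remark time the oop goes on an overflow list instead.

#include <cassert>
#include <set>
#include <vector>

const unsigned card_words = 64;        // words per card; example assumption

struct ToyState {
  std::set<unsigned>    dirty_mut_cards; // stands in for _mod_union_table
  std::vector<unsigned> overflow_list;   // stands in for the overflow list
};

// What do_oop above does when a push onto the mark stack fails.
void on_push_failed(ToyState& s, unsigned addr_word, bool precleaning) {
  if (precleaning) {
    // Object stays grey via its dirty card and will be rescanned.
    s.dirty_mut_cards.insert(addr_word / card_words);
  } else {
    // No later rescan at remark time, so remember the oop itself.
    s.overflow_list.push_back(addr_word);
  }
}

int main() {
  ToyState s;
  on_push_failed(s, 130, true);                            // preclean-time
  assert(s.dirty_mut_cards.count(130 / card_words) == 1);  // card 2 dirty
  on_push_failed(s, 130, false);                           // remark-time
  assert(s.overflow_list.size() == 1);
  return 0;
}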
7494 | |
7495 Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector, | |
7496 MemRegion span, | |
7497 ReferenceProcessor* rp, | |
7498 CMSBitMap* bit_map, | |
7499 OopTaskQueue* work_queue, | |
7500 CMSMarkStack* revisit_stack): | |
7501 OopClosure(rp), | |
7502 _collector(collector), | |
7503 _span(span), | |
7504 _bit_map(bit_map), | |
7505 _work_queue(work_queue), | |
7506 _revisit_stack(revisit_stack), | |
94 | 7507 _should_remember_klasses(collector->should_unload_classes()) |
0 | 7508 { |
7509 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL"); | |
7510 } | |
7511 | |
113 | 7512 void PushAndMarkClosure::do_oop(oop* p) { PushAndMarkClosure::do_oop_work(p); } |
7513 void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); } |
7514 |
0 | 7515 // Grey object rescan during second checkpoint phase -- |
7516 // the parallel version. | |
113 | 7517 void Par_PushAndMarkClosure::do_oop(oop obj) { |
0 | 7518 // In the assert below, we ignore the mark word because |
7519 // this oop may point to an already visited object that is | |
7520 // on the overflow stack (in which case the mark word has | |
7521 // been hijacked for chaining into the overflow stack -- | |
7522 // if this is the last object in the overflow stack then | |
7523 // its mark word will be NULL). Because this object may | |
7524 // have been subsequently popped off the global overflow | |
7525 // stack, and the mark word possibly restored to the prototypical | |
7526 // value, by the time we get to examine this failing assert in | |
7527 // the debugger, is_oop_or_null(false) may subsequently start | |
7528 // to hold. | |
113 | 7529 assert(obj->is_oop_or_null(true), |
0 | 7530 "expected an oop or NULL"); |
113 | 7531 HeapWord* addr = (HeapWord*)obj; |
0 | 7532 // Check if oop points into the CMS generation |
7533 // and is not marked | |
7534 if (_span.contains(addr) && !_bit_map->isMarked(addr)) { | |
7535 // a white object ... | |
7536 // If we manage to "claim" the object, by being the | |
7537 // first thread to mark it, then we push it on our | |
7538 // marking stack | |
7539 if (_bit_map->par_mark(addr)) { // ... now grey | |
7540 // push on work queue (grey set) | |
7541 bool simulate_overflow = false; | |
7542 NOT_PRODUCT( | |
7543 if (CMSMarkStackOverflowALot && | |
7544 _collector->par_simulate_overflow()) { | |
7545 // simulate a stack overflow | |
7546 simulate_overflow = true; | |
7547 } | |
7548 ) | |
113 | 7549 if (simulate_overflow || !_work_queue->push(obj)) { |
7550 _collector->par_push_on_overflow_list(obj); |
0 | 7551 _collector->_par_pmc_remark_ovflw++; // imprecise OK: no need to CAS |
7552 } | |
7553 } // Else, some other thread got there first | |
7554 } | |
7555 } | |
7556 | |
113 | 7557 void Par_PushAndMarkClosure::do_oop(oop* p) { Par_PushAndMarkClosure::do_oop_work(p); } |
7558 void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); } |
7559 |
0 | 7560 void PushAndMarkClosure::remember_klass(Klass* k) { |
7561 if (!_revisit_stack->push(oop(k))) { | |
7562 fatal("Revisit stack overflowed in PushAndMarkClosure"); | |
7563 } | |
7564 } | |
7565 | |
7566 void Par_PushAndMarkClosure::remember_klass(Klass* k) { | |
7567 if (!_revisit_stack->par_push(oop(k))) { | |
7568 fatal("Revist stack overflowed in Par_PushAndMarkClosure"); | |
7569 } | |
7570 } | |
7571 | |
7572 void CMSPrecleanRefsYieldClosure::do_yield_work() { | |
7573 Mutex* bml = _collector->bitMapLock(); | |
7574 assert_lock_strong(bml); | |
7575 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), | |
7576 "CMS thread should hold CMS token"); | |
7577 | |
7578 bml->unlock(); | |
7579 ConcurrentMarkSweepThread::desynchronize(true); | |
7580 | |
7581 ConcurrentMarkSweepThread::acknowledge_yield_request(); | |
7582 | |
7583 _collector->stopTimer(); | |
7584 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr()); | |
7585 if (PrintCMSStatistics != 0) { | |
7586 _collector->incrementYields(); | |
7587 } | |
7588 _collector->icms_wait(); | |
7589 | |
7590 // See the comment in coordinator_yield() | |
7591 for (unsigned i = 0; i < CMSYieldSleepCount && | |
7592 ConcurrentMarkSweepThread::should_yield() && | |
7593 !CMSCollector::foregroundGCIsActive(); ++i) { | |
7594 os::sleep(Thread::current(), 1, false); | |
7595 ConcurrentMarkSweepThread::acknowledge_yield_request(); | |
7596 } | |
7597 | |
7598 ConcurrentMarkSweepThread::synchronize(true); | |
7599 bml->lock(); | |
7600 | |
7601 _collector->startTimer(); | |
7602 } | |
7603 | |
7604 bool CMSPrecleanRefsYieldClosure::should_return() { | |
7605 if (ConcurrentMarkSweepThread::should_yield()) { | |
7606 do_yield_work(); | |
7607 } | |
7608 return _collector->foregroundGCIsActive(); | |
7609 } | |
7610 | |
7611 void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) { | |
7612 assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0, | |
7613 "mr should be aligned to start at a card boundary"); | |
7614 // We'd like to assert: | |
7615 // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0, | |
7616 // "mr should be a range of cards"); | |
7617 // However, that would be too strong in one case -- the last | |
7618 // partition ends at _unallocated_block which, in general, can be | |
7619 // an arbitrary boundary, not necessarily card aligned. | |
7620 if (PrintCMSStatistics != 0) { | |
7621 _num_dirty_cards += | |
7622 mr.word_size()/CardTableModRefBS::card_size_in_words; | |
7623 } | |
7624 _space->object_iterate_mem(mr, &_scan_cl); | |
7625 } | |
7626 | |
7627 SweepClosure::SweepClosure(CMSCollector* collector, | |
7628 ConcurrentMarkSweepGeneration* g, | |
7629 CMSBitMap* bitMap, bool should_yield) : | |
7630 _collector(collector), | |
7631 _g(g), | |
7632 _sp(g->cmsSpace()), | |
7633 _limit(_sp->sweep_limit()), | |
7634 _freelistLock(_sp->freelistLock()), | |
7635 _bitMap(bitMap), | |
7636 _yield(should_yield), | |
7637 _inFreeRange(false), // No free range at beginning of sweep | |
7638 _freeRangeInFreeLists(false), // No free range at beginning of sweep | |
7639 _lastFreeRangeCoalesced(false), | |
7640 _freeFinger(g->used_region().start()) | |
7641 { | |
7642 NOT_PRODUCT( | |
7643 _numObjectsFreed = 0; | |
7644 _numWordsFreed = 0; | |
7645 _numObjectsLive = 0; | |
7646 _numWordsLive = 0; | |
7647 _numObjectsAlreadyFree = 0; | |
7648 _numWordsAlreadyFree = 0; | |
7649 _last_fc = NULL; | |
7650 | |
7651 _sp->initializeIndexedFreeListArrayReturnedBytes(); | |
7652 _sp->dictionary()->initializeDictReturnedBytes(); | |
7653 ) | |
7654 assert(_limit >= _sp->bottom() && _limit <= _sp->end(), | |
7655 "sweep _limit out of bounds"); | |
7656 if (CMSTraceSweeper) { | |
7657 gclog_or_tty->print("\n====================\nStarting new sweep\n"); | |
7658 } | |
7659 } | |
7660 | |
7661 // We need this destructor to reclaim any space at the end | |
7662 // of the space, which do_blk below may not have added back to | |
7663 // the free lists. [basically dealing with the "fringe effect"] | |
7664 SweepClosure::~SweepClosure() { | |
7665 assert_lock_strong(_freelistLock); | |
7666 // this should be treated as the end of a free run if any | |
7667 // The current free range should be returned to the free lists | |
7668 // as one coalesced chunk. | |
7669 if (inFreeRange()) { | |
7670 flushCurFreeChunk(freeFinger(), | |
7671 pointer_delta(_limit, freeFinger())); | |
7672 assert(freeFinger() < _limit, "the finger pointeth off base"); | |
7673 if (CMSTraceSweeper) { | |
7674 gclog_or_tty->print("destructor:"); | |
7675 gclog_or_tty->print("Sweep:put_free_blk 0x%x ("SIZE_FORMAT") " | |
7676 "[coalesced:"SIZE_FORMAT"]\n", | |
7677 freeFinger(), pointer_delta(_limit, freeFinger()), | |
7678 lastFreeRangeCoalesced()); | |
7679 } | |
7680 } | |
7681 NOT_PRODUCT( | |
7682 if (Verbose && PrintGC) { | |
7683 gclog_or_tty->print("Collected "SIZE_FORMAT" objects, " | |
7684 SIZE_FORMAT " bytes", | |
7685 _numObjectsFreed, _numWordsFreed*sizeof(HeapWord)); | |
7686 gclog_or_tty->print_cr("\nLive "SIZE_FORMAT" objects, " | |
7687 SIZE_FORMAT" bytes " | |
7688 "Already free "SIZE_FORMAT" objects, "SIZE_FORMAT" bytes", | |
7689 _numObjectsLive, _numWordsLive*sizeof(HeapWord), | |
7690 _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord)); | |
7691 size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) * | |
7692 sizeof(HeapWord); | |
7693 gclog_or_tty->print_cr("Total sweep: "SIZE_FORMAT" bytes", totalBytes); | |
7694 | |
7695 if (PrintCMSStatistics && CMSVerifyReturnedBytes) { | |
7696 size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes(); | |
7697 size_t dictReturnedBytes = _sp->dictionary()->sumDictReturnedBytes(); | |
7698 size_t returnedBytes = indexListReturnedBytes + dictReturnedBytes; | |
7699 gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returnedBytes); | |
7700 gclog_or_tty->print(" Indexed List Returned "SIZE_FORMAT" bytes", | |
7701 indexListReturnedBytes); | |
7702 gclog_or_tty->print_cr(" Dictionary Returned "SIZE_FORMAT" bytes", | |
7703 dictReturnedBytes); | |
7704 } | |
7705 } | |
7706 ) | |
7707 // Now, in debug mode, just null out the sweep_limit | |
7708 NOT_PRODUCT(_sp->clear_sweep_limit();) | |
7709 if (CMSTraceSweeper) { | |
7710 gclog_or_tty->print("end of sweep\n================\n"); | |
7711 } | |
7712 } | |
7713 | |
7714 void SweepClosure::initialize_free_range(HeapWord* freeFinger, | |
7715 bool freeRangeInFreeLists) { | |
7716 if (CMSTraceSweeper) { | |
7717 gclog_or_tty->print("---- Start free range 0x%x with free block [%d] (%d)\n", | |
7718 freeFinger, _sp->block_size(freeFinger), | |
7719 freeRangeInFreeLists); | |
7720 } | |
7721 assert(!inFreeRange(), "Trampling existing free range"); | |
7722 set_inFreeRange(true); | |
7723 set_lastFreeRangeCoalesced(false); | |
7724 | |
7725 set_freeFinger(freeFinger); | |
7726 set_freeRangeInFreeLists(freeRangeInFreeLists); | |
7727 if (CMSTestInFreeList) { | |
7728 if (freeRangeInFreeLists) { | |
7729 FreeChunk* fc = (FreeChunk*) freeFinger; | |
7730 assert(fc->isFree(), "A chunk on the free list should be free."); | |
7731 assert(fc->size() > 0, "Free range should have a size"); | |
7732 assert(_sp->verifyChunkInFreeLists(fc), "Chunk is not in free lists"); | |
7733 } | |
7734 } | |
7735 } | |
7736 | |
7737 // Note that the sweeper runs concurrently with mutators. Thus, | |
7738 // it is possible for direct allocation in this generation to happen | |
7739 // in the middle of the sweep. Note that the sweeper also coalesces | |
7740 // contiguous free blocks. Thus, unless the sweeper and the allocator | |
7741 // synchronize appropriately, freshly allocated blocks may get swept up. | |
7742 // This is accomplished by the sweeper locking the free lists while | |
7743 // it is sweeping. Thus blocks that are determined to be free are | |
7744 // indeed free. There is however one additional complication: | |
7745 // blocks that have been allocated since the final checkpoint and | |
7746 // mark, will not have been marked and so would be treated as | |
7747 // unreachable and swept up. To prevent this, the allocator marks | |
7748 // the bit map when allocating during the sweep phase. This leads, | |
7749 // however, to a further complication -- objects may have been allocated | |
7750 // but not yet initialized -- in the sense that the header isn't yet | |
7751 // installed. The sweeper cannot then determine the size of the block | |
7752 // in order to skip over it. To deal with this case, we use a technique | |
7753 // (due to Printezis) to encode such uninitialized block sizes in the | |
7754 // bit map. Since the bit map uses a bit per every HeapWord, but the | |
7755 // CMS generation has a minimum object size of 3 HeapWords, it follows | |
7756 // that "normal marks" won't be adjacent in the bit map (there will | |
7757 // always be at least two 0 bits between successive 1 bits). We make use | |
7758 // of these "unused" bits to represent uninitialized blocks -- the bit | |
7759 // corresponding to the start of the uninitialized object and the next | |
7760 // bit are both set. Finally, a 1 bit marks the end of the object that | |
7761 // started with the two consecutive 1 bits to indicate its potentially | |
7762 // uninitialized state. | |
7763 | |
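Editor's aside: a minimal model of the Printezis-mark encoding just described, using std::vector<bool> in place of CMSBitMap and word indices in place of HeapWord*. The function names are hypothetical; the encoding itself follows the comment above (two adjacent 1 bits at the start, one more at the last word).

#include <cassert>
#include <vector>

// Mark an uninitialized block of 'size' words starting at word 'start'.
void printezis_mark(std::vector<bool>& bm, unsigned start, unsigned size) {
  assert(size >= 3 && "needs >= 3 words so the marks stay distinguishable");
  bm[start]            = true;   // bit for the object's first word
  bm[start + 1]        = true;   // adjacent bit: "size is in the bit map"
  bm[start + size - 1] = true;   // bit for the object's last word
}

// Recover the size the way SweepClosure::doLiveChunk does below: find the
// next 1 bit at or after start+2; the block ends one word past it.
unsigned printezis_size(const std::vector<bool>& bm, unsigned start) {
  assert(bm[start] && bm[start + 1] && "not a Printezis-marked block");
  unsigned next_one = start + 2;
  while (!bm[next_one]) next_one++;
  return next_one + 1 - start;
}

int main() {
  std::vector<bool> bm(64, false);
  printezis_mark(bm, 10, 7);            // uninitialized 7-word block at 10
  assert(printezis_size(bm, 10) == 7);  // size recovered from bits alone
  return 0;
}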
7764 size_t SweepClosure::do_blk_careful(HeapWord* addr) { | |
7765 FreeChunk* fc = (FreeChunk*)addr; | |
7766 size_t res; | |
7767 | |
7768 // check if we are done sweeping | |
7769 if (addr == _limit) { // we have swept up to the limit, do nothing more | |
7770 assert(_limit >= _sp->bottom() && _limit <= _sp->end(), | |
7771 "sweep _limit out of bounds"); | |
7772 // help the closure application finish | |
7773 return pointer_delta(_sp->end(), _limit); | |
7774 } | |
7775 assert(addr <= _limit, "sweep invariant"); | |
7776 | |
7777 // check if we should yield | |
7778 do_yield_check(addr); | |
7779 if (fc->isFree()) { | |
7780 // Chunk that is already free | |
7781 res = fc->size(); | |
7782 doAlreadyFreeChunk(fc); | |
7783 debug_only(_sp->verifyFreeLists()); | |
7784 assert(res == fc->size(), "Don't expect the size to change"); | |
7785 NOT_PRODUCT( | |
7786 _numObjectsAlreadyFree++; | |
7787 _numWordsAlreadyFree += res; | |
7788 ) | |
7789 NOT_PRODUCT(_last_fc = fc;) | |
7790 } else if (!_bitMap->isMarked(addr)) { | |
7791 // Chunk is fresh garbage | |
7792 res = doGarbageChunk(fc); | |
7793 debug_only(_sp->verifyFreeLists()); | |
7794 NOT_PRODUCT( | |
7795 _numObjectsFreed++; | |
7796 _numWordsFreed += res; | |
7797 ) | |
7798 } else { | |
7799 // Chunk that is alive. | |
7800 res = doLiveChunk(fc); | |
7801 debug_only(_sp->verifyFreeLists()); | |
7802 NOT_PRODUCT( | |
7803 _numObjectsLive++; | |
7804 _numWordsLive += res; | |
7805 ) | |
7806 } | |
7807 return res; | |
7808 } | |
7809 | |
7810 // For the smart allocation, record following | |
7811 // split deaths - a free chunk is removed from its free list because | |
7812 // it is being split into two or more chunks. | |
7813 // split birth - a free chunk is being added to its free list because | |
7814 // a larger free chunk has been split and resulted in this free chunk. | |
7815 // coal death - a free chunk is being removed from its free list because | |
7816 // it is being coalesced into a large free chunk. | |
7817 // coal birth - a free chunk is being added to its free list because | |
7818 // it was created when two or more free chunks where coalesced into | |
7819 // this free chunk. | |
7820 // | |
7821 // These statistics are used to determine the desired number of free | |
7822 // chunks of a given size. The desired number is chosen to be relative | |
7823 // to the end of a CMS sweep. The desired number at the end of a sweep | |
7824 // is the | |
7825 // count-at-end-of-previous-sweep (an amount that was enough) | |
7826 // - count-at-beginning-of-current-sweep (the excess) | |
7827 // + split-births (gains in this size during interval) | |
7828 // - split-deaths (demands on this size during interval) | |
7829 // where the interval is from the end of one sweep to the end of the | |
7830 // next. | |
7831 // | |
7832 // When sweeping the sweeper maintains an accumulated chunk which is | |
7833 // the chunk that is made up of chunks that have been coalesced. That | |
7834 // will be termed the left-hand chunk. A new chunk of garbage that | |
7835 // is being considered for coalescing will be referred to as the | |
7836 // right-hand chunk. | |
7837 // | |
7838 // When making a decision on whether to coalesce a right-hand chunk with | |
7839 // the current left-hand chunk, the current count vs. the desired count | |
7840 // of the left-hand chunk is considered. Also if the right-hand chunk | |
7841 // is near the large chunk at the end of the heap (see | |
7842 // ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the | |
7843 // left-hand chunk is coalesced. | |
7844 // | |
7845 // When making a decision about whether to split a chunk, the desired count | |
7846 // vs. the current count of the candidate to be split is also considered. | |
7847 // If the candidate is underpopulated (currently fewer chunks than desired) | |
7848 // a chunk of an overpopulated (currently more chunks than desired) size may | |
7849 // be chosen. The "hint" associated with a free list, if non-null, points | |
7850 // to a free list which may be overpopulated. | |
7851 // | |
7852 | |
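Editor's aside: the desired-count formula above as a worked example with invented numbers. The variable names are hypothetical; the real bookkeeping lives in the free lists' allocation statistics.

#include <cstdio>

int main() {
  // One size class, numbers invented for illustration:
  int end_of_prev_sweep   = 100;  // was enough last time around
  int start_of_this_sweep = 120;  // found at the start (the excess)
  int split_births        = 40;   // gains at this size during the interval
  int split_deaths        = 30;   // demands at this size during the interval
  int desired = end_of_prev_sweep - start_of_this_sweep
              + split_births - split_deaths;  // 100 - 120 + 40 - 30 = -10
  // A non-positive result reads as "this size is overpopulated", which is
  // what the coalescing decisions (coalOverPopulated) can then act on.
  std::printf("desired count = %d\n", desired);
  return 0;
}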
7853 void SweepClosure::doAlreadyFreeChunk(FreeChunk* fc) { | |
7854 size_t size = fc->size(); | |
7855 // Chunks that cannot be coalesced are not in the | |
7856 // free lists. | |
7857 if (CMSTestInFreeList && !fc->cantCoalesce()) { | |
7858 assert(_sp->verifyChunkInFreeLists(fc), | |
7859 "free chunk should be in free lists"); | |
7860 } | |
7861 // a chunk that is already free, should not have been | |
7862 // marked in the bit map | |
7863 HeapWord* addr = (HeapWord*) fc; | |
7864 assert(!_bitMap->isMarked(addr), "free chunk should be unmarked"); | |
7865 // Verify that the bit map has no bits marked between | |
7866 // addr and purported end of this block. | |
7867 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size); | |
7868 | |
7869 // Some chunks cannot be coalesced in under any circumstances. | |
7870 // See the definition of cantCoalesce(). | |
7871 if (!fc->cantCoalesce()) { | |
7872 // This chunk can potentially be coalesced. | |
7873 if (_sp->adaptive_freelists()) { | |
7874 // All the work is done in | |
7875 doPostIsFreeOrGarbageChunk(fc, size); | |
7876 } else { // Not adaptive free lists | |
7877 // this is a free chunk that can potentially be coalesced by the sweeper; | |
7878 if (!inFreeRange()) { | |
7879 // if the next chunk is a free block that can't be coalesced | |
7880 // it doesn't make sense to remove this chunk from the free lists | |
7881 FreeChunk* nextChunk = (FreeChunk*)(addr + size); | |
7882 assert((HeapWord*)nextChunk <= _limit, "sweep invariant"); | |
7883 if ((HeapWord*)nextChunk < _limit && // there's a next chunk... | |
7884 nextChunk->isFree() && // which is free... | |
7885 nextChunk->cantCoalesce()) { // ... but can't be coalesced | |
7886 // nothing to do | |
7887 } else { | |
7888 // Potentially the start of a new free range: | |
7889 // Don't eagerly remove it from the free lists. | |
7890 // No need to remove it if it will just be put | |
7891 // back again. (Also from a pragmatic point of view | |
7892 // if it is a free block in a region that is beyond | |
7893 // any allocated blocks, an assertion will fail) | |
7894 // Remember the start of a free run. | |
7895 initialize_free_range(addr, true); | |
7896 // end - can coalesce with next chunk | |
7897 } | |
7898 } else { | |
7899 // the midst of a free range, we are coalescing | |
7900 debug_only(record_free_block_coalesced(fc);) | |
7901 if (CMSTraceSweeper) { | |
7902 gclog_or_tty->print(" -- pick up free block 0x%x (%d)\n", fc, size); | |
7903 } | |
7904 // remove it from the free lists | |
7905 _sp->removeFreeChunkFromFreeLists(fc); | |
7906 set_lastFreeRangeCoalesced(true); | |
7907 // If the chunk is being coalesced and the current free range is | |
7908 // in the free lists, remove the current free range so that it | |
7909 // will be returned to the free lists in its entirety - all | |
7910 // the coalesced pieces included. | |
7911 if (freeRangeInFreeLists()) { | |
7912 FreeChunk* ffc = (FreeChunk*) freeFinger(); | |
7913 assert(ffc->size() == pointer_delta(addr, freeFinger()), | |
7914 "Size of free range is inconsistent with chunk size."); | |
7915 if (CMSTestInFreeList) { | |
7916 assert(_sp->verifyChunkInFreeLists(ffc), | |
7917 "free range is not in free lists"); | |
7918 } | |
7919 _sp->removeFreeChunkFromFreeLists(ffc); | |
7920 set_freeRangeInFreeLists(false); | |
7921 } | |
7922 } | |
7923 } | |
7924 } else { | |
7925 // Code path common to both original and adaptive free lists. | |
7926 | |
7927 // can't coalesce with previous block; this should be treated | |
7928 // as the end of a free run if any | |
7929 if (inFreeRange()) { | |
7930 // we kicked some butt; time to pick up the garbage | |
7931 assert(freeFinger() < addr, "the finger pointeth off base"); | |
7932 flushCurFreeChunk(freeFinger(), pointer_delta(addr, freeFinger())); | |
7933 } | |
7934 // else, nothing to do, just continue | |
7935 } | |
7936 } | |
7937 | |
7938 size_t SweepClosure::doGarbageChunk(FreeChunk* fc) { | |
7939 // This is a chunk of garbage. It is not in any free list. | |
7940 // Add it to a free list or let it possibly be coalesced into | |
7941 // a larger chunk. | |
7942 HeapWord* addr = (HeapWord*) fc; | |
7943 size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()); | |
7944 | |
7945 if (_sp->adaptive_freelists()) { | |
7946 // Verify that the bit map has no bits marked between | |
7947 // addr and purported end of just dead object. | |
7948 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size); | |
7949 | |
7950 doPostIsFreeOrGarbageChunk(fc, size); | |
7951 } else { | |
7952 if (!inFreeRange()) { | |
7953 // start of a new free range | |
7954 assert(size > 0, "A free range should have a size"); | |
7955 initialize_free_range(addr, false); | |
7956 | |
7957 } else { | |
7958 // this will be swept up when we hit the end of the | |
7959 // free range | |
7960 if (CMSTraceSweeper) { | |
7961 gclog_or_tty->print(" -- pick up garbage 0x%x (%d) \n", fc, size); | |
7962 } | |
7963 // If the chunk is being coalesced and the current free range is | |
7964 // in the free lists, remove the current free range so that it | |
7965 // will be returned to the free lists in its entirety - all | |
7966 // the coalesced pieces included. | |
7967 if (freeRangeInFreeLists()) { | |
7968 FreeChunk* ffc = (FreeChunk*)freeFinger(); | |
7969 assert(ffc->size() == pointer_delta(addr, freeFinger()), | |
7970 "Size of free range is inconsistent with chunk size."); | |
7971 if (CMSTestInFreeList) { | |
7972 assert(_sp->verifyChunkInFreeLists(ffc), | |
7973 "free range is not in free lists"); | |
7974 } | |
7975 _sp->removeFreeChunkFromFreeLists(ffc); | |
7976 set_freeRangeInFreeLists(false); | |
7977 } | |
7978 set_lastFreeRangeCoalesced(true); | |
7979 } | |
7980 // this will be swept up when we hit the end of the free range | |
7981 | |
7982 // Verify that the bit map has no bits marked between | |
7983 // addr and purported end of just dead object. | |
7984 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size); | |
7985 } | |
7986 return size; | |
7987 } | |
7988 | |
7989 size_t SweepClosure::doLiveChunk(FreeChunk* fc) { | |
7990 HeapWord* addr = (HeapWord*) fc; | |
7991 // The sweeper has just found a live object. Return any accumulated | |
7992 // left hand chunk to the free lists. | |
7993 if (inFreeRange()) { | |
7994 if (_sp->adaptive_freelists()) { | |
7995 flushCurFreeChunk(freeFinger(), | |
7996 pointer_delta(addr, freeFinger())); | |
7997 } else { // not adaptive freelists | |
7998 set_inFreeRange(false); | |
7999 // Add the free range back to the free list if it is not already | |
8000 // there. | |
8001 if (!freeRangeInFreeLists()) { | |
8002 assert(freeFinger() < addr, "the finger pointeth off base"); | |
8003 if (CMSTraceSweeper) { | |
8004 gclog_or_tty->print("Sweep:put_free_blk 0x%x (%d) " | |
8005 "[coalesced:%d]\n", | |
8006 freeFinger(), pointer_delta(addr, freeFinger()), | |
8007 lastFreeRangeCoalesced()); | |
8008 } | |
8009 _sp->addChunkAndRepairOffsetTable(freeFinger(), | |
8010 pointer_delta(addr, freeFinger()), lastFreeRangeCoalesced()); | |
8011 } | |
8012 } | |
8013 } | |
8014 | |
8015 // Common code path for original and adaptive free lists. | |
8016 | |
8017 // this object is live: we'd normally expect this to be | |
8018 // an oop, and like to assert the following: | |
8019 // assert(oop(addr)->is_oop(), "live block should be an oop"); | |
8020 // However, as we commented above, this may be an object whose | |
8021 // header hasn't yet been initialized. | |
8022 size_t size; | |
8023 assert(_bitMap->isMarked(addr), "Tautology for this control point"); | |
8024 if (_bitMap->isMarked(addr + 1)) { | |
8025 // Determine the size from the bit map, rather than trying to | |
8026 // compute it from the object header. | |
8027 HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2); | |
8028 size = pointer_delta(nextOneAddr + 1, addr); | |
8029 assert(size == CompactibleFreeListSpace::adjustObjectSize(size), | |
8030 "alignment problem"); | |
8031 | |
8032 #ifdef DEBUG | |
187 | 8033 if (oop(addr)->klass_or_null() != NULL && |
94 | 8034 ( !_collector->should_unload_classes() |
0 | 8035 || oop(addr)->is_parsable())) { |
8036 // Ignore mark word because we are running concurrent with mutators | |
8037 assert(oop(addr)->is_oop(true), "live block should be an oop"); | |
8038 assert(size == | |
8039 CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()), | |
8040 "P-mark and computed size do not agree"); | |
8041 } | |
8042 #endif | |
8043 | |
8044 } else { | |
8045 // This should be an initialized object that's alive. | |
187 | 8046 assert(oop(addr)->klass_or_null() != NULL && |
94 | 8047 (!_collector->should_unload_classes() |
0 | 8048 || oop(addr)->is_parsable()), |
8049 "Should be an initialized object"); | |
8050 // Ignore mark word because we are running concurrent with mutators | |
8051 assert(oop(addr)->is_oop(true), "live block should be an oop"); | |
8052 // Verify that the bit map has no bits marked between | |
8053 // addr and purported end of this block. | |
8054 size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()); | |
8055 assert(size >= 3, "Necessary for Printezis marks to work"); | |
8056 assert(!_bitMap->isMarked(addr+1), "Tautology for this control point"); | |
8057 DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);) | |
8058 } | |
8059 return size; | |
8060 } | |
8061 | |
8062 void SweepClosure::doPostIsFreeOrGarbageChunk(FreeChunk* fc, | |
8063 size_t chunkSize) { | |
8064 // doPostIsFreeOrGarbageChunk() should only be called in the smart allocation | |
8065 // scheme. | |
8066 bool fcInFreeLists = fc->isFree(); | |
8067 assert(_sp->adaptive_freelists(), "Should only be used in this case."); | |
8068 assert((HeapWord*)fc <= _limit, "sweep invariant"); | |
8069 if (CMSTestInFreeList && fcInFreeLists) { | |
8070 assert(_sp->verifyChunkInFreeLists(fc), | |
8071 "free chunk is not in free lists"); | |
8072 } | |
8073 | |
8074 | |
8075 if (CMSTraceSweeper) { | |
8076 gclog_or_tty->print_cr(" -- pick up another chunk at 0x%x (%d)", fc, chunkSize); | |
8077 } | |
8078 | |
8079 HeapWord* addr = (HeapWord*) fc; | |
8080 | |
8081 bool coalesce; | |
8082 size_t left = pointer_delta(addr, freeFinger()); | |
8083 size_t right = chunkSize; | |
8084 switch (FLSCoalescePolicy) { | |
8085 // numeric value forms a coalescing aggressiveness metric | |
8086 case 0: { // never coalesce | |
8087 coalesce = false; | |
8088 break; | |
8089 } | |
8090 case 1: { // coalesce if left & right chunks on overpopulated lists | |
8091 coalesce = _sp->coalOverPopulated(left) && | |
8092 _sp->coalOverPopulated(right); | |
8093 break; | |
8094 } | |
8095 case 2: { // coalesce if left chunk on overpopulated list (default) | |
8096 coalesce = _sp->coalOverPopulated(left); | |
8097 break; | |
8098 } | |
8099 case 3: { // coalesce if left OR right chunk on overpopulated list | |
8100 coalesce = _sp->coalOverPopulated(left) || | |
8101 _sp->coalOverPopulated(right); | |
8102 break; | |
8103 } | |
8104 case 4: { // always coalesce | |
8105 coalesce = true; | |
8106 break; | |
8107 } | |
8108 default: | |
8109 ShouldNotReachHere(); | |
8110 } | |
8111 | |
8112 // Should the current free range be coalesced? | |
8113 // If the chunk is in a free range and either we decided to coalesce above | |
8114 // or the chunk is near the large block at the end of the heap | |
8115 // (isNearLargestChunk() returns true), then coalesce this chunk. | |
8116 bool doCoalesce = inFreeRange() && | |
8117 (coalesce || _g->isNearLargestChunk((HeapWord*)fc)); | |
8118 if (doCoalesce) { | |
8119 // Coalesce the current free range on the left with the new | |
8120 // chunk on the right. If either is on a free list, | |
8121 // it must be removed from the list and stashed in the closure. | |
8122 if (freeRangeInFreeLists()) { | |
8123 FreeChunk* ffc = (FreeChunk*)freeFinger(); | |
8124 assert(ffc->size() == pointer_delta(addr, freeFinger()), | |
8125 "Size of free range is inconsistent with chunk size."); | |
8126 if (CMSTestInFreeList) { | |
8127 assert(_sp->verifyChunkInFreeLists(ffc), | |
8128 "Chunk is not in free lists"); | |
8129 } | |
8130 _sp->coalDeath(ffc->size()); | |
8131 _sp->removeFreeChunkFromFreeLists(ffc); | |
8132 set_freeRangeInFreeLists(false); | |
8133 } | |
8134 if (fcInFreeLists) { | |
8135 _sp->coalDeath(chunkSize); | |
8136 assert(fc->size() == chunkSize, | |
8137 "The chunk has the wrong size or is not in the free lists"); | |
8138 _sp->removeFreeChunkFromFreeLists(fc); | |
8139 } | |
8140 set_lastFreeRangeCoalesced(true); | |
8141 } else { // not in a free range and/or should not coalesce | |
8142 // Return the current free range and start a new one. | |
8143 if (inFreeRange()) { | |
8144 // In a free range but cannot coalesce with the right hand chunk. | |
8145 // Put the current free range into the free lists. | |
8146 flushCurFreeChunk(freeFinger(), | |
8147 pointer_delta(addr, freeFinger())); | |
8148 } | |
8149 // Set up for new free range. Pass along whether the right hand | |
8150 // chunk is in the free lists. | |
8151 initialize_free_range((HeapWord*)fc, fcInFreeLists); | |
8152 } | |
8153 } | |
8154 void SweepClosure::flushCurFreeChunk(HeapWord* chunk, size_t size) { | |
8155 assert(inFreeRange(), "Should only be called if currently in a free range."); | |
8156 assert(size > 0, | |
8157 "A zero sized chunk cannot be added to the free lists."); | |
8158 if (!freeRangeInFreeLists()) { | |
8159 if (CMSTestInFreeList) { | |
8160 FreeChunk* fc = (FreeChunk*) chunk; | |
8161 fc->setSize(size); | |
8162 assert(!_sp->verifyChunkInFreeLists(fc), | |
8163 "chunk should not be in free lists yet"); | |
8164 } | |
8165 if (CMSTraceSweeper) { | |
8166 gclog_or_tty->print_cr(" -- add free block " PTR_FORMAT " (" SIZE_FORMAT ") to free lists", | |
8167 chunk, size); | |
8168 } | |
8169 // A new free range is going to be starting. The current | |
8170 // free range has not been added to the free lists yet or | |
8171 // was removed earlier, so add it back now. | |
8172 // If the current free range was coalesced, then the death | |
8173 // of the free range was recorded. Record a birth now. | |
8174 if (lastFreeRangeCoalesced()) { | |
8175 _sp->coalBirth(size); | |
8176 } | |
8177 _sp->addChunkAndRepairOffsetTable(chunk, size, | |
8178 lastFreeRangeCoalesced()); | |
8179 } | |
8180 set_inFreeRange(false); | |
8181 set_freeRangeInFreeLists(false); | |
8182 } | |
8183 | |
8184 // We take a break if we've been at this for a while, | |
8185 // so as to avoid monopolizing the locks involved. | |
8186 void SweepClosure::do_yield_work(HeapWord* addr) { | |
8187 // Return current free chunk being used for coalescing (if any) | |
8188 // to the appropriate freelist. After yielding, the next | |
8189 // free block encountered will start a coalescing range of | |
8190 // free blocks. If the next free block is adjacent to the | |
8191 // chunk just flushed, they will need to wait for the next | |
8192 // sweep to be coalesced. | |
8193 if (inFreeRange()) { | |
8194 flushCurFreeChunk(freeFinger(), pointer_delta(addr, freeFinger())); | |
8195 } | |
8196 | |
8197 // First give up the locks, then yield, then re-lock. | |
8198 // We should probably use a constructor/destructor idiom to | |
8199 // do this unlock/lock or modify the MutexUnlocker class to | |
8200 // serve our purpose. XXX | |
8201 assert_lock_strong(_bitMap->lock()); | |
8202 assert_lock_strong(_freelistLock); | |
8203 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), | |
8204 "CMS thread should hold CMS token"); | |
8205 _bitMap->lock()->unlock(); | |
8206 _freelistLock->unlock(); | |
8207 ConcurrentMarkSweepThread::desynchronize(true); | |
8208 ConcurrentMarkSweepThread::acknowledge_yield_request(); | |
8209 _collector->stopTimer(); | |
8210 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr()); | |
8211 if (PrintCMSStatistics != 0) { | |
8212 _collector->incrementYields(); | |
8213 } | |
8214 _collector->icms_wait(); | |
8215 | |
8216 // See the comment in coordinator_yield() | |
8217 for (unsigned i = 0; i < CMSYieldSleepCount && | |
8218 ConcurrentMarkSweepThread::should_yield() && | |
8219 !CMSCollector::foregroundGCIsActive(); ++i) { | |
8220 os::sleep(Thread::current(), 1, false); | |
8221 ConcurrentMarkSweepThread::acknowledge_yield_request(); | |
8222 } | |
8223 | |
8224 ConcurrentMarkSweepThread::synchronize(true); | |
8225 _freelistLock->lock(); | |
8226 _bitMap->lock()->lock_without_safepoint_check(); | |
8227 _collector->startTimer(); | |
8228 } | |
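// A minimal sketch of the constructor/destructor idiom alluded to in the
// XXX comment above. "ReverseLockerPair" is a hypothetical helper, not
// part of the current code base: it drops both locks on construction and
// reacquires them in the proper order on destruction, so no exit path
// from the yielding region can leave the locks released.
//
//   class ReverseLockerPair : public StackObj {
//     Mutex* _bml;   // bit map lock
//     Mutex* _fll;   // free list lock
//    public:
//     ReverseLockerPair(Mutex* bml, Mutex* fll) : _bml(bml), _fll(fll) {
//       _bml->unlock();
//       _fll->unlock();
//     }
//     ~ReverseLockerPair() {
//       _fll->lock();
//       _bml->lock_without_safepoint_check();
//     }
//   };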
8229 | |
8230 #ifndef PRODUCT | |
8231 // This is actually very useful in a product build if it can | |
8232 // be called from the debugger. Compile it into the product | |
8233 // as needed. | |
8234 bool debug_verifyChunkInFreeLists(FreeChunk* fc) { | |
8235 return debug_cms_space->verifyChunkInFreeLists(fc); | |
8236 } | |
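// Example usage from a debugger session attached to the VM (the chunk
// address below is, of course, hypothetical):
//
//   (gdb) call debug_verifyChunkInFreeLists((FreeChunk*)0x2aaab4000000)
//
// This presumes debug_cms_space has been set to point at the CMS space
// of interest beforehand.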
8237 | |
8238 void SweepClosure::record_free_block_coalesced(FreeChunk* fc) const { | |
8239 if (CMSTraceSweeper) { | |
8240 gclog_or_tty->print("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")\n", fc, fc->size()); | |
8241 } | |
8242 } | |
8243 #endif | |
8244 | |
8245 // CMSIsAliveClosure | |
8246 bool CMSIsAliveClosure::do_object_b(oop obj) { | |
8247 HeapWord* addr = (HeapWord*)obj; | |
8248 return addr != NULL && | |
8249 (!_span.contains(addr) || _bit_map->isMarked(addr)); | |
8250 } | |
8251 | |
8252 // CMSKeepAliveClosure: the serial version | |
8253 void CMSKeepAliveClosure::do_oop(oop obj) { |
8254 HeapWord* addr = (HeapWord*)obj; |
0 | 8255 if (_span.contains(addr) && |
8256 !_bit_map->isMarked(addr)) { | |
8257 _bit_map->mark(addr); | |
8258 bool simulate_overflow = false; | |
8259 NOT_PRODUCT( | |
8260 if (CMSMarkStackOverflowALot && | |
8261 _collector->simulate_overflow()) { | |
8262 // simulate a stack overflow | |
8263 simulate_overflow = true; | |
8264 } | |
8265 ) | |
8266 if (simulate_overflow || !_mark_stack->push(obj)) { |
8267 _collector->push_on_overflow_list(obj); |
0 | 8268 _collector->_ser_kac_ovflw++; |
8269 } | |
8270 } | |
8271 } | |
8272 | |
8273 void CMSKeepAliveClosure::do_oop(oop* p) { CMSKeepAliveClosure::do_oop_work(p); } |
8274 void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); } |
8275 |
0 | 8276 // CMSParKeepAliveClosure: a parallel version of the above. |
8277 // The work queues are private to each closure (thread), | |
8278 // but may be available for stealing by other threads. | |
8279 void CMSParKeepAliveClosure::do_oop(oop obj) { |
8280 HeapWord* addr = (HeapWord*)obj; |
0 | 8281 if (_span.contains(addr) && |
8282 !_bit_map->isMarked(addr)) { | |
8283 // In general, during recursive tracing, several threads | |
8284 // may be concurrently getting here; the first one to | |
8285 // "tag" it, claims it. | |
8286 if (_bit_map->par_mark(addr)) { | |
8287 bool res = _work_queue->push(obj); |
0 | 8288 assert(res, "Low water mark should be much less than capacity"); |
8289 // Do a recursive trim in the hope that this will keep | |
8290 // stack usage lower, but leave some oops for potential stealers | |
8291 trim_queue(_low_water_mark); | |
8292 } // Else, another thread got there first | |
8293 } | |
8294 } | |
8295 | |
8296 void CMSParKeepAliveClosure::do_oop(oop* p) { CMSParKeepAliveClosure::do_oop_work(p); } |
8297 void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); } |
8298 |
0 | 8299 void CMSParKeepAliveClosure::trim_queue(uint max) { |
8300 while (_work_queue->size() > max) { | |
8301 oop new_oop; | |
8302 if (_work_queue->pop_local(new_oop)) { | |
8303 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop"); | |
8304 assert(_bit_map->isMarked((HeapWord*)new_oop), | |
8305 "no white objects on this stack!"); | |
8306 assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop"); | |
8307 // iterate over the oops in this oop, marking and pushing | |
8308 // the ones in CMS heap (i.e. in _span). | |
8309 new_oop->oop_iterate(&_mark_and_push); | |
8310 } | |
8311 } | |
8312 } | |
8313 | |
8314 void CMSInnerParMarkAndPushClosure::do_oop(oop obj) { |
8315 HeapWord* addr = (HeapWord*)obj; |
0 | 8316 if (_span.contains(addr) && |
8317 !_bit_map->isMarked(addr)) { | |
8318 if (_bit_map->par_mark(addr)) { | |
8319 bool simulate_overflow = false; | |
8320 NOT_PRODUCT( | |
8321 if (CMSMarkStackOverflowALot && | |
8322 _collector->par_simulate_overflow()) { | |
8323 // simulate a stack overflow | |
8324 simulate_overflow = true; | |
8325 } | |
8326 ) | |
8327 if (simulate_overflow || !_work_queue->push(obj)) { |
8328 _collector->par_push_on_overflow_list(obj); |
0 | 8329 _collector->_par_kac_ovflw++; |
8330 } | |
8331 } // Else another thread got there already | |
8332 } | |
8333 } | |
8334 | |
8335 void CMSInnerParMarkAndPushClosure::do_oop(oop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); } |
8336 void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); } |
8337 |
0 | 8338 ////////////////////////////////////////////////////////////////// |
8339 // CMSExpansionCause ///////////////////////////// | |
8340 ////////////////////////////////////////////////////////////////// | |
8341 const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) { | |
8342 switch (cause) { | |
8343 case _no_expansion: | |
8344 return "No expansion"; | |
8345 case _satisfy_free_ratio: | |
8346 return "Free ratio"; | |
8347 case _satisfy_promotion: | |
8348 return "Satisfy promotion"; | |
8349 case _satisfy_allocation: | |
8350 return "allocation"; | |
8351 case _allocate_par_lab: | |
8352 return "Par LAB"; | |
8353 case _allocate_par_spooling_space: | |
8354 return "Par Spooling Space"; | |
8355 case _adaptive_size_policy: | |
8356 return "Ergonomics"; | |
8357 default: | |
8358 return "unknown"; | |
8359 } | |
8360 } | |
8361 | |
8362 void CMSDrainMarkingStackClosure::do_void() { | |
8363 // the max number to take from overflow list at a time | |
8364 const size_t num = _mark_stack->capacity()/4; | |
8365 while (!_mark_stack->isEmpty() || | |
8366 // if stack is empty, check the overflow list | |
8367 _collector->take_from_overflow_list(num, _mark_stack)) { | |
8368 oop obj = _mark_stack->pop(); |
8369 HeapWord* addr = (HeapWord*)obj; |
0 | 8370 assert(_span.contains(addr), "Should be within span"); |
8371 assert(_bit_map->isMarked(addr), "Should be marked"); | |
8372 assert(obj->is_oop(), "Should be an oop"); |
8373 obj->oop_iterate(_keep_alive); |
0 | 8374 } |
8375 } | |
8376 | |
8377 void CMSParDrainMarkingStackClosure::do_void() { | |
8378 // drain queue | |
8379 trim_queue(0); | |
8380 } | |
8381 | |
8382 // Trim our work_queue so its length is below max at return | |
8383 void CMSParDrainMarkingStackClosure::trim_queue(uint max) { | |
8384 while (_work_queue->size() > max) { | |
8385 oop new_oop; | |
8386 if (_work_queue->pop_local(new_oop)) { | |
8387 assert(new_oop->is_oop(), "Expected an oop"); | |
8388 assert(_bit_map->isMarked((HeapWord*)new_oop), | |
8389 "no white objects on this stack!"); | |
8390 assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop"); | |
8391 // iterate over the oops in this oop, marking and pushing | |
8392 // the ones in CMS heap (i.e. in _span). | |
8393 new_oop->oop_iterate(&_mark_and_push); | |
8394 } | |
8395 } | |
8396 } | |
8397 | |
8398 //////////////////////////////////////////////////////////////////// | |
8399 // Support for Marking Stack Overflow list handling and related code | |
8400 //////////////////////////////////////////////////////////////////// | |
8401 // Much of the following code is similar in shape and spirit to the | |
8402 // code used in ParNewGC. We should try and share that code | |
8403 // as much as possible in the future. | |
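// Note on representation: the overflow list is an intrusive, singly-
// linked list threaded through the objects' own mark words -- an
// object's mark word holds the next element of the list, and any mark
// word that must survive this reuse is saved via
// preserve_mark_if_necessary() and restored later. A sketch of a walk
// under that convention ("overflow_list_length" is a hypothetical
// helper, shown only to make the encoding concrete):
//
//   static size_t overflow_list_length(oop head) {
//     size_t n = 0;
//     for (oop cur = head; cur != NULL; cur = oop(cur->mark())) {
//       n++;    // cur->mark() is the next link, not a real mark word
//     }
//     return n;
//   }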
8404 | |
8405 #ifndef PRODUCT | |
8406 // Debugging support for CMSStackOverflowALot | |
8407 | |
8408 // It's OK to call this multi-threaded; the worst thing | |
8409 // that can happen is that we'll get a bunch of closely | |
8410 // spaced simulated overflows, but that's OK; in fact it is | |
8411 // probably good, as it would exercise the overflow code | |
8412 // under contention. | |
8413 bool CMSCollector::simulate_overflow() { | |
8414 if (_overflow_counter-- <= 0) { // just being defensive | |
8415 _overflow_counter = CMSMarkStackOverflowInterval; | |
8416 return true; | |
8417 } else { | |
8418 return false; | |
8419 } | |
8420 } | |
8421 | |
8422 bool CMSCollector::par_simulate_overflow() { | |
8423 return simulate_overflow(); | |
8424 } | |
8425 #endif | |
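// To exercise these paths in a non-product build, the debug flags used
// above can be set on the command line, e.g. (values illustrative):
//
//   -XX:+CMSMarkStackOverflowALot -XX:CMSMarkStackOverflowInterval=1000
//
// which forces a simulated overflow roughly once per interval of pushes.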
8426 | |
8427 // Single-threaded | |
8428 bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) { | |
8429 assert(stack->isEmpty(), "Expected precondition"); | |
8430 assert(stack->capacity() > num, "Shouldn't bite off more than we can chew"); | |
8431 size_t i = num; | |
8432 oop cur = _overflow_list; | |
8433 const markOop proto = markOopDesc::prototype(); | |
8434 NOT_PRODUCT(size_t n = 0;) | |
8435 for (oop next; i > 0 && cur != NULL; cur = next, i--) { | |
8436 next = oop(cur->mark()); | |
8437 cur->set_mark(proto); // until proven otherwise | |
8438 assert(cur->is_oop(), "Should be an oop"); | |
8439 bool res = stack->push(cur); | |
8440 assert(res, "Bit off more than we can chew?"); | |
8441 NOT_PRODUCT(n++;) | |
8442 } | |
8443 _overflow_list = cur; | |
8444 #ifndef PRODUCT | |
8445 assert(_num_par_pushes >= n, "Too many pops?"); | |
8446 _num_par_pushes -= n; | |
8447 #endif | |
8448 return !stack->isEmpty(); | |
8449 } | |
8450 | |
8451 // Multi-threaded; use CAS to break off a prefix | |
8452 bool CMSCollector::par_take_from_overflow_list(size_t num, | |
8453 OopTaskQueue* work_q) { | |
8454 assert(work_q->size() == 0, "That's the current policy"); | |
8455 assert(num < work_q->max_elems(), "Can't bite more than we can chew"); | |
8456 if (_overflow_list == NULL) { | |
8457 return false; | |
8458 } | |
8459 // Grab the entire list; we'll put back a suffix | |
8460 oop prefix = (oop)Atomic::xchg_ptr(NULL, &_overflow_list); | |
8461 if (prefix == NULL) { // someone grabbed it before we did ... | |
8462 // ... we could spin for a short while, but for now we don't | |
8463 return false; | |
8464 } | |
8465 size_t i = num; | |
8466 oop cur = prefix; | |
8467 for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--); | |
8468 if (cur->mark() != NULL) { | |
8469 oop suffix_head = cur->mark(); // suffix will be put back on global list | |
8470 cur->set_mark(NULL); // break off suffix | |
8471 // Find tail of suffix so we can prepend suffix to global list | |
8472 for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark())); | |
8473 oop suffix_tail = cur; | |
8474 assert(suffix_tail != NULL && suffix_tail->mark() == NULL, | |
8475 "Tautology"); | |
8476 oop observed_overflow_list = _overflow_list; | |
8477 do { | |
8478 cur = observed_overflow_list; | |
8479 suffix_tail->set_mark(markOop(cur)); | |
8480 observed_overflow_list = | |
8481 (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur); | |
8482 } while (cur != observed_overflow_list); | |
8483 } | |
8484 | |
8485 // Push the prefix elements on work_q | |
8486 assert(prefix != NULL, "control point invariant"); | |
8487 const markOop proto = markOopDesc::prototype(); | |
8488 oop next; | |
8489 NOT_PRODUCT(size_t n = 0;) | |
8490 for (cur = prefix; cur != NULL; cur = next) { | |
8491 next = oop(cur->mark()); | |
8492 cur->set_mark(proto); // until proven otherwise | |
8493 assert(cur->is_oop(), "Should be an oop"); | |
8494 bool res = work_q->push(cur); | |
8495 assert(res, "Bit off more than we can chew?"); | |
8496 NOT_PRODUCT(n++;) | |
8497 } | |
8498 #ifndef PRODUCT | |
8499 assert(_num_par_pushes >= n, "Too many pops?"); | |
8500 Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes); | |
8501 #endif | |
8502 return true; | |
8503 } | |
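// The lock-free protocol above, in miniature: detach the entire list
// with an atomic exchange, keep a bounded prefix for this thread, and
// CAS the suffix back onto the shared head (retrying, since other
// threads may push concurrently). An illustrative sketch under
// simplified types; "Node" and "grab_prefix" are hypothetical names:
//
//   struct Node { Node* _next; };
//   static Node* grab_prefix(Node* volatile* head, size_t max) {
//     Node* prefix = (Node*) Atomic::xchg_ptr(NULL, head);
//     if (prefix == NULL) return NULL;     // someone else got here first
//     Node* cur = prefix;                  // walk at most max-1 links
//     for (size_t i = 1; i < max && cur->_next != NULL; i++) cur = cur->_next;
//     Node* suffix = cur->_next;
//     cur->_next = NULL;                   // terminate the prefix
//     if (suffix != NULL) {
//       Node* tail = suffix;               // find the suffix's tail ...
//       while (tail->_next != NULL) tail = tail->_next;
//       Node* observed = *head;            // ... and CAS it back, whole
//       Node* expected;
//       do {
//         expected = observed;
//         tail->_next = expected;
//         observed = (Node*) Atomic::cmpxchg_ptr(suffix, head, expected);
//       } while (observed != expected);
//     }
//     return prefix;
//   }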
8504 | |
8505 // Single-threaded | |
8506 void CMSCollector::push_on_overflow_list(oop p) { | |
8507 NOT_PRODUCT(_num_par_pushes++;) | |
8508 assert(p->is_oop(), "Not an oop"); | |
8509 preserve_mark_if_necessary(p); | |
8510 p->set_mark((markOop)_overflow_list); | |
8511 _overflow_list = p; | |
8512 } | |
8513 | |
8514 // Multi-threaded; use CAS to prepend to overflow list | |
8515 void CMSCollector::par_push_on_overflow_list(oop p) { | |
8516 NOT_PRODUCT(Atomic::inc_ptr(&_num_par_pushes);) | |
8517 assert(p->is_oop(), "Not an oop"); | |
8518 par_preserve_mark_if_necessary(p); | |
8519 oop observed_overflow_list = _overflow_list; | |
8520 oop cur_overflow_list; | |
8521 do { | |
8522 cur_overflow_list = observed_overflow_list; | |
8523 p->set_mark(markOop(cur_overflow_list)); | |
8524 observed_overflow_list = | |
8525 (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list); | |
8526 } while (cur_overflow_list != observed_overflow_list); | |
8527 } | |
8528 | |
8529 // Single threaded | |
8530 // General Note on GrowableArray: pushes may silently fail | |
8531 // because we are (temporarily) out of C-heap for expanding | |
8532 // the stack. The problem is quite ubiquitous and affects | |
8533 // a lot of code in the JVM. The prudent thing for GrowableArray | |
8534 // to do (for now) is to exit with an error. However, that may | |
8535 // be too draconian in some cases because the caller may be | |
8536 // able to recover without much harm. For such cases, we | |
8537 // should probably introduce a "soft_push" method which returns | |
8538 // an indication of success or failure with the assumption that | |
8539 // the caller may be able to recover from a failure; code in | |
8540 // the VM can then be changed, incrementally, to deal with such | |
8541 // failures where possible, thus, incrementally hardening the VM | |
8542 // in such low resource situations. | |
8543 void CMSCollector::preserve_mark_work(oop p, markOop m) { | |
8544 int PreserveMarkStackSize = 128; | |
8545 | |
8546 if (_preserved_oop_stack == NULL) { | |
8547 assert(_preserved_mark_stack == NULL, | |
8548 "bijection with preserved_oop_stack"); | |
8549 // Allocate the stacks | |
8550 _preserved_oop_stack = new (ResourceObj::C_HEAP) | |
8551 GrowableArray<oop>(PreserveMarkStackSize, true); | |
8552 _preserved_mark_stack = new (ResourceObj::C_HEAP) | |
8553 GrowableArray<markOop>(PreserveMarkStackSize, true); | |
8554 if (_preserved_oop_stack == NULL || _preserved_mark_stack == NULL) { | |
8555 vm_exit_out_of_memory(2 * PreserveMarkStackSize * sizeof(oop) /* punt */, | |
8556 "Preserved Mark/Oop Stack for CMS (C-heap)"); | |
8557 } | |
8558 } | |
8559 _preserved_oop_stack->push(p); | |
8560 _preserved_mark_stack->push(m); | |
8561 assert(m == p->mark(), "Mark word changed"); | |
8562 assert(_preserved_oop_stack->length() == _preserved_mark_stack->length(), | |
8563 "bijection"); | |
8564 } | |
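// A sketch of the "soft_push" suggested in the note above -- a push
// that reports failure instead of exiting the VM -- written against
// GrowableArray's representation (_data, _len, _max) but using a
// hypothetical try_grow() helper; no such method exists today:
//
//   template <class E> bool GrowableArray<E>::soft_push(const E& elem) {
//     if (_len == _max && !try_grow()) {
//       return false;             // out of C-heap; caller attempts recovery
//     }
//     _data[_len++] = elem;
//     return true;
//   }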
8565 | |
8566 // Single threaded | |
8567 void CMSCollector::preserve_mark_if_necessary(oop p) { | |
8568 markOop m = p->mark(); | |
8569 if (m->must_be_preserved(p)) { | |
8570 preserve_mark_work(p, m); | |
8571 } | |
8572 } | |
8573 | |
8574 void CMSCollector::par_preserve_mark_if_necessary(oop p) { | |
8575 markOop m = p->mark(); | |
8576 if (m->must_be_preserved(p)) { | |
8577 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); | |
8578 // Even though we read the mark word without holding | |
8579 // the lock, we are assured that it will not change | |
8580 // because we "own" this oop, so no other thread can | |
8581 // be trying to push it on the overflow list; see | |
8582 // the assertion in preserve_mark_work() that checks | |
8583 // that m == p->mark(). | |
8584 preserve_mark_work(p, m); | |
8585 } | |
8586 } | |
8587 | |
8588 // We should be able to do this multi-threaded, | |
8589 // a chunk of stack being a task (this is | |
8590 // correct because each oop only ever appears | |
8591 // once in the overflow list). However, it's | |
8592 // not very easy to completely overlap this with | |
8593 // other operations, so it will generally not be done | |
8594 // until all work's been completed. Because we | |
8595 // expect the preserved oop stack (set) to be small, | |
8596 // it's probably fine to do this single-threaded. | |
8597 // We can explore cleverer concurrent/overlapped/parallel | |
8598 // processing of preserved marks if we feel the | |
8599 // need for this in the future. Stack overflow should | |
8600 // be so rare in practice and, when it happens, its | |
8601 // effect on performance so great that this will | |
8602 // likely just be in the noise anyway. | |
8603 void CMSCollector::restore_preserved_marks_if_any() { | |
8604 if (_preserved_oop_stack == NULL) { | |
8605 assert(_preserved_mark_stack == NULL, | |
8606 "bijection with preserved_oop_stack"); | |
8607 return; | |
8608 } | |
8609 | |
8610 assert(SafepointSynchronize::is_at_safepoint(), | |
8611 "world should be stopped"); | |
8612 assert(Thread::current()->is_ConcurrentGC_thread() || | |
8613 Thread::current()->is_VM_thread(), | |
8614 "should be single-threaded"); | |
8615 | |
8616 int length = _preserved_oop_stack->length(); | |
8617 assert(_preserved_mark_stack->length() == length, "bijection"); | |
8618 for (int i = 0; i < length; i++) { | |
8619 oop p = _preserved_oop_stack->at(i); | |
8620 assert(p->is_oop(), "Should be an oop"); | |
8621 assert(_span.contains(p), "oop should be in _span"); | |
8622 assert(p->mark() == markOopDesc::prototype(), | |
8623 "Set when taken from overflow list"); | |
8624 markOop m = _preserved_mark_stack->at(i); | |
8625 p->set_mark(m); | |
8626 } | |
8627 _preserved_mark_stack->clear(); | |
8628 _preserved_oop_stack->clear(); | |
8629 assert(_preserved_mark_stack->is_empty() && | |
8630 _preserved_oop_stack->is_empty(), | |
8631 "stacks were cleared above"); | |
8632 } | |
8633 | |
8634 #ifndef PRODUCT | |
8635 bool CMSCollector::no_preserved_marks() const { | |
8636 return ( ( _preserved_mark_stack == NULL | |
8637 && _preserved_oop_stack == NULL) | |
8638 || ( _preserved_mark_stack->is_empty() | |
8639 && _preserved_oop_stack->is_empty())); | |
8640 } | |
8641 #endif | |
8642 | |
8643 CMSAdaptiveSizePolicy* ASConcurrentMarkSweepGeneration::cms_size_policy() const | |
8644 { | |
8645 GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap(); | |
8646 CMSAdaptiveSizePolicy* size_policy = | |
8647 (CMSAdaptiveSizePolicy*) gch->gen_policy()->size_policy(); | |
8648 assert(size_policy->is_gc_cms_adaptive_size_policy(), | |
8649 "Wrong type for size policy"); | |
8650 return size_policy; | |
8651 } | |
8652 | |
8653 void ASConcurrentMarkSweepGeneration::resize(size_t cur_promo_size, | |
8654 size_t desired_promo_size) { | |
8655 if (cur_promo_size < desired_promo_size) { | |
8656 size_t expand_bytes = desired_promo_size - cur_promo_size; | |
8657 if (PrintAdaptiveSizePolicy && Verbose) { | |
8658 gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize " | |
8659 "Expanding tenured generation by " SIZE_FORMAT " (bytes)", | |
8660 expand_bytes); | |
8661 } | |
8662 expand(expand_bytes, | |
8663 MinHeapDeltaBytes, | |
8664 CMSExpansionCause::_adaptive_size_policy); | |
8665 } else if (desired_promo_size < cur_promo_size) { | |
8666 size_t shrink_bytes = cur_promo_size - desired_promo_size; | |
8667 if (PrintAdaptiveSizePolicy && Verbose) { | |
8668 gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize " | |
8669 "Shrinking tenured generation by " SIZE_FORMAT " (bytes)", | |
8670 shrink_bytes); | |
8671 } | |
8672 shrink(shrink_bytes); | |
8673 } | |
8674 } | |
8675 | |
8676 CMSGCAdaptivePolicyCounters* ASConcurrentMarkSweepGeneration::gc_adaptive_policy_counters() { | |
8677 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
8678 CMSGCAdaptivePolicyCounters* counters = | |
8679 (CMSGCAdaptivePolicyCounters*) gch->collector_policy()->counters(); | |
8680 assert(counters->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind, | |
8681 "Wrong kind of counters"); | |
8682 return counters; | |
8683 } | |
8684 | |
8685 | |
8686 void ASConcurrentMarkSweepGeneration::update_counters() { | |
8687 if (UsePerfData) { | |
8688 _space_counters->update_all(); | |
8689 _gen_counters->update_all(); | |
8690 CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters(); | |
8691 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
8692 CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats(); | |
8693 assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind, | |
8694 "Wrong gc statistics type"); | |
8695 counters->update_counters(gc_stats_l); | |
8696 } | |
8697 } | |
8698 | |
8699 void ASConcurrentMarkSweepGeneration::update_counters(size_t used) { | |
8700 if (UsePerfData) { | |
8701 _space_counters->update_used(used); | |
8702 _space_counters->update_capacity(); | |
8703 _gen_counters->update_all(); | |
8704 | |
8705 CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters(); | |
8706 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
8707 CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats(); | |
8708 assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind, | |
8709 "Wrong gc statistics type"); | |
8710 counters->update_counters(gc_stats_l); | |
8711 } | |
8712 } | |
8713 | |
8714 // The desired expansion delta is computed so that: | |
8715 // . the desired free percentage (or greater) is available | |
8716 void ASConcurrentMarkSweepGeneration::compute_new_size() { | |
8717 assert_locked_or_safepoint(Heap_lock); | |
8718 | |
8719 GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap(); | |
8720 | |
8721 // If incremental collection failed, we just want to expand | |
8722 // to the limit. | |
8723 if (incremental_collection_failed()) { | |
8724 clear_incremental_collection_failed(); | |
8725 grow_to_reserved(); | |
8726 return; | |
8727 } | |
8728 | |
8729 assert(UseAdaptiveSizePolicy, "Should be using adaptive sizing"); | |
8730 | |
8731 assert(gch->kind() == CollectedHeap::GenCollectedHeap, | |
8732 "Wrong type of heap"); | |
8733 int prev_level = level() - 1; | |
8734 assert(prev_level >= 0, "The cms generation is the lowest generation"); | |
8735 Generation* prev_gen = gch->get_gen(prev_level); | |
8736 assert(prev_gen->kind() == Generation::ASParNew, | |
8737 "Wrong type of young generation"); | |
8738 ParNewGeneration* younger_gen = (ParNewGeneration*) prev_gen; | |
8739 size_t cur_eden = younger_gen->eden()->capacity(); | |
8740 CMSAdaptiveSizePolicy* size_policy = cms_size_policy(); | |
8741 size_t cur_promo = free(); | |
8742 size_policy->compute_tenured_generation_free_space(cur_promo, | |
8743 max_available(), | |
8744 cur_eden); | |
8745 resize(cur_promo, size_policy->promo_size()); | |
8746 | |
8747 // Record the new size of the space in the cms generation | |
8748 // that is available for promotions. This is temporary. | |
8749 // It should be the desired promo size. | |
8750 size_policy->avg_cms_promo()->sample(free()); | |
8751 size_policy->avg_old_live()->sample(used()); | |
8752 | |
8753 if (UsePerfData) { | |
8754 CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters(); | |
8755 counters->update_cms_capacity_counter(capacity()); | |
8756 } | |
8757 } | |
8758 | |
8759 void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) { | |
8760 assert_locked_or_safepoint(Heap_lock); | |
8761 assert_lock_strong(freelistLock()); | |
8762 HeapWord* old_end = _cmsSpace->end(); | |
8763 HeapWord* unallocated_start = _cmsSpace->unallocated_block(); | |
8764 assert(old_end >= unallocated_start, "Miscalculation of unallocated_start"); | |
8765 FreeChunk* chunk_at_end = find_chunk_at_end(); | |
8766 if (chunk_at_end == NULL) { | |
8767 // No room to shrink | |
8768 if (PrintGCDetails && Verbose) { | |
8769 gclog_or_tty->print_cr("No room to shrink: old_end " | |
8770 PTR_FORMAT " unallocated_start " PTR_FORMAT | |
8771 " chunk_at_end " PTR_FORMAT, | |
8772 old_end, unallocated_start, chunk_at_end); | |
8773 } | |
8774 return; | |
8775 } else { | |
8776 | |
8777 // Find the chunk at the end of the space and determine | |
8778 // how much it can be shrunk. | |
8779 size_t shrinkable_size_in_bytes = chunk_at_end->size(); | |
8780 size_t aligned_shrinkable_size_in_bytes = | |
8781 align_size_down(shrinkable_size_in_bytes, os::vm_page_size()); | |
8782 assert(unallocated_start <= chunk_at_end->end(), | |
8783 "Inconsistent chunk at end of space"); | |
8784 size_t bytes = MIN2(desired_bytes, aligned_shrinkable_size_in_bytes); | |
8785 size_t word_size_before = heap_word_size(_virtual_space.committed_size()); | |
8786 | |
8787 // Shrink the underlying space | |
8788 _virtual_space.shrink_by(bytes); | |
8789 if (PrintGCDetails && Verbose) { | |
8790 gclog_or_tty->print_cr("ASConcurrentMarkSweepGeneration::shrink_by:" | |
8791 " desired_bytes " SIZE_FORMAT | |
8792 " shrinkable_size_in_bytes " SIZE_FORMAT | |
8793 " aligned_shrinkable_size_in_bytes " SIZE_FORMAT | |
8794 " bytes " SIZE_FORMAT, | |
8795 desired_bytes, shrinkable_size_in_bytes, | |
8796 aligned_shrinkable_size_in_bytes, bytes); | |
8797 gclog_or_tty->print_cr(" old_end " PTR_FORMAT | |
8798 " unallocated_start " PTR_FORMAT, | |
8799 old_end, unallocated_start); | |
8800 } | |
8801 | |
8802 // If the space did shrink (shrinking is not guaranteed), | |
8803 // shrink the chunk at the end by the appropriate amount. | |
8804 if (((HeapWord*)_virtual_space.high()) < old_end) { | |
8805 size_t new_word_size = | |
8806 heap_word_size(_virtual_space.committed_size()); | |
8807 | |
8808 // Have to remove the chunk from the dictionary because it is changing | |
8809 // size and might belong someplace else in the dictionary. | |
8810 | |
8811 // Get the chunk at end, shrink it, and put it | |
8812 // back. | |
8813 _cmsSpace->removeChunkFromDictionary(chunk_at_end); | |
8814 size_t word_size_change = word_size_before - new_word_size; | |
8815 size_t chunk_at_end_old_size = chunk_at_end->size(); | |
8816 assert(chunk_at_end_old_size >= word_size_change, | |
8817 "Shrink is too large"); | |
8818 chunk_at_end->setSize(chunk_at_end_old_size - | |
8819 word_size_change); | |
8820 _cmsSpace->freed((HeapWord*) chunk_at_end->end(), | |
8821 word_size_change); | |
8822 | |
8823 _cmsSpace->returnChunkToDictionary(chunk_at_end); | |
8824 | |
8825 MemRegion mr(_cmsSpace->bottom(), new_word_size); | |
8826 _bts->resize(new_word_size); // resize the block offset shared array | |
8827 Universe::heap()->barrier_set()->resize_covered_region(mr); | |
8828 _cmsSpace->assert_locked(); | |
8829 _cmsSpace->set_end((HeapWord*)_virtual_space.high()); | |
8830 | |
8831 NOT_PRODUCT(_cmsSpace->dictionary()->verify()); | |
8832 | |
8833 // update the space and generation capacity counters | |
8834 if (UsePerfData) { | |
8835 _space_counters->update_capacity(); | |
8836 _gen_counters->update_all(); | |
8837 } | |
8838 | |
8839 if (Verbose && PrintGCDetails) { | |
8840 size_t new_mem_size = _virtual_space.committed_size(); | |
8841 size_t old_mem_size = new_mem_size + bytes; | |
8842 gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K", | |
8843 name(), old_mem_size/K, bytes/K, new_mem_size/K); | |
8844 } | |
8845 } | |
8846 | |
8847 assert(_cmsSpace->unallocated_block() <= _cmsSpace->end(), | |
8848 "Inconsistency at end of space"); | |
8849 assert(chunk_at_end->end() == _cmsSpace->end(), | |
8850 "Shrinking is inconsistent"); | |
8851 return; | |
8852 } | |
8853 } | |
8854 | |
8855 // Transfer some number of overflowed objects to the usual marking | |
8856 // stack. Return true if some objects were transferred. | |
8857 bool MarkRefsIntoAndScanClosure::take_from_overflow_list() { | |
8858 size_t num = MIN2((size_t)_mark_stack->capacity()/4, | |
8859 (size_t)ParGCDesiredObjsFromOverflowList); | |
8860 | |
8861 bool res = _collector->take_from_overflow_list(num, _mark_stack); | |
8862 assert(_collector->overflow_list_is_empty() || res, | |
8863 "If list is not empty, we should have taken something"); | |
8864 assert(!res || !_mark_stack->isEmpty(), | |
8865 "If we took something, it should now be on our stack"); | |
8866 return res; | |
8867 } | |
8868 | |
8869 size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) { | |
8870 size_t res = _sp->block_size_no_stall(addr, _collector); | |
8871 assert(res != 0, "Should always be able to compute a size"); | |
8872 if (_sp->block_is_obj(addr)) { | |
8873 if (_live_bit_map->isMarked(addr)) { | |
8874 // It can't have been dead in a previous cycle | |
8875 guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!"); | |
8876 } else { | |
8877 _dead_bit_map->mark(addr); // mark the dead object | |
8878 } | |
8879 } | |
8880 return res; | |
8881 } |