Mercurial > hg > truffle
annotate src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp @ 12233:40136aa2cdb1
8010722: assert: failed: heap size is too big for compressed oops
Summary: Use conservative assumptions of required alignment for the various garbage collector components into account when determining the maximum heap size that supports compressed oops. Using this conservative value avoids several circular dependencies in the calculation.
Reviewed-by: stefank, dholmes
author | tschatzl |
---|---|
date | Wed, 11 Sep 2013 16:25:02 +0200 |
parents | ca9dedeebdec |
children | 63a4eb8bcd23 7426d8d76305 060cdf93040c |
rev | line source |
---|---|
0 | 1 /* |
8035 | 2 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1387
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1387
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1387
diff
changeset
|
21 * questions. |
0 | 22 * |
23 */ | |
24 | |
1972 | 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP |
26 #define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP | |
27 | |
10405 | 28 #include "gc_implementation/shared/gcHeapSummary.hpp" |
1972 | 29 #include "gc_implementation/shared/gSpaceCounters.hpp" |
30 #include "gc_implementation/shared/gcStats.hpp" | |
10405 | 31 #include "gc_implementation/shared/gcWhen.hpp" |
1972 | 32 #include "gc_implementation/shared/generationCounters.hpp" |
6026 | 33 #include "memory/freeBlockDictionary.hpp" |
1972 | 34 #include "memory/generation.hpp" |
35 #include "runtime/mutexLocker.hpp" | |
36 #include "runtime/virtualspace.hpp" | |
37 #include "services/memoryService.hpp" | |
38 #include "utilities/bitMap.inline.hpp" | |
39 #include "utilities/stack.inline.hpp" | |
40 #include "utilities/taskqueue.hpp" | |
41 #include "utilities/yieldingWorkgroup.hpp" | |
42 | |
0 | 43 // ConcurrentMarkSweepGeneration is in support of a concurrent |
44 // mark-sweep old generation in the Detlefs-Printezis--Boehm-Demers-Schenker | |
45 // style. We assume, for now, that this generation is always the | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6197
diff
changeset
|
46 // seniormost generation and for simplicity |
0 | 47 // in the first implementation, that this generation is a single compactible |
48 // space. Neither of these restrictions appears essential, and will be | |
49 // relaxed in the future when more time is available to implement the | |
50 // greater generality (and there's a need for it). | |
51 // | |
52 // Concurrent mode failures are currently handled by | |
53 // means of a sliding mark-compact. | |
54 | |
55 class CMSAdaptiveSizePolicy; | |
56 class CMSConcMarkingTask; | |
57 class CMSGCAdaptivePolicyCounters; | |
10405 | 58 class CMSTracer; |
59 class ConcurrentGCTimer; | |
0 | 60 class ConcurrentMarkSweepGeneration; |
61 class ConcurrentMarkSweepPolicy; | |
62 class ConcurrentMarkSweepThread; | |
63 class CompactibleFreeListSpace; | |
64 class FreeChunk; | |
65 class PromotionInfo; | |
66 class ScanMarkedObjectsAgainCarefullyClosure; | |
9072
8617e38bb4cb
8008508: CMS does not correctly reduce heap size after a Full GC
jmasa
parents:
8035
diff
changeset
|
67 class TenuredGeneration; |
10405 | 68 class SerialOldTracer; |
0 | 69 |
70 // A generic CMS bit map. It's the basis for both the CMS marking bit map | |
71 // as well as for the mod union table (in each case only a subset of the | |
72 // methods are used). This is essentially a wrapper around the BitMap class, | |
73 // with one bit per (1<<_shifter) HeapWords. (i.e. for the marking bit map, | |
74 // we have _shifter == 0. and for the mod union table we have | |
75 // shifter == CardTableModRefBS::card_shift - LogHeapWordSize.) | |
76 // XXX 64-bit issues in BitMap? | |
77 class CMSBitMap VALUE_OBJ_CLASS_SPEC { | |
78 friend class VMStructs; | |
79 | |
80 HeapWord* _bmStartWord; // base address of range covered by map | |
81 size_t _bmWordSize; // map size (in #HeapWords covered) | |
82 const int _shifter; // shifts to convert HeapWord to bit position | |
83 VirtualSpace _virtual_space; // underlying the bit map | |
84 BitMap _bm; // the bit map itself | |
85 public: | |
86 Mutex* const _lock; // mutex protecting _bm; | |
87 | |
88 public: | |
89 // constructor | |
90 CMSBitMap(int shifter, int mutex_rank, const char* mutex_name); | |
91 | |
92 // allocates the actual storage for the map | |
93 bool allocate(MemRegion mr); | |
94 // field getter | |
95 Mutex* lock() const { return _lock; } | |
96 // locking verifier convenience function | |
97 void assert_locked() const PRODUCT_RETURN; | |
98 | |
99 // inquiries | |
100 HeapWord* startWord() const { return _bmStartWord; } | |
101 size_t sizeInWords() const { return _bmWordSize; } | |
102 size_t sizeInBits() const { return _bm.size(); } | |
103 // the following is one past the last word in space | |
104 HeapWord* endWord() const { return _bmStartWord + _bmWordSize; } | |
105 | |
106 // reading marks | |
107 bool isMarked(HeapWord* addr) const; | |
108 bool par_isMarked(HeapWord* addr) const; // do not lock checks | |
109 bool isUnmarked(HeapWord* addr) const; | |
110 bool isAllClear() const; | |
111 | |
112 // writing marks | |
113 void mark(HeapWord* addr); | |
114 // For marking by parallel GC threads; | |
115 // returns true if we did, false if another thread did | |
116 bool par_mark(HeapWord* addr); | |
117 | |
118 void mark_range(MemRegion mr); | |
119 void par_mark_range(MemRegion mr); | |
120 void mark_large_range(MemRegion mr); | |
121 void par_mark_large_range(MemRegion mr); | |
122 void par_clear(HeapWord* addr); // For unmarking by parallel GC threads. | |
123 void clear_range(MemRegion mr); | |
124 void par_clear_range(MemRegion mr); | |
125 void clear_large_range(MemRegion mr); | |
126 void par_clear_large_range(MemRegion mr); | |
127 void clear_all(); | |
128 void clear_all_incrementally(); // Not yet implemented!! | |
129 | |
130 NOT_PRODUCT( | |
131 // checks the memory region for validity | |
132 void region_invariant(MemRegion mr); | |
133 ) | |
134 | |
135 // iteration | |
136 void iterate(BitMapClosure* cl) { | |
137 _bm.iterate(cl); | |
138 } | |
139 void iterate(BitMapClosure* cl, HeapWord* left, HeapWord* right); | |
140 void dirty_range_iterate_clear(MemRegionClosure* cl); | |
141 void dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl); | |
142 | |
143 // auxiliary support for iteration | |
144 HeapWord* getNextMarkedWordAddress(HeapWord* addr) const; | |
145 HeapWord* getNextMarkedWordAddress(HeapWord* start_addr, | |
146 HeapWord* end_addr) const; | |
147 HeapWord* getNextUnmarkedWordAddress(HeapWord* addr) const; | |
148 HeapWord* getNextUnmarkedWordAddress(HeapWord* start_addr, | |
149 HeapWord* end_addr) const; | |
150 MemRegion getAndClearMarkedRegion(HeapWord* addr); | |
151 MemRegion getAndClearMarkedRegion(HeapWord* start_addr, | |
152 HeapWord* end_addr); | |
153 | |
154 // conversion utilities | |
155 HeapWord* offsetToHeapWord(size_t offset) const; | |
156 size_t heapWordToOffset(HeapWord* addr) const; | |
157 size_t heapWordDiffToOffsetDiff(size_t diff) const; | |
158 | |
9076
7b835924c31c
8011872: Include Bit Map addresses in the hs_err files
stefank
parents:
9072
diff
changeset
|
159 void print_on_error(outputStream* st, const char* prefix) const; |
7b835924c31c
8011872: Include Bit Map addresses in the hs_err files
stefank
parents:
9072
diff
changeset
|
160 |
0 | 161 // debugging |
162 // is this address range covered by the bit-map? | |
163 NOT_PRODUCT( | |
164 bool covers(MemRegion mr) const; | |
165 bool covers(HeapWord* start, size_t size = 0) const; | |
166 ) | |
167 void verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) PRODUCT_RETURN; | |
168 }; | |
169 | |
170 // Represents a marking stack used by the CMS collector. | |
171 // Ideally this should be GrowableArray<> just like MSC's marking stack(s). | |
6197 | 172 class CMSMarkStack: public CHeapObj<mtGC> { |
0 | 173 // |
174 friend class CMSCollector; // to get at expansion stats further below |
175 // | |
176 | |
177 VirtualSpace _virtual_space; // space for the stack | |
178 oop* _base; // bottom of stack | |
179 size_t _index; // one more than last occupied index | |
180 size_t _capacity; // max #elements | |
181 Mutex _par_lock; // an advisory lock used in case of parallel access | |
182 NOT_PRODUCT(size_t _max_depth;) // max depth plumbed during run | |
183 | |
184 protected: | |
185 size_t _hit_limit; // we hit max stack size limit | |
186 size_t _failed_double; // we failed expansion before hitting limit | |
187 | |
188 public: | |
189 CMSMarkStack(): | |
190 _par_lock(Mutex::event, "CMSMarkStack._par_lock", true), | |
191 _hit_limit(0), | |
192 _failed_double(0) {} | |
193 | |
194 bool allocate(size_t size); | |
195 | |
196 size_t capacity() const { return _capacity; } | |
197 | |
198 oop pop() { | |
199 if (!isEmpty()) { | |
200 return _base[--_index] ; | |
201 } | |
202 return NULL; | |
203 } | |
204 | |
205 bool push(oop ptr) { | |
206 if (isFull()) { | |
207 return false; | |
208 } else { | |
209 _base[_index++] = ptr; | |
210 NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index)); | |
211 return true; | |
212 } | |
213 } | |
214 | |
215 bool isEmpty() const { return _index == 0; } | |
216 bool isFull() const { | |
217 assert(_index <= _capacity, "buffer overflow"); | |
218 return _index == _capacity; | |
219 } | |
220 | |
221 size_t length() { return _index; } | |
222 | |
223 // "Parallel versions" of some of the above | |
224 oop par_pop() { | |
225 // lock and pop | |
226 MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag); | |
227 return pop(); | |
228 } | |
229 | |
230 bool par_push(oop ptr) { | |
231 // lock and push | |
232 MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag); | |
233 return push(ptr); | |
234 } | |
235 | |
236 // Forcibly reset the stack, losing all of its contents. | |
237 void reset() { | |
238 _index = 0; | |
239 } | |
240 | |
241 // Expand the stack, typically in response to an overflow condition | |
242 void expand(); | |
243 | |
244 // Compute the least valued stack element. | |
245 oop least_value(HeapWord* low) { | |
246 oop least = (oop)low; | |
247 for (size_t i = 0; i < _index; i++) { | |
248 least = MIN2(least, _base[i]); | |
249 } | |
250 return least; | |
251 } | |
252 | |
253 // Exposed here to allow stack expansion in || case | |
254 Mutex* par_lock() { return &_par_lock; } | |
255 }; | |
256 | |
257 class CardTableRS; | |
258 class CMSParGCThreadState; | |
259 | |
260 class ModUnionClosure: public MemRegionClosure { | |
261 protected: | |
262 CMSBitMap* _t; | |
263 public: | |
264 ModUnionClosure(CMSBitMap* t): _t(t) { } | |
265 void do_MemRegion(MemRegion mr); | |
266 }; | |
267 | |
268 class ModUnionClosurePar: public ModUnionClosure { | |
269 public: | |
270 ModUnionClosurePar(CMSBitMap* t): ModUnionClosure(t) { } | |
271 void do_MemRegion(MemRegion mr); | |
272 }; | |
273 | |
274 // Survivor Chunk Array in support of parallelization of | |
275 // Survivor Space rescan. | |
6197 | 276 class ChunkArray: public CHeapObj<mtGC> { |
0 | 277 size_t _index; |
278 size_t _capacity; | |
1753
bba76f745fe6
6910183: CMS: assert(_index < capacity(),"_index out of bounds")
ysr
parents:
1716
diff
changeset
|
279 size_t _overflows; |
0 | 280 HeapWord** _array; // storage for array |
281 | |
282 public: | |
1753
bba76f745fe6
6910183: CMS: assert(_index < capacity(),"_index out of bounds")
ysr
parents:
1716
diff
changeset
|
283 ChunkArray() : _index(0), _capacity(0), _overflows(0), _array(NULL) {} |
0 | 284 ChunkArray(HeapWord** a, size_t c): |
1753
bba76f745fe6
6910183: CMS: assert(_index < capacity(),"_index out of bounds")
ysr
parents:
1716
diff
changeset
|
285 _index(0), _capacity(c), _overflows(0), _array(a) {} |
0 | 286 |
287 HeapWord** array() { return _array; } | |
288 void set_array(HeapWord** a) { _array = a; } | |
289 | |
290 size_t capacity() { return _capacity; } | |
291 void set_capacity(size_t c) { _capacity = c; } | |
292 | |
293 size_t end() { | |
1753
bba76f745fe6
6910183: CMS: assert(_index < capacity(),"_index out of bounds")
ysr
parents:
1716
diff
changeset
|
294 assert(_index <= capacity(), |
bba76f745fe6
6910183: CMS: assert(_index < capacity(),"_index out of bounds")
ysr
parents:
1716
diff
changeset
|
295 err_msg("_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT "): out of bounds", |
bba76f745fe6
6910183: CMS: assert(_index < capacity(),"_index out of bounds")
ysr
parents:
1716
diff
changeset
|
296 _index, _capacity)); |
0 | 297 return _index; |
298 } // exclusive | |
299 | |
300 HeapWord* nth(size_t n) { | |
301 assert(n < end(), "Out of bounds access"); | |
302 return _array[n]; | |
303 } | |
304 | |
305 void reset() { | |
306 _index = 0; | |
1753
bba76f745fe6
6910183: CMS: assert(_index < capacity(),"_index out of bounds")
ysr
parents:
1716
diff
changeset
|
307 if (_overflows > 0 && PrintCMSStatistics > 1) { |
bba76f745fe6
6910183: CMS: assert(_index < capacity(),"_index out of bounds")
ysr
parents:
1716
diff
changeset
|
308 warning("CMS: ChunkArray[" SIZE_FORMAT "] overflowed " SIZE_FORMAT " times", |
bba76f745fe6
6910183: CMS: assert(_index < capacity(),"_index out of bounds")
ysr
parents:
1716
diff
changeset
|
309 _capacity, _overflows); |
bba76f745fe6
6910183: CMS: assert(_index < capacity(),"_index out of bounds")
ysr
parents:
1716
diff
changeset
|
310 } |
bba76f745fe6
6910183: CMS: assert(_index < capacity(),"_index out of bounds")
ysr
parents:
1716
diff
changeset
|
311 _overflows = 0; |
0 | 312 } |
313 | |
314 void record_sample(HeapWord* p, size_t sz) { | |
315 // For now we do not do anything with the size | |
316 if (_index < _capacity) { | |
317 _array[_index++] = p; | |
1753
bba76f745fe6
6910183: CMS: assert(_index < capacity(),"_index out of bounds")
ysr
parents:
1716
diff
changeset
|
318 } else { |
bba76f745fe6
6910183: CMS: assert(_index < capacity(),"_index out of bounds")
ysr
parents:
1716
diff
changeset
|
319 ++_overflows; |
bba76f745fe6
6910183: CMS: assert(_index < capacity(),"_index out of bounds")
ysr
parents:
1716
diff
changeset
|
320 assert(_index == _capacity, |
bba76f745fe6
6910183: CMS: assert(_index < capacity(),"_index out of bounds")
ysr
parents:
1716
diff
changeset
|
321 err_msg("_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT |
bba76f745fe6
6910183: CMS: assert(_index < capacity(),"_index out of bounds")
ysr
parents:
1716
diff
changeset
|
322 "): out of bounds at overflow#" SIZE_FORMAT, |
bba76f745fe6
6910183: CMS: assert(_index < capacity(),"_index out of bounds")
ysr
parents:
1716
diff
changeset
|
323 _index, _capacity, _overflows)); |
0 | 324 } |
325 } | |
326 }; | |
327 | |
328 // | |
329 // Timing, allocation and promotion statistics for gc scheduling and incremental | |
330 // mode pacing. Most statistics are exponential averages. | |
331 // | |
332 class CMSStats VALUE_OBJ_CLASS_SPEC { | |
333 private: | |
334 ConcurrentMarkSweepGeneration* const _cms_gen; // The cms (old) gen. | |
335 | |
336 // The following are exponential averages with factor alpha: | |
337 // avg = (100 - alpha) * avg + alpha * cur_sample | |
338 // | |
339 // The durations measure: end_time[n] - start_time[n] | |
340 // The periods measure: start_time[n] - start_time[n-1] | |
341 // | |
342 // The cms period and duration include only concurrent collections; time spent | |
343 // in foreground cms collections due to System.gc() or because of a failure to | |
344 // keep up are not included. | |
345 // | |
346 // There are 3 alphas to "bootstrap" the statistics. The _saved_alpha is the | |
347 // real value, but is used only after the first period. A value of 100 is | |
348 // used for the first sample so it gets the entire weight. | |
349 unsigned int _saved_alpha; // 0-100 | |
350 unsigned int _gc0_alpha; | |
351 unsigned int _cms_alpha; | |
352 | |
353 double _gc0_duration; | |
354 double _gc0_period; | |
355 size_t _gc0_promoted; // bytes promoted per gc0 | |
356 double _cms_duration; | |
357 double _cms_duration_pre_sweep; // time from initiation to start of sweep | |
358 double _cms_duration_per_mb; | |
359 double _cms_period; | |
360 size_t _cms_allocated; // bytes of direct allocation per gc0 period | |
361 | |
362 // Timers. | |
363 elapsedTimer _cms_timer; | |
364 TimeStamp _gc0_begin_time; | |
365 TimeStamp _cms_begin_time; | |
366 TimeStamp _cms_end_time; | |
367 | |
368 // Snapshots of the amount used in the CMS generation. | |
369 size_t _cms_used_at_gc0_begin; | |
370 size_t _cms_used_at_gc0_end; | |
371 size_t _cms_used_at_cms_begin; | |
372 | |
373 // Used to prevent the duty cycle from being reduced in the middle of a cms | |
374 // cycle. | |
375 bool _allow_duty_cycle_reduction; | |
376 | |
377 enum { | |
378 _GC0_VALID = 0x1, | |
379 _CMS_VALID = 0x2, | |
380 _ALL_VALID = _GC0_VALID | _CMS_VALID | |
381 }; | |
382 | |
383 unsigned int _valid_bits; | |
384 | |
385 unsigned int _icms_duty_cycle; // icms duty cycle (0-100). | |
386 | |
387 protected: | |
388 | |
389 // Return a duty cycle that avoids wild oscillations, by limiting the amount | |
390 // of change between old_duty_cycle and new_duty_cycle (the latter is treated | |
391 // as a recommended value). | |
392 static unsigned int icms_damped_duty_cycle(unsigned int old_duty_cycle, | |
393 unsigned int new_duty_cycle); | |
394 unsigned int icms_update_duty_cycle_impl(); | |
395 | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
935
diff
changeset
|
396 // In support of adjusting of cms trigger ratios based on history |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
935
diff
changeset
|
397 // of concurrent mode failure. |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
935
diff
changeset
|
398 double cms_free_adjustment_factor(size_t free) const; |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
935
diff
changeset
|
399 void adjust_cms_free_adjustment_factor(bool fail, size_t free); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
935
diff
changeset
|
400 |
0 | 401 public: |
402 CMSStats(ConcurrentMarkSweepGeneration* cms_gen, | |
403 unsigned int alpha = CMSExpAvgFactor); | |
404 | |
405 // Whether or not the statistics contain valid data; higher level statistics | |
406 // cannot be called until this returns true (they require at least one young | |
407 // gen and one cms cycle to have completed). | |
408 bool valid() const; | |
409 | |
410 // Record statistics. | |
411 void record_gc0_begin(); | |
412 void record_gc0_end(size_t cms_gen_bytes_used); | |
413 void record_cms_begin(); | |
414 void record_cms_end(); | |
415 | |
416 // Allow management of the cms timer, which must be stopped/started around | |
417 // yield points. | |
418 elapsedTimer& cms_timer() { return _cms_timer; } | |
419 void start_cms_timer() { _cms_timer.start(); } | |
420 void stop_cms_timer() { _cms_timer.stop(); } | |
421 | |
422 // Basic statistics; units are seconds or bytes. | |
423 double gc0_period() const { return _gc0_period; } | |
424 double gc0_duration() const { return _gc0_duration; } | |
425 size_t gc0_promoted() const { return _gc0_promoted; } | |
426 double cms_period() const { return _cms_period; } | |
427 double cms_duration() const { return _cms_duration; } | |
428 double cms_duration_per_mb() const { return _cms_duration_per_mb; } | |
429 size_t cms_allocated() const { return _cms_allocated; } | |
430 | |
431 size_t cms_used_at_gc0_end() const { return _cms_used_at_gc0_end;} | |
432 | |
433 // Seconds since the last background cms cycle began or ended. | |
434 double cms_time_since_begin() const; | |
435 double cms_time_since_end() const; | |
436 | |
437 // Higher level statistics--caller must check that valid() returns true before | |
438 // calling. | |
439 | |
440 // Returns bytes promoted per second of wall clock time. | |
441 double promotion_rate() const; | |
442 | |
443 // Returns bytes directly allocated per second of wall clock time. | |
444 double cms_allocation_rate() const; | |
445 | |
446 // Rate at which space in the cms generation is being consumed (sum of the | |
447 // above two). | |
448 double cms_consumption_rate() const; | |
449 | |
450 // Returns an estimate of the number of seconds until the cms generation will | |
451 // fill up, assuming no collection work is done. | |
452 double time_until_cms_gen_full() const; | |
453 | |
454 // Returns an estimate of the number of seconds remaining until | |
455 // the cms generation collection should start. | |
456 double time_until_cms_start() const; | |
457 | |
458 // End of higher level statistics. | |
459 | |
460 // Returns the cms incremental mode duty cycle, as a percentage (0-100). | |
461 unsigned int icms_duty_cycle() const { return _icms_duty_cycle; } | |
462 | |
463 // Update the duty cycle and return the new value. | |
464 unsigned int icms_update_duty_cycle(); | |
465 | |
466 // Debugging. | |
467 void print_on(outputStream* st) const PRODUCT_RETURN; | |
468 void print() const { print_on(gclog_or_tty); } | |
469 }; | |
470 | |
471 // A closure related to weak references processing which | |
472 // we embed in the CMSCollector, since we need to pass | |
473 // it to the reference processor for secondary filtering | |
474 // of references based on reachability of referent; | |
475 // see role of _is_alive_non_header closure in the | |
476 // ReferenceProcessor class. | |
477 // For objects in the CMS generation, this closure checks | |
478 // if the object is "live" (reachable). Used in weak | |
479 // reference processing. | |
480 class CMSIsAliveClosure: public BoolObjectClosure { | |
143
b5489bb705c9
6662086: 6u4+, 7b11+: CMS never clears referents when -XX:+ParallelRefProcEnabled
ysr
parents:
113
diff
changeset
|
481 const MemRegion _span; |
0 | 482 const CMSBitMap* _bit_map; |
483 | |
484 friend class CMSCollector; | |
485 public: | |
486 CMSIsAliveClosure(MemRegion span, | |
487 CMSBitMap* bit_map): | |
488 _span(span), | |
143
b5489bb705c9
6662086: 6u4+, 7b11+: CMS never clears referents when -XX:+ParallelRefProcEnabled
ysr
parents:
113
diff
changeset
|
489 _bit_map(bit_map) { |
b5489bb705c9
6662086: 6u4+, 7b11+: CMS never clears referents when -XX:+ParallelRefProcEnabled
ysr
parents:
113
diff
changeset
|
490 assert(!span.is_empty(), "Empty span could spell trouble"); |
b5489bb705c9
6662086: 6u4+, 7b11+: CMS never clears referents when -XX:+ParallelRefProcEnabled
ysr
parents:
113
diff
changeset
|
491 } |
b5489bb705c9
6662086: 6u4+, 7b11+: CMS never clears referents when -XX:+ParallelRefProcEnabled
ysr
parents:
113
diff
changeset
|
492 |
0 | 493 bool do_object_b(oop obj); |
494 }; | |
495 | |
496 | |
497 // Implements AbstractRefProcTaskExecutor for CMS. | |
498 class CMSRefProcTaskExecutor: public AbstractRefProcTaskExecutor { | |
499 public: | |
500 | |
501 CMSRefProcTaskExecutor(CMSCollector& collector) | |
502 : _collector(collector) | |
503 { } | |
504 | |
505 // Executes a task using worker threads. | |
506 virtual void execute(ProcessTask& task); | |
507 virtual void execute(EnqueueTask& task); | |
508 private: | |
509 CMSCollector& _collector; | |
510 }; | |
511 | |
512 | |
6197 | 513 class CMSCollector: public CHeapObj<mtGC> { |
0 | 514 friend class VMStructs; |
515 friend class ConcurrentMarkSweepThread; | |
516 friend class ConcurrentMarkSweepGeneration; | |
517 friend class CompactibleFreeListSpace; | |
11975 | 518 friend class CMSParMarkTask; |
519 friend class CMSParInitialMarkTask; | |
0 | 520 friend class CMSParRemarkTask; |
521 friend class CMSConcMarkingTask; | |
522 friend class CMSRefProcTaskProxy; | |
523 friend class CMSRefProcTaskExecutor; | |
524 friend class ScanMarkedObjectsAgainCarefullyClosure; // for sampling eden | |
525 friend class SurvivorSpacePrecleanClosure; // --- ditto ------- | |
526 friend class PushOrMarkClosure; // to access _restart_addr | |
527 friend class Par_PushOrMarkClosure; // to access _restart_addr | |
528 friend class MarkFromRootsClosure; // -- ditto -- | |
529 // ... and for clearing cards | |
530 friend class Par_MarkFromRootsClosure; // to access _restart_addr | |
531 // ... and for clearing cards | |
532 friend class Par_ConcMarkingClosure; // to access _restart_addr etc. | |
533 friend class MarkFromRootsVerifyClosure; // to access _restart_addr | |
534 friend class PushAndMarkVerifyClosure; // -- ditto -- | |
535 friend class MarkRefsIntoAndScanClosure; // to access _overflow_list | |
536 friend class PushAndMarkClosure; // -- ditto -- | |
537 friend class Par_PushAndMarkClosure; // -- ditto -- | |
538 friend class CMSKeepAliveClosure; // -- ditto -- | |
539 friend class CMSDrainMarkingStackClosure; // -- ditto -- | |
540 friend class CMSInnerParMarkAndPushClosure; // -- ditto -- | |
541 NOT_PRODUCT(friend class ScanMarkedObjectsAgainClosure;) // assertion on _overflow_list | |
542 friend class ReleaseForegroundGC; // to access _foregroundGCShouldWait | |
543 friend class VM_CMS_Operation; | |
544 friend class VM_CMS_Initial_Mark; | |
545 friend class VM_CMS_Final_Remark; | |
1703
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1552
diff
changeset
|
546 friend class TraceCMSMemoryManagerStats; |
0 | 547 |
548 private: | |
549 jlong _time_of_last_gc; | |
550 void update_time_of_last_gc(jlong now) { | |
551 _time_of_last_gc = now; | |
552 } | |
553 | |
554 OopTaskQueueSet* _task_queues; | |
555 | |
556 // Overflow list of grey objects, threaded through mark-word | |
557 // Manipulated with CAS in the parallel/multi-threaded case. | |
558 oop _overflow_list; | |
559 // The following array-pair keeps track of mark words | |
560 // displaced for accommodating overflow list above. |
561 // This code will likely be revisited under RFE#4922830. | |
6197 | 562 Stack<oop, mtGC> _preserved_oop_stack; |
563 Stack<markOop, mtGC> _preserved_mark_stack; | |
0 | 564 |
565 int* _hash_seed; | |
566 | |
567 // In support of multi-threaded concurrent phases | |
568 YieldingFlexibleWorkGang* _conc_workers; | |
569 | |
570 // Performance Counters | |
571 CollectorCounters* _gc_counters; | |
572 | |
573 // Initialization Errors | |
574 bool _completed_initialization; | |
575 | |
576 // In support of ExplicitGCInvokesConcurrent | |
10405 | 577 static bool _full_gc_requested; |
578 static GCCause::Cause _full_gc_cause; | |
579 unsigned int _collection_count_start; | |
94
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
0
diff
changeset
|
580 |
0 | 581 // Should we unload classes this concurrent cycle? |
94
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
0
diff
changeset
|
582 bool _should_unload_classes; |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
0
diff
changeset
|
583 unsigned int _concurrent_cycles_since_last_unload; |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
0
diff
changeset
|
584 unsigned int concurrent_cycles_since_last_unload() const { |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
0
diff
changeset
|
585 return _concurrent_cycles_since_last_unload; |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
0
diff
changeset
|
586 } |
0 | 587 // Did we (allow) unload classes in the previous concurrent cycle? |
94
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
0
diff
changeset
|
588 bool unloaded_classes_last_cycle() const { |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
0
diff
changeset
|
589 return concurrent_cycles_since_last_unload() == 0; |
0 | 590 } |
798
fe1574da39fc
6848641: CMSCollector::_roots_scanning_options should be initialized
ysr
parents:
579
diff
changeset
|
591 // Root scanning options for perm gen |
fe1574da39fc
6848641: CMSCollector::_roots_scanning_options should be initialized
ysr
parents:
579
diff
changeset
|
592 int _roots_scanning_options; |
fe1574da39fc
6848641: CMSCollector::_roots_scanning_options should be initialized
ysr
parents:
579
diff
changeset
|
593 int roots_scanning_options() const { return _roots_scanning_options; } |
fe1574da39fc
6848641: CMSCollector::_roots_scanning_options should be initialized
ysr
parents:
579
diff
changeset
|
594 void add_root_scanning_option(int o) { _roots_scanning_options |= o; } |
fe1574da39fc
6848641: CMSCollector::_roots_scanning_options should be initialized
ysr
parents:
579
diff
changeset
|
595 void remove_root_scanning_option(int o) { _roots_scanning_options &= ~o; } |
0 | 596 |
597 // Verification support | |
598 CMSBitMap _verification_mark_bm; | |
599 void verify_after_remark_work_1(); | |
600 void verify_after_remark_work_2(); | |
601 | |
602 // true if any verification flag is on. | |
603 bool _verifying; | |
604 bool verifying() const { return _verifying; } | |
605 void set_verifying(bool v) { _verifying = v; } | |
606 | |
607 // Collector policy | |
608 ConcurrentMarkSweepPolicy* _collector_policy; | |
609 ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; } | |
610 | |
10244
06ab37f08701
8013184: CMS: Call reset_after_compaction() only if a compaction has been done
jmasa
parents:
10186
diff
changeset
|
611 void set_did_compact(bool v); |
06ab37f08701
8013184: CMS: Call reset_after_compaction() only if a compaction has been done
jmasa
parents:
10186
diff
changeset
|
612 |
0 | 613 // XXX Move these to CMSStats ??? FIX ME !!! |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
935
diff
changeset
|
614 elapsedTimer _inter_sweep_timer; // time between sweeps |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
935
diff
changeset
|
615 elapsedTimer _intra_sweep_timer; // time _in_ sweeps |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
935
diff
changeset
|
616 // padded decaying average estimates of the above |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
935
diff
changeset
|
617 AdaptivePaddedAverage _inter_sweep_estimate; |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
935
diff
changeset
|
618 AdaptivePaddedAverage _intra_sweep_estimate; |
0 | 619 |
10405 | 620 CMSTracer* _gc_tracer_cm; |
621 ConcurrentGCTimer* _gc_timer_cm; | |
622 | |
623 bool _cms_start_registered; | |
624 | |
625 GCHeapSummary _last_heap_summary; | |
626 MetaspaceSummary _last_metaspace_summary; | |
627 | |
628 void register_foreground_gc_start(GCCause::Cause cause); | |
629 void register_gc_start(GCCause::Cause cause); | |
630 void register_gc_end(); | |
631 void save_heap_summary(); | |
632 void report_heap_summary(GCWhen::Type when); | |
633 | |
0 | 634 protected: |
635 ConcurrentMarkSweepGeneration* _cmsGen; // old gen (CMS) | |
636 MemRegion _span; // span covering above two | |
637 CardTableRS* _ct; // card table | |
638 | |
639 // CMS marking support structures | |
640 CMSBitMap _markBitMap; | |
641 CMSBitMap _modUnionTable; | |
642 CMSMarkStack _markStack; | |
643 | |
644 HeapWord* _restart_addr; // in support of marking stack overflow | |
645 void lower_restart_addr(HeapWord* low); | |
646 | |
647 // Counters in support of marking stack / work queue overflow handling: | |
648 // a non-zero value indicates certain types of overflow events during | |
649 // the current CMS cycle and could lead to stack resizing efforts at | |
650 // an opportune future time. | |
651 size_t _ser_pmc_preclean_ovflw; | |
652 size_t _ser_pmc_remark_ovflw; | |
653 size_t _par_pmc_remark_ovflw; | |
452
00b023ae2d78
6722113: CMS: Incorrect overflow handling during precleaning of Reference lists
ysr
parents:
356
diff
changeset
|
654 size_t _ser_kac_preclean_ovflw; |
0 | 655 size_t _ser_kac_ovflw; |
656 size_t _par_kac_ovflw; | |
534 | 657 NOT_PRODUCT(ssize_t _num_par_pushes;) |
0 | 658 |
659 // ("Weak") Reference processing support | |
660 ReferenceProcessor* _ref_processor; | |
661 CMSIsAliveClosure _is_alive_closure; | |
143
b5489bb705c9
6662086: 6u4+, 7b11+: CMS never clears referents when -XX:+ParallelRefProcEnabled
ysr
parents:
113
diff
changeset
|
662 // keep this textually after _markBitMap and _span; c'tor dependency |
0 | 663 |
664 ConcurrentMarkSweepThread* _cmsThread; // the thread doing the work | |
665 ModUnionClosure _modUnionClosure; | |
666 ModUnionClosurePar _modUnionClosurePar; | |
667 | |
668 // CMS abstract state machine | |
669 // initial_state: Idling | |
670 // next_state(Idling) = {Marking} | |
671 // next_state(Marking) = {Precleaning, Sweeping} | |
672 // next_state(Precleaning) = {AbortablePreclean, FinalMarking} | |
673 // next_state(AbortablePreclean) = {FinalMarking} | |
674 // next_state(FinalMarking) = {Sweeping} | |
675 // next_state(Sweeping) = {Resizing} | |
676 // next_state(Resizing) = {Resetting} | |
677 // next_state(Resetting) = {Idling} | |
678 // The numeric values below are chosen so that: | |
679 // . _collectorState <= Idling == post-sweep && pre-mark | |
680 // . _collectorState in (Idling, Sweeping) == {initial,final}marking || | |
681 // precleaning || abortablePrecleanb | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
935
diff
changeset
|
682 public: |
0 | 683 enum CollectorState { |
684 Resizing = 0, | |
685 Resetting = 1, | |
686 Idling = 2, | |
687 InitialMarking = 3, | |
688 Marking = 4, | |
689 Precleaning = 5, | |
690 AbortablePreclean = 6, | |
691 FinalMarking = 7, | |
692 Sweeping = 8 | |
693 }; | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
935
diff
changeset
|
694 protected: |
0 | 695 static CollectorState _collectorState; |
696 | |
697 // State related to prologue/epilogue invocation for my generations | |
698 bool _between_prologue_and_epilogue; | |
699 | |
700 // Signalling/State related to coordination between fore- and backgroud GC | |
701 // Note: When the baton has been passed from background GC to foreground GC, | |
702 // _foregroundGCIsActive is true and _foregroundGCShouldWait is false. | |
703 static bool _foregroundGCIsActive; // true iff foreground collector is active or | |
704 // wants to go active | |
705 static bool _foregroundGCShouldWait; // true iff background GC is active and has not | |
706 // yet passed the baton to the foreground GC | |
707 | |
708 // Support for CMSScheduleRemark (abortable preclean) | |
709 bool _abort_preclean; | |
710 bool _start_sampling; | |
711 | |
712 int _numYields; | |
713 size_t _numDirtyCards; | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
935
diff
changeset
|
714 size_t _sweep_count; |
0 | 715 // number of full gc's since the last concurrent gc. |
716 uint _full_gcs_since_conc_gc; | |
717 | |
718 // occupancy used for bootstrapping stats | |
719 double _bootstrap_occupancy; | |
720 | |
721 // timer | |
722 elapsedTimer _timer; | |
723 | |
724 // Timing, allocation and promotion statistics, used for scheduling. | |
725 CMSStats _stats; | |
726 | |
727 // Allocation limits installed in the young gen, used only in | |
728 // CMSIncrementalMode. When an allocation in the young gen would cross one of | |
729 // these limits, the cms generation is notified and the cms thread is started | |
730 // or stopped, respectively. | |
731 HeapWord* _icms_start_limit; | |
732 HeapWord* _icms_stop_limit; | |
733 | |
734 enum CMS_op_type { | |
735 CMS_op_checkpointRootsInitial, | |
736 CMS_op_checkpointRootsFinal | |
737 }; | |
738 | |
6064
9d679effd28c
7166894: Add gc cause to GC logging for all collectors
brutisso
parents:
6026
diff
changeset
|
739 void do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause); |
0 | 740 bool stop_world_and_do(CMS_op_type op); |
741 | |
742 OopTaskQueueSet* task_queues() { return _task_queues; } | |
743 int* hash_seed(int i) { return &_hash_seed[i]; } | |
744 YieldingFlexibleWorkGang* conc_workers() { return _conc_workers; } | |
745 | |
746 // Support for parallelizing Eden rescan in CMS remark phase | |
747 void sample_eden(); // ... sample Eden space top | |
748 | |
749 private: | |
750 // Support for parallelizing young gen rescan in CMS remark phase | |
751 Generation* _young_gen; // the younger gen | |
752 HeapWord** _top_addr; // ... Top of Eden | |
753 HeapWord** _end_addr; // ... End of Eden | |
11973
7b06ae405d7b
6990419: CMS Remaining work for 6572569: consistently skewed work distribution in (long) re-mark pauses
jmasa
parents:
11096
diff
changeset
|
754 Mutex* _eden_chunk_lock; |
0 | 755 HeapWord** _eden_chunk_array; // ... Eden partitioning array |
756 size_t _eden_chunk_index; // ... top (exclusive) of array | |
757 size_t _eden_chunk_capacity; // ... max entries in array | |
758 | |
759 // Support for parallelizing survivor space rescan | |
760 HeapWord** _survivor_chunk_array; | |
761 size_t _survivor_chunk_index; | |
762 size_t _survivor_chunk_capacity; | |
763 size_t* _cursor; | |
764 ChunkArray* _survivor_plab_array; | |
765 | |
766 // Support for marking stack overflow handling | |
767 bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack); | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1753
diff
changeset
|
768 bool par_take_from_overflow_list(size_t num, |
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1753
diff
changeset
|
769 OopTaskQueue* to_work_q, |
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1753
diff
changeset
|
770 int no_of_gc_threads); |
0 | 771 void push_on_overflow_list(oop p); |
772 void par_push_on_overflow_list(oop p); | |
773 // the following is, obviously, not, in general, "MT-stable" | |
774 bool overflow_list_is_empty() const; | |
775 | |
776 void preserve_mark_if_necessary(oop p); | |
777 void par_preserve_mark_if_necessary(oop p); | |
778 void preserve_mark_work(oop p, markOop m); | |
779 void restore_preserved_marks_if_any(); | |
780 NOT_PRODUCT(bool no_preserved_marks() const;) | |
781 // in support of testing overflow code | |
782 NOT_PRODUCT(int _overflow_counter;) | |
783 NOT_PRODUCT(bool simulate_overflow();) // sequential | |
784 NOT_PRODUCT(bool par_simulate_overflow();) // MT version | |
785 | |
786 // CMS work methods | |
787 void checkpointRootsInitialWork(bool asynch); // initial checkpoint work | |
788 | |
789 // a return value of false indicates failure due to stack overflow | |
790 bool markFromRootsWork(bool asynch); // concurrent marking work | |
791 | |
792 public: // FIX ME!!! only for testing | |
793 bool do_marking_st(bool asynch); // single-threaded marking | |
794 bool do_marking_mt(bool asynch); // multi-threaded marking | |
795 | |
796 private: | |
797 | |
798 // concurrent precleaning work | |
799 size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen, | |
800 ScanMarkedObjectsAgainCarefullyClosure* cl); | |
801 size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen, | |
802 ScanMarkedObjectsAgainCarefullyClosure* cl); | |
803 // Does precleaning work, returning a quantity indicative of | |
804 // the amount of "useful work" done. | |
805 size_t preclean_work(bool clean_refs, bool clean_survivors); | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6197
diff
changeset
|
806 void preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock); |
0 | 807 void abortable_preclean(); // Preclean while looking for possible abort |
808 void initialize_sequential_subtasks_for_young_gen_rescan(int i); | |
809 // Helper function for above; merge-sorts the per-thread plab samples | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1753
diff
changeset
|
810 void merge_survivor_plab_arrays(ContiguousSpace* surv, int no_of_gc_threads); |
0 | 811 // Resets (i.e. clears) the per-thread plab sample vectors |
812 void reset_survivor_plab_arrays(); | |
813 | |
814 // final (second) checkpoint work | |
815 void checkpointRootsFinalWork(bool asynch, bool clear_all_soft_refs, | |
816 bool init_mark_was_synchronous); | |
817 // work routine for parallel version of remark | |
818 void do_remark_parallel(); | |
819 // work routine for non-parallel version of remark | |
820 void do_remark_non_parallel(); | |
821 // reference processing work routine (during second checkpoint) | |
822 void refProcessingWork(bool asynch, bool clear_all_soft_refs); | |
823 | |
824 // concurrent sweeping work | |
825 void sweepWork(ConcurrentMarkSweepGeneration* gen, bool asynch); | |
826 | |
827 // (concurrent) resetting of support data structures | |
828 void reset(bool asynch); | |
829 | |
830 // Clear _expansion_cause fields of constituent generations | |
831 void clear_expansion_cause(); | |
832 | |
833 // An auxilliary method used to record the ends of | |
834 // used regions of each generation to limit the extent of sweep | |
835 void save_sweep_limits(); | |
836 | |
837 // A work method used by foreground collection to determine | |
838 // what type of collection (compacting or not, continuing or fresh) | |
839 // it should do. | |
840 void decide_foreground_collection_type(bool clear_all_soft_refs, | |
841 bool* should_compact, bool* should_start_over); | |
842 | |
843 // A work method used by the foreground collector to do | |
844 // a mark-sweep-compact. | |
845 void do_compaction_work(bool clear_all_soft_refs); | |
846 | |
847 // A work method used by the foreground collector to do | |
848 // a mark-sweep, after taking over from a possibly on-going | |
849 // concurrent mark-sweep collection. | |
850 void do_mark_sweep_work(bool clear_all_soft_refs, | |
851 CollectorState first_state, bool should_start_over); | |
852 | |
10405 | 853 // Work methods for reporting concurrent mode interruption or failure |
854 bool is_external_interruption(); | |
855 void report_concurrent_mode_interruption(); | |
856 | |
0 | 857 // If the backgrould GC is active, acquire control from the background |
858 // GC and do the collection. | |
859 void acquire_control_and_collect(bool full, bool clear_all_soft_refs); | |
860 | |
861 // For synchronizing passing of control from background to foreground | |
862 // GC. waitForForegroundGC() is called by the background | |
863 // collector. It if had to wait for a foreground collection, | |
864 // it returns true and the background collection should assume | |
865 // that the collection was finished by the foreground | |
866 // collector. | |
867 bool waitForForegroundGC(); | |
868 | |
869 // Incremental mode triggering: recompute the icms duty cycle and set the | |
870 // allocation limits in the young gen. | |
871 void icms_update_allocation_limits(); | |
872 | |
873 size_t block_size_using_printezis_bits(HeapWord* addr) const; | |
874 size_t block_size_if_printezis_bits(HeapWord* addr) const; | |
875 HeapWord* next_card_start_after_block(HeapWord* addr) const; | |
876 | |
877 void setup_cms_unloading_and_verification_state(); | |
878 public: | |
879 CMSCollector(ConcurrentMarkSweepGeneration* cmsGen, | |
880 CardTableRS* ct, | |
881 ConcurrentMarkSweepPolicy* cp); | |
882 ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; } | |
883 | |
884 ReferenceProcessor* ref_processor() { return _ref_processor; } | |
885 void ref_processor_init(); | |
886 | |
887 Mutex* bitMapLock() const { return _markBitMap.lock(); } | |
888 static CollectorState abstract_state() { return _collectorState; } | |
889 | |
890 bool should_abort_preclean() const; // Whether preclean should be aborted. | |
891 size_t get_eden_used() const; | |
892 size_t get_eden_capacity() const; | |
893 | |
894 ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; } | |
895 | |
896 // locking checks | |
897 NOT_PRODUCT(static bool have_cms_token();) | |
898 | |
899 // XXXPERM bool should_collect(bool full, size_t size, bool tlab); | |
900 bool shouldConcurrentCollect(); | |
901 | |
902 void collect(bool full, | |
903 bool clear_all_soft_refs, | |
904 size_t size, | |
905 bool tlab); | |
10405 | 906 void collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause); |
907 void collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause); | |
0 | 908 |
909 // In support of ExplicitGCInvokesConcurrent | |
10405 | 910 static void request_full_gc(unsigned int full_gc_count, GCCause::Cause cause); |
0 | 911 // Should we unload classes in a particular concurrent cycle? |
94
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
0
diff
changeset
|
912 bool should_unload_classes() const { |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
0
diff
changeset
|
913 return _should_unload_classes; |
0 | 914 } |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6197
diff
changeset
|
915 void update_should_unload_classes(); |
0 | 916 |
917 void direct_allocated(HeapWord* start, size_t size); | |
918 | |
919 // Object is dead if not marked and current phase is sweeping. | |
920 bool is_dead_obj(oop obj) const; | |
921 | |
922 // After a promotion (of "start"), do any necessary marking. | |
923 // If "par", then it's being done by a parallel GC thread. | |
924 // The last two args indicate if we need precise marking | |
925 // and if so the size of the object so it can be dirtied | |
926 // in its entirety. | |
927 void promoted(bool par, HeapWord* start, | |
928 bool is_obj_array, size_t obj_size); | |
929 | |
930 HeapWord* allocation_limit_reached(Space* space, HeapWord* top, | |
931 size_t word_size); | |
932 | |
933 void getFreelistLocks() const; | |
934 void releaseFreelistLocks() const; | |
935 bool haveFreelistLocks() const; | |
936 | |
9072
8617e38bb4cb
8008508: CMS does not correctly reduce heap size after a Full GC
jmasa
parents:
8035
diff
changeset
|
937 // Adjust size of underlying generation |
8617e38bb4cb
8008508: CMS does not correctly reduce heap size after a Full GC
jmasa
parents:
8035
diff
changeset
|
938 void compute_new_size(); |
8617e38bb4cb
8008508: CMS does not correctly reduce heap size after a Full GC
jmasa
parents:
8035
diff
changeset
|
939 |
0 | 940 // GC prologue and epilogue |
941 void gc_prologue(bool full); | |
942 void gc_epilogue(bool full); | |
943 | |
944 jlong time_of_last_gc(jlong now) { | |
945 if (_collectorState <= Idling) { | |
946 // gc not in progress | |
947 return _time_of_last_gc; | |
948 } else { | |
949 // collection in progress | |
950 return now; | |
951 } | |
952 } | |
953 | |
954 // Support for parallel remark of survivor space | |
955 void* get_data_recorder(int thr_num); | |
11973
7b06ae405d7b
6990419: CMS Remaining work for 6572569: consistently skewed work distribution in (long) re-mark pauses
jmasa
parents:
11096
diff
changeset
|
956 void sample_eden_chunk(); |
0 | 957 |
958 CMSBitMap* markBitMap() { return &_markBitMap; } | |
959 void directAllocated(HeapWord* start, size_t size); | |
960 | |
961 // main CMS steps and related support | |
962 void checkpointRootsInitial(bool asynch); | |
963 bool markFromRoots(bool asynch); // a return value of false indicates failure | |
964 // due to stack overflow | |
965 void preclean(); | |
966 void checkpointRootsFinal(bool asynch, bool clear_all_soft_refs, | |
967 bool init_mark_was_synchronous); | |
968 void sweep(bool asynch); | |
969 | |
970 // Check that the currently executing thread is the expected | |
971 // one (foreground collector or background collector). | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
935
diff
changeset
|
972 static void check_correct_thread_executing() PRODUCT_RETURN; |
0 | 973 // XXXPERM void print_statistics() PRODUCT_RETURN; |
974 | |
975 bool is_cms_reachable(HeapWord* addr); | |
976 | |
977 // Performance Counter Support | |
978 CollectorCounters* counters() { return _gc_counters; } | |
979 | |
980 // timer stuff | |
981 void startTimer() { assert(!_timer.is_active(), "Error"); _timer.start(); } | |
982 void stopTimer() { assert( _timer.is_active(), "Error"); _timer.stop(); } | |
983 void resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset(); } | |
984 double timerValue() { assert(!_timer.is_active(), "Error"); return _timer.seconds(); } | |
985 | |
986 int yields() { return _numYields; } | |
987 void resetYields() { _numYields = 0; } | |
988 void incrementYields() { _numYields++; } | |
989 void resetNumDirtyCards() { _numDirtyCards = 0; } | |
990 void incrementNumDirtyCards(size_t num) { _numDirtyCards += num; } | |
991 size_t numDirtyCards() { return _numDirtyCards; } | |
992 | |
993 static bool foregroundGCShouldWait() { return _foregroundGCShouldWait; } | |
994 static void set_foregroundGCShouldWait(bool v) { _foregroundGCShouldWait = v; } | |
995 static bool foregroundGCIsActive() { return _foregroundGCIsActive; } | |
996 static void set_foregroundGCIsActive(bool v) { _foregroundGCIsActive = v; } | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
935
diff
changeset
|
997 size_t sweep_count() const { return _sweep_count; } |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
935
diff
changeset
|
998 void increment_sweep_count() { _sweep_count++; } |
0 | 999 |
1000 // Timers/stats for gc scheduling and incremental mode pacing. | |
1001 CMSStats& stats() { return _stats; } | |
1002 | |
1003 // Convenience methods that check whether CMSIncrementalMode is enabled and | |
1004 // forward to the corresponding methods in ConcurrentMarkSweepThread. | |
1005 static void start_icms(); | |
1006 static void stop_icms(); // Called at the end of the cms cycle. | |
1007 static void disable_icms(); // Called before a foreground collection. | |
1008 static void enable_icms(); // Called after a foreground collection. | |
1009 void icms_wait(); // Called at yield points. | |
1010 | |
1011 // Adaptive size policy | |
1012 CMSAdaptiveSizePolicy* size_policy(); | |
1013 CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters(); | |
1014 | |
9076
7b835924c31c
8011872: Include Bit Map addresses in the hs_err files
stefank
parents:
9072
diff
changeset
|
1015 static void print_on_error(outputStream* st); |
7b835924c31c
8011872: Include Bit Map addresses in the hs_err files
stefank
parents:
9072
diff
changeset
|
1016 |
0 | 1017 // debugging |
6008 | 1018 void verify(); |
10186
b06ac540229e
8013132: Add a flag to turn off the output of the verbose verification code
stefank
parents:
9076
diff
changeset
|
1019 bool verify_after_remark(bool silent = VerifySilently); |
0 | 1020 void verify_ok_to_terminate() const PRODUCT_RETURN; |
1021 void verify_work_stacks_empty() const PRODUCT_RETURN; | |
1022 void verify_overflow_empty() const PRODUCT_RETURN; | |
1023 | |
1024 // convenience methods in support of debugging | |
1025 static const size_t skip_header_HeapWords() PRODUCT_RETURN0; | |
1026 HeapWord* block_start(const void* p) const PRODUCT_RETURN0; | |
1027 | |
1028 // accessors | |
1029 CMSMarkStack* verification_mark_stack() { return &_markStack; } | |
1030 CMSBitMap* verification_mark_bm() { return &_verification_mark_bm; } | |
1031 | |
1032 // Initialization errors | |
1033 bool completed_initialization() { return _completed_initialization; } | |
11973
7b06ae405d7b
6990419: CMS Remaining work for 6572569: consistently skewed work distribution in (long) re-mark pauses
jmasa
parents:
11096
diff
changeset
|
1034 |
7b06ae405d7b
6990419: CMS Remaining work for 6572569: consistently skewed work distribution in (long) re-mark pauses
jmasa
parents:
11096
diff
changeset
|
1035 void print_eden_and_survivor_chunk_arrays(); |
0 | 1036 }; |
1037 | |
1038 class CMSExpansionCause : public AllStatic { | |
1039 public: | |
1040 enum Cause { | |
1041 _no_expansion, | |
1042 _satisfy_free_ratio, | |
1043 _satisfy_promotion, | |
1044 _satisfy_allocation, | |
1045 _allocate_par_lab, | |
1046 _allocate_par_spooling_space, | |
1047 _adaptive_size_policy | |
1048 }; | |
1049 // Return a string describing the cause of the expansion. | |
1050 static const char* to_string(CMSExpansionCause::Cause cause); | |
1051 }; | |
1052 | |
1053 class ConcurrentMarkSweepGeneration: public CardGeneration { | |
1054 friend class VMStructs; | |
1055 friend class ConcurrentMarkSweepThread; | |
1056 friend class ConcurrentMarkSweep; | |
1057 friend class CMSCollector; | |
1058 protected: | |
1059 static CMSCollector* _collector; // the collector that collects us | |
1060 CompactibleFreeListSpace* _cmsSpace; // underlying space (only one for now) | |
1061 | |
1062 // Performance Counters | |
1063 GenerationCounters* _gen_counters; | |
1064 GSpaceCounters* _space_counters; | |
1065 | |
1066 // Words directly allocated, used by CMSStats. | |
1067 size_t _direct_allocated_words; | |
1068 | |
1069 // Non-product stat counters | |
1070 NOT_PRODUCT( | |
1716
be3f9c242c9d
6948538: CMS: BOT walkers can fall into object allocation and initialization cracks
ysr
parents:
1703
diff
changeset
|
1071 size_t _numObjectsPromoted; |
be3f9c242c9d
6948538: CMS: BOT walkers can fall into object allocation and initialization cracks
ysr
parents:
1703
diff
changeset
|
1072 size_t _numWordsPromoted; |
be3f9c242c9d
6948538: CMS: BOT walkers can fall into object allocation and initialization cracks
ysr
parents:
1703
diff
changeset
|
1073 size_t _numObjectsAllocated; |
be3f9c242c9d
6948538: CMS: BOT walkers can fall into object allocation and initialization cracks
ysr
parents:
1703
diff
changeset
|
1074 size_t _numWordsAllocated; |
0 | 1075 ) |
1076 | |
1077 // Used for sizing decisions | |
1078 bool _incremental_collection_failed; | |
1079 bool incremental_collection_failed() { | |
1080 return _incremental_collection_failed; | |
1081 } | |
1082 void set_incremental_collection_failed() { | |
1083 _incremental_collection_failed = true; | |
1084 } | |
1085 void clear_incremental_collection_failed() { | |
1086 _incremental_collection_failed = false; | |
1087 } | |
1088 | |
94
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
0
diff
changeset
|
1089 // accessors |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
0
diff
changeset
|
1090 void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;} |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
0
diff
changeset
|
1091 CMSExpansionCause::Cause expansion_cause() const { return _expansion_cause; } |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
0
diff
changeset
|
1092 |
0 | 1093 private: |
1094 // For parallel young-gen GC support. | |
1095 CMSParGCThreadState** _par_gc_thread_states; | |
1096 | |
1097 // Reason generation was expanded | |
1098 CMSExpansionCause::Cause _expansion_cause; | |
1099 | |
1100 // In support of MinChunkSize being larger than min object size | |
1101 const double _dilatation_factor; | |
1102 | |
1103 enum CollectionTypes { | |
1104 Concurrent_collection_type = 0, | |
1105 MS_foreground_collection_type = 1, | |
1106 MSC_foreground_collection_type = 2, | |
1107 Unknown_collection_type = 3 | |
1108 }; | |
1109 | |
1110 CollectionTypes _debug_collection_type; | |
1111 | |
10244
06ab37f08701
8013184: CMS: Call reset_after_compaction() only if a compaction has been done
jmasa
parents:
10186
diff
changeset
|
1112 // True if a compactiing collection was done. |
06ab37f08701
8013184: CMS: Call reset_after_compaction() only if a compaction has been done
jmasa
parents:
10186
diff
changeset
|
1113 bool _did_compact; |
06ab37f08701
8013184: CMS: Call reset_after_compaction() only if a compaction has been done
jmasa
parents:
10186
diff
changeset
|
1114 bool did_compact() { return _did_compact; } |
06ab37f08701
8013184: CMS: Call reset_after_compaction() only if a compaction has been done
jmasa
parents:
10186
diff
changeset
|
1115 |
94
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
0
diff
changeset
|
1116 // Fraction of current occupancy at which to start a CMS collection which |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
0
diff
changeset
|
1117 // will collect this generation (at least). |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
0
diff
changeset
|
1118 double _initiating_occupancy; |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
0
diff
changeset
|
1119 |
0 | 1120 protected: |
1121 // Shrink generation by specified size (returns false if unable to shrink) | |
9072
8617e38bb4cb
8008508: CMS does not correctly reduce heap size after a Full GC
jmasa
parents:
8035
diff
changeset
|
1122 void shrink_free_list_by(size_t bytes); |
0 | 1123 |
1124 // Update statistics for GC | |
1125 virtual void update_gc_stats(int level, bool full); | |
1126 | |
1127 // Maximum available space in the generation (including uncommitted) | |
1128 // space. | |
1129 size_t max_available() const; | |
1130 | |
94
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
0
diff
changeset
|
1131 // getter and initializer for _initiating_occupancy field. |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
0
diff
changeset
|
1132 double initiating_occupancy() const { return _initiating_occupancy; } |
8035 | 1133 void init_initiating_occupancy(intx io, uintx tr); |
94
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
0
diff
changeset
|
1134 |
0 | 1135 public: |
1136 ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size, | |
1137 int level, CardTableRS* ct, | |
1138 bool use_adaptive_freelists, | |
6026 | 1139 FreeBlockDictionary<FreeChunk>::DictionaryChoice); |
0 | 1140 |
1141 // Accessors | |
1142 CMSCollector* collector() const { return _collector; } | |
1143 static void set_collector(CMSCollector* collector) { | |
1144 assert(_collector == NULL, "already set"); | |
1145 _collector = collector; | |
1146 } | |
1147 CompactibleFreeListSpace* cmsSpace() const { return _cmsSpace; } | |
1148 | |
1149 Mutex* freelistLock() const; | |
1150 | |
1151 virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; } | |
1152 | |
1153 // Adaptive size policy | |
1154 CMSAdaptiveSizePolicy* size_policy(); | |
1155 | |
10244
06ab37f08701
8013184: CMS: Call reset_after_compaction() only if a compaction has been done
jmasa
parents:
10186
diff
changeset
|
1156 void set_did_compact(bool v) { _did_compact = v; } |
06ab37f08701
8013184: CMS: Call reset_after_compaction() only if a compaction has been done
jmasa
parents:
10186
diff
changeset
|
1157 |
0 | 1158 bool refs_discovery_is_atomic() const { return false; } |
1159 bool refs_discovery_is_mt() const { | |
1160 // Note: CMS does MT-discovery during the parallel-remark | |
1161 // phases. Use ReferenceProcessorMTMutator to make refs | |
1162 // discovery MT-safe during such phases or other parallel | |
1163 // discovery phases in the future. This may all go away | |
1164 // if/when we decide that refs discovery is sufficiently | |
1165 // rare that the cost of the CAS's involved is in the | |
1166 // noise. That's a measurement that should be done, and | |
1167 // the code simplified if that turns out to be the case. | |
2369
92da084fefc9
6668573: CMS: reference processing crash if ParallelCMSThreads > ParallelGCThreads
ysr
parents:
2132
diff
changeset
|
1168 return ConcGCThreads > 1; |
0 | 1169 } |
1170 | |
1171 // Override | |
1172 virtual void ref_processor_init(); | |
1173 | |
271
818a18cd69a8
6730514: assertion failure in mangling code when expanding by 0 bytes
jmasa
parents:
196
diff
changeset
|
1174 // Grow generation by specified size (returns false if unable to grow) |
818a18cd69a8
6730514: assertion failure in mangling code when expanding by 0 bytes
jmasa
parents:
196
diff
changeset
|
1175 bool grow_by(size_t bytes); |
818a18cd69a8
6730514: assertion failure in mangling code when expanding by 0 bytes
jmasa
parents:
196
diff
changeset
|
1176 // Grow generation to reserved size. |
818a18cd69a8
6730514: assertion failure in mangling code when expanding by 0 bytes
jmasa
parents:
196
diff
changeset
|
1177 bool grow_to_reserved(); |
818a18cd69a8
6730514: assertion failure in mangling code when expanding by 0 bytes
jmasa
parents:
196
diff
changeset
|
1178 |
0 | 1179 void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; } |
1180 | |
1181 // Space enquiries | |
1182 size_t capacity() const; | |
1183 size_t used() const; | |
1184 size_t free() const; | |
94
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
0
diff
changeset
|
1185 double occupancy() const { return ((double)used())/((double)capacity()); } |
0 | 1186 size_t contiguous_available() const; |
1187 size_t unsafe_max_alloc_nogc() const; | |
1188 | |
1189 // over-rides | |
1190 MemRegion used_region() const; | |
1191 MemRegion used_region_at_save_marks() const; | |
1192 | |
1193 // Does a "full" (forced) collection invoked on this generation collect | |
1194 // all younger generations as well? Note that the second conjunct is a | |
1195 // hack to allow the collection of the younger gen first if the flag is | |
1196 // set. This is better than using th policy's should_collect_gen0_first() | |
1197 // since that causes us to do an extra unnecessary pair of restart-&-stop-world. | |
1198 virtual bool full_collects_younger_generations() const { | |
1199 return UseCMSCompactAtFullCollection && !CollectGen0First; | |
1200 } | |
1201 | |
1202 void space_iterate(SpaceClosure* blk, bool usedOnly = false); | |
1203 | |
1204 // Support for compaction | |
1205 CompactibleSpace* first_compaction_space() const; | |
1206 // Adjust quantites in the generation affected by | |
1207 // the compaction. | |
1208 void reset_after_compaction(); | |
1209 | |
1210 // Allocation support | |
1211 HeapWord* allocate(size_t size, bool tlab); | |
1212 HeapWord* have_lock_and_allocate(size_t size, bool tlab); | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
1213 oop promote(oop obj, size_t obj_size); |
0 | 1214 HeapWord* par_allocate(size_t size, bool tlab) { |
1215 return allocate(size, tlab); | |
1216 } | |
1217 | |
1218 // Incremental mode triggering. | |
1219 HeapWord* allocation_limit_reached(Space* space, HeapWord* top, | |
1220 size_t word_size); | |
1221 | |
1222 // Used by CMSStats to track direct allocation. The value is sampled and | |
1223 // reset after each young gen collection. | |
1224 size_t direct_allocated_words() const { return _direct_allocated_words; } | |
1225 void reset_direct_allocated_words() { _direct_allocated_words = 0; } | |
1226 | |
1227 // Overrides for parallel promotion. | |
1228 virtual oop par_promote(int thread_num, | |
1229 oop obj, markOop m, size_t word_sz); | |
1230 // This one should not be called for CMS. | |
1231 virtual void par_promote_alloc_undo(int thread_num, | |
1232 HeapWord* obj, size_t word_sz); | |
1233 virtual void par_promote_alloc_done(int thread_num); | |
1234 virtual void par_oop_since_save_marks_iterate_done(int thread_num); | |
1235 | |
1888
a7214d79fcf1
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
1836
diff
changeset
|
1236 virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes) const; |
0 | 1237 |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
935
diff
changeset
|
1238 // Inform this (non-young) generation that a promotion failure was |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
935
diff
changeset
|
1239 // encountered during a collection of a younger generation that |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
935
diff
changeset
|
1240 // promotes into this generation. |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
935
diff
changeset
|
1241 virtual void promotion_failure_occurred(); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
935
diff
changeset
|
1242 |
0 | 1243 bool should_collect(bool full, size_t size, bool tlab); |
94
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
0
diff
changeset
|
1244 virtual bool should_concurrent_collect() const; |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
0
diff
changeset
|
1245 virtual bool is_too_full() const; |
0 | 1246 void collect(bool full, |
1247 bool clear_all_soft_refs, | |
1248 size_t size, | |
1249 bool tlab); | |
1250 | |
1251 HeapWord* expand_and_allocate(size_t word_size, | |
1252 bool tlab, | |
1253 bool parallel = false); | |
1254 | |
1255 // GC prologue and epilogue | |
1256 void gc_prologue(bool full); | |
1257 void gc_prologue_work(bool full, bool registerClosure, | |
1258 ModUnionClosure* modUnionClosure); | |
1259 void gc_epilogue(bool full); | |
1260 void gc_epilogue_work(bool full); | |
1261 | |
1262 // Time since last GC of this generation | |
1263 jlong time_of_last_gc(jlong now) { | |
1264 return collector()->time_of_last_gc(now); | |
1265 } | |
1266 void update_time_of_last_gc(jlong now) { | |
1267 collector()-> update_time_of_last_gc(now); | |
1268 } | |
1269 | |
1270 // Allocation failure | |
1271 void expand(size_t bytes, size_t expand_bytes, | |
1272 CMSExpansionCause::Cause cause); | |
271
818a18cd69a8
6730514: assertion failure in mangling code when expanding by 0 bytes
jmasa
parents:
196
diff
changeset
|
1273 virtual bool expand(size_t bytes, size_t expand_bytes); |
0 | 1274 void shrink(size_t bytes); |
9072
8617e38bb4cb
8008508: CMS does not correctly reduce heap size after a Full GC
jmasa
parents:
8035
diff
changeset
|
1275 void shrink_by(size_t bytes); |
0 | 1276 HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz); |
1277 bool expand_and_ensure_spooling_space(PromotionInfo* promo); | |
1278 | |
1279 // Iteration support and related enquiries | |
1280 void save_marks(); | |
1281 bool no_allocs_since_save_marks(); | |
1282 void younger_refs_iterate(OopsInGenClosure* cl); | |
1283 | |
1284 // Iteration support specific to CMS generations | |
1285 void save_sweep_limit(); | |
1286 | |
1287 // More iteration support | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6197
diff
changeset
|
1288 virtual void oop_iterate(MemRegion mr, ExtendedOopClosure* cl); |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6197
diff
changeset
|
1289 virtual void oop_iterate(ExtendedOopClosure* cl); |
517
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
452
diff
changeset
|
1290 virtual void safe_object_iterate(ObjectClosure* cl); |
0 | 1291 virtual void object_iterate(ObjectClosure* cl); |
1292 | |
1293 // Need to declare the full complement of closures, whether we'll | |
1294 // override them or not, or get message from the compiler: | |
1295 // oop_since_save_marks_iterate_nv hides virtual function... | |
1296 #define CMS_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \ | |
1297 void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl); | |
1298 ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DECL) | |
1299 | |
1300 // Smart allocation XXX -- move to CFLSpace? | |
1301 void setNearLargestChunk(); | |
1302 bool isNearLargestChunk(HeapWord* addr); | |
1303 | |
1304 // Get the chunk at the end of the space. Delagates to | |
1305 // the space. | |
1306 FreeChunk* find_chunk_at_end(); | |
1307 | |
1308 void post_compact(); | |
1309 | |
1310 // Debugging | |
1311 void prepare_for_verify(); | |
6008 | 1312 void verify(); |
0 | 1313 void print_statistics() PRODUCT_RETURN; |
1314 | |
1315 // Performance Counters support | |
1316 virtual void update_counters(); | |
1317 virtual void update_counters(size_t used); | |
1318 void initialize_performance_counters(); | |
1319 CollectorCounters* counters() { return collector()->counters(); } | |
1320 | |
1321 // Support for parallel remark of survivor space | |
1322 void* get_data_recorder(int thr_num) { | |
1323 //Delegate to collector | |
1324 return collector()->get_data_recorder(thr_num); | |
1325 } | |
11973
7b06ae405d7b
6990419: CMS Remaining work for 6572569: consistently skewed work distribution in (long) re-mark pauses
jmasa
parents:
11096
diff
changeset
|
1326 void sample_eden_chunk() { |
7b06ae405d7b
6990419: CMS Remaining work for 6572569: consistently skewed work distribution in (long) re-mark pauses
jmasa
parents:
11096
diff
changeset
|
1327 //Delegate to collector |
7b06ae405d7b
6990419: CMS Remaining work for 6572569: consistently skewed work distribution in (long) re-mark pauses
jmasa
parents:
11096
diff
changeset
|
1328 return collector()->sample_eden_chunk(); |
7b06ae405d7b
6990419: CMS Remaining work for 6572569: consistently skewed work distribution in (long) re-mark pauses
jmasa
parents:
11096
diff
changeset
|
1329 } |
0 | 1330 |
1331 // Printing | |
1332 const char* name() const; | |
1333 virtual const char* short_name() const { return "CMS"; } | |
1334 void print() const; | |
1335 void printOccupancy(const char* s); | |
1336 bool must_be_youngest() const { return false; } | |
1337 bool must_be_oldest() const { return true; } | |
1338 | |
9072
8617e38bb4cb
8008508: CMS does not correctly reduce heap size after a Full GC
jmasa
parents:
8035
diff
changeset
|
1339 // Resize the generation after a compacting GC. The |
8617e38bb4cb
8008508: CMS does not correctly reduce heap size after a Full GC
jmasa
parents:
8035
diff
changeset
|
1340 // generation can be treated as a contiguous space |
8617e38bb4cb
8008508: CMS does not correctly reduce heap size after a Full GC
jmasa
parents:
8035
diff
changeset
|
1341 // after the compaction. |
8617e38bb4cb
8008508: CMS does not correctly reduce heap size after a Full GC
jmasa
parents:
8035
diff
changeset
|
1342 virtual void compute_new_size(); |
8617e38bb4cb
8008508: CMS does not correctly reduce heap size after a Full GC
jmasa
parents:
8035
diff
changeset
|
1343 // Resize the generation after a non-compacting |
8617e38bb4cb
8008508: CMS does not correctly reduce heap size after a Full GC
jmasa
parents:
8035
diff
changeset
|
1344 // collection. |
8617e38bb4cb
8008508: CMS does not correctly reduce heap size after a Full GC
jmasa
parents:
8035
diff
changeset
|
1345 void compute_new_size_free_list(); |
0 | 1346 |
1347 CollectionTypes debug_collection_type() { return _debug_collection_type; } | |
1348 void rotate_debug_collection_type(); | |
1349 }; | |
1350 | |
1351 class ASConcurrentMarkSweepGeneration : public ConcurrentMarkSweepGeneration { | |
1352 | |
1353 // Return the size policy from the heap's collector | |
1354 // policy casted to CMSAdaptiveSizePolicy*. | |
1355 CMSAdaptiveSizePolicy* cms_size_policy() const; | |
1356 | |
1357 // Resize the generation based on the adaptive size | |
1358 // policy. | |
1359 void resize(size_t cur_promo, size_t desired_promo); | |
1360 | |
1361 // Return the GC counters from the collector policy | |
1362 CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters(); | |
1363 | |
1364 virtual void shrink_by(size_t bytes); | |
1365 | |
1366 public: | |
1367 ASConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size, | |
1368 int level, CardTableRS* ct, | |
1369 bool use_adaptive_freelists, | |
6026 | 1370 FreeBlockDictionary<FreeChunk>::DictionaryChoice |
0 | 1371 dictionaryChoice) : |
1372 ConcurrentMarkSweepGeneration(rs, initial_byte_size, level, ct, | |
1373 use_adaptive_freelists, dictionaryChoice) {} | |
1374 | |
1375 virtual const char* short_name() const { return "ASCMS"; } | |
1376 virtual Generation::Name kind() { return Generation::ASConcurrentMarkSweep; } | |
1377 | |
1378 virtual void update_counters(); | |
1379 virtual void update_counters(size_t used); | |
1380 }; | |
1381 | |
1382 // | |
1383 // Closures of various sorts used by CMS to accomplish its work | |
1384 // | |
1385 | |
1386 // This closure is used to check that a certain set of oops is empty. | |
1387 class FalseClosure: public OopClosure { | |
1388 public: | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
1389 void do_oop(oop* p) { guarantee(false, "Should be an empty set"); } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
1390 void do_oop(narrowOop* p) { guarantee(false, "Should be an empty set"); } |
0 | 1391 }; |
1392 | |
1393 // This closure is used to do concurrent marking from the roots | |
1394 // following the first checkpoint. | |
1395 class MarkFromRootsClosure: public BitMapClosure { | |
1396 CMSCollector* _collector; | |
1397 MemRegion _span; | |
1398 CMSBitMap* _bitMap; | |
1399 CMSBitMap* _mut; | |
1400 CMSMarkStack* _markStack; | |
1401 bool _yield; | |
1402 int _skipBits; | |
1403 HeapWord* _finger; | |
1404 HeapWord* _threshold; | |
1405 DEBUG_ONLY(bool _verifying;) | |
1406 | |
1407 public: | |
1408 MarkFromRootsClosure(CMSCollector* collector, MemRegion span, | |
1409 CMSBitMap* bitMap, | |
1410 CMSMarkStack* markStack, | |
1411 bool should_yield, bool verifying = false); | |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
143
diff
changeset
|
1412 bool do_bit(size_t offset); |
0 | 1413 void reset(HeapWord* addr); |
1414 inline void do_yield_check(); | |
1415 | |
1416 private: | |
1417 void scanOopsInOop(HeapWord* ptr); | |
1418 void do_yield_work(); | |
1419 }; | |
1420 | |
1421 // This closure is used to do concurrent multi-threaded | |
1422 // marking from the roots following the first checkpoint. | |
1423 // XXX This should really be a subclass of The serial version | |
1424 // above, but i have not had the time to refactor things cleanly. | |
1425 // That willbe done for Dolphin. | |
1426 class Par_MarkFromRootsClosure: public BitMapClosure { | |
1427 CMSCollector* _collector; | |
1428 MemRegion _whole_span; | |
1429 MemRegion _span; | |
1430 CMSBitMap* _bit_map; | |
1431 CMSBitMap* _mut; | |
1432 OopTaskQueue* _work_queue; | |
1433 CMSMarkStack* _overflow_stack; | |
1434 bool _yield; | |
1435 int _skip_bits; | |
1436 HeapWord* _finger; | |
1437 HeapWord* _threshold; | |
1438 CMSConcMarkingTask* _task; | |
1439 public: | |
1440 Par_MarkFromRootsClosure(CMSConcMarkingTask* task, CMSCollector* collector, | |
1441 MemRegion span, | |
1442 CMSBitMap* bit_map, | |
1443 OopTaskQueue* work_queue, | |
1444 CMSMarkStack* overflow_stack, | |
1445 bool should_yield); | |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
143
diff
changeset
|
1446 bool do_bit(size_t offset); |
0 | 1447 inline void do_yield_check(); |
1448 | |
1449 private: | |
1450 void scan_oops_in_oop(HeapWord* ptr); | |
1451 void do_yield_work(); | |
1452 bool get_work_from_overflow_stack(); | |
1453 }; | |
1454 | |
1455 // The following closures are used to do certain kinds of verification of | |
1456 // CMS marking. | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6197
diff
changeset
|
1457 class PushAndMarkVerifyClosure: public CMSOopClosure { |
0 | 1458 CMSCollector* _collector; |
1459 MemRegion _span; | |
1460 CMSBitMap* _verification_bm; | |
1461 CMSBitMap* _cms_bm; | |
1462 CMSMarkStack* _mark_stack; | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
1463 protected: |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
1464 void do_oop(oop p); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
1465 template <class T> inline void do_oop_work(T *p) { |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6197
diff
changeset
|
1466 oop obj = oopDesc::load_decode_heap_oop(p); |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
1467 do_oop(obj); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
1468 } |
0 | 1469 public: |
1470 PushAndMarkVerifyClosure(CMSCollector* cms_collector, | |
1471 MemRegion span, | |
1472 CMSBitMap* verification_bm, | |
1473 CMSBitMap* cms_bm, | |
1474 CMSMarkStack* mark_stack); | |
1475 void do_oop(oop* p); | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
1476 void do_oop(narrowOop* p); |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6197
diff
changeset
|
1477 |
0 | 1478 // Deal with a stack overflow condition |
1479 void handle_stack_overflow(HeapWord* lost); | |
1480 }; | |
1481 | |
1482 class MarkFromRootsVerifyClosure: public BitMapClosure { | |
1483 CMSCollector* _collector; | |
1484 MemRegion _span; | |
1485 CMSBitMap* _verification_bm; | |
1486 CMSBitMap* _cms_bm; | |
1487 CMSMarkStack* _mark_stack; | |
1488 HeapWord* _finger; | |
1489 PushAndMarkVerifyClosure _pam_verify_closure; | |
1490 public: | |
1491 MarkFromRootsVerifyClosure(CMSCollector* collector, MemRegion span, | |
1492 CMSBitMap* verification_bm, | |
1493 CMSBitMap* cms_bm, | |
1494 CMSMarkStack* mark_stack); | |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
143
diff
changeset
|
1495 bool do_bit(size_t offset); |
0 | 1496 void reset(HeapWord* addr); |
1497 }; | |
1498 | |
1499 | |
1500 // This closure is used to check that a certain set of bits is | |
1501 // "empty" (i.e. the bit vector doesn't have any 1-bits). | |
1502 class FalseBitMapClosure: public BitMapClosure { | |
1503 public: | |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
143
diff
changeset
|
1504 bool do_bit(size_t offset) { |
0 | 1505 guarantee(false, "Should not have a 1 bit"); |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
143
diff
changeset
|
1506 return true; |
0 | 1507 } |
1508 }; | |
1509 | |
1510 // This closure is used during the second checkpointing phase | |
1511 // to rescan the marked objects on the dirty cards in the mod | |
1512 // union table and the card table proper. It's invoked via | |
1513 // MarkFromDirtyCardsClosure below. It uses either | |
1514 // [Par_]MarkRefsIntoAndScanClosure (Par_ in the parallel case) | |
1515 // declared in genOopClosures.hpp to accomplish some of its work. | |
1516 // In the parallel case the bitMap is shared, so access to | |
1517 // it needs to be suitably synchronized for updates by embedded | |
1518 // closures that update it; however, this closure itself only | |
1519 // reads the bit_map and because it is idempotent, is immune to | |
1520 // reading stale values. | |
1521 class ScanMarkedObjectsAgainClosure: public UpwardsObjectClosure { | |
1522 #ifdef ASSERT | |
1523 CMSCollector* _collector; | |
1524 MemRegion _span; | |
1525 union { | |
1526 CMSMarkStack* _mark_stack; | |
1527 OopTaskQueue* _work_queue; | |
1528 }; | |
1529 #endif // ASSERT | |
1530 bool _parallel; | |
1531 CMSBitMap* _bit_map; | |
1532 union { | |
1533 MarkRefsIntoAndScanClosure* _scan_closure; | |
1534 Par_MarkRefsIntoAndScanClosure* _par_scan_closure; | |
1535 }; | |
1536 | |
1537 public: | |
1538 ScanMarkedObjectsAgainClosure(CMSCollector* collector, | |
1539 MemRegion span, | |
1540 ReferenceProcessor* rp, | |
1541 CMSBitMap* bit_map, | |
1542 CMSMarkStack* mark_stack, | |
1543 MarkRefsIntoAndScanClosure* cl): | |
1544 #ifdef ASSERT | |
1545 _collector(collector), | |
1546 _span(span), | |
1547 _mark_stack(mark_stack), | |
1548 #endif // ASSERT | |
1549 _parallel(false), | |
1550 _bit_map(bit_map), | |
1551 _scan_closure(cl) { } | |
1552 | |
1553 ScanMarkedObjectsAgainClosure(CMSCollector* collector, | |
1554 MemRegion span, | |
1555 ReferenceProcessor* rp, | |
1556 CMSBitMap* bit_map, | |
1557 OopTaskQueue* work_queue, | |
1558 Par_MarkRefsIntoAndScanClosure* cl): | |
1559 #ifdef ASSERT | |
1560 _collector(collector), | |
1561 _span(span), | |
1562 _work_queue(work_queue), | |
1563 #endif // ASSERT | |
1564 _parallel(true), | |
1565 _bit_map(bit_map), | |
1566 _par_scan_closure(cl) { } | |
1567 | |
1568 bool do_object_b(oop obj) { | |
1569 guarantee(false, "Call do_object_b(oop, MemRegion) form instead"); | |
1570 return false; | |
1571 } | |
1572 bool do_object_bm(oop p, MemRegion mr); | |
1573 }; | |
1574 | |
1575 // This closure is used during the second checkpointing phase | |
1576 // to rescan the marked objects on the dirty cards in the mod | |
1577 // union table and the card table proper. It invokes | |
1578 // ScanMarkedObjectsAgainClosure above to accomplish much of its work. | |
1579 // In the parallel case, the bit map is shared and requires | |
1580 // synchronized access. | |
1581 class MarkFromDirtyCardsClosure: public MemRegionClosure { | |
1582 CompactibleFreeListSpace* _space; | |
1583 ScanMarkedObjectsAgainClosure _scan_cl; | |
1584 size_t _num_dirty_cards; | |
1585 | |
1586 public: | |
1587 MarkFromDirtyCardsClosure(CMSCollector* collector, | |
1588 MemRegion span, | |
1589 CompactibleFreeListSpace* space, | |
1590 CMSBitMap* bit_map, | |
1591 CMSMarkStack* mark_stack, | |
1592 MarkRefsIntoAndScanClosure* cl): | |
1593 _space(space), | |
1594 _num_dirty_cards(0), | |
1595 _scan_cl(collector, span, collector->ref_processor(), bit_map, | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6197
diff
changeset
|
1596 mark_stack, cl) { } |
0 | 1597 |
1598 MarkFromDirtyCardsClosure(CMSCollector* collector, | |
1599 MemRegion span, | |
1600 CompactibleFreeListSpace* space, | |
1601 CMSBitMap* bit_map, | |
1602 OopTaskQueue* work_queue, | |
1603 Par_MarkRefsIntoAndScanClosure* cl): | |
1604 _space(space), | |
1605 _num_dirty_cards(0), | |
1606 _scan_cl(collector, span, collector->ref_processor(), bit_map, | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6197
diff
changeset
|
1607 work_queue, cl) { } |
0 | 1608 |
1609 void do_MemRegion(MemRegion mr); | |
1610 void set_space(CompactibleFreeListSpace* space) { _space = space; } | |
1611 size_t num_dirty_cards() { return _num_dirty_cards; } | |
1612 }; | |
1613 | |
1614 // This closure is used in the non-product build to check | |
1615 // that there are no MemRegions with a certain property. | |
1616 class FalseMemRegionClosure: public MemRegionClosure { | |
1617 void do_MemRegion(MemRegion mr) { | |
1618 guarantee(!mr.is_empty(), "Shouldn't be empty"); | |
1619 guarantee(false, "Should never be here"); | |
1620 } | |
1621 }; | |
1622 | |
1623 // This closure is used during the precleaning phase | |
1624 // to "carefully" rescan marked objects on dirty cards. | |
1625 // It uses MarkRefsIntoAndScanClosure declared in genOopClosures.hpp | |
1626 // to accomplish some of its work. | |
1627 class ScanMarkedObjectsAgainCarefullyClosure: public ObjectClosureCareful { | |
1628 CMSCollector* _collector; | |
1629 MemRegion _span; | |
1630 bool _yield; | |
1631 Mutex* _freelistLock; | |
1632 CMSBitMap* _bitMap; | |
1633 CMSMarkStack* _markStack; | |
1634 MarkRefsIntoAndScanClosure* _scanningClosure; | |
1635 | |
1636 public: | |
1637 ScanMarkedObjectsAgainCarefullyClosure(CMSCollector* collector, | |
1638 MemRegion span, | |
1639 CMSBitMap* bitMap, | |
1640 CMSMarkStack* markStack, | |
1641 MarkRefsIntoAndScanClosure* cl, | |
1642 bool should_yield): | |
1643 _collector(collector), | |
1644 _span(span), | |
1645 _yield(should_yield), | |
1646 _bitMap(bitMap), | |
1647 _markStack(markStack), | |
1648 _scanningClosure(cl) { | |
1649 } | |
1650 | |
1651 void do_object(oop p) { | |
1652 guarantee(false, "call do_object_careful instead"); | |
1653 } | |
1654 | |
1655 size_t do_object_careful(oop p) { | |
1656 guarantee(false, "Unexpected caller"); | |
1657 return 0; | |
1658 } | |
1659 | |
1660 size_t do_object_careful_m(oop p, MemRegion mr); | |
1661 | |
1662 void setFreelistLock(Mutex* m) { | |
1663 _freelistLock = m; | |
1664 _scanningClosure->set_freelistLock(m); | |
1665 } | |
1666 | |
1667 private: | |
1668 inline bool do_yield_check(); | |
1669 | |
1670 void do_yield_work(); | |
1671 }; | |
1672 | |
1673 class SurvivorSpacePrecleanClosure: public ObjectClosureCareful { | |
1674 CMSCollector* _collector; | |
1675 MemRegion _span; | |
1676 bool _yield; | |
1677 CMSBitMap* _bit_map; | |
1678 CMSMarkStack* _mark_stack; | |
1679 PushAndMarkClosure* _scanning_closure; | |
1680 unsigned int _before_count; | |
1681 | |
1682 public: | |
1683 SurvivorSpacePrecleanClosure(CMSCollector* collector, | |
1684 MemRegion span, | |
1685 CMSBitMap* bit_map, | |
1686 CMSMarkStack* mark_stack, | |
1687 PushAndMarkClosure* cl, | |
1688 unsigned int before_count, | |
1689 bool should_yield): | |
1690 _collector(collector), | |
1691 _span(span), | |
1692 _yield(should_yield), | |
1693 _bit_map(bit_map), | |
1694 _mark_stack(mark_stack), | |
1695 _scanning_closure(cl), | |
1696 _before_count(before_count) | |
1697 { } | |
1698 | |
1699 void do_object(oop p) { | |
1700 guarantee(false, "call do_object_careful instead"); | |
1701 } | |
1702 | |
1703 size_t do_object_careful(oop p); | |
1704 | |
1705 size_t do_object_careful_m(oop p, MemRegion mr) { | |
1706 guarantee(false, "Unexpected caller"); | |
1707 return 0; | |
1708 } | |
1709 | |
1710 private: | |
1711 inline void do_yield_check(); | |
1712 void do_yield_work(); | |
1713 }; | |
1714 | |
1715 // This closure is used to accomplish the sweeping work | |
1716 // after the second checkpoint but before the concurrent reset | |
1717 // phase. | |
1718 // | |
1719 // Terminology | |
1720 // left hand chunk (LHC) - block of one or more chunks currently being | |
1721 // coalesced. The LHC is available for coalescing with a new chunk. | |
1722 // right hand chunk (RHC) - block that is currently being swept that is | |
1723 // free or garbage that can be coalesced with the LHC. | |
1724 // _inFreeRange is true if there is currently a LHC | |
1725 // _lastFreeRangeCoalesced is true if the LHC consists of more than one chunk. | |
1726 // _freeRangeInFreeLists is true if the LHC is in the free lists. | |
1727 // _freeFinger is the address of the current LHC | |
1728 class SweepClosure: public BlkClosureCareful { | |
1729 CMSCollector* _collector; // collector doing the work | |
1730 ConcurrentMarkSweepGeneration* _g; // Generation being swept | |
1731 CompactibleFreeListSpace* _sp; // Space being swept | |
3746
537a4053b0f9
7042740: CMS: assert(n> q) failed: Looping at: ... blockOffsetTable.cpp:557
ysr
parents:
3356
diff
changeset
|
1732 HeapWord* _limit;// the address at or above which the sweep should stop |
537a4053b0f9
7042740: CMS: assert(n> q) failed: Looping at: ... blockOffsetTable.cpp:557
ysr
parents:
3356
diff
changeset
|
1733 // because we do not expect newly garbage blocks |
537a4053b0f9
7042740: CMS: assert(n> q) failed: Looping at: ... blockOffsetTable.cpp:557
ysr
parents:
3356
diff
changeset
|
1734 // eligible for sweeping past that address. |
0 | 1735 Mutex* _freelistLock; // Free list lock (in space) |
1736 CMSBitMap* _bitMap; // Marking bit map (in | |
1737 // generation) | |
1738 bool _inFreeRange; // Indicates if we are in the | |
1739 // midst of a free run | |
1740 bool _freeRangeInFreeLists; | |
1741 // Often, we have just found | |
1742 // a free chunk and started | |
1743 // a new free range; we do not | |
1744 // eagerly remove this chunk from | |
1745 // the free lists unless there is | |
1746 // a possibility of coalescing. | |
1747 // When true, this flag indicates | |
1748 // that the _freeFinger below | |
1749 // points to a potentially free chunk | |
1750 // that may still be in the free lists | |
1751 bool _lastFreeRangeCoalesced; | |
1752 // free range contains chunks | |
1753 // coalesced | |
1754 bool _yield; | |
1755 // Whether sweeping should be | |
1756 // done with yields. For instance | |
1757 // when done by the foreground | |
1758 // collector we shouldn't yield. | |
1759 HeapWord* _freeFinger; // When _inFreeRange is set, the | |
1760 // pointer to the "left hand | |
1761 // chunk" | |
1762 size_t _freeRangeSize; | |
1763 // When _inFreeRange is set, this | |
1764 // indicates the accumulated size | |
1765 // of the "left hand chunk" | |
1766 NOT_PRODUCT( | |
1767 size_t _numObjectsFreed; | |
1768 size_t _numWordsFreed; | |
1769 size_t _numObjectsLive; | |
1770 size_t _numWordsLive; | |
1771 size_t _numObjectsAlreadyFree; | |
1772 size_t _numWordsAlreadyFree; | |
1773 FreeChunk* _last_fc; | |
1774 ) | |
1775 private: | |
1776 // Code that is common to a free chunk or garbage when | |
1777 // encountered during sweeping. | |
2132
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1972
diff
changeset
|
1778 void do_post_free_or_garbage_chunk(FreeChunk *fc, size_t chunkSize); |
0 | 1779 // Process a free chunk during sweeping. |
2132
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1972
diff
changeset
|
1780 void do_already_free_chunk(FreeChunk *fc); |
3746
537a4053b0f9
7042740: CMS: assert(n> q) failed: Looping at: ... blockOffsetTable.cpp:557
ysr
parents:
3356
diff
changeset
|
1781 // Work method called when processing an already free or a |
537a4053b0f9
7042740: CMS: assert(n> q) failed: Looping at: ... blockOffsetTable.cpp:557
ysr
parents:
3356
diff
changeset
|
1782 // freshly garbage chunk to do a lookahead and possibly a |
537a4053b0f9
7042740: CMS: assert(n> q) failed: Looping at: ... blockOffsetTable.cpp:557
ysr
parents:
3356
diff
changeset
|
1783 // premptive flush if crossing over _limit. |
537a4053b0f9
7042740: CMS: assert(n> q) failed: Looping at: ... blockOffsetTable.cpp:557
ysr
parents:
3356
diff
changeset
|
1784 void lookahead_and_flush(FreeChunk* fc, size_t chunkSize); |
0 | 1785 // Process a garbage chunk during sweeping. |
2132
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1972
diff
changeset
|
1786 size_t do_garbage_chunk(FreeChunk *fc); |
0 | 1787 // Process a live chunk during sweeping. |
2132
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1972
diff
changeset
|
1788 size_t do_live_chunk(FreeChunk* fc); |
0 | 1789 |
1790 // Accessors. | |
1791 HeapWord* freeFinger() const { return _freeFinger; } | |
1792 void set_freeFinger(HeapWord* v) { _freeFinger = v; } | |
1793 bool inFreeRange() const { return _inFreeRange; } | |
1794 void set_inFreeRange(bool v) { _inFreeRange = v; } | |
1795 bool lastFreeRangeCoalesced() const { return _lastFreeRangeCoalesced; } | |
1796 void set_lastFreeRangeCoalesced(bool v) { _lastFreeRangeCoalesced = v; } | |
1797 bool freeRangeInFreeLists() const { return _freeRangeInFreeLists; } | |
1798 void set_freeRangeInFreeLists(bool v) { _freeRangeInFreeLists = v; } | |
1799 | |
1800 // Initialize a free range. | |
1801 void initialize_free_range(HeapWord* freeFinger, bool freeRangeInFreeLists); | |
1802 // Return this chunk to the free lists. | |
2132
4947ee68d19c
7008136: CMS: assert((HeapWord*)nextChunk <= _limit) failed: sweep invariant
ysr
parents:
1972
diff
changeset
|
1803 void flush_cur_free_chunk(HeapWord* chunk, size_t size); |
0 | 1804 |
1805 // Check if we should yield and do so when necessary. | |
1806 inline void do_yield_check(HeapWord* addr); | |
1807 | |
1808 // Yield | |
1809 void do_yield_work(HeapWord* addr); | |
1810 | |
1811 // Debugging/Printing | |
3746
537a4053b0f9
7042740: CMS: assert(n> q) failed: Looping at: ... blockOffsetTable.cpp:557
ysr
parents:
3356
diff
changeset
|
1812 void print_free_block_coalesced(FreeChunk* fc) const; |
0 | 1813 |
1814 public: | |
1815 SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g, | |
1816 CMSBitMap* bitMap, bool should_yield); | |
3746
537a4053b0f9
7042740: CMS: assert(n> q) failed: Looping at: ... blockOffsetTable.cpp:557
ysr
parents:
3356
diff
changeset
|
1817 ~SweepClosure() PRODUCT_RETURN; |
0 | 1818 |
1819 size_t do_blk_careful(HeapWord* addr); | |
3746
537a4053b0f9
7042740: CMS: assert(n> q) failed: Looping at: ... blockOffsetTable.cpp:557
ysr
parents:
3356
diff
changeset
|
1820 void print() const { print_on(tty); } |
537a4053b0f9
7042740: CMS: assert(n> q) failed: Looping at: ... blockOffsetTable.cpp:557
ysr
parents:
3356
diff
changeset
|
1821 void print_on(outputStream *st) const; |
0 | 1822 }; |
1823 | |
1824 // Closures related to weak references processing | |
1825 | |
1826 // During CMS' weak reference processing, this is a | |
1827 // work-routine/closure used to complete transitive | |
1828 // marking of objects as live after a certain point | |
1829 // in which an initial set has been completely accumulated. | |
452
00b023ae2d78
6722113: CMS: Incorrect overflow handling during precleaning of Reference lists
ysr
parents:
356
diff
changeset
|
1830 // This closure is currently used both during the final |
00b023ae2d78
6722113: CMS: Incorrect overflow handling during precleaning of Reference lists
ysr
parents:
356
diff
changeset
|
1831 // remark stop-world phase, as well as during the concurrent |
00b023ae2d78
6722113: CMS: Incorrect overflow handling during precleaning of Reference lists
ysr
parents:
356
diff
changeset
|
1832 // precleaning of the discovered reference lists. |
0 | 1833 class CMSDrainMarkingStackClosure: public VoidClosure { |
1834 CMSCollector* _collector; | |
1835 MemRegion _span; | |
1836 CMSMarkStack* _mark_stack; | |
1837 CMSBitMap* _bit_map; | |
1838 CMSKeepAliveClosure* _keep_alive; | |
452
00b023ae2d78
6722113: CMS: Incorrect overflow handling during precleaning of Reference lists
ysr
parents:
356
diff
changeset
|
1839 bool _concurrent_precleaning; |
0 | 1840 public: |
1841 CMSDrainMarkingStackClosure(CMSCollector* collector, MemRegion span, | |
1842 CMSBitMap* bit_map, CMSMarkStack* mark_stack, | |
452
00b023ae2d78
6722113: CMS: Incorrect overflow handling during precleaning of Reference lists
ysr
parents:
356
diff
changeset
|
1843 CMSKeepAliveClosure* keep_alive, |
00b023ae2d78
6722113: CMS: Incorrect overflow handling during precleaning of Reference lists
ysr
parents:
356
diff
changeset
|
1844 bool cpc): |
0 | 1845 _collector(collector), |
1846 _span(span), | |
1847 _bit_map(bit_map), | |
1848 _mark_stack(mark_stack), | |
452
00b023ae2d78
6722113: CMS: Incorrect overflow handling during precleaning of Reference lists
ysr
parents:
356
diff
changeset
|
1849 _keep_alive(keep_alive), |
00b023ae2d78
6722113: CMS: Incorrect overflow handling during precleaning of Reference lists
ysr
parents:
356
diff
changeset
|
1850 _concurrent_precleaning(cpc) { |
00b023ae2d78
6722113: CMS: Incorrect overflow handling during precleaning of Reference lists
ysr
parents:
356
diff
changeset
|
1851 assert(_concurrent_precleaning == _keep_alive->concurrent_precleaning(), |
00b023ae2d78
6722113: CMS: Incorrect overflow handling during precleaning of Reference lists
ysr
parents:
356
diff
changeset
|
1852 "Mismatch"); |
00b023ae2d78
6722113: CMS: Incorrect overflow handling during precleaning of Reference lists
ysr
parents:
356
diff
changeset
|
1853 } |
0 | 1854 |
1855 void do_void(); | |
1856 }; | |
1857 | |
1858 // A parallel version of CMSDrainMarkingStackClosure above. | |
1859 class CMSParDrainMarkingStackClosure: public VoidClosure { | |
1860 CMSCollector* _collector; | |
1861 MemRegion _span; | |
1862 OopTaskQueue* _work_queue; | |
1863 CMSBitMap* _bit_map; | |
1864 CMSInnerParMarkAndPushClosure _mark_and_push; | |
1865 | |
1866 public: | |
1867 CMSParDrainMarkingStackClosure(CMSCollector* collector, | |
1868 MemRegion span, CMSBitMap* bit_map, | |
1869 OopTaskQueue* work_queue): | |
1870 _collector(collector), | |
1871 _span(span), | |
1872 _bit_map(bit_map), | |
1873 _work_queue(work_queue), | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6197
diff
changeset
|
1874 _mark_and_push(collector, span, bit_map, work_queue) { } |
0 | 1875 |
1876 public: | |
1877 void trim_queue(uint max); | |
1878 void do_void(); | |
1879 }; | |
1880 | |
1881 // Allow yielding or short-circuiting of reference list | |
1882 // prelceaning work. | |
1883 class CMSPrecleanRefsYieldClosure: public YieldClosure { | |
1884 CMSCollector* _collector; | |
1885 void do_yield_work(); | |
1886 public: | |
1887 CMSPrecleanRefsYieldClosure(CMSCollector* collector): | |
1888 _collector(collector) {} | |
1889 virtual bool should_return(); | |
1890 }; | |
1891 | |
1892 | |
1893 // Convenience class that locks free list locks for given CMS collector | |
1894 class FreelistLocker: public StackObj { | |
1895 private: | |
1896 CMSCollector* _collector; | |
1897 public: | |
1898 FreelistLocker(CMSCollector* collector): | |
1899 _collector(collector) { | |
1900 _collector->getFreelistLocks(); | |
1901 } | |
1902 | |
1903 ~FreelistLocker() { | |
1904 _collector->releaseFreelistLocks(); | |
1905 } | |
1906 }; | |
1907 | |
1908 // Mark all dead objects in a given space. | |
1909 class MarkDeadObjectsClosure: public BlkClosure { | |
1910 const CMSCollector* _collector; | |
1911 const CompactibleFreeListSpace* _sp; | |
1912 CMSBitMap* _live_bit_map; | |
1913 CMSBitMap* _dead_bit_map; | |
1914 public: | |
1915 MarkDeadObjectsClosure(const CMSCollector* collector, | |
1916 const CompactibleFreeListSpace* sp, | |
1917 CMSBitMap *live_bit_map, | |
1918 CMSBitMap *dead_bit_map) : | |
1919 _collector(collector), | |
1920 _sp(sp), | |
1921 _live_bit_map(live_bit_map), | |
1922 _dead_bit_map(dead_bit_map) {} | |
1923 size_t do_blk(HeapWord* addr); | |
1924 }; | |
1703
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1552
diff
changeset
|
1925 |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1552
diff
changeset
|
1926 class TraceCMSMemoryManagerStats : public TraceMemoryManagerStats { |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1552
diff
changeset
|
1927 |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1552
diff
changeset
|
1928 public: |
3356
78542e2b5e35
7036199: Adding a notification to the implementation of GarbageCollectorMXBeans
fparain
parents:
2369
diff
changeset
|
1929 TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause); |
1703
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1552
diff
changeset
|
1930 }; |
f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
kevinw
parents:
1552
diff
changeset
|
1931 |
1972 | 1932 |
1933 #endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP |