annotate: src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp @ 1286:ab75c83d7c37

summary:  Merge
author:   johnc
date:     Tue, 02 Mar 2010 13:57:46 -0800
parents:  e018e6884bd8
children: 0bfd3fb24150

/*
 * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// ConcurrentMarkSweepGeneration is in support of a concurrent
// mark-sweep old generation in the Detlefs-Printezis--Boehm-Demers-Schenker
// style. We assume, for now, that this generation is always the
// seniormost generation (modulo the PermGeneration), and for simplicity
// in the first implementation, that this generation is a single compactible
// space. Neither of these restrictions appears essential, and they will be
// relaxed in the future when more time is available to implement the
// greater generality (and there's a need for it).
//
// Concurrent mode failures are currently handled by
// means of a sliding mark-compact.

class CMSAdaptiveSizePolicy;
class CMSConcMarkingTask;
class CMSGCAdaptivePolicyCounters;
class ConcurrentMarkSweepGeneration;
class ConcurrentMarkSweepPolicy;
class ConcurrentMarkSweepThread;
class CompactibleFreeListSpace;
class FreeChunk;
class PromotionInfo;
class ScanMarkedObjectsAgainCarefullyClosure;

// A generic CMS bit map. It's the basis for both the CMS marking bit map
// as well as for the mod union table (in each case only a subset of the
// methods are used). This is essentially a wrapper around the BitMap class,
// with one bit per (1<<_shifter) HeapWords. (i.e. for the marking bit map,
// we have _shifter == 0, and for the mod union table we have
// _shifter == CardTableModRefBS::card_shift - LogHeapWordSize.)
// XXX 64-bit issues in BitMap?
class CMSBitMap VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;

  HeapWord*    _bmStartWord;   // base address of range covered by map
  size_t       _bmWordSize;    // map size (in #HeapWords covered)
  const int    _shifter;       // shifts to convert HeapWord to bit position
  VirtualSpace _virtual_space; // underlying the bit map
  BitMap       _bm;            // the bit map itself
 public:
  Mutex* const _lock;          // mutex protecting _bm;

 public:
  // constructor
  CMSBitMap(int shifter, int mutex_rank, const char* mutex_name);

  // allocates the actual storage for the map
  bool allocate(MemRegion mr);
  // field getter
  Mutex* lock() const { return _lock; }
  // locking verifier convenience function
  void assert_locked() const PRODUCT_RETURN;

  // inquiries
  HeapWord* startWord()   const { return _bmStartWord; }
  size_t    sizeInWords() const { return _bmWordSize; }
  size_t    sizeInBits()  const { return _bm.size(); }
  // the following is one past the last word in space
  HeapWord* endWord()     const { return _bmStartWord + _bmWordSize; }

  // reading marks
  bool isMarked(HeapWord* addr) const;
  bool par_isMarked(HeapWord* addr) const; // do not lock checks
  bool isUnmarked(HeapWord* addr) const;
  bool isAllClear() const;

  // writing marks
  void mark(HeapWord* addr);
  // For marking by parallel GC threads;
  // returns true if we did, false if another thread did
  bool par_mark(HeapWord* addr);

  void mark_range(MemRegion mr);
  void par_mark_range(MemRegion mr);
  void mark_large_range(MemRegion mr);
  void par_mark_large_range(MemRegion mr);
  void par_clear(HeapWord* addr); // For unmarking by parallel GC threads.
  void clear_range(MemRegion mr);
  void par_clear_range(MemRegion mr);
  void clear_large_range(MemRegion mr);
  void par_clear_large_range(MemRegion mr);
  void clear_all();
  void clear_all_incrementally();  // Not yet implemented!!

  NOT_PRODUCT(
    // checks the memory region for validity
    void region_invariant(MemRegion mr);
  )

  // iteration
  void iterate(BitMapClosure* cl) {
    _bm.iterate(cl);
  }
  void iterate(BitMapClosure* cl, HeapWord* left, HeapWord* right);
  void dirty_range_iterate_clear(MemRegionClosure* cl);
  void dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl);

  // auxiliary support for iteration
  HeapWord* getNextMarkedWordAddress(HeapWord* addr) const;
  HeapWord* getNextMarkedWordAddress(HeapWord* start_addr,
                                     HeapWord* end_addr) const;
  HeapWord* getNextUnmarkedWordAddress(HeapWord* addr) const;
  HeapWord* getNextUnmarkedWordAddress(HeapWord* start_addr,
                                       HeapWord* end_addr) const;
  MemRegion getAndClearMarkedRegion(HeapWord* addr);
  MemRegion getAndClearMarkedRegion(HeapWord* start_addr,
                                    HeapWord* end_addr);

  // conversion utilities
  HeapWord* offsetToHeapWord(size_t offset) const;
  size_t    heapWordToOffset(HeapWord* addr) const;
  size_t    heapWordDiffToOffsetDiff(size_t diff) const;

  // debugging
  // is this address range covered by the bit-map?
  NOT_PRODUCT(
    bool covers(MemRegion mr) const;
    bool covers(HeapWord* start, size_t size = 0) const;
  )
  void verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) PRODUCT_RETURN;
};

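The _shifter conversion above is worth pinning down: one map bit covers
(1 << _shifter) HeapWords, so heapWordToOffset() divides the word offset from
_bmStartWord by that granularity and offsetToHeapWord() multiplies it back. A
minimal, self-contained sketch of that arithmetic (modeling HeapWord addresses
as plain word indices; the helper names are illustrative, not VM code):

#include <cassert>
#include <cstddef>
#include <cstdint>

// Bit index of the bit covering 'addr', relative to 'base'.
size_t heap_word_to_offset(uintptr_t addr, uintptr_t base, int shifter) {
  return (addr - base) >> shifter;
}

// First word covered by bit 'offset'.
uintptr_t offset_to_heap_word(size_t offset, uintptr_t base, int shifter) {
  return base + (offset << shifter);
}

int main() {
  const uintptr_t base = 0x1000;
  // Marking bit map: _shifter == 0, one bit per HeapWord.
  assert(heap_word_to_offset(base + 8, base, 0) == 8);
  // Mod union table: one bit per card's worth of HeapWords, e.g.
  // card_shift - LogHeapWordSize == 9 - 3 == 6 on a 64-bit VM.
  assert(heap_word_to_offset(base + (3 << 6), base, 6) == 3);
  assert(offset_to_heap_word(3, base, 6) == base + (3 << 6));
  return 0;
}
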
// Represents a marking stack used by the CMS collector.
// Ideally this should be GrowableArray<> just like MSC's marking stack(s).
class CMSMarkStack: public CHeapObj {
  //
  friend class CMSCollector;   // to get at expansion stats further below
  //

  VirtualSpace _virtual_space; // space for the stack
  oop*   _base;                // bottom of stack
  size_t _index;               // one more than last occupied index
  size_t _capacity;            // max #elements
  Mutex  _par_lock;            // an advisory lock used in case of parallel access
  NOT_PRODUCT(size_t _max_depth;) // max depth plumbed during run

 protected:
  size_t _hit_limit;           // we hit max stack size limit
  size_t _failed_double;       // we failed expansion before hitting limit

 public:
  CMSMarkStack():
    _par_lock(Mutex::event, "CMSMarkStack._par_lock", true),
    _hit_limit(0),
    _failed_double(0) {}

  bool allocate(size_t size);

  size_t capacity() const { return _capacity; }

  oop pop() {
    if (!isEmpty()) {
      return _base[--_index];
    }
    return NULL;
  }

  bool push(oop ptr) {
    if (isFull()) {
      return false;
    } else {
      _base[_index++] = ptr;
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index));
      return true;
    }
  }

  bool isEmpty() const { return _index == 0; }
  bool isFull()  const {
    assert(_index <= _capacity, "buffer overflow");
    return _index == _capacity;
  }

  size_t length() { return _index; }

  // "Parallel versions" of some of the above
  oop par_pop() {
    // lock and pop
    MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
    return pop();
  }

  bool par_push(oop ptr) {
    // lock and push
    MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
    return push(ptr);
  }

  // Forcibly reset the stack, losing all of its contents.
  void reset() {
    _index = 0;
  }

  // Expand the stack, typically in response to an overflow condition.
  void expand();

  // Compute the least valued stack element.
  oop least_value(HeapWord* low) {
    oop least = (oop)low;
    for (size_t i = 0; i < _index; i++) {
      least = MIN2(least, _base[i]);
    }
    return least;
  }

  // Exposed here to allow stack expansion in || case.
  Mutex* par_lock() { return &_par_lock; }
};

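The push()/pop() contract above makes overflow the caller's problem: a failed
push() returns false and the collector diverts the object elsewhere (the
overflow list) or expands the stack. A toy, self-contained model of that
bounded-stack contract (names are illustrative, not the VM's):

#include <cassert>
#include <cstddef>
#include <vector>

class BoundedStack {
  std::vector<const void*> _base;  // fixed-capacity backing store
  size_t _index;                   // one more than last occupied index
 public:
  explicit BoundedStack(size_t capacity) : _base(capacity), _index(0) {}
  bool is_empty() const { return _index == 0; }
  bool is_full()  const { return _index == _base.size(); }
  bool push(const void* p) {       // false signals overflow to the caller
    if (is_full()) return false;
    _base[_index++] = p;
    return true;
  }
  const void* pop() { return is_empty() ? nullptr : _base[--_index]; }
};

int main() {
  BoundedStack s(2);
  int a, b, c;
  assert(s.push(&a) && s.push(&b));
  assert(!s.push(&c));  // overflow: CMS would expand or spill to an overflow list
  assert(s.pop() == &b && s.pop() == &a && s.pop() == nullptr);
  return 0;
}
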
class CardTableRS;
class CMSParGCThreadState;

class ModUnionClosure: public MemRegionClosure {
 protected:
  CMSBitMap* _t;
 public:
  ModUnionClosure(CMSBitMap* t): _t(t) { }
  void do_MemRegion(MemRegion mr);
};

class ModUnionClosurePar: public ModUnionClosure {
 public:
  ModUnionClosurePar(CMSBitMap* t): ModUnionClosure(t) { }
  void do_MemRegion(MemRegion mr);
};

// Survivor Chunk Array in support of parallelization of
// Survivor Space rescan.
class ChunkArray: public CHeapObj {
  size_t _index;
  size_t _capacity;
  HeapWord** _array;   // storage for array

 public:
  ChunkArray() : _index(0), _capacity(0), _array(NULL) {}
  ChunkArray(HeapWord** a, size_t c):
    _index(0), _capacity(c), _array(a) {}

  HeapWord** array() { return _array; }
  void set_array(HeapWord** a) { _array = a; }

  size_t capacity() { return _capacity; }
  void set_capacity(size_t c) { _capacity = c; }

  size_t end() {
    assert(_index < capacity(), "_index out of bounds");
    return _index;
  }  // exclusive

  HeapWord* nth(size_t n) {
    assert(n < end(), "Out of bounds access");
    return _array[n];
  }

  void reset() {
    _index = 0;
  }

  void record_sample(HeapWord* p, size_t sz) {
    // For now we do not do anything with the size.
    if (_index < _capacity) {
      _array[_index++] = p;
    }
  }
};

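Note that record_sample() above saturates rather than grows: once _index
reaches _capacity, further samples are silently dropped, which is acceptable
because the chunk array only subdivides work for parallel rescan. A compact,
self-contained model of that behaviour (illustrative names):

#include <cassert>
#include <cstddef>

struct SaturatingSampler {
  const void** array;   // caller-provided storage
  size_t index;
  size_t capacity;
  void record(const void* p) {
    if (index < capacity) array[index++] = p;  // else: sample dropped
  }
};

int main() {
  const void* storage[2];
  SaturatingSampler s = { storage, 0, 2 };
  int x;
  s.record(&x); s.record(&x); s.record(&x);  // third sample is ignored
  assert(s.index == 2);
  return 0;
}
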
//
// Timing, allocation and promotion statistics for gc scheduling and incremental
// mode pacing. Most statistics are exponential averages.
//
class CMSStats VALUE_OBJ_CLASS_SPEC {
 private:
  ConcurrentMarkSweepGeneration* const _cms_gen;   // The cms (old) gen.

  // The following are exponential averages with factor alpha:
  //   avg = (100 - alpha) * avg + alpha * cur_sample
  //
  //   The durations measure:  end_time[n] - start_time[n]
  //   The periods measure:    start_time[n] - start_time[n-1]
  //
  // The cms period and duration include only concurrent collections; time spent
  // in foreground cms collections due to System.gc() or because of a failure to
  // keep up is not included.
  //
  // There are 3 alphas to "bootstrap" the statistics. The _saved_alpha is the
  // real value, but is used only after the first period. A value of 100 is
  // used for the first sample so it gets the entire weight.
  unsigned int _saved_alpha; // 0-100
  unsigned int _gc0_alpha;
  unsigned int _cms_alpha;

  double _gc0_duration;
  double _gc0_period;
  size_t _gc0_promoted;           // bytes promoted per gc0
  double _cms_duration;
  double _cms_duration_pre_sweep; // time from initiation to start of sweep
  double _cms_duration_per_mb;
  double _cms_period;
  size_t _cms_allocated;          // bytes of direct allocation per gc0 period

  // Timers.
  elapsedTimer _cms_timer;
  TimeStamp    _gc0_begin_time;
  TimeStamp    _cms_begin_time;
  TimeStamp    _cms_end_time;

  // Snapshots of the amount used in the CMS generation.
  size_t _cms_used_at_gc0_begin;
  size_t _cms_used_at_gc0_end;
  size_t _cms_used_at_cms_begin;

  // Used to prevent the duty cycle from being reduced in the middle of a cms
  // cycle.
  bool _allow_duty_cycle_reduction;

  enum {
    _GC0_VALID = 0x1,
    _CMS_VALID = 0x2,
    _ALL_VALID = _GC0_VALID | _CMS_VALID
  };

  unsigned int _valid_bits;

  unsigned int _icms_duty_cycle;  // icms duty cycle (0-100)

 protected:

  // Return a duty cycle that avoids wild oscillations, by limiting the amount
  // of change between old_duty_cycle and new_duty_cycle (the latter is treated
  // as a recommended value).
  static unsigned int icms_damped_duty_cycle(unsigned int old_duty_cycle,
                                             unsigned int new_duty_cycle);
  unsigned int icms_update_duty_cycle_impl();

  // In support of adjusting of cms trigger ratios based on history
  // of concurrent mode failure.
  double cms_free_adjustment_factor(size_t free) const;
  void   adjust_cms_free_adjustment_factor(bool fail, size_t free);

 public:
  CMSStats(ConcurrentMarkSweepGeneration* cms_gen,
           unsigned int alpha = CMSExpAvgFactor);

  // Whether or not the statistics contain valid data; higher level statistics
  // cannot be called until this returns true (they require at least one young
  // gen and one cms cycle to have completed).
  bool valid() const;

  // Record statistics.
  void record_gc0_begin();
  void record_gc0_end(size_t cms_gen_bytes_used);
  void record_cms_begin();
  void record_cms_end();

  // Allow management of the cms timer, which must be stopped/started around
  // yield points.
  elapsedTimer& cms_timer()     { return _cms_timer; }
  void start_cms_timer()        { _cms_timer.start(); }
  void stop_cms_timer()         { _cms_timer.stop(); }

  // Basic statistics; units are seconds or bytes.
  double gc0_period() const     { return _gc0_period; }
  double gc0_duration() const   { return _gc0_duration; }
  size_t gc0_promoted() const   { return _gc0_promoted; }
  double cms_period() const     { return _cms_period; }
  double cms_duration() const   { return _cms_duration; }
  double cms_duration_per_mb() const { return _cms_duration_per_mb; }
  size_t cms_allocated() const  { return _cms_allocated; }

  size_t cms_used_at_gc0_end() const { return _cms_used_at_gc0_end; }

  // Seconds since the last background cms cycle began or ended.
  double cms_time_since_begin() const;
  double cms_time_since_end() const;

  // Higher level statistics--caller must check that valid() returns true before
  // calling.

  // Returns bytes promoted per second of wall clock time.
  double promotion_rate() const;

  // Returns bytes directly allocated per second of wall clock time.
  double cms_allocation_rate() const;

  // Rate at which space in the cms generation is being consumed (sum of the
  // above two).
  double cms_consumption_rate() const;

  // Returns an estimate of the number of seconds until the cms generation will
  // fill up, assuming no collection work is done.
  double time_until_cms_gen_full() const;

  // Returns an estimate of the number of seconds remaining until
  // the cms generation collection should start.
  double time_until_cms_start() const;

  // End of higher level statistics.

  // Returns the cms incremental mode duty cycle, as a percentage (0-100).
  unsigned int icms_duty_cycle() const { return _icms_duty_cycle; }

  // Update the duty cycle and return the new value.
  unsigned int icms_update_duty_cycle();

  // Debugging.
  void print_on(outputStream* st) const PRODUCT_RETURN;
  void print() const { print_on(gclog_or_tty); }
};

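Since the alphas above are percentages (0-100), the update in the comment
expands to a weighted blend divided by 100, and the first-sample alpha of 100
makes the average adopt the sample verbatim. A self-contained sketch of that
bootstrap behaviour (the helper is illustrative, not the VM's):

#include <cassert>

double exp_avg(double avg, double sample, unsigned int alpha /* 0-100 */) {
  return ((100.0 - alpha) * avg + alpha * sample) / 100.0;
}

int main() {
  double avg = 0.0;
  avg = exp_avg(avg, 8.0, 100);  // first sample gets the entire weight
  assert(avg == 8.0);
  avg = exp_avg(avg, 4.0, 25);   // thereafter: 75% history, 25% new sample
  assert(avg == 7.0);
  return 0;
}
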
// A closure related to weak references processing which
// we embed in the CMSCollector, since we need to pass
// it to the reference processor for secondary filtering
// of references based on reachability of referent;
// see role of _is_alive_non_header closure in the
// ReferenceProcessor class.
// For objects in the CMS generation, this closure checks
// if the object is "live" (reachable). Used in weak
// reference processing.
class CMSIsAliveClosure: public BoolObjectClosure {
  const MemRegion  _span;
  const CMSBitMap* _bit_map;

  friend class CMSCollector;
 public:
  CMSIsAliveClosure(MemRegion span,
                    CMSBitMap* bit_map):
    _span(span),
    _bit_map(bit_map) {
    assert(!span.is_empty(), "Empty span could spell trouble");
  }

  void do_object(oop obj) {
    assert(false, "not to be invoked");
  }

  bool do_object_b(oop obj);
};


// Implements AbstractRefProcTaskExecutor for CMS.
class CMSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
 public:

  CMSRefProcTaskExecutor(CMSCollector& collector)
    : _collector(collector)
  { }

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
 private:
  CMSCollector& _collector;
};


class CMSCollector: public CHeapObj {
  friend class VMStructs;
  friend class ConcurrentMarkSweepThread;
  friend class ConcurrentMarkSweepGeneration;
  friend class CompactibleFreeListSpace;
  friend class CMSParRemarkTask;
  friend class CMSConcMarkingTask;
  friend class CMSRefProcTaskProxy;
  friend class CMSRefProcTaskExecutor;
  friend class ScanMarkedObjectsAgainCarefullyClosure;  // for sampling eden
  friend class SurvivorSpacePrecleanClosure;            // --- ditto -------
  friend class PushOrMarkClosure;             // to access _restart_addr
  friend class Par_PushOrMarkClosure;         // to access _restart_addr
  friend class MarkFromRootsClosure;          //  -- ditto --
                                              // ... and for clearing cards
  friend class Par_MarkFromRootsClosure;      // to access _restart_addr
                                              // ... and for clearing cards
  friend class Par_ConcMarkingClosure;        // to access _restart_addr etc.
  friend class MarkFromRootsVerifyClosure;    // to access _restart_addr
  friend class PushAndMarkVerifyClosure;      //  -- ditto --
  friend class MarkRefsIntoAndScanClosure;    // to access _overflow_list
  friend class PushAndMarkClosure;            //  -- ditto --
  friend class Par_PushAndMarkClosure;        //  -- ditto --
  friend class CMSKeepAliveClosure;           //  -- ditto --
  friend class CMSDrainMarkingStackClosure;   //  -- ditto --
  friend class CMSInnerParMarkAndPushClosure; //  -- ditto --
  NOT_PRODUCT(friend class ScanMarkedObjectsAgainClosure;) // assertion on _overflow_list
  friend class ReleaseForegroundGC;  // to access _foregroundGCShouldWait
  friend class VM_CMS_Operation;
  friend class VM_CMS_Initial_Mark;
  friend class VM_CMS_Final_Remark;

 private:
  jlong _time_of_last_gc;
  void update_time_of_last_gc(jlong now) {
    _time_of_last_gc = now;
  }

  OopTaskQueueSet* _task_queues;

  // Overflow list of grey objects, threaded through mark-word.
  // Manipulated with CAS in the parallel/multi-threaded case.
  oop _overflow_list;
  // The following array-pair keeps track of mark words
  // displaced for accommodating overflow list above.
  // This code will likely be revisited under RFE#4922830.
  GrowableArray<oop>*     _preserved_oop_stack;
  GrowableArray<markOop>* _preserved_mark_stack;

  int* _hash_seed;

  // In support of multi-threaded concurrent phases
  YieldingFlexibleWorkGang* _conc_workers;

  // Performance Counters
  CollectorCounters* _gc_counters;

  // Initialization Errors
  bool _completed_initialization;

  // In support of ExplicitGCInvokesConcurrent
  static bool _full_gc_requested;
  unsigned int _collection_count_start;

  // Should we unload classes this concurrent cycle?
  bool _should_unload_classes;
  unsigned int _concurrent_cycles_since_last_unload;
  unsigned int concurrent_cycles_since_last_unload() const {
    return _concurrent_cycles_since_last_unload;
  }
  // Did we (allow) unload classes in the previous concurrent cycle?
  bool unloaded_classes_last_cycle() const {
    return concurrent_cycles_since_last_unload() == 0;
  }

  // Root scanning options for perm gen
  int _roots_scanning_options;
  int roots_scanning_options() const      { return _roots_scanning_options; }
  void add_root_scanning_option(int o)    { _roots_scanning_options |= o;   }
  void remove_root_scanning_option(int o) { _roots_scanning_options &= ~o;  }

  // Verification support
  CMSBitMap _verification_mark_bm;
  void verify_after_remark_work_1();
  void verify_after_remark_work_2();

  // true if any verification flag is on.
  bool _verifying;
  bool verifying() const { return _verifying; }
  void set_verifying(bool v) { _verifying = v; }

  // Collector policy
  ConcurrentMarkSweepPolicy* _collector_policy;
  ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }

  // Check whether the gc time limit has been
  // exceeded and set the size policy flag
  // appropriately.
  void check_gc_time_limit();
  // XXX Move these to CMSStats ??? FIX ME !!!
  elapsedTimer _inter_sweep_timer;   // time between sweeps
  elapsedTimer _intra_sweep_timer;   // time _in_ sweeps
  // padded decaying average estimates of the above
  AdaptivePaddedAverage _inter_sweep_estimate;
  AdaptivePaddedAverage _intra_sweep_estimate;

 protected:
  ConcurrentMarkSweepGeneration* _cmsGen;  // old gen (CMS)
  ConcurrentMarkSweepGeneration* _permGen; // perm gen
  MemRegion                      _span;    // span covering above two
  CardTableRS*                   _ct;      // card table

  // CMS marking support structures
  CMSBitMap     _markBitMap;
  CMSBitMap     _modUnionTable;
  CMSMarkStack  _markStack;
  CMSMarkStack  _revisitStack;            // used to keep track of klassKlass objects
                                          // to revisit
  CMSBitMap     _perm_gen_verify_bit_map; // Mark bit map for perm gen verification support.

  HeapWord*     _restart_addr; // in support of marking stack overflow
  void lower_restart_addr(HeapWord* low);

  // Counters in support of marking stack / work queue overflow handling:
  // a non-zero value indicates certain types of overflow events during
  // the current CMS cycle and could lead to stack resizing efforts at
  // an opportune future time.
  size_t        _ser_pmc_preclean_ovflw;
  size_t        _ser_pmc_remark_ovflw;
  size_t        _par_pmc_remark_ovflw;
  size_t        _ser_kac_preclean_ovflw;
  size_t        _ser_kac_ovflw;
  size_t        _par_kac_ovflw;
  NOT_PRODUCT(ssize_t _num_par_pushes;)

  // ("Weak") Reference processing support
  ReferenceProcessor* _ref_processor;
  CMSIsAliveClosure   _is_alive_closure;
      // keep this textually after _markBitMap and _span; c'tor dependency

  ConcurrentMarkSweepThread* _cmsThread;  // the thread doing the work
  ModUnionClosure    _modUnionClosure;
  ModUnionClosurePar _modUnionClosurePar;

  // CMS abstract state machine
  // initial_state: Idling
  // next_state(Idling) = {Marking}
  // next_state(Marking) = {Precleaning, Sweeping}
  // next_state(Precleaning) = {AbortablePreclean, FinalMarking}
  // next_state(AbortablePreclean) = {FinalMarking}
  // next_state(FinalMarking) = {Sweeping}
  // next_state(Sweeping) = {Resizing}
  // next_state(Resizing) = {Resetting}
  // next_state(Resetting) = {Idling}
  // The numeric values below are chosen so that:
  // . _collectorState <= Idling == post-sweep && pre-mark
  // . _collectorState in (Idling, Sweeping) == {initial,final}marking ||
  //                                            precleaning || abortablePreclean
 public:
  enum CollectorState {
    Resizing            = 0,
    Resetting           = 1,
    Idling              = 2,
    InitialMarking      = 3,
    Marking             = 4,
    Precleaning         = 5,
    AbortablePreclean   = 6,
    FinalMarking        = 7,
    Sweeping            = 8
  };
 protected:
  static CollectorState _collectorState;

  // State related to prologue/epilogue invocation for my generations
  bool _between_prologue_and_epilogue;

  // Signalling/State related to coordination between fore- and background GC
  // Note: When the baton has been passed from background GC to foreground GC,
  // _foregroundGCIsActive is true and _foregroundGCShouldWait is false.
  static bool _foregroundGCIsActive;    // true iff foreground collector is active or
                                        // wants to go active
  static bool _foregroundGCShouldWait;  // true iff background GC is active and has not
                                        // yet passed the baton to the foreground GC

  // Support for CMSScheduleRemark (abortable preclean)
  bool _abort_preclean;
  bool _start_sampling;

  int    _numYields;
  size_t _numDirtyCards;
  size_t _sweep_count;
  // number of full gc's since the last concurrent gc.
  uint   _full_gcs_since_conc_gc;

  // occupancy used for bootstrapping stats
  double _bootstrap_occupancy;

  // timer
  elapsedTimer _timer;

  // Timing, allocation and promotion statistics, used for scheduling.
  CMSStats _stats;

  // Allocation limits installed in the young gen, used only in
  // CMSIncrementalMode. When an allocation in the young gen would cross one of
  // these limits, the cms generation is notified and the cms thread is started
  // or stopped, respectively.
  HeapWord* _icms_start_limit;
  HeapWord* _icms_stop_limit;

  enum CMS_op_type {
    CMS_op_checkpointRootsInitial,
    CMS_op_checkpointRootsFinal
  };

  void do_CMS_operation(CMS_op_type op);
  bool stop_world_and_do(CMS_op_type op);

  OopTaskQueueSet* task_queues()    { return _task_queues; }
  int*             hash_seed(int i) { return &_hash_seed[i]; }
  YieldingFlexibleWorkGang* conc_workers() { return _conc_workers; }

  // Support for parallelizing Eden rescan in CMS remark phase
  void sample_eden(); // ... sample Eden space top

 private:
  // Support for parallelizing young gen rescan in CMS remark phase
  Generation* _young_gen;           // the younger gen
  HeapWord**  _top_addr;            // ... Top of Eden
  HeapWord**  _end_addr;            // ... End of Eden
  HeapWord**  _eden_chunk_array;    // ... Eden partitioning array
  size_t      _eden_chunk_index;    // ... top (exclusive) of array
  size_t      _eden_chunk_capacity; // ... max entries in array

  // Support for parallelizing survivor space rescan
  HeapWord**  _survivor_chunk_array;
  size_t      _survivor_chunk_index;
  size_t      _survivor_chunk_capacity;
  size_t*     _cursor;
  ChunkArray* _survivor_plab_array;

  // Support for marking stack overflow handling
  bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack);
  bool par_take_from_overflow_list(size_t num, OopTaskQueue* to_work_q);
  void push_on_overflow_list(oop p);
  void par_push_on_overflow_list(oop p);
  // the following is, obviously, not, in general, "MT-stable"
  bool overflow_list_is_empty() const;

  void preserve_mark_if_necessary(oop p);
  void par_preserve_mark_if_necessary(oop p);
  void preserve_mark_work(oop p, markOop m);
  void restore_preserved_marks_if_any();
  NOT_PRODUCT(bool no_preserved_marks() const;)
  // in support of testing overflow code
  NOT_PRODUCT(int _overflow_counter;)
  NOT_PRODUCT(bool simulate_overflow();)     // sequential
  NOT_PRODUCT(bool par_simulate_overflow();) // MT version

  // CMS work methods
  void checkpointRootsInitialWork(bool asynch); // initial checkpoint work

  // a return value of false indicates failure due to stack overflow
  bool markFromRootsWork(bool asynch); // concurrent marking work

 public:   // FIX ME!!! only for testing
  bool do_marking_st(bool asynch);     // single-threaded marking
  bool do_marking_mt(bool asynch);     // multi-threaded marking

 private:

  // concurrent precleaning work
  size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen,
                                  ScanMarkedObjectsAgainCarefullyClosure* cl);
  size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen,
                             ScanMarkedObjectsAgainCarefullyClosure* cl);
  // Does precleaning work, returning a quantity indicative of
  // the amount of "useful work" done.
  size_t preclean_work(bool clean_refs, bool clean_survivors);
  void abortable_preclean(); // Preclean while looking for possible abort
  void initialize_sequential_subtasks_for_young_gen_rescan(int i);
  // Helper function for above; merge-sorts the per-thread plab samples
  void merge_survivor_plab_arrays(ContiguousSpace* surv);
  // Resets (i.e. clears) the per-thread plab sample vectors
  void reset_survivor_plab_arrays();

  // final (second) checkpoint work
  void checkpointRootsFinalWork(bool asynch, bool clear_all_soft_refs,
                                bool init_mark_was_synchronous);
  // work routine for parallel version of remark
  void do_remark_parallel();
  // work routine for non-parallel version of remark
  void do_remark_non_parallel();
  // reference processing work routine (during second checkpoint)
  void refProcessingWork(bool asynch, bool clear_all_soft_refs);

  // concurrent sweeping work
  void sweepWork(ConcurrentMarkSweepGeneration* gen, bool asynch);

  // (concurrent) resetting of support data structures
  void reset(bool asynch);

  // Clear _expansion_cause fields of constituent generations
  void clear_expansion_cause();

  // An auxiliary method used to record the ends of
  // used regions of each generation to limit the extent of sweep
  void save_sweep_limits();

  // Resize the generations included in the collector.
  void compute_new_size();

  // A work method used by foreground collection to determine
  // what type of collection (compacting or not, continuing or fresh)
  // it should do.
  void decide_foreground_collection_type(bool clear_all_soft_refs,
                                         bool* should_compact, bool* should_start_over);

  // A work method used by the foreground collector to do
  // a mark-sweep-compact.
  void do_compaction_work(bool clear_all_soft_refs);

  // A work method used by the foreground collector to do
  // a mark-sweep, after taking over from a possibly on-going
  // concurrent mark-sweep collection.
  void do_mark_sweep_work(bool clear_all_soft_refs,
                          CollectorState first_state, bool should_start_over);

  // If the background GC is active, acquire control from the background
  // GC and do the collection.
  void acquire_control_and_collect(bool full, bool clear_all_soft_refs);

  // For synchronizing passing of control from background to foreground
  // GC. waitForForegroundGC() is called by the background
  // collector. If it had to wait for a foreground collection,
  // it returns true and the background collection should assume
  // that the collection was finished by the foreground
  // collector.
  bool waitForForegroundGC();

  // Incremental mode triggering: recompute the icms duty cycle and set the
  // allocation limits in the young gen.
  void icms_update_allocation_limits();

  size_t block_size_using_printezis_bits(HeapWord* addr) const;
  size_t block_size_if_printezis_bits(HeapWord* addr) const;
  HeapWord* next_card_start_after_block(HeapWord* addr) const;

  void setup_cms_unloading_and_verification_state();
 public:
  CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
               ConcurrentMarkSweepGeneration* permGen,
               CardTableRS*                   ct,
               ConcurrentMarkSweepPolicy*     cp);
  ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }

  ReferenceProcessor* ref_processor() { return _ref_processor; }
  void ref_processor_init();

  Mutex* bitMapLock() const { return _markBitMap.lock(); }
  static CollectorState abstract_state() { return _collectorState; }

  bool should_abort_preclean() const; // Whether preclean should be aborted.
  size_t get_eden_used() const;
  size_t get_eden_capacity() const;

  ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }

  // locking checks
  NOT_PRODUCT(static bool have_cms_token();)

  // XXXPERM bool should_collect(bool full, size_t size, bool tlab);
  bool shouldConcurrentCollect();

  void collect(bool   full,
               bool   clear_all_soft_refs,
               size_t size,
               bool   tlab);
  void collect_in_background(bool clear_all_soft_refs);
  void collect_in_foreground(bool clear_all_soft_refs);

  // In support of ExplicitGCInvokesConcurrent
  static void request_full_gc(unsigned int full_gc_count);
  // Should we unload classes in a particular concurrent cycle?
  bool should_unload_classes() const {
    return _should_unload_classes;
  }
  bool update_should_unload_classes();

  void direct_allocated(HeapWord* start, size_t size);

  // Object is dead if not marked and current phase is sweeping.
  bool is_dead_obj(oop obj) const;

  // After a promotion (of "start"), do any necessary marking.
  // If "par", then it's being done by a parallel GC thread.
  // The last two args indicate if we need precise marking
  // and if so the size of the object so it can be dirtied
  // in its entirety.
  void promoted(bool par, HeapWord* start,
                bool is_obj_array, size_t obj_size);

  HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
                                     size_t word_size);

  void getFreelistLocks() const;
  void releaseFreelistLocks() const;
  bool haveFreelistLocks() const;

  // GC prologue and epilogue
  void gc_prologue(bool full);
  void gc_epilogue(bool full);

  jlong time_of_last_gc(jlong now) {
    if (_collectorState <= Idling) {
      // gc not in progress
      return _time_of_last_gc;
    } else {
      // collection in progress
      return now;
    }
  }

  // Support for parallel remark of survivor space
  void* get_data_recorder(int thr_num);

  CMSBitMap* markBitMap() { return &_markBitMap; }
  void directAllocated(HeapWord* start, size_t size);

  // main CMS steps and related support
  void checkpointRootsInitial(bool asynch);
  bool markFromRoots(bool asynch);  // a return value of false indicates failure
                                    // due to stack overflow
  void preclean();
  void checkpointRootsFinal(bool asynch, bool clear_all_soft_refs,
                            bool init_mark_was_synchronous);
  void sweep(bool asynch);

  // Check that the currently executing thread is the expected
  // one (foreground collector or background collector).
  static void check_correct_thread_executing() PRODUCT_RETURN;
  // XXXPERM void print_statistics() PRODUCT_RETURN;

  bool is_cms_reachable(HeapWord* addr);

  // Performance Counter Support
  CollectorCounters* counters() { return _gc_counters; }

  // timer stuff
  void   startTimer() { assert(!_timer.is_active(), "Error"); _timer.start();   }
  void   stopTimer()  { assert( _timer.is_active(), "Error"); _timer.stop();    }
  void   resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset();   }
  double timerValue() { assert(!_timer.is_active(), "Error"); return _timer.seconds(); }

  int  yields()          { return _numYields; }
  void resetYields()     { _numYields = 0;    }
  void incrementYields() { _numYields++;      }
  void resetNumDirtyCards()               { _numDirtyCards = 0; }
  void incrementNumDirtyCards(size_t num) { _numDirtyCards += num; }
  size_t numDirtyCards()                  { return _numDirtyCards; }

  static bool foregroundGCShouldWait() { return _foregroundGCShouldWait; }
  static void set_foregroundGCShouldWait(bool v) { _foregroundGCShouldWait = v; }
  static bool foregroundGCIsActive() { return _foregroundGCIsActive; }
  static void set_foregroundGCIsActive(bool v) { _foregroundGCIsActive = v; }
  size_t sweep_count() const     { return _sweep_count; }
  void   increment_sweep_count() { _sweep_count++; }

  // Timers/stats for gc scheduling and incremental mode pacing.
  CMSStats& stats() { return _stats; }

  // Convenience methods that check whether CMSIncrementalMode is enabled and
  // forward to the corresponding methods in ConcurrentMarkSweepThread.
  static void start_icms();
  static void stop_icms();    // Called at the end of the cms cycle.
  static void disable_icms(); // Called before a foreground collection.
  static void enable_icms();  // Called after a foreground collection.
  void icms_wait();           // Called at yield points.

  // Adaptive size policy
  CMSAdaptiveSizePolicy* size_policy();
  CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();

  // debugging
  void verify(bool);
  bool verify_after_remark();
  void verify_ok_to_terminate() const PRODUCT_RETURN;
  void verify_work_stacks_empty() const PRODUCT_RETURN;
  void verify_overflow_empty() const PRODUCT_RETURN;

  // convenience methods in support of debugging
  static const size_t skip_header_HeapWords() PRODUCT_RETURN0;
  HeapWord* block_start(const void* p) const PRODUCT_RETURN0;

  // accessors
  CMSMarkStack* verification_mark_stack() { return &_markStack; }
  CMSBitMap*    verification_mark_bm()    { return &_verification_mark_bm; }

  // Get the bit map with a perm gen "deadness" information.
  CMSBitMap* perm_gen_verify_bit_map()    { return &_perm_gen_verify_bit_map; }

  // Initialization errors
  bool completed_initialization() { return _completed_initialization; }
};

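The CollectorState machine documented inside CMSCollector above admits a
compact legality check; a self-contained sketch that mirrors the transitions
listed in that comment (the helper itself is illustrative, not VM code):

#include <cassert>

enum CollectorState {
  Resizing = 0, Resetting, Idling, InitialMarking, Marking,
  Precleaning, AbortablePreclean, FinalMarking, Sweeping
};

bool legal_transition(CollectorState from, CollectorState to) {
  switch (from) {
    case Idling:            return to == Marking;
    case Marking:           return to == Precleaning || to == Sweeping;
    case Precleaning:       return to == AbortablePreclean || to == FinalMarking;
    case AbortablePreclean: return to == FinalMarking;
    case FinalMarking:      return to == Sweeping;
    case Sweeping:          return to == Resizing;
    case Resizing:          return to == Resetting;
    case Resetting:         return to == Idling;
    default:                return false;  // InitialMarking is not listed in the comment
  }
}

int main() {
  assert(legal_transition(Idling, Marking));
  assert(!legal_transition(Sweeping, Idling));  // must pass through Resizing/Resetting
  return 0;
}
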
class CMSExpansionCause : public AllStatic {
 public:
  enum Cause {
    _no_expansion,
    _satisfy_free_ratio,
    _satisfy_promotion,
    _satisfy_allocation,
    _allocate_par_lab,
    _allocate_par_spooling_space,
    _adaptive_size_policy
  };
  // Return a string describing the cause of the expansion.
  static const char* to_string(CMSExpansionCause::Cause cause);
};

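to_string() above maps each Cause to a human-readable label for logging; a
plausible shape for such a mapping (the exact strings are illustrative, not
taken from the VM source):

#include <cassert>
#include <cstring>

namespace sketch {
  enum Cause {
    _no_expansion, _satisfy_free_ratio, _satisfy_promotion,
    _satisfy_allocation, _allocate_par_lab, _allocate_par_spooling_space,
    _adaptive_size_policy
  };
  const char* to_string(Cause cause) {
    switch (cause) {
      case _no_expansion:                return "No expansion";
      case _satisfy_free_ratio:          return "Free ratio";
      case _satisfy_promotion:           return "Satisfy promotion";
      case _satisfy_allocation:          return "Satisfy allocation";
      case _allocate_par_lab:            return "Allocate par lab";
      case _allocate_par_spooling_space: return "Allocate par spooling space";
      case _adaptive_size_policy:        return "Adaptive size policy";
      default:                           return "unknown";
    }
  }
}

int main() {
  assert(std::strcmp(sketch::to_string(sketch::_no_expansion), "No expansion") == 0);
  return 0;
}
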
998 class ConcurrentMarkSweepGeneration: public CardGeneration { | |
999 friend class VMStructs; | |
1000 friend class ConcurrentMarkSweepThread; | |
1001 friend class ConcurrentMarkSweep; | |
1002 friend class CMSCollector; | |
1003 protected: | |
1004 static CMSCollector* _collector; // the collector that collects us | |
1005 CompactibleFreeListSpace* _cmsSpace; // underlying space (only one for now) | |
1006 | |
1007 // Performance Counters | |
1008 GenerationCounters* _gen_counters; | |
1009 GSpaceCounters* _space_counters; | |
1010 | |
1011 // Words directly allocated, used by CMSStats. | |
1012 size_t _direct_allocated_words; | |
1013 | |
1014 // Non-product stat counters | |
1015 NOT_PRODUCT( | |
1016 int _numObjectsPromoted; | |
1017 int _numWordsPromoted; | |
1018 int _numObjectsAllocated; | |
1019 int _numWordsAllocated; | |
1020 ) | |
1021 | |
1022 // Used for sizing decisions | |
1023 bool _incremental_collection_failed; | |
1024 bool incremental_collection_failed() { | |
1025 return _incremental_collection_failed; | |
1026 } | |
1027 void set_incremental_collection_failed() { | |
1028 _incremental_collection_failed = true; | |
1029 } | |
1030 void clear_incremental_collection_failed() { | |
1031 _incremental_collection_failed = false; | |
1032 } | |
1033 | |
94
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
0
diff
changeset
|
1034 // accessors |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
0
diff
changeset
|
1035 void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;} |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
0
diff
changeset
|
1036 CMSExpansionCause::Cause expansion_cause() const { return _expansion_cause; } |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
0
diff
changeset
|
1037 |
0 | 1038 private: |
1039 // For parallel young-gen GC support. | |
1040 CMSParGCThreadState** _par_gc_thread_states; | |
1041 | |
1042 // Reason generation was expanded | |
1043 CMSExpansionCause::Cause _expansion_cause; | |
1044 | |
1045 // In support of MinChunkSize being larger than min object size | |
1046 const double _dilatation_factor; | |
1047 | |
1048 enum CollectionTypes { | |
1049 Concurrent_collection_type = 0, | |
1050 MS_foreground_collection_type = 1, | |
1051 MSC_foreground_collection_type = 2, | |
1052 Unknown_collection_type = 3 | |
1053 }; | |
1054 | |
1055 CollectionTypes _debug_collection_type; | |
1056 | |
94
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
0
diff
changeset
|
1057 // Fraction of current occupancy at which to start a CMS collection which |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
0
diff
changeset
|
1058 // will collect this generation (at least). |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
0
diff
changeset
|
1059 double _initiating_occupancy; |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
0
diff
changeset
|
1060 |
0 | 1061 protected: |
1062 // Shrink generation by specified size (returns false if unable to shrink) | |
1063 virtual void shrink_by(size_t bytes); | |
1064 | |
1065 // Update statistics for GC | |
1066 virtual void update_gc_stats(int level, bool full); | |
1067 | |
1068 // Maximum available space in the generation (including uncommitted) | |
1069 // space. | |
1070 size_t max_available() const; | |
1071 | |
94
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
0
diff
changeset
|
1072 // getter and initializer for _initiating_occupancy field. |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
0
diff
changeset
|
1073 double initiating_occupancy() const { return _initiating_occupancy; } |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
0
diff
changeset
|
1074 void init_initiating_occupancy(intx io, intx tr); |
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
0
diff
changeset
|
1075 |
0 | 1076 public: |
1077 ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size, | |
1078 int level, CardTableRS* ct, | |
1079 bool use_adaptive_freelists, | |
1080 FreeBlockDictionary::DictionaryChoice); | |
1081 | |
1082 // Accessors | |
1083 CMSCollector* collector() const { return _collector; } | |
1084 static void set_collector(CMSCollector* collector) { | |
1085 assert(_collector == NULL, "already set"); | |
1086 _collector = collector; | |
1087 } | |
1088 CompactibleFreeListSpace* cmsSpace() const { return _cmsSpace; } | |
1089 | |
1090 Mutex* freelistLock() const; | |
1091 | |
1092 virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; } | |
1093 | |
1094 // Adaptive size policy | |
1095 CMSAdaptiveSizePolicy* size_policy(); | |
1096 | |
1097 bool refs_discovery_is_atomic() const { return false; } | |
1098 bool refs_discovery_is_mt() const { | |
1099 // Note: CMS does MT-discovery during the parallel-remark | |
1100 // phases. Use ReferenceProcessorMTMutator to make refs | |
1101 // discovery MT-safe during such phases or other parallel | |
1102 // discovery phases in the future. This may all go away | |
1103 // if/when we decide that refs discovery is sufficiently | |
1104 // rare that the cost of the CAS's involved is in the | |
1105 // noise. That's a measurement that should be done, and | |
1106 // the code simplified if that turns out to be the case. | |
1107 return false; | |
1108 } | |
1109 | |
1110 // Override | |
1111 virtual void ref_processor_init(); | |
1112 | |
271
818a18cd69a8
6730514: assertion failure in mangling code when expanding by 0 bytes
jmasa
parents:
196
diff
changeset
|
1113 // Grow generation by specified size (returns false if unable to grow) |
818a18cd69a8
6730514: assertion failure in mangling code when expanding by 0 bytes
jmasa
parents:
196
diff
changeset
|
1114 bool grow_by(size_t bytes); |
818a18cd69a8
6730514: assertion failure in mangling code when expanding by 0 bytes
jmasa
parents:
196
diff
changeset
|
1115 // Grow generation to reserved size. |
818a18cd69a8
6730514: assertion failure in mangling code when expanding by 0 bytes
jmasa
parents:
196
diff
changeset
|
1116 bool grow_to_reserved(); |
818a18cd69a8
6730514: assertion failure in mangling code when expanding by 0 bytes
jmasa
parents:
196
diff
changeset
|
1117 |
0 | 1118 void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; } |
1119 | |
1120 // Space enquiries | |
1121 size_t capacity() const; | |
1122 size_t used() const; | |
1123 size_t free() const; | |
94
0834225a7916
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
ysr
parents:
0
diff
changeset
|
1124 double occupancy() const { return ((double)used())/((double)capacity()); } |
0 | 1125 size_t contiguous_available() const; |
1126 size_t unsafe_max_alloc_nogc() const; | |
1127 | |
1128 // over-rides | |
1129 MemRegion used_region() const; | |
1130 MemRegion used_region_at_save_marks() const; | |
1131 | |
1132 // Does a "full" (forced) collection invoked on this generation collect | |
1133 // all younger generations as well? Note that the second conjunct is a | |
1134 // hack to allow the collection of the younger gen first if the flag is | |
1135 // set. This is better than using th policy's should_collect_gen0_first() | |
1136 // since that causes us to do an extra unnecessary pair of restart-&-stop-world. | |
1137 virtual bool full_collects_younger_generations() const { | |
1138 return UseCMSCompactAtFullCollection && !CollectGen0First; | |
1139 } | |
1140 | |
1141 void space_iterate(SpaceClosure* blk, bool usedOnly = false); | |
1142 | |
1143 // Support for compaction | |
1144 CompactibleSpace* first_compaction_space() const; | |
1145 // Adjust quantites in the generation affected by | |
1146 // the compaction. | |
1147 void reset_after_compaction(); | |
1148 | |
1149 // Allocation support | |
1150 HeapWord* allocate(size_t size, bool tlab); | |
1151 HeapWord* have_lock_and_allocate(size_t size, bool tlab); | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
94
diff
changeset
|
1152 oop promote(oop obj, size_t obj_size); |
0 | 1153 HeapWord* par_allocate(size_t size, bool tlab) { |
1154 return allocate(size, tlab); | |
1155 } | |
1156 | |
1157 // Incremental mode triggering. | |
1158 HeapWord* allocation_limit_reached(Space* space, HeapWord* top, | |
1159 size_t word_size); | |
1160 | |
1161 // Used by CMSStats to track direct allocation. The value is sampled and | |
1162 // reset after each young gen collection. | |
1163 size_t direct_allocated_words() const { return _direct_allocated_words; } | |
1164 void reset_direct_allocated_words() { _direct_allocated_words = 0; } | |
1165 | |
1166 // Overrides for parallel promotion. | |
1167 virtual oop par_promote(int thread_num, | |
1168 oop obj, markOop m, size_t word_sz); | |
1169 // This one should not be called for CMS. | |
1170 virtual void par_promote_alloc_undo(int thread_num, | |
1171 HeapWord* obj, size_t word_sz); | |
1172 virtual void par_promote_alloc_done(int thread_num); | |
1173 virtual void par_oop_since_save_marks_iterate_done(int thread_num); | |
1174 | |
1175 virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes, | |
1176 bool younger_handles_promotion_failure) const; | |
1177 | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
935
diff
changeset
|
1178 // Inform this (non-young) generation that a promotion failure was |
1179 // encountered during a collection of a younger generation that |
1180 // promotes into this generation. |
1181 virtual void promotion_failure_occurred(); |
1182 |
0 | 1183 bool should_collect(bool full, size_t size, bool tlab); |
1184 virtual bool should_concurrent_collect() const; |
1185 virtual bool is_too_full() const; |
0 | 1186 void collect(bool full, |
1187 bool clear_all_soft_refs, | |
1188 size_t size, | |
1189 bool tlab); | |
1190 | |
1191 HeapWord* expand_and_allocate(size_t word_size, | |
1192 bool tlab, | |
1193 bool parallel = false); | |
1194 | |
1195 // GC prologue and epilogue | |
1196 void gc_prologue(bool full); | |
1197 void gc_prologue_work(bool full, bool registerClosure, | |
1198 ModUnionClosure* modUnionClosure); | |
1199 void gc_epilogue(bool full); | |
1200 void gc_epilogue_work(bool full); | |
1201 | |
1202 // Time since last GC of this generation | |
1203 jlong time_of_last_gc(jlong now) { | |
1204 return collector()->time_of_last_gc(now); | |
1205 } | |
1206 void update_time_of_last_gc(jlong now) { | |
1207 collector()->update_time_of_last_gc(now); | |
1208 } | |
1209 | |
1210 // Allocation failure | |
1211 void expand(size_t bytes, size_t expand_bytes, | |
1212 CMSExpansionCause::Cause cause); | |
1213 virtual bool expand(size_t bytes, size_t expand_bytes); |
0 | 1214 void shrink(size_t bytes); |
1215 HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz); | |
1216 bool expand_and_ensure_spooling_space(PromotionInfo* promo); | |
1217 | |
1218 // Iteration support and related enquiries | |
1219 void save_marks(); | |
1220 bool no_allocs_since_save_marks(); | |
1221 void object_iterate_since_last_GC(ObjectClosure* cl); | |
1222 void younger_refs_iterate(OopsInGenClosure* cl); | |
1223 | |
1224 // Iteration support specific to CMS generations | |
1225 void save_sweep_limit(); | |
1226 | |
1227 // More iteration support | |
1228 virtual void oop_iterate(MemRegion mr, OopClosure* cl); | |
1229 virtual void oop_iterate(OopClosure* cl); | |
1230 virtual void safe_object_iterate(ObjectClosure* cl); |
0 | 1231 virtual void object_iterate(ObjectClosure* cl); |
1232 | |
1233 // Need to declare the full complement of closures, whether we'll | |
1234 // override them or not, or get a message from the compiler: | |
1235 // oop_since_save_marks_iterate_nv hides virtual function... | |
1236 #define CMS_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \ | |
1237 void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl); | |
1238 ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DECL) | |
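  // Illustrative expansion (assuming ScanClosure appears among the closure
  // types enumerated by ALL_SINCE_SAVE_MARKS_CLOSURES with nv_suffix _nv),
  // the invocation above declares, among others:
  //   void oop_since_save_marks_iterate_nv(ScanClosure* cl);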
1239 | |
1240 // Smart allocation XXX -- move to CFLSpace? | |
1241 void setNearLargestChunk(); | |
1242 bool isNearLargestChunk(HeapWord* addr); | |
1243 | |
1244 // Get the chunk at the end of the space. Delegates to | |
1245 // the space. | |
1246 FreeChunk* find_chunk_at_end(); | |
1247 | |
1248 // Overriding of unused functionality (sharing not yet supported with CMS) | |
1249 void pre_adjust_pointers(); | |
1250 void post_compact(); | |
1251 | |
1252 // Debugging | |
1253 void prepare_for_verify(); | |
1254 void verify(bool allow_dirty); | |
1255 void print_statistics() PRODUCT_RETURN; | |
1256 | |
1257 // Performance Counters support | |
1258 virtual void update_counters(); | |
1259 virtual void update_counters(size_t used); | |
1260 void initialize_performance_counters(); | |
1261 CollectorCounters* counters() { return collector()->counters(); } | |
1262 | |
1263 // Support for parallel remark of survivor space | |
1264 void* get_data_recorder(int thr_num) { | |
1265 // Delegate to collector | |
1266 return collector()->get_data_recorder(thr_num); | |
1267 } | |
1268 | |
1269 // Printing | |
1270 const char* name() const; | |
1271 virtual const char* short_name() const { return "CMS"; } | |
1272 void print() const; | |
1273 void printOccupancy(const char* s); | |
1274 bool must_be_youngest() const { return false; } | |
1275 bool must_be_oldest() const { return true; } | |
1276 | |
1277 void compute_new_size(); | |
1278 | |
1279 CollectionTypes debug_collection_type() { return _debug_collection_type; } | |
1280 void rotate_debug_collection_type(); | |
1281 }; | |
1282 | |
1283 class ASConcurrentMarkSweepGeneration : public ConcurrentMarkSweepGeneration { | |
1284 | |
1285 // Return the size policy from the heap's collector | |
1286 // policy casted to CMSAdaptiveSizePolicy*. | |
1287 CMSAdaptiveSizePolicy* cms_size_policy() const; | |
1288 | |
1289 // Resize the generation based on the adaptive size | |
1290 // policy. | |
1291 void resize(size_t cur_promo, size_t desired_promo); | |
1292 | |
1293 // Return the GC counters from the collector policy | |
1294 CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters(); | |
1295 | |
1296 virtual void shrink_by(size_t bytes); | |
1297 | |
1298 public: | |
1299 virtual void compute_new_size(); | |
1300 ASConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size, | |
1301 int level, CardTableRS* ct, | |
1302 bool use_adaptive_freelists, | |
1303 FreeBlockDictionary::DictionaryChoice | |
1304 dictionaryChoice) : | |
1305 ConcurrentMarkSweepGeneration(rs, initial_byte_size, level, ct, | |
1306 use_adaptive_freelists, dictionaryChoice) {} | |
1307 | |
1308 virtual const char* short_name() const { return "ASCMS"; } | |
1309 virtual Generation::Name kind() { return Generation::ASConcurrentMarkSweep; } | |
1310 | |
1311 virtual void update_counters(); | |
1312 virtual void update_counters(size_t used); | |
1313 }; | |
1314 | |
1315 // | |
1316 // Closures of various sorts used by CMS to accomplish its work | |
1317 // | |
1318 | |
1319 // This closure is used to check that a certain set of oops is empty. | |
1320 class FalseClosure: public OopClosure { | |
1321 public: | |
1322 void do_oop(oop* p) { guarantee(false, "Should be an empty set"); } |
1323 void do_oop(narrowOop* p) { guarantee(false, "Should be an empty set"); } |
0 | 1324 }; |
1325 | |
1326 // This closure is used to do concurrent marking from the roots | |
1327 // following the first checkpoint. | |
1328 class MarkFromRootsClosure: public BitMapClosure { | |
1329 CMSCollector* _collector; | |
1330 MemRegion _span; | |
1331 CMSBitMap* _bitMap; | |
1332 CMSBitMap* _mut; | |
1333 CMSMarkStack* _markStack; | |
1334 CMSMarkStack* _revisitStack; | |
1335 bool _yield; | |
1336 int _skipBits; | |
1337 HeapWord* _finger; | |
1338 HeapWord* _threshold; | |
1339 DEBUG_ONLY(bool _verifying;) | |
1340 | |
1341 public: | |
1342 MarkFromRootsClosure(CMSCollector* collector, MemRegion span, | |
1343 CMSBitMap* bitMap, | |
1344 CMSMarkStack* markStack, | |
1345 CMSMarkStack* revisitStack, | |
1346 bool should_yield, bool verifying = false); | |
1347 bool do_bit(size_t offset); |
0 | 1348 void reset(HeapWord* addr); |
1349 inline void do_yield_check(); | |
1350 | |
1351 private: | |
1352 void scanOopsInOop(HeapWord* ptr); | |
1353 void do_yield_work(); | |
1354 }; | |
1355 | |
1356 // This closure is used to do concurrent multi-threaded | |
1357 // marking from the roots following the first checkpoint. | |
1358 // XXX This should really be a subclass of the serial version | |
1359 // above, but I have not had the time to refactor things cleanly. | |
1360 // That will be done for Dolphin. | |
1361 class Par_MarkFromRootsClosure: public BitMapClosure { | |
1362 CMSCollector* _collector; | |
1363 MemRegion _whole_span; | |
1364 MemRegion _span; | |
1365 CMSBitMap* _bit_map; | |
1366 CMSBitMap* _mut; | |
1367 OopTaskQueue* _work_queue; | |
1368 CMSMarkStack* _overflow_stack; | |
1369 CMSMarkStack* _revisit_stack; | |
1370 bool _yield; | |
1371 int _skip_bits; | |
1372 HeapWord* _finger; | |
1373 HeapWord* _threshold; | |
1374 CMSConcMarkingTask* _task; | |
1375 public: | |
1376 Par_MarkFromRootsClosure(CMSConcMarkingTask* task, CMSCollector* collector, | |
1377 MemRegion span, | |
1378 CMSBitMap* bit_map, | |
1379 OopTaskQueue* work_queue, | |
1380 CMSMarkStack* overflow_stack, | |
1381 CMSMarkStack* revisit_stack, | |
1382 bool should_yield); | |
1383 bool do_bit(size_t offset); |
0 | 1384 inline void do_yield_check(); |
1385 | |
1386 private: | |
1387 void scan_oops_in_oop(HeapWord* ptr); | |
1388 void do_yield_work(); | |
1389 bool get_work_from_overflow_stack(); | |
1390 }; | |
1391 | |
1392 // The following closures are used to do certain kinds of verification of | |
1393 // CMS marking. | |
1394 class PushAndMarkVerifyClosure: public OopClosure { | |
1395 CMSCollector* _collector; | |
1396 MemRegion _span; | |
1397 CMSBitMap* _verification_bm; | |
1398 CMSBitMap* _cms_bm; | |
1399 CMSMarkStack* _mark_stack; | |
1400 protected: |
1401 void do_oop(oop p); |
1402 template <class T> inline void do_oop_work(T *p) { |
1403 oop obj = oopDesc::load_decode_heap_oop_not_null(p); |
1404 do_oop(obj); |
1405 } |
0 | 1406 public: |
1407 PushAndMarkVerifyClosure(CMSCollector* cms_collector, | |
1408 MemRegion span, | |
1409 CMSBitMap* verification_bm, | |
1410 CMSBitMap* cms_bm, | |
1411 CMSMarkStack* mark_stack); | |
1412 void do_oop(oop* p); | |
1413 void do_oop(narrowOop* p); |
0 | 1414 // Deal with a stack overflow condition |
1415 void handle_stack_overflow(HeapWord* lost); | |
1416 }; | |
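
// A minimal standalone sketch (not HotSpot code) of the dual-width oop
// dispatch used by do_oop_work() above: a single template body serves both
// full-width and compressed references. WideRef, NarrowRef, decode(),
// kHeapBase and kShift are hypothetical analogues of oop, narrowOop and
// oopDesc::load_decode_heap_oop_not_null().
#include <cstdint>
#include <cstdio>

typedef uint64_t WideRef;    // full-width reference: already an address
typedef uint32_t NarrowRef;  // compressed reference: scaled heap offset

static const uint64_t kHeapBase = 0x100000000ULL;  // assumed heap base
static const int      kShift    = 3;               // assumed compression shift

static uint64_t decode(const WideRef* p)   { return *p; }
static uint64_t decode(const NarrowRef* p) {
  return kHeapBase + ((uint64_t)*p << kShift);     // base + scaled offset
}

struct ToyVerifyClosure {
  template <class T> void do_oop_work(T* p) {
    std::printf("visit object at 0x%llx\n", (unsigned long long)decode(p));
  }
  void do_oop(WideRef* p)   { do_oop_work(p); }
  void do_oop(NarrowRef* p) { do_oop_work(p); }
};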
1417 | |
1418 class MarkFromRootsVerifyClosure: public BitMapClosure { | |
1419 CMSCollector* _collector; | |
1420 MemRegion _span; | |
1421 CMSBitMap* _verification_bm; | |
1422 CMSBitMap* _cms_bm; | |
1423 CMSMarkStack* _mark_stack; | |
1424 HeapWord* _finger; | |
1425 PushAndMarkVerifyClosure _pam_verify_closure; | |
1426 public: | |
1427 MarkFromRootsVerifyClosure(CMSCollector* collector, MemRegion span, | |
1428 CMSBitMap* verification_bm, | |
1429 CMSBitMap* cms_bm, | |
1430 CMSMarkStack* mark_stack); | |
1431 bool do_bit(size_t offset); |
0 | 1432 void reset(HeapWord* addr); |
1433 }; | |
1434 | |
1435 | |
1436 // This closure is used to check that a certain set of bits is | |
1437 // "empty" (i.e. the bit vector doesn't have any 1-bits). | |
1438 class FalseBitMapClosure: public BitMapClosure { | |
1439 public: | |
1440 bool do_bit(size_t offset) { |
0 | 1441 guarantee(false, "Should not have a 1 bit"); |
1442 return true; |
0 | 1443 } |
1444 }; | |
1445 | |
1446 // This closure is used during the second checkpointing phase | |
1447 // to rescan the marked objects on the dirty cards in the mod | |
1448 // union table and the card table proper. It's invoked via | |
1449 // MarkFromDirtyCardsClosure below. It uses either | |
1450 // [Par_]MarkRefsIntoAndScanClosure (Par_ in the parallel case) | |
1451 // declared in genOopClosures.hpp to accomplish some of its work. | |
1452 // In the parallel case the bitMap is shared, so access to | |
1453 // it needs to be suitably synchronized for the embedded | |
1454 // closures that update it; however, this closure itself only | |
1455 // reads the bit_map and, because it is idempotent, is immune to | |
1456 // reading stale values. | |
1457 class ScanMarkedObjectsAgainClosure: public UpwardsObjectClosure { | |
1458 #ifdef ASSERT | |
1459 CMSCollector* _collector; | |
1460 MemRegion _span; | |
1461 union { | |
1462 CMSMarkStack* _mark_stack; | |
1463 OopTaskQueue* _work_queue; | |
1464 }; | |
1465 #endif // ASSERT | |
1466 bool _parallel; | |
1467 CMSBitMap* _bit_map; | |
1468 union { | |
1469 MarkRefsIntoAndScanClosure* _scan_closure; | |
1470 Par_MarkRefsIntoAndScanClosure* _par_scan_closure; | |
1471 }; | |
1472 | |
1473 public: | |
1474 ScanMarkedObjectsAgainClosure(CMSCollector* collector, | |
1475 MemRegion span, | |
1476 ReferenceProcessor* rp, | |
1477 CMSBitMap* bit_map, | |
1478 CMSMarkStack* mark_stack, | |
1479 CMSMarkStack* revisit_stack, | |
1480 MarkRefsIntoAndScanClosure* cl): | |
1481 #ifdef ASSERT | |
1482 _collector(collector), | |
1483 _span(span), | |
1484 _mark_stack(mark_stack), | |
1485 #endif // ASSERT | |
1486 _parallel(false), | |
1487 _bit_map(bit_map), | |
1488 _scan_closure(cl) { } | |
1489 | |
1490 ScanMarkedObjectsAgainClosure(CMSCollector* collector, | |
1491 MemRegion span, | |
1492 ReferenceProcessor* rp, | |
1493 CMSBitMap* bit_map, | |
1494 OopTaskQueue* work_queue, | |
1495 CMSMarkStack* revisit_stack, | |
1496 Par_MarkRefsIntoAndScanClosure* cl): | |
1497 #ifdef ASSERT | |
1498 _collector(collector), | |
1499 _span(span), | |
1500 _work_queue(work_queue), | |
1501 #endif // ASSERT | |
1502 _parallel(true), | |
1503 _bit_map(bit_map), | |
1504 _par_scan_closure(cl) { } | |
1505 | |
1506 void do_object(oop obj) { | |
1507 guarantee(false, "Call do_object_b(oop, MemRegion) instead"); | |
1508 } | |
1509 bool do_object_b(oop obj) { | |
1510 guarantee(false, "Call do_object_b(oop, MemRegion) form instead"); | |
1511 return false; | |
1512 } | |
1513 bool do_object_bm(oop p, MemRegion mr); | |
1514 }; | |
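
// A minimal standalone sketch (not HotSpot code) of the tagged-union pattern
// above, where the _parallel flag records which union member is live.
// SerialSink, ParallelSink and ToyScanClosure are hypothetical stand-ins for
// the mark stack, the work queue and the closure itself.
#include <cstdio>

struct SerialSink   { void put(int v) { std::printf("serial %d\n", v); } };
struct ParallelSink { void put(int v) { std::printf("parallel %d\n", v); } };

class ToyScanClosure {
  bool _parallel;                  // tag: selects the live union member
  union {
    SerialSink*   _serial_sink;    // valid iff !_parallel
    ParallelSink* _parallel_sink;  // valid iff  _parallel
  };
public:
  explicit ToyScanClosure(SerialSink* s)   : _parallel(false), _serial_sink(s)   {}
  explicit ToyScanClosure(ParallelSink* p) : _parallel(true),  _parallel_sink(p) {}
  void emit(int v) {
    if (_parallel) _parallel_sink->put(v); else _serial_sink->put(v);
  }
};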
1515 | |
1516 // This closure is used during the second checkpointing phase | |
1517 // to rescan the marked objects on the dirty cards in the mod | |
1518 // union table and the card table proper. It invokes | |
1519 // ScanMarkedObjectsAgainClosure above to accomplish much of its work. | |
1520 // In the parallel case, the bit map is shared and requires | |
1521 // synchronized access. | |
1522 class MarkFromDirtyCardsClosure: public MemRegionClosure { | |
1523 CompactibleFreeListSpace* _space; | |
1524 ScanMarkedObjectsAgainClosure _scan_cl; | |
1525 size_t _num_dirty_cards; | |
1526 | |
1527 public: | |
1528 MarkFromDirtyCardsClosure(CMSCollector* collector, | |
1529 MemRegion span, | |
1530 CompactibleFreeListSpace* space, | |
1531 CMSBitMap* bit_map, | |
1532 CMSMarkStack* mark_stack, | |
1533 CMSMarkStack* revisit_stack, | |
1534 MarkRefsIntoAndScanClosure* cl): | |
1535 _space(space), | |
1536 _num_dirty_cards(0), | |
1537 _scan_cl(collector, span, collector->ref_processor(), bit_map, | |
1538 mark_stack, revisit_stack, cl) { } | |
1539 | |
1540 MarkFromDirtyCardsClosure(CMSCollector* collector, | |
1541 MemRegion span, | |
1542 CompactibleFreeListSpace* space, | |
1543 CMSBitMap* bit_map, | |
1544 OopTaskQueue* work_queue, | |
1545 CMSMarkStack* revisit_stack, | |
1546 Par_MarkRefsIntoAndScanClosure* cl): | |
1547 _space(space), | |
1548 _num_dirty_cards(0), | |
1549 _scan_cl(collector, span, collector->ref_processor(), bit_map, | |
1550 work_queue, revisit_stack, cl) { } | |
1551 | |
1552 void do_MemRegion(MemRegion mr); | |
1553 void set_space(CompactibleFreeListSpace* space) { _space = space; } | |
1554 size_t num_dirty_cards() { return _num_dirty_cards; } | |
1555 }; | |
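
// A minimal standalone sketch (not HotSpot code) of the dirty-card rescan
// that MarkFromDirtyCardsClosure drives: walk the card table, coalesce runs
// of dirty cards, and rescan each resulting region. kCardWords, kDirty and
// the function names are hypothetical; the real card geometry comes from
// CardTableModRefBS.
#include <cstdio>
#include <cstddef>

static const size_t        kCardWords = 64;  // assumed HeapWords per card
static const unsigned char kDirty     = 1;   // assumed dirty-card value

static void rescan_words(size_t begin, size_t end) {
  std::printf("rescan marked objects in words [%zu, %zu)\n", begin, end);
}

static void rescan_dirty_cards(const unsigned char* cards, size_t n_cards) {
  size_t i = 0;
  while (i < n_cards) {
    if (cards[i] == kDirty) {
      size_t j = i;
      while (j < n_cards && cards[j] == kDirty) j++;  // extend the dirty run
      rescan_words(i * kCardWords, j * kCardWords);   // one region per run
      i = j;
    } else {
      i++;                                            // skip clean cards
    }
  }
}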
1556 | |
1557 // This closure is used in the non-product build to check | |
1558 // that there are no MemRegions with a certain property. | |
1559 class FalseMemRegionClosure: public MemRegionClosure { | |
1560 void do_MemRegion(MemRegion mr) { | |
1561 guarantee(!mr.is_empty(), "Shouldn't be empty"); | |
1562 guarantee(false, "Should never be here"); | |
1563 } | |
1564 }; | |
1565 | |
1566 // This closure is used during the precleaning phase | |
1567 // to "carefully" rescan marked objects on dirty cards. | |
1568 // It uses MarkRefsIntoAndScanClosure declared in genOopClosures.hpp | |
1569 // to accomplish some of its work. | |
1570 class ScanMarkedObjectsAgainCarefullyClosure: public ObjectClosureCareful { | |
1571 CMSCollector* _collector; | |
1572 MemRegion _span; | |
1573 bool _yield; | |
1574 Mutex* _freelistLock; | |
1575 CMSBitMap* _bitMap; | |
1576 CMSMarkStack* _markStack; | |
1577 MarkRefsIntoAndScanClosure* _scanningClosure; | |
1578 | |
1579 public: | |
1580 ScanMarkedObjectsAgainCarefullyClosure(CMSCollector* collector, | |
1581 MemRegion span, | |
1582 CMSBitMap* bitMap, | |
1583 CMSMarkStack* markStack, | |
1584 CMSMarkStack* revisitStack, | |
1585 MarkRefsIntoAndScanClosure* cl, | |
1586 bool should_yield): | |
1587 _collector(collector), | |
1588 _span(span), | |
1589 _yield(should_yield), | |
1590 _bitMap(bitMap), | |
1591 _markStack(markStack), | |
1592 _scanningClosure(cl) { | |
1593 } | |
1594 | |
1595 void do_object(oop p) { | |
1596 guarantee(false, "call do_object_careful instead"); | |
1597 } | |
1598 | |
1599 size_t do_object_careful(oop p) { | |
1600 guarantee(false, "Unexpected caller"); | |
1601 return 0; | |
1602 } | |
1603 | |
1604 size_t do_object_careful_m(oop p, MemRegion mr); | |
1605 | |
1606 void setFreelistLock(Mutex* m) { | |
1607 _freelistLock = m; | |
1608 _scanningClosure->set_freelistLock(m); | |
1609 } | |
1610 | |
1611 private: | |
1612 inline bool do_yield_check(); | |
1613 | |
1614 void do_yield_work(); | |
1615 }; | |
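
// A minimal standalone sketch (not HotSpot code) of the cooperative yield
// protocol the "careful" closures use: periodically check whether to yield,
// and if so drop shared locks, pause, then reacquire before resuming.
// ToyYieldingScanner and its callback are hypothetical stand-ins.
#include <mutex>

struct ToyYieldingScanner {
  std::mutex* _freelist_lock;    // analogous to _freelistLock above
  bool (*_should_yield)();       // e.g. "a foreground collection wants in"

  void do_yield_check() {
    if (_should_yield != nullptr && _should_yield()) do_yield_work();
  }
  void do_yield_work() {
    _freelist_lock->unlock();    // release shared state before pausing
    // ... pause briefly / let the requesting thread make progress ...
    _freelist_lock->lock();      // reacquire before resuming the scan
  }
};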
1616 | |
1617 class SurvivorSpacePrecleanClosure: public ObjectClosureCareful { | |
1618 CMSCollector* _collector; | |
1619 MemRegion _span; | |
1620 bool _yield; | |
1621 CMSBitMap* _bit_map; | |
1622 CMSMarkStack* _mark_stack; | |
1623 PushAndMarkClosure* _scanning_closure; | |
1624 unsigned int _before_count; | |
1625 | |
1626 public: | |
1627 SurvivorSpacePrecleanClosure(CMSCollector* collector, | |
1628 MemRegion span, | |
1629 CMSBitMap* bit_map, | |
1630 CMSMarkStack* mark_stack, | |
1631 PushAndMarkClosure* cl, | |
1632 unsigned int before_count, | |
1633 bool should_yield): | |
1634 _collector(collector), | |
1635 _span(span), | |
1636 _yield(should_yield), | |
1637 _bit_map(bit_map), | |
1638 _mark_stack(mark_stack), | |
1639 _scanning_closure(cl), | |
1640 _before_count(before_count) | |
1641 { } | |
1642 | |
1643 void do_object(oop p) { | |
1644 guarantee(false, "call do_object_careful instead"); | |
1645 } | |
1646 | |
1647 size_t do_object_careful(oop p); | |
1648 | |
1649 size_t do_object_careful_m(oop p, MemRegion mr) { | |
1650 guarantee(false, "Unexpected caller"); | |
1651 return 0; | |
1652 } | |
1653 | |
1654 private: | |
1655 inline void do_yield_check(); | |
1656 void do_yield_work(); | |
1657 }; | |
1658 | |
1659 // This closure is used to accomplish the sweeping work | |
1660 // after the second checkpoint but before the concurrent reset | |
1661 // phase. | |
1662 // | |
1663 // Terminology | |
1664 // left hand chunk (LHC) - block of one or more chunks currently being | |
1665 // coalesced. The LHC is available for coalescing with a new chunk. | |
1666 // right hand chunk (RHC) - the block currently being swept, which is | |
1667 // free or garbage and can be coalesced with the LHC. | |
1668 // _inFreeRange is true if there is currently a LHC | |
1669 // _lastFreeRangeCoalesced is true if the LHC consists of more than one chunk. | |
1670 // _freeRangeInFreeLists is true if the LHC is in the free lists. | |
1671 // _freeFinger is the address of the current LHC | |
1672 class SweepClosure: public BlkClosureCareful { | |
1673 CMSCollector* _collector; // collector doing the work | |
1674 ConcurrentMarkSweepGeneration* _g; // Generation being swept | |
1675 CompactibleFreeListSpace* _sp; // Space being swept | |
1676 HeapWord* _limit; | |
1677 Mutex* _freelistLock; // Free list lock (in space) | |
1678 CMSBitMap* _bitMap; // Marking bit map (in | |
1679 // generation) | |
1680 bool _inFreeRange; // Indicates if we are in the | |
1681 // midst of a free run | |
1682 bool _freeRangeInFreeLists; | |
1683 // Often, we have just found | |
1684 // a free chunk and started | |
1685 // a new free range; we do not | |
1686 // eagerly remove this chunk from | |
1687 // the free lists unless there is | |
1688 // a possibility of coalescing. | |
1689 // When true, this flag indicates | |
1690 // that the _freeFinger below | |
1691 // points to a potentially free chunk | |
1692 // that may still be in the free lists | |
1693 bool _lastFreeRangeCoalesced; | |
1694 // free range contains chunks | |
1695 // coalesced | |
1696 bool _yield; | |
1697 // Whether sweeping should be | |
1698 // done with yields. For instance | |
1699 // when done by the foreground | |
1700 // collector we shouldn't yield. | |
1701 HeapWord* _freeFinger; // When _inFreeRange is set, the | |
1702 // pointer to the "left hand | |
1703 // chunk" | |
1704 size_t _freeRangeSize; | |
1705 // When _inFreeRange is set, this | |
1706 // indicates the accumulated size | |
1707 // of the "left hand chunk" | |
1708 NOT_PRODUCT( | |
1709 size_t _numObjectsFreed; | |
1710 size_t _numWordsFreed; | |
1711 size_t _numObjectsLive; | |
1712 size_t _numWordsLive; | |
1713 size_t _numObjectsAlreadyFree; | |
1714 size_t _numWordsAlreadyFree; | |
1715 FreeChunk* _last_fc; | |
1716 ) | |
1717 private: | |
1718 // Code that is common to a free chunk or garbage when | |
1719 // encountered during sweeping. | |
1720 void doPostIsFreeOrGarbageChunk(FreeChunk *fc, | |
1721 size_t chunkSize); | |
1722 // Process a free chunk during sweeping. | |
1723 void doAlreadyFreeChunk(FreeChunk *fc); | |
1724 // Process a garbage chunk during sweeping. | |
1725 size_t doGarbageChunk(FreeChunk *fc); | |
1726 // Process a live chunk during sweeping. | |
1727 size_t doLiveChunk(FreeChunk* fc); | |
1728 | |
1729 // Accessors. | |
1730 HeapWord* freeFinger() const { return _freeFinger; } | |
1731 void set_freeFinger(HeapWord* v) { _freeFinger = v; } | |
1732 size_t freeRangeSize() const { return _freeRangeSize; } | |
1733 void set_freeRangeSize(size_t v) { _freeRangeSize = v; } | |
1734 bool inFreeRange() const { return _inFreeRange; } | |
1735 void set_inFreeRange(bool v) { _inFreeRange = v; } | |
1736 bool lastFreeRangeCoalesced() const { return _lastFreeRangeCoalesced; } | |
1737 void set_lastFreeRangeCoalesced(bool v) { _lastFreeRangeCoalesced = v; } | |
1738 bool freeRangeInFreeLists() const { return _freeRangeInFreeLists; } | |
1739 void set_freeRangeInFreeLists(bool v) { _freeRangeInFreeLists = v; } | |
1740 | |
1741 // Initialize a free range. | |
1742 void initialize_free_range(HeapWord* freeFinger, bool freeRangeInFreeLists); | |
1743 // Return this chunk to the free lists. | |
1744 void flushCurFreeChunk(HeapWord* chunk, size_t size); | |
1745 | |
1746 // Check if we should yield and do so when necessary. | |
1747 inline void do_yield_check(HeapWord* addr); | |
1748 | |
1749 // Yield | |
1750 void do_yield_work(HeapWord* addr); | |
1751 | |
1752 // Debugging/Printing | |
1753 void record_free_block_coalesced(FreeChunk* fc) const PRODUCT_RETURN; | |
1754 | |
1755 public: | |
1756 SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g, | |
1757 CMSBitMap* bitMap, bool should_yield); | |
1758 ~SweepClosure(); | |
1759 | |
1760 size_t do_blk_careful(HeapWord* addr); | |
1761 }; | |
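
// A minimal standalone sketch (not HotSpot code) of the LHC coalescing state
// machine described above, modeled on a flat sequence of blocks: free and
// garbage blocks extend the current left hand chunk, while a live block
// terminates it. Block, BlockKind and main() are hypothetical stand-ins.
#include <cstdio>
#include <cstddef>

enum BlockKind { kFree, kGarbage, kLive };
struct Block { BlockKind kind; size_t words; };

int main() {
  const Block blocks[] =
    { {kFree, 8}, {kGarbage, 4}, {kLive, 16}, {kGarbage, 2}, {kFree, 6} };
  bool   in_free_range = false;  // analogous to _inFreeRange
  size_t free_finger   = 0;      // analogous to _freeFinger (an index here)
  size_t range_words   = 0;      // analogous to _freeRangeSize
  size_t addr          = 0;
  for (const Block& b : blocks) {
    if (b.kind == kLive) {
      if (in_free_range) {       // a live block closes the current LHC
        std::printf("coalesced range at %zu: %zu words\n", free_finger, range_words);
        in_free_range = false;
      }
    } else {                     // free or garbage joins (or starts) the LHC
      if (!in_free_range) { in_free_range = true; free_finger = addr; range_words = 0; }
      range_words += b.words;
    }
    addr += b.words;
  }
  if (in_free_range)
    std::printf("coalesced range at %zu: %zu words\n", free_finger, range_words);
  return 0;
}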
1762 | |
1763 // Closures related to weak references processing | |
1764 | |
1765 // During CMS' weak reference processing, this is a | |
1766 // work-routine/closure used to complete transitive | |
1767 // marking of objects as live after a certain point | |
1768 // in which an initial set has been completely accumulated. | |
1769 // This closure is currently used both during the final |
1770 // remark stop-world phase, as well as during the concurrent |
1771 // precleaning of the discovered reference lists. |
0 | 1772 class CMSDrainMarkingStackClosure: public VoidClosure { |
1773 CMSCollector* _collector; | |
1774 MemRegion _span; | |
1775 CMSMarkStack* _mark_stack; | |
1776 CMSBitMap* _bit_map; | |
1777 CMSKeepAliveClosure* _keep_alive; | |
1778 bool _concurrent_precleaning; |
0 | 1779 public: |
1780 CMSDrainMarkingStackClosure(CMSCollector* collector, MemRegion span, | |
1781 CMSBitMap* bit_map, CMSMarkStack* mark_stack, | |
1782 CMSKeepAliveClosure* keep_alive, |
1783 bool cpc): |
0 | 1784 _collector(collector), |
1785 _span(span), | |
1786 _bit_map(bit_map), | |
1787 _mark_stack(mark_stack), | |
1788 _keep_alive(keep_alive), |
1789 _concurrent_precleaning(cpc) { |
1790 assert(_concurrent_precleaning == _keep_alive->concurrent_precleaning(), |
1791 "Mismatch"); |
1792 } |
0 | 1793 |
1794 void do_void(); | |
1795 }; | |
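
// A minimal standalone sketch (not HotSpot code) of the stack-draining
// transitive marking that CMSDrainMarkingStackClosure performs: pop an
// object, mark and push its unmarked referents, and repeat until the stack
// is empty. ToyObj and drain() are hypothetical stand-ins.
#include <vector>

struct ToyObj {
  bool marked;
  std::vector<ToyObj*> refs;
};

static void drain(std::vector<ToyObj*>& stack) {
  while (!stack.empty()) {
    ToyObj* o = stack.back();
    stack.pop_back();
    for (ToyObj* r : o->refs) {
      if (r != nullptr && !r->marked) {
        r->marked = true;        // mark first, so each object is pushed once
        stack.push_back(r);
      }
    }
  }
}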
1796 | |
1797 // A parallel version of CMSDrainMarkingStackClosure above. | |
1798 class CMSParDrainMarkingStackClosure: public VoidClosure { | |
1799 CMSCollector* _collector; | |
1800 MemRegion _span; | |
1801 OopTaskQueue* _work_queue; | |
1802 CMSBitMap* _bit_map; | |
1803 CMSInnerParMarkAndPushClosure _mark_and_push; | |
1804 | |
1805 public: | |
1806 CMSParDrainMarkingStackClosure(CMSCollector* collector, | |
1807 MemRegion span, CMSBitMap* bit_map, | |
935 | 1808 CMSMarkStack* revisit_stack, |
0 | 1809 OopTaskQueue* work_queue): |
1810 _collector(collector), | |
1811 _span(span), | |
1812 _bit_map(bit_map), | |
1813 _work_queue(work_queue), | |
935 | 1814 _mark_and_push(collector, span, bit_map, revisit_stack, work_queue) { } |
0 | 1815 |
1816 public: | |
1817 void trim_queue(uint max); | |
1818 void do_void(); | |
1819 }; | |
1820 | |
1821 // Allow yielding or short-circuiting of reference list | |
1822 // precleaning work. | |
1823 class CMSPrecleanRefsYieldClosure: public YieldClosure { | |
1824 CMSCollector* _collector; | |
1825 void do_yield_work(); | |
1826 public: | |
1827 CMSPrecleanRefsYieldClosure(CMSCollector* collector): | |
1828 _collector(collector) {} | |
1829 virtual bool should_return(); | |
1830 }; | |
1831 | |
1832 | |
1833 // Convenience class that locks free list locks for given CMS collector | |
1834 class FreelistLocker: public StackObj { | |
1835 private: | |
1836 CMSCollector* _collector; | |
1837 public: | |
1838 FreelistLocker(CMSCollector* collector): | |
1839 _collector(collector) { | |
1840 _collector->getFreelistLocks(); | |
1841 } | |
1842 | |
1843 ~FreelistLocker() { | |
1844 _collector->releaseFreelistLocks(); | |
1845 } | |
1846 }; | |
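
// A minimal standalone sketch (not HotSpot code) of the same scoped-locking
// idiom as FreelistLocker, using std::mutex so it compiles on its own.
// ToyCollector and ToyFreelistLocker are hypothetical stand-ins for
// CMSCollector with its getFreelistLocks()/releaseFreelistLocks().
#include <mutex>

struct ToyCollector {
  std::mutex _freelist_lock;
  void get_freelist_locks()     { _freelist_lock.lock(); }
  void release_freelist_locks() { _freelist_lock.unlock(); }
};

class ToyFreelistLocker {
  ToyCollector* _collector;
public:
  explicit ToyFreelistLocker(ToyCollector* c) : _collector(c) {
    _collector->get_freelist_locks();      // acquired on construction
  }
  ~ToyFreelistLocker() {
    _collector->release_freelist_locks();  // released on scope exit
  }
};

void mutate_free_lists(ToyCollector* c) {
  ToyFreelistLocker fll(c);  // locks here...
  // ... manipulate the free lists safely ...
}                            // ...and unlocks here, even on early return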
1847 | |
1848 // Mark all dead objects in a given space. | |
1849 class MarkDeadObjectsClosure: public BlkClosure { | |
1850 const CMSCollector* _collector; | |
1851 const CompactibleFreeListSpace* _sp; | |
1852 CMSBitMap* _live_bit_map; | |
1853 CMSBitMap* _dead_bit_map; | |
1854 public: | |
1855 MarkDeadObjectsClosure(const CMSCollector* collector, | |
1856 const CompactibleFreeListSpace* sp, | |
1857 CMSBitMap *live_bit_map, | |
1858 CMSBitMap *dead_bit_map) : | |
1859 _collector(collector), | |
1860 _sp(sp), | |
1861 _live_bit_map(live_bit_map), | |
1862 _dead_bit_map(dead_bit_map) {} | |
1863 size_t do_blk(HeapWord* addr); | |
1864 }; |