annotate src/share/vm/gc_implementation/g1/concurrentMark.hpp @ 1703:f6f3eef8a521
6581734: CMS Old Gen's collection usage is zero after GC which is incorrect
Summary: Management code enabled for use by a concurrent collector.
Reviewed-by: mchung, ysr
author:   kevinw
date:     Fri, 30 Jul 2010 22:43:50 +0100
parents:  c18cbe5936b8
children: 4805b9f4779e
/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

class G1CollectedHeap;
class CMTask;
typedef GenericTaskQueue<oop> CMTaskQueue;
typedef GenericTaskQueueSet<CMTaskQueue> CMTaskQueueSet;

// A generic CM bit map. This is essentially a wrapper around the BitMap
// class, with one bit per (1<<_shifter) HeapWords.

class CMBitMapRO VALUE_OBJ_CLASS_SPEC {
 protected:
  HeapWord* _bmStartWord;      // base address of range covered by map
  size_t    _bmWordSize;       // map size (in #HeapWords covered)
  const int _shifter;          // map to char or bit
  VirtualSpace _virtual_space; // underlying the bit map
  BitMap    _bm;               // the bit map itself

 public:
  // constructor
  CMBitMapRO(ReservedSpace rs, int shifter);

  enum { do_yield = true };

  // inquiries
  HeapWord* startWord()   const { return _bmStartWord; }
  size_t    sizeInWords() const { return _bmWordSize;  }
  // the following is one past the last word in space
  HeapWord* endWord()     const { return _bmStartWord + _bmWordSize; }

  // read marks

  bool isMarked(HeapWord* addr) const {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    return _bm.at(heapWordToOffset(addr));
  }

  // iteration
  bool iterate(BitMapClosure* cl) { return _bm.iterate(cl); }
  bool iterate(BitMapClosure* cl, MemRegion mr);

  // Return the address corresponding to the next marked bit at or after
  // "addr", and before "limit", if "limit" is non-NULL. If there is no
  // such bit, returns "limit" if that is non-NULL, or else "endWord()".
  HeapWord* getNextMarkedWordAddress(HeapWord* addr,
                                     HeapWord* limit = NULL) const;
  // Return the address corresponding to the next unmarked bit at or after
  // "addr", and before "limit", if "limit" is non-NULL. If there is no
  // such bit, returns "limit" if that is non-NULL, or else "endWord()".
  HeapWord* getNextUnmarkedWordAddress(HeapWord* addr,
                                       HeapWord* limit = NULL) const;

  // conversion utilities
  // XXX Fix these so that offsets are size_t's...
  HeapWord* offsetToHeapWord(size_t offset) const {
    return _bmStartWord + (offset << _shifter);
  }
  size_t heapWordToOffset(HeapWord* addr) const {
    return pointer_delta(addr, _bmStartWord) >> _shifter;
  }
  int heapWordDiffToOffsetDiff(size_t diff) const;
  HeapWord* nextWord(HeapWord* addr) {
    return offsetToHeapWord(heapWordToOffset(addr) + 1);
  }

  void mostly_disjoint_range_union(BitMap* from_bitmap,
                                   size_t from_start_index,
                                   HeapWord* to_start_word,
                                   size_t word_num);

  // debugging
  NOT_PRODUCT(bool covers(ReservedSpace rs) const;)
};
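// Note (illustrative, not in the original header): with shifter == 0 the map
// keeps one bit per HeapWord, so heapWordToOffset(addr) is simply
// addr - _bmStartWord and offsetToHeapWord(offset) is its inverse; a shifter
// of k makes one bit cover 2^k HeapWords, trading resolution for a smaller map.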

class CMBitMap : public CMBitMapRO {

 public:
  // constructor
  CMBitMap(ReservedSpace rs, int shifter) :
    CMBitMapRO(rs, shifter) {}

  // write marks
  void mark(HeapWord* addr) {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    _bm.at_put(heapWordToOffset(addr), true);
  }
  void clear(HeapWord* addr) {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    _bm.at_put(heapWordToOffset(addr), false);
  }
  bool parMark(HeapWord* addr) {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    return _bm.par_at_put(heapWordToOffset(addr), true);
  }
  bool parClear(HeapWord* addr) {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    return _bm.par_at_put(heapWordToOffset(addr), false);
  }
  void markRange(MemRegion mr);
  void clearAll();
  void clearRange(MemRegion mr);

  // Starting at the bit corresponding to "addr" (inclusive), find the next
  // "1" bit, if any. This bit starts some run of consecutive "1"'s; find
  // the end of this run (stopping at "end_addr"). Return the MemRegion
  // covering from the start of the region corresponding to the first bit
  // of the run to the end of the region corresponding to the last bit of
  // the run. If there is no "1" bit at or after "addr", return an empty
  // MemRegion.
  MemRegion getAndClearMarkedRegion(HeapWord* addr, HeapWord* end_addr);
};

// Represents a marking stack used by the CM collector.
// Ideally this should be GrowableArray<> just like MSC's marking stack(s).
class CMMarkStack VALUE_OBJ_CLASS_SPEC {
  ConcurrentMark* _cm;
  oop*   _base;           // bottom of stack
  jint   _index;          // one more than last occupied index
  jint   _capacity;       // max #elements
  jint   _oops_do_bound;  // Number of elements to include in next iteration.
  NOT_PRODUCT(jint _max_depth;)  // max depth plumbed during run

  bool   _overflow;
  DEBUG_ONLY(bool _drain_in_progress;)
  DEBUG_ONLY(bool _drain_in_progress_yields;)

 public:
  CMMarkStack(ConcurrentMark* cm);
  ~CMMarkStack();

  void allocate(size_t size);

  oop pop() {
    if (!isEmpty()) {
      return _base[--_index];
    }
    return NULL;
  }

  // If overflow happens, don't do the push, and record the overflow.
  // *Requires* that "ptr" is already marked.
  void push(oop ptr) {
    if (isFull()) {
      // Record overflow.
      _overflow = true;
      return;
    } else {
      _base[_index++] = ptr;
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index));
    }
  }
  // Non-block impl. Note: concurrency is allowed only with other
  // "par_push" operations, not with "pop" or "drain". We would need
  // parallel versions of them if such concurrency was desired.
  void par_push(oop ptr);

  // Pushes the first "n" elements of "ptr_arr" on the stack.
  // Non-block impl. Note: concurrency is allowed only with other
  // "par_adjoin_arr" or "push" operations, not with "pop" or "drain".
  void par_adjoin_arr(oop* ptr_arr, int n);

  // Pushes the first "n" elements of "ptr_arr" on the stack.
  // Locking impl: concurrency is allowed only with
  // "par_push_arr" and/or "par_pop_arr" operations, which use the same
  // locking strategy.
  void par_push_arr(oop* ptr_arr, int n);

  // If returns false, the array was empty. Otherwise, removes up to "max"
  // elements from the stack, and transfers them to "ptr_arr" in an
  // unspecified order. The actual number transferred is given in "n" ("n
  // == 0" is deliberately redundant with the return value.) Locking impl:
  // concurrency is allowed only with "par_push_arr" and/or "par_pop_arr"
  // operations, which use the same locking strategy.
  bool par_pop_arr(oop* ptr_arr, int max, int* n);

  // Drain the mark stack, applying the given closure to all fields of
  // objects on the stack. (That is, continue until the stack is empty,
  // even if closure applications add entries to the stack.) The "bm"
  // argument, if non-null, may be used to verify that only marked objects
  // are on the mark stack. If "yield_after" is "true", then the
  // concurrent marker performing the drain offers to yield after
  // processing each object. If a yield occurs, stops the drain operation
  // and returns false. Otherwise, returns true.
  template<class OopClosureClass>
  bool drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after = false);

  bool isEmpty()    { return _index == 0; }
  bool isFull()     { return _index == _capacity; }
  int  maxElems()   { return _capacity; }

  bool overflow() { return _overflow; }
  void clear_overflow() { _overflow = false; }

  int size() { return _index; }

  void setEmpty()   { _index = 0; clear_overflow(); }

  // Record the current size; a subsequent "oops_do" will iterate only over
  // indices valid at the time of this call.
  void set_oops_do_bound(jint bound = -1) {
    if (bound == -1) {
      _oops_do_bound = _index;
    } else {
      _oops_do_bound = bound;
    }
  }
  jint oops_do_bound() { return _oops_do_bound; }
  // iterate over the oops in the mark stack, up to the bound recorded via
  // the call above.
  void oops_do(OopClosure* f);
};
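// Usage sketch (hypothetical call site, not in the original header): a marker
// draining its stack with a closure while offering to yield per object:
//   if (!_markStack.drain(&oop_cl, _nextMarkBitMap, true /* yield_after */)) {
//     // a yield interrupted the drain; resume it once the pause is over
//   }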

class CMRegionStack VALUE_OBJ_CLASS_SPEC {
  MemRegion* _base;
  jint       _capacity;
  jint       _index;
  jint       _oops_do_bound;
  bool       _overflow;
 public:
  CMRegionStack();
  ~CMRegionStack();
  void allocate(size_t size);

  // This is lock-free; assumes that it will only be called in parallel
  // with other "push" operations (no pops).
  void push(MemRegion mr);

#if 0
  // This is currently not used. See the comment in the .cpp file.

  // Lock-free; assumes that it will only be called in parallel
  // with other "pop" operations (no pushes).
  MemRegion pop();
#endif // 0

  // These two are the implementations that use a lock. They can be
  // called concurrently with each other but they should not be called
  // concurrently with the lock-free versions (push() / pop()).
  void push_with_lock(MemRegion mr);
  MemRegion pop_with_lock();

  bool isEmpty()    { return _index == 0; }
  bool isFull()     { return _index == _capacity; }

  bool overflow() { return _overflow; }
  void clear_overflow() { _overflow = false; }

  int size() { return _index; }

  // It iterates over the entries in the region stack and it
  // invalidates (i.e. assigns MemRegion()) the ones that point to
  // regions in the collection set.
  bool invalidate_entries_into_cset();

  // This gives an upper bound up to which the iteration in
  // invalidate_entries_into_cset() will reach. This prevents
  // newly-added entries from being unnecessarily scanned.
  void set_oops_do_bound() {
    _oops_do_bound = _index;
  }

  void setEmpty()   { _index = 0; clear_overflow(); }
};

// this will enable a variety of different statistics per GC task
#define _MARKING_STATS_ 0
// this will enable the higher verbose levels
#define _MARKING_VERBOSE_ 0

#if _MARKING_STATS_
#define statsOnly(statement)  \
do {                          \
  statement ;                 \
} while (0)
#else // _MARKING_STATS_
#define statsOnly(statement)  \
do {                          \
} while (0)
#endif // _MARKING_STATS_
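// Usage note (illustrative, not in the original header): call sites write
// statsOnly( ++_objs_scanned ); the statement expands to the increment only
// when _MARKING_STATS_ is 1 and to an empty do/while otherwise, so no #ifdef
// is needed at each call site.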

typedef enum {
  no_verbose  = 0,   // verbose turned off
  stats_verbose,     // only prints stats at the end of marking
  low_verbose,       // low verbose, mostly per region and per major event
  medium_verbose,    // a bit more detailed than low
  high_verbose       // per object verbose
} CMVerboseLevel;


class ConcurrentMarkThread;

class ConcurrentMark: public CHeapObj {
  friend class ConcurrentMarkThread;
  friend class CMTask;
  friend class CMBitMapClosure;
  friend class CSMarkOopClosure;
  friend class CMGlobalObjectClosure;
  friend class CMRemarkTask;
  friend class CMConcurrentMarkingTask;
  friend class G1ParNoteEndTask;
  friend class CalcLiveObjectsClosure;

 protected:
  ConcurrentMarkThread* _cmThread;   // the thread doing the work
  G1CollectedHeap*      _g1h;        // the heap.
  size_t                _parallel_marking_threads; // the number of marking
                                                   // threads we'll use
  double                _sleep_factor; // how much we have to sleep, with
                                       // respect to the work we just did, to
                                       // meet the marking overhead goal
  double                _marking_task_overhead; // marking target overhead for
                                                // a single task

  // same as the two above, but for the cleanup task
  double                _cleanup_sleep_factor;
  double                _cleanup_task_overhead;

  // Stuff related to age cohort processing.
  struct ParCleanupThreadState {
    char _pre[64];
    UncleanRegionList list;
    char _post[64];
  };
  ParCleanupThreadState** _par_cleanup_thread_state;

  // CMS marking support structures
  CMBitMap                _markBitMap1;
  CMBitMap                _markBitMap2;
  CMBitMapRO*             _prevMarkBitMap; // completed mark bitmap
  CMBitMap*               _nextMarkBitMap; // under-construction mark bitmap
  bool                    _at_least_one_mark_complete;

  BitMap                  _region_bm;
  BitMap                  _card_bm;

  // Heap bounds
  HeapWord*               _heap_start;
  HeapWord*               _heap_end;

  // For gray objects
  CMMarkStack             _markStack;   // Grey objects behind global finger.
  CMRegionStack           _regionStack; // Grey regions behind global finger.
  HeapWord* volatile      _finger;      // the global finger, region aligned,
                                        // always points to the end of the
                                        // last claimed region

  // marking tasks
  size_t                  _max_task_num; // maximum task number
  size_t                  _active_tasks; // task num currently active
  CMTask**                _tasks;        // task queue array (max_task_num len)
  CMTaskQueueSet*         _task_queues;  // task queue set
  ParallelTaskTerminator  _terminator;   // for termination

  // Two sync barriers that are used to synchronise tasks when an
  // overflow occurs. The algorithm is the following. All tasks enter
  // the first one to ensure that they have all stopped manipulating
  // the global data structures. After they exit it, they re-initialise
  // their data structures and task 0 re-initialises the global data
  // structures. Then, they enter the second sync barrier. This
  // ensures that no task starts doing work before all data
  // structures (local and global) have been re-initialised. When they
  // exit it, they are free to start working again.
  WorkGangBarrierSync     _first_overflow_barrier_sync;
  WorkGangBarrierSync     _second_overflow_barrier_sync;

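  // Overflow-recovery sketch (illustrative; the real logic lives in the .cpp
  // file). Each task that detects an overflow does roughly:
  //   enter_first_sync_barrier(task_num);   // all tasks stop touching globals
  //   ...reset local structures; task 0 also resets the global ones...
  //   enter_second_sync_barrier(task_num);  // no task restarts too early
  // and only then resumes marking.
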
  // this is set by any task, when an overflow on the global data
  // structures is detected.
  volatile bool           _has_overflown;
  // true: marking is concurrent, false: we're in remark
  volatile bool           _concurrent;
  // set at the end of a Full GC so that marking aborts
  volatile bool           _has_aborted;
  // used when remark aborts due to an overflow to indicate that
  // another concurrent marking phase should start
  volatile bool           _restart_for_overflow;

  // This is true from the very start of concurrent marking until the
  // point when all the tasks complete their work. It is really used
  // to determine the points between the end of concurrent marking and
  // time of remark.
  volatile bool           _concurrent_marking_in_progress;

  // verbose level
  CMVerboseLevel          _verbose_level;

  // These two fields are used to implement the optimisation that
  // avoids pushing objects on the global/region stack if there are
  // no collection set regions above the lowest finger.

  // This is the lowest finger (among the global and local fingers),
  // which is calculated before a new collection set is chosen.
  HeapWord*               _min_finger;
  // If this flag is true, objects/regions that are marked below the
  // finger should be pushed on the stack(s). If this flag is
  // false, it is safe not to push them on the stack(s).
  bool                    _should_gray_objects;

  // All of these times are in ms.
  NumberSeq _init_times;
  NumberSeq _remark_times;
  NumberSeq _remark_mark_times;
  NumberSeq _remark_weak_ref_times;
  NumberSeq _cleanup_times;
  double    _total_counting_time;
  double    _total_rs_scrub_time;

  double*   _accum_task_vtime;   // accumulated task vtime

  WorkGang* _parallel_workers;

  void weakRefsWork(bool clear_all_soft_refs);

  void swapMarkBitMaps();

  // It resets the global marking data structures, as well as the
  // task local ones; should be called during initial mark.
  void reset();
  // It resets all the marking data structures.
  void clear_marking_state();

  // It should be called to indicate which phase we're in (concurrent
  // mark or remark) and how many threads are currently active.
  void set_phase(size_t active_tasks, bool concurrent);
  // We do this after we're done with marking so that the marking data
  // structures are initialised to a sensible and predictable state.
  void set_non_marking_state();

  // prints all gathered CM-related statistics
  void print_stats();

  // accessor methods
  size_t parallel_marking_threads() { return _parallel_marking_threads; }
  double sleep_factor()             { return _sleep_factor; }
  double marking_task_overhead()    { return _marking_task_overhead; }
  double cleanup_sleep_factor()     { return _cleanup_sleep_factor; }
  double cleanup_task_overhead()    { return _cleanup_task_overhead; }

  HeapWord*               finger()       { return _finger; }
  bool                    concurrent()   { return _concurrent; }
  size_t                  active_tasks() { return _active_tasks; }
  ParallelTaskTerminator* terminator()   { return &_terminator; }

  // It claims the next available region to be scanned by a marking
  // task. It might return NULL if the next region is empty or we have
  // run out of regions. In the latter case, out_of_regions()
  // determines whether we've really run out of regions or the task
  // should call claim_region() again. This might seem a bit
  // awkward. Originally, the code was written so that claim_region()
  // either successfully returned with a non-empty region or there
  // were no more regions to be claimed. The problem with this was
  // that, in certain circumstances, it iterated over large chunks of
  // the heap finding only empty regions and, while it was working, it
  // was preventing the calling task from calling its regular clock
  // method. So, this way, each task will spend very little time in
  // claim_region() and is allowed to call the regular clock method
  // frequently.
  HeapRegion* claim_region(int task);

  // It determines whether we've run out of regions to scan.
  bool out_of_regions() { return _finger == _heap_end; }

  // Returns the task with the given id
  CMTask* task(int id) {
    assert(0 <= id && id < (int) _active_tasks,
           "task id not within active bounds");
    return _tasks[id];
  }

  // Returns the task queue with the given id
  CMTaskQueue* task_queue(int id) {
    assert(0 <= id && id < (int) _active_tasks,
           "task queue id not within active bounds");
    return (CMTaskQueue*) _task_queues->queue(id);
  }

  // Returns the task queue set
  CMTaskQueueSet* task_queues() { return _task_queues; }

  // Access / manipulation of the overflow flag which is set to
  // indicate that the global stack or region stack has overflown
  bool has_overflown()       { return _has_overflown; }
  void set_has_overflown()   { _has_overflown = true; }
  void clear_has_overflown() { _has_overflown = false; }

  bool has_aborted()          { return _has_aborted; }
  bool restart_for_overflow() { return _restart_for_overflow; }

  // Methods to enter the two overflow sync barriers
  void enter_first_sync_barrier(int task_num);
  void enter_second_sync_barrier(int task_num);

 public:
  // Manipulation of the global mark stack.
  // Notice that the first mark_stack_push is CAS-based, whereas the
  // two below are Mutex-based. This is OK since the first one is only
  // called during evacuation pauses and doesn't compete with the
  // other two (which are called by the marking tasks during
  // concurrent marking or remark).
  bool mark_stack_push(oop p) {
    _markStack.par_push(p);
    if (_markStack.overflow()) {
      set_has_overflown();
      return false;
    }
    return true;
  }
  bool mark_stack_push(oop* arr, int n) {
    _markStack.par_push_arr(arr, n);
    if (_markStack.overflow()) {
      set_has_overflown();
      return false;
    }
    return true;
  }
  void mark_stack_pop(oop* arr, int max, int* n) {
    _markStack.par_pop_arr(arr, max, n);
  }
  size_t mark_stack_size()                { return _markStack.size(); }
  size_t partial_mark_stack_size_target() { return _markStack.maxElems()/3; }
  bool mark_stack_overflow()              { return _markStack.overflow(); }
  bool mark_stack_empty()                 { return _markStack.isEmpty(); }

  // Manipulation of the region stack
  bool region_stack_push(MemRegion mr) {
    // Currently we only call the lock-free version during evacuation
    // pauses.
    assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");

    _regionStack.push(mr);
    if (_regionStack.overflow()) {
      set_has_overflown();
      return false;
    }
    return true;
  }
#if 0
  // Currently this is not used. See the comment in the .cpp file.
  MemRegion region_stack_pop() { return _regionStack.pop(); }
#endif // 0

  bool region_stack_push_with_lock(MemRegion mr) {
    // Currently we only call the lock-based version during either
    // concurrent marking or remark.
    assert(!SafepointSynchronize::is_at_safepoint() || !concurrent(),
           "if we are at a safepoint it should be the remark safepoint");

    _regionStack.push_with_lock(mr);
    if (_regionStack.overflow()) {
      set_has_overflown();
      return false;
    }
    return true;
  }
  MemRegion region_stack_pop_with_lock() {
    // Currently we only call the lock-based version during either
    // concurrent marking or remark.
    assert(!SafepointSynchronize::is_at_safepoint() || !concurrent(),
           "if we are at a safepoint it should be the remark safepoint");

    return _regionStack.pop_with_lock();
  }

  int  region_stack_size()     { return _regionStack.size(); }
  bool region_stack_overflow() { return _regionStack.overflow(); }
  bool region_stack_empty()    { return _regionStack.isEmpty(); }

  bool concurrent_marking_in_progress() {
    return _concurrent_marking_in_progress;
  }
  void set_concurrent_marking_in_progress() {
    _concurrent_marking_in_progress = true;
  }
  void clear_concurrent_marking_in_progress() {
    _concurrent_marking_in_progress = false;
  }

  void update_accum_task_vtime(int i, double vtime) {
    _accum_task_vtime[i] += vtime;
  }

  double all_task_accum_vtime() {
    double ret = 0.0;
    for (int i = 0; i < (int)_max_task_num; ++i)
      ret += _accum_task_vtime[i];
    return ret;
  }

  // Attempts to steal an object from the task queues of other tasks
  bool try_stealing(int task_num, int* hash_seed, oop& obj) {
    return _task_queues->steal(task_num, hash_seed, obj);
  }

  // It grays an object by first marking it. Then, if it's behind the
  // global finger, it also pushes it on the global stack.
  void deal_with_reference(oop obj);

  ConcurrentMark(ReservedSpace rs, int max_regions);
  ~ConcurrentMark();
  ConcurrentMarkThread* cmThread() { return _cmThread; }

  CMBitMapRO* prevMarkBitMap() const { return _prevMarkBitMap; }
  CMBitMap*   nextMarkBitMap() const { return _nextMarkBitMap; }

  // The following three are interaction between CM and
  // G1CollectedHeap

  // This notifies CM that a root during initial-mark needs to be
  // grayed and it's MT-safe. Currently, we just mark it. But, in the
  // future, we can experiment with pushing it on the stack and we can
  // do this without changing G1CollectedHeap.
  void grayRoot(oop p);
  // It's used during evacuation pauses to gray a region, if
  // necessary, and it's MT-safe. It assumes that the caller has
  // marked any objects on that region. If _should_gray_objects is
  // true and we're still doing concurrent marking, the region is
  // pushed on the region stack, if it is located below the global
  // finger, otherwise we do nothing.
  void grayRegionIfNecessary(MemRegion mr);
  // It's used during evacuation pauses to mark and, if necessary,
  // gray a single object and it's MT-safe. It assumes the caller did
  // not mark the object. If _should_gray_objects is true and we're
  // still doing concurrent marking, the object is pushed on the
  // global stack, if it is located below the global finger, otherwise
  // we do nothing.
  void markAndGrayObjectIfNecessary(oop p);

  // It iterates over the heap and for each object it comes across it
  // will dump the contents of its reference fields, as well as
  // liveness information for the object and its referents. The dump
  // will be written to a file with the following name:
  // G1PrintReachableBaseFile + "." + str. use_prev_marking decides
  // whether the prev (use_prev_marking == true) or next
  // (use_prev_marking == false) marking information will be used to
  // determine the liveness of each object / referent. If all is true,
  // all objects in the heap will be dumped, otherwise only the live
  // ones. In the dump the following symbols / abbreviations are used:
  //   M : an explicitly live object (its bitmap bit is set)
  //   > : an implicitly live object (over tams)
  //   O : an object outside the G1 heap (typically: in the perm gen)
  //   NOT : a reference field whose referent is not live
  //   AND MARKED : indicates that an object is both explicitly and
  //   implicitly live (it should be one or the other, not both)
  void print_reachable(const char* str,
                       bool use_prev_marking, bool all) PRODUCT_RETURN;

  // Clear the next marking bitmap (will be called concurrently).
  void clearNextBitmap();

  // main CMS steps and related support
  void checkpointRootsInitial();

  // These two do the work that needs to be done before and after the
  // initial root checkpoint. Since this checkpoint can be done at two
  // different points (i.e. an explicit pause or piggy-backed on a
  // young collection), then it's nice to be able to easily share the
  // pre/post code. It might be the case that we can put everything in
  // the post method. TP
  void checkpointRootsInitialPre();
  void checkpointRootsInitialPost();

  // Do concurrent phase of marking, to a tentative transitive closure.
  void markFromRoots();

  // Process all unprocessed SATB buffers. It is called at the
  // beginning of an evacuation pause.
  void drainAllSATBBuffers();

  void checkpointRootsFinal(bool clear_all_soft_refs);
  void checkpointRootsFinalWork();
  void calcDesiredRegions();
  void cleanup();
  void completeCleanup();

  // Mark in the previous bitmap. NB: this is usually read-only, so use
  // this carefully!
  void markPrev(oop p);
  void clear(oop p);
  // Clears marks for all objects in the given range, for both prev and
  // next bitmaps. NB: the previous bitmap is usually read-only, so use
  // this carefully!
  void clearRangeBothMaps(MemRegion mr);

  // Record the current top of the mark and region stacks; a
  // subsequent oops_do() on the mark stack and
  // invalidate_entries_into_cset() on the region stack will iterate
  // only over indices valid at the time of this call.
  void set_oops_do_bound() {
    _markStack.set_oops_do_bound();
    _regionStack.set_oops_do_bound();
  }
  // Iterate over the oops in the mark stack and all local queues. It
  // also calls invalidate_entries_into_cset() on the region stack.
  void oops_do(OopClosure* f);
  // It is called at the end of an evacuation pause during marking so
  // that CM is notified of where the new end of the heap is. It
  // doesn't do anything if concurrent_marking_in_progress() is false,
  // unless the force parameter is true.
  void update_g1_committed(bool force = false);

  void complete_marking_in_collection_set();

  // It indicates that a new collection set is being chosen.
  void newCSet();
  // It registers a collection set heap region with CM. This is used
  // to determine whether any heap regions are located above the finger.
  void registerCSetRegion(HeapRegion* hr);

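  // Pause-time flow sketch (illustrative inference, not in the original
  // header): when a new collection set is chosen, the expected sequence is
  // newCSet() followed by registerCSetRegion(hr) for each chosen region, or
  // by one call to register_collection_set_finger() (below) for a whole
  // batch of regions.
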
  // Registers the maximum region-end associated with a set of
  // regions with CM. Again this is used to determine whether any
  // heap regions are located above the finger.
  void register_collection_set_finger(HeapWord* max_finger) {
    // max_finger is the highest heap region end of the regions currently
    // contained in the collection set. If this value is larger than
    // _min_finger then we need to gray objects.
    // This routine is like registerCSetRegion but for an entire
    // collection of regions.
    if (max_finger > _min_finger)
      _should_gray_objects = true;
  }

  // Returns "true" if at least one mark has been completed.
  bool at_least_one_mark_complete() { return _at_least_one_mark_complete; }

  bool isMarked(oop p) const {
    assert(p != NULL && p->is_oop(), "expected an oop");
    HeapWord* addr = (HeapWord*)p;
    assert(addr >= _nextMarkBitMap->startWord() &&
           addr < _nextMarkBitMap->endWord(), "in a region");

    return _nextMarkBitMap->isMarked(addr);
  }

  inline bool not_yet_marked(oop p) const;

  // XXX Debug code
  bool containing_card_is_marked(void* p);
  bool containing_cards_are_marked(void* start, void* last);

  bool isPrevMarked(oop p) const {
    assert(p != NULL && p->is_oop(), "expected an oop");
    HeapWord* addr = (HeapWord*)p;
    assert(addr >= _prevMarkBitMap->startWord() &&
           addr < _prevMarkBitMap->endWord(), "in a region");

    return _prevMarkBitMap->isMarked(addr);
  }

  inline bool do_yield_check(int worker_i = 0);
  inline bool should_yield();

  // Called to abort the marking cycle after a Full GC takes place.
  void abort();

  // This prints the global/local fingers. It is used for debugging.
  NOT_PRODUCT(void print_finger();)

  void print_summary_info();

  void print_worker_threads_on(outputStream* st) const;

  // The following indicate whether a given verbose level has been
  // set. Notice that anything above stats is conditional to
  // _MARKING_VERBOSE_ having been set to 1
  bool verbose_stats()
    { return _verbose_level >= stats_verbose; }
  bool verbose_low()
    { return _MARKING_VERBOSE_ && _verbose_level >= low_verbose; }
  bool verbose_medium()
    { return _MARKING_VERBOSE_ && _verbose_level >= medium_verbose; }
  bool verbose_high()
    { return _MARKING_VERBOSE_ && _verbose_level >= high_verbose; }
};
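// Illustrative pattern for the verbose predicates above (taken from
// scan_object() later in this file):
//   if (_cm->verbose_high())
//     gclog_or_tty->print_cr("[%d] we're scanning object "PTR_FORMAT,
//                            _task_id, (void*) obj);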

// A class representing a marking task.
class CMTask : public TerminatorTerminator {
 private:
  enum PrivateConstants {
    // the regular clock call is called once the number of scanned
    // words reaches this limit
    words_scanned_period          = 12*1024,
    // the regular clock call is called once the number of visited
    // references reaches this limit
    refs_reached_period           = 384,
    // initial value for the hash seed, used in the work stealing code
    init_hash_seed                = 17,
    // how many entries will be transferred between global stack and
    // local queues
    global_stack_transfer_size    = 16
  };

  int                         _task_id;
  G1CollectedHeap*            _g1h;
  ConcurrentMark*             _cm;
  CMBitMap*                   _nextMarkBitMap;
  // the task queue of this task
  CMTaskQueue*                _task_queue;
 private:
  // the task queue set---needed for stealing
  CMTaskQueueSet*             _task_queues;
  // indicates whether the task has been claimed---this is only for
  // debugging purposes
  bool                        _claimed;

  // number of calls to this task
  int                         _calls;

  // when the virtual timer reaches this time, the marking step should
  // exit
  double                      _time_target_ms;
  // the start time of the current marking step
  double                      _start_time_ms;

  // the oop closure used for iterations over oops
  OopClosure*                 _oop_closure;

  // the region this task is scanning, NULL if we're not scanning any
  HeapRegion*                 _curr_region;
  // the local finger of this task, NULL if we're not scanning a region
  HeapWord*                   _finger;
  // limit of the region this task is scanning, NULL if we're not scanning one
  HeapWord*                   _region_limit;

  // This is used only when we scan regions popped from the region
  // stack. It records what the last object on such a region we
  // scanned was. It is used to ensure that, if we abort region
  // iteration, we do not rescan the first part of the region. This
  // should be NULL when we're not scanning a region from the region
  // stack.
  HeapWord*                   _region_finger;

  // the number of words this task has scanned
  size_t                      _words_scanned;
  // When _words_scanned reaches this limit, the regular clock is
  // called. Notice that this might be decreased under certain
  // circumstances (i.e. when we believe that we did an expensive
  // operation).
  size_t                      _words_scanned_limit;
  // the initial value of _words_scanned_limit (i.e. what it was
  // before it was decreased).
  size_t                      _real_words_scanned_limit;

  // the number of references this task has visited
  size_t                      _refs_reached;
  // When _refs_reached reaches this limit, the regular clock is
  // called. Notice that this might be decreased under certain
  // circumstances (i.e. when we believe that we did an expensive
  // operation).
  size_t                      _refs_reached_limit;
  // the initial value of _refs_reached_limit (i.e. what it was before
  // it was decreased).
  size_t                      _real_refs_reached_limit;

  // used by the work stealing stuff
  int                         _hash_seed;
  // if this is true, then the task has aborted for some reason
  bool                        _has_aborted;
  // set when the task aborts because it has met its time quota
  bool                        _has_aborted_timed_out;
  // true when we're draining SATB buffers; this avoids the task
  // aborting due to SATB buffers being available (as we're already
  // dealing with them)
  bool                        _draining_satb_buffers;

  // number sequence of past step times
  NumberSeq                   _step_times_ms;
  // elapsed time of this task
  double                      _elapsed_time_ms;
  // termination time of this task
  double                      _termination_time_ms;
  // when this task got into the termination protocol
  double                      _termination_start_time_ms;

  // true when the task is during a concurrent phase, false when it is
  // in the remark phase (so, in the latter case, we do not have to
  // check all the things that we have to check during the concurrent
  // phase, i.e. SATB buffer availability...)
  bool                        _concurrent;

  TruncatedSeq                _marking_step_diffs_ms;

  // LOTS of statistics related with this task
#if _MARKING_STATS_
  NumberSeq                   _all_clock_intervals_ms;
  double                      _interval_start_time_ms;

  int                         _aborted;
  int                         _aborted_overflow;
  int                         _aborted_cm_aborted;
  int                         _aborted_yield;
  int                         _aborted_timed_out;
  int                         _aborted_satb;
  int                         _aborted_termination;

  int                         _steal_attempts;
  int                         _steals;

  int                         _clock_due_to_marking;
  int                         _clock_due_to_scanning;

  int                         _local_pushes;
  int                         _local_pops;
  int                         _local_max_size;
  int                         _objs_scanned;

  int                         _global_pushes;
  int                         _global_pops;
  int                         _global_max_size;

  int                         _global_transfers_to;
  int                         _global_transfers_from;

  int                         _region_stack_pops;

  int                         _regions_claimed;
  int                         _objs_found_on_bitmap;

  int                         _satb_buffers_processed;
#endif // _MARKING_STATS_

  // it updates the local fields after this task has claimed
  // a new region to scan
  void setup_for_region(HeapRegion* hr);
  // it brings up-to-date the limit of the region
  void update_region_limit();
  // it resets the local fields after a task has finished scanning a
  // region
  void giveup_current_region();

  // called when either the words scanned or the refs visited limit
  // has been reached
  void reached_limit();
  // recalculates the words scanned and refs visited limits
  void recalculate_limits();
  // decreases the words scanned and refs visited limits when we reach
  // an expensive operation
  void decrease_limits();
  // it checks whether the words scanned or refs visited reached their
  // respective limit and calls reached_limit() if they have
  void check_limits() {
    if (_words_scanned >= _words_scanned_limit ||
        _refs_reached >= _refs_reached_limit)
      reached_limit();
  }
  // this is supposed to be called regularly during a marking step as
  // it checks a bunch of conditions that might cause the marking step
  // to abort
  void regular_clock_call();
  bool concurrent() { return _concurrent; }

 public:
  // It resets the task; it should be called right at the beginning of
  // a marking phase.
  void reset(CMBitMap* _nextMarkBitMap);
  // it clears all the fields that correspond to a claimed region.
  void clear_region_fields();

  void set_concurrent(bool concurrent) { _concurrent = concurrent; }

  // The main method of this class which performs a marking step
  // trying not to exceed the given duration. However, it might exit
  // prematurely, according to some conditions (i.e. SATB buffers are
  // available for processing).
  void do_marking_step(double target_ms);

  // These two calls start and stop the timer
  void record_start_time() {
    _elapsed_time_ms = os::elapsedTime() * 1000.0;
  }
  void record_end_time() {
    _elapsed_time_ms = os::elapsedTime() * 1000.0 - _elapsed_time_ms;
  }

  // returns the task ID
  int task_id() { return _task_id; }

  // From TerminatorTerminator. It determines whether this task should
  // exit the termination protocol after it's entered it.
  virtual bool should_exit_termination();

  HeapWord* finger() { return _finger; }

  bool has_aborted()       { return _has_aborted; }
  void set_has_aborted()   { _has_aborted = true; }
  void clear_has_aborted() { _has_aborted = false; }
  bool claimed() { return _claimed; }

  void set_oop_closure(OopClosure* oop_closure) {
    _oop_closure = oop_closure;
  }

  // It grays the object by marking it and, if necessary, pushing it
  // on the local queue
  void deal_with_reference(oop obj);

  // It scans an object and visits its children.
  void scan_object(oop obj) {
    assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");

    if (_cm->verbose_high())
      gclog_or_tty->print_cr("[%d] we're scanning object "PTR_FORMAT,
                             _task_id, (void*) obj);

    size_t obj_size = obj->size();
    _words_scanned += obj_size;

    obj->oop_iterate(_oop_closure);
    statsOnly( ++_objs_scanned );
    check_limits();
  }

  // It pushes an object on the local queue.
  void push(oop obj);

  // These two move entries to/from the global stack.
  void move_entries_to_global_stack();
  void get_entries_from_global_stack();

  // It pops and scans objects from the local queue. If partially is
  // true, then it stops when the queue size is of a given limit. If
  // partially is false, then it stops when the queue is empty.
  void drain_local_queue(bool partially);
  // It moves entries from the global stack to the local queue and
  // drains the local queue. If partially is true, then it stops when
  // both the global stack and the local queue reach a given size. If
  // partially is false, it tries to empty them totally.
  void drain_global_stack(bool partially);
  // It keeps picking SATB buffers and processing them until no SATB
  // buffers are available.
  void drain_satb_buffers();
  // It keeps popping regions from the region stack and processing
  // them until the region stack is empty.
  void drain_region_stack(BitMapClosure* closure);

  // moves the local finger to a new location
  inline void move_finger_to(HeapWord* new_finger) {
    assert(new_finger >= _finger && new_finger < _region_limit, "invariant");
    _finger = new_finger;
  }

  // moves the region finger to a new location
  inline void move_region_finger_to(HeapWord* new_finger) {
    assert(new_finger < _cm->finger(), "invariant");
    _region_finger = new_finger;
  }

  CMTask(int task_num, ConcurrentMark *cm,
         CMTaskQueue* task_queue, CMTaskQueueSet* task_queues);

  // it prints statistics associated with this task
  void print_stats();

#if _MARKING_STATS_
  void increase_objs_found_on_bitmap() { ++_objs_found_on_bitmap; }
#endif // _MARKING_STATS_
};
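
// Usage sketch (illustrative, not in the original header): the concurrent
// mark thread gives each CMTask a time budget per invocation, roughly:
//   task->record_start_time();
//   task->do_marking_step(10.0 /* target duration in ms */);
//   task->record_end_time();
// A task that returns with has_aborted() set is re-invoked after any overflow
// handling (see the two sync barriers in ConcurrentMark).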