annotate src/share/vm/gc_implementation/g1/heapRegion.hpp @ 1886:72a161e62cc4
6991377: G1: race between concurrent refinement and humongous object allocation
Summary: There is a race between the concurrent refinement threads and humongous object allocation that can cause the concurrent refinement threads to corrupt the part of the BOT that is being initialized by the humongous object allocation operation. The solution is to do the humongous object allocation in careful steps to ensure that the concurrent refinement threads always have a consistent view of the BOT, region contents, and top. The fix includes some very minor tidying up in sparsePRT.
Reviewed-by: jcoomes, johnc, ysr
author:   tonyp
date:     Sat, 16 Oct 2010 17:12:19 -0400
parents:  5cbac8938c4c
children: f95d63e2154a
/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SERIALGC

// A HeapRegion is the smallest piece of a G1CollectedHeap that
// can be collected independently.

// NOTE: Although a HeapRegion is a Space, its
// Space::initDirtyCardClosure method must not be called.
// The problem is that the existence of this method breaks
// the independence of barrier sets from remembered sets.
// The solution is to remove this method from the definition
// of a Space.

class CompactibleSpace;
class ContiguousSpace;
class HeapRegionRemSet;
class HeapRegionRemSetIterator;
class HeapRegion;

// A dirty card to oop closure for heap regions. It
// knows how to get the G1 heap and how to use the bitmap
// in the concurrent marker used by G1 to filter remembered
// sets.

class HeapRegionDCTOC : public ContiguousSpaceDCTOC {
public:
  // Specification of possible DirtyCardToOopClosure filtering.
  enum FilterKind {
    NoFilterKind,
    IntoCSFilterKind,
    OutOfRegionFilterKind
  };

protected:
  HeapRegion* _hr;
  FilterKind _fk;
  G1CollectedHeap* _g1;

  void walk_mem_region_with_cl(MemRegion mr,
                               HeapWord* bottom, HeapWord* top,
                               OopClosure* cl);

  // We don't specialize this for FilteringClosure; filtering is handled by
  // the "FilterKind" mechanism. But we provide this to avoid a compiler
  // warning.
  void walk_mem_region_with_cl(MemRegion mr,
                               HeapWord* bottom, HeapWord* top,
                               FilteringClosure* cl) {
    HeapRegionDCTOC::walk_mem_region_with_cl(mr, bottom, top,
                                             (OopClosure*)cl);
  }

  // Get the actual top of the area on which the closure will
  // operate, given where the top is assumed to be (the end of the
  // memory region passed to do_MemRegion) and where the object
  // at the top is assumed to start. For example, an object may
  // start at the top but actually extend past the assumed top,
  // in which case the top becomes the end of the object.
  HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj) {
    return ContiguousSpaceDCTOC::get_actual_top(top, top_obj);
  }

  // Walk the given memory region from bottom to (actual) top
  // looking for objects and applying the oop closure (_cl) to
  // them. The base implementation of this treats the area as
  // blocks, where a block may or may not be an object. Sub-
  // classes should override this to provide more accurate
  // or possibly more efficient walking.
  void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top) {
    Filtering_DCTOC::walk_mem_region(mr, bottom, top);
  }

public:
  HeapRegionDCTOC(G1CollectedHeap* g1,
                  HeapRegion* hr, OopClosure* cl,
                  CardTableModRefBS::PrecisionStyle precision,
                  FilterKind fk);
};


// The complicating factor is that BlockOffsetTable diverged
// significantly, and we need functionality that is only in the G1 version.
// So I copied that code, which led to an alternate G1 version of
// OffsetTableContigSpace. If the two versions of BlockOffsetTable could
// be reconciled, then G1OffsetTableContigSpace could go away.

// The idea behind time stamps is the following. Doing a save_marks on
// all regions at every GC pause is time consuming (if I remember
// correctly, 10ms or so). So, we would like to do that only for regions
// that are GC alloc regions. To achieve this, we use time
// stamps. For every evacuation pause, G1CollectedHeap generates a
// unique time stamp (essentially a counter that gets
// incremented). Every time we want to call save_marks on a region,
// we set the saved_mark_word to top and also copy the current GC
// time stamp to the time stamp field of the space. Reading the
// saved_mark_word involves checking the time stamp of the
// region. If it is the same as the current GC time stamp, then we
// can safely read the saved_mark_word field, as it is valid. If the
// time stamp of the region is not the same as the current GC time
// stamp, then we instead read top, as the saved_mark_word field is
// invalid. Time stamps (on the regions and also on the
// G1CollectedHeap) are reset at every cleanup (we iterate over
// the regions anyway) and at the end of a Full GC. The current scheme
// that uses sequential unsigned ints will fail only if we have 4b
// evacuation pauses between two cleanups, which is _highly_ unlikely.

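// A minimal sketch of the read path this scheme implies. It is illustrative
// only, not the authoritative implementation, and assumes G1CollectedHeap
// exposes the current stamp via a get_gc_time_stamp() accessor:
//
//   HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
//     G1CollectedHeap* g1h = G1CollectedHeap::heap();
//     if (_gc_time_stamp == g1h->get_gc_time_stamp()) {
//       return ContiguousSpace::saved_mark_word();  // stamp matches: valid
//     } else {
//       return top();  // stale stamp: the saved mark is invalid, read top
//     }
//   }
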
class G1OffsetTableContigSpace: public ContiguousSpace {
  friend class VMStructs;
 protected:
  G1BlockOffsetArrayContigSpace _offsets;
  Mutex _par_alloc_lock;
  volatile unsigned _gc_time_stamp;

 public:
  // Constructor. If "is_zeroed" is true, the MemRegion "mr" may be
  // assumed to contain zeros.
  G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                           MemRegion mr, bool is_zeroed = false);

  void set_bottom(HeapWord* value);
  void set_end(HeapWord* value);

  virtual HeapWord* saved_mark_word() const;
  virtual void set_saved_mark();
  void reset_gc_time_stamp() { _gc_time_stamp = 0; }

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  HeapWord* block_start(const void* p);
  HeapWord* block_start_const(const void* p) const;

  // Add offset table update.
  virtual HeapWord* allocate(size_t word_size);
  HeapWord* par_allocate(size_t word_size);

  // MarkSweep support phase3
  virtual HeapWord* initialize_threshold();
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);

  virtual void print() const;
};
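
// A plausible sketch (an assumption, not quoted from the implementation
// file) of how par_allocate can serialize on _par_alloc_lock and defer to
// the single-threaded allocate, which also performs the offset table update:
//
//   HeapWord* G1OffsetTableContigSpace::par_allocate(size_t word_size) {
//     MutexLocker ml(&_par_alloc_lock);
//     return allocate(word_size);
//   }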

class HeapRegion: public G1OffsetTableContigSpace {
  friend class VMStructs;
 private:

  enum HumongousType {
    NotHumongous = 0,
    StartsHumongous,
    ContinuesHumongous
  };

  // The next filter kind that should be used for a "new_dcto_cl" call with
  // the "traditional" signature.
  HeapRegionDCTOC::FilterKind _next_fk;

  // Requires that the region "mr" be dense with objects, and begin and end
  // with an object.
  void oops_in_mr_iterate(MemRegion mr, OopClosure* cl);

  // The remembered set for this region.
  // (Might want to make this "inline" later, to avoid some alloc failure
  // issues.)
  HeapRegionRemSet* _rem_set;

  G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; }

 protected:
  // If this region is a member of a HeapRegionSeq, the index in that
  // sequence, otherwise -1.
  int _hrs_index;

  HumongousType _humongous_type;
  // For a humongous region, region in which it starts.
  HeapRegion* _humongous_start_region;
  // For the start region of a humongous sequence, its original end().
  HeapWord* _orig_end;

  // True iff the region is in current collection_set.
  bool _in_collection_set;

  // True iff the region is on the unclean list, waiting to be zero filled.
  bool _is_on_unclean_list;

  // True iff the region is on the free list, ready for allocation.
  bool _is_on_free_list;

  // Is this or has it been an allocation region in the current collection
  // pause.
  bool _is_gc_alloc_region;

  // True iff an attempt to evacuate an object in the region failed.
  bool _evacuation_failed;

  // A heap region may be a member of one of a number of special subsets,
  // each represented as a linked list through the field below. Currently,
  // these sets include:
  //   The collection set.
  //   The set of allocation regions used in a collection pause.
  //   Spaces that may contain gray objects.
  HeapRegion* _next_in_special_set;

  // next region in the young "generation" region set
  HeapRegion* _next_young_region;

  // Next region whose cards need cleaning
  HeapRegion* _next_dirty_cards_region;

  // For parallel heapRegion traversal.
  jint _claimed;

  // We use concurrent marking to determine the amount of live data
  // in each heap region.
  size_t _prev_marked_bytes;    // Bytes known to be live via last completed marking.
  size_t _next_marked_bytes;    // Bytes known to be live via in-progress marking.

  // See "sort_index" method. -1 means it is not in the array.
  int _sort_index;

  // <PREDICTION>
  double _gc_efficiency;
  // </PREDICTION>

  enum YoungType {
    NotYoung,                   // a region is not young
    Young,                      // a region is young
    Survivor                    // a region is young and it contains
                                // survivors
  };

  volatile YoungType _young_type;
  int _young_index_in_cset;
  SurvRateGroup* _surv_rate_group;
  int _age_index;

  // The start of the unmarked area. The unmarked area extends from this
  // word until the top and/or end of the region, and is the part
  // of the region for which no marking was done, i.e. objects may
  // have been allocated in this part since the last mark phase.
  // "prev" is the top at the start of the last completed marking.
  // "next" is the top at the start of the in-progress marking (if any.)
  HeapWord* _prev_top_at_mark_start;
  HeapWord* _next_top_at_mark_start;
  // If a collection pause is in progress, this is the top at the start
  // of that pause.

  // We've counted the marked bytes of objects below here.
  HeapWord* _top_at_conc_mark_count;

  void init_top_at_mark_start() {
    assert(_prev_marked_bytes == 0 &&
           _next_marked_bytes == 0,
           "Must be called after zero_marked_bytes.");
    HeapWord* bot = bottom();
    _prev_top_at_mark_start = bot;
    _next_top_at_mark_start = bot;
    _top_at_conc_mark_count = bot;
  }

  jint _zfs;             // A member of ZeroFillState. Protected by ZF_lock.
  Thread* _zero_filler;  // If _zfs is ZeroFilling, the thread that (last)
                         // made it so.

  void set_young_type(YoungType new_type) {
    //assert(_young_type != new_type, "setting the same type" );
    // TODO: add more assertions here
    _young_type = new_type;
  }

  // Cached attributes used in the collection set policy information

  // The RSet length that was added to the total value
  // for the collection set.
  size_t _recorded_rs_length;

  // The predicted elapsed time that was added to the total value
  // for the collection set.
  double _predicted_elapsed_time_ms;

  // The predicted number of bytes to copy that was added to
  // the total value for the collection set.
  size_t _predicted_bytes_to_copy;

 public:
  // If "is_zeroed" is "true", the region "mr" can be assumed to contain zeros.
  HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
             MemRegion mr, bool is_zeroed);

  static int LogOfHRGrainBytes;
  static int LogOfHRGrainWords;
  // The normal type of these should be size_t. However, they used to
  // be members of an enum before and they are assumed by the
  // compilers to be ints. To avoid going and fixing all their uses,
  // I'm declaring them as ints. I'm not anticipating heap region
  // sizes to reach anywhere near 2g, so using an int here is safe.
  static int GrainBytes;
  static int GrainWords;
  static int CardsPerRegion;

  // It sets up the heap region size (GrainBytes / GrainWords), as
  // well as other related fields that are based on the heap region
  // size (LogOfHRGrainBytes / LogOfHRGrainWords /
  // CardsPerRegion). All those fields are considered constant
  // throughout the JVM's execution, therefore they should only be set
  // up once during initialization time.
  static void setup_heap_region_size(uintx min_heap_size);

  enum ClaimValues {
    InitialClaimValue     = 0,
    FinalCountClaimValue  = 1,
    NoteEndClaimValue     = 2,
    ScrubRemSetClaimValue = 3,
    ParVerifyClaimValue   = 4,
    RebuildRSClaimValue   = 5
  };

  // Concurrent refinement requires contiguous heap regions (in which TLABs
  // might be allocated) to be zero-filled. Each region therefore has a
  // zero-fill-state.
  enum ZeroFillState {
    NotZeroFilled,
    ZeroFilling,
    ZeroFilled,
    Allocated
  };

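  // The zero-fill setters further below suggest the following life cycle
  // for _zfs (an inferred summary, not a normative specification):
  //
  //   NotZeroFilled --set_zero_fill_in_progress(t)--> ZeroFilling
  //   ZeroFilling   --set_zero_fill_complete()------> ZeroFilled
  //   ZeroFilled    --set_zero_fill_allocated()-----> Allocated
  //   any state     --set_zero_fill_needed()--------> NotZeroFilled
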
  // If this region is a member of a HeapRegionSeq, the index in that
  // sequence, otherwise -1.
  int hrs_index() const { return _hrs_index; }
  void set_hrs_index(int index) { _hrs_index = index; }

  // The number of bytes marked live in the region in the last marking phase.
  size_t marked_bytes()    { return _prev_marked_bytes; }
  // The number of bytes counted in the next marking.
  size_t next_marked_bytes() { return _next_marked_bytes; }
  // The number of bytes live wrt the next marking.
  size_t next_live_bytes() {
    return (top() - next_top_at_mark_start())
      * HeapWordSize
      + next_marked_bytes();
  }

  // A lower bound on the amount of garbage bytes in the region.
  size_t garbage_bytes() {
    size_t used_at_mark_start_bytes =
      (prev_top_at_mark_start() - bottom()) * HeapWordSize;
    assert(used_at_mark_start_bytes >= marked_bytes(),
           "Can't mark more than we have.");
    return used_at_mark_start_bytes - marked_bytes();
  }

  // An upper bound on the number of live bytes in the region.
  size_t max_live_bytes() { return used() - garbage_bytes(); }

  void add_to_marked_bytes(size_t incr_bytes) {
    _next_marked_bytes = _next_marked_bytes + incr_bytes;
    guarantee( _next_marked_bytes <= used(), "invariant" );
  }

  void zero_marked_bytes() {
    _prev_marked_bytes = _next_marked_bytes = 0;
  }

  bool isHumongous() const { return _humongous_type != NotHumongous; }
  bool startsHumongous() const { return _humongous_type == StartsHumongous; }
  bool continuesHumongous() const { return _humongous_type == ContinuesHumongous; }
  // For a humongous region, region in which it starts.
  HeapRegion* humongous_start_region() const {
    return _humongous_start_region;
  }

  // Causes the current region to represent a humongous object spanning "n"
  // regions.
  void set_startsHumongous(HeapWord* new_end);

  // The regions that continue a humongous sequence should be added using
  // this method, in increasing address order.
  void set_continuesHumongous(HeapRegion* start);

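  // An illustrative ordering inferred from the two comments above and this
  // changeset's summary (the authoritative sequence lives in the allocation
  // path, not in this header): the start region is published first with its
  // new end, and each continuation region is then linked in increasing
  // address order.
  //
  //   first_hr->set_startsHumongous(new_end);
  //   // then, for each subsequent region hr of the sequence, in address order:
  //   hr->set_continuesHumongous(first_hr);
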
  // If the region has a remembered set, return a pointer to it.
  HeapRegionRemSet* rem_set() const {
    return _rem_set;
  }

  // True iff the region is in current collection_set.
  bool in_collection_set() const {
    return _in_collection_set;
  }
  void set_in_collection_set(bool b) {
    _in_collection_set = b;
  }
  HeapRegion* next_in_collection_set() {
    assert(in_collection_set(), "should only invoke on member of CS.");
    assert(_next_in_special_set == NULL ||
           _next_in_special_set->in_collection_set(),
           "Malformed CS.");
    return _next_in_special_set;
  }
  void set_next_in_collection_set(HeapRegion* r) {
    assert(in_collection_set(), "should only invoke on member of CS.");
    assert(r == NULL || r->in_collection_set(), "Malformed CS.");
    _next_in_special_set = r;
  }

  // True iff it is or has been an allocation region in the current
  // collection pause.
  bool is_gc_alloc_region() const {
    return _is_gc_alloc_region;
  }
  void set_is_gc_alloc_region(bool b) {
    _is_gc_alloc_region = b;
  }
  HeapRegion* next_gc_alloc_region() {
    assert(is_gc_alloc_region(), "should only invoke on member of CS.");
    assert(_next_in_special_set == NULL ||
           _next_in_special_set->is_gc_alloc_region(),
           "Malformed CS.");
    return _next_in_special_set;
  }
  void set_next_gc_alloc_region(HeapRegion* r) {
    assert(is_gc_alloc_region(), "should only invoke on member of CS.");
    assert(r == NULL || r->is_gc_alloc_region(), "Malformed CS.");
    _next_in_special_set = r;
  }

  bool is_on_free_list() {
    return _is_on_free_list;
  }

  void set_on_free_list(bool b) {
    _is_on_free_list = b;
  }

  HeapRegion* next_from_free_list() {
    assert(is_on_free_list(),
           "Should only invoke on free space.");
    assert(_next_in_special_set == NULL ||
           _next_in_special_set->is_on_free_list(),
           "Malformed Free List.");
    return _next_in_special_set;
  }

  void set_next_on_free_list(HeapRegion* r) {
    assert(r == NULL || r->is_on_free_list(), "Malformed free list.");
    _next_in_special_set = r;
  }

  bool is_on_unclean_list() {
    return _is_on_unclean_list;
  }

  void set_on_unclean_list(bool b);

  HeapRegion* next_from_unclean_list() {
    assert(is_on_unclean_list(),
           "Should only invoke on unclean space.");
    assert(_next_in_special_set == NULL ||
           _next_in_special_set->is_on_unclean_list(),
           "Malformed unclean List.");
    return _next_in_special_set;
  }

  void set_next_on_unclean_list(HeapRegion* r);

  HeapRegion* get_next_young_region() { return _next_young_region; }
  void set_next_young_region(HeapRegion* hr) {
    _next_young_region = hr;
  }

  HeapRegion* get_next_dirty_cards_region() const { return _next_dirty_cards_region; }
  HeapRegion** next_dirty_cards_region_addr() { return &_next_dirty_cards_region; }
  void set_next_dirty_cards_region(HeapRegion* hr) { _next_dirty_cards_region = hr; }
  bool is_on_dirty_cards_region_list() const { return get_next_dirty_cards_region() != NULL; }

  // Allows logical separation between objects allocated before and after.
  void save_marks();

  // Reset HR stuff to default values.
  void hr_clear(bool par, bool clear_space);

  void initialize(MemRegion mr, bool clear_space, bool mangle_space);

  // Ensure that "this" is zero-filled.
  void ensure_zero_filled();
  // This one requires that the calling thread holds ZF_mon.
  void ensure_zero_filled_locked();

  // Get the start of the unmarked area in this region.
  HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; }
  HeapWord* next_top_at_mark_start() const { return _next_top_at_mark_start; }

  // Apply "cl->do_oop" to (the addresses of) all reference fields in objects
  // allocated in the current region before the last call to "save_mark".
  void oop_before_save_marks_iterate(OopClosure* cl);

  // This call determines the "filter kind" argument that will be used for
  // the next call to "new_dcto_cl" on this region with the "traditional"
  // signature (i.e., the call below.) The default, in the absence of a
  // preceding call to this method, is "NoFilterKind", and a call to this
  // method is necessary for each such call, or else it reverts to the
  // default.
  // (This is really ugly, but all other methods I could think of changed a
  // lot of main-line code for G1.)
  void set_next_filter_kind(HeapRegionDCTOC::FilterKind nfk) {
    _next_fk = nfk;
  }

  DirtyCardToOopClosure*
  new_dcto_closure(OopClosure* cl,
                   CardTableModRefBS::PrecisionStyle precision,
                   HeapRegionDCTOC::FilterKind fk);

#if WHASSUP
  DirtyCardToOopClosure*
  new_dcto_closure(OopClosure* cl,
                   CardTableModRefBS::PrecisionStyle precision,
                   HeapWord* boundary) {
    assert(boundary == NULL, "This arg doesn't make sense here.");
    DirtyCardToOopClosure* res = new_dcto_closure(cl, precision, _next_fk);
    _next_fk = HeapRegionDCTOC::NoFilterKind;
    return res;
  }
#endif

  //
  // Note the start or end of marking. This tells the heap region
  // that the collector is about to start or has finished (concurrently)
  // marking the heap.
  //

  // Note the start of a marking phase. Record the
  // start of the unmarked area of the region here.
  void note_start_of_marking(bool during_initial_mark) {
    init_top_at_conc_mark_count();
    _next_marked_bytes = 0;
    if (during_initial_mark && is_young() && !is_survivor())
      _next_top_at_mark_start = bottom();
    else
      _next_top_at_mark_start = top();
  }

  // Note the end of a marking phase. Install the start of
  // the unmarked area that was captured at start of marking.
  void note_end_of_marking() {
    _prev_top_at_mark_start = _next_top_at_mark_start;
    _prev_marked_bytes = _next_marked_bytes;
    _next_marked_bytes = 0;

    guarantee(_prev_marked_bytes <=
              (size_t) (prev_top_at_mark_start() - bottom()) * HeapWordSize,
              "invariant");
  }

  // After an evacuation, we need to update _next_top_at_mark_start
  // to be the current top. Note this is only valid if we have only
  // ever evacuated into this region. If we evacuate, allocate, and
  // then evacuate we are in deep doodoo.
  void note_end_of_copying() {
    assert(top() >= _next_top_at_mark_start, "Increase only");
    _next_top_at_mark_start = top();
  }

  // Returns "false" iff no object in the region was allocated when the
  // last mark phase ended.
  bool is_marked() { return _prev_top_at_mark_start != bottom(); }

  // If "is_marked()" is true, then this is the index of the region in
  // an array constructed at the end of marking of the regions in a
  // "desirability" order.
  int sort_index() {
    return _sort_index;
  }
  void set_sort_index(int i) {
    _sort_index = i;
  }

  void init_top_at_conc_mark_count() {
    _top_at_conc_mark_count = bottom();
  }

  void set_top_at_conc_mark_count(HeapWord *cur) {
    assert(bottom() <= cur && cur <= end(), "Sanity.");
    _top_at_conc_mark_count = cur;
  }

  HeapWord* top_at_conc_mark_count() {
    return _top_at_conc_mark_count;
  }

  void reset_during_compaction() {
    guarantee( isHumongous() && startsHumongous(),
               "should only be called for humongous regions");

    zero_marked_bytes();
    init_top_at_mark_start();
  }

  // <PREDICTION>
  void calc_gc_efficiency(void);
  double gc_efficiency() { return _gc_efficiency;}
  // </PREDICTION>

  bool is_young() const    { return _young_type != NotYoung; }
  bool is_survivor() const { return _young_type == Survivor; }

  int young_index_in_cset() const { return _young_index_in_cset; }
  void set_young_index_in_cset(int index) {
    assert( (index == -1) || is_young(), "pre-condition" );
    _young_index_in_cset = index;
  }

  int age_in_surv_rate_group() {
    assert( _surv_rate_group != NULL, "pre-condition" );
    assert( _age_index > -1, "pre-condition" );
    return _surv_rate_group->age_in_group(_age_index);
  }

  void record_surv_words_in_group(size_t words_survived) {
    assert( _surv_rate_group != NULL, "pre-condition" );
    assert( _age_index > -1, "pre-condition" );
    int age_in_group = age_in_surv_rate_group();
    _surv_rate_group->record_surviving_words(age_in_group, words_survived);
  }

  int age_in_surv_rate_group_cond() {
    if (_surv_rate_group != NULL)
      return age_in_surv_rate_group();
    else
      return -1;
  }

  SurvRateGroup* surv_rate_group() {
    return _surv_rate_group;
  }

  void install_surv_rate_group(SurvRateGroup* surv_rate_group) {
    assert( surv_rate_group != NULL, "pre-condition" );
    assert( _surv_rate_group == NULL, "pre-condition" );
    assert( is_young(), "pre-condition" );

    _surv_rate_group = surv_rate_group;
    _age_index = surv_rate_group->next_age_index();
  }

  void uninstall_surv_rate_group() {
    if (_surv_rate_group != NULL) {
      assert( _age_index > -1, "pre-condition" );
      assert( is_young(), "pre-condition" );

      _surv_rate_group = NULL;
      _age_index = -1;
    } else {
      assert( _age_index == -1, "pre-condition" );
    }
  }

  void set_young() { set_young_type(Young); }

  void set_survivor() { set_young_type(Survivor); }

  void set_not_young() { set_young_type(NotYoung); }

  // Determine if an object has been allocated since the last
  // mark performed by the collector. This returns true iff the object
  // is within the unmarked area of the region.
  bool obj_allocated_since_prev_marking(oop obj) const {
    return (HeapWord *) obj >= prev_top_at_mark_start();
  }
  bool obj_allocated_since_next_marking(oop obj) const {
    return (HeapWord *) obj >= next_top_at_mark_start();
  }

  // For parallel heapRegion traversal.
  bool claimHeapRegion(int claimValue);
  jint claim_value() { return _claimed; }
  // Use this carefully: only when you're sure no one is claiming...
  void set_claim_value(int claimValue) { _claimed = claimValue; }
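
  // Typical use (an assumed pattern, not quoted from a caller): each GC
  // worker walks the region sequence and processes only the regions it
  // wins with the claim value of the current phase:
  //
  //   if (r->claimHeapRegion(HeapRegion::FinalCountClaimValue)) {
  //     // this worker now owns r for the final-count phase
  //   }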

  // Returns the "evacuation_failed" property of the region.
  bool evacuation_failed() { return _evacuation_failed; }

  // Sets the "evacuation_failed" property of the region.
  void set_evacuation_failed(bool b) {
    _evacuation_failed = b;

    if (b) {
      init_top_at_conc_mark_count();
      _next_marked_bytes = 0;
    }
  }

  // Requires that "mr" be entirely within the region.
  // Apply "cl->do_object" to all objects that intersect with "mr".
  // If the iteration encounters an unparseable portion of the region,
  // or if "cl->abort()" is true after a closure application,
  // terminate the iteration and return the address of the start of the
  // subregion that isn't done. (The two can be distinguished by querying
  // "cl->abort()".) Return of "NULL" indicates that the iteration
  // completed.
  HeapWord*
  object_iterate_mem_careful(MemRegion mr, ObjectClosure* cl);

  // In this version - if filter_young is true and the region
  // is a young region then we skip the iteration.
  HeapWord*
  oops_on_card_seq_iterate_careful(MemRegion mr,
                                   FilterOutOfRegionClosure* cl,
                                   bool filter_young);

  // A version of block start that is guaranteed to find *some* block
  // boundary at or before "p", but does no object iteration, and may
  // therefore be used safely when the heap is unparseable.
  HeapWord* block_start_careful(const void* p) const {
    return _offsets.block_start_careful(p);
  }

  // Requires that "addr" is within the region. Returns the start of the
  // first ("careful") block that starts at or after "addr", or else the
  // "end" of the region if there is no such block.
  HeapWord* next_block_start_careful(HeapWord* addr);

  // Returns the zero-fill-state of the current region.
  ZeroFillState zero_fill_state() { return (ZeroFillState)_zfs; }
  bool zero_fill_is_allocated() { return _zfs == Allocated; }
  Thread* zero_filler() { return _zero_filler; }

  // Indicate that the contents of the region are unknown, and therefore
  // might require zero-filling.
  void set_zero_fill_needed() {
    set_zero_fill_state_work(NotZeroFilled);
  }
  void set_zero_fill_in_progress(Thread* t) {
    set_zero_fill_state_work(ZeroFilling);
    _zero_filler = t;
  }
  void set_zero_fill_complete();
  void set_zero_fill_allocated() {
    set_zero_fill_state_work(Allocated);
  }

  void set_zero_fill_state_work(ZeroFillState zfs);

  // This is called when a full collection shrinks the heap.
  // We want to set the heap region to a value which says
  // it is no longer part of the heap. For now, we'll let "NotZF" fill
  // that role.
  void reset_zero_fill() {
    set_zero_fill_state_work(NotZeroFilled);
    _zero_filler = NULL;
  }

  size_t recorded_rs_length() const        { return _recorded_rs_length; }
  double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
  size_t predicted_bytes_to_copy() const   { return _predicted_bytes_to_copy; }

  void set_recorded_rs_length(size_t rs_length) {
    _recorded_rs_length = rs_length;
  }

  void set_predicted_elapsed_time_ms(double ms) {
    _predicted_elapsed_time_ms = ms;
  }

  void set_predicted_bytes_to_copy(size_t bytes) {
    _predicted_bytes_to_copy = bytes;
  }

#define HeapRegion_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
  virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
  SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DECL)

  CompactibleSpace* next_compaction_space() const;

  virtual void reset_after_compaction();

  void print() const;
  void print_on(outputStream* st) const;

  // use_prev_marking == true  -> use "prev" marking information,
  // use_prev_marking == false -> use "next" marking information
  // NOTE: Only the "prev" marking information is guaranteed to be
  // consistent most of the time, so most calls to this should use
  // use_prev_marking == true. Currently, there is only one case where
  // this is called with use_prev_marking == false, which is to verify
  // the "next" marking information at the end of remark.
  void verify(bool allow_dirty, bool use_prev_marking, bool *failures) const;

  // Override; it uses the "prev" marking information
  virtual void verify(bool allow_dirty) const;

#ifdef DEBUG
  HeapWord* allocate(size_t size);
#endif
};

// HeapRegionClosure is used for iterating over regions.
// Terminates the iteration when the "doHeapRegion" method returns "true".
class HeapRegionClosure : public StackObj {
  friend class HeapRegionSeq;
  friend class G1CollectedHeap;

  bool _complete;
  void incomplete() { _complete = false; }

 public:
  HeapRegionClosure(): _complete(true) {}

  // Typically called on each region until it returns true.
  virtual bool doHeapRegion(HeapRegion* r) = 0;

  // True after iteration if the closure was applied to all heap regions
  // and returned "false" in all cases.
  bool complete() { return _complete; }
};

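// A minimal usage sketch with a hypothetical closure (not part of this
// header): returning false from doHeapRegion keeps the iteration going,
// so the closure is applied to every region.
//
//   class CountYoungRegionsClosure : public HeapRegionClosure {
//     size_t _count;
//   public:
//     CountYoungRegionsClosure() : _count(0) {}
//     bool doHeapRegion(HeapRegion* r) {
//       if (r->is_young()) _count += 1;
//       return false;  // keep iterating
//     }
//     size_t count() const { return _count; }
//   };
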
// A linked list of heap regions. It leaves the "next" field
// unspecified; that's up to subtypes.
class RegionList VALUE_OBJ_CLASS_SPEC {
 protected:
  virtual HeapRegion* get_next(HeapRegion* chr) = 0;
  virtual void set_next(HeapRegion* chr,
                        HeapRegion* new_next) = 0;

  HeapRegion* _hd;
  HeapRegion* _tl;
  size_t _sz;

  // Protected constructor because this type is only meaningful
  // when the _get/_set next functions are defined.
  RegionList() : _hd(NULL), _tl(NULL), _sz(0) {}
 public:
  void reset() {
    _hd = NULL;
    _tl = NULL;
    _sz = 0;
  }
  HeapRegion* hd() { return _hd; }
  HeapRegion* tl() { return _tl; }
  size_t sz() { return _sz; }
  size_t length();

  bool well_formed() {
    return
      ((hd() == NULL && tl() == NULL && sz() == 0)
       || (hd() != NULL && tl() != NULL && sz() > 0))
      && (sz() == length());
  }
  virtual void insert_before_head(HeapRegion* r);
  void prepend_list(RegionList* new_list);
  virtual HeapRegion* pop();
  void dec_sz() { _sz--; }
  // Requires that "r" is an element of the list, and is not the tail.
  void delete_after(HeapRegion* r);
};

class EmptyNonHRegionList: public RegionList {
 protected:
  // Protected constructor because this type is only meaningful
  // when the _get/_set next functions are defined.
  EmptyNonHRegionList() : RegionList() {}

 public:
  void insert_before_head(HeapRegion* r) {
    // assert(r->is_empty(), "Better be empty");
    assert(!r->isHumongous(), "Better not be humongous.");
    RegionList::insert_before_head(r);
  }
  void prepend_list(EmptyNonHRegionList* new_list) {
    // assert(new_list->hd() == NULL || new_list->hd()->is_empty(),
    //        "Better be empty");
    assert(new_list->hd() == NULL || !new_list->hd()->isHumongous(),
           "Better not be humongous.");
    // assert(new_list->tl() == NULL || new_list->tl()->is_empty(),
    //        "Better be empty");
    assert(new_list->tl() == NULL || !new_list->tl()->isHumongous(),
           "Better not be humongous.");
    RegionList::prepend_list(new_list);
  }
};

class UncleanRegionList: public EmptyNonHRegionList {
 public:
  HeapRegion* get_next(HeapRegion* hr) {
    return hr->next_from_unclean_list();
  }
  void set_next(HeapRegion* hr, HeapRegion* new_next) {
    hr->set_next_on_unclean_list(new_next);
  }

  UncleanRegionList() : EmptyNonHRegionList() {}

  void insert_before_head(HeapRegion* r) {
    assert(!r->is_on_free_list(),
           "Better not already be on free list");
    assert(!r->is_on_unclean_list(),
           "Better not already be on unclean list");
    r->set_zero_fill_needed();
    r->set_on_unclean_list(true);
    EmptyNonHRegionList::insert_before_head(r);
  }
  void prepend_list(UncleanRegionList* new_list) {
    assert(new_list->tl() == NULL || !new_list->tl()->is_on_free_list(),
           "Better not already be on free list");
    assert(new_list->tl() == NULL || new_list->tl()->is_on_unclean_list(),
           "Better already be marked as on unclean list");
    assert(new_list->hd() == NULL || !new_list->hd()->is_on_free_list(),
           "Better not already be on free list");
    assert(new_list->hd() == NULL || new_list->hd()->is_on_unclean_list(),
           "Better already be marked as on unclean list");
    EmptyNonHRegionList::prepend_list(new_list);
  }
  HeapRegion* pop() {
    HeapRegion* res = RegionList::pop();
    if (res != NULL) res->set_on_unclean_list(false);
    return res;
  }
};

// Local Variables: ***
// c-indentation-style: gnu ***
// End: ***

#endif // SERIALGC