annotate src/share/vm/gc_implementation/g1/heapRegion.hpp @ 626:87fa6e083d82
6760309: G1: update remembered sets during Full GCs
Reviewed-by: iveresov, tonyp
author | apetrusenko |
date | Tue, 10 Mar 2009 00:47:05 -0700 |
parents | fe3d7c11b4b7 |
children | 7bb995fbd3c0 |
rev | line source |
342 | 1 /* |
470 | 2 * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. |
342 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, | |
20 * CA 95054 USA or visit www.sun.com if you need additional information or | |
21 * have any questions. | |
22 * | |
23 */ | |
24 | |
25 #ifndef SERIALGC | |
26 | |
27 // A HeapRegion is the smallest piece of a G1CollectedHeap that | |
28 // can be collected independently. | |
29 | |
30 // NOTE: Although a HeapRegion is a Space, its | |
31 // Space::initDirtyCardClosure method must not be called. | |
32 // The problem is that the existence of this method breaks | |
33 // the independence of barrier sets from remembered sets. | |
34 // The solution is to remove this method from the definition | |
35 // of a Space. | |
36 | |
37 class CompactibleSpace; | |
38 class ContiguousSpace; | |
39 class HeapRegionRemSet; | |
40 class HeapRegionRemSetIterator; | |
41 class HeapRegion; | |
42 | |
43 // A dirty card to oop closure for heap regions. It | |
44 // knows how to get the G1 heap and how to use the bitmap | |
45 // in the concurrent marker used by G1 to filter remembered | |
46 // sets. | |
47 | |
48 class HeapRegionDCTOC : public ContiguousSpaceDCTOC { | |
49 public: | |
50 // Specification of possible DirtyCardToOopClosure filtering. | |
51 enum FilterKind { | |
52 NoFilterKind, | |
53 IntoCSFilterKind, | |
54 OutOfRegionFilterKind | |
55 }; | |
56 | |
57 protected: | |
58 HeapRegion* _hr; | |
59 FilterKind _fk; | |
60 G1CollectedHeap* _g1; | |
61 | |
62 void walk_mem_region_with_cl(MemRegion mr, | |
63 HeapWord* bottom, HeapWord* top, | |
64 OopClosure* cl); | |
65 | |
66 // We don't specialize this for FilteringClosure; filtering is handled by | |
67 // the "FilterKind" mechanism. But we provide this to avoid a compiler | |
68 // warning. | |
69 void walk_mem_region_with_cl(MemRegion mr, | |
70 HeapWord* bottom, HeapWord* top, | |
71 FilteringClosure* cl) { | |
72 HeapRegionDCTOC::walk_mem_region_with_cl(mr, bottom, top, | |
73 (OopClosure*)cl); | |
74 } | |
75 | |
76 // Get the actual top of the area on which the closure will | |
77 // operate, given where the top is assumed to be (the end of the | |
78 // memory region passed to do_MemRegion) and where the object | |
79 // at the top is assumed to start. For example, an object may | |
80 // start at the top but actually extend past the assumed top, | |
81 // in which case the top becomes the end of the object. | |
82 HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj) { | |
83 return ContiguousSpaceDCTOC::get_actual_top(top, top_obj); | |
84 } | |
85 | |
86 // Walk the given memory region from bottom to (actual) top | |
87 // looking for objects and applying the oop closure (_cl) to | |
88 // them. The base implementation of this treats the area as | |
89 // blocks, where a block may or may not be an object. Sub- | |
90 // classes should override this to provide more accurate | |
91 // or possibly more efficient walking. | |
92 void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top) { | |
93 Filtering_DCTOC::walk_mem_region(mr, bottom, top); | |
94 } | |
95 | |
96 public: | |
97 HeapRegionDCTOC(G1CollectedHeap* g1, | |
98 HeapRegion* hr, OopClosure* cl, | |
99 CardTableModRefBS::PrecisionStyle precision, | |
100 FilterKind fk); | |
101 }; | |
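A hedged usage sketch of the closure declared above (editor's illustration, not part of this file): the caller-side names hr, oop_cl and dirty_mr are hypothetical, and the sketch assumes the do_MemRegion entry point inherited from DirtyCardToOopClosure.

    // Given (hypothetical): HeapRegion* hr; OopClosure* oop_cl; MemRegion dirty_mr
    // covering the dirty cards of hr that should be rescanned.
    HeapRegionDCTOC dcto_cl(G1CollectedHeap::heap(), hr, oop_cl,
                            CardTableModRefBS::Precise,
                            HeapRegionDCTOC::IntoCSFilterKind);
    dcto_cl.do_MemRegion(dirty_mr);  // apply oop_cl to oops on the dirty cards,
                                     // subject to the IntoCS filter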
102 | |
103 | |
104 // The complicating factor is that BlockOffsetTable diverged | |
105 // significantly, and we need functionality that is only in the G1 version. | |
106 // So I copied that code, which led to an alternate G1 version of | |
107 // OffsetTableContigSpace. If the two versions of BlockOffsetTable could | |
108 // be reconciled, then G1OffsetTableContigSpace could go away. | |
109 | |
110 // The idea behind time stamps is the following. Doing a save_marks on | |
111 // all regions at every GC pause is time consuming (if I remember | |
 112 // correctly, 10ms or so). So, we would like to do that only for regions | |
113 // that are GC alloc regions. To achieve this, we use time | |
114 // stamps. For every evacuation pause, G1CollectedHeap generates a | |
115 // unique time stamp (essentially a counter that gets | |
116 // incremented). Every time we want to call save_marks on a region, | |
117 // we set the saved_mark_word to top and also copy the current GC | |
118 // time stamp to the time stamp field of the space. Reading the | |
119 // saved_mark_word involves checking the time stamp of the | |
120 // region. If it is the same as the current GC time stamp, then we | |
121 // can safely read the saved_mark_word field, as it is valid. If the | |
122 // time stamp of the region is not the same as the current GC time | |
123 // stamp, then we instead read top, as the saved_mark_word field is | |
124 // invalid. Time stamps (on the regions and also on the | |
125 // G1CollectedHeap) are reset at every cleanup (we iterate over | |
126 // the regions anyway) and at the end of a Full GC. The current scheme | |
127 // that uses sequential unsigned ints will fail only if we have 4b | |
128 // evacuation pauses between two cleanups, which is _highly_ unlikely. | |
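As a hedged illustration of the scheme just described (an editor's sketch, not this file's implementation): it assumes a get_gc_time_stamp() query on G1CollectedHeap and the saved-mark accessor inherited from ContiguousSpace.

    HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
      G1CollectedHeap* g1h = G1CollectedHeap::heap();
      if (_gc_time_stamp < g1h->get_gc_time_stamp()) {
        // The region's stamp is stale: the saved mark was set in an
        // earlier pause, so fall back to top().
        return top();
      } else {
        // Stamps match: the saved mark recorded in this pause is valid.
        return ContiguousSpace::saved_mark_word();
      }
    }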
129 | |
130 class G1OffsetTableContigSpace: public ContiguousSpace { | |
131 friend class VMStructs; | |
132 protected: | |
133 G1BlockOffsetArrayContigSpace _offsets; | |
134 Mutex _par_alloc_lock; | |
135 volatile unsigned _gc_time_stamp; | |
136 | |
137 public: | |
138 // Constructor. If "is_zeroed" is true, the MemRegion "mr" may be | |
139 // assumed to contain zeros. | |
140 G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray, | |
141 MemRegion mr, bool is_zeroed = false); | |
142 | |
143 void set_bottom(HeapWord* value); | |
144 void set_end(HeapWord* value); | |
145 | |
146 virtual HeapWord* saved_mark_word() const; | |
147 virtual void set_saved_mark(); | |
148 void reset_gc_time_stamp() { _gc_time_stamp = 0; } | |
149 | |
356 | 150 virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space); |
151 virtual void clear(bool mangle_space); | |
342 | 152 |
153 HeapWord* block_start(const void* p); | |
154 HeapWord* block_start_const(const void* p) const; | |
155 | |
156 // Add offset table update. | |
157 virtual HeapWord* allocate(size_t word_size); | |
158 HeapWord* par_allocate(size_t word_size); | |
159 | |
160 // MarkSweep support phase3 | |
161 virtual HeapWord* initialize_threshold(); | |
162 virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end); | |
163 | |
164 virtual void print() const; | |
165 }; | |
166 | |
167 class HeapRegion: public G1OffsetTableContigSpace { | |
168 friend class VMStructs; | |
169 private: | |
170 | |
355 | 171 enum HumongousType { |
172 NotHumongous = 0, | |
173 StartsHumongous, | |
174 ContinuesHumongous | |
175 }; | |
176 | |
342 | 177 // The next filter kind that should be used for a "new_dcto_closure" call with |
178 // the "traditional" signature. | |
179 HeapRegionDCTOC::FilterKind _next_fk; | |
180 | |
181 // Requires that the region "mr" be dense with objects, and begin and end | |
182 // with an object. | |
183 void oops_in_mr_iterate(MemRegion mr, OopClosure* cl); | |
184 | |
185 // The remembered set for this region. | |
186 // (Might want to make this "inline" later, to avoid some alloc failure | |
187 // issues.) | |
188 HeapRegionRemSet* _rem_set; | |
189 | |
190 G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; } | |
191 | |
192 protected: | |
193 // If this region is a member of a HeapRegionSeq, the index in that | |
194 // sequence, otherwise -1. | |
195 int _hrs_index; | |
196 | |
355 | 197 HumongousType _humongous_type; |
342 | 198 // For a humongous region, region in which it starts. |
199 HeapRegion* _humongous_start_region; | |
 200 // For the start region of a humongous sequence, its original end(). | |
201 HeapWord* _orig_end; | |
202 | |
203 // True iff the region is in current collection_set. | |
204 bool _in_collection_set; | |
205 | |
206 // True iff the region is on the unclean list, waiting to be zero filled. | |
207 bool _is_on_unclean_list; | |
208 | |
209 // True iff the region is on the free list, ready for allocation. | |
210 bool _is_on_free_list; | |
211 | |
212 // Is this or has it been an allocation region in the current collection | |
213 // pause. | |
214 bool _is_gc_alloc_region; | |
215 | |
216 // True iff an attempt to evacuate an object in the region failed. | |
217 bool _evacuation_failed; | |
218 | |
 219 // A heap region may be a member of one of a number of special subsets, each | |
220 // represented as linked lists through the field below. Currently, these | |
221 // sets include: | |
222 // The collection set. | |
223 // The set of allocation regions used in a collection pause. | |
224 // Spaces that may contain gray objects. | |
225 HeapRegion* _next_in_special_set; | |
226 | |
227 // next region in the young "generation" region set | |
228 HeapRegion* _next_young_region; | |
229 | |
230 // For parallel heapRegion traversal. | |
231 jint _claimed; | |
232 | |
233 // We use concurrent marking to determine the amount of live data | |
234 // in each heap region. | |
235 size_t _prev_marked_bytes; // Bytes known to be live via last completed marking. | |
236 size_t _next_marked_bytes; // Bytes known to be live via in-progress marking. | |
237 | |
238 // See "sort_index" method. -1 means is not in the array. | |
239 int _sort_index; | |
240 | |
 241 // "Popular" means the region has (or at least had) a very large RS, and | |
 242 // should not be considered for membership in a collection set. | |
243 enum PopularityState { | |
244 NotPopular, | |
245 PopularPending, | |
246 Popular | |
247 }; | |
248 PopularityState _popularity; | |
249 | |
250 // <PREDICTION> | |
251 double _gc_efficiency; | |
252 // </PREDICTION> | |
253 | |
254 enum YoungType { | |
255 NotYoung, // a region is not young | |
256 ScanOnly, // a region is young and scan-only | |
257 Young, // a region is young | |
258 Survivor // a region is young and it contains | |
 259 // survivors | |
260 }; | |
261 | |
262 YoungType _young_type; | |
263 int _young_index_in_cset; | |
264 SurvRateGroup* _surv_rate_group; | |
265 int _age_index; | |
266 | |
267 // The start of the unmarked area. The unmarked area extends from this | |
268 // word until the top and/or end of the region, and is the part | |
269 // of the region for which no marking was done, i.e. objects may | |
270 // have been allocated in this part since the last mark phase. | |
271 // "prev" is the top at the start of the last completed marking. | |
272 // "next" is the top at the start of the in-progress marking (if any.) | |
273 HeapWord* _prev_top_at_mark_start; | |
274 HeapWord* _next_top_at_mark_start; | |
275 // If a collection pause is in progress, this is the top at the start | |
276 // of that pause. | |
277 | |
278 // We've counted the marked bytes of objects below here. | |
279 HeapWord* _top_at_conc_mark_count; | |
280 | |
281 void init_top_at_mark_start() { | |
282 assert(_prev_marked_bytes == 0 && | |
283 _next_marked_bytes == 0, | |
284 "Must be called after zero_marked_bytes."); | |
285 HeapWord* bot = bottom(); | |
286 _prev_top_at_mark_start = bot; | |
287 _next_top_at_mark_start = bot; | |
288 _top_at_conc_mark_count = bot; | |
289 } | |
290 | |
291 jint _zfs; // A member of ZeroFillState. Protected by ZF_lock. | |
292 Thread* _zero_filler; // If _zfs is ZeroFilling, the thread that (last) | |
293 // made it so. | |
294 | |
295 void set_young_type(YoungType new_type) { | |
296 //assert(_young_type != new_type, "setting the same type" ); | |
297 // TODO: add more assertions here | |
298 _young_type = new_type; | |
299 } | |
300 | |
301 public: | |
302 // If "is_zeroed" is "true", the region "mr" can be assumed to contain zeros. | |
303 HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray, | |
304 MemRegion mr, bool is_zeroed); | |
305 | |
306 enum SomePublicConstants { | |
307 // HeapRegions are GrainBytes-aligned | |
308 // and have sizes that are multiples of GrainBytes. | |
309 LogOfHRGrainBytes = 20, | |
310 LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize, | |
311 GrainBytes = 1 << LogOfHRGrainBytes, | |
312 GrainWords = 1 <<LogOfHRGrainWords, | |
313 MaxAge = 2, NoOfAges = MaxAge+1 | |
314 }; | |
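For concreteness, the arithmetic implied by these constants: GrainBytes = 2^20 bytes = 1 MB per region; on a 64-bit VM, where LogHeapWordSize is 3, LogOfHRGrainWords = 17, so GrainWords = 2^17 = 131072 words.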
315 | |
355 | 316 enum ClaimValues { |
317 InitialClaimValue = 0, | |
318 FinalCountClaimValue = 1, | |
319 NoteEndClaimValue = 2, | |
390 | 320 ScrubRemSetClaimValue = 3, |
626 | 321 ParVerifyClaimValue = 4, |
 322 RebuildRSClaimValue = 5 |
355 | 323 }; |
324 | |
342 | 325 // Concurrent refinement requires contiguous heap regions (in which TLABs |
326 // might be allocated) to be zero-filled. Each region therefore has a | |
327 // zero-fill-state. | |
328 enum ZeroFillState { | |
329 NotZeroFilled, | |
330 ZeroFilling, | |
331 ZeroFilled, | |
332 Allocated | |
333 }; | |
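A hedged reading of the intended lifecycle, inferred from the setters declared further down in this class: set_zero_fill_needed() puts a region at NotZeroFilled, set_zero_fill_in_progress() moves it to ZeroFilling (recording the filling thread), set_zero_fill_complete() moves it to ZeroFilled, set_zero_fill_allocated() to Allocated, and reset_zero_fill() returns it to NotZeroFilled when a full collection shrinks the heap.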
334 | |
335 // If this region is a member of a HeapRegionSeq, the index in that | |
336 // sequence, otherwise -1. | |
337 int hrs_index() const { return _hrs_index; } | |
338 void set_hrs_index(int index) { _hrs_index = index; } | |
339 | |
340 // The number of bytes marked live in the region in the last marking phase. | |
341 size_t marked_bytes() { return _prev_marked_bytes; } | |
342 // The number of bytes counted in the next marking. | |
343 size_t next_marked_bytes() { return _next_marked_bytes; } | |
344 // The number of bytes live wrt the next marking. | |
345 size_t next_live_bytes() { | |
346 return (top() - next_top_at_mark_start()) | |
347 * HeapWordSize | |
348 + next_marked_bytes(); | |
349 } | |
350 | |
351 // A lower bound on the amount of garbage bytes in the region. | |
352 size_t garbage_bytes() { | |
353 size_t used_at_mark_start_bytes = | |
354 (prev_top_at_mark_start() - bottom()) * HeapWordSize; | |
355 assert(used_at_mark_start_bytes >= marked_bytes(), | |
356 "Can't mark more than we have."); | |
357 return used_at_mark_start_bytes - marked_bytes(); | |
358 } | |
359 | |
360 // An upper bound on the number of live bytes in the region. | |
361 size_t max_live_bytes() { return used() - garbage_bytes(); } | |
362 | |
363 void add_to_marked_bytes(size_t incr_bytes) { | |
364 _next_marked_bytes = _next_marked_bytes + incr_bytes; | |
365 guarantee( _next_marked_bytes <= used(), "invariant" ); | |
366 } | |
367 | |
368 void zero_marked_bytes() { | |
369 _prev_marked_bytes = _next_marked_bytes = 0; | |
370 } | |
371 | |
355 | 372 bool isHumongous() const { return _humongous_type != NotHumongous; } |
373 bool startsHumongous() const { return _humongous_type == StartsHumongous; } | |
374 bool continuesHumongous() const { return _humongous_type == ContinuesHumongous; } | |
342 | 375 // For a humongous region, region in which it starts. |
376 HeapRegion* humongous_start_region() const { | |
377 return _humongous_start_region; | |
378 } | |
379 | |
 380 // Causes the current region to become the start of a humongous object | |
 381 // spanning multiple regions. | |
382 virtual void set_startsHumongous(); | |
383 | |
384 // The regions that continue a humongous sequence should be added using | |
385 // this method, in increasing address order. | |
386 void set_continuesHumongous(HeapRegion* start); | |
387 | |
388 void add_continuingHumongousRegion(HeapRegion* cont); | |
389 | |
390 // If the region has a remembered set, return a pointer to it. | |
391 HeapRegionRemSet* rem_set() const { | |
392 return _rem_set; | |
393 } | |
394 | |
395 // True iff the region is in current collection_set. | |
396 bool in_collection_set() const { | |
397 return _in_collection_set; | |
398 } | |
399 void set_in_collection_set(bool b) { | |
400 _in_collection_set = b; | |
401 } | |
402 HeapRegion* next_in_collection_set() { | |
403 assert(in_collection_set(), "should only invoke on member of CS."); | |
404 assert(_next_in_special_set == NULL || | |
405 _next_in_special_set->in_collection_set(), | |
406 "Malformed CS."); | |
407 return _next_in_special_set; | |
408 } | |
409 void set_next_in_collection_set(HeapRegion* r) { | |
410 assert(in_collection_set(), "should only invoke on member of CS."); | |
411 assert(r == NULL || r->in_collection_set(), "Malformed CS."); | |
412 _next_in_special_set = r; | |
413 } | |
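A hedged sketch of how a caller might walk the collection set through these links (cs_head and the per-region work are hypothetical, supplied by the caller):

    for (HeapRegion* cur = cs_head; cur != NULL;
         cur = cur->next_in_collection_set()) {
      // process cur; the asserts above guarantee that every region on
      // this chain reports in_collection_set().
    }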
414 | |
415 // True iff it is or has been an allocation region in the current | |
416 // collection pause. | |
417 bool is_gc_alloc_region() const { | |
418 return _is_gc_alloc_region; | |
419 } | |
420 void set_is_gc_alloc_region(bool b) { | |
421 _is_gc_alloc_region = b; | |
422 } | |
423 HeapRegion* next_gc_alloc_region() { | |
424 assert(is_gc_alloc_region(), "should only invoke on member of CS."); | |
425 assert(_next_in_special_set == NULL || | |
426 _next_in_special_set->is_gc_alloc_region(), | |
427 "Malformed CS."); | |
428 return _next_in_special_set; | |
429 } | |
430 void set_next_gc_alloc_region(HeapRegion* r) { | |
431 assert(is_gc_alloc_region(), "should only invoke on member of CS."); | |
432 assert(r == NULL || r->is_gc_alloc_region(), "Malformed CS."); | |
433 _next_in_special_set = r; | |
434 } | |
435 | |
436 bool is_reserved() { | |
437 return popular(); | |
438 } | |
439 | |
440 bool is_on_free_list() { | |
441 return _is_on_free_list; | |
442 } | |
443 | |
444 void set_on_free_list(bool b) { | |
445 _is_on_free_list = b; | |
446 } | |
447 | |
448 HeapRegion* next_from_free_list() { | |
449 assert(is_on_free_list(), | |
450 "Should only invoke on free space."); | |
451 assert(_next_in_special_set == NULL || | |
452 _next_in_special_set->is_on_free_list(), | |
453 "Malformed Free List."); | |
454 return _next_in_special_set; | |
455 } | |
456 | |
457 void set_next_on_free_list(HeapRegion* r) { | |
458 assert(r == NULL || r->is_on_free_list(), "Malformed free list."); | |
459 _next_in_special_set = r; | |
460 } | |
461 | |
462 bool is_on_unclean_list() { | |
463 return _is_on_unclean_list; | |
464 } | |
465 | |
466 void set_on_unclean_list(bool b); | |
467 | |
468 HeapRegion* next_from_unclean_list() { | |
469 assert(is_on_unclean_list(), | |
470 "Should only invoke on unclean space."); | |
471 assert(_next_in_special_set == NULL || | |
472 _next_in_special_set->is_on_unclean_list(), | |
473 "Malformed unclean List."); | |
474 return _next_in_special_set; | |
475 } | |
476 | |
477 void set_next_on_unclean_list(HeapRegion* r); | |
478 | |
479 HeapRegion* get_next_young_region() { return _next_young_region; } | |
480 void set_next_young_region(HeapRegion* hr) { | |
481 _next_young_region = hr; | |
482 } | |
483 | |
 484 // Allows logical separation between objects allocated before and after save_marks(). | |
485 void save_marks(); | |
486 | |
487 // Reset HR stuff to default values. | |
488 void hr_clear(bool par, bool clear_space); | |
489 | |
356 | 490 void initialize(MemRegion mr, bool clear_space, bool mangle_space); |
342 | 491 |
492 // Ensure that "this" is zero-filled. | |
493 void ensure_zero_filled(); | |
494 // This one requires that the calling thread holds ZF_mon. | |
495 void ensure_zero_filled_locked(); | |
496 | |
497 // Get the start of the unmarked area in this region. | |
498 HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; } | |
499 HeapWord* next_top_at_mark_start() const { return _next_top_at_mark_start; } | |
500 | |
501 // Apply "cl->do_oop" to (the addresses of) all reference fields in objects | |
502 // allocated in the current region before the last call to "save_mark". | |
503 void oop_before_save_marks_iterate(OopClosure* cl); | |
504 | |
505 // This call determines the "filter kind" argument that will be used for | |
 506 // the next call to "new_dcto_closure" on this region with the "traditional" | |
507 // signature (i.e., the call below.) The default, in the absence of a | |
508 // preceding call to this method, is "NoFilterKind", and a call to this | |
509 // method is necessary for each such call, or else it reverts to the | |
510 // default. | |
511 // (This is really ugly, but all other methods I could think of changed a | |
512 // lot of main-line code for G1.) | |
513 void set_next_filter_kind(HeapRegionDCTOC::FilterKind nfk) { | |
514 _next_fk = nfk; | |
515 } | |
516 | |
517 DirtyCardToOopClosure* | |
518 new_dcto_closure(OopClosure* cl, | |
519 CardTableModRefBS::PrecisionStyle precision, | |
520 HeapRegionDCTOC::FilterKind fk); | |
521 | |
522 #if WHASSUP | |
523 DirtyCardToOopClosure* | |
524 new_dcto_closure(OopClosure* cl, | |
525 CardTableModRefBS::PrecisionStyle precision, | |
526 HeapWord* boundary) { | |
527 assert(boundary == NULL, "This arg doesn't make sense here."); | |
528 DirtyCardToOopClosure* res = new_dcto_closure(cl, precision, _next_fk); | |
529 _next_fk = HeapRegionDCTOC::NoFilterKind; | |
530 return res; | |
531 } | |
532 #endif | |
533 | |
534 // | |
535 // Note the start or end of marking. This tells the heap region | |
536 // that the collector is about to start or has finished (concurrently) | |
537 // marking the heap. | |
538 // | |
539 | |
540 // Note the start of a marking phase. Record the | |
541 // start of the unmarked area of the region here. | |
542 void note_start_of_marking(bool during_initial_mark) { | |
543 init_top_at_conc_mark_count(); | |
544 _next_marked_bytes = 0; | |
545 if (during_initial_mark && is_young() && !is_survivor()) | |
546 _next_top_at_mark_start = bottom(); | |
547 else | |
548 _next_top_at_mark_start = top(); | |
549 } | |
550 | |
551 // Note the end of a marking phase. Install the start of | |
552 // the unmarked area that was captured at start of marking. | |
553 void note_end_of_marking() { | |
554 _prev_top_at_mark_start = _next_top_at_mark_start; | |
555 _prev_marked_bytes = _next_marked_bytes; | |
556 _next_marked_bytes = 0; | |
557 | |
558 guarantee(_prev_marked_bytes <= | |
559 (size_t) (prev_top_at_mark_start() - bottom()) * HeapWordSize, | |
560 "invariant"); | |
561 } | |
562 | |
563 // After an evacuation, we need to update _next_top_at_mark_start | |
564 // to be the current top. Note this is only valid if we have only | |
565 // ever evacuated into this region. If we evacuate, allocate, and | |
566 // then evacuate we are in deep doodoo. | |
567 void note_end_of_copying() { | |
568 assert(top() >= _next_top_at_mark_start, | |
569 "Increase only"); | |
545 | 570 // Survivor regions will be scanned on the start of concurrent |
571 // marking. | |
572 if (!is_survivor()) { | |
573 _next_top_at_mark_start = top(); | |
574 } | |
342 | 575 } |
576 | |
577 // Returns "false" iff no object in the region was allocated when the | |
578 // last mark phase ended. | |
579 bool is_marked() { return _prev_top_at_mark_start != bottom(); } | |
580 | |
581 // If "is_marked()" is true, then this is the index of the region in | |
582 // an array constructed at the end of marking of the regions in a | |
583 // "desirability" order. | |
584 int sort_index() { | |
585 return _sort_index; | |
586 } | |
587 void set_sort_index(int i) { | |
588 _sort_index = i; | |
589 } | |
590 | |
591 void init_top_at_conc_mark_count() { | |
592 _top_at_conc_mark_count = bottom(); | |
593 } | |
594 | |
595 void set_top_at_conc_mark_count(HeapWord *cur) { | |
596 assert(bottom() <= cur && cur <= end(), "Sanity."); | |
597 _top_at_conc_mark_count = cur; | |
598 } | |
599 | |
600 HeapWord* top_at_conc_mark_count() { | |
601 return _top_at_conc_mark_count; | |
602 } | |
603 | |
604 void reset_during_compaction() { | |
605 guarantee( isHumongous() && startsHumongous(), | |
606 "should only be called for humongous regions"); | |
607 | |
608 zero_marked_bytes(); | |
609 init_top_at_mark_start(); | |
610 } | |
611 | |
612 bool popular() { return _popularity == Popular; } | |
613 void set_popular(bool b) { | |
614 if (b) { | |
615 _popularity = Popular; | |
616 } else { | |
617 _popularity = NotPopular; | |
618 } | |
619 } | |
620 bool popular_pending() { return _popularity == PopularPending; } | |
621 void set_popular_pending(bool b) { | |
622 if (b) { | |
623 _popularity = PopularPending; | |
624 } else { | |
625 _popularity = NotPopular; | |
626 } | |
627 } | |
628 | |
629 // <PREDICTION> | |
630 void calc_gc_efficiency(void); | |
631 double gc_efficiency() { return _gc_efficiency;} | |
632 // </PREDICTION> | |
633 | |
634 bool is_young() const { return _young_type != NotYoung; } | |
635 bool is_scan_only() const { return _young_type == ScanOnly; } | |
636 bool is_survivor() const { return _young_type == Survivor; } | |
637 | |
638 int young_index_in_cset() const { return _young_index_in_cset; } | |
639 void set_young_index_in_cset(int index) { | |
640 assert( (index == -1) || is_young(), "pre-condition" ); | |
641 _young_index_in_cset = index; | |
642 } | |
643 | |
644 int age_in_surv_rate_group() { | |
645 assert( _surv_rate_group != NULL, "pre-condition" ); | |
646 assert( _age_index > -1, "pre-condition" ); | |
647 return _surv_rate_group->age_in_group(_age_index); | |
648 } | |
649 | |
650 void recalculate_age_in_surv_rate_group() { | |
651 assert( _surv_rate_group != NULL, "pre-condition" ); | |
652 assert( _age_index > -1, "pre-condition" ); | |
653 _age_index = _surv_rate_group->recalculate_age_index(_age_index); | |
654 } | |
655 | |
656 void record_surv_words_in_group(size_t words_survived) { | |
657 assert( _surv_rate_group != NULL, "pre-condition" ); | |
658 assert( _age_index > -1, "pre-condition" ); | |
659 int age_in_group = age_in_surv_rate_group(); | |
660 _surv_rate_group->record_surviving_words(age_in_group, words_survived); | |
661 } | |
662 | |
663 int age_in_surv_rate_group_cond() { | |
664 if (_surv_rate_group != NULL) | |
665 return age_in_surv_rate_group(); | |
666 else | |
667 return -1; | |
668 } | |
669 | |
670 SurvRateGroup* surv_rate_group() { | |
671 return _surv_rate_group; | |
672 } | |
673 | |
674 void install_surv_rate_group(SurvRateGroup* surv_rate_group) { | |
675 assert( surv_rate_group != NULL, "pre-condition" ); | |
676 assert( _surv_rate_group == NULL, "pre-condition" ); | |
677 assert( is_young(), "pre-condition" ); | |
678 | |
679 _surv_rate_group = surv_rate_group; | |
680 _age_index = surv_rate_group->next_age_index(); | |
681 } | |
682 | |
683 void uninstall_surv_rate_group() { | |
684 if (_surv_rate_group != NULL) { | |
685 assert( _age_index > -1, "pre-condition" ); | |
686 assert( is_young(), "pre-condition" ); | |
687 | |
688 _surv_rate_group = NULL; | |
689 _age_index = -1; | |
690 } else { | |
691 assert( _age_index == -1, "pre-condition" ); | |
692 } | |
693 } | |
694 | |
695 void set_young() { set_young_type(Young); } | |
696 | |
697 void set_scan_only() { set_young_type(ScanOnly); } | |
698 | |
699 void set_survivor() { set_young_type(Survivor); } | |
700 | |
701 void set_not_young() { set_young_type(NotYoung); } | |
702 | |
703 // Determine if an object has been allocated since the last | |
704 // mark performed by the collector. This returns true iff the object | |
705 // is within the unmarked area of the region. | |
706 bool obj_allocated_since_prev_marking(oop obj) const { | |
707 return (HeapWord *) obj >= prev_top_at_mark_start(); | |
708 } | |
709 bool obj_allocated_since_next_marking(oop obj) const { | |
710 return (HeapWord *) obj >= next_top_at_mark_start(); | |
711 } | |
712 | |
713 // For parallel heapRegion traversal. | |
714 bool claimHeapRegion(int claimValue); | |
715 jint claim_value() { return _claimed; } | |
716 // Use this carefully: only when you're sure no one is claiming... | |
717 void set_claim_value(int claimValue) { _claimed = claimValue; } | |
718 | |
719 // Returns the "evacuation_failed" property of the region. | |
720 bool evacuation_failed() { return _evacuation_failed; } | |
721 | |
722 // Sets the "evacuation_failed" property of the region. | |
723 void set_evacuation_failed(bool b) { | |
724 _evacuation_failed = b; | |
725 | |
726 if (b) { | |
727 init_top_at_conc_mark_count(); | |
728 _next_marked_bytes = 0; | |
729 } | |
730 } | |
731 | |
732 // Requires that "mr" be entirely within the region. | |
733 // Apply "cl->do_object" to all objects that intersect with "mr". | |
734 // If the iteration encounters an unparseable portion of the region, | |
735 // or if "cl->abort()" is true after a closure application, | |
736 // terminate the iteration and return the address of the start of the | |
737 // subregion that isn't done. (The two can be distinguished by querying | |
738 // "cl->abort()".) Return of "NULL" indicates that the iteration | |
739 // completed. | |
740 HeapWord* | |
741 object_iterate_mem_careful(MemRegion mr, ObjectClosure* cl); | |
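A hedged sketch of the calling pattern the comment above implies (mr and obj_cl are hypothetical, and abort() is the query the comment itself refers to):

    HeapWord* stopped_at = hr->object_iterate_mem_careful(mr, &obj_cl);
    if (stopped_at == NULL) {
      // The whole of mr was iterated.
    } else if (obj_cl.abort()) {
      // The closure asked to stop; stopped_at is the start of the
      // remaining subregion.
    } else {
      // An unparseable portion was encountered starting at stopped_at.
    }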
742 | |
743 HeapWord* | |
744 oops_on_card_seq_iterate_careful(MemRegion mr, | |
745 FilterOutOfRegionClosure* cl); | |
746 | |
747 // The region "mr" is entirely in "this", and starts and ends at block | |
748 // boundaries. The caller declares that all the contained blocks are | |
749 // coalesced into one. | |
750 void declare_filled_region_to_BOT(MemRegion mr) { | |
751 _offsets.single_block(mr.start(), mr.end()); | |
752 } | |
753 | |
754 // A version of block start that is guaranteed to find *some* block | |
 755 // boundary at or before "p", but does not iterate over objects, and may | |
756 // therefore be used safely when the heap is unparseable. | |
757 HeapWord* block_start_careful(const void* p) const { | |
758 return _offsets.block_start_careful(p); | |
759 } | |
760 | |
761 // Requires that "addr" is within the region. Returns the start of the | |
762 // first ("careful") block that starts at or after "addr", or else the | |
763 // "end" of the region if there is no such block. | |
764 HeapWord* next_block_start_careful(HeapWord* addr); | |
765 | |
766 // Returns the zero-fill-state of the current region. | |
767 ZeroFillState zero_fill_state() { return (ZeroFillState)_zfs; } | |
768 bool zero_fill_is_allocated() { return _zfs == Allocated; } | |
769 Thread* zero_filler() { return _zero_filler; } | |
770 | |
771 // Indicate that the contents of the region are unknown, and therefore | |
772 // might require zero-filling. | |
773 void set_zero_fill_needed() { | |
774 set_zero_fill_state_work(NotZeroFilled); | |
775 } | |
776 void set_zero_fill_in_progress(Thread* t) { | |
777 set_zero_fill_state_work(ZeroFilling); | |
778 _zero_filler = t; | |
779 } | |
780 void set_zero_fill_complete(); | |
781 void set_zero_fill_allocated() { | |
782 set_zero_fill_state_work(Allocated); | |
783 } | |
784 | |
785 void set_zero_fill_state_work(ZeroFillState zfs); | |
786 | |
787 // This is called when a full collection shrinks the heap. | |
788 // We want to set the heap region to a value which says | |
 789 // it is no longer part of the heap. For now, we'll let "NotZeroFilled" fill | |
790 // that role. | |
791 void reset_zero_fill() { | |
792 set_zero_fill_state_work(NotZeroFilled); | |
793 _zero_filler = NULL; | |
794 } | |
795 | |
796 #define HeapRegion_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \ | |
797 virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl); | |
798 SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DECL) | |
799 | |
800 CompactibleSpace* next_compaction_space() const; | |
801 | |
802 virtual void reset_after_compaction(); | |
803 | |
804 void print() const; | |
805 void print_on(outputStream* st) const; | |
806 | |
807 // Override | |
808 virtual void verify(bool allow_dirty) const; | |
809 | |
810 #ifdef DEBUG | |
811 HeapWord* allocate(size_t size); | |
812 #endif | |
813 }; | |
814 | |
815 // HeapRegionClosure is used for iterating over regions. | |
816 // Terminates the iteration when the "doHeapRegion" method returns "true". | |
817 class HeapRegionClosure : public StackObj { | |
818 friend class HeapRegionSeq; | |
819 friend class G1CollectedHeap; | |
820 | |
821 bool _complete; | |
822 void incomplete() { _complete = false; } | |
823 | |
824 public: | |
825 HeapRegionClosure(): _complete(true) {} | |
826 | |
827 // Typically called on each region until it returns true. | |
828 virtual bool doHeapRegion(HeapRegion* r) = 0; | |
829 | |
830 // True after iteration if the closure was applied to all heap regions | |
831 // and returned "false" in all cases. | |
832 bool complete() { return _complete; } | |
833 }; | |
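A hedged example of a concrete closure (editor's illustration, not part of this file) that visits every region and never cuts the iteration short:

    class CountRegionsClosure : public HeapRegionClosure {
      size_t _count;
    public:
      CountRegionsClosure() : _count(0) {}
      bool doHeapRegion(HeapRegion* r) {
        if (!r->continuesHumongous()) _count++;  // count each humongous object once
        return false;  // false means: keep applying the closure to further regions
      }
      size_t count() const { return _count; }
    };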
834 | |
 835 // A linked list of heap regions. It leaves the "next" field | |
836 // unspecified; that's up to subtypes. | |
549 | 837 class RegionList VALUE_OBJ_CLASS_SPEC { |
342 | 838 protected: |
839 virtual HeapRegion* get_next(HeapRegion* chr) = 0; | |
840 virtual void set_next(HeapRegion* chr, | |
841 HeapRegion* new_next) = 0; | |
842 | |
843 HeapRegion* _hd; | |
844 HeapRegion* _tl; | |
845 size_t _sz; | |
846 | |
847 // Protected constructor because this type is only meaningful | |
 848 // when the get_next/set_next functions are defined. | |
849 RegionList() : _hd(NULL), _tl(NULL), _sz(0) {} | |
850 public: | |
851 void reset() { | |
852 _hd = NULL; | |
853 _tl = NULL; | |
854 _sz = 0; | |
855 } | |
856 HeapRegion* hd() { return _hd; } | |
857 HeapRegion* tl() { return _tl; } | |
858 size_t sz() { return _sz; } | |
859 size_t length(); | |
860 | |
861 bool well_formed() { | |
862 return | |
863 ((hd() == NULL && tl() == NULL && sz() == 0) | |
864 || (hd() != NULL && tl() != NULL && sz() > 0)) | |
865 && (sz() == length()); | |
866 } | |
867 virtual void insert_before_head(HeapRegion* r); | |
868 void prepend_list(RegionList* new_list); | |
869 virtual HeapRegion* pop(); | |
870 void dec_sz() { _sz--; } | |
871 // Requires that "r" is an element of the list, and is not the tail. | |
872 void delete_after(HeapRegion* r); | |
873 }; | |
874 | |
875 class EmptyNonHRegionList: public RegionList { | |
876 protected: | |
877 // Protected constructor because this type is only meaningful | |
 878 // when the get_next/set_next functions are defined. | |
879 EmptyNonHRegionList() : RegionList() {} | |
880 | |
881 public: | |
882 void insert_before_head(HeapRegion* r) { | |
883 // assert(r->is_empty(), "Better be empty"); | |
884 assert(!r->isHumongous(), "Better not be humongous."); | |
885 RegionList::insert_before_head(r); | |
886 } | |
887 void prepend_list(EmptyNonHRegionList* new_list) { | |
888 // assert(new_list->hd() == NULL || new_list->hd()->is_empty(), | |
889 // "Better be empty"); | |
890 assert(new_list->hd() == NULL || !new_list->hd()->isHumongous(), | |
891 "Better not be humongous."); | |
892 // assert(new_list->tl() == NULL || new_list->tl()->is_empty(), | |
893 // "Better be empty"); | |
894 assert(new_list->tl() == NULL || !new_list->tl()->isHumongous(), | |
895 "Better not be humongous."); | |
896 RegionList::prepend_list(new_list); | |
897 } | |
898 }; | |
899 | |
900 class UncleanRegionList: public EmptyNonHRegionList { | |
901 public: | |
902 HeapRegion* get_next(HeapRegion* hr) { | |
903 return hr->next_from_unclean_list(); | |
904 } | |
905 void set_next(HeapRegion* hr, HeapRegion* new_next) { | |
906 hr->set_next_on_unclean_list(new_next); | |
907 } | |
908 | |
909 UncleanRegionList() : EmptyNonHRegionList() {} | |
910 | |
911 void insert_before_head(HeapRegion* r) { | |
912 assert(!r->is_on_free_list(), | |
913 "Better not already be on free list"); | |
914 assert(!r->is_on_unclean_list(), | |
915 "Better not already be on unclean list"); | |
916 r->set_zero_fill_needed(); | |
917 r->set_on_unclean_list(true); | |
918 EmptyNonHRegionList::insert_before_head(r); | |
919 } | |
920 void prepend_list(UncleanRegionList* new_list) { | |
921 assert(new_list->tl() == NULL || !new_list->tl()->is_on_free_list(), | |
922 "Better not already be on free list"); | |
923 assert(new_list->tl() == NULL || new_list->tl()->is_on_unclean_list(), | |
924 "Better already be marked as on unclean list"); | |
925 assert(new_list->hd() == NULL || !new_list->hd()->is_on_free_list(), | |
926 "Better not already be on free list"); | |
927 assert(new_list->hd() == NULL || new_list->hd()->is_on_unclean_list(), | |
928 "Better already be marked as on unclean list"); | |
929 EmptyNonHRegionList::prepend_list(new_list); | |
930 } | |
931 HeapRegion* pop() { | |
932 HeapRegion* res = RegionList::pop(); | |
933 if (res != NULL) res->set_on_unclean_list(false); | |
934 return res; | |
935 } | |
936 }; | |
937 | |
938 // Local Variables: *** | |
939 // c-indentation-style: gnu *** | |
940 // End: *** | |
941 | |
942 #endif // SERIALGC |