Mercurial > hg > graal-jvmci-8
annotate src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp @ 807:d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
Summary: For heaps larger than 32Gb, the number of heap regions overflows the data type used to hold the region index in the SparsePRT structure. Changed the region indexes, card indexes, and RSet hash table buckets to ints and added some size overflow guarantees.
Reviewed-by: ysr, tonyp
author | johnc |
---|---|
date | Thu, 11 Jun 2009 17:19:33 -0700 |
parents | 29e7d79232b9 |
children | 830ca2573896 |
rev | line source |
---|---|
342 | 1 /* |
579 | 2 * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. |
342 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, | |
20 * CA 95054 USA or visit www.sun.com if you need additional information or | |
21 * have any questions. | |
22 * | |
23 */ | |
24 | |
25 // A "G1CollectedHeap" is an implementation of a java heap for HotSpot. | |
26 // It uses the "Garbage First" heap organization and algorithm, which | |
27 // may combine concurrent marking with parallel, incremental compaction of | |
28 // heap subsets that will yield large amounts of garbage. | |
29 | |
30 class HeapRegion; | |
31 class HeapRegionSeq; | |
32 class PermanentGenerationSpec; | |
33 class GenerationSpec; | |
34 class OopsInHeapRegionClosure; | |
35 class G1ScanHeapEvacClosure; | |
36 class ObjectClosure; | |
37 class SpaceClosure; | |
38 class CompactibleSpaceClosure; | |
39 class Space; | |
40 class G1CollectorPolicy; | |
41 class GenRemSet; | |
42 class G1RemSet; | |
43 class HeapRegionRemSetIterator; | |
44 class ConcurrentMark; | |
45 class ConcurrentMarkThread; | |
46 class ConcurrentG1Refine; | |
47 class ConcurrentZFThread; | |
48 | |
49 // If you want to accumulate detailed statistics on work queues, | |
50 // turn this on. | |
51 #define G1_DETAILED_STATS 0 | |
52 | |
53 #if G1_DETAILED_STATS | |
54 # define IF_G1_DETAILED_STATS(code) code | |
55 #else | |
56 # define IF_G1_DETAILED_STATS(code) | |
57 #endif | |
58 | |
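As an aside on how a conditional-statistics macro like this is typically used: the wrapped statement compiles away entirely when G1_DETAILED_STATS is 0. The following standalone sketch (with a made-up WorkQueueStats counter, not anything declared in this header) shows the pattern:

    // Standalone illustration; the real flag above defaults to 0.
    #define G1_DETAILED_STATS 1

    #if G1_DETAILED_STATS
    # define IF_G1_DETAILED_STATS(code) code
    #else
    # define IF_G1_DETAILED_STATS(code)
    #endif

    #include <cstdio>

    struct WorkQueueStats {   // hypothetical per-queue counters
      long pushes = 0;
      long pops   = 0;
    };

    int main() {
      WorkQueueStats stats;
      IF_G1_DETAILED_STATS(stats.pushes++;)   // vanishes when the flag is 0
      IF_G1_DETAILED_STATS(stats.pops++;)
      std::printf("pushes=%ld pops=%ld\n", stats.pushes, stats.pops);
      return 0;
    }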
59 typedef GenericTaskQueue<oop*> RefToScanQueue; | |
60 typedef GenericTaskQueueSet<oop*> RefToScanQueueSet; | |
61 | |
807 | 62 typedef int RegionIdx_t; // needs to hold [ 0..max_regions() ) |
63 typedef int CardIdx_t; // needs to hold [ 0..CardsPerRegion ) | |
64 |
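A quick sanity check of why these indexes are declared as int, per the overflow described in 6843694 above (the region size and heap size below are illustrative assumptions, not values taken from this code):

    #include <climits>
    #include <cstdio>

    int main() {
      // Assumed, illustrative sizes: 1 MB regions on a 64 GB heap.
      const unsigned long long region_bytes = 1024ULL * 1024;
      const unsigned long long heap_bytes   = 64ULL * 1024 * 1024 * 1024;

      unsigned long long num_regions = heap_bytes / region_bytes;   // 65536 regions
      std::printf("regions = %llu, SHRT_MAX = %d\n", num_regions, SHRT_MAX);

      // 65536 region indexes do not fit in a 16-bit signed type (max 32767),
      // so a narrow RegionIdx_t would wrap; an int holds the full range.
      if (num_regions > (unsigned long long) SHRT_MAX) {
        std::printf("a 16-bit region index would overflow for this heap\n");
      }
      return 0;
    }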
342 | 65 enum G1GCThreadGroups { |
66 G1CRGroup = 0, | |
67 G1ZFGroup = 1, | |
68 G1CMGroup = 2, | |
69 G1CLGroup = 3 | |
70 }; | |
71 | |
72 enum GCAllocPurpose { | |
73 GCAllocForTenured, | |
74 GCAllocForSurvived, | |
75 GCAllocPurposeCount | |
76 }; | |
77 | |
78 class YoungList : public CHeapObj { | |
79 private: | |
80 G1CollectedHeap* _g1h; | |
81 | |
82 HeapRegion* _head; | |
83 | |
84 HeapRegion* _scan_only_head; | |
85 HeapRegion* _scan_only_tail; | |
86 size_t _length; | |
87 size_t _scan_only_length; | |
88 | |
89 size_t _last_sampled_rs_lengths; | |
90 size_t _sampled_rs_lengths; | |
91 HeapRegion* _curr; | |
92 HeapRegion* _curr_scan_only; | |
93 | |
94 HeapRegion* _survivor_head; | |
545 | 95 HeapRegion* _survivor_tail; |
342 | 96 size_t _survivor_length; |
97 | |
98 void empty_list(HeapRegion* list); | |
99 | |
100 public: | |
101 YoungList(G1CollectedHeap* g1h); | |
102 | |
103 void push_region(HeapRegion* hr); | |
104 void add_survivor_region(HeapRegion* hr); | |
105 HeapRegion* pop_region(); | |
106 void empty_list(); | |
107 bool is_empty() { return _length == 0; } | |
108 size_t length() { return _length; } | |
109 size_t scan_only_length() { return _scan_only_length; } | |
545 | 110 size_t survivor_length() { return _survivor_length; } |
342 | 111 |
112 void rs_length_sampling_init(); | |
113 bool rs_length_sampling_more(); | |
114 void rs_length_sampling_next(); | |
115 | |
116 void reset_sampled_info() { | |
117 _last_sampled_rs_lengths = 0; | |
118 } | |
119 size_t sampled_rs_lengths() { return _last_sampled_rs_lengths; } | |
120 | |
121 // for development purposes | |
122 void reset_auxilary_lists(); | |
123 HeapRegion* first_region() { return _head; } | |
124 HeapRegion* first_scan_only_region() { return _scan_only_head; } | |
125 HeapRegion* first_survivor_region() { return _survivor_head; } | |
545 | 126 HeapRegion* last_survivor_region() { return _survivor_tail; } |
342 | 127 HeapRegion* par_get_next_scan_only_region() { |
128 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); | |
129 HeapRegion* ret = _curr_scan_only; | |
130 if (ret != NULL) | |
131 _curr_scan_only = ret->get_next_young_region(); | |
132 return ret; | |
133 } | |
134 | |
135 // debugging | |
136 bool check_list_well_formed(); | |
137 bool check_list_empty(bool ignore_scan_only_list, | |
138 bool check_sample = true); | |
139 void print(); | |
140 }; | |
141 | |
142 class RefineCardTableEntryClosure; | |
143 class G1CollectedHeap : public SharedHeap { | |
144 friend class VM_G1CollectForAllocation; | |
145 friend class VM_GenCollectForPermanentAllocation; | |
146 friend class VM_G1CollectFull; | |
147 friend class VM_G1IncCollectionPause; | |
148 friend class VMStructs; | |
149 | |
150 // Closures used in implementation. | |
151 friend class G1ParCopyHelper; | |
152 friend class G1IsAliveClosure; | |
153 friend class G1EvacuateFollowersClosure; | |
154 friend class G1ParScanThreadState; | |
155 friend class G1ParScanClosureSuper; | |
156 friend class G1ParEvacuateFollowersClosure; | |
157 friend class G1ParTask; | |
158 friend class G1FreeGarbageRegionClosure; | |
159 friend class RefineCardTableEntryClosure; | |
160 friend class G1PrepareCompactClosure; | |
161 friend class RegionSorter; | |
162 friend class CountRCClosure; | |
163 friend class EvacPopObjClosure; | |
796 | 164 friend class G1ParCleanupCTTask; |
342 | 165 |
166 // Other related classes. | |
167 friend class G1MarkSweep; | |
168 | |
169 private: | |
170 enum SomePrivateConstants { | |
171 VeryLargeInBytes = HeapRegion::GrainBytes/2, | |
172 VeryLargeInWords = VeryLargeInBytes/HeapWordSize, | |
173 MinHeapDeltaBytes = 10 * HeapRegion::GrainBytes, // FIXME | |
174 NumAPIs = HeapRegion::MaxAge | |
175 }; | |
176 | |
177 // The one and only G1CollectedHeap, so static functions can find it. | |
178 static G1CollectedHeap* _g1h; | |
179 | |
180 // Storage for the G1 heap (excludes the permanent generation). | |
181 VirtualSpace _g1_storage; | |
182 MemRegion _g1_reserved; | |
183 | |
184 // The part of _g1_storage that is currently committed. | |
185 MemRegion _g1_committed; | |
186 | |
187 // The maximum part of _g1_storage that has ever been committed. | |
188 MemRegion _g1_max_committed; | |
189 | |
190 // The number of regions that are completely free. | |
191 size_t _free_regions; | |
192 | |
193 // The number of regions we could create by expansion. | |
194 size_t _expansion_regions; | |
195 | |
196 // Return the number of free regions in the heap (by direct counting.) | |
197 size_t count_free_regions(); | |
198 // Return the number of free regions on the free and unclean lists. | |
199 size_t count_free_regions_list(); | |
200 | |
201 // The block offset table for the G1 heap. | |
202 G1BlockOffsetSharedArray* _bot_shared; | |
203 | |
204 // Move all of the regions off the free lists, then rebuild those free | |
205 // lists, before and after full GC. | |
206 void tear_down_region_lists(); | |
207 void rebuild_region_lists(); | |
208 // This sets all non-empty regions to need zero-fill (which they will if | |
209 // they are empty after full collection.) | |
210 void set_used_regions_to_need_zero_fill(); | |
211 | |
212 // The sequence of all heap regions in the heap. | |
213 HeapRegionSeq* _hrs; | |
214 | |
215 // The region from which normal-sized objects are currently being | |
216 // allocated. May be NULL. | |
217 HeapRegion* _cur_alloc_region; | |
218 | |
219 // Postcondition: cur_alloc_region == NULL. | |
220 void abandon_cur_alloc_region(); | |
636 | 221 void abandon_gc_alloc_regions(); |
342 | 222 |
223 // The to-space memory regions into which objects are being copied during | |
224 // a GC. | |
225 HeapRegion* _gc_alloc_regions[GCAllocPurposeCount]; | |
545 | 226 size_t _gc_alloc_region_counts[GCAllocPurposeCount]; |
636 | 227 // These are the regions, one per GCAllocPurpose, that are half-full |
228 // at the end of a collection and that we want to reuse during the | |
229 // next collection. | |
230 HeapRegion* _retained_gc_alloc_regions[GCAllocPurposeCount]; | |
231 // This specifies whether we will keep the last half-full region at | |
232 // the end of a collection so that it can be reused during the next | |
233 // collection (this is specified per GCAllocPurpose) | |
234 bool _retain_gc_alloc_region[GCAllocPurposeCount]; | |
342 | 235 |
236 // A list of the regions that have been set to be alloc regions in the | |
237 // current collection. | |
238 HeapRegion* _gc_alloc_region_list; | |
239 | |
240 // When called by par thread, require par_alloc_during_gc_lock() to be held. | |
241 void push_gc_alloc_region(HeapRegion* hr); | |
242 | |
243 // This should only be called single-threaded. Undeclares all GC alloc | |
244 // regions. | |
245 void forget_alloc_region_list(); | |
246 | |
247 // Should be used to set an alloc region, because there's other | |
248 // associated bookkeeping. | |
249 void set_gc_alloc_region(int purpose, HeapRegion* r); | |
250 | |
251 // Check well-formedness of alloc region list. | |
252 bool check_gc_alloc_regions(); | |
253 | |
254 // Outside of GC pauses, the number of bytes used in all regions other | |
255 // than the current allocation region. | |
256 size_t _summary_bytes_used; | |
257 | |
526 | 258 // This is used for a quick test on whether a reference points into |
259 // the collection set or not. Basically, we have an array, with one | |
260 // byte per region, and that byte denotes whether the corresponding | |
261 // region is in the collection set or not. The entry corresponding | |
262 // the bottom of the heap, i.e., region 0, is pointed to by | |
263 // _in_cset_fast_test_base. The _in_cset_fast_test field has been | |
264 // biased so that it actually points to address 0 of the address | |
265 // space, to make the test as fast as possible (we can simply shift | |
266 // the address to index into it, instead of having to subtract the | |
267 // bottom of the heap from the address before shifting it; basically | |
268 // it works in the same way the card table works). | |
269 bool* _in_cset_fast_test; | |
270 | |
271 // The allocated array used for the fast test on whether a reference | |
272 // points into the collection set or not. This field is also used to | |
273 // free the array. | |
274 bool* _in_cset_fast_test_base; | |
275 | |
276 // The length of the _in_cset_fast_test_base array. | |
277 size_t _in_cset_fast_test_length; | |
278 | |
353 | 279 volatile unsigned _gc_time_stamp; |
342 | 280 |
281 size_t* _surviving_young_words; | |
282 | |
283 void setup_surviving_young_words(); | |
284 void update_surviving_young_words(size_t* surv_young_words); | |
285 void cleanup_surviving_young_words(); | |
286 | |
287 protected: | |
288 | |
289 // Returns "true" iff none of the gc alloc regions have any allocations | |
290 // since the last call to "save_marks". | |
291 bool all_alloc_regions_no_allocs_since_save_marks(); | |
545 | 292 // Perform finalization stuff on all allocation regions. |
293 void retire_all_alloc_regions(); | |
342 | 294 |
295 // The number of regions allocated to hold humongous objects. | |
296 int _num_humongous_regions; | |
297 YoungList* _young_list; | |
298 | |
299 // The current policy object for the collector. | |
300 G1CollectorPolicy* _g1_policy; | |
301 | |
302 // Parallel allocation lock to protect the current allocation region. | |
303 Mutex _par_alloc_during_gc_lock; | |
304 Mutex* par_alloc_during_gc_lock() { return &_par_alloc_during_gc_lock; } | |
305 | |
306 // If possible/desirable, allocate a new HeapRegion for normal object | |
307 // allocation sufficient for an allocation of the given "word_size". | |
308 // If "do_expand" is true, will attempt to expand the heap if necessary | |
309 // to satisfy the request. If "zero_filled" is true, requires a | |
310 // zero-filled region. | |
311 // (Returning NULL will trigger a GC.) | |
312 virtual HeapRegion* newAllocRegion_work(size_t word_size, | |
313 bool do_expand, | |
314 bool zero_filled); | |
315 | |
316 virtual HeapRegion* newAllocRegion(size_t word_size, | |
317 bool zero_filled = true) { | |
318 return newAllocRegion_work(word_size, false, zero_filled); | |
319 } | |
320 virtual HeapRegion* newAllocRegionWithExpansion(int purpose, | |
321 size_t word_size, | |
322 bool zero_filled = true); | |
323 | |
324 // Attempt to allocate an object of the given (very large) "word_size". | |
325 // Returns "NULL" on failure. | |
326 virtual HeapWord* humongousObjAllocate(size_t word_size); | |
327 | |
328 // If possible, allocate a block of the given word_size, else return "NULL". | |
329 // Returning NULL will trigger GC or heap expansion. | |
330 // These two methods have rather awkward pre- and | |
331 // post-conditions. If they are called outside a safepoint, then | |
332 // they assume that the caller is holding the heap lock. Upon return | |
333 // they release the heap lock, if they are returning a non-NULL | |
334 // value. attempt_allocation_slow() also dirties the cards of a | |
335 // newly-allocated young region after it releases the heap | |
336 // lock. This change in interface was the neatest way to achieve | |
337 // this card dirtying without affecting mem_allocate(), which is a | |
338 // more frequently called method. We tried two or three different | |
339 // approaches, but they were even more hacky. | |
340 HeapWord* attempt_allocation(size_t word_size, | |
341 bool permit_collection_pause = true); | |
342 | |
343 HeapWord* attempt_allocation_slow(size_t word_size, | |
344 bool permit_collection_pause = true); | |
345 | |
346 // Allocate blocks during garbage collection. Will ensure an | |
347 // allocation region, either by picking one or expanding the | |
348 // heap, and then allocate a block of the given size. The block | |
349 // may not be humongous - it must fit into a single heap region. | |
350 HeapWord* allocate_during_gc(GCAllocPurpose purpose, size_t word_size); | |
351 HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size); | |
352 | |
353 HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose, | |
354 HeapRegion* alloc_region, | |
355 bool par, | |
356 size_t word_size); | |
357 | |
358 // Ensure that no further allocations can happen in "r", bearing in mind | |
359 // that parallel threads might be attempting allocations. | |
360 void par_allocate_remaining_space(HeapRegion* r); | |
361 | |
545 | 362 // Retires an allocation region when it is full or at the end of a |
363 // GC pause. | |
364 void retire_alloc_region(HeapRegion* alloc_region, bool par); | |
365 | |
342 | 366 // Helper function for two callbacks below. |
367 // "full", if true, indicates that the GC is for a System.gc() request, | |
368 // and should collect the entire heap. If "clear_all_soft_refs" is true, | |
369 // all soft references are cleared during the GC. If "full" is false, | |
370 // "word_size" describes the allocation that the GC should | |
371 // attempt (at least) to satisfy. | |
372 void do_collection(bool full, bool clear_all_soft_refs, | |
373 size_t word_size); | |
374 | |
375 // Callback from VM_G1CollectFull operation. | |
376 // Perform a full collection. | |
377 void do_full_collection(bool clear_all_soft_refs); | |
378 | |
379 // Resize the heap if necessary after a full collection. If this is | |
380 // after a collect-for allocation, "word_size" is the allocation size, | |
381 // and will be considered part of the used portion of the heap. | |
382 void resize_if_necessary_after_full_collection(size_t word_size); | |
383 | |
384 // Callback from VM_G1CollectForAllocation operation. | |
385 // This function does everything necessary/possible to satisfy a | |
386 // failed allocation request (including collection, expansion, etc.) | |
387 HeapWord* satisfy_failed_allocation(size_t word_size); | |
388 | |
389 // Attempt to expand the heap sufficiently | |
390 // to support an allocation of the given "word_size". If | |
391 // successful, perform the allocation and return the address of the | |
392 // allocated block, or else "NULL". | |
393 virtual HeapWord* expand_and_allocate(size_t word_size); | |
394 | |
395 public: | |
396 // Expand the garbage-first heap by at least the given size (in bytes!). | |
397 // (Rounds up to a HeapRegion boundary.) | |
398 virtual void expand(size_t expand_bytes); | |
399 | |
400 // Do anything common to GC's. | |
401 virtual void gc_prologue(bool full); | |
402 virtual void gc_epilogue(bool full); | |
403 | |
526 | 404 // We register a region with the fast "in collection set" test. We |
405 // simply set to true the array slot corresponding to this region. | |
406 void register_region_with_in_cset_fast_test(HeapRegion* r) { | |
407 assert(_in_cset_fast_test_base != NULL, "sanity"); | |
408 assert(r->in_collection_set(), "invariant"); | |
409 int index = r->hrs_index(); | |
410 assert(0 <= (size_t) index && (size_t) index < _in_cset_fast_test_length, | |
411 "invariant"); | |
412 assert(!_in_cset_fast_test_base[index], "invariant"); | |
413 _in_cset_fast_test_base[index] = true; | |
414 } | |
415 | |
416 // This is a fast test on whether a reference points into the | |
417 // collection set or not. It does not assume that the reference | |
418 // points into the heap; if it doesn't, it will return false. | |
419 bool in_cset_fast_test(oop obj) { | |
420 assert(_in_cset_fast_test != NULL, "sanity"); | |
421 if (_g1_committed.contains((HeapWord*) obj)) { | |
422 // no need to subtract the bottom of the heap from obj, | |
423 // _in_cset_fast_test is biased | |
424 size_t index = ((size_t) obj) >> HeapRegion::LogOfHRGrainBytes; | |
425 bool ret = _in_cset_fast_test[index]; | |
426 // let's make sure the result is consistent with what the slower | |
427 // test returns | |
428 assert( ret || !obj_in_cs(obj), "sanity"); | |
429 assert(!ret || obj_in_cs(obj), "sanity"); | |
430 return ret; | |
431 } else { | |
432 return false; | |
433 } | |
434 } | |
435 | |
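To make the biasing comment above concrete, here is a simplified, standalone model of the fast in-CSet test: one flag per region, indexed by shifting the address by the region-size log. The heap base, region size, and names are invented for illustration, and the bias is shown as an explicit subtraction rather than an actual out-of-bounds biased pointer:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    int main() {
      const int       log_region_bytes = 20;                    // assume 1 MB regions
      const uintptr_t heap_bottom      = uintptr_t(1) << 32;    // simulated heap base
      const size_t    num_regions      = 16;

      std::vector<bool> in_cset(num_regions, false);            // one entry per region
      in_cset[3] = true;                                        // pretend region 3 is in the CSet

      // The bias is heap_bottom >> log_region_bytes; a biased table pointer would let
      // (addr >> log_region_bytes) index the table directly, with no subtraction.
      const uintptr_t bias = heap_bottom >> log_region_bytes;

      uintptr_t obj_addr = heap_bottom + (uintptr_t(3) << log_region_bytes) + 128;
      size_t index = (size_t)((obj_addr >> log_region_bytes) - bias);
      std::printf("region index = %zu, in collection set = %d\n", index, (int) in_cset[index]);
      return 0;
    }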
342 | 436 protected: |
437 | |
438 // Shrink the garbage-first heap by at most the given size (in bytes!). | |
439 // (Rounds down to a HeapRegion boundary.) | |
440 virtual void shrink(size_t expand_bytes); | |
441 void shrink_helper(size_t expand_bytes); | |
442 | |
443 // Do an incremental collection: identify a collection set, and evacuate | |
444 // its live objects elsewhere. | |
445 virtual void do_collection_pause(); | |
446 | |
447 // The guts of the incremental collection pause, executed by the vm | |
677 | 448 // thread. |
449 virtual void do_collection_pause_at_safepoint(); | |
342 | 450 |
451 // Actually do the work of evacuating the collection set. | |
452 virtual void evacuate_collection_set(); | |
453 | |
454 // If this is an appropriate right time, do a collection pause. | |
455 // The "word_size" argument, if non-zero, indicates the size of an | |
456 // allocation request that is prompting this query. | |
457 void do_collection_pause_if_appropriate(size_t word_size); | |
458 | |
459 // The g1 remembered set of the heap. | |
460 G1RemSet* _g1_rem_set; | |
461 // And its mod ref barrier set, used to track updates for the above. | |
462 ModRefBarrierSet* _mr_bs; | |
463 | |
616 | 464 // A set of cards that cover the objects for which the Rsets should be updated |
465 // concurrently after the collection. | |
466 DirtyCardQueueSet _dirty_card_queue_set; | |
467 |
342 | 468 // The Heap Region Rem Set Iterator. |
469 HeapRegionRemSetIterator** _rem_set_iterator; | |
470 | |
471 // The closure used to refine a single card. | |
472 RefineCardTableEntryClosure* _refine_cte_cl; | |
473 | |
474 // A function to check the consistency of dirty card logs. | |
475 void check_ct_logs_at_safepoint(); | |
476 | |
477 // After a collection pause, make the regions in the CS into free | |
478 // regions. | |
479 void free_collection_set(HeapRegion* cs_head); | |
480 | |
481 // Applies "scan_non_heap_roots" to roots outside the heap, | |
482 // "scan_rs" to roots inside the heap (having done "set_region" to | |
483 // indicate the region in which the root resides), and does "scan_perm" | |
484 // (setting the generation to the perm generation.) If "scan_rs" is | |
485 // NULL, then this step is skipped. The "worker_i" | |
486 // param is for use with parallel roots processing, and should be | |
487 // the "i" of the calling parallel worker thread's work(i) function. | |
488 // In the sequential case this param will be ignored. | |
489 void g1_process_strong_roots(bool collecting_perm_gen, | |
490 SharedHeap::ScanningOption so, | |
491 OopClosure* scan_non_heap_roots, | |
492 OopsInHeapRegionClosure* scan_rs, | |
493 OopsInHeapRegionClosure* scan_so, | |
494 OopsInGenClosure* scan_perm, | |
495 int worker_i); | |
496 | |
497 void scan_scan_only_set(OopsInHeapRegionClosure* oc, | |
498 int worker_i); | |
499 void scan_scan_only_region(HeapRegion* hr, | |
500 OopsInHeapRegionClosure* oc, | |
501 int worker_i); | |
502 | |
503 // Apply "blk" to all the weak roots of the system. These include | |
504 // JNI weak roots, the code cache, system dictionary, symbol table, | |
505 // string table, and referents of reachable weak refs. | |
506 void g1_process_weak_roots(OopClosure* root_closure, | |
507 OopClosure* non_root_closure); | |
508 | |
509 // Invoke "save_marks" on all heap regions. | |
510 void save_marks(); | |
511 | |
512 // Free a heap region. | |
513 void free_region(HeapRegion* hr); | |
514 // A component of "free_region", exposed for 'batching'. | |
515 // All the params after "hr" are out params: the used bytes of the freed | |
516 // region(s), the number of H regions cleared, the number of regions | |
517 // freed, and pointers to the head and tail of a list of freed contig | |
518 // regions, linked through the "next_on_unclean_list" field. | |
519 void free_region_work(HeapRegion* hr, | |
520 size_t& pre_used, | |
521 size_t& cleared_h, | |
522 size_t& freed_regions, | |
523 UncleanRegionList* list, | |
524 bool par = false); | |
525 | |
526 | |
527 // The concurrent marker (and the thread it runs in.) | |
528 ConcurrentMark* _cm; | |
529 ConcurrentMarkThread* _cmThread; | |
530 bool _mark_in_progress; | |
531 | |
532 // The concurrent refiner. | |
533 ConcurrentG1Refine* _cg1r; | |
534 | |
535 // The concurrent zero-fill thread. | |
536 ConcurrentZFThread* _czft; | |
537 | |
538 // The parallel task queues | |
539 RefToScanQueueSet *_task_queues; | |
540 | |
541 // True iff an evacuation has failed in the current collection. | |
542 bool _evacuation_failed; | |
543 | |
544 // Set the attribute indicating whether evacuation has failed in the | |
545 // current collection. | |
546 void set_evacuation_failed(bool b) { _evacuation_failed = b; } | |
547 | |
548 // Failed evacuations cause some logical from-space objects to have | |
549 // forwarding pointers to themselves. Reset them. | |
550 void remove_self_forwarding_pointers(); | |
551 | |
552 // When one is non-null, so is the other. Together, each pair is | |
553 // an object with a preserved mark, and its mark value. | |
554 GrowableArray<oop>* _objs_with_preserved_marks; | |
555 GrowableArray<markOop>* _preserved_marks_of_objs; | |
556 | |
557 // Preserve the mark of "obj", if necessary, in preparation for its mark | |
558 // word being overwritten with a self-forwarding-pointer. | |
559 void preserve_mark_if_necessary(oop obj, markOop m); | |
560 | |
561 // The stack of evac-failure objects left to be scanned. | |
562 GrowableArray<oop>* _evac_failure_scan_stack; | |
563 // The closure to apply to evac-failure objects. | |
564 | |
565 OopsInHeapRegionClosure* _evac_failure_closure; | |
566 // Set the field above. | |
567 void | |
568 set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_closure) { | |
569 _evac_failure_closure = evac_failure_closure; | |
570 } | |
571 | |
572 // Push "obj" on the scan stack. | |
573 void push_on_evac_failure_scan_stack(oop obj); | |
574 // Process scan stack entries until the stack is empty. | |
575 void drain_evac_failure_scan_stack(); | |
576 // True iff an invocation of "drain_scan_stack" is in progress; to | |
577 // prevent unnecessary recursion. | |
578 bool _drain_in_progress; | |
579 | |
580 // Do any necessary initialization for evacuation-failure handling. | |
581 // "cl" is the closure that will be used to process evac-failure | |
582 // objects. | |
583 void init_for_evac_failure(OopsInHeapRegionClosure* cl); | |
584 // Do any necessary cleanup for evacuation-failure handling data | |
585 // structures. | |
586 void finalize_for_evac_failure(); | |
587 | |
588 // An attempt to evacuate "obj" has failed; take necessary steps. | |
589 void handle_evacuation_failure(oop obj); | |
590 oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj); | |
591 void handle_evacuation_failure_common(oop obj, markOop m); | |
592 | |
593 | |
594 // Ensure that the relevant gc_alloc regions are set. | |
595 void get_gc_alloc_regions(); | |
636 | 596 // We're done with GC alloc regions. We are going to tear down the |
597 // gc alloc list and remove the gc alloc tag from all the regions on | |
598 // that list. However, we will also retain the last (i.e., the one | |
599 // that is half-full) GC alloc region, per GCAllocPurpose, for | |
600 // possible reuse during the next collection, provided | |
601 // _retain_gc_alloc_region[] indicates that it should be the | |
602 // case. Said regions are kept in the _retained_gc_alloc_regions[] | |
603 // array. If the parameter totally is set, we will not retain any | |
604 // regions, irrespective of what _retain_gc_alloc_region[] | |
605 // indicates. | |
606 void release_gc_alloc_regions(bool totally); | |
607 #ifndef PRODUCT | |
608 // Useful for debugging. | |
609 void print_gc_alloc_regions(); | |
610 #endif // !PRODUCT | |
342 | 611 |
612 // ("Weak") Reference processing support | |
613 ReferenceProcessor* _ref_processor; | |
614 | |
615 enum G1H_process_strong_roots_tasks { | |
616 G1H_PS_mark_stack_oops_do, | |
617 G1H_PS_refProcessor_oops_do, | |
618 // Leave this one last. | |
619 G1H_PS_NumElements | |
620 }; | |
621 | |
622 SubTasksDone* _process_strong_tasks; | |
623 | |
624 // List of regions which require zero filling. | |
625 UncleanRegionList _unclean_region_list; | |
626 bool _unclean_regions_coming; | |
627 | |
628 public: | |
629 void set_refine_cte_cl_concurrency(bool concurrent); | |
630 | |
631 RefToScanQueue *task_queue(int i); | |
632 | |
616 | 633 // A set of cards where updates happened during the GC |
634 DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; } | |
635 |
342 | 636 // Create a G1CollectedHeap with the specified policy. |
637 // Must call the initialize method afterwards. | |
638 // May not return if something goes wrong. | |
639 G1CollectedHeap(G1CollectorPolicy* policy); | |
640 | |
641 // Initialize the G1CollectedHeap to have the initial and | |
642 // maximum sizes, permanent generation, and remembered and barrier sets | |
643 // specified by the policy object. | |
644 jint initialize(); | |
645 | |
646 void ref_processing_init(); | |
647 | |
648 void set_par_threads(int t) { | |
649 SharedHeap::set_par_threads(t); | |
650 _process_strong_tasks->set_par_threads(t); | |
651 } | |
652 | |
653 virtual CollectedHeap::Name kind() const { | |
654 return CollectedHeap::G1CollectedHeap; | |
655 } | |
656 | |
657 // The current policy object for the collector. | |
658 G1CollectorPolicy* g1_policy() const { return _g1_policy; } | |
659 | |
660 // Adaptive size policy. No such thing for g1. | |
661 virtual AdaptiveSizePolicy* size_policy() { return NULL; } | |
662 | |
663 // The rem set and barrier set. | |
664 G1RemSet* g1_rem_set() const { return _g1_rem_set; } | |
665 ModRefBarrierSet* mr_bs() const { return _mr_bs; } | |
666 | |
667 // The rem set iterator. | |
668 HeapRegionRemSetIterator* rem_set_iterator(int i) { | |
669 return _rem_set_iterator[i]; | |
670 } | |
671 | |
672 HeapRegionRemSetIterator* rem_set_iterator() { | |
673 return _rem_set_iterator[0]; | |
674 } | |
675 | |
676 unsigned get_gc_time_stamp() { | |
677 return _gc_time_stamp; | |
678 } | |
679 | |
680 void reset_gc_time_stamp() { | |
681 _gc_time_stamp = 0; | |
353 | 682 OrderAccess::fence(); |
683 } | |
684 |
685 void increment_gc_time_stamp() { | |
686 ++_gc_time_stamp; | |
687 OrderAccess::fence(); | |
342 | 688 } |
689 | |
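For intuition about the fences in reset_gc_time_stamp and increment_gc_time_stamp: the time stamp acts as a publication flag, so the store to it must not become visible before the region state it stands for. A minimal, hypothetical analogue using C++11 fences (HotSpot's OrderAccess is not shown here):

    #include <atomic>
    #include <cstdio>
    #include <thread>

    std::atomic<unsigned> gc_time_stamp{0};
    unsigned region_snapshot = 0;          // hypothetical state published per pause

    void vm_thread() {
      region_snapshot = 42;                                   // update region state first
      std::atomic_thread_fence(std::memory_order_seq_cst);    // analogue of OrderAccess::fence()
      gc_time_stamp.store(1, std::memory_order_relaxed);      // then publish the new stamp
    }

    void reader() {
      if (gc_time_stamp.load(std::memory_order_relaxed) == 1) {
        std::atomic_thread_fence(std::memory_order_seq_cst);
        std::printf("snapshot = %u\n", region_snapshot);      // guaranteed to see 42
      }
    }

    int main() {
      std::thread a(vm_thread), b(reader);
      a.join();
      b.join();
      return 0;
    }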
690 void iterate_dirty_card_closure(bool concurrent, int worker_i); | |
691 | |
692 // The shared block offset table array. | |
693 G1BlockOffsetSharedArray* bot_shared() const { return _bot_shared; } | |
694 | |
695 // Reference Processing accessor | |
696 ReferenceProcessor* ref_processor() { return _ref_processor; } | |
697 | |
698 // Reserved (g1 only; super method includes perm), capacity and the used | |
699 // portion in bytes. | |
700 size_t g1_reserved_obj_bytes() { return _g1_reserved.byte_size(); } | |
701 virtual size_t capacity() const; | |
702 virtual size_t used() const; | |
703 size_t recalculate_used() const; | |
704 #ifndef PRODUCT | |
705 size_t recalculate_used_regions() const; | |
706 #endif // PRODUCT | |
707 | |
708 // These virtual functions do the actual allocation. | |
709 virtual HeapWord* mem_allocate(size_t word_size, | |
710 bool is_noref, | |
711 bool is_tlab, | |
712 bool* gc_overhead_limit_was_exceeded); | |
713 | |
714 // Some heaps may offer a contiguous region for shared non-blocking | |
715 // allocation, via inlined code (by exporting the address of the top and | |
716 // end fields defining the extent of the contiguous allocation region.) | |
717 // But G1CollectedHeap doesn't yet support this. | |
718 | |
719 // Return an estimate of the maximum allocation that could be performed | |
720 // without triggering any collection or expansion activity. In a | |
721 // generational collector, for example, this is probably the largest | |
722 // allocation that could be supported (without expansion) in the youngest | |
723 // generation. It is "unsafe" because no locks are taken; the result | |
724 // should be treated as an approximation, not a guarantee, for use in | |
725 // heuristic resizing decisions. | |
726 virtual size_t unsafe_max_alloc(); | |
727 | |
728 virtual bool is_maximal_no_gc() const { | |
729 return _g1_storage.uncommitted_size() == 0; | |
730 } | |
731 | |
732 // The total number of regions in the heap. | |
733 size_t n_regions(); | |
734 | |
735 // The number of regions that are completely free. | |
736 size_t max_regions(); | |
737 | |
738 // The number of regions that are completely free. | |
739 size_t free_regions(); | |
740 | |
741 // The number of regions that are not completely free. | |
742 size_t used_regions() { return n_regions() - free_regions(); } | |
743 | |
744 // True iff the ZF thread should run. | |
745 bool should_zf(); | |
746 | |
747 // The number of regions available for "regular" expansion. | |
748 size_t expansion_regions() { return _expansion_regions; } | |
749 | |
750 #ifndef PRODUCT | |
751 bool regions_accounted_for(); | |
752 bool print_region_accounting_info(); | |
753 void print_region_counts(); | |
754 #endif | |
755 | |
756 HeapRegion* alloc_region_from_unclean_list(bool zero_filled); | |
757 HeapRegion* alloc_region_from_unclean_list_locked(bool zero_filled); | |
758 | |
759 void put_region_on_unclean_list(HeapRegion* r); | |
760 void put_region_on_unclean_list_locked(HeapRegion* r); | |
761 | |
762 void prepend_region_list_on_unclean_list(UncleanRegionList* list); | |
763 void prepend_region_list_on_unclean_list_locked(UncleanRegionList* list); | |
764 | |
765 void set_unclean_regions_coming(bool b); | |
766 void set_unclean_regions_coming_locked(bool b); | |
767 // Wait for cleanup to be complete. | |
768 void wait_for_cleanup_complete(); | |
769 // Like above, but assumes that the calling thread owns the Heap_lock. | |
770 void wait_for_cleanup_complete_locked(); | |
771 | |
772 // Return the head of the unclean list. | |
773 HeapRegion* peek_unclean_region_list_locked(); | |
774 // Remove and return the head of the unclean list. | |
775 HeapRegion* pop_unclean_region_list_locked(); | |
776 | |
777 // List of regions which are zero filled and ready for allocation. | |
778 HeapRegion* _free_region_list; | |
779 // Number of elements on the free list. | |
780 size_t _free_region_list_size; | |
781 | |
782 // If the head of the unclean list is ZeroFilled, move it to the free | |
783 // list. | |
784 bool move_cleaned_region_to_free_list_locked(); | |
785 bool move_cleaned_region_to_free_list(); | |
786 | |
787 void put_free_region_on_list_locked(HeapRegion* r); | |
788 void put_free_region_on_list(HeapRegion* r); | |
789 | |
790 // Remove and return the head element of the free list. | |
791 HeapRegion* pop_free_region_list_locked(); | |
792 | |
793 // If "zero_filled" is true, we first try the free list, then we try the | |
794 // unclean list, zero-filling the result. If "zero_filled" is false, we | |
795 // first try the unclean list, then the zero-filled list. | |
796 HeapRegion* alloc_free_region_from_lists(bool zero_filled); | |
797 | |
798 // Verify the integrity of the region lists. | |
799 void remove_allocated_regions_from_lists(); | |
800 bool verify_region_lists(); | |
801 bool verify_region_lists_locked(); | |
802 size_t unclean_region_list_length(); | |
803 size_t free_region_list_length(); | |
804 | |
805 // Perform a collection of the heap; intended for use in implementing | |
806 // "System.gc". This probably implies as full a collection as the | |
807 // "CollectedHeap" supports. | |
808 virtual void collect(GCCause::Cause cause); | |
809 | |
810 // The same as above but assume that the caller holds the Heap_lock. | |
811 void collect_locked(GCCause::Cause cause); | |
812 | |
813 // This interface assumes that it's being called by the | |
814 // vm thread. It collects the heap assuming that the | |
815 // heap lock is already held and that we are executing in | |
816 // the context of the vm thread. | |
817 virtual void collect_as_vm_thread(GCCause::Cause cause); | |
818 | |
819 // True iff an evacuation has failed in the most-recent collection. | |
820 bool evacuation_failed() { return _evacuation_failed; } | |
821 | |
822 // Free a region if it is totally full of garbage. Returns the number of | |
823 // bytes freed (0 ==> didn't free it). | |
824 size_t free_region_if_totally_empty(HeapRegion *hr); | |
825 void free_region_if_totally_empty_work(HeapRegion *hr, | |
826 size_t& pre_used, | |
827 size_t& cleared_h_regions, | |
828 size_t& freed_regions, | |
829 UncleanRegionList* list, | |
830 bool par = false); | |
831 | |
832 // If we've done free region work that yields the given changes, update | |
833 // the relevant global variables. | |
834 void finish_free_region_work(size_t pre_used, | |
835 size_t cleared_h_regions, | |
836 size_t freed_regions, | |
837 UncleanRegionList* list); | |
838 | |
839 | |
840 // Returns "TRUE" iff "p" points into the allocated area of the heap. | |
841 virtual bool is_in(const void* p) const; | |
842 | |
843 // Return "TRUE" iff the given object address is within the collection | |
844 // set. | |
845 inline bool obj_in_cs(oop obj); | |
846 | |
847 // Return "TRUE" iff the given object address is in the reserved | |
848 // region of g1 (excluding the permanent generation). | |
849 bool is_in_g1_reserved(const void* p) const { | |
850 return _g1_reserved.contains(p); | |
851 } | |
852 | |
853 // Returns a MemRegion that corresponds to the space that has been | |
854 // committed in the heap | |
855 MemRegion g1_committed() { | |
856 return _g1_committed; | |
857 } | |
858 | |
859 NOT_PRODUCT( bool is_in_closed_subset(const void* p) const; ) | |
860 | |
861 // Dirty card table entries covering a list of young regions. | |
862 void dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list); | |
863 | |
864 // This resets the card table to all zeros. It is used after | |
865 // a collection pause which used the card table to claim cards. | |
866 void cleanUpCardTable(); | |
867 | |
868 // Iteration functions. | |
869 | |
870 // Iterate over all the ref-containing fields of all objects, calling | |
871 // "cl.do_oop" on each. | |
678 | 872 virtual void oop_iterate(OopClosure* cl) { |
873 oop_iterate(cl, true); | |
874 } | |
875 void oop_iterate(OopClosure* cl, bool do_perm); | |
342 | 876 |
877 // Same as above, restricted to a memory region. | |
678 | 878 virtual void oop_iterate(MemRegion mr, OopClosure* cl) { |
879 oop_iterate(mr, cl, true); | |
880 } | |
881 void oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm); | |
342 | 882 |
883 // Iterate over all objects, calling "cl.do_object" on each. | |
678 | 884 virtual void object_iterate(ObjectClosure* cl) { |
885 object_iterate(cl, true); | |
886 } | |
887 virtual void safe_object_iterate(ObjectClosure* cl) { | |
888 object_iterate(cl, true); | |
889 } | |
890 void object_iterate(ObjectClosure* cl, bool do_perm); | |
342 | 891 |
892 // Iterate over all objects allocated since the last collection, calling | |
893 // "cl.do_object" on each. The heap must have been initialized properly | |
894 // to support this function, or else this call will fail. | |
895 virtual void object_iterate_since_last_GC(ObjectClosure* cl); | |
896 | |
897 // Iterate over all spaces in use in the heap, in ascending address order. | |
898 virtual void space_iterate(SpaceClosure* cl); | |
899 | |
900 // Iterate over heap regions, in address order, terminating the | |
901 // iteration early if the "doHeapRegion" method returns "true". | |
902 void heap_region_iterate(HeapRegionClosure* blk); | |
903 | |
904 // Iterate over heap regions starting with r (or the first region if "r" | |
905 // is NULL), in address order, terminating early if the "doHeapRegion" | |
906 // method returns "true". | |
907 void heap_region_iterate_from(HeapRegion* r, HeapRegionClosure* blk); | |
908 | |
909 // As above but starting from the region at index idx. | |
910 void heap_region_iterate_from(int idx, HeapRegionClosure* blk); | |
911 | |
912 HeapRegion* region_at(size_t idx); | |
913 | |
914 // Divide the heap region sequence into "chunks" of some size (the number | |
915 // of regions divided by the number of parallel threads times some | |
916 // overpartition factor, currently 4). Assumes that this will be called | |
917 // in parallel by ParallelGCThreads worker threads with distinct worker | |
918 // ids in the range [0..max(ParallelGCThreads-1, 1)], that all parallel | |
919 // calls will use the same "claim_value", and that that claim value is | |
920 // different from the claim_value of any heap region before the start of | |
921 // the iteration. Applies "blk->doHeapRegion" to each of the regions, by | |
922 // attempting to claim the first region in each chunk, and, if | |
923 // successful, applying the closure to each region in the chunk (and | |
924 // setting the claim value of the second and subsequent regions of the | |
925 // chunk.) For now requires that "doHeapRegion" always returns "false", | |
926 // i.e., that a closure never attempt to abort a traversal. | |
927 void heap_region_par_iterate_chunked(HeapRegionClosure* blk, | |
928 int worker, | |
929 jint claim_value); | |
930 | |
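A rough, standalone sketch of the claim-value scheme described above: every region carries a claim word, and a worker processes a chunk only if it wins the race to move the chunk's first region from the old claim value to the new one. The types and thread setup below are invented (std::atomic/std::thread instead of HotSpot's primitives):

    #include <algorithm>
    #include <atomic>
    #include <cstdio>
    #include <thread>
    #include <vector>

    struct Region { std::atomic<int> claim{0}; };   // stand-in for HeapRegion's claim value

    void iterate_chunked(std::vector<Region>& regions, size_t chunk, int new_claim, int worker) {
      for (size_t start = 0; start < regions.size(); start += chunk) {
        int expected = new_claim - 1;                          // the previous claim value
        if (regions[start].claim.compare_exchange_strong(expected, new_claim)) {
          size_t end = std::min(start + chunk, regions.size());
          for (size_t i = start; i < end; ++i) {
            regions[i].claim.store(new_claim);                 // mark the rest of the chunk
            std::printf("worker %d processed region %zu\n", worker, i);
          }
        }                                                      // lost the race: skip the chunk
      }
    }

    int main() {
      std::vector<Region> regions(16);
      std::thread t0(iterate_chunked, std::ref(regions), size_t(4), 1, 0);
      std::thread t1(iterate_chunked, std::ref(regions), size_t(4), 1, 1);
      t0.join();
      t1.join();
      return 0;
    }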
390 | 931 // It resets all the region claim values to the default. |
932 void reset_heap_region_claim_values(); | |
933 | |
355 | 934 #ifdef ASSERT |
935 bool check_heap_region_claim_values(jint claim_value); | |
936 #endif // ASSERT | |
937 | |
342 | 938 // Iterate over the regions (if any) in the current collection set. |
939 void collection_set_iterate(HeapRegionClosure* blk); | |
940 | |
941 // As above but starting from region r | |
942 void collection_set_iterate_from(HeapRegion* r, HeapRegionClosure *blk); | |
943 | |
944 // Returns the first (lowest address) compactible space in the heap. | |
945 virtual CompactibleSpace* first_compactible_space(); | |
946 | |
947 // A CollectedHeap will contain some number of spaces. This finds the | |
948 // space containing a given address, or else returns NULL. | |
949 virtual Space* space_containing(const void* addr) const; | |
950 | |
951 // A G1CollectedHeap will contain some number of heap regions. This | |
952 // finds the region containing a given address, or else returns NULL. | |
953 HeapRegion* heap_region_containing(const void* addr) const; | |
954 | |
955 // Like the above, but requires "addr" to be in the heap (to avoid a | |
956 // null-check), and unlike the above, may return a continuing humongous | |
957 // region. | |
958 HeapRegion* heap_region_containing_raw(const void* addr) const; | |
959 | |
960 // A CollectedHeap is divided into a dense sequence of "blocks"; that is, | |
961 // each address in the (reserved) heap is a member of exactly | |
962 // one block. The defining characteristic of a block is that it is | |
963 // possible to find its size, and thus to progress forward to the next | |
964 // block. (Blocks may be of different sizes.) Thus, blocks may | |
965 // represent Java objects, or they might be free blocks in a | |
966 // free-list-based heap (or subheap), as long as the two kinds are | |
967 // distinguishable and the size of each is determinable. | |
968 | |
969 // Returns the address of the start of the "block" that contains the | |
970 // address "addr". We say "blocks" instead of "object" since some heaps | |
971 // may not pack objects densely; a chunk may either be an object or a | |
972 // non-object. | |
973 virtual HeapWord* block_start(const void* addr) const; | |
974 | |
975 // Requires "addr" to be the start of a chunk, and returns its size. | |
976 // "addr + size" is required to be the start of a new chunk, or the end | |
977 // of the active area of the heap. | |
978 virtual size_t block_size(const HeapWord* addr) const; | |
979 | |
980 // Requires "addr" to be the start of a block, and returns "TRUE" iff | |
981 // the block is an object. | |
982 virtual bool block_is_obj(const HeapWord* addr) const; | |
983 | |
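The block functions above are what make a linear walk of a region possible: start at the bottom and repeatedly advance by block_size. A schematic, self-contained sketch (the FakeHeap type and its word-sized blocks are invented; real code would call block_start/block_size/block_is_obj):

    #include <cstddef>
    #include <cstdio>

    // Invented stand-in that reports the size (in words) of the block starting
    // at a given word offset; real code would consult the block offset table.
    struct FakeHeap {
      size_t sizes[4] = {3, 1, 5, 2};
      size_t block_size(size_t offset_words) const {
        size_t off = 0;
        for (size_t s : sizes) { if (off == offset_words) return s; off += s; }
        return 0;
      }
    };

    int main() {
      FakeHeap heap;
      const size_t bottom = 0, top = 11;           // region bounds, in words
      for (size_t addr = bottom; addr < top; ) {   // walk the region block by block
        size_t sz = heap.block_size(addr);
        std::printf("block at word %zu, %zu words\n", addr, sz);
        addr += sz;
      }
      return 0;
    }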
984 // Does this heap support heap inspection? (+PrintClassHistogram) | |
985 virtual bool supports_heap_inspection() const { return true; } | |
986 | |
987 // Section on thread-local allocation buffers (TLABs) | |
988 // See CollectedHeap for semantics. | |
989 | |
990 virtual bool supports_tlab_allocation() const; | |
991 virtual size_t tlab_capacity(Thread* thr) const; | |
992 virtual size_t unsafe_max_tlab_alloc(Thread* thr) const; | |
993 virtual HeapWord* allocate_new_tlab(size_t size); | |
994 | |
995 // Can a compiler initialize a new object without store barriers? | |
996 // This permission only extends from the creation of a new object | |
997 // via a TLAB up to the first subsequent safepoint. | |
998 virtual bool can_elide_tlab_store_barriers() const { | |
999 // Since G1's TLAB's may, on occasion, come from non-young regions | |
1000 // as well. (Is there a flag controlling that? XXX) | |
1001 return false; | |
1002 } | |
1003 | |
1004 // Can a compiler elide a store barrier when it writes | |
1005 // a permanent oop into the heap? Applies when the compiler | |
1006 // is storing x to the heap, where x->is_perm() is true. | |
1007 virtual bool can_elide_permanent_oop_store_barriers() const { | |
1008 // At least until perm gen collection is also G1-ified, at | |
1009 // which point this should return false. | |
1010 return true; | |
1011 } | |
1012 | |
1013 virtual bool allocs_are_zero_filled(); | |
1014 | |
1015 // The boundary between a "large" and "small" array of primitives, in | |
1016 // words. | |
1017 virtual size_t large_typearray_limit(); | |
1018 | |
1019 // Returns "true" iff the given word_size is "very large". | |
1020 static bool isHumongous(size_t word_size) { | |
1021 return word_size >= VeryLargeInWords; | |
1022 } | |
1023 | |
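To put a number on the isHumongous threshold: with the SomePrivateConstants above, anything at least half a region counts as very large. A worked example with assumed values (1 MB regions, 8-byte heap words; the real GrainBytes depends on the heap configuration):

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t GrainBytes       = 1024 * 1024;                     // assumed region size
      const size_t HeapWordSize     = 8;                               // assumed 64-bit words
      const size_t VeryLargeInBytes = GrainBytes / 2;                  // 524288 bytes
      const size_t VeryLargeInWords = VeryLargeInBytes / HeapWordSize; // 65536 words

      const size_t word_size = 70000;                                  // candidate allocation
      std::printf("threshold = %zu words; %zu words humongous? %s\n",
                  VeryLargeInWords, word_size,
                  word_size >= VeryLargeInWords ? "yes" : "no");
      return 0;
    }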
1024 // Update mod union table with the set of dirty cards. | |
1025 void updateModUnion(); | |
1026 | |
1027 // Set the mod union bits corresponding to the given memRegion. Note | |
1028 // that this is always a safe operation, since it doesn't clear any | |
1029 // bits. | |
1030 void markModUnionRange(MemRegion mr); | |
1031 | |
1032 // Records the fact that a marking phase is no longer in progress. | |
1033 void set_marking_complete() { | |
1034 _mark_in_progress = false; | |
1035 } | |
1036 void set_marking_started() { | |
1037 _mark_in_progress = true; | |
1038 } | |
1039 bool mark_in_progress() { | |
1040 return _mark_in_progress; | |
1041 } | |
1042 | |
1043 // Print the maximum heap capacity. | |
1044 virtual size_t max_capacity() const; | |
1045 | |
1046 virtual jlong millis_since_last_gc(); | |
1047 | |
1048 // Perform any cleanup actions necessary before allowing a verification. | |
1049 virtual void prepare_for_verify(); | |
1050 | |
1051 // Perform verification. | |
1052 virtual void verify(bool allow_dirty, bool silent); | |
1053 virtual void print() const; | |
1054 virtual void print_on(outputStream* st) const; | |
1055 | |
1056 virtual void print_gc_threads_on(outputStream* st) const; | |
1057 virtual void gc_threads_do(ThreadClosure* tc) const; | |
1058 | |
1059 // Override | |
1060 void print_tracing_info() const; | |
1061 | |
1062 // If "addr" is a pointer into the (reserved?) heap, returns a positive | |
1063 // number indicating the "arena" within the heap in which "addr" falls. | |
1064 // Or else returns 0. | |
1065 virtual int addr_to_arena_id(void* addr) const; | |
1066 | |
1067 // Convenience function to be used in situations where the heap type can be | |
1068 // asserted to be this type. | |
1069 static G1CollectedHeap* heap(); | |
1070 | |
1071 void empty_young_list(); | |
1072 bool should_set_young_locked(); | |
1073 | |
1074 void set_region_short_lived_locked(HeapRegion* hr); | |
1075 // add appropriate methods for any other surv rate groups | |
1076 | |
1077 void young_list_rs_length_sampling_init() { | |
1078 _young_list->rs_length_sampling_init(); | |
1079 } | |
1080 bool young_list_rs_length_sampling_more() { | |
1081 return _young_list->rs_length_sampling_more(); | |
1082 } | |
1083 void young_list_rs_length_sampling_next() { | |
1084 _young_list->rs_length_sampling_next(); | |
1085 } | |
1086 size_t young_list_sampled_rs_lengths() { | |
1087 return _young_list->sampled_rs_lengths(); | |
1088 } | |
1089 | |
1090 size_t young_list_length() { return _young_list->length(); } | |
1091 size_t young_list_scan_only_length() { | |
1092 return _young_list->scan_only_length(); } | |
1093 | |
1094 HeapRegion* pop_region_from_young_list() { | |
1095 return _young_list->pop_region(); | |
1096 } | |
1097 | |
1098 HeapRegion* young_list_first_region() { | |
1099 return _young_list->first_region(); | |
1100 } | |
1101 | |
1102 // debugging | |
1103 bool check_young_list_well_formed() { | |
1104 return _young_list->check_list_well_formed(); | |
1105 } | |
1106 bool check_young_list_empty(bool ignore_scan_only_list, | |
1107 bool check_sample = true); | |
1108 | |
1109 // *** Stuff related to concurrent marking. It's not clear to me that so | |
1110 // many of these need to be public. | |
1111 | |
1112 // The functions below are helper functions that a subclass of | |
1113 // "CollectedHeap" can use in the implementation of its virtual | |
1114 // functions. | |
1115 // This performs a concurrent marking of the live objects in a | |
1116 // bitmap off to the side. | |
1117 void doConcurrentMark(); | |
1118 | |
1119 // This is called from the marksweep collector which then does | |
1120 // a concurrent mark and verifies that the results agree with | |
1121 // the stop the world marking. | |
1122 void checkConcurrentMark(); | |
1123 void do_sync_mark(); | |
1124 | |
1125 bool isMarkedPrev(oop obj) const; | |
1126 bool isMarkedNext(oop obj) const; | |
1127 | |
1128 // Determine if an object is dead, given the object and also | |
1129 // the region to which the object belongs. An object is dead | |
1130 // iff a) it was not allocated since the last mark and b) it | |
1131 // is not marked. | |
1132 | |
1133 bool is_obj_dead(const oop obj, const HeapRegion* hr) const { | |
1134 return | |
1135 !hr->obj_allocated_since_prev_marking(obj) && | |
1136 !isMarkedPrev(obj); | |
1137 } | |
1138 | |
1139 // This is used when copying an object to survivor space. | |
1140 // If the object is marked live, then we mark the copy live. | |
1141 // If the object is allocated since the start of this mark | |
1142 // cycle, then we mark the copy live. | |
1143 // If the object has been around since the previous mark | |
1144 // phase, and hasn't been marked yet during this phase, | |
1145 // then we don't mark it, we just wait for the | |
1146 // current marking cycle to get to it. | |
1147 | |
1148 // This function returns true when an object has been | |
1149 // around since the previous marking and hasn't yet | |
1150 // been marked during this marking. | |
1151 | |
1152 bool is_obj_ill(const oop obj, const HeapRegion* hr) const { | |
1153 return | |
1154 !hr->obj_allocated_since_next_marking(obj) && | |
1155 !isMarkedNext(obj); | |
1156 } | |
1157 | |
1158 // Determine if an object is dead, given only the object itself. | |
1159 // This will find the region to which the object belongs and | |
1160 // then call the region version of the same function. | |
1161 | |
1162 // Added: if it is in the permanent gen it isn't dead. | |
1163 // Added: if it is NULL it isn't dead. | |
1164 | |
1165 bool is_obj_dead(oop obj) { | |
1166 HeapRegion* hr = heap_region_containing(obj); | |
1167 if (hr == NULL) { | |
1168 if (Universe::heap()->is_in_permanent(obj)) | |
1169 return false; | |
1170 else if (obj == NULL) return false; | |
1171 else return true; | |
1172 } | |
1173 else return is_obj_dead(obj, hr); | |
1174 } | |
1175 | |
1176 bool is_obj_ill(oop obj) { | |
1177 HeapRegion* hr = heap_region_containing(obj); | |
1178 if (hr == NULL) { | |
1179 if (Universe::heap()->is_in_permanent(obj)) | |
1180 return false; | |
1181 else if (obj == NULL) return false; | |
1182 else return true; | |
1183 } | |
1184 else return is_obj_ill(obj, hr); | |
1185 } | |
1186 | |
1187 // The following is just to alert the verification code | |
1188 // that a full collection has occurred and that the | |
1189 // remembered sets are no longer up to date. | |
1190 bool _full_collection; | |
1191 void set_full_collection() { _full_collection = true;} | |
1192 void clear_full_collection() {_full_collection = false;} | |
1193 bool full_collection() {return _full_collection;} | |
1194 | |
1195 ConcurrentMark* concurrent_mark() const { return _cm; } | |
1196 ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; } | |
1197 | |
796 | 1198 // The dirty cards region list is used to record a subset of regions |
1199 // whose cards need clearing. The list is populated during the | |
1200 // remembered set scanning and drained during the card table | |
1201 // cleanup. Although the methods are reentrant, population/draining | |
1202 // phases must not overlap. For synchronization purposes the last | |
1203 // element on the list points to itself. | |
1204 HeapRegion* _dirty_cards_region_list; | |
1205 void push_dirty_cards_region(HeapRegion* hr); | |
1206 HeapRegion* pop_dirty_cards_region(); | |
1207 |
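The "last element points to itself" convention can be shown in isolation: a self-loop marks the end of the list, so a NULL link can keep meaning "not on any list". The sketch below is a simplified, hypothetical illustration with std::atomic, not the HotSpot code, and it assumes (as the comment above requires) that pushing and draining do not overlap:

    #include <atomic>
    #include <cstdio>

    struct Region {                      // invented stand-in for HeapRegion
      Region* next_dirty = nullptr;      // nullptr means "not on the list"
      int id;
      explicit Region(int i) : id(i) {}
    };

    std::atomic<Region*> dirty_cards_list{nullptr};

    void push_dirty_cards_region(Region* r) {
      Region* head = dirty_cards_list.load();
      do {
        r->next_dirty = (head != nullptr) ? head : r;   // self-loop terminates the list
      } while (!dirty_cards_list.compare_exchange_weak(head, r));
    }

    Region* pop_dirty_cards_region() {
      Region* head = dirty_cards_list.load();
      while (head != nullptr) {
        Region* next = (head->next_dirty == head) ? nullptr : head->next_dirty;
        if (dirty_cards_list.compare_exchange_weak(head, next)) {
          head->next_dirty = nullptr;                   // detached; back to "not on the list"
          return head;
        }
      }
      return nullptr;
    }

    int main() {
      Region a(0), b(1);
      push_dirty_cards_region(&a);
      push_dirty_cards_region(&b);
      while (Region* r = pop_dirty_cards_region()) {
        std::printf("clearing cards for region %d\n", r->id);
      }
      return 0;
    }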
342 | 1208 public: |
1209 void stop_conc_gc_threads(); | |
1210 | |
1211 // <NEW PREDICTION> | |
1212 | |
1213 double predict_region_elapsed_time_ms(HeapRegion* hr, bool young); | |
1214 void check_if_region_is_too_expensive(double predicted_time_ms); | |
1215 size_t pending_card_num(); | |
1216 size_t max_pending_card_num(); | |
1217 size_t cards_scanned(); | |
1218 | |
1219 // </NEW PREDICTION> | |
1220 | |
1221 protected: | |
1222 size_t _max_heap_capacity; | |
1223 | |
1224 // debug_only(static void check_for_valid_allocation_state();) | |
1225 | |
1226 public: | |
1227 // Temporary: call to mark things unimplemented for the G1 heap (e.g., | |
1228 // MemoryService). In productization, we can make this assert false | |
1229 // to catch such places (as well as searching for calls to this...) | |
1230 static void g1_unimplemented(); | |
1231 | |
1232 }; | |
1233 | |
1234 // Local Variables: *** | |
1235 // c-indentation-style: gnu *** | |
1236 // End: *** |