annotate src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp @ 795:215f81b4d9b3
6841831: G1: assert(contains_reference(from),"We just added it!") fires
Summary: During parallel rset updating we have to make sure that the worker ids of the refinement threads do not intersect with the worker ids that can be claimed by the mutator threads.
Reviewed-by: tonyp
author | iveresov |
---|---|
date | Mon, 18 May 2009 11:52:46 -0700 |
parents | 4ac7d97e6101 |
children | 29e7d79232b9 |
rev | line source |
---|---|
342 | 1 /* |
579 | 2 * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. |
342 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, | |
20 * CA 95054 USA or visit www.sun.com if you need additional information or | |
21 * have any questions. | |
22 * | |
23 */ | |
24 | |
25 // A "G1CollectedHeap" is an implementation of a java heap for HotSpot. | |
26 // It uses the "Garbage First" heap organization and algorithm, which | |
27 // may combine concurrent marking with parallel, incremental compaction of | |
28 // heap subsets that will yield large amounts of garbage. | |
29 | |
30 class HeapRegion; | |
31 class HeapRegionSeq; | |
32 class PermanentGenerationSpec; | |
33 class GenerationSpec; | |
34 class OopsInHeapRegionClosure; | |
35 class G1ScanHeapEvacClosure; | |
36 class ObjectClosure; | |
37 class SpaceClosure; | |
38 class CompactibleSpaceClosure; | |
39 class Space; | |
40 class G1CollectorPolicy; | |
41 class GenRemSet; | |
42 class G1RemSet; | |
43 class HeapRegionRemSetIterator; | |
44 class ConcurrentMark; | |
45 class ConcurrentMarkThread; | |
46 class ConcurrentG1Refine; | |
47 class ConcurrentZFThread; | |
48 | |
49 // If you want to accumulate detailed statistics on work queues, | |
50 // turn this on. | |
51 #define G1_DETAILED_STATS 0 | |
52 | |
53 #if G1_DETAILED_STATS | |
54 # define IF_G1_DETAILED_STATS(code) code | |
55 #else | |
56 # define IF_G1_DETAILED_STATS(code) | |
57 #endif | |
58 | |
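The `IF_G1_DETAILED_STATS` macro above is a standard conditional-compilation switch: statements wrapped in it vanish entirely when `G1_DETAILED_STATS` is 0. A minimal standalone demo (the `queue_pushes` counter is hypothetical, not from this file):

```cpp
#define G1_DETAILED_STATS 0

#if G1_DETAILED_STATS
# define IF_G1_DETAILED_STATS(code) code
#else
# define IF_G1_DETAILED_STATS(code)
#endif

int main() {
  int queue_pushes = 0;                  // hypothetical statistics counter
  IF_G1_DETAILED_STATS(queue_pushes++;)  // expands to nothing with the flag at 0
  return queue_pushes;                   // 0 as written; 1 if the flag were 1
}
```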
59 typedef GenericTaskQueue<oop*> RefToScanQueue; | |
60 typedef GenericTaskQueueSet<oop*> RefToScanQueueSet; | |
61 | |
62 enum G1GCThreadGroups { | |
63 G1CRGroup = 0, | |
64 G1ZFGroup = 1, | |
65 G1CMGroup = 2, | |
66 G1CLGroup = 3 | |
67 }; | |
68 | |
69 enum GCAllocPurpose { | |
70 GCAllocForTenured, | |
71 GCAllocForSurvived, | |
72 GCAllocPurposeCount | |
73 }; | |
74 | |
75 class YoungList : public CHeapObj { | |
76 private: | |
77 G1CollectedHeap* _g1h; | |
78 | |
79 HeapRegion* _head; | |
80 | |
81 HeapRegion* _scan_only_head; | |
82 HeapRegion* _scan_only_tail; | |
83 size_t _length; | |
84 size_t _scan_only_length; | |
85 | |
86 size_t _last_sampled_rs_lengths; | |
87 size_t _sampled_rs_lengths; | |
88 HeapRegion* _curr; | |
89 HeapRegion* _curr_scan_only; | |
90 | |
91 HeapRegion* _survivor_head; | |
545 | 92 HeapRegion* _survivor_tail; |
342 | 93 size_t _survivor_length; |
94 | |
95 void empty_list(HeapRegion* list); | |
96 | |
97 public: | |
98 YoungList(G1CollectedHeap* g1h); | |
99 | |
100 void push_region(HeapRegion* hr); | |
101 void add_survivor_region(HeapRegion* hr); | |
102 HeapRegion* pop_region(); | |
103 void empty_list(); | |
104 bool is_empty() { return _length == 0; } | |
105 size_t length() { return _length; } | |
106 size_t scan_only_length() { return _scan_only_length; } | |
545 | 107 size_t survivor_length() { return _survivor_length; } |
342 | 108 |
109 void rs_length_sampling_init(); | |
110 bool rs_length_sampling_more(); | |
111 void rs_length_sampling_next(); | |
112 | |
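The three `rs_length_sampling_*` declarations above form a cursor-style sampling protocol: `init()` positions the cursor at the head of the young list, `more()` tests it, and `next()` samples one region's remembered-set length and advances. A hedged usage sketch against the API as declared:

```cpp
// Sample the remembered-set lengths of the young regions, then read the
// result published by the sampler.
size_t sample_young_rs_lengths(YoungList* young_list) {
  young_list->rs_length_sampling_init();
  while (young_list->rs_length_sampling_more()) {
    young_list->rs_length_sampling_next();  // sample one region, advance
  }
  return young_list->sampled_rs_lengths();
}
```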
113 void reset_sampled_info() { | |
114 _last_sampled_rs_lengths = 0; | |
115 } | |
116 size_t sampled_rs_lengths() { return _last_sampled_rs_lengths; } | |
117 | |
118 // for development purposes | |
119 void reset_auxilary_lists(); | |
120 HeapRegion* first_region() { return _head; } | |
121 HeapRegion* first_scan_only_region() { return _scan_only_head; } | |
122 HeapRegion* first_survivor_region() { return _survivor_head; } | |
545 | 123 HeapRegion* last_survivor_region() { return _survivor_tail; } |
342 | 124 HeapRegion* par_get_next_scan_only_region() { |
125 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); | |
126 HeapRegion* ret = _curr_scan_only; | |
127 if (ret != NULL) | |
128 _curr_scan_only = ret->get_next_young_region(); | |
129 return ret; | |
130 } | |
131 | |
132 // debugging | |
133 bool check_list_well_formed(); | |
134 bool check_list_empty(bool ignore_scan_only_list, | |
135 bool check_sample = true); | |
136 void print(); | |
137 }; | |
138 | |
139 class RefineCardTableEntryClosure; | |
140 class G1CollectedHeap : public SharedHeap { | |
141 friend class VM_G1CollectForAllocation; | |
142 friend class VM_GenCollectForPermanentAllocation; | |
143 friend class VM_G1CollectFull; | |
144 friend class VM_G1IncCollectionPause; | |
145 friend class VMStructs; | |
146 | |
147 // Closures used in implementation. | |
148 friend class G1ParCopyHelper; | |
149 friend class G1IsAliveClosure; | |
150 friend class G1EvacuateFollowersClosure; | |
151 friend class G1ParScanThreadState; | |
152 friend class G1ParScanClosureSuper; | |
153 friend class G1ParEvacuateFollowersClosure; | |
154 friend class G1ParTask; | |
155 friend class G1FreeGarbageRegionClosure; | |
156 friend class RefineCardTableEntryClosure; | |
157 friend class G1PrepareCompactClosure; | |
158 friend class RegionSorter; | |
159 friend class CountRCClosure; | |
160 friend class EvacPopObjClosure; | |
161 | |
162 // Other related classes. | |
163 friend class G1MarkSweep; | |
164 | |
165 private: | |
166 enum SomePrivateConstants { | |
167 VeryLargeInBytes = HeapRegion::GrainBytes/2, | |
168 VeryLargeInWords = VeryLargeInBytes/HeapWordSize, | |
169 MinHeapDeltaBytes = 10 * HeapRegion::GrainBytes, // FIXME | |
170 NumAPIs = HeapRegion::MaxAge | |
171 }; | |
172 | |
173 // The one and only G1CollectedHeap, so static functions can find it. | |
174 static G1CollectedHeap* _g1h; | |
175 | |
176 // Storage for the G1 heap (excludes the permanent generation). | |
177 VirtualSpace _g1_storage; | |
178 MemRegion _g1_reserved; | |
179 | |
180 // The part of _g1_storage that is currently committed. | |
181 MemRegion _g1_committed; | |
182 | |
183 // The maximum part of _g1_storage that has ever been committed. | |
184 MemRegion _g1_max_committed; | |
185 | |
186 // The number of regions that are completely free. | |
187 size_t _free_regions; | |
188 | |
189 // The number of regions we could create by expansion. | |
190 size_t _expansion_regions; | |
191 | |
192 // Return the number of free regions in the heap (by direct counting.) | |
193 size_t count_free_regions(); | |
194 // Return the number of free regions on the free and unclean lists. | |
195 size_t count_free_regions_list(); | |
196 | |
197 // The block offset table for the G1 heap. | |
198 G1BlockOffsetSharedArray* _bot_shared; | |
199 | |
200 // Move all of the regions off the free lists, then rebuild those free | |
201 // lists, before and after full GC. | |
202 void tear_down_region_lists(); | |
203 void rebuild_region_lists(); | |
204 // This sets all non-empty regions to need zero-fill (which they will if | |
205 // they are empty after full collection.) | |
206 void set_used_regions_to_need_zero_fill(); | |
207 | |
208 // The sequence of all heap regions in the heap. | |
209 HeapRegionSeq* _hrs; | |
210 | |
211 // The region from which normal-sized objects are currently being | |
212 // allocated. May be NULL. | |
213 HeapRegion* _cur_alloc_region; | |
214 | |
215 // Postcondition: cur_alloc_region == NULL. | |
216 void abandon_cur_alloc_region(); | |
636 | 217 void abandon_gc_alloc_regions(); |
342 | 218 |
219 // The to-space memory regions into which objects are being copied during | |
220 // a GC. | |
221 HeapRegion* _gc_alloc_regions[GCAllocPurposeCount]; | |
545 | 222 size_t _gc_alloc_region_counts[GCAllocPurposeCount]; |
636 | 223 // These are the regions, one per GCAllocPurpose, that are half-full |
224 // at the end of a collection and that we want to reuse during the | |
225 // next collection. | |
226 HeapRegion* _retained_gc_alloc_regions[GCAllocPurposeCount]; | |
227 // This specifies whether we will keep the last half-full region at | |
228 // the end of a collection so that it can be reused during the next | |
229 // collection (this is specified per GCAllocPurpose) | |
230 bool _retain_gc_alloc_region[GCAllocPurposeCount]; | |
342 | 231 |
232 // A list of the regions that have been set to be alloc regions in the | |
233 // current collection. | |
234 HeapRegion* _gc_alloc_region_list; | |
235 | |
236 // When called by a par thread, requires par_alloc_during_gc_lock() to be held. | |
237 void push_gc_alloc_region(HeapRegion* hr); | |
238 | |
239 // This should only be called single-threaded. Undeclares all GC alloc | |
240 // regions. | |
241 void forget_alloc_region_list(); | |
242 | |
243 // Should be used to set an alloc region, because there's other | |
244 // associated bookkeeping. | |
245 void set_gc_alloc_region(int purpose, HeapRegion* r); | |
246 | |
247 // Check well-formedness of alloc region list. | |
248 bool check_gc_alloc_regions(); | |
249 | |
250 // Outside of GC pauses, the number of bytes used in all regions other | |
251 // than the current allocation region. | |
252 size_t _summary_bytes_used; | |
253 | |
526 | 254 // This is used for a quick test on whether a reference points into |
255 // the collection set or not. Basically, we have an array, with one | |
256 // byte per region, and that byte denotes whether the corresponding | |
257 // region is in the collection set or not. The entry corresponding | |
258 // the bottom of the heap, i.e., region 0, is pointed to by | |
259 // _in_cset_fast_test_base. The _in_cset_fast_test field has been | |
260 // biased so that it actually points to address 0 of the address | |
261 // space, to make the test as fast as possible (we can simply shift | |
262 // the address to index into it, instead of having to subtract the | |
263 // bottom of the heap from the address before shifting it; basically | |
264 // it works in the same way the card table works). | |
265 bool* _in_cset_fast_test; | |
266 | |
267 // The allocated array used for the fast test on whether a reference | |
268 // points into the collection set or not. This field is also used to | |
269 // free the array. | |
270 bool* _in_cset_fast_test_base; | |
271 | |
272 // The length of the _in_cset_fast_test_base array. | |
273 size_t _in_cset_fast_test_length; | |
274 | |
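A minimal sketch of the biasing trick described in the comment above (names are hypothetical). As with the card table, the biased pointer deliberately points outside the allocation so that a raw heap address, shifted by the region-size log, indexes the table directly:

```cpp
#include <cstdint>

// base[i] answers "is region i in the collection set?".
// Bias the pointer so that biased[addr >> log_region_bytes] works on a raw
// heap address with no subtraction of the heap bottom.
bool* bias_table(bool* base, uintptr_t heap_bottom, unsigned log_region_bytes) {
  return base - (heap_bottom >> log_region_bytes);
}

bool fast_in_cset(const bool* biased, uintptr_t addr, unsigned log_region_bytes) {
  return biased[addr >> log_region_bytes];  // one shift, one load
}
```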
353 | 275 volatile unsigned _gc_time_stamp; |
342 | 276 |
277 size_t* _surviving_young_words; | |
278 | |
279 void setup_surviving_young_words(); | |
280 void update_surviving_young_words(size_t* surv_young_words); | |
281 void cleanup_surviving_young_words(); | |
282 | |
283 protected: | |
284 | |
285 // Returns "true" iff none of the gc alloc regions have any allocations | |
286 // since the last call to "save_marks". | |
287 bool all_alloc_regions_no_allocs_since_save_marks(); | |
545 | 288 // Perform finalization work on all allocation regions. |
289 void retire_all_alloc_regions(); | |
342 | 290 |
291 // The number of regions allocated to hold humongous objects. | |
292 int _num_humongous_regions; | |
293 YoungList* _young_list; | |
294 | |
295 // The current policy object for the collector. | |
296 G1CollectorPolicy* _g1_policy; | |
297 | |
298 // Parallel allocation lock to protect the current allocation region. | |
299 Mutex _par_alloc_during_gc_lock; | |
300 Mutex* par_alloc_during_gc_lock() { return &_par_alloc_during_gc_lock; } | |
301 | |
302 // If possible/desirable, allocate a new HeapRegion for normal object | |
303 // allocation sufficient for an allocation of the given "word_size". | |
304 // If "do_expand" is true, will attempt to expand the heap if necessary | |
305 // to satisfy the request. If "zero_filled" is true, requires a | |
306 // zero-filled region. | |
307 // (Returning NULL will trigger a GC.) | |
308 virtual HeapRegion* newAllocRegion_work(size_t word_size, | |
309 bool do_expand, | |
310 bool zero_filled); | |
311 | |
312 virtual HeapRegion* newAllocRegion(size_t word_size, | |
313 bool zero_filled = true) { | |
314 return newAllocRegion_work(word_size, false, zero_filled); | |
315 } | |
316 virtual HeapRegion* newAllocRegionWithExpansion(int purpose, | |
317 size_t word_size, | |
318 bool zero_filled = true); | |
319 | |
320 // Attempt to allocate an object of the given (very large) "word_size". | |
321 // Returns "NULL" on failure. | |
322 virtual HeapWord* humongousObjAllocate(size_t word_size); | |
323 | |
324 // If possible, allocate a block of the given word_size, else return "NULL". | |
325 // Returning NULL will trigger GC or heap expansion. | |
326 // These two methods have rather awkward pre- and | |
327 // post-conditions. If they are called outside a safepoint, then | |
328 // they assume that the caller is holding the heap lock. Upon return | |
329 // they release the heap lock, if they are returning a non-NULL | |
330 // value. attempt_allocation_slow() also dirties the cards of a | |
331 // newly-allocated young region after it releases the heap | |
332 // lock. This change in interface was the neatest way to achieve | |
333 // this card dirtying without affecting mem_allocate(), which is a | |
334 // more frequently called method. We tried two or three different | |
335 // approaches, but they were even more hacky. | |
336 HeapWord* attempt_allocation(size_t word_size, | |
337 bool permit_collection_pause = true); | |
338 | |
339 HeapWord* attempt_allocation_slow(size_t word_size, | |
340 bool permit_collection_pause = true); | |
341 | |
342 // Allocate blocks during garbage collection. Will ensure an | |
343 // allocation region, either by picking one or expanding the | |
344 // heap, and then allocate a block of the given size. The block | |
345 // may not be humongous - it must fit into a single heap region. | |
346 HeapWord* allocate_during_gc(GCAllocPurpose purpose, size_t word_size); | |
347 HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size); | |
348 | |
349 HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose, | |
350 HeapRegion* alloc_region, | |
351 bool par, | |
352 size_t word_size); | |
353 | |
354 // Ensure that no further allocations can happen in "r", bearing in mind | |
355 // that parallel threads might be attempting allocations. | |
356 void par_allocate_remaining_space(HeapRegion* r); | |
357 | |
545 | 358 // Retires an allocation region when it is full or at the end of a |
359 // GC pause. | |
360 void retire_alloc_region(HeapRegion* alloc_region, bool par); | |
361 | |
342 | 362 // Helper function for two callbacks below. |
363 // "full", if true, indicates that the GC is for a System.gc() request, | |
364 // and should collect the entire heap. If "clear_all_soft_refs" is true, | |
365 // all soft references are cleared during the GC. If "full" is false, | |
366 // "word_size" describes the allocation that the GC should | |
367 // attempt (at least) to satisfy. | |
368 void do_collection(bool full, bool clear_all_soft_refs, | |
369 size_t word_size); | |
370 | |
371 // Callback from VM_G1CollectFull operation. | |
372 // Perform a full collection. | |
373 void do_full_collection(bool clear_all_soft_refs); | |
374 | |
375 // Resize the heap if necessary after a full collection. If this is | |
376 // after a collect-for-allocation, "word_size" is the allocation size, | |
377 // and will be considered part of the used portion of the heap. | |
378 void resize_if_necessary_after_full_collection(size_t word_size); | |
379 | |
380 // Callback from VM_G1CollectForAllocation operation. | |
381 // This function does everything necessary/possible to satisfy a | |
382 // failed allocation request (including collection, expansion, etc.) | |
383 HeapWord* satisfy_failed_allocation(size_t word_size); | |
384 | |
385 // Attempt to expand the heap sufficiently | |
386 // to support an allocation of the given "word_size". If | |
387 // successful, perform the allocation and return the address of the | |
388 // allocated block, or else "NULL". | |
389 virtual HeapWord* expand_and_allocate(size_t word_size); | |
390 | |
391 public: | |
392 // Expand the garbage-first heap by at least the given size (in bytes!). | |
393 // (Rounds up to a HeapRegion boundary.) | |
394 virtual void expand(size_t expand_bytes); | |
395 | |
396 // Do anything common to GC's. | |
397 virtual void gc_prologue(bool full); | |
398 virtual void gc_epilogue(bool full); | |
399 | |
526 | 400 // We register a region with the fast "in collection set" test. We |
401 // simply set to true the array slot corresponding to this region. | |
402 void register_region_with_in_cset_fast_test(HeapRegion* r) { | |
403 assert(_in_cset_fast_test_base != NULL, "sanity"); | |
404 assert(r->in_collection_set(), "invariant"); | |
405 int index = r->hrs_index(); | |
406 assert(0 <= (size_t) index && (size_t) index < _in_cset_fast_test_length, | |
407 "invariant"); | |
408 assert(!_in_cset_fast_test_base[index], "invariant"); | |
409 _in_cset_fast_test_base[index] = true; | |
410 } | |
411 | |
412 // This is a fast test on whether a reference points into the | |
413 // collection set or not. It does not assume that the reference | |
414 // points into the heap; if it doesn't, it will return false. | |
415 bool in_cset_fast_test(oop obj) { | |
416 assert(_in_cset_fast_test != NULL, "sanity"); | |
417 if (_g1_committed.contains((HeapWord*) obj)) { | |
418 // no need to subtract the bottom of the heap from obj, | |
419 // _in_cset_fast_test is biased | |
420 size_t index = ((size_t) obj) >> HeapRegion::LogOfHRGrainBytes; | |
421 bool ret = _in_cset_fast_test[index]; | |
422 // let's make sure the result is consistent with what the slower | |
423 // test returns | |
424 assert( ret || !obj_in_cs(obj), "sanity"); | |
425 assert(!ret || obj_in_cs(obj), "sanity"); | |
426 return ret; | |
427 } else { | |
428 return false; | |
429 } | |
430 } | |
431 | |
342 | 432 protected: |
433 | |
434 // Shrink the garbage-first heap by at most the given size (in bytes!). | |
435 // (Rounds down to a HeapRegion boundary.) | |
436 virtual void shrink(size_t expand_bytes); | |
437 void shrink_helper(size_t expand_bytes); | |
438 | |
439 // Do an incremental collection: identify a collection set, and evacuate | |
440 // its live objects elsewhere. | |
441 virtual void do_collection_pause(); | |
442 | |
443 // The guts of the incremental collection pause, executed by the vm | |
677 | 444 // thread. |
445 virtual void do_collection_pause_at_safepoint(); | |
342 | 446 |
447 // Actually do the work of evacuating the collection set. | |
448 virtual void evacuate_collection_set(); | |
449 | |
450 // If this is an appropriate time, do a collection pause. | |
451 // The "word_size" argument, if non-zero, indicates the size of an | |
452 // allocation request that is prompting this query. | |
453 void do_collection_pause_if_appropriate(size_t word_size); | |
454 | |
455 // The g1 remembered set of the heap. | |
456 G1RemSet* _g1_rem_set; | |
457 // And its mod ref barrier set, used to track updates for the above. | |
458 ModRefBarrierSet* _mr_bs; | |
459 | |
616 | 460 // A set of cards that cover the objects for which the Rsets should be updated |
461 // concurrently after the collection. |
462 DirtyCardQueueSet _dirty_card_queue_set; |
463 |
342 | 464 // The Heap Region Rem Set Iterator. |
465 HeapRegionRemSetIterator** _rem_set_iterator; | |
466 | |
467 // The closure used to refine a single card. | |
468 RefineCardTableEntryClosure* _refine_cte_cl; | |
469 | |
470 // A function to check the consistency of dirty card logs. | |
471 void check_ct_logs_at_safepoint(); | |
472 | |
473 // After a collection pause, make the regions in the CS into free | |
474 // regions. | |
475 void free_collection_set(HeapRegion* cs_head); | |
476 | |
477 // Applies "scan_non_heap_roots" to roots outside the heap, | |
478 // "scan_rs" to roots inside the heap (having done "set_region" to | |
479 // indicate the region in which the root resides), and does "scan_perm" | |
480 // (setting the generation to the perm generation.) If "scan_rs" is | |
481 // NULL, then this step is skipped. The "worker_i" | |
482 // param is for use with parallel roots processing, and should be | |
483 // the "i" of the calling parallel worker thread's work(i) function. | |
484 // In the sequential case this param will be ignored. | |
485 void g1_process_strong_roots(bool collecting_perm_gen, | |
486 SharedHeap::ScanningOption so, | |
487 OopClosure* scan_non_heap_roots, | |
488 OopsInHeapRegionClosure* scan_rs, | |
489 OopsInHeapRegionClosure* scan_so, | |
490 OopsInGenClosure* scan_perm, | |
491 int worker_i); | |
492 | |
493 void scan_scan_only_set(OopsInHeapRegionClosure* oc, | |
494 int worker_i); | |
495 void scan_scan_only_region(HeapRegion* hr, | |
496 OopsInHeapRegionClosure* oc, | |
497 int worker_i); | |
498 | |
499 // Apply "blk" to all the weak roots of the system. These include | |
500 // JNI weak roots, the code cache, system dictionary, symbol table, | |
501 // string table, and referents of reachable weak refs. | |
502 void g1_process_weak_roots(OopClosure* root_closure, | |
503 OopClosure* non_root_closure); | |
504 | |
505 // Invoke "save_marks" on all heap regions. | |
506 void save_marks(); | |
507 | |
508 // Free a heap region. | |
509 void free_region(HeapRegion* hr); | |
510 // A component of "free_region", exposed for 'batching'. | |
511 // All the params after "hr" are out params: the used bytes of the freed | |
512 // region(s), the number of H regions cleared, the number of regions | |
513 // freed, and pointers to the head and tail of a list of freed contig | |
514 // regions, linked through the "next_on_unclean_list" field. | |
515 void free_region_work(HeapRegion* hr, | |
516 size_t& pre_used, | |
517 size_t& cleared_h, | |
518 size_t& freed_regions, | |
519 UncleanRegionList* list, | |
520 bool par = false); | |
521 | |
522 | |
523 // The concurrent marker (and the thread it runs in.) | |
524 ConcurrentMark* _cm; | |
525 ConcurrentMarkThread* _cmThread; | |
526 bool _mark_in_progress; | |
527 | |
528 // The concurrent refiner. | |
529 ConcurrentG1Refine* _cg1r; | |
530 | |
531 // The concurrent zero-fill thread. | |
532 ConcurrentZFThread* _czft; | |
533 | |
534 // The parallel task queues | |
535 RefToScanQueueSet *_task_queues; | |
536 | |
537 // True iff an evacuation has failed in the current collection. | |
538 bool _evacuation_failed; | |
539 | |
540 // Set the attribute indicating whether evacuation has failed in the | |
541 // current collection. | |
542 void set_evacuation_failed(bool b) { _evacuation_failed = b; } | |
543 | |
544 // Failed evacuations cause some logical from-space objects to have | |
545 // forwarding pointers to themselves. Reset them. | |
546 void remove_self_forwarding_pointers(); | |
547 | |
548 // When one is non-null, so is the other. Together they form pairs: | |
549 // an object with a preserved mark, and its mark value. | |
550 GrowableArray<oop>* _objs_with_preserved_marks; | |
551 GrowableArray<markOop>* _preserved_marks_of_objs; | |
552 | |
553 // Preserve the mark of "obj", if necessary, in preparation for its mark | |
554 // word being overwritten with a self-forwarding-pointer. | |
555 void preserve_mark_if_necessary(oop obj, markOop m); | |
556 | |
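A hedged sketch of the lockstep bookkeeping the two fields above implement, with `std::vector` standing in for `GrowableArray` and a plain integer for the mark word:

```cpp
#include <cstdint>
#include <vector>

struct PreservedMarks {
  std::vector<void*>     objs;   // objects whose mark word was overwritten
  std::vector<uintptr_t> marks;  // the saved mark of objs[i] is marks[i]

  void preserve(void* obj, uintptr_t mark) {
    objs.push_back(obj);
    marks.push_back(mark);  // invariant: objs.size() == marks.size()
  }
  // Restoration after the pause walks both arrays together and writes each
  // saved mark back into its object.
};
```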
557 // The stack of evac-failure objects left to be scanned. | |
558 GrowableArray<oop>* _evac_failure_scan_stack; | |
559 // The closure to apply to evac-failure objects. | |
560 | |
561 OopsInHeapRegionClosure* _evac_failure_closure; | |
562 // Set the field above. | |
563 void | |
564 set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_closure) { | |
565 _evac_failure_closure = evac_failure_closure; | |
566 } | |
567 | |
568 // Push "obj" on the scan stack. | |
569 void push_on_evac_failure_scan_stack(oop obj); | |
570 // Process scan stack entries until the stack is empty. | |
571 void drain_evac_failure_scan_stack(); | |
572 // True iff an invocation of "drain_scan_stack" is in progress; to | |
573 // prevent unnecessary recursion. | |
574 bool _drain_in_progress; | |
575 | |
576 // Do any necessary initialization for evacuation-failure handling. | |
577 // "cl" is the closure that will be used to process evac-failure | |
578 // objects. | |
579 void init_for_evac_failure(OopsInHeapRegionClosure* cl); | |
580 // Do any necessary cleanup for evacuation-failure handling data | |
581 // structures. | |
582 void finalize_for_evac_failure(); | |
583 | |
584 // An attempt to evacuate "obj" has failed; take necessary steps. | |
585 void handle_evacuation_failure(oop obj); | |
586 oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj); | |
587 void handle_evacuation_failure_common(oop obj, markOop m); | |
588 | |
589 | |
590 // Ensure that the relevant gc_alloc regions are set. | |
591 void get_gc_alloc_regions(); | |
636 | 592 // We're done with GC alloc regions. We are going to tear down the |
593 // gc alloc list and remove the gc alloc tag from all the regions on | |
594 // that list. However, we will also retain the last (i.e., the one | |
595 // that is half-full) GC alloc region, per GCAllocPurpose, for | |
596 // possible reuse during the next collection, provided | |
597 // _retain_gc_alloc_region[] indicates that it should be the | |
598 // case. Said regions are kept in the _retained_gc_alloc_regions[] | |
599 // array. If the parameter totally is set, we will not retain any | |
600 // regions, irrespective of what _retain_gc_alloc_region[] | |
601 // indicates. | |
602 void release_gc_alloc_regions(bool totally); | |
603 #ifndef PRODUCT | |
604 // Useful for debugging. | |
605 void print_gc_alloc_regions(); | |
606 #endif // !PRODUCT | |
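A standalone, hedged rendering of the retention policy that `release_gc_alloc_regions` implements, per the comment above; the untagging and actual release of unkept regions are elided:

```cpp
enum { PurposeCount = 2 };  // mirrors GCAllocPurposeCount
struct Region { };

void release_regions(Region* cur[PurposeCount],
                     Region* retained[PurposeCount],
                     const bool retain[PurposeCount],
                     bool totally) {
  for (int p = 0; p < PurposeCount; ++p) {
    // Keep the half-full region for the next pause only if the per-purpose
    // policy allows it and we were not asked to release everything.
    retained[p] = (!totally && retain[p]) ? cur[p] : nullptr;
    cur[p] = nullptr;  // no longer tagged as a GC alloc region
  }
}
```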
342 | 607 |
608 // ("Weak") Reference processing support | |
609 ReferenceProcessor* _ref_processor; | |
610 | |
611 enum G1H_process_strong_roots_tasks { | |
612 G1H_PS_mark_stack_oops_do, | |
613 G1H_PS_refProcessor_oops_do, | |
614 // Leave this one last. | |
615 G1H_PS_NumElements | |
616 }; | |
617 | |
618 SubTasksDone* _process_strong_tasks; | |
619 | |
620 // List of regions which require zero filling. | |
621 UncleanRegionList _unclean_region_list; | |
622 bool _unclean_regions_coming; | |
623 | |
624 public: | |
625 void set_refine_cte_cl_concurrency(bool concurrent); | |
626 | |
627 RefToScanQueue *task_queue(int i); | |
628 | |
616 | 629 // A set of cards where updates happened during the GC |
630 DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; } |
631 |
342 | 632 // Create a G1CollectedHeap with the specified policy. |
633 // Must call the initialize method afterwards. | |
634 // May not return if something goes wrong. | |
635 G1CollectedHeap(G1CollectorPolicy* policy); | |
636 | |
637 // Initialize the G1CollectedHeap to have the initial and | |
638 // maximum sizes, permanent generation, and remembered and barrier sets | |
639 // specified by the policy object. | |
640 jint initialize(); | |
641 | |
642 void ref_processing_init(); | |
643 | |
644 void set_par_threads(int t) { | |
645 SharedHeap::set_par_threads(t); | |
646 _process_strong_tasks->set_par_threads(t); | |
647 } | |
648 | |
649 virtual CollectedHeap::Name kind() const { | |
650 return CollectedHeap::G1CollectedHeap; | |
651 } | |
652 | |
653 // The current policy object for the collector. | |
654 G1CollectorPolicy* g1_policy() const { return _g1_policy; } | |
655 | |
656 // Adaptive size policy. No such thing for g1. | |
657 virtual AdaptiveSizePolicy* size_policy() { return NULL; } | |
658 | |
659 // The rem set and barrier set. | |
660 G1RemSet* g1_rem_set() const { return _g1_rem_set; } | |
661 ModRefBarrierSet* mr_bs() const { return _mr_bs; } | |
662 | |
663 // The rem set iterator. | |
664 HeapRegionRemSetIterator* rem_set_iterator(int i) { | |
665 return _rem_set_iterator[i]; | |
666 } | |
667 | |
668 HeapRegionRemSetIterator* rem_set_iterator() { | |
669 return _rem_set_iterator[0]; | |
670 } | |
671 | |
672 unsigned get_gc_time_stamp() { | |
673 return _gc_time_stamp; | |
674 } | |
675 | |
676 void reset_gc_time_stamp() { | |
677 _gc_time_stamp = 0; | |
353 | 678 OrderAccess::fence(); |
679 } |
680 |
681 void increment_gc_time_stamp() { |
682 ++_gc_time_stamp; |
683 OrderAccess::fence(); |
342 | 684 } |
685 | |
686 void iterate_dirty_card_closure(bool concurrent, int worker_i); | |
687 | |
688 // The shared block offset table array. | |
689 G1BlockOffsetSharedArray* bot_shared() const { return _bot_shared; } | |
690 | |
691 // Reference Processing accessor | |
692 ReferenceProcessor* ref_processor() { return _ref_processor; } | |
693 | |
694 // Reserved (g1 only; super method includes perm), capacity and the used | |
695 // portion in bytes. | |
696 size_t g1_reserved_obj_bytes() { return _g1_reserved.byte_size(); } | |
697 virtual size_t capacity() const; | |
698 virtual size_t used() const; | |
699 size_t recalculate_used() const; | |
700 #ifndef PRODUCT | |
701 size_t recalculate_used_regions() const; | |
702 #endif // PRODUCT | |
703 | |
704 // These virtual functions do the actual allocation. | |
705 virtual HeapWord* mem_allocate(size_t word_size, | |
706 bool is_noref, | |
707 bool is_tlab, | |
708 bool* gc_overhead_limit_was_exceeded); | |
709 | |
710 // Some heaps may offer a contiguous region for shared non-blocking | |
711 // allocation, via inlined code (by exporting the address of the top and | |
712 // end fields defining the extent of the contiguous allocation region.) | |
713 // But G1CollectedHeap doesn't yet support this. | |
714 | |
715 // Return an estimate of the maximum allocation that could be performed | |
716 // without triggering any collection or expansion activity. In a | |
717 // generational collector, for example, this is probably the largest | |
718 // allocation that could be supported (without expansion) in the youngest | |
719 // generation. It is "unsafe" because no locks are taken; the result | |
720 // should be treated as an approximation, not a guarantee, for use in | |
721 // heuristic resizing decisions. | |
722 virtual size_t unsafe_max_alloc(); | |
723 | |
724 virtual bool is_maximal_no_gc() const { | |
725 return _g1_storage.uncommitted_size() == 0; | |
726 } | |
727 | |
728 // The total number of regions in the heap. | |
729 size_t n_regions(); | |
730 | |
731 // The maximum number of regions in the heap. | |
732 size_t max_regions(); | |
733 | |
734 // The number of regions that are completely free. | |
735 size_t free_regions(); | |
736 | |
737 // The number of regions that are not completely free. | |
738 size_t used_regions() { return n_regions() - free_regions(); } | |
739 | |
740 // True iff the ZF thread should run. | |
741 bool should_zf(); | |
742 | |
743 // The number of regions available for "regular" expansion. | |
744 size_t expansion_regions() { return _expansion_regions; } | |
745 | |
746 #ifndef PRODUCT | |
747 bool regions_accounted_for(); | |
748 bool print_region_accounting_info(); | |
749 void print_region_counts(); | |
750 #endif | |
751 | |
752 HeapRegion* alloc_region_from_unclean_list(bool zero_filled); | |
753 HeapRegion* alloc_region_from_unclean_list_locked(bool zero_filled); | |
754 | |
755 void put_region_on_unclean_list(HeapRegion* r); | |
756 void put_region_on_unclean_list_locked(HeapRegion* r); | |
757 | |
758 void prepend_region_list_on_unclean_list(UncleanRegionList* list); | |
759 void prepend_region_list_on_unclean_list_locked(UncleanRegionList* list); | |
760 | |
761 void set_unclean_regions_coming(bool b); | |
762 void set_unclean_regions_coming_locked(bool b); | |
763 // Wait for cleanup to be complete. | |
764 void wait_for_cleanup_complete(); | |
765 // Like above, but assumes that the calling thread owns the Heap_lock. | |
766 void wait_for_cleanup_complete_locked(); | |
767 | |
768 // Return the head of the unclean list. | |
769 HeapRegion* peek_unclean_region_list_locked(); | |
770 // Remove and return the head of the unclean list. | |
771 HeapRegion* pop_unclean_region_list_locked(); | |
772 | |
773 // List of regions which are zero filled and ready for allocation. | |
774 HeapRegion* _free_region_list; | |
775 // Number of elements on the free list. | |
776 size_t _free_region_list_size; | |
777 | |
778 // If the head of the unclean list is ZeroFilled, move it to the free | |
779 // list. | |
780 bool move_cleaned_region_to_free_list_locked(); | |
781 bool move_cleaned_region_to_free_list(); | |
782 | |
783 void put_free_region_on_list_locked(HeapRegion* r); | |
784 void put_free_region_on_list(HeapRegion* r); | |
785 | |
786 // Remove and return the head element of the free list. | |
787 HeapRegion* pop_free_region_list_locked(); | |
788 | |
789 // If "zero_filled" is true, we first try the free list, then we try the | |
790 // unclean list, zero-filling the result. If "zero_filled" is false, we | |
791 // first try the unclean list, then the zero-filled list. | |
792 HeapRegion* alloc_free_region_from_lists(bool zero_filled); | |
793 | |
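The two lookup orders described above, rendered as a hedged sketch; the `pop_*` and `zero_fill` helpers are hypothetical stand-ins for the list operations declared earlier:

```cpp
struct Region { };

Region* pop_free_list();     // hypothetical: zero-filled regions
Region* pop_unclean_list();  // hypothetical: not yet zero-filled
void    zero_fill(Region*);  // hypothetical

Region* alloc_from_lists(bool want_zero_filled) {
  if (want_zero_filled) {
    if (Region* r = pop_free_list())    return r;  // already zeroed
    if (Region* r = pop_unclean_list()) { zero_fill(r); return r; }
    return nullptr;
  }
  if (Region* r = pop_unclean_list())   return r;  // zeros not required
  return pop_free_list();
}
```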
794 // Verify the integrity of the region lists. | |
795 void remove_allocated_regions_from_lists(); | |
796 bool verify_region_lists(); | |
797 bool verify_region_lists_locked(); | |
798 size_t unclean_region_list_length(); | |
799 size_t free_region_list_length(); | |
800 | |
801 // Perform a collection of the heap; intended for use in implementing | |
802 // "System.gc". This probably implies as full a collection as the | |
803 // "CollectedHeap" supports. | |
804 virtual void collect(GCCause::Cause cause); | |
805 | |
806 // The same as above, but assumes that the caller holds the Heap_lock. | |
807 void collect_locked(GCCause::Cause cause); | |
808 | |
809 // This interface assumes that it's being called by the | |
810 // vm thread. It collects the heap assuming that the | |
811 // heap lock is already held and that we are executing in | |
812 // the context of the vm thread. | |
813 virtual void collect_as_vm_thread(GCCause::Cause cause); | |
814 | |
815 // True iff an evacuation has failed in the most-recent collection. | |
816 bool evacuation_failed() { return _evacuation_failed; } | |
817 | |
818 // Free a region if it is totally full of garbage. Returns the number of | |
819 // bytes freed (0 ==> didn't free it). | |
820 size_t free_region_if_totally_empty(HeapRegion *hr); | |
821 void free_region_if_totally_empty_work(HeapRegion *hr, | |
822 size_t& pre_used, | |
823 size_t& cleared_h_regions, | |
824 size_t& freed_regions, | |
825 UncleanRegionList* list, | |
826 bool par = false); | |
827 | |
828 // If we've done free region work that yields the given changes, update | |
829 // the relevant global variables. | |
830 void finish_free_region_work(size_t pre_used, | |
831 size_t cleared_h_regions, | |
832 size_t freed_regions, | |
833 UncleanRegionList* list); | |
834 | |
835 | |
836 // Returns "TRUE" iff "p" points into the allocated area of the heap. | |
837 virtual bool is_in(const void* p) const; | |
838 | |
839 // Return "TRUE" iff the given object address is within the collection | |
840 // set. | |
841 inline bool obj_in_cs(oop obj); | |
842 | |
843 // Return "TRUE" iff the given object address is in the reserved | |
844 // region of g1 (excluding the permanent generation). | |
845 bool is_in_g1_reserved(const void* p) const { | |
846 return _g1_reserved.contains(p); | |
847 } | |
848 | |
849 // Returns a MemRegion that corresponds to the space that has been | |
850 // committed in the heap | |
851 MemRegion g1_committed() { | |
852 return _g1_committed; | |
853 } | |
854 | |
855 NOT_PRODUCT( bool is_in_closed_subset(const void* p) const; ) | |
856 | |
857 // Dirty card table entries covering a list of young regions. | |
858 void dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list); | |
859 | |
860 // This resets the card table to all zeros. It is used after | |
861 // a collection pause which used the card table to claim cards. | |
862 void cleanUpCardTable(); | |
863 | |
864 // Iteration functions. | |
865 | |
866 // Iterate over all the ref-containing fields of all objects, calling | |
867 // "cl.do_oop" on each. | |
678 | 868 virtual void oop_iterate(OopClosure* cl) { |
869 oop_iterate(cl, true); | |
870 } | |
871 void oop_iterate(OopClosure* cl, bool do_perm); | |
342 | 872 |
873 // Same as above, restricted to a memory region. | |
678 | 874 virtual void oop_iterate(MemRegion mr, OopClosure* cl) { |
875 oop_iterate(mr, cl, true); | |
876 } | |
877 void oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm); | |
342 | 878 |
879 // Iterate over all objects, calling "cl.do_object" on each. | |
678 | 880 virtual void object_iterate(ObjectClosure* cl) { |
881 object_iterate(cl, true); | |
882 } | |
883 virtual void safe_object_iterate(ObjectClosure* cl) { | |
884 object_iterate(cl, true); | |
885 } | |
886 void object_iterate(ObjectClosure* cl, bool do_perm); | |
342 | 887 |
888 // Iterate over all objects allocated since the last collection, calling | |
889 // "cl.do_object" on each. The heap must have been initialized properly | |
890 // to support this function, or else this call will fail. | |
891 virtual void object_iterate_since_last_GC(ObjectClosure* cl); | |
892 | |
893 // Iterate over all spaces in use in the heap, in ascending address order. | |
894 virtual void space_iterate(SpaceClosure* cl); | |
895 | |
896 // Iterate over heap regions, in address order, terminating the | |
897 // iteration early if the "doHeapRegion" method returns "true". | |
898 void heap_region_iterate(HeapRegionClosure* blk); | |
899 | |
900 // Iterate over heap regions starting with r (or the first region if "r" | |
901 // is NULL), in address order, terminating early if the "doHeapRegion" | |
902 // method returns "true". | |
903 void heap_region_iterate_from(HeapRegion* r, HeapRegionClosure* blk); | |
904 | |
905 // As above but starting from the region at index idx. | |
906 void heap_region_iterate_from(int idx, HeapRegionClosure* blk); | |
907 | |
908 HeapRegion* region_at(size_t idx); | |
909 | |
910 // Divide the heap region sequence into "chunks" of some size (the number | |
911 // of regions divided by the number of parallel threads times some | |
912 // overpartition factor, currently 4). Assumes that this will be called | |
913 // in parallel by ParallelGCThreads worker threads with distinct worker | |
914 // ids in the range [0..max(ParallelGCThreads-1, 1)], that all parallel | |
915 // calls will use the same "claim_value", and that that claim value is | |
916 // different from the claim_value of any heap region before the start of | |
917 // the iteration. Applies "blk->doHeapRegion" to each of the regions, by | |
918 // attempting to claim the first region in each chunk, and, if | |
919 // successful, applying the closure to each region in the chunk (and | |
920 // setting the claim value of the second and subsequent regions of the | |
921 // chunk.) For now requires that "doHeapRegion" always returns "false", | |
922 // i.e., that a closure never attempt to abort a traversal. | |
923 void heap_region_par_iterate_chunked(HeapRegionClosure* blk, | |
924 int worker, | |
925 jint claim_value); | |
926 | |
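A simplified, per-region variant of the claim protocol described above (the real code claims only the first region of each chunk and then sets the claim value on the rest). Each worker CASes a region's claim field to the new value; only the winner applies the closure, so every region is processed exactly once even with workers racing:

```cpp
#include <atomic>
#include <cstddef>

struct Region { std::atomic<int> claim{0}; };

void par_iterate(Region* regions, size_t lo, size_t hi, int claim_value,
                 void (*apply)(Region*)) {
  for (size_t i = lo; i < hi; ++i) {
    int old_val = regions[i].claim.load(std::memory_order_relaxed);
    if (old_val != claim_value &&
        regions[i].claim.compare_exchange_strong(old_val, claim_value)) {
      apply(&regions[i]);  // this worker won the claim for region i
    }
  }
}
```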
390 | 927 // It resets all the region claim values to the default. |
928 void reset_heap_region_claim_values(); | |
929 | |
355 | 930 #ifdef ASSERT |
931 bool check_heap_region_claim_values(jint claim_value); | |
932 #endif // ASSERT | |
933 | |
342 | 934 // Iterate over the regions (if any) in the current collection set. |
935 void collection_set_iterate(HeapRegionClosure* blk); | |
936 | |
937 // As above but starting from region r | |
938 void collection_set_iterate_from(HeapRegion* r, HeapRegionClosure *blk); | |
939 | |
940 // Returns the first (lowest address) compactible space in the heap. | |
941 virtual CompactibleSpace* first_compactible_space(); | |
942 | |
943 // A CollectedHeap will contain some number of spaces. This finds the | |
944 // space containing a given address, or else returns NULL. | |
945 virtual Space* space_containing(const void* addr) const; | |
946 | |
947 // A G1CollectedHeap will contain some number of heap regions. This | |
948 // finds the region containing a given address, or else returns NULL. | |
949 HeapRegion* heap_region_containing(const void* addr) const; | |
950 | |
951 // Like the above, but requires "addr" to be in the heap (to avoid a | |
952 // null-check), and unlike the above, may return a continuing humongous | |
953 // region. | |
954 HeapRegion* heap_region_containing_raw(const void* addr) const; | |
955 | |
956 // A CollectedHeap is divided into a dense sequence of "blocks"; that is, | |
957 // each address in the (reserved) heap is a member of exactly | |
958 // one block. The defining characteristic of a block is that it is | |
959 // possible to find its size, and thus to progress forward to the next | |
960 // block. (Blocks may be of different sizes.) Thus, blocks may | |
961 // represent Java objects, or they might be free blocks in a | |
962 // free-list-based heap (or subheap), as long as the two kinds are | |
963 // distinguishable and the size of each is determinable. | |
964 | |
965 // Returns the address of the start of the "block" that contains the | |
966 // address "addr". We say "blocks" instead of "object" since some heaps | |
967 // may not pack objects densely; a chunk may either be an object or a | |
968 // non-object. | |
969 virtual HeapWord* block_start(const void* addr) const; | |
970 | |
971 // Requires "addr" to be the start of a chunk, and returns its size. | |
972 // "addr + size" is required to be the start of a new chunk, or the end | |
973 // of the active area of the heap. | |
974 virtual size_t block_size(const HeapWord* addr) const; | |
975 | |
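Walking a range block by block using the primitive above is the canonical use of the block abstraction: each block knows its own size, so the cursor just hops forward. A sketch with a hypothetical `block_size_at` and plain offsets in place of real `HeapWord*` arithmetic:

```cpp
#include <cstddef>

size_t block_size_at(size_t cur);  // hypothetical: size of block starting at cur

void walk_blocks(size_t bottom, size_t top) {
  for (size_t cur = bottom; cur < top; cur += block_size_at(cur)) {
    // visit the block starting at cur (a Java object or a free chunk)
  }
}
```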
976 // Requires "addr" to be the start of a block, and returns "TRUE" iff | |
977 // the block is an object. | |
978 virtual bool block_is_obj(const HeapWord* addr) const; | |
979 | |
980 // Does this heap support heap inspection? (+PrintClassHistogram) | |
981 virtual bool supports_heap_inspection() const { return true; } | |
982 | |
983 // Section on thread-local allocation buffers (TLABs) | |
984 // See CollectedHeap for semantics. | |
985 | |
986 virtual bool supports_tlab_allocation() const; | |
987 virtual size_t tlab_capacity(Thread* thr) const; | |
988 virtual size_t unsafe_max_tlab_alloc(Thread* thr) const; | |
989 virtual HeapWord* allocate_new_tlab(size_t size); | |
990 | |
991 // Can a compiler initialize a new object without store barriers? | |
992 // This permission only extends from the creation of a new object | |
993 // via a TLAB up to the first subsequent safepoint. | |
994 virtual bool can_elide_tlab_store_barriers() const { | |
995 // Since G1's TLABs may, on occasion, come from non-young regions | |
996 // as well. (Is there a flag controlling that? XXX) | |
997 return false; | |
998 } | |
999 | |
1000 // Can a compiler elide a store barrier when it writes | |
1001 // a permanent oop into the heap? Applies when the compiler | |
1002 // is storing x to the heap, where x->is_perm() is true. | |
1003 virtual bool can_elide_permanent_oop_store_barriers() const { | |
1004 // At least until perm gen collection is also G1-ified, at | |
1005 // which point this should return false. | |
1006 return true; | |
1007 } | |
1008 | |
1009 virtual bool allocs_are_zero_filled(); | |
1010 | |
1011 // The boundary between a "large" and "small" array of primitives, in | |
1012 // words. | |
1013 virtual size_t large_typearray_limit(); | |
1014 | |
1015 // Returns "true" iff the given word_size is "very large". | |
1016 static bool isHumongous(size_t word_size) { | |
1017 return word_size >= VeryLargeInWords; | |
1018 } | |
1019 | |
1020 // Update mod union table with the set of dirty cards. | |
1021 void updateModUnion(); | |
1022 | |
1023 // Set the mod union bits corresponding to the given memRegion. Note | |
1024 // that this is always a safe operation, since it doesn't clear any | |
1025 // bits. | |
1026 void markModUnionRange(MemRegion mr); | |
1027 | |
1028 // Records the fact that a marking phase is no longer in progress. | |
1029 void set_marking_complete() { | |
1030 _mark_in_progress = false; | |
1031 } | |
1032 void set_marking_started() { | |
1033 _mark_in_progress = true; | |
1034 } | |
1035 bool mark_in_progress() { | |
1036 return _mark_in_progress; | |
1037 } | |
1038 | |
1039 // Print the maximum heap capacity. | |
1040 virtual size_t max_capacity() const; | |
1041 | |
1042 virtual jlong millis_since_last_gc(); | |
1043 | |
1044 // Perform any cleanup actions necessary before allowing a verification. | |
1045 virtual void prepare_for_verify(); | |
1046 | |
1047 // Perform verification. | |
1048 virtual void verify(bool allow_dirty, bool silent); | |
1049 virtual void print() const; | |
1050 virtual void print_on(outputStream* st) const; | |
1051 | |
1052 virtual void print_gc_threads_on(outputStream* st) const; | |
1053 virtual void gc_threads_do(ThreadClosure* tc) const; | |
1054 | |
1055 // Override | |
1056 void print_tracing_info() const; | |
1057 | |
1058 // If "addr" is a pointer into the (reserved?) heap, returns a positive | |
1059 // number indicating the "arena" within the heap in which "addr" falls. | |
1060 // Or else returns 0. | |
1061 virtual int addr_to_arena_id(void* addr) const; | |
1062 | |
1063 // Convenience function to be used in situations where the heap type can be | |
1064 // asserted to be this type. | |
1065 static G1CollectedHeap* heap(); | |
1066 | |
1067 void empty_young_list(); | |
1068 bool should_set_young_locked(); | |
1069 | |
1070 void set_region_short_lived_locked(HeapRegion* hr); | |
1071 // add appropriate methods for any other surv rate groups | |
1072 | |
1073 void young_list_rs_length_sampling_init() { | |
1074 _young_list->rs_length_sampling_init(); | |
1075 } | |
1076 bool young_list_rs_length_sampling_more() { | |
1077 return _young_list->rs_length_sampling_more(); | |
1078 } | |
1079 void young_list_rs_length_sampling_next() { | |
1080 _young_list->rs_length_sampling_next(); | |
1081 } | |
1082 size_t young_list_sampled_rs_lengths() { | |
1083 return _young_list->sampled_rs_lengths(); | |
1084 } | |
1085 | |
1086 size_t young_list_length() { return _young_list->length(); } | |
1087 size_t young_list_scan_only_length() { | |
1088 return _young_list->scan_only_length(); } | |
1089 | |
1090 HeapRegion* pop_region_from_young_list() { | |
1091 return _young_list->pop_region(); | |
1092 } | |
1093 | |
1094 HeapRegion* young_list_first_region() { | |
1095 return _young_list->first_region(); | |
1096 } | |
1097 | |
1098 // debugging | |
1099 bool check_young_list_well_formed() { | |
1100 return _young_list->check_list_well_formed(); | |
1101 } | |
1102 bool check_young_list_empty(bool ignore_scan_only_list, | |
1103 bool check_sample = true); | |
1104 | |
1105 // *** Stuff related to concurrent marking. It's not clear to me that so | |
1106 // many of these need to be public. | |
1107 | |
1108 // The functions below are helper functions that a subclass of | |
1109 // "CollectedHeap" can use in the implementation of its virtual | |
1110 // functions. | |
1111 // This performs a concurrent marking of the live objects in a | |
1112 // bitmap off to the side. | |
1113 void doConcurrentMark(); | |
1114 | |
1115 // This is called from the marksweep collector which then does | |
1116 // a concurrent mark and verifies that the results agree with | |
1117 // the stop the world marking. | |
1118 void checkConcurrentMark(); | |
1119 void do_sync_mark(); | |
1120 | |
1121 bool isMarkedPrev(oop obj) const; | |
1122 bool isMarkedNext(oop obj) const; | |
1123 | |
1124 // Determine if an object is dead, given the object and also | |
1125 // the region to which the object belongs. An object is dead | |
1126 // iff a) it was not allocated since the last mark and b) it | |
1127 // is not marked. | |
1128 | |
1129 bool is_obj_dead(const oop obj, const HeapRegion* hr) const { | |
1130 return | |
1131 !hr->obj_allocated_since_prev_marking(obj) && | |
1132 !isMarkedPrev(obj); | |
1133 } | |
1134 | |
1135 // This is used when copying an object to survivor space. | |
1136 // If the object is marked live, then we mark the copy live. | |
1137 // If the object is allocated since the start of this mark | |
1138 // cycle, then we mark the copy live. | |
1139 // If the object has been around since the previous mark | |
1140 // phase, and hasn't been marked yet during this phase, | |
1141 // then we don't mark it, we just wait for the | |
1142 // current marking cycle to get to it. | |
1143 | |
1144 // This function returns true when an object has been | |
1145 // around since the previous marking and hasn't yet | |
1146 // been marked during this marking. | |
1147 | |
1148 bool is_obj_ill(const oop obj, const HeapRegion* hr) const { | |
1149 return | |
1150 !hr->obj_allocated_since_next_marking(obj) && | |
1151 !isMarkedNext(obj); | |
1152 } | |
1153 | |
1154 // Determine if an object is dead, given only the object itself. | |
1155 // This will find the region to which the object belongs and | |
1156 // then call the region version of the same function. | |
1157 | |
1158 // Added: if it is in the permanent gen, it isn't dead. | |
1159 // Added: if it is NULL, it isn't dead. | |
1160 | |
1161 bool is_obj_dead(oop obj) { | |
1162 HeapRegion* hr = heap_region_containing(obj); | |
1163 if (hr == NULL) { | |
1164 if (Universe::heap()->is_in_permanent(obj)) | |
1165 return false; | |
1166 else if (obj == NULL) return false; | |
1167 else return true; | |
1168 } | |
1169 else return is_obj_dead(obj, hr); | |
1170 } | |
1171 | |
1172 bool is_obj_ill(oop obj) { | |
1173 HeapRegion* hr = heap_region_containing(obj); | |
1174 if (hr == NULL) { | |
1175 if (Universe::heap()->is_in_permanent(obj)) | |
1176 return false; | |
1177 else if (obj == NULL) return false; | |
1178 else return true; | |
1179 } | |
1180 else return is_obj_ill(obj, hr); | |
1181 } | |
1182 | |
1183 // The following is just to alert the verification code | |
1184 // that a full collection has occurred and that the | |
1185 // remembered sets are no longer up to date. | |
1186 bool _full_collection; | |
1187 void set_full_collection() { _full_collection = true;} | |
1188 void clear_full_collection() {_full_collection = false;} | |
1189 bool full_collection() {return _full_collection;} | |
1190 | |
1191 ConcurrentMark* concurrent_mark() const { return _cm; } | |
1192 ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; } | |
1193 | |
1194 public: | |
1195 void stop_conc_gc_threads(); | |
1196 | |
1197 // <NEW PREDICTION> | |
1198 | |
1199 double predict_region_elapsed_time_ms(HeapRegion* hr, bool young); | |
1200 void check_if_region_is_too_expensive(double predicted_time_ms); | |
1201 size_t pending_card_num(); | |
1202 size_t max_pending_card_num(); | |
1203 size_t cards_scanned(); | |
1204 | |
1205 // </NEW PREDICTION> | |
1206 | |
1207 protected: | |
1208 size_t _max_heap_capacity; | |
1209 | |
1210 // debug_only(static void check_for_valid_allocation_state();) | |
1211 | |
1212 public: | |
1213 // Temporary: call to mark things unimplemented for the G1 heap (e.g., | |
1214 // MemoryService). In productization, we can make this assert false | |
1215 // to catch such places (as well as searching for calls to this...) | |
1216 static void g1_unimplemented(); | |
1217 | |
1218 }; | |
1219 | |
1220 // Local Variables: *** | |
1221 // c-indentation-style: gnu *** | |
1222 // End: *** |