annotate src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp @ 648:2314b7336582
6820321: G1: Error: guarantee(check_nums(total, n, parts), "all seq lengths should match")
Summary: Small fixes to sort out some verbosegc-related incorrectness and a failure
Reviewed-by: apetrusenko
author | tonyp |
---|---|
date | Sat, 21 Mar 2009 22:53:04 -0400 |
parents | ba50942c8138 |
children | 96b229c54d1e |
rev | line source |
---|---|
342 | 1 /* |
579 | 2 * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. |
342 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, | |
20 * CA 95054 USA or visit www.sun.com if you need additional information or | |
21 * have any questions. | |
22 * | |
23 */ | |
24 | |
25 // A "G1CollectedHeap" is an implementation of a java heap for HotSpot. | |
26 // It uses the "Garbage First" heap organization and algorithm, which | |
27 // may combine concurrent marking with parallel, incremental compaction of | |
28 // heap subsets that will yield large amounts of garbage. | |
29 | |
30 class HeapRegion; | |
31 class HeapRegionSeq; | |
32 class HeapRegionList; | |
33 class PermanentGenerationSpec; | |
34 class GenerationSpec; | |
35 class OopsInHeapRegionClosure; | |
36 class G1ScanHeapEvacClosure; | |
37 class ObjectClosure; | |
38 class SpaceClosure; | |
39 class CompactibleSpaceClosure; | |
40 class Space; | |
41 class G1CollectorPolicy; | |
42 class GenRemSet; | |
43 class G1RemSet; | |
44 class HeapRegionRemSetIterator; | |
45 class ConcurrentMark; | |
46 class ConcurrentMarkThread; | |
47 class ConcurrentG1Refine; | |
48 class ConcurrentZFThread; | |
49 | |
50 // If you want to accumulate detailed statistics on work queues, | |
51 // turn this on. | |
52 #define G1_DETAILED_STATS 0 | |
53 | |
54 #if G1_DETAILED_STATS | |
55 # define IF_G1_DETAILED_STATS(code) code | |
56 #else | |
57 # define IF_G1_DETAILED_STATS(code) | |
58 #endif | |
59 | |
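// A minimal usage sketch (the counter name below is hypothetical, not part of
// this header): a statistics update that compiles to nothing when
// G1_DETAILED_STATS is 0.
//
//   IF_G1_DETAILED_STATS(_queue_pushes++;)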
60 typedef GenericTaskQueue<oop*> RefToScanQueue; | |
61 typedef GenericTaskQueueSet<oop*> RefToScanQueueSet; | |
62 | |
63 enum G1GCThreadGroups { | |
64 G1CRGroup = 0, | |
65 G1ZFGroup = 1, | |
66 G1CMGroup = 2, | |
67 G1CLGroup = 3 | |
68 }; | |
69 | |
70 enum GCAllocPurpose { | |
71 GCAllocForTenured, | |
72 GCAllocForSurvived, | |
73 GCAllocPurposeCount | |
74 }; | |
75 | |
76 class YoungList : public CHeapObj { | |
77 private: | |
78 G1CollectedHeap* _g1h; | |
79 | |
80 HeapRegion* _head; | |
81 | |
82 HeapRegion* _scan_only_head; | |
83 HeapRegion* _scan_only_tail; | |
84 size_t _length; | |
85 size_t _scan_only_length; | |
86 | |
87 size_t _last_sampled_rs_lengths; | |
88 size_t _sampled_rs_lengths; | |
89 HeapRegion* _curr; | |
90 HeapRegion* _curr_scan_only; | |
91 | |
92 HeapRegion* _survivor_head; | |
545 | 93 HeapRegion* _survivor_tail; |
342 | 94 size_t _survivor_length; |
95 | |
96 void empty_list(HeapRegion* list); | |
97 | |
98 public: | |
99 YoungList(G1CollectedHeap* g1h); | |
100 | |
101 void push_region(HeapRegion* hr); | |
102 void add_survivor_region(HeapRegion* hr); | |
103 HeapRegion* pop_region(); | |
104 void empty_list(); | |
105 bool is_empty() { return _length == 0; } | |
106 size_t length() { return _length; } | |
107 size_t scan_only_length() { return _scan_only_length; } | |
545 | 108 size_t survivor_length() { return _survivor_length; } |
342 | 109 |
110 void rs_length_sampling_init(); | |
111 bool rs_length_sampling_more(); | |
112 void rs_length_sampling_next(); | |
113 | |
114 void reset_sampled_info() { | |
115 _last_sampled_rs_lengths = 0; | |
116 } | |
117 size_t sampled_rs_lengths() { return _last_sampled_rs_lengths; } | |
118 | |
119 // for development purposes | |
120 void reset_auxilary_lists(); | |
121 HeapRegion* first_region() { return _head; } | |
122 HeapRegion* first_scan_only_region() { return _scan_only_head; } | |
123 HeapRegion* first_survivor_region() { return _survivor_head; } | |
545 | 124 HeapRegion* last_survivor_region() { return _survivor_tail; } |
342 | 125 HeapRegion* par_get_next_scan_only_region() { |
126 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); | |
127 HeapRegion* ret = _curr_scan_only; | |
128 if (ret != NULL) | |
129 _curr_scan_only = ret->get_next_young_region(); | |
130 return ret; | |
131 } | |
132 | |
133 // debugging | |
134 bool check_list_well_formed(); | |
135 bool check_list_empty(bool ignore_scan_only_list, | |
136 bool check_sample = true); | |
137 void print(); | |
138 }; | |
139 | |
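// A minimal sketch (assumed worker-side consumer, not quoted from the VM
// sources) of how parallel GC workers could drain the scan-only list through
// par_get_next_scan_only_region(), which hands out regions one at a time
// under ParGCRareEvent_lock and returns NULL once the list is exhausted:
//
//   YoungList* young_list = ...;  // obtained from the G1CollectedHeap
//   for (HeapRegion* r = young_list->par_get_next_scan_only_region();
//        r != NULL;
//        r = young_list->par_get_next_scan_only_region()) {
//     // process r in this worker
//   }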
140 class RefineCardTableEntryClosure; | |
141 class G1CollectedHeap : public SharedHeap { | |
142 friend class VM_G1CollectForAllocation; | |
143 friend class VM_GenCollectForPermanentAllocation; | |
144 friend class VM_G1CollectFull; | |
145 friend class VM_G1IncCollectionPause; | |
146 friend class VM_G1PopRegionCollectionPause; | |
147 friend class VMStructs; | |
148 | |
149 // Closures used in implementation. | |
150 friend class G1ParCopyHelper; | |
151 friend class G1IsAliveClosure; | |
152 friend class G1EvacuateFollowersClosure; | |
153 friend class G1ParScanThreadState; | |
154 friend class G1ParScanClosureSuper; | |
155 friend class G1ParEvacuateFollowersClosure; | |
156 friend class G1ParTask; | |
157 friend class G1FreeGarbageRegionClosure; | |
158 friend class RefineCardTableEntryClosure; | |
159 friend class G1PrepareCompactClosure; | |
160 friend class RegionSorter; | |
161 friend class CountRCClosure; | |
162 friend class EvacPopObjClosure; | |
163 | |
164 // Other related classes. | |
165 friend class G1MarkSweep; | |
166 | |
167 private: | |
168 enum SomePrivateConstants { | |
169 VeryLargeInBytes = HeapRegion::GrainBytes/2, | |
170 VeryLargeInWords = VeryLargeInBytes/HeapWordSize, | |
171 MinHeapDeltaBytes = 10 * HeapRegion::GrainBytes, // FIXME | |
172 NumAPIs = HeapRegion::MaxAge | |
173 }; | |
174 | |
175 // The one and only G1CollectedHeap, so static functions can find it. | |
176 static G1CollectedHeap* _g1h; | |
177 | |
178 // Storage for the G1 heap (excludes the permanent generation). | |
179 VirtualSpace _g1_storage; | |
180 MemRegion _g1_reserved; | |
181 | |
182 // The part of _g1_storage that is currently committed. | |
183 MemRegion _g1_committed; | |
184 | |
185 // The maximum part of _g1_storage that has ever been committed. | |
186 MemRegion _g1_max_committed; | |
187 | |
188 // The number of regions that are completely free. | |
189 size_t _free_regions; | |
190 | |
191 // The number of regions we could create by expansion. | |
192 size_t _expansion_regions; | |
193 | |
194 // Return the number of free regions in the heap (by direct counting.) | |
195 size_t count_free_regions(); | |
196 // Return the number of free regions on the free and unclean lists. | |
197 size_t count_free_regions_list(); | |
198 | |
199 // The block offset table for the G1 heap. | |
200 G1BlockOffsetSharedArray* _bot_shared; | |
201 | |
202 // Move all of the regions off the free lists, then rebuild those free | |
203 // lists, before and after full GC. | |
204 void tear_down_region_lists(); | |
205 void rebuild_region_lists(); | |
206 // This sets all non-empty regions to need zero-fill (which they will if | |
207 // they are empty after full collection.) | |
208 void set_used_regions_to_need_zero_fill(); | |
209 | |
210 // The sequence of all heap regions in the heap. | |
211 HeapRegionSeq* _hrs; | |
212 | |
213 // The region from which normal-sized objects are currently being | |
214 // allocated. May be NULL. | |
215 HeapRegion* _cur_alloc_region; | |
216 | |
217 // Postcondition: cur_alloc_region == NULL. | |
218 void abandon_cur_alloc_region(); | |
636 | 219 void abandon_gc_alloc_regions(); |
342 | 220 |
221 // The to-space memory regions into which objects are being copied during | |
222 // a GC. | |
223 HeapRegion* _gc_alloc_regions[GCAllocPurposeCount]; | |
545 | 224 size_t _gc_alloc_region_counts[GCAllocPurposeCount]; |
636 | 225 // These are the regions, one per GCAllocPurpose, that are half-full |
226 // at the end of a collection and that we want to reuse during the | |
227 // next collection. | |
228 HeapRegion* _retained_gc_alloc_regions[GCAllocPurposeCount]; | |
229 // This specifies whether we will keep the last half-full region at | |
230 // the end of a collection so that it can be reused during the next | |
231 // collection (this is specified per GCAllocPurpose) | |
232 bool _retain_gc_alloc_region[GCAllocPurposeCount]; | |
342 | 233 |
234 // A list of the regions that have been set to be alloc regions in the | |
235 // current collection. | |
236 HeapRegion* _gc_alloc_region_list; | |
237 | |
238 // When called by a par thread, par_alloc_during_gc_lock() must be held. | |
239 void push_gc_alloc_region(HeapRegion* hr); | |
240 | |
241 // This should only be called single-threaded. Undeclares all GC alloc | |
242 // regions. | |
243 void forget_alloc_region_list(); | |
244 | |
245 // Should be used to set an alloc region, because there's other | |
246 // associated bookkeeping. | |
247 void set_gc_alloc_region(int purpose, HeapRegion* r); | |
248 | |
249 // Check well-formedness of alloc region list. | |
250 bool check_gc_alloc_regions(); | |
251 | |
252 // Outside of GC pauses, the number of bytes used in all regions other | |
253 // than the current allocation region. | |
254 size_t _summary_bytes_used; | |
255 | |
256 // Summary information about popular objects; method to print it. | |
257 NumberSeq _pop_obj_rc_at_copy; | |
258 void print_popularity_summary_info() const; | |
259 | |
526 | 260 // This is used for a quick test on whether a reference points into |
261 // the collection set or not. Basically, we have an array, with one | |
262 // byte per region, and that byte denotes whether the corresponding | |
263 // region is in the collection set or not. The entry corresponding | |
264 // the bottom of the heap, i.e., region 0, is pointed to by | |
265 // _in_cset_fast_test_base. The _in_cset_fast_test field has been | |
266 // biased so that it actually points to address 0 of the address | |
267 // space, to make the test as fast as possible (we can simply shift | |
268 // the address to address into it, instead of having to subtract the | |
269 // bottom of the heap from the address before shifting it; basically | |
270 // it works in the same way the card table works). | |
271 bool* _in_cset_fast_test; | |
272 | |
273 // The allocated array used for the fast test on whether a reference | |
274 // points into the collection set or not. This field is also used to | |
275 // free the array. | |
276 bool* _in_cset_fast_test_base; | |
277 | |
278 // The length of the _in_cset_fast_test_base array. | |
279 size_t _in_cset_fast_test_length; | |
280 | |
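// A minimal sketch (assumed initialization code, not quoted from
// g1CollectedHeap.cpp) of how the biased pointer described above could be set
// up, so that indexing _in_cset_fast_test directly with
// (addr >> HeapRegion::LogOfHRGrainBytes) needs no subtraction of the heap
// bottom:
//
//   _in_cset_fast_test_length = max_regions();
//   _in_cset_fast_test_base =
//     NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length);
//   // Bias the base so that index 0 corresponds to address 0 rather than to
//   // the bottom of the reserved heap.
//   _in_cset_fast_test = _in_cset_fast_test_base -
//     ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);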
353 | 281 volatile unsigned _gc_time_stamp; |
342 | 282 |
283 size_t* _surviving_young_words; | |
284 | |
285 void setup_surviving_young_words(); | |
286 void update_surviving_young_words(size_t* surv_young_words); | |
287 void cleanup_surviving_young_words(); | |
288 | |
289 protected: | |
290 | |
291 // Returns "true" iff none of the gc alloc regions have any allocations | |
292 // since the last call to "save_marks". | |
293 bool all_alloc_regions_no_allocs_since_save_marks(); | |
545 | 294 // Perform finalization stuff on all allocation regions. |
295 void retire_all_alloc_regions(); | |
342 | 296 |
297 // The number of regions allocated to hold humongous objects. | |
298 int _num_humongous_regions; | |
299 YoungList* _young_list; | |
300 | |
301 // The current policy object for the collector. | |
302 G1CollectorPolicy* _g1_policy; | |
303 | |
304 // Parallel allocation lock to protect the current allocation region. | |
305 Mutex _par_alloc_during_gc_lock; | |
306 Mutex* par_alloc_during_gc_lock() { return &_par_alloc_during_gc_lock; } | |
307 | |
308 // If possible/desirable, allocate a new HeapRegion for normal object | |
309 // allocation sufficient for an allocation of the given "word_size". | |
310 // If "do_expand" is true, will attempt to expand the heap if necessary | |
311 // to satisfy the request. If "zero_filled" is true, requires a | |
312 // zero-filled region. | |
313 // (Returning NULL will trigger a GC.) | |
314 virtual HeapRegion* newAllocRegion_work(size_t word_size, | |
315 bool do_expand, | |
316 bool zero_filled); | |
317 | |
318 virtual HeapRegion* newAllocRegion(size_t word_size, | |
319 bool zero_filled = true) { | |
320 return newAllocRegion_work(word_size, false, zero_filled); | |
321 } | |
322 virtual HeapRegion* newAllocRegionWithExpansion(int purpose, | |
323 size_t word_size, | |
324 bool zero_filled = true); | |
325 | |
326 // Attempt to allocate an object of the given (very large) "word_size". | |
327 // Returns "NULL" on failure. | |
328 virtual HeapWord* humongousObjAllocate(size_t word_size); | |
329 | |
330 // If possible, allocate a block of the given word_size, else return "NULL". | |
331 // Returning NULL will trigger GC or heap expansion. | |
332 // These two methods have rather awkward pre- and | |
333 // post-conditions. If they are called outside a safepoint, then | |
334 // they assume that the caller is holding the heap lock. Upon return | |
335 // they release the heap lock, if they are returning a non-NULL | |
336 // value. attempt_allocation_slow() also dirties the cards of a | |
337 // newly-allocated young region after it releases the heap | |
338 // lock. This change in interface was the neatest way to achieve | |
339 // this card dirtying without affecting mem_allocate(), which is a | |
340 // more frequently called method. We tried two or three different | |
341 // approaches, but they were even more hacky. | |
342 HeapWord* attempt_allocation(size_t word_size, | |
343 bool permit_collection_pause = true); | |
344 | |
345 HeapWord* attempt_allocation_slow(size_t word_size, | |
346 bool permit_collection_pause = true); | |
347 | |
348 // Allocate blocks during garbage collection. Will ensure an | |
349 // allocation region, either by picking one or expanding the | |
350 // heap, and then allocate a block of the given size. The block | |
351 // may not be humongous - it must fit into a single heap region. | |
352 HeapWord* allocate_during_gc(GCAllocPurpose purpose, size_t word_size); | |
353 HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size); | |
354 | |
355 HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose, | |
356 HeapRegion* alloc_region, | |
357 bool par, | |
358 size_t word_size); | |
359 | |
360 // Ensure that no further allocations can happen in "r", bearing in mind | |
361 // that parallel threads might be attempting allocations. | |
362 void par_allocate_remaining_space(HeapRegion* r); | |
363 | |
545 | 364 // Retires an allocation region when it is full or at the end of a |
365 // GC pause. | |
366 void retire_alloc_region(HeapRegion* alloc_region, bool par); | |
367 | |
342 | 368 // Helper function for two callbacks below. |
369 // "full", if true, indicates that the GC is for a System.gc() request, | |
370 // and should collect the entire heap. If "clear_all_soft_refs" is true, | |
371 // all soft references are cleared during the GC. If "full" is false, | |
372 // "word_size" describes the allocation that the GC should | |
373 // attempt (at least) to satisfy. | |
374 void do_collection(bool full, bool clear_all_soft_refs, | |
375 size_t word_size); | |
376 | |
377 // Callback from VM_G1CollectFull operation. | |
378 // Perform a full collection. | |
379 void do_full_collection(bool clear_all_soft_refs); | |
380 | |
381 // Resize the heap if necessary after a full collection. If this is | |
382 // after a collect-for allocation, "word_size" is the allocation size, | |
383 // and will be considered part of the used portion of the heap. | |
384 void resize_if_necessary_after_full_collection(size_t word_size); | |
385 | |
386 // Callback from VM_G1CollectForAllocation operation. | |
387 // This function does everything necessary/possible to satisfy a | |
388 // failed allocation request (including collection, expansion, etc.) | |
389 HeapWord* satisfy_failed_allocation(size_t word_size); | |
390 | |
391 // Attempt to expand the heap sufficiently | |
392 // to support an allocation of the given "word_size". If | |
393 // successful, perform the allocation and return the address of the | |
394 // allocated block, or else "NULL". | |
395 virtual HeapWord* expand_and_allocate(size_t word_size); | |
396 | |
397 public: | |
398 // Expand the garbage-first heap by at least the given size (in bytes!). | |
399 // (Rounds up to a HeapRegion boundary.) | |
400 virtual void expand(size_t expand_bytes); | |
401 | |
402 // Do anything common to GC's. | |
403 virtual void gc_prologue(bool full); | |
404 virtual void gc_epilogue(bool full); | |
405 | |
526 | 406 // We register a region with the fast "in collection set" test. We |
407 // simply set to true the array slot corresponding to this region. | |
408 void register_region_with_in_cset_fast_test(HeapRegion* r) { | |
409 assert(_in_cset_fast_test_base != NULL, "sanity"); | |
410 assert(r->in_collection_set(), "invariant"); | |
411 int index = r->hrs_index(); | |
412 assert(0 <= (size_t) index && (size_t) index < _in_cset_fast_test_length, | |
413 "invariant"); | |
414 assert(!_in_cset_fast_test_base[index], "invariant"); | |
415 _in_cset_fast_test_base[index] = true; | |
416 } | |
417 | |
418 // This is a fast test on whether a reference points into the | |
419 // collection set or not. It does not assume that the reference | |
420 // points into the heap; if it doesn't, it will return false. | |
421 bool in_cset_fast_test(oop obj) { | |
422 assert(_in_cset_fast_test != NULL, "sanity"); | |
423 if (_g1_committed.contains((HeapWord*) obj)) { | |
424 // no need to subtract the bottom of the heap from obj, | |
425 // _in_cset_fast_test is biased | |
426 size_t index = ((size_t) obj) >> HeapRegion::LogOfHRGrainBytes; | |
427 bool ret = _in_cset_fast_test[index]; | |
428 // let's make sure the result is consistent with what the slower | |
429 // test returns | |
430 assert( ret || !obj_in_cs(obj), "sanity"); | |
431 assert(!ret || obj_in_cs(obj), "sanity"); | |
432 return ret; | |
433 } else { | |
434 return false; | |
435 } | |
436 } | |
437 | |
342 | 438 protected: |
439 | |
440 // Shrink the garbage-first heap by at most the given size (in bytes!). | |
441 // (Rounds down to a HeapRegion boundary.) | |
442 virtual void shrink(size_t expand_bytes); | |
443 void shrink_helper(size_t expand_bytes); | |
444 | |
445 // Do an incremental collection: identify a collection set, and evacuate | |
446 // its live objects elsewhere. | |
447 virtual void do_collection_pause(); | |
448 | |
449 // The guts of the incremental collection pause, executed by the vm | |
450 // thread. If "popular_region" is non-NULL, this pause should evacuate | |
451 // this single region whose remembered set has gotten large, moving | |
452 // any popular objects to one of the popular regions. | |
453 virtual void do_collection_pause_at_safepoint(HeapRegion* popular_region); | |
454 | |
455 // Actually do the work of evacuating the collection set. | |
456 virtual void evacuate_collection_set(); | |
457 | |
458 // If this is an appropriate right time, do a collection pause. | |
459 // The "word_size" argument, if non-zero, indicates the size of an | |
460 // allocation request that is prompting this query. | |
461 void do_collection_pause_if_appropriate(size_t word_size); | |
462 | |
463 // The g1 remembered set of the heap. | |
464 G1RemSet* _g1_rem_set; | |
465 // And its mod ref barrier set, used to track updates for the above. | |
466 ModRefBarrierSet* _mr_bs; | |
467 | |
616 | 468 // A set of cards that cover the objects for which the Rsets should be updated |
469 // concurrently after the collection. | |
470 DirtyCardQueueSet _dirty_card_queue_set; | |
471 | |
342 | 472 // The Heap Region Rem Set Iterator. |
473 HeapRegionRemSetIterator** _rem_set_iterator; | |
474 | |
475 // The closure used to refine a single card. | |
476 RefineCardTableEntryClosure* _refine_cte_cl; | |
477 | |
478 // A function to check the consistency of dirty card logs. | |
479 void check_ct_logs_at_safepoint(); | |
480 | |
481 // After a collection pause, make the regions in the CS into free | |
482 // regions. | |
483 void free_collection_set(HeapRegion* cs_head); | |
484 | |
485 // Applies "scan_non_heap_roots" to roots outside the heap, | |
486 // "scan_rs" to roots inside the heap (having done "set_region" to | |
487 // indicate the region in which the root resides), and does "scan_perm" | |
488 // (setting the generation to the perm generation.) If "scan_rs" is | |
489 // NULL, then this step is skipped. The "worker_i" | |
490 // param is for use with parallel roots processing, and should be | |
491 // the "i" of the calling parallel worker thread's work(i) function. | |
492 // In the sequential case this param will be ignored. | |
493 void g1_process_strong_roots(bool collecting_perm_gen, | |
494 SharedHeap::ScanningOption so, | |
495 OopClosure* scan_non_heap_roots, | |
496 OopsInHeapRegionClosure* scan_rs, | |
497 OopsInHeapRegionClosure* scan_so, | |
498 OopsInGenClosure* scan_perm, | |
499 int worker_i); | |
500 | |
501 void scan_scan_only_set(OopsInHeapRegionClosure* oc, | |
502 int worker_i); | |
503 void scan_scan_only_region(HeapRegion* hr, | |
504 OopsInHeapRegionClosure* oc, | |
505 int worker_i); | |
506 | |
507 // Apply "blk" to all the weak roots of the system. These include | |
508 // JNI weak roots, the code cache, system dictionary, symbol table, | |
509 // string table, and referents of reachable weak refs. | |
510 void g1_process_weak_roots(OopClosure* root_closure, | |
511 OopClosure* non_root_closure); | |
512 | |
513 // Invoke "save_marks" on all heap regions. | |
514 void save_marks(); | |
515 | |
516 // Free a heap region. | |
517 void free_region(HeapRegion* hr); | |
518 // A component of "free_region", exposed for 'batching'. | |
519 // All the params after "hr" are out params: the used bytes of the freed | |
520 // region(s), the number of H regions cleared, the number of regions | |
521 // freed, and pointers to the head and tail of a list of freed contig | |
522 // regions, linked through the "next_on_unclean_list" field. | |
523 void free_region_work(HeapRegion* hr, | |
524 size_t& pre_used, | |
525 size_t& cleared_h, | |
526 size_t& freed_regions, | |
527 UncleanRegionList* list, | |
528 bool par = false); | |
529 | |
530 | |
531 // The concurrent marker (and the thread it runs in.) | |
532 ConcurrentMark* _cm; | |
533 ConcurrentMarkThread* _cmThread; | |
534 bool _mark_in_progress; | |
535 | |
536 // The concurrent refiner. | |
537 ConcurrentG1Refine* _cg1r; | |
538 | |
539 // The concurrent zero-fill thread. | |
540 ConcurrentZFThread* _czft; | |
541 | |
542 // The parallel task queues | |
543 RefToScanQueueSet *_task_queues; | |
544 | |
545 // True iff an evacuation has failed in the current collection. | |
546 bool _evacuation_failed; | |
547 | |
548 // Set the attribute indicating whether evacuation has failed in the | |
549 // current collection. | |
550 void set_evacuation_failed(bool b) { _evacuation_failed = b; } | |
551 | |
552 // Failed evacuations cause some logical from-space objects to have | |
553 // forwarding pointers to themselves. Reset them. | |
554 void remove_self_forwarding_pointers(); | |
555 | |
556 // When one is non-null, so is the other. Together, each pair is | |
557 // an object with a preserved mark and its mark value. | |
558 GrowableArray<oop>* _objs_with_preserved_marks; | |
559 GrowableArray<markOop>* _preserved_marks_of_objs; | |
560 | |
561 // Preserve the mark of "obj", if necessary, in preparation for its mark | |
562 // word being overwritten with a self-forwarding-pointer. | |
563 void preserve_mark_if_necessary(oop obj, markOop m); | |
564 | |
565 // The stack of evac-failure objects left to be scanned. | |
566 GrowableArray<oop>* _evac_failure_scan_stack; | |
567 // The closure to apply to evac-failure objects. | |
568 | |
569 OopsInHeapRegionClosure* _evac_failure_closure; | |
570 // Set the field above. | |
571 void | |
572 set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_closure) { | |
573 _evac_failure_closure = evac_failure_closure; | |
574 } | |
575 | |
576 // Push "obj" on the scan stack. | |
577 void push_on_evac_failure_scan_stack(oop obj); | |
578 // Process scan stack entries until the stack is empty. | |
579 void drain_evac_failure_scan_stack(); | |
580 // True iff an invocation of "drain_scan_stack" is in progress; to | |
581 // prevent unnecessary recursion. | |
582 bool _drain_in_progress; | |
583 | |
584 // Do any necessary initialization for evacuation-failure handling. | |
585 // "cl" is the closure that will be used to process evac-failure | |
586 // objects. | |
587 void init_for_evac_failure(OopsInHeapRegionClosure* cl); | |
588 // Do any necessary cleanup for evacuation-failure handling data | |
589 // structures. | |
590 void finalize_for_evac_failure(); | |
591 | |
592 // An attempt to evacuate "obj" has failed; take necessary steps. | |
593 void handle_evacuation_failure(oop obj); | |
594 oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj); | |
595 void handle_evacuation_failure_common(oop obj, markOop m); | |
596 | |
597 | |
598 // Ensure that the relevant gc_alloc regions are set. | |
599 void get_gc_alloc_regions(); | |
636 | 600 // We're done with GC alloc regions. We are going to tear down the |
601 // gc alloc list and remove the gc alloc tag from all the regions on | |
602 // that list. However, we will also retain the last (i.e., the one | |
603 // that is half-full) GC alloc region, per GCAllocPurpose, for | |
604 // possible reuse during the next collection, provided | |
605 // _retain_gc_alloc_region[] indicates that it should be the | |
606 // case. Said regions are kept in the _retained_gc_alloc_regions[] | |
607 // array. If the parameter totally is set, we will not retain any | |
608 // regions, irrespective of what _retain_gc_alloc_region[] | |
609 // indicates. | |
610 void release_gc_alloc_regions(bool totally); | |
611 #ifndef PRODUCT | |
612 // Useful for debugging. | |
613 void print_gc_alloc_regions(); | |
614 #endif // !PRODUCT | |
342 | 615 |
616 // ("Weak") Reference processing support | |
617 ReferenceProcessor* _ref_processor; | |
618 | |
619 enum G1H_process_strong_roots_tasks { | |
620 G1H_PS_mark_stack_oops_do, | |
621 G1H_PS_refProcessor_oops_do, | |
622 // Leave this one last. | |
623 G1H_PS_NumElements | |
624 }; | |
625 | |
626 SubTasksDone* _process_strong_tasks; | |
627 | |
628 // Allocate space to hold a popular object. Result is guaranteed below | |
629 // "popular_object_boundary()". Note: CURRENTLY halts the system if we | |
630 // run out of space to hold popular objects. | |
631 HeapWord* allocate_popular_object(size_t word_size); | |
632 | |
633 // The boundary between popular and non-popular objects. | |
634 HeapWord* _popular_object_boundary; | |
635 | |
636 HeapRegionList* _popular_regions_to_be_evacuated; | |
637 | |
638 // Compute which objects in "single_region" are popular. If any are, | |
639 // evacuate them to a popular region, leaving behind forwarding pointers, | |
640 // and select "popular_region" as the single collection set region. | |
641 // Otherwise, leave the collection set null. | |
642 void popularity_pause_preamble(HeapRegion* popular_region); | |
643 | |
644 // Compute which objects in "single_region" are popular, and evacuate | |
645 // them to a popular region, leaving behind forwarding pointers. | |
646 // Returns "true" if at least one popular object is discovered and | |
647 // evacuated. In any case, "*max_rc" is set to the maximum reference | |
648 // count of an object in the region. | |
649 bool compute_reference_counts_and_evac_popular(HeapRegion* popular_region, | |
650 size_t* max_rc); | |
651 // Subroutines used in the above. | |
652 bool _rc_region_above; | |
653 size_t _rc_region_diff; | |
654 jint* obj_rc_addr(oop obj) { | |
655 uintptr_t obj_addr = (uintptr_t)obj; | |
656 if (_rc_region_above) { | |
657 jint* res = (jint*)(obj_addr + _rc_region_diff); | |
658 assert((uintptr_t)res > obj_addr, "RC region is above."); | |
659 return res; | |
660 } else { | |
661 jint* res = (jint*)(obj_addr - _rc_region_diff); | |
662 assert((uintptr_t)res < obj_addr, "RC region is below."); | |
663 return res; | |
664 } | |
665 } | |
666 jint obj_rc(oop obj) { | |
667 return *obj_rc_addr(obj); | |
668 } | |
669 void inc_obj_rc(oop obj) { | |
670 (*obj_rc_addr(obj))++; | |
671 } | |
672 void atomic_inc_obj_rc(oop obj); | |
673 | |
674 | |
675 // Number of popular objects and bytes (latter is cheaper!). | |
676 size_t pop_object_used_objs(); | |
677 size_t pop_object_used_bytes(); | |
678 | |
679 // Index of the popular region in which allocation is currently being | |
680 // done. | |
681 int _cur_pop_hr_index; | |
682 | |
683 // List of regions which require zero filling. | |
684 UncleanRegionList _unclean_region_list; | |
685 bool _unclean_regions_coming; | |
686 | |
687 bool check_age_cohort_well_formed_work(int a, HeapRegion* hr); | |
688 | |
689 public: | |
690 void set_refine_cte_cl_concurrency(bool concurrent); | |
691 | |
692 RefToScanQueue *task_queue(int i); | |
693 | |
616 | 694 // A set of cards where updates happened during the GC |
695 DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; } | |
696 | |
342 | 697 // Create a G1CollectedHeap with the specified policy. |
698 // Must call the initialize method afterwards. | |
699 // May not return if something goes wrong. | |
700 G1CollectedHeap(G1CollectorPolicy* policy); | |
701 | |
702 // Initialize the G1CollectedHeap to have the initial and | |
703 // maximum sizes, permanent generation, and remembered and barrier sets | |
704 // specified by the policy object. | |
705 jint initialize(); | |
706 | |
707 void ref_processing_init(); | |
708 | |
709 void set_par_threads(int t) { | |
710 SharedHeap::set_par_threads(t); | |
711 _process_strong_tasks->set_par_threads(t); | |
712 } | |
713 | |
714 virtual CollectedHeap::Name kind() const { | |
715 return CollectedHeap::G1CollectedHeap; | |
716 } | |
717 | |
718 // The current policy object for the collector. | |
719 G1CollectorPolicy* g1_policy() const { return _g1_policy; } | |
720 | |
721 // Adaptive size policy. No such thing for g1. | |
722 virtual AdaptiveSizePolicy* size_policy() { return NULL; } | |
723 | |
724 // The rem set and barrier set. | |
725 G1RemSet* g1_rem_set() const { return _g1_rem_set; } | |
726 ModRefBarrierSet* mr_bs() const { return _mr_bs; } | |
727 | |
728 // The rem set iterator. | |
729 HeapRegionRemSetIterator* rem_set_iterator(int i) { | |
730 return _rem_set_iterator[i]; | |
731 } | |
732 | |
733 HeapRegionRemSetIterator* rem_set_iterator() { | |
734 return _rem_set_iterator[0]; | |
735 } | |
736 | |
737 unsigned get_gc_time_stamp() { | |
738 return _gc_time_stamp; | |
739 } | |
740 | |
741 void reset_gc_time_stamp() { | |
742 _gc_time_stamp = 0; | |
353 | 743 OrderAccess::fence(); |
744 } | |
745 | |
746 void increment_gc_time_stamp() { | |
747 ++_gc_time_stamp; | |
748 OrderAccess::fence(); | |
342 | 749 } |
750 | |
751 void iterate_dirty_card_closure(bool concurrent, int worker_i); | |
752 | |
753 // The shared block offset table array. | |
754 G1BlockOffsetSharedArray* bot_shared() const { return _bot_shared; } | |
755 | |
756 // Reference Processing accessor | |
757 ReferenceProcessor* ref_processor() { return _ref_processor; } | |
758 | |
759 // Reserved (g1 only; super method includes perm), capacity and the used | |
760 // portion in bytes. | |
761 size_t g1_reserved_obj_bytes() { return _g1_reserved.byte_size(); } | |
762 virtual size_t capacity() const; | |
763 virtual size_t used() const; | |
764 size_t recalculate_used() const; | |
765 #ifndef PRODUCT | |
766 size_t recalculate_used_regions() const; | |
767 #endif // PRODUCT | |
768 | |
769 // These virtual functions do the actual allocation. | |
770 virtual HeapWord* mem_allocate(size_t word_size, | |
771 bool is_noref, | |
772 bool is_tlab, | |
773 bool* gc_overhead_limit_was_exceeded); | |
774 | |
775 // Some heaps may offer a contiguous region for shared non-blocking | |
776 // allocation, via inlined code (by exporting the address of the top and | |
777 // end fields defining the extent of the contiguous allocation region.) | |
778 // But G1CollectedHeap doesn't yet support this. | |
779 | |
780 // Return an estimate of the maximum allocation that could be performed | |
781 // without triggering any collection or expansion activity. In a | |
782 // generational collector, for example, this is probably the largest | |
783 // allocation that could be supported (without expansion) in the youngest | |
784 // generation. It is "unsafe" because no locks are taken; the result | |
785 // should be treated as an approximation, not a guarantee, for use in | |
786 // heuristic resizing decisions. | |
787 virtual size_t unsafe_max_alloc(); | |
788 | |
789 virtual bool is_maximal_no_gc() const { | |
790 return _g1_storage.uncommitted_size() == 0; | |
791 } | |
792 | |
793 // The total number of regions in the heap. | |
794 size_t n_regions(); | |
795 | |
796 // The maximum number of regions that the heap can hold. | |
797 size_t max_regions(); | |
798 | |
799 // The number of regions that are completely free. | |
800 size_t free_regions(); | |
801 | |
802 // The number of regions that are not completely free. | |
803 size_t used_regions() { return n_regions() - free_regions(); } | |
804 | |
805 // True iff the ZF thread should run. | |
806 bool should_zf(); | |
807 | |
808 // The number of regions available for "regular" expansion. | |
809 size_t expansion_regions() { return _expansion_regions; } | |
810 | |
811 #ifndef PRODUCT | |
812 bool regions_accounted_for(); | |
813 bool print_region_accounting_info(); | |
814 void print_region_counts(); | |
815 #endif | |
816 | |
817 HeapRegion* alloc_region_from_unclean_list(bool zero_filled); | |
818 HeapRegion* alloc_region_from_unclean_list_locked(bool zero_filled); | |
819 | |
820 void put_region_on_unclean_list(HeapRegion* r); | |
821 void put_region_on_unclean_list_locked(HeapRegion* r); | |
822 | |
823 void prepend_region_list_on_unclean_list(UncleanRegionList* list); | |
824 void prepend_region_list_on_unclean_list_locked(UncleanRegionList* list); | |
825 | |
826 void set_unclean_regions_coming(bool b); | |
827 void set_unclean_regions_coming_locked(bool b); | |
828 // Wait for cleanup to be complete. | |
829 void wait_for_cleanup_complete(); | |
830 // Like above, but assumes that the calling thread owns the Heap_lock. | |
831 void wait_for_cleanup_complete_locked(); | |
832 | |
833 // Return the head of the unclean list. | |
834 HeapRegion* peek_unclean_region_list_locked(); | |
835 // Remove and return the head of the unclean list. | |
836 HeapRegion* pop_unclean_region_list_locked(); | |
837 | |
838 // List of regions which are zero filled and ready for allocation. | |
839 HeapRegion* _free_region_list; | |
840 // Number of elements on the free list. | |
841 size_t _free_region_list_size; | |
842 | |
843 // If the head of the unclean list is ZeroFilled, move it to the free | |
844 // list. | |
845 bool move_cleaned_region_to_free_list_locked(); | |
846 bool move_cleaned_region_to_free_list(); | |
847 | |
848 void put_free_region_on_list_locked(HeapRegion* r); | |
849 void put_free_region_on_list(HeapRegion* r); | |
850 | |
851 // Remove and return the head element of the free list. | |
852 HeapRegion* pop_free_region_list_locked(); | |
853 | |
854 // If "zero_filled" is true, we first try the free list, then we try the | |
855 // unclean list, zero-filling the result. If "zero_filled" is false, we | |
856 // first try the unclean list, then the zero-filled list. | |
857 HeapRegion* alloc_free_region_from_lists(bool zero_filled); | |
858 | |
859 // Verify the integrity of the region lists. | |
860 void remove_allocated_regions_from_lists(); | |
861 bool verify_region_lists(); | |
862 bool verify_region_lists_locked(); | |
863 size_t unclean_region_list_length(); | |
864 size_t free_region_list_length(); | |
865 | |
866 // Perform a collection of the heap; intended for use in implementing | |
867 // "System.gc". This probably implies as full a collection as the | |
868 // "CollectedHeap" supports. | |
869 virtual void collect(GCCause::Cause cause); | |
870 | |
871 // The same as above but assume that the caller holds the Heap_lock. | |
872 void collect_locked(GCCause::Cause cause); | |
873 | |
874 // This interface assumes that it's being called by the | |
875 // vm thread. It collects the heap assuming that the | |
876 // heap lock is already held and that we are executing in | |
877 // the context of the vm thread. | |
878 virtual void collect_as_vm_thread(GCCause::Cause cause); | |
879 | |
880 // True iff an evacuation has failed in the most-recent collection. | |
881 bool evacuation_failed() { return _evacuation_failed; } | |
882 | |
883 // Free a region if it is totally full of garbage. Returns the number of | |
884 // bytes freed (0 ==> didn't free it). | |
885 size_t free_region_if_totally_empty(HeapRegion *hr); | |
886 void free_region_if_totally_empty_work(HeapRegion *hr, | |
887 size_t& pre_used, | |
888 size_t& cleared_h_regions, | |
889 size_t& freed_regions, | |
890 UncleanRegionList* list, | |
891 bool par = false); | |
892 | |
893 // If we've done free region work that yields the given changes, update | |
894 // the relevant global variables. | |
895 void finish_free_region_work(size_t pre_used, | |
896 size_t cleared_h_regions, | |
897 size_t freed_regions, | |
898 UncleanRegionList* list); | |
899 | |
900 | |
901 // Returns "TRUE" iff "p" points into the allocated area of the heap. | |
902 virtual bool is_in(const void* p) const; | |
903 | |
904 // Return "TRUE" iff the given object address is within the collection | |
905 // set. | |
906 inline bool obj_in_cs(oop obj); | |
907 | |
908 // Return "TRUE" iff the given object address is in the reserved | |
909 // region of g1 (excluding the permanent generation). | |
910 bool is_in_g1_reserved(const void* p) const { | |
911 return _g1_reserved.contains(p); | |
912 } | |
913 | |
914 // Returns a MemRegion that corresponds to the space that has been | |
915 // committed in the heap | |
916 MemRegion g1_committed() { | |
917 return _g1_committed; | |
918 } | |
919 | |
920 NOT_PRODUCT( bool is_in_closed_subset(const void* p) const; ) | |
921 | |
922 // Dirty card table entries covering a list of young regions. | |
923 void dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list); | |
924 | |
925 // This resets the card table to all zeros. It is used after | |
926 // a collection pause which used the card table to claim cards. | |
927 void cleanUpCardTable(); | |
928 | |
929 // Iteration functions. | |
930 | |
931 // Iterate over all the ref-containing fields of all objects, calling | |
932 // "cl.do_oop" on each. | |
933 virtual void oop_iterate(OopClosure* cl); | |
934 | |
935 // Same as above, restricted to a memory region. | |
936 virtual void oop_iterate(MemRegion mr, OopClosure* cl); | |
937 | |
938 // Iterate over all objects, calling "cl.do_object" on each. | |
939 virtual void object_iterate(ObjectClosure* cl); | |
517 | 940 virtual void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); } |
342 | 941 |
942 // Iterate over all objects allocated since the last collection, calling | |
943 // "cl.do_object" on each. The heap must have been initialized properly | |
944 // to support this function, or else this call will fail. | |
945 virtual void object_iterate_since_last_GC(ObjectClosure* cl); | |
946 | |
947 // Iterate over all spaces in use in the heap, in ascending address order. | |
948 virtual void space_iterate(SpaceClosure* cl); | |
949 | |
950 // Iterate over heap regions, in address order, terminating the | |
951 // iteration early if the "doHeapRegion" method returns "true". | |
952 void heap_region_iterate(HeapRegionClosure* blk); | |
953 | |
954 // Iterate over heap regions starting with r (or the first region if "r" | |
955 // is NULL), in address order, terminating early if the "doHeapRegion" | |
956 // method returns "true". | |
957 void heap_region_iterate_from(HeapRegion* r, HeapRegionClosure* blk); | |
958 | |
959 // As above but starting from the region at index idx. | |
960 void heap_region_iterate_from(int idx, HeapRegionClosure* blk); | |
961 | |
962 HeapRegion* region_at(size_t idx); | |
963 | |
964 // Divide the heap region sequence into "chunks" of some size (the number | |
965 // of regions divided by the number of parallel threads times some | |
966 // overpartition factor, currently 4). Assumes that this will be called | |
967 // in parallel by ParallelGCThreads worker threads with distinct worker | |
968 // ids in the range [0..max(ParallelGCThreads-1, 1)], that all parallel | |
969 // calls will use the same "claim_value", and that that claim value is | |
970 // different from the claim_value of any heap region before the start of | |
971 // the iteration. Applies "blk->doHeapRegion" to each of the regions, by | |
972 // attempting to claim the first region in each chunk, and, if | |
973 // successful, applying the closure to each region in the chunk (and | |
974 // setting the claim value of the second and subsequent regions of the | |
975 // chunk.) For now requires that "doHeapRegion" always returns "false", | |
976 // i.e., that a closure never attempt to abort a traversal. | |
977 void heap_region_par_iterate_chunked(HeapRegionClosure* blk, | |
978 int worker, | |
979 jint claim_value); | |
980 | |
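// A minimal usage sketch (the closure and claim value below are hypothetical,
// not part of this header) of the chunked parallel iteration described above;
// each worker runs the same kind of closure with its own worker id and a
// shared, previously unused claim value:
//
//   class RegionCountClosure : public HeapRegionClosure {
//     size_t _regions;
//   public:
//     RegionCountClosure() : _regions(0) {}
//     bool doHeapRegion(HeapRegion* r) { _regions++; return false; } // never abort
//     size_t regions() const { return _regions; }
//   };
//
//   // inside the work(worker_i) method of a parallel GC task:
//   RegionCountClosure cl;
//   G1CollectedHeap::heap()->heap_region_par_iterate_chunked(&cl, worker_i,
//                                                            MyClaimValue);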
390 | 981 // It resets all the region claim values to the default. |
982 void reset_heap_region_claim_values(); | |
983 | |
355 | 984 #ifdef ASSERT |
985 bool check_heap_region_claim_values(jint claim_value); | |
986 #endif // ASSERT | |
987 | |
342 | 988 // Iterate over the regions (if any) in the current collection set. |
989 void collection_set_iterate(HeapRegionClosure* blk); | |
990 | |
991 // As above but starting from region r | |
992 void collection_set_iterate_from(HeapRegion* r, HeapRegionClosure *blk); | |
993 | |
994 // Returns the first (lowest address) compactible space in the heap. | |
995 virtual CompactibleSpace* first_compactible_space(); | |
996 | |
997 // A CollectedHeap will contain some number of spaces. This finds the | |
998 // space containing a given address, or else returns NULL. | |
999 virtual Space* space_containing(const void* addr) const; | |
1000 | |
1001 // A G1CollectedHeap will contain some number of heap regions. This | |
1002 // finds the region containing a given address, or else returns NULL. | |
1003 HeapRegion* heap_region_containing(const void* addr) const; | |
1004 | |
1005 // Like the above, but requires "addr" to be in the heap (to avoid a | |
1006 // null-check), and unlike the above, may return a continuing humongous | |
1007 // region. | |
1008 HeapRegion* heap_region_containing_raw(const void* addr) const; | |
1009 | |
1010 // A CollectedHeap is divided into a dense sequence of "blocks"; that is, | |
1011 // each address in the (reserved) heap is a member of exactly | |
1012 // one block. The defining characteristic of a block is that it is | |
1013 // possible to find its size, and thus to progress forward to the next | |
1014 // block. (Blocks may be of different sizes.) Thus, blocks may | |
1015 // represent Java objects, or they might be free blocks in a | |
1016 // free-list-based heap (or subheap), as long as the two kinds are | |
1017 // distinguishable and the size of each is determinable. | |
1018 | |
1019 // Returns the address of the start of the "block" that contains the | |
1020 // address "addr". We say "blocks" instead of "object" since some heaps | |
1021 // may not pack objects densely; a chunk may either be an object or a | |
1022 // non-object. | |
1023 virtual HeapWord* block_start(const void* addr) const; | |
1024 | |
1025 // Requires "addr" to be the start of a chunk, and returns its size. | |
1026 // "addr + size" is required to be the start of a new chunk, or the end | |
1027 // of the active area of the heap. | |
1028 virtual size_t block_size(const HeapWord* addr) const; | |
1029 | |
1030 // Requires "addr" to be the start of a block, and returns "TRUE" iff | |
1031 // the block is an object. | |
1032 virtual bool block_is_obj(const HeapWord* addr) const; | |
1033 | |
1034 // Does this heap support heap inspection? (+PrintClassHistogram) | |
1035 virtual bool supports_heap_inspection() const { return true; } | |
1036 | |
1037 // Section on thread-local allocation buffers (TLABs) | |
1038 // See CollectedHeap for semantics. | |
1039 | |
1040 virtual bool supports_tlab_allocation() const; | |
1041 virtual size_t tlab_capacity(Thread* thr) const; | |
1042 virtual size_t unsafe_max_tlab_alloc(Thread* thr) const; | |
1043 virtual HeapWord* allocate_new_tlab(size_t size); | |
1044 | |
1045 // Can a compiler initialize a new object without store barriers? | |
1046 // This permission only extends from the creation of a new object | |
1047 // via a TLAB up to the first subsequent safepoint. | |
1048 virtual bool can_elide_tlab_store_barriers() const { | |
1049 // G1's TLABs may, on occasion, come from non-young regions as well, | |
1050 // so we cannot elide the barriers. (Is there a flag controlling that? XXX) | |
1051 return false; | |
1052 } | |
1053 | |
1054 // Can a compiler elide a store barrier when it writes | |
1055 // a permanent oop into the heap? Applies when the compiler | |
1056 // is storing x to the heap, where x->is_perm() is true. | |
1057 virtual bool can_elide_permanent_oop_store_barriers() const { | |
1058 // At least until perm gen collection is also G1-ified, at | |
1059 // which point this should return false. | |
1060 return true; | |
1061 } | |
1062 | |
1063 virtual bool allocs_are_zero_filled(); | |
1064 | |
1065 // The boundary between a "large" and "small" array of primitives, in | |
1066 // words. | |
1067 virtual size_t large_typearray_limit(); | |
1068 | |
1069 // All popular objects are guaranteed to have addresses below this | |
1070 // boundary. | |
1071 HeapWord* popular_object_boundary() { | |
1072 return _popular_object_boundary; | |
1073 } | |
1074 | |
1075 // Declare the region as one that should be evacuated because its | |
1076 // remembered set is too large. | |
1077 void schedule_popular_region_evac(HeapRegion* r); | |
1078 // If there is a popular region to evacuate it, remove it from the list | |
1079 // and return it. | |
1080 HeapRegion* popular_region_to_evac(); | |
1081 // Evacuate the given popular region. | |
1082 void evac_popular_region(HeapRegion* r); | |
1083 | |
1084 // Returns "true" iff the given word_size is "very large". | |
1085 static bool isHumongous(size_t word_size) { | |
1086 return word_size >= VeryLargeInWords; | |
1087 } | |
1088 | |
1089 // Update mod union table with the set of dirty cards. | |
1090 void updateModUnion(); | |
1091 | |
1092 // Set the mod union bits corresponding to the given memRegion. Note | |
1093 // that this is always a safe operation, since it doesn't clear any | |
1094 // bits. | |
1095 void markModUnionRange(MemRegion mr); | |
1096 | |
1097 // Records the fact that a marking phase is no longer in progress. | |
1098 void set_marking_complete() { | |
1099 _mark_in_progress = false; | |
1100 } | |
1101 void set_marking_started() { | |
1102 _mark_in_progress = true; | |
1103 } | |
1104 bool mark_in_progress() { | |
1105 return _mark_in_progress; | |
1106 } | |
1107 | |
1108 // Print the maximum heap capacity. | |
1109 virtual size_t max_capacity() const; | |
1110 | |
1111 virtual jlong millis_since_last_gc(); | |
1112 | |
1113 // Perform any cleanup actions necessary before allowing a verification. | |
1114 virtual void prepare_for_verify(); | |
1115 | |
1116 // Perform verification. | |
1117 virtual void verify(bool allow_dirty, bool silent); | |
1118 virtual void print() const; | |
1119 virtual void print_on(outputStream* st) const; | |
1120 | |
1121 virtual void print_gc_threads_on(outputStream* st) const; | |
1122 virtual void gc_threads_do(ThreadClosure* tc) const; | |
1123 | |
1124 // Override | |
1125 void print_tracing_info() const; | |
1126 | |
1127 // If "addr" is a pointer into the (reserved?) heap, returns a positive | |
1128 // number indicating the "arena" within the heap in which "addr" falls. | |
1129 // Or else returns 0. | |
1130 virtual int addr_to_arena_id(void* addr) const; | |
1131 | |
1132 // Convenience function to be used in situations where the heap type can be | |
1133 // asserted to be this type. | |
1134 static G1CollectedHeap* heap(); | |
1135 | |
1136 void empty_young_list(); | |
1137 bool should_set_young_locked(); | |
1138 | |
1139 void set_region_short_lived_locked(HeapRegion* hr); | |
1140 // add appropriate methods for any other surv rate groups | |
1141 | |
1142 void young_list_rs_length_sampling_init() { | |
1143 _young_list->rs_length_sampling_init(); | |
1144 } | |
1145 bool young_list_rs_length_sampling_more() { | |
1146 return _young_list->rs_length_sampling_more(); | |
1147 } | |
1148 void young_list_rs_length_sampling_next() { | |
1149 _young_list->rs_length_sampling_next(); | |
1150 } | |
1151 size_t young_list_sampled_rs_lengths() { | |
1152 return _young_list->sampled_rs_lengths(); | |
1153 } | |
1154 | |
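// A minimal sketch (assumed caller, not quoted from the VM sources) of the
// init / more / next sampling protocol exposed by the four accessors above:
//
//   G1CollectedHeap* g1h = G1CollectedHeap::heap();
//   g1h->young_list_rs_length_sampling_init();
//   while (g1h->young_list_rs_length_sampling_more()) {
//     g1h->young_list_rs_length_sampling_next();
//   }
//   size_t rs_lengths = g1h->young_list_sampled_rs_lengths();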
1155 size_t young_list_length() { return _young_list->length(); } | |
1156 size_t young_list_scan_only_length() { | |
1157 return _young_list->scan_only_length(); } | |
1158 | |
1159 HeapRegion* pop_region_from_young_list() { | |
1160 return _young_list->pop_region(); | |
1161 } | |
1162 | |
1163 HeapRegion* young_list_first_region() { | |
1164 return _young_list->first_region(); | |
1165 } | |
1166 | |
1167 // debugging | |
1168 bool check_young_list_well_formed() { | |
1169 return _young_list->check_list_well_formed(); | |
1170 } | |
1171 bool check_young_list_empty(bool ignore_scan_only_list, | |
1172 bool check_sample = true); | |
1173 | |
1174 // *** Stuff related to concurrent marking. It's not clear to me that so | |
1175 // many of these need to be public. | |
1176 | |
1177 // The functions below are helper functions that a subclass of | |
1178 // "CollectedHeap" can use in the implementation of its virtual | |
1179 // functions. | |
1180 // This performs a concurrent marking of the live objects in a | |
1181 // bitmap off to the side. | |
1182 void doConcurrentMark(); | |
1183 | |
1184 // This is called from the marksweep collector which then does | |
1185 // a concurrent mark and verifies that the results agree with | |
1186 // the stop the world marking. | |
1187 void checkConcurrentMark(); | |
1188 void do_sync_mark(); | |
1189 | |
1190 bool isMarkedPrev(oop obj) const; | |
1191 bool isMarkedNext(oop obj) const; | |
1192 | |
1193 // Determine if an object is dead, given the object and also | |
1194 // the region to which the object belongs. An object is dead | |
1195 // iff a) it was not allocated since the last mark and b) it | |
1196 // is not marked. | |
1197 | |
1198 bool is_obj_dead(const oop obj, const HeapRegion* hr) const { | |
1199 return | |
1200 !hr->obj_allocated_since_prev_marking(obj) && | |
1201 !isMarkedPrev(obj); | |
1202 } | |
1203 | |
1204 // This is used when copying an object to survivor space. | |
1205 // If the object is marked live, then we mark the copy live. | |
1206 // If the object is allocated since the start of this mark | |
1207 // cycle, then we mark the copy live. | |
1208 // If the object has been around since the previous mark | |
1209 // phase, and hasn't been marked yet during this phase, | |
1210 // then we don't mark it, we just wait for the | |
1211 // current marking cycle to get to it. | |
1212 | |
1213 // This function returns true when an object has been | |
1214 // around since the previous marking and hasn't yet | |
1215 // been marked during this marking. | |
1216 | |
1217 bool is_obj_ill(const oop obj, const HeapRegion* hr) const { | |
1218 return | |
1219 !hr->obj_allocated_since_next_marking(obj) && | |
1220 !isMarkedNext(obj); | |
1221 } | |
1222 | |
1223 // Determine if an object is dead, given only the object itself. | |
1224 // This will find the region to which the object belongs and | |
1225 // then call the region version of the same function. | |
1226 | |
1227 // Additionally: if it is in the permanent gen it isn't dead. | |
1228 // Additionally: if it is NULL it isn't dead. | |
1229 | |
1230 bool is_obj_dead(oop obj) { | |
1231 HeapRegion* hr = heap_region_containing(obj); | |
1232 if (hr == NULL) { | |
1233 if (Universe::heap()->is_in_permanent(obj)) | |
1234 return false; | |
1235 else if (obj == NULL) return false; | |
1236 else return true; | |
1237 } | |
1238 else return is_obj_dead(obj, hr); | |
1239 } | |
1240 | |
1241 bool is_obj_ill(oop obj) { | |
1242 HeapRegion* hr = heap_region_containing(obj); | |
1243 if (hr == NULL) { | |
1244 if (Universe::heap()->is_in_permanent(obj)) | |
1245 return false; | |
1246 else if (obj == NULL) return false; | |
1247 else return true; | |
1248 } | |
1249 else return is_obj_ill(obj, hr); | |
1250 } | |
1251 | |
1252 // The following is just to alert the verification code | |
1253 // that a full collection has occurred and that the | |
1254 // remembered sets are no longer up to date. | |
1255 bool _full_collection; | |
1256 void set_full_collection() { _full_collection = true;} | |
1257 void clear_full_collection() {_full_collection = false;} | |
1258 bool full_collection() {return _full_collection;} | |
1259 | |
1260 ConcurrentMark* concurrent_mark() const { return _cm; } | |
1261 ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; } | |
1262 | |
1263 public: | |
1264 void stop_conc_gc_threads(); | |
1265 | |
1266 // <NEW PREDICTION> | |
1267 | |
1268 double predict_region_elapsed_time_ms(HeapRegion* hr, bool young); | |
1269 void check_if_region_is_too_expensive(double predicted_time_ms); | |
1270 size_t pending_card_num(); | |
1271 size_t max_pending_card_num(); | |
1272 size_t cards_scanned(); | |
1273 | |
1274 // </NEW PREDICTION> | |
1275 | |
1276 protected: | |
1277 size_t _max_heap_capacity; | |
1278 | |
1279 // debug_only(static void check_for_valid_allocation_state();) | |
1280 | |
1281 public: | |
1282 // Temporary: call to mark things unimplemented for the G1 heap (e.g., | |
1283 // MemoryService). In productization, we can make this assert false | |
1284 // to catch such places (as well as searching for calls to this...) | |
1285 static void g1_unimplemented(); | |
1286 | |
1287 }; | |
1288 | |
1289 // Local Variables: *** | |
1290 // c-indentation-style: gnu *** | |
1291 // End: *** |