annotate src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp @ 579:0fbdb4381b99
6814575: Update copyright year
Summary: Update copyright for files that have been modified in 2009, up to 03/09
Reviewed-by: katleman, tbell, ohair
author | xdono |
date | Mon, 09 Mar 2009 13:28:46 -0700 |
parents | 58054a18d735 |
children | 7bb995fbd3c0 |
rev | line source |
---|---|
342 | 1 /* |
579 | 2 * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. |
342 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, | |
20 * CA 95054 USA or visit www.sun.com if you need additional information or | |
21 * have any questions. | |
22 * | |
23 */ | |
24 | |
25 // A "G1CollectedHeap" is an implementation of a java heap for HotSpot. | |
26 // It uses the "Garbage First" heap organization and algorithm, which | |
27 // may combine concurrent marking with parallel, incremental compaction of | |
28 // heap subsets that will yield large amounts of garbage. | |
29 | |
30 class HeapRegion; | |
31 class HeapRegionSeq; | |
32 class HeapRegionList; | |
33 class PermanentGenerationSpec; | |
34 class GenerationSpec; | |
35 class OopsInHeapRegionClosure; | |
36 class G1ScanHeapEvacClosure; | |
37 class ObjectClosure; | |
38 class SpaceClosure; | |
39 class CompactibleSpaceClosure; | |
40 class Space; | |
41 class G1CollectorPolicy; | |
42 class GenRemSet; | |
43 class G1RemSet; | |
44 class HeapRegionRemSetIterator; | |
45 class ConcurrentMark; | |
46 class ConcurrentMarkThread; | |
47 class ConcurrentG1Refine; | |
48 class ConcurrentZFThread; | |
49 | |
50 // If you want to accumulate detailed statistics on work queues | |
51 // turn this on. | |
52 #define G1_DETAILED_STATS 0 | |
53 | |
54 #if G1_DETAILED_STATS | |
55 # define IF_G1_DETAILED_STATS(code) code | |
56 #else | |
57 # define IF_G1_DETAILED_STATS(code) | |
58 #endif | |
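// (Illustrative sketch added by the editor; not part of the original header.)
// With G1_DETAILED_STATS set to 1 the macro above compiles its argument,
// otherwise it expands to nothing, so statistics hooks cost nothing in normal
// builds. A hypothetical use site (the "note_push" counter is made up):
//
//   IF_G1_DETAILED_STATS(queue_stats()->note_push());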
59 | |
60 typedef GenericTaskQueue<oop*> RefToScanQueue; | |
61 typedef GenericTaskQueueSet<oop*> RefToScanQueueSet; | |
62 | |
63 enum G1GCThreadGroups { | |
64 G1CRGroup = 0, | |
65 G1ZFGroup = 1, | |
66 G1CMGroup = 2, | |
67 G1CLGroup = 3 | |
68 }; | |
69 | |
70 enum GCAllocPurpose { | |
71 GCAllocForTenured, | |
72 GCAllocForSurvived, | |
73 GCAllocPurposeCount | |
74 }; | |
75 | |
76 class YoungList : public CHeapObj { | |
77 private: | |
78 G1CollectedHeap* _g1h; | |
79 | |
80 HeapRegion* _head; | |
81 | |
82 HeapRegion* _scan_only_head; | |
83 HeapRegion* _scan_only_tail; | |
84 size_t _length; | |
85 size_t _scan_only_length; | |
86 | |
87 size_t _last_sampled_rs_lengths; | |
88 size_t _sampled_rs_lengths; | |
89 HeapRegion* _curr; | |
90 HeapRegion* _curr_scan_only; | |
91 | |
92 HeapRegion* _survivor_head; | |
545 | 93 HeapRegion* _survivor_tail; |
342 | 94 size_t _survivor_length; |
95 | |
96 void empty_list(HeapRegion* list); | |
97 | |
98 public: | |
99 YoungList(G1CollectedHeap* g1h); | |
100 | |
101 void push_region(HeapRegion* hr); | |
102 void add_survivor_region(HeapRegion* hr); | |
103 HeapRegion* pop_region(); | |
104 void empty_list(); | |
105 bool is_empty() { return _length == 0; } | |
106 size_t length() { return _length; } | |
107 size_t scan_only_length() { return _scan_only_length; } | |
545 | 108 size_t survivor_length() { return _survivor_length; } |
342 | 109 |
110 void rs_length_sampling_init(); | |
111 bool rs_length_sampling_more(); | |
112 void rs_length_sampling_next(); | |
113 | |
114 void reset_sampled_info() { | |
115 _last_sampled_rs_lengths = 0; | |
116 } | |
117 size_t sampled_rs_lengths() { return _last_sampled_rs_lengths; } | |
118 | |
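// (Illustrative sketch added by the editor; not part of the original header.)
// The rs_length_sampling_* methods above look like a simple iteration
// protocol; a sampler would presumably drive them roughly like this, reading
// the accumulated total via sampled_rs_lengths() afterwards:
//
//   young_list->rs_length_sampling_init();
//   while (young_list->rs_length_sampling_more()) {
//     young_list->rs_length_sampling_next();
//   }
//   size_t sampled = young_list->sampled_rs_lengths();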
119 // for development purposes | |
120 void reset_auxilary_lists(); | |
121 HeapRegion* first_region() { return _head; } | |
122 HeapRegion* first_scan_only_region() { return _scan_only_head; } | |
123 HeapRegion* first_survivor_region() { return _survivor_head; } | |
545 | 124 HeapRegion* last_survivor_region() { return _survivor_tail; } |
342 | 125 HeapRegion* par_get_next_scan_only_region() { |
126 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); | |
127 HeapRegion* ret = _curr_scan_only; | |
128 if (ret != NULL) | |
129 _curr_scan_only = ret->get_next_young_region(); | |
130 return ret; | |
131 } | |
132 | |
133 // debugging | |
134 bool check_list_well_formed(); | |
135 bool check_list_empty(bool ignore_scan_only_list, | |
136 bool check_sample = true); | |
137 void print(); | |
138 }; | |
139 | |
140 class RefineCardTableEntryClosure; | |
141 class G1CollectedHeap : public SharedHeap { | |
142 friend class VM_G1CollectForAllocation; | |
143 friend class VM_GenCollectForPermanentAllocation; | |
144 friend class VM_G1CollectFull; | |
145 friend class VM_G1IncCollectionPause; | |
146 friend class VM_G1PopRegionCollectionPause; | |
147 friend class VMStructs; | |
148 | |
149 // Closures used in implementation. | |
150 friend class G1ParCopyHelper; | |
151 friend class G1IsAliveClosure; | |
152 friend class G1EvacuateFollowersClosure; | |
153 friend class G1ParScanThreadState; | |
154 friend class G1ParScanClosureSuper; | |
155 friend class G1ParEvacuateFollowersClosure; | |
156 friend class G1ParTask; | |
157 friend class G1FreeGarbageRegionClosure; | |
158 friend class RefineCardTableEntryClosure; | |
159 friend class G1PrepareCompactClosure; | |
160 friend class RegionSorter; | |
161 friend class CountRCClosure; | |
162 friend class EvacPopObjClosure; | |
163 | |
164 // Other related classes. | |
165 friend class G1MarkSweep; | |
166 | |
167 private: | |
168 enum SomePrivateConstants { | |
169 VeryLargeInBytes = HeapRegion::GrainBytes/2, | |
170 VeryLargeInWords = VeryLargeInBytes/HeapWordSize, | |
171 MinHeapDeltaBytes = 10 * HeapRegion::GrainBytes, // FIXME | |
172 NumAPIs = HeapRegion::MaxAge | |
173 }; | |
174 | |
175 | |
176 // The one and only G1CollectedHeap, so static functions can find it. | |
177 static G1CollectedHeap* _g1h; | |
178 | |
179 // Storage for the G1 heap (excludes the permanent generation). | |
180 VirtualSpace _g1_storage; | |
181 MemRegion _g1_reserved; | |
182 | |
183 // The part of _g1_storage that is currently committed. | |
184 MemRegion _g1_committed; | |
185 | |
186 // The maximum part of _g1_storage that has ever been committed. | |
187 MemRegion _g1_max_committed; | |
188 | |
189 // The number of regions that are completely free. | |
190 size_t _free_regions; | |
191 | |
192 // The number of regions we could create by expansion. | |
193 size_t _expansion_regions; | |
194 | |
195 // Return the number of free regions in the heap (by direct counting.) | |
196 size_t count_free_regions(); | |
197 // Return the number of free regions on the free and unclean lists. | |
198 size_t count_free_regions_list(); | |
199 | |
200 // The block offset table for the G1 heap. | |
201 G1BlockOffsetSharedArray* _bot_shared; | |
202 | |
203 // Move all of the regions off the free lists, then rebuild those free | |
204 // lists, before and after full GC. | |
205 void tear_down_region_lists(); | |
206 void rebuild_region_lists(); | |
207 // This sets all non-empty regions to need zero-fill (which they will need if | |
208 // they are empty after the full collection). | |
209 void set_used_regions_to_need_zero_fill(); | |
210 | |
211 // The sequence of all heap regions in the heap. | |
212 HeapRegionSeq* _hrs; | |
213 | |
214 // The region from which normal-sized objects are currently being | |
215 // allocated. May be NULL. | |
216 HeapRegion* _cur_alloc_region; | |
217 | |
218 // Postcondition: cur_alloc_region == NULL. | |
219 void abandon_cur_alloc_region(); | |
220 | |
221 // The to-space memory regions into which objects are being copied during | |
222 // a GC. | |
223 HeapRegion* _gc_alloc_regions[GCAllocPurposeCount]; | |
545 | 224 size_t _gc_alloc_region_counts[GCAllocPurposeCount]; |
342 | 225 |
226 // A list of the regions that have been set to be alloc regions in the | |
227 // current collection. | |
228 HeapRegion* _gc_alloc_region_list; | |
229 | |
230 // When called by a par thread, requires par_alloc_during_gc_lock() to be held. | |
231 void push_gc_alloc_region(HeapRegion* hr); | |
232 | |
233 // This should only be called single-threaded. Undeclares all GC alloc | |
234 // regions. | |
235 void forget_alloc_region_list(); | |
236 | |
237 // Should be used to set an alloc region, because there's other | |
238 // associated bookkeeping. | |
239 void set_gc_alloc_region(int purpose, HeapRegion* r); | |
240 | |
241 // Check well-formedness of alloc region list. | |
242 bool check_gc_alloc_regions(); | |
243 | |
244 // Outside of GC pauses, the number of bytes used in all regions other | |
245 // than the current allocation region. | |
246 size_t _summary_bytes_used; | |
247 | |
248 // Summary information about popular objects; method to print it. | |
249 NumberSeq _pop_obj_rc_at_copy; | |
250 void print_popularity_summary_info() const; | |
251 | |
526 | 252 // This is used for a quick test on whether a reference points into |
253 // the collection set or not. Basically, we have an array, with one | |
254 // byte per region, and that byte denotes whether the corresponding | |
255 // region is in the collection set or not. The entry corresponding | |
256 // the bottom of the heap, i.e., region 0, is pointed to by | |
257 // _in_cset_fast_test_base. The _in_cset_fast_test field has been | |
258 // biased so that it actually points to address 0 of the address | |
259 // space, to make the test as fast as possible (we can simply shift | |
260 // the address to index into it, instead of having to subtract the | |
261 // bottom of the heap from the address before shifting it; basically | |
262 // it works in the same way the card table works). | |
263 bool* _in_cset_fast_test; | |
264 | |
265 // The allocated array used for the fast test on whether a reference | |
266 // points into the collection set or not. This field is also used to | |
267 // free the array. | |
268 bool* _in_cset_fast_test_base; | |
269 | |
270 // The length of the _in_cset_fast_test_base array. | |
271 size_t _in_cset_fast_test_length; | |
272 | |
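// (Illustrative sketch added by the editor; not part of the original header.)
// The "biasing" described above presumably amounts to offsetting the base
// pointer by the index of the bottom of the reserved heap, e.g. roughly:
//
//   _in_cset_fast_test = _in_cset_fast_test_base -
//       ((uintptr_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
//
// so that in_cset_fast_test() (declared further down) can index the array
// with ((size_t) obj) >> HeapRegion::LogOfHRGrainBytes, with no subtraction.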
353 | 273 volatile unsigned _gc_time_stamp; |
342 | 274 |
275 size_t* _surviving_young_words; | |
276 | |
277 void setup_surviving_young_words(); | |
278 void update_surviving_young_words(size_t* surv_young_words); | |
279 void cleanup_surviving_young_words(); | |
280 | |
281 protected: | |
282 | |
283 // Returns "true" iff none of the gc alloc regions have any allocations | |
284 // since the last call to "save_marks". | |
285 bool all_alloc_regions_no_allocs_since_save_marks(); | |
545 | 286 // Perform finalization stuff on all allocation regions. |
287 void retire_all_alloc_regions(); | |
342 | 288 |
289 // The number of regions allocated to hold humongous objects. | |
290 int _num_humongous_regions; | |
291 YoungList* _young_list; | |
292 | |
293 // The current policy object for the collector. | |
294 G1CollectorPolicy* _g1_policy; | |
295 | |
296 // Parallel allocation lock to protect the current allocation region. | |
297 Mutex _par_alloc_during_gc_lock; | |
298 Mutex* par_alloc_during_gc_lock() { return &_par_alloc_during_gc_lock; } | |
299 | |
300 // If possible/desirable, allocate a new HeapRegion for normal object | |
301 // allocation sufficient for an allocation of the given "word_size". | |
302 // If "do_expand" is true, will attempt to expand the heap if necessary | |
303 // to satisfy the request. If "zero_filled" is true, requires a | |
304 // zero-filled region. | |
305 // (Returning NULL will trigger a GC.) | |
306 virtual HeapRegion* newAllocRegion_work(size_t word_size, | |
307 bool do_expand, | |
308 bool zero_filled); | |
309 | |
310 virtual HeapRegion* newAllocRegion(size_t word_size, | |
311 bool zero_filled = true) { | |
312 return newAllocRegion_work(word_size, false, zero_filled); | |
313 } | |
314 virtual HeapRegion* newAllocRegionWithExpansion(int purpose, | |
315 size_t word_size, | |
316 bool zero_filled = true); | |
317 | |
318 // Attempt to allocate an object of the given (very large) "word_size". | |
319 // Returns "NULL" on failure. | |
320 virtual HeapWord* humongousObjAllocate(size_t word_size); | |
321 | |
322 // If possible, allocate a block of the given word_size, else return "NULL". | |
323 // Returning NULL will trigger GC or heap expansion. | |
324 // These two methods have rather awkward pre- and | |
325 // post-conditions. If they are called outside a safepoint, then | |
326 // they assume that the caller is holding the heap lock. Upon return | |
327 // they release the heap lock, if they are returning a non-NULL | |
328 // value. attempt_allocation_slow() also dirties the cards of a | |
329 // newly-allocated young region after it releases the heap | |
330 // lock. This change in interface was the neatest way to achieve | |
331 // this card dirtying without affecting mem_allocate(), which is a | |
332 // more frequently called method. We tried two or three different | |
333 // approaches, but they were even more hacky. | |
334 HeapWord* attempt_allocation(size_t word_size, | |
335 bool permit_collection_pause = true); | |
336 | |
337 HeapWord* attempt_allocation_slow(size_t word_size, | |
338 bool permit_collection_pause = true); | |
339 | |
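// (Illustrative sketch added by the editor; not part of the original header.)
// Per the comment above, an out-of-safepoint caller enters holding the
// Heap_lock and, when a non-NULL result is returned, finds the lock already
// released. A caller might therefore look roughly like this:
//
//   Heap_lock->lock();
//   HeapWord* result = attempt_allocation(word_size);
//   if (result != NULL) {
//     // Heap_lock has been released for us.
//   } else {
//     // still holding Heap_lock; trigger a GC or expansion, then retry.
//   }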
340 // Allocate blocks during garbage collection. Will ensure an | |
341 // allocation region, either by picking one or expanding the | |
342 // heap, and then allocate a block of the given size. The block | |
343 // may not be humongous - it must fit into a single heap region. | |
344 HeapWord* allocate_during_gc(GCAllocPurpose purpose, size_t word_size); | |
345 HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size); | |
346 | |
347 HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose, | |
348 HeapRegion* alloc_region, | |
349 bool par, | |
350 size_t word_size); | |
351 | |
352 // Ensure that no further allocations can happen in "r", bearing in mind | |
353 // that parallel threads might be attempting allocations. | |
354 void par_allocate_remaining_space(HeapRegion* r); | |
355 | |
545 | 356 // Retires an allocation region when it is full or at the end of a |
357 // GC pause. | |
358 void retire_alloc_region(HeapRegion* alloc_region, bool par); | |
359 | |
342 | 360 // Helper function for two callbacks below. |
361 // "full", if true, indicates that the GC is for a System.gc() request, | |
362 // and should collect the entire heap. If "clear_all_soft_refs" is true, | |
363 // all soft references are cleared during the GC. If "full" is false, | |
364 // "word_size" describes the allocation that the GC should | |
365 // attempt (at least) to satisfy. | |
366 void do_collection(bool full, bool clear_all_soft_refs, | |
367 size_t word_size); | |
368 | |
369 // Callback from VM_G1CollectFull operation. | |
370 // Perform a full collection. | |
371 void do_full_collection(bool clear_all_soft_refs); | |
372 | |
373 // Resize the heap if necessary after a full collection. If this is | |
374 // after a collect-for-allocation, "word_size" is the allocation size, | |
375 // and will be considered part of the used portion of the heap. | |
376 void resize_if_necessary_after_full_collection(size_t word_size); | |
377 | |
378 // Callback from VM_G1CollectForAllocation operation. | |
379 // This function does everything necessary/possible to satisfy a | |
380 // failed allocation request (including collection, expansion, etc.) | |
381 HeapWord* satisfy_failed_allocation(size_t word_size); | |
382 | |
383 // Attempt to expand the heap sufficiently | |
384 // to support an allocation of the given "word_size". If | |
385 // successful, perform the allocation and return the address of the | |
386 // allocated block, or else "NULL". | |
387 virtual HeapWord* expand_and_allocate(size_t word_size); | |
388 | |
389 public: | |
390 // Expand the garbage-first heap by at least the given size (in bytes!). | |
391 // (Rounds up to a HeapRegion boundary.) | |
392 virtual void expand(size_t expand_bytes); | |
393 | |
394 // Do anything common to GC's. | |
395 virtual void gc_prologue(bool full); | |
396 virtual void gc_epilogue(bool full); | |
397 | |
526 | 398 // We register a region with the fast "in collection set" test. We |
399 // simply set to true the array slot corresponding to this region. | |
400 void register_region_with_in_cset_fast_test(HeapRegion* r) { | |
401 assert(_in_cset_fast_test_base != NULL, "sanity"); | |
402 assert(r->in_collection_set(), "invariant"); | |
403 int index = r->hrs_index(); | |
404 assert(0 <= (size_t) index && (size_t) index < _in_cset_fast_test_length, | |
405 "invariant"); | |
406 assert(!_in_cset_fast_test_base[index], "invariant"); | |
407 _in_cset_fast_test_base[index] = true; | |
408 } | |
409 | |
410 // This is a fast test on whether a reference points into the | |
411 // collection set or not. It does not assume that the reference | |
412 // points into the heap; if it doesn't, it will return false. | |
413 bool in_cset_fast_test(oop obj) { | |
414 assert(_in_cset_fast_test != NULL, "sanity"); | |
415 if (_g1_committed.contains((HeapWord*) obj)) { | |
416 // no need to subtract the bottom of the heap from obj, | |
417 // _in_cset_fast_test is biased | |
418 size_t index = ((size_t) obj) >> HeapRegion::LogOfHRGrainBytes; | |
419 bool ret = _in_cset_fast_test[index]; | |
420 // let's make sure the result is consistent with what the slower | |
421 // test returns | |
422 assert( ret || !obj_in_cs(obj), "sanity"); | |
423 assert(!ret || obj_in_cs(obj), "sanity"); | |
424 return ret; | |
425 } else { | |
426 return false; | |
427 } | |
428 } | |
429 | |
342 | 430 protected: |
431 | |
432 // Shrink the garbage-first heap by at most the given size (in bytes!). | |
433 // (Rounds down to a HeapRegion boundary.) | |
434 virtual void shrink(size_t expand_bytes); | |
435 void shrink_helper(size_t expand_bytes); | |
436 | |
437 // Do an incremental collection: identify a collection set, and evacuate | |
438 // its live objects elsewhere. | |
439 virtual void do_collection_pause(); | |
440 | |
441 // The guts of the incremental collection pause, executed by the vm | |
442 // thread. If "popular_region" is non-NULL, this pause should evacuate | |
443 // this single region whose remembered set has gotten large, moving | |
444 // any popular objects to one of the popular regions. | |
445 virtual void do_collection_pause_at_safepoint(HeapRegion* popular_region); | |
446 | |
447 // Actually do the work of evacuating the collection set. | |
448 virtual void evacuate_collection_set(); | |
449 | |
450 // If this is an appropriate time, do a collection pause. | |
451 // The "word_size" argument, if non-zero, indicates the size of an | |
452 // allocation request that is prompting this query. | |
453 void do_collection_pause_if_appropriate(size_t word_size); | |
454 | |
455 // The g1 remembered set of the heap. | |
456 G1RemSet* _g1_rem_set; | |
457 // And its mod ref barrier set, used to track updates for the above. | |
458 ModRefBarrierSet* _mr_bs; | |
459 | |
460 // The Heap Region Rem Set Iterator. | |
461 HeapRegionRemSetIterator** _rem_set_iterator; | |
462 | |
463 // The closure used to refine a single card. | |
464 RefineCardTableEntryClosure* _refine_cte_cl; | |
465 | |
466 // A function to check the consistency of dirty card logs. | |
467 void check_ct_logs_at_safepoint(); | |
468 | |
469 // After a collection pause, make the regions in the CS into free | |
470 // regions. | |
471 void free_collection_set(HeapRegion* cs_head); | |
472 | |
473 // Applies "scan_non_heap_roots" to roots outside the heap, | |
474 // "scan_rs" to roots inside the heap (having done "set_region" to | |
475 // indicate the region in which the root resides), and does "scan_perm" | |
476 // (setting the generation to the perm generation.) If "scan_rs" is | |
477 // NULL, then this step is skipped. The "worker_i" | |
478 // param is for use with parallel roots processing, and should be | |
479 // the "i" of the calling parallel worker thread's work(i) function. | |
480 // In the sequential case this param will be ignored. | |
481 void g1_process_strong_roots(bool collecting_perm_gen, | |
482 SharedHeap::ScanningOption so, | |
483 OopClosure* scan_non_heap_roots, | |
484 OopsInHeapRegionClosure* scan_rs, | |
485 OopsInHeapRegionClosure* scan_so, | |
486 OopsInGenClosure* scan_perm, | |
487 int worker_i); | |
488 | |
489 void scan_scan_only_set(OopsInHeapRegionClosure* oc, | |
490 int worker_i); | |
491 void scan_scan_only_region(HeapRegion* hr, | |
492 OopsInHeapRegionClosure* oc, | |
493 int worker_i); | |
494 | |
495 // Apply "blk" to all the weak roots of the system. These include | |
496 // JNI weak roots, the code cache, system dictionary, symbol table, | |
497 // string table, and referents of reachable weak refs. | |
498 void g1_process_weak_roots(OopClosure* root_closure, | |
499 OopClosure* non_root_closure); | |
500 | |
501 // Invoke "save_marks" on all heap regions. | |
502 void save_marks(); | |
503 | |
504 // Free a heap region. | |
505 void free_region(HeapRegion* hr); | |
506 // A component of "free_region", exposed for 'batching'. | |
507 // All the params after "hr" are out params: the used bytes of the freed | |
508 // region(s), the number of H regions cleared, the number of regions | |
509 // freed, and pointers to the head and tail of a list of freed contig | |
510 // regions, linked through the "next_on_unclean_list" field. | |
511 void free_region_work(HeapRegion* hr, | |
512 size_t& pre_used, | |
513 size_t& cleared_h, | |
514 size_t& freed_regions, | |
515 UncleanRegionList* list, | |
516 bool par = false); | |
517 | |
518 | |
519 // The concurrent marker (and the thread it runs in.) | |
520 ConcurrentMark* _cm; | |
521 ConcurrentMarkThread* _cmThread; | |
522 bool _mark_in_progress; | |
523 | |
524 // The concurrent refiner. | |
525 ConcurrentG1Refine* _cg1r; | |
526 | |
527 // The concurrent zero-fill thread. | |
528 ConcurrentZFThread* _czft; | |
529 | |
530 // The parallel task queues | |
531 RefToScanQueueSet *_task_queues; | |
532 | |
533 // True iff an evacuation has failed in the current collection. | |
534 bool _evacuation_failed; | |
535 | |
536 // Set the attribute indicating whether evacuation has failed in the | |
537 // current collection. | |
538 void set_evacuation_failed(bool b) { _evacuation_failed = b; } | |
539 | |
540 // Failed evacuations cause some logical from-space objects to have | |
541 // forwarding pointers to themselves. Reset them. | |
542 void remove_self_forwarding_pointers(); | |
543 | |
544 // When one is non-null, so is the other. Together, each pair is | |
545 // an object with a preserved mark and its mark value. | |
546 GrowableArray<oop>* _objs_with_preserved_marks; | |
547 GrowableArray<markOop>* _preserved_marks_of_objs; | |
548 | |
549 // Preserve the mark of "obj", if necessary, in preparation for its mark | |
550 // word being overwritten with a self-forwarding-pointer. | |
551 void preserve_mark_if_necessary(oop obj, markOop m); | |
552 | |
553 // The stack of evac-failure objects left to be scanned. | |
554 GrowableArray<oop>* _evac_failure_scan_stack; | |
555 // The closure to apply to evac-failure objects. | |
556 | |
557 OopsInHeapRegionClosure* _evac_failure_closure; | |
558 // Set the field above. | |
559 void | |
560 set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_closure) { | |
561 _evac_failure_closure = evac_failure_closure; | |
562 } | |
563 | |
564 // Push "obj" on the scan stack. | |
565 void push_on_evac_failure_scan_stack(oop obj); | |
566 // Process scan stack entries until the stack is empty. | |
567 void drain_evac_failure_scan_stack(); | |
568 // True iff an invocation of "drain_scan_stack" is in progress; to | |
569 // prevent unnecessary recursion. | |
570 bool _drain_in_progress; | |
571 | |
572 // Do any necessary initialization for evacuation-failure handling. | |
573 // "cl" is the closure that will be used to process evac-failure | |
574 // objects. | |
575 void init_for_evac_failure(OopsInHeapRegionClosure* cl); | |
576 // Do any necessary cleanup for evacuation-failure handling data | |
577 // structures. | |
578 void finalize_for_evac_failure(); | |
579 | |
580 // An attempt to evacuate "obj" has failed; take necessary steps. | |
581 void handle_evacuation_failure(oop obj); | |
582 oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj); | |
583 void handle_evacuation_failure_common(oop obj, markOop m); | |
584 | |
585 | |
586 // Ensure that the relevant gc_alloc regions are set. | |
587 void get_gc_alloc_regions(); | |
588 // We're done with GC alloc regions; release them, as appropriate. | |
589 void release_gc_alloc_regions(); | |
590 | |
591 // ("Weak") Reference processing support | |
592 ReferenceProcessor* _ref_processor; | |
593 | |
594 enum G1H_process_strong_roots_tasks { | |
595 G1H_PS_mark_stack_oops_do, | |
596 G1H_PS_refProcessor_oops_do, | |
597 // Leave this one last. | |
598 G1H_PS_NumElements | |
599 }; | |
600 | |
601 SubTasksDone* _process_strong_tasks; | |
602 | |
603 // Allocate space to hold a popular object. Result is guaranteed below | |
604 // "popular_object_boundary()". Note: CURRENTLY halts the system if we | |
605 // run out of space to hold popular objects. | |
606 HeapWord* allocate_popular_object(size_t word_size); | |
607 | |
608 // The boundary between popular and non-popular objects. | |
609 HeapWord* _popular_object_boundary; | |
610 | |
611 HeapRegionList* _popular_regions_to_be_evacuated; | |
612 | |
613 // Compute which objects in "single_region" are popular. If any are, | |
614 // evacuate them to a popular region, leaving behind forwarding pointers, | |
615 // and select "popular_region" as the single collection set region. | |
616 // Otherwise, leave the collection set null. | |
617 void popularity_pause_preamble(HeapRegion* populer_region); | |
618 | |
619 // Compute which objects in "single_region" are popular, and evacuate | |
620 // them to a popular region, leaving behind forwarding pointers. | |
621 // Returns "true" if at least one popular object is discovered and | |
622 // evacuated. In any case, "*max_rc" is set to the maximum reference | |
623 // count of an object in the region. | |
624 bool compute_reference_counts_and_evac_popular(HeapRegion* populer_region, | |
625 size_t* max_rc); | |
626 // Subroutines used in the above. | |
627 bool _rc_region_above; | |
628 size_t _rc_region_diff; | |
629 jint* obj_rc_addr(oop obj) { | |
630 uintptr_t obj_addr = (uintptr_t)obj; | |
631 if (_rc_region_above) { | |
632 jint* res = (jint*)(obj_addr + _rc_region_diff); | |
633 assert((uintptr_t)res > obj_addr, "RC region is above."); | |
634 return res; | |
635 } else { | |
636 jint* res = (jint*)(obj_addr - _rc_region_diff); | |
637 assert((uintptr_t)res < obj_addr, "RC region is below."); | |
638 return res; | |
639 } | |
640 } | |
641 jint obj_rc(oop obj) { | |
642 return *obj_rc_addr(obj); | |
643 } | |
644 void inc_obj_rc(oop obj) { | |
645 (*obj_rc_addr(obj))++; | |
646 } | |
647 void atomic_inc_obj_rc(oop obj); | |
648 | |
649 | |
650 // Number of popular objects and bytes (latter is cheaper!). | |
651 size_t pop_object_used_objs(); | |
652 size_t pop_object_used_bytes(); | |
653 | |
654 // Index of the popular region in which allocation is currently being | |
655 // done. | |
656 int _cur_pop_hr_index; | |
657 | |
658 // List of regions which require zero filling. | |
659 UncleanRegionList _unclean_region_list; | |
660 bool _unclean_regions_coming; | |
661 | |
662 bool check_age_cohort_well_formed_work(int a, HeapRegion* hr); | |
663 | |
664 public: | |
665 void set_refine_cte_cl_concurrency(bool concurrent); | |
666 | |
667 RefToScanQueue *task_queue(int i); | |
668 | |
669 // Create a G1CollectedHeap with the specified policy. | |
670 // Must call the initialize method afterwards. | |
671 // May not return if something goes wrong. | |
672 G1CollectedHeap(G1CollectorPolicy* policy); | |
673 | |
674 // Initialize the G1CollectedHeap to have the initial and | |
675 // maximum sizes, permanent generation, and remembered and barrier sets | |
676 // specified by the policy object. | |
677 jint initialize(); | |
678 | |
679 void ref_processing_init(); | |
680 | |
681 void set_par_threads(int t) { | |
682 SharedHeap::set_par_threads(t); | |
683 _process_strong_tasks->set_par_threads(t); | |
684 } | |
685 | |
686 virtual CollectedHeap::Name kind() const { | |
687 return CollectedHeap::G1CollectedHeap; | |
688 } | |
689 | |
690 // The current policy object for the collector. | |
691 G1CollectorPolicy* g1_policy() const { return _g1_policy; } | |
692 | |
693 // Adaptive size policy. No such thing for g1. | |
694 virtual AdaptiveSizePolicy* size_policy() { return NULL; } | |
695 | |
696 // The rem set and barrier set. | |
697 G1RemSet* g1_rem_set() const { return _g1_rem_set; } | |
698 ModRefBarrierSet* mr_bs() const { return _mr_bs; } | |
699 | |
700 // The rem set iterator. | |
701 HeapRegionRemSetIterator* rem_set_iterator(int i) { | |
702 return _rem_set_iterator[i]; | |
703 } | |
704 | |
705 HeapRegionRemSetIterator* rem_set_iterator() { | |
706 return _rem_set_iterator[0]; | |
707 } | |
708 | |
709 unsigned get_gc_time_stamp() { | |
710 return _gc_time_stamp; | |
711 } | |
712 | |
713 void reset_gc_time_stamp() { | |
714 _gc_time_stamp = 0; | |
353 | 715 OrderAccess::fence(); |
716 } |
717 |
718 void increment_gc_time_stamp() { |
719 ++_gc_time_stamp; |
720 OrderAccess::fence(); |
342 | 721 } |
722 | |
723 void iterate_dirty_card_closure(bool concurrent, int worker_i); | |
724 | |
725 // The shared block offset table array. | |
726 G1BlockOffsetSharedArray* bot_shared() const { return _bot_shared; } | |
727 | |
728 // Reference Processing accessor | |
729 ReferenceProcessor* ref_processor() { return _ref_processor; } | |
730 | |
731 // Reserved (g1 only; super method includes perm), capacity and the used | |
732 // portion in bytes. | |
733 size_t g1_reserved_obj_bytes() { return _g1_reserved.byte_size(); } | |
734 virtual size_t capacity() const; | |
735 virtual size_t used() const; | |
736 size_t recalculate_used() const; | |
737 #ifndef PRODUCT | |
738 size_t recalculate_used_regions() const; | |
739 #endif // PRODUCT | |
740 | |
741 // These virtual functions do the actual allocation. | |
742 virtual HeapWord* mem_allocate(size_t word_size, | |
743 bool is_noref, | |
744 bool is_tlab, | |
745 bool* gc_overhead_limit_was_exceeded); | |
746 | |
747 // Some heaps may offer a contiguous region for shared non-blocking | |
748 // allocation, via inlined code (by exporting the address of the top and | |
749 // end fields defining the extent of the contiguous allocation region.) | |
750 // But G1CollectedHeap doesn't yet support this. | |
751 | |
752 // Return an estimate of the maximum allocation that could be performed | |
753 // without triggering any collection or expansion activity. In a | |
754 // generational collector, for example, this is probably the largest | |
755 // allocation that could be supported (without expansion) in the youngest | |
756 // generation. It is "unsafe" because no locks are taken; the result | |
757 // should be treated as an approximation, not a guarantee, for use in | |
758 // heuristic resizing decisions. | |
759 virtual size_t unsafe_max_alloc(); | |
760 | |
761 virtual bool is_maximal_no_gc() const { | |
762 return _g1_storage.uncommitted_size() == 0; | |
763 } | |
764 | |
765 // The total number of regions in the heap. | |
766 size_t n_regions(); | |
767 | |
768 // The maximum number of regions in the heap. | |
769 size_t max_regions(); | |
770 | |
771 // The number of regions that are completely free. | |
772 size_t free_regions(); | |
773 | |
774 // The number of regions that are not completely free. | |
775 size_t used_regions() { return n_regions() - free_regions(); } | |
776 | |
777 // True iff the ZF thread should run. | |
778 bool should_zf(); | |
779 | |
780 // The number of regions available for "regular" expansion. | |
781 size_t expansion_regions() { return _expansion_regions; } | |
782 | |
783 #ifndef PRODUCT | |
784 bool regions_accounted_for(); | |
785 bool print_region_accounting_info(); | |
786 void print_region_counts(); | |
787 #endif | |
788 | |
789 HeapRegion* alloc_region_from_unclean_list(bool zero_filled); | |
790 HeapRegion* alloc_region_from_unclean_list_locked(bool zero_filled); | |
791 | |
792 void put_region_on_unclean_list(HeapRegion* r); | |
793 void put_region_on_unclean_list_locked(HeapRegion* r); | |
794 | |
795 void prepend_region_list_on_unclean_list(UncleanRegionList* list); | |
796 void prepend_region_list_on_unclean_list_locked(UncleanRegionList* list); | |
797 | |
798 void set_unclean_regions_coming(bool b); | |
799 void set_unclean_regions_coming_locked(bool b); | |
800 // Wait for cleanup to be complete. | |
801 void wait_for_cleanup_complete(); | |
802 // Like above, but assumes that the calling thread owns the Heap_lock. | |
803 void wait_for_cleanup_complete_locked(); | |
804 | |
805 // Return the head of the unclean list. | |
806 HeapRegion* peek_unclean_region_list_locked(); | |
807 // Remove and return the head of the unclean list. | |
808 HeapRegion* pop_unclean_region_list_locked(); | |
809 | |
810 // List of regions which are zero filled and ready for allocation. | |
811 HeapRegion* _free_region_list; | |
812 // Number of elements on the free list. | |
813 size_t _free_region_list_size; | |
814 | |
815 // If the head of the unclean list is ZeroFilled, move it to the free | |
816 // list. | |
817 bool move_cleaned_region_to_free_list_locked(); | |
818 bool move_cleaned_region_to_free_list(); | |
819 | |
820 void put_free_region_on_list_locked(HeapRegion* r); | |
821 void put_free_region_on_list(HeapRegion* r); | |
822 | |
823 // Remove and return the head element of the free list. | |
824 HeapRegion* pop_free_region_list_locked(); | |
825 | |
826 // If "zero_filled" is true, we first try the free list, then we try the | |
827 // unclean list, zero-filling the result. If "zero_filled" is false, we | |
828 // first try the unclean list, then the zero-filled list. | |
829 HeapRegion* alloc_free_region_from_lists(bool zero_filled); | |
830 | |
831 // Verify the integrity of the region lists. | |
832 void remove_allocated_regions_from_lists(); | |
833 bool verify_region_lists(); | |
834 bool verify_region_lists_locked(); | |
835 size_t unclean_region_list_length(); | |
836 size_t free_region_list_length(); | |
837 | |
838 // Perform a collection of the heap; intended for use in implementing | |
839 // "System.gc". This probably implies as full a collection as the | |
840 // "CollectedHeap" supports. | |
841 virtual void collect(GCCause::Cause cause); | |
842 | |
843 // The same as above but assume that the caller holds the Heap_lock. | |
844 void collect_locked(GCCause::Cause cause); | |
845 | |
846 // This interface assumes that it's being called by the | |
847 // vm thread. It collects the heap assuming that the | |
848 // heap lock is already held and that we are executing in | |
849 // the context of the vm thread. | |
850 virtual void collect_as_vm_thread(GCCause::Cause cause); | |
851 | |
852 // True iff an evacuation has failed in the most-recent collection. | |
853 bool evacuation_failed() { return _evacuation_failed; } | |
854 | |
855 // Free a region if it is totally full of garbage. Returns the number of | |
856 // bytes freed (0 ==> didn't free it). | |
857 size_t free_region_if_totally_empty(HeapRegion *hr); | |
858 void free_region_if_totally_empty_work(HeapRegion *hr, | |
859 size_t& pre_used, | |
860 size_t& cleared_h_regions, | |
861 size_t& freed_regions, | |
862 UncleanRegionList* list, | |
863 bool par = false); | |
864 | |
865 // If we've done free region work that yields the given changes, update | |
866 // the relevant global variables. | |
867 void finish_free_region_work(size_t pre_used, | |
868 size_t cleared_h_regions, | |
869 size_t freed_regions, | |
870 UncleanRegionList* list); | |
871 | |
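// (Illustrative sketch added by the editor; not part of the original header.)
// The out-parameter style of free_region_if_totally_empty_work() (and of
// free_region_work() above) supports batching: a caller accumulates the
// totals over many regions and applies them once, presumably roughly like:
//
//   size_t pre_used = 0, cleared_h = 0, freed = 0;
//   UncleanRegionList local_list;   // assumed default-constructible here
//   // for each candidate region hr:
//   //   free_region_if_totally_empty_work(hr, pre_used, cleared_h, freed, &local_list);
//   finish_free_region_work(pre_used, cleared_h, freed, &local_list);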
872 | |
873 // Returns "TRUE" iff "p" points into the allocated area of the heap. | |
874 virtual bool is_in(const void* p) const; | |
875 | |
876 // Return "TRUE" iff the given object address is within the collection | |
877 // set. | |
878 inline bool obj_in_cs(oop obj); | |
879 | |
880 // Return "TRUE" iff the given object address is in the reserved | |
881 // region of g1 (excluding the permanent generation). | |
882 bool is_in_g1_reserved(const void* p) const { | |
883 return _g1_reserved.contains(p); | |
884 } | |
885 | |
886 // Returns a MemRegion that corresponds to the space that has been | |
887 // committed in the heap | |
888 MemRegion g1_committed() { | |
889 return _g1_committed; | |
890 } | |
891 | |
892 NOT_PRODUCT( bool is_in_closed_subset(const void* p) const; ) | |
893 | |
894 // Dirty card table entries covering a list of young regions. | |
895 void dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list); | |
896 | |
897 // This resets the card table to all zeros. It is used after | |
898 // a collection pause which used the card table to claim cards. | |
899 void cleanUpCardTable(); | |
900 | |
901 // Iteration functions. | |
902 | |
903 // Iterate over all the ref-containing fields of all objects, calling | |
904 // "cl.do_oop" on each. | |
905 virtual void oop_iterate(OopClosure* cl); | |
906 | |
907 // Same as above, restricted to a memory region. | |
908 virtual void oop_iterate(MemRegion mr, OopClosure* cl); | |
909 | |
910 // Iterate over all objects, calling "cl.do_object" on each. | |
911 virtual void object_iterate(ObjectClosure* cl); | |
517 | 912 virtual void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); } |
342 | 913 |
914 // Iterate over all objects allocated since the last collection, calling | |
915 // "cl.do_object" on each. The heap must have been initialized properly | |
916 // to support this function, or else this call will fail. | |
917 virtual void object_iterate_since_last_GC(ObjectClosure* cl); | |
918 | |
919 // Iterate over all spaces in use in the heap, in ascending address order. | |
920 virtual void space_iterate(SpaceClosure* cl); | |
921 | |
922 // Iterate over heap regions, in address order, terminating the | |
923 // iteration early if the "doHeapRegion" method returns "true". | |
924 void heap_region_iterate(HeapRegionClosure* blk); | |
925 | |
926 // Iterate over heap regions starting with r (or the first region if "r" | |
927 // is NULL), in address order, terminating early if the "doHeapRegion" | |
928 // method returns "true". | |
929 void heap_region_iterate_from(HeapRegion* r, HeapRegionClosure* blk); | |
930 | |
931 // As above but starting from the region at index idx. | |
932 void heap_region_iterate_from(int idx, HeapRegionClosure* blk); | |
933 | |
934 HeapRegion* region_at(size_t idx); | |
935 | |
936 // Divide the heap region sequence into "chunks" of some size (the number | |
937 // of regions divided by the number of parallel threads times some | |
938 // overpartition factor, currently 4). Assumes that this will be called | |
939 // in parallel by ParallelGCThreads worker threads with discinct worker | |
940 // ids in the range [0..max(ParallelGCThreads-1, 1)], that all parallel | |
941 // calls will use the same "claim_value", and that that claim value is | |
942 // different from the claim_value of any heap region before the start of | |
943 // the iteration. Applies "blk->doHeapRegion" to each of the regions, by | |
944 // attempting to claim the first region in each chunk, and, if | |
945 // successful, applying the closure to each region in the chunk (and | |
946 // setting the claim value of the second and subsequent regions of the | |
947 // chunk.) For now requires that "doHeapRegion" always returns "false", | |
948 // i.e., that a closure never attempt to abort a traversal. | |
949 void heap_region_par_iterate_chunked(HeapRegionClosure* blk, | |
950 int worker, | |
951 jint claim_value); | |
952 | |
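// (Illustrative sketch added by the editor; not part of the original header.)
// Typical use of the chunked parallel iteration above: every worker passes
// its own worker id and one shared claim value that no region held before
// the iteration started, and the claim values are reset afterwards:
//
//   // in each parallel worker's work(i):
//   //   g1h->heap_region_par_iterate_chunked(&blk, i, FreshClaimValue /* hypothetical */);
//   // once all workers are done:
//   //   g1h->reset_heap_region_claim_values();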
390 | 953 // It resets all the region claim values to the default. |
954 void reset_heap_region_claim_values(); | |
955 | |
355 | 956 #ifdef ASSERT |
957 bool check_heap_region_claim_values(jint claim_value); | |
958 #endif // ASSERT | |
959 | |
342 | 960 // Iterate over the regions (if any) in the current collection set. |
961 void collection_set_iterate(HeapRegionClosure* blk); | |
962 | |
963 // As above but starting from region r | |
964 void collection_set_iterate_from(HeapRegion* r, HeapRegionClosure *blk); | |
965 | |
966 // Returns the first (lowest address) compactible space in the heap. | |
967 virtual CompactibleSpace* first_compactible_space(); | |
968 | |
969 // A CollectedHeap will contain some number of spaces. This finds the | |
970 // space containing a given address, or else returns NULL. | |
971 virtual Space* space_containing(const void* addr) const; | |
972 | |
973 // A G1CollectedHeap will contain some number of heap regions. This | |
974 // finds the region containing a given address, or else returns NULL. | |
975 HeapRegion* heap_region_containing(const void* addr) const; | |
976 | |
977 // Like the above, but requires "addr" to be in the heap (to avoid a | |
978 // null-check), and unlike the above, may return a continuing humongous | |
979 // region. | |
980 HeapRegion* heap_region_containing_raw(const void* addr) const; | |
981 | |
982 // A CollectedHeap is divided into a dense sequence of "blocks"; that is, | |
983 // each address in the (reserved) heap is a member of exactly | |
984 // one block. The defining characteristic of a block is that it is | |
985 // possible to find its size, and thus to progress forward to the next | |
986 // block. (Blocks may be of different sizes.) Thus, blocks may | |
987 // represent Java objects, or they might be free blocks in a | |
988 // free-list-based heap (or subheap), as long as the two kinds are | |
989 // distinguishable and the size of each is determinable. | |
990 | |
991 // Returns the address of the start of the "block" that contains the | |
992 // address "addr". We say "blocks" instead of "object" since some heaps | |
993 // may not pack objects densely; a chunk may either be an object or a | |
994 // non-object. | |
995 virtual HeapWord* block_start(const void* addr) const; | |
996 | |
997 // Requires "addr" to be the start of a chunk, and returns its size. | |
998 // "addr + size" is required to be the start of a new chunk, or the end | |
999 // of the active area of the heap. | |
1000 virtual size_t block_size(const HeapWord* addr) const; | |
1001 | |
1002 // Requires "addr" to be the start of a block, and returns "TRUE" iff | |
1003 // the block is an object. | |
1004 virtual bool block_is_obj(const HeapWord* addr) const; | |
1005 | |
1006 // Does this heap support heap inspection? (+PrintClassHistogram) | |
1007 virtual bool supports_heap_inspection() const { return true; } | |
1008 | |
1009 // Section on thread-local allocation buffers (TLABs) | |
1010 // See CollectedHeap for semantics. | |
1011 | |
1012 virtual bool supports_tlab_allocation() const; | |
1013 virtual size_t tlab_capacity(Thread* thr) const; | |
1014 virtual size_t unsafe_max_tlab_alloc(Thread* thr) const; | |
1015 virtual HeapWord* allocate_new_tlab(size_t size); | |
1016 | |
1017 // Can a compiler initialize a new object without store barriers? | |
1018 // This permission only extends from the creation of a new object | |
1019 // via a TLAB up to the first subsequent safepoint. | |
1020 virtual bool can_elide_tlab_store_barriers() const { | |
1021 // Since G1's TLABs may, on occasion, come from non-young regions | |
1022 // as well, the conservative answer is "no". (Is there a flag controlling that? XXX) | |
1023 return false; | |
1024 } | |
1025 | |
1026 // Can a compiler elide a store barrier when it writes | |
1027 // a permanent oop into the heap? Applies when the compiler | |
1028 // is storing x to the heap, where x->is_perm() is true. | |
1029 virtual bool can_elide_permanent_oop_store_barriers() const { | |
1030 // At least until perm gen collection is also G1-ified, at | |
1031 // which point this should return false. | |
1032 return true; | |
1033 } | |
1034 | |
1035 virtual bool allocs_are_zero_filled(); | |
1036 | |
1037 // The boundary between a "large" and "small" array of primitives, in | |
1038 // words. | |
1039 virtual size_t large_typearray_limit(); | |
1040 | |
1041 // All popular objects are guaranteed to have addresses below this | |
1042 // boundary. | |
1043 HeapWord* popular_object_boundary() { | |
1044 return _popular_object_boundary; | |
1045 } | |
1046 | |
1047 // Declare the region as one that should be evacuated because its | |
1048 // remembered set is too large. | |
1049 void schedule_popular_region_evac(HeapRegion* r); | |
1050 // If there is a popular region to evacuate, remove it from the list | |
1051 // and return it. | |
1052 HeapRegion* popular_region_to_evac(); | |
1053 // Evacuate the given popular region. | |
1054 void evac_popular_region(HeapRegion* r); | |
1055 | |
1056 // Returns "true" iff the given word_size is "very large". | |
1057 static bool isHumongous(size_t word_size) { | |
1058 return word_size >= VeryLargeInWords; | |
1059 } | |
1060 | |
1061 // Update mod union table with the set of dirty cards. | |
1062 void updateModUnion(); | |
1063 | |
1064 // Set the mod union bits corresponding to the given memRegion. Note | |
1065 // that this is always a safe operation, since it doesn't clear any | |
1066 // bits. | |
1067 void markModUnionRange(MemRegion mr); | |
1068 | |
1069 // Records the fact that a marking phase is no longer in progress. | |
1070 void set_marking_complete() { | |
1071 _mark_in_progress = false; | |
1072 } | |
1073 void set_marking_started() { | |
1074 _mark_in_progress = true; | |
1075 } | |
1076 bool mark_in_progress() { | |
1077 return _mark_in_progress; | |
1078 } | |
1079 | |
1080 // Print the maximum heap capacity. | |
1081 virtual size_t max_capacity() const; | |
1082 | |
1083 virtual jlong millis_since_last_gc(); | |
1084 | |
1085 // Perform any cleanup actions necessary before allowing a verification. | |
1086 virtual void prepare_for_verify(); | |
1087 | |
1088 // Perform verification. | |
1089 virtual void verify(bool allow_dirty, bool silent); | |
1090 virtual void print() const; | |
1091 virtual void print_on(outputStream* st) const; | |
1092 | |
1093 virtual void print_gc_threads_on(outputStream* st) const; | |
1094 virtual void gc_threads_do(ThreadClosure* tc) const; | |
1095 | |
1096 // Override | |
1097 void print_tracing_info() const; | |
1098 | |
1099 // If "addr" is a pointer into the (reserved?) heap, returns a positive | |
1100 // number indicating the "arena" within the heap in which "addr" falls. | |
1101 // Or else returns 0. | |
1102 virtual int addr_to_arena_id(void* addr) const; | |
1103 | |
1104 // Convenience function to be used in situations where the heap type can be | |
1105 // asserted to be this type. | |
1106 static G1CollectedHeap* heap(); | |
1107 | |
1108 void empty_young_list(); | |
1109 bool should_set_young_locked(); | |
1110 | |
1111 void set_region_short_lived_locked(HeapRegion* hr); | |
1112 // add appropriate methods for any other surv rate groups | |
1113 | |
1114 void young_list_rs_length_sampling_init() { | |
1115 _young_list->rs_length_sampling_init(); | |
1116 } | |
1117 bool young_list_rs_length_sampling_more() { | |
1118 return _young_list->rs_length_sampling_more(); | |
1119 } | |
1120 void young_list_rs_length_sampling_next() { | |
1121 _young_list->rs_length_sampling_next(); | |
1122 } | |
1123 size_t young_list_sampled_rs_lengths() { | |
1124 return _young_list->sampled_rs_lengths(); | |
1125 } | |
1126 | |
1127 size_t young_list_length() { return _young_list->length(); } | |
1128 size_t young_list_scan_only_length() { | |
1129 return _young_list->scan_only_length(); } | |
1130 | |
1131 HeapRegion* pop_region_from_young_list() { | |
1132 return _young_list->pop_region(); | |
1133 } | |
1134 | |
1135 HeapRegion* young_list_first_region() { | |
1136 return _young_list->first_region(); | |
1137 } | |
1138 | |
1139 // debugging | |
1140 bool check_young_list_well_formed() { | |
1141 return _young_list->check_list_well_formed(); | |
1142 } | |
1143 bool check_young_list_empty(bool ignore_scan_only_list, | |
1144 bool check_sample = true); | |
1145 | |
1146 // *** Stuff related to concurrent marking. It's not clear to me that so | |
1147 // many of these need to be public. | |
1148 | |
1149 // The functions below are helper functions that a subclass of | |
1150 // "CollectedHeap" can use in the implementation of its virtual | |
1151 // functions. | |
1152 // This performs a concurrent marking of the live objects in a | |
1153 // bitmap off to the side. | |
1154 void doConcurrentMark(); | |
1155 | |
1156 // This is called from the marksweep collector which then does | |
1157 // a concurrent mark and verifies that the results agree with | |
1158 // the stop the world marking. | |
1159 void checkConcurrentMark(); | |
1160 void do_sync_mark(); | |
1161 | |
1162 bool isMarkedPrev(oop obj) const; | |
1163 bool isMarkedNext(oop obj) const; | |
1164 | |
1165 // Determine if an object is dead, given the object and also | |
1166 // the region to which the object belongs. An object is dead | |
1167 // iff a) it was not allocated since the last mark and b) it | |
1168 // is not marked. | |
1169 | |
1170 bool is_obj_dead(const oop obj, const HeapRegion* hr) const { | |
1171 return | |
1172 !hr->obj_allocated_since_prev_marking(obj) && | |
1173 !isMarkedPrev(obj); | |
1174 } | |
1175 | |
1176 // This is used when copying an object to survivor space. | |
1177 // If the object is marked live, then we mark the copy live. | |
1178 // If the object is allocated since the start of this mark | |
1179 // cycle, then we mark the copy live. | |
1180 // If the object has been around since the previous mark | |
1181 // phase, and hasn't been marked yet during this phase, | |
1182 // then we don't mark it, we just wait for the | |
1183 // current marking cycle to get to it. | |
1184 | |
1185 // This function returns true when an object has been | |
1186 // around since the previous marking and hasn't yet | |
1187 // been marked during this marking. | |
1188 | |
1189 bool is_obj_ill(const oop obj, const HeapRegion* hr) const { | |
1190 return | |
1191 !hr->obj_allocated_since_next_marking(obj) && | |
1192 !isMarkedNext(obj); | |
1193 } | |
1194 | |
1195 // Determine if an object is dead, given only the object itself. | |
1196 // This will find the region to which the object belongs and | |
1197 // then call the region version of the same function. | |
1198 | |
1199 // Added: if it is in the permanent gen it isn't dead. | |
1200 // Added: if it is NULL it isn't dead. | |
1201 | |
1202 bool is_obj_dead(oop obj) { | |
1203 HeapRegion* hr = heap_region_containing(obj); | |
1204 if (hr == NULL) { | |
1205 if (Universe::heap()->is_in_permanent(obj)) | |
1206 return false; | |
1207 else if (obj == NULL) return false; | |
1208 else return true; | |
1209 } | |
1210 else return is_obj_dead(obj, hr); | |
1211 } | |
1212 | |
1213 bool is_obj_ill(oop obj) { | |
1214 HeapRegion* hr = heap_region_containing(obj); | |
1215 if (hr == NULL) { | |
1216 if (Universe::heap()->is_in_permanent(obj)) | |
1217 return false; | |
1218 else if (obj == NULL) return false; | |
1219 else return true; | |
1220 } | |
1221 else return is_obj_ill(obj, hr); | |
1222 } | |
1223 | |
1224 // The following is just to alert the verification code | |
1225 // that a full collection has occurred and that the | |
1226 // remembered sets are no longer up to date. | |
1227 bool _full_collection; | |
1228 void set_full_collection() { _full_collection = true;} | |
1229 void clear_full_collection() {_full_collection = false;} | |
1230 bool full_collection() {return _full_collection;} | |
1231 | |
1232 ConcurrentMark* concurrent_mark() const { return _cm; } | |
1233 ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; } | |
1234 | |
1235 public: | |
1236 void stop_conc_gc_threads(); | |
1237 | |
1238 // <NEW PREDICTION> | |
1239 | |
1240 double predict_region_elapsed_time_ms(HeapRegion* hr, bool young); | |
1241 void check_if_region_is_too_expensive(double predicted_time_ms); | |
1242 size_t pending_card_num(); | |
1243 size_t max_pending_card_num(); | |
1244 size_t cards_scanned(); | |
1245 | |
1246 // </NEW PREDICTION> | |
1247 | |
1248 protected: | |
1249 size_t _max_heap_capacity; | |
1250 | |
1251 // debug_only(static void check_for_valid_allocation_state();) | |
1252 | |
1253 public: | |
1254 // Temporary: call to mark things unimplemented for the G1 heap (e.g., | |
1255 // MemoryService). In productization, we can make this assert false | |
1256 // to catch such places (as well as searching for calls to this...) | |
1257 static void g1_unimplemented(); | |
1258 | |
1259 }; | |
1260 | |
1261 // Local Variables: *** | |
1262 // c-indentation-style: gnu *** | |
1263 // End: *** |