src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp @ 342:37f87013dfd8

6711316: Open source the Garbage-First garbage collector
Summary: First mercurial integration of the code for the Garbage-First garbage collector.
Reviewed-by: apetrusenko, iveresov, jmasa, sgoldman, tonyp, ysr
author: ysr
date: Thu, 05 Jun 2008 15:57:56 -0700
children: 9bb2c10ac07b
/*
 * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
// It uses the "Garbage First" heap organization and algorithm, which
// may combine concurrent marking with parallel, incremental compaction of
// heap subsets that will yield large amounts of garbage.

class HeapRegion;
class HeapRegionSeq;
class HeapRegionList;
class PermanentGenerationSpec;
class GenerationSpec;
class OopsInHeapRegionClosure;
class G1ScanHeapEvacClosure;
class ObjectClosure;
class SpaceClosure;
class CompactibleSpaceClosure;
class Space;
class G1CollectorPolicy;
class GenRemSet;
class G1RemSet;
class HeapRegionRemSetIterator;
class ConcurrentMark;
class ConcurrentMarkThread;
class ConcurrentG1Refine;
class ConcurrentZFThread;

// If you want to accumulate detailed statistics on work queues,
// turn this on.
#define G1_DETAILED_STATS 0

#if G1_DETAILED_STATS
# define IF_G1_DETAILED_STATS(code) code
#else
# define IF_G1_DETAILED_STATS(code)
#endif
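
// A minimal usage sketch for the macro above (a hypothetical call site;
// the "stats" object and its note_push() method are illustrative and not
// part of this header):
//
//   IF_G1_DETAILED_STATS(stats.note_push());
//
// With G1_DETAILED_STATS set to 0 the macro expands to nothing, so the
// statement disappears entirely; with it set to 1, the wrapped statement
// is compiled in as written.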

typedef GenericTaskQueue<oop*> RefToScanQueue;
typedef GenericTaskQueueSet<oop*> RefToScanQueueSet;
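
// A hypothetical sketch of how a worker might drive one of these queues
// (push/pop_local/steal are the standard HotSpot task-queue operations;
// "queues", "worker_i", and "seed" are illustrative names only):
//
//   RefToScanQueue* q = queues->queue(worker_i);
//   oop* ref;
//   while (q->pop_local(ref) || queues->steal(worker_i, &seed, ref)) {
//     // ... scan the reference and push any newly discovered work ...
//   }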

enum G1GCThreadGroups {
  G1CRGroup = 0,
  G1ZFGroup = 1,
  G1CMGroup = 2,
  G1CLGroup = 3
};

enum GCAllocPurpose {
  GCAllocForTenured,
  GCAllocForSurvived,
  GCAllocPurposeCount
};

class YoungList : public CHeapObj {
private:
  G1CollectedHeap* _g1h;

  HeapRegion* _head;

  HeapRegion* _scan_only_head;
  HeapRegion* _scan_only_tail;
  size_t _length;
  size_t _scan_only_length;

  size_t _last_sampled_rs_lengths;
  size_t _sampled_rs_lengths;
  HeapRegion* _curr;
  HeapRegion* _curr_scan_only;

  HeapRegion* _survivor_head;
  HeapRegion* _survivors_tail;
  size_t _survivor_length;

  void empty_list(HeapRegion* list);

public:
  YoungList(G1CollectedHeap* g1h);

  void push_region(HeapRegion* hr);
  void add_survivor_region(HeapRegion* hr);
  HeapRegion* pop_region();
  void empty_list();
  bool is_empty() { return _length == 0; }
  size_t length() { return _length; }
  size_t scan_only_length() { return _scan_only_length; }

  void rs_length_sampling_init();
  bool rs_length_sampling_more();
  void rs_length_sampling_next();

  void reset_sampled_info() {
    _last_sampled_rs_lengths = 0;
  }
  size_t sampled_rs_lengths() { return _last_sampled_rs_lengths; }

  // for development purposes
  void reset_auxilary_lists();
  HeapRegion* first_region() { return _head; }
  HeapRegion* first_scan_only_region() { return _scan_only_head; }
  HeapRegion* first_survivor_region() { return _survivor_head; }
  HeapRegion* par_get_next_scan_only_region() {
    MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
    HeapRegion* ret = _curr_scan_only;
    if (ret != NULL)
      _curr_scan_only = ret->get_next_young_region();
    return ret;
  }

  // debugging
  bool check_list_well_formed();
  bool check_list_empty(bool ignore_scan_only_list,
                        bool check_sample = true);
  void print();
};

class RefineCardTableEntryClosure;
class G1CollectedHeap : public SharedHeap {
  friend class VM_G1CollectForAllocation;
  friend class VM_GenCollectForPermanentAllocation;
  friend class VM_G1CollectFull;
  friend class VM_G1IncCollectionPause;
  friend class VM_G1PopRegionCollectionPause;
  friend class VMStructs;

  // Closures used in implementation.
  friend class G1ParCopyHelper;
  friend class G1IsAliveClosure;
  friend class G1EvacuateFollowersClosure;
  friend class G1ParScanThreadState;
  friend class G1ParScanClosureSuper;
  friend class G1ParEvacuateFollowersClosure;
  friend class G1ParTask;
  friend class G1FreeGarbageRegionClosure;
  friend class RefineCardTableEntryClosure;
  friend class G1PrepareCompactClosure;
  friend class RegionSorter;
  friend class CountRCClosure;
  friend class EvacPopObjClosure;

  // Other related classes.
  friend class G1MarkSweep;

private:
  enum SomePrivateConstants {
    VeryLargeInBytes = HeapRegion::GrainBytes/2,
    VeryLargeInWords = VeryLargeInBytes/HeapWordSize,
    MinHeapDeltaBytes = 10 * HeapRegion::GrainBytes,      // FIXME
    NumAPIs = HeapRegion::MaxAge
  };


  // The one and only G1CollectedHeap, so static functions can find it.
  static G1CollectedHeap* _g1h;

  // Storage for the G1 heap (excludes the permanent generation).
  VirtualSpace _g1_storage;
  MemRegion    _g1_reserved;

  // The part of _g1_storage that is currently committed.
  MemRegion _g1_committed;

  // The maximum part of _g1_storage that has ever been committed.
  MemRegion _g1_max_committed;

  // The number of regions that are completely free.
  size_t _free_regions;

  // The number of regions we could create by expansion.
  size_t _expansion_regions;

  // Return the number of free regions in the heap (by direct counting.)
  size_t count_free_regions();
  // Return the number of free regions on the free and unclean lists.
  size_t count_free_regions_list();

  // The block offset table for the G1 heap.
  G1BlockOffsetSharedArray* _bot_shared;

  // Move all of the regions off the free lists, then rebuild those free
  // lists, before and after full GC.
  void tear_down_region_lists();
  void rebuild_region_lists();
  // This sets all non-empty regions to need zero-fill (which they will
  // need if they are empty after a full collection.)
  void set_used_regions_to_need_zero_fill();

  // The sequence of all heap regions in the heap.
  HeapRegionSeq* _hrs;

  // The region from which normal-sized objects are currently being
  // allocated. May be NULL.
  HeapRegion* _cur_alloc_region;

  // Postcondition: cur_alloc_region == NULL.
  void abandon_cur_alloc_region();

  // The to-space memory regions into which objects are being copied during
  // a GC.
  HeapRegion* _gc_alloc_regions[GCAllocPurposeCount];
  uint _gc_alloc_region_counts[GCAllocPurposeCount];

  // A list of the regions that have been set to be alloc regions in the
  // current collection.
  HeapRegion* _gc_alloc_region_list;

  // When called by a par thread, requires that par_alloc_during_gc_lock()
  // be held.
  void push_gc_alloc_region(HeapRegion* hr);

  // This should only be called single-threaded. Undeclares all GC alloc
  // regions.
  void forget_alloc_region_list();

  // Should be used to set an alloc region, because there's other
  // associated bookkeeping.
  void set_gc_alloc_region(int purpose, HeapRegion* r);

  // Check well-formedness of alloc region list.
  bool check_gc_alloc_regions();

  // Outside of GC pauses, the number of bytes used in all regions other
  // than the current allocation region.
  size_t _summary_bytes_used;

  // Summary information about popular objects; method to print it.
  NumberSeq _pop_obj_rc_at_copy;
  void print_popularity_summary_info() const;

  unsigned _gc_time_stamp;

  size_t* _surviving_young_words;

  void setup_surviving_young_words();
  void update_surviving_young_words(size_t* surv_young_words);
  void cleanup_surviving_young_words();

protected:

  // Returns "true" iff none of the gc alloc regions have any allocations
  // since the last call to "save_marks".
  bool all_alloc_regions_no_allocs_since_save_marks();
  // Calls "note_end_of_copying" on all gc alloc_regions.
  void all_alloc_regions_note_end_of_copying();

  // The number of regions allocated to hold humongous objects.
  int _num_humongous_regions;
  YoungList* _young_list;

  // The current policy object for the collector.
  G1CollectorPolicy* _g1_policy;

  // Parallel allocation lock to protect the current allocation region.
  Mutex _par_alloc_during_gc_lock;
  Mutex* par_alloc_during_gc_lock() { return &_par_alloc_during_gc_lock; }
  // If possible/desirable, allocate a new HeapRegion for normal object
  // allocation sufficient for an allocation of the given "word_size".
  // If "do_expand" is true, will attempt to expand the heap if necessary
  // to satisfy the request. If "zero_filled" is true, requires a
  // zero-filled region.
  // (Returning NULL will trigger a GC.)
  virtual HeapRegion* newAllocRegion_work(size_t word_size,
                                          bool do_expand,
                                          bool zero_filled);

  virtual HeapRegion* newAllocRegion(size_t word_size,
                                     bool zero_filled = true) {
    return newAllocRegion_work(word_size, false, zero_filled);
  }
  virtual HeapRegion* newAllocRegionWithExpansion(int purpose,
                                                  size_t word_size,
                                                  bool zero_filled = true);

  // Attempt to allocate an object of the given (very large) "word_size".
  // Returns "NULL" on failure.
  virtual HeapWord* humongousObjAllocate(size_t word_size);

  // If possible, allocate a block of the given word_size, else return "NULL".
  // Returning NULL will trigger GC or heap expansion.
  // These two methods have rather awkward pre- and
  // post-conditions. If they are called outside a safepoint, then
  // they assume that the caller is holding the heap lock. Upon return
  // they release the heap lock, if they are returning a non-NULL
  // value. attempt_allocation_slow() also dirties the cards of a
  // newly-allocated young region after it releases the heap
  // lock. This change in interface was the neatest way to achieve
  // this card dirtying without affecting mem_allocate(), which is a
  // more frequently called method. We tried two or three different
  // approaches, but they were even more hacky.
  HeapWord* attempt_allocation(size_t word_size,
                               bool permit_collection_pause = true);

  HeapWord* attempt_allocation_slow(size_t word_size,
                                    bool permit_collection_pause = true);
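
  // A hypothetical caller sketch for the protocol described above, outside
  // a safepoint and assuming exactly the locking behavior documented in the
  // comment (this is not the actual mem_allocate() code path):
  //
  //   Heap_lock->lock();
  //   HeapWord* result = attempt_allocation(word_size);
  //   if (result != NULL) {
  //     // Success: attempt_allocation released the Heap_lock on return.
  //   } else {
  //     // NULL return: per the comment above, a GC will be triggered.
  //   }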

  // Allocate blocks during garbage collection. Will ensure an
  // allocation region, either by picking one or expanding the
  // heap, and then allocate a block of the given size. The block
  // may not be humongous - it must fit into a single heap region.
  HeapWord* allocate_during_gc(GCAllocPurpose purpose, size_t word_size);
  HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size);

  HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
                                    HeapRegion* alloc_region,
                                    bool par,
                                    size_t word_size);

  // Ensure that no further allocations can happen in "r", bearing in mind
  // that parallel threads might be attempting allocations.
  void par_allocate_remaining_space(HeapRegion* r);

  // Helper function for two callbacks below.
  // "full", if true, indicates that the GC is for a System.gc() request,
  // and should collect the entire heap. If "clear_all_soft_refs" is true,
  // all soft references are cleared during the GC. If "full" is false,
  // "word_size" describes the allocation that the GC should
  // attempt (at least) to satisfy.
  void do_collection(bool full, bool clear_all_soft_refs,
                     size_t word_size);

  // Callback from VM_G1CollectFull operation.
  // Perform a full collection.
  void do_full_collection(bool clear_all_soft_refs);

  // Resize the heap if necessary after a full collection. If this is
  // after a collect-for-allocation, "word_size" is the allocation size,
  // and will be considered part of the used portion of the heap.
  void resize_if_necessary_after_full_collection(size_t word_size);

  // Callback from VM_G1CollectForAllocation operation.
  // This function does everything necessary/possible to satisfy a
  // failed allocation request (including collection, expansion, etc.)
  HeapWord* satisfy_failed_allocation(size_t word_size);

  // Attempt to expand the heap sufficiently to support an allocation of
  // the given "word_size". If successful, perform the allocation and
  // return the address of the allocated block, or else "NULL".
  virtual HeapWord* expand_and_allocate(size_t word_size);

public:
  // Expand the garbage-first heap by at least the given size (in bytes!).
  // (Rounds up to a HeapRegion boundary.)
  virtual void expand(size_t expand_bytes);

  // Do anything common to GC's.
  virtual void gc_prologue(bool full);
  virtual void gc_epilogue(bool full);

protected:

  // Shrink the garbage-first heap by at most the given size (in bytes!).
  // (Rounds down to a HeapRegion boundary.)
  virtual void shrink(size_t expand_bytes);
  void shrink_helper(size_t expand_bytes);

  // Do an incremental collection: identify a collection set, and evacuate
  // its live objects elsewhere.
  virtual void do_collection_pause();

  // The guts of the incremental collection pause, executed by the vm
  // thread. If "popular_region" is non-NULL, this pause should evacuate
  // this single region whose remembered set has gotten large, moving
  // any popular objects to one of the popular regions.
  virtual void do_collection_pause_at_safepoint(HeapRegion* popular_region);

  // Actually do the work of evacuating the collection set.
  virtual void evacuate_collection_set();

  // If this is an appropriate time, do a collection pause.
  // The "word_size" argument, if non-zero, indicates the size of an
  // allocation request that is prompting this query.
  void do_collection_pause_if_appropriate(size_t word_size);

  // The g1 remembered set of the heap.
  G1RemSet* _g1_rem_set;
  // And its mod ref barrier set, used to track updates for the above.
  ModRefBarrierSet* _mr_bs;

  // The Heap Region Rem Set Iterator.
  HeapRegionRemSetIterator** _rem_set_iterator;

  // The closure used to refine a single card.
  RefineCardTableEntryClosure* _refine_cte_cl;

  // A function to check the consistency of dirty card logs.
  void check_ct_logs_at_safepoint();

  // After a collection pause, make the regions in the CS into free
  // regions.
  void free_collection_set(HeapRegion* cs_head);

  // Applies "scan_non_heap_roots" to roots outside the heap,
  // "scan_rs" to roots inside the heap (having done "set_region" to
  // indicate the region in which the root resides), and does "scan_perm"
  // (setting the generation to the perm generation.) If "scan_rs" is
  // NULL, then this step is skipped. The "worker_i"
  // param is for use with parallel roots processing, and should be
  // the "i" of the calling parallel worker thread's work(i) function.
  // In the sequential case this param will be ignored.
  void g1_process_strong_roots(bool collecting_perm_gen,
                               SharedHeap::ScanningOption so,
                               OopClosure* scan_non_heap_roots,
                               OopsInHeapRegionClosure* scan_rs,
                               OopsInHeapRegionClosure* scan_so,
                               OopsInGenClosure* scan_perm,
                               int worker_i);

  void scan_scan_only_set(OopsInHeapRegionClosure* oc,
                          int worker_i);
  void scan_scan_only_region(HeapRegion* hr,
                             OopsInHeapRegionClosure* oc,
                             int worker_i);

  // Apply "blk" to all the weak roots of the system. These include
  // JNI weak roots, the code cache, system dictionary, symbol table,
  // string table, and referents of reachable weak refs.
  void g1_process_weak_roots(OopClosure* root_closure,
                             OopClosure* non_root_closure);

  // Invoke "save_marks" on all heap regions.
  void save_marks();

  // Free a heap region.
  void free_region(HeapRegion* hr);
  // A component of "free_region", exposed for 'batching'.
  // All the params after "hr" are out params: the used bytes of the freed
  // region(s), the number of H regions cleared, the number of regions
  // freed, and pointers to the head and tail of a list of freed contig
  // regions, linked through the "next_on_unclean_list" field.
  void free_region_work(HeapRegion* hr,
                        size_t& pre_used,
                        size_t& cleared_h,
                        size_t& freed_regions,
                        UncleanRegionList* list,
                        bool par = false);
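
  // A hypothetical 'batching' sketch using the out params above together
  // with finish_free_region_work(), declared later in this class (the
  // regions_to_free iteration is illustrative pseudocode):
  //
  //   size_t pre_used = 0, cleared_h = 0, freed_regions = 0;
  //   UncleanRegionList list;
  //   for each HeapRegion* hr in regions_to_free:
  //     free_region_work(hr, pre_used, cleared_h, freed_regions, &list);
  //   finish_free_region_work(pre_used, cleared_h, freed_regions, &list);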


  // The concurrent marker (and the thread it runs in.)
  ConcurrentMark* _cm;
  ConcurrentMarkThread* _cmThread;
  bool _mark_in_progress;

  // The concurrent refiner.
  ConcurrentG1Refine* _cg1r;

  // The concurrent zero-fill thread.
  ConcurrentZFThread* _czft;

  // The parallel task queues
  RefToScanQueueSet *_task_queues;

  // True iff an evacuation has failed in the current collection.
  bool _evacuation_failed;

  // Set the attribute indicating whether evacuation has failed in the
  // current collection.
  void set_evacuation_failed(bool b) { _evacuation_failed = b; }

  // Failed evacuations cause some logical from-space objects to have
  // forwarding pointers to themselves. Reset them.
  void remove_self_forwarding_pointers();

  // When one is non-null, so is the other. Together, each pair is an
  // object with a preserved mark, and its mark value.
  GrowableArray<oop>* _objs_with_preserved_marks;
  GrowableArray<markOop>* _preserved_marks_of_objs;

  // Preserve the mark of "obj", if necessary, in preparation for its mark
  // word being overwritten with a self-forwarding-pointer.
  void preserve_mark_if_necessary(oop obj, markOop m);

  // The stack of evac-failure objects left to be scanned.
  GrowableArray<oop>* _evac_failure_scan_stack;

  // The closure to apply to evac-failure objects.
  OopsInHeapRegionClosure* _evac_failure_closure;
  // Set the field above.
  void
  set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_closure) {
    _evac_failure_closure = evac_failure_closure;
  }

  // Push "obj" on the scan stack.
  void push_on_evac_failure_scan_stack(oop obj);
  // Process scan stack entries until the stack is empty.
  void drain_evac_failure_scan_stack();
  // True iff an invocation of "drain_evac_failure_scan_stack" is in
  // progress; to prevent unnecessary recursion.
  bool _drain_in_progress;

  // Do any necessary initialization for evacuation-failure handling.
  // "cl" is the closure that will be used to process evac-failure
  // objects.
  void init_for_evac_failure(OopsInHeapRegionClosure* cl);
  // Do any necessary cleanup for evacuation-failure handling data
  // structures.
  void finalize_for_evac_failure();

  // An attempt to evacuate "obj" has failed; take necessary steps.
  void handle_evacuation_failure(oop obj);
  oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj);
  void handle_evacuation_failure_common(oop obj, markOop m);


  // Ensure that the relevant gc_alloc regions are set.
  void get_gc_alloc_regions();
  // We're done with GC alloc regions; release them, as appropriate.
  void release_gc_alloc_regions();

  // ("Weak") Reference processing support
  ReferenceProcessor* _ref_processor;

  enum G1H_process_strong_roots_tasks {
    G1H_PS_mark_stack_oops_do,
    G1H_PS_refProcessor_oops_do,
    // Leave this one last.
    G1H_PS_NumElements
  };

  SubTasksDone* _process_strong_tasks;

  // Allocate space to hold a popular object. Result is guaranteed below
  // "popular_object_boundary()". Note: CURRENTLY halts the system if we
  // run out of space to hold popular objects.
  HeapWord* allocate_popular_object(size_t word_size);

  // The boundary between popular and non-popular objects.
  HeapWord* _popular_object_boundary;

  HeapRegionList* _popular_regions_to_be_evacuated;

  // Compute which objects in "single_region" are popular. If any are,
  // evacuate them to a popular region, leaving behind forwarding pointers,
  // and select "popular_region" as the single collection set region.
  // Otherwise, leave the collection set null.
  void popularity_pause_preamble(HeapRegion* populer_region);

  // Compute which objects in "single_region" are popular, and evacuate
  // them to a popular region, leaving behind forwarding pointers.
  // Returns "true" if at least one popular object is discovered and
  // evacuated. In any case, "*max_rc" is set to the maximum reference
  // count of an object in the region.
  bool compute_reference_counts_and_evac_popular(HeapRegion* populer_region,
                                                 size_t* max_rc);
  // Subroutines used in the above.
  bool _rc_region_above;
  size_t _rc_region_diff;
  jint* obj_rc_addr(oop obj) {
    uintptr_t obj_addr = (uintptr_t)obj;
    if (_rc_region_above) {
      jint* res = (jint*)(obj_addr + _rc_region_diff);
      assert((uintptr_t)res > obj_addr, "RC region is above.");
      return res;
    } else {
      jint* res = (jint*)(obj_addr - _rc_region_diff);
      assert((uintptr_t)res < obj_addr, "RC region is below.");
      return res;
    }
  }
  jint obj_rc(oop obj) {
    return *obj_rc_addr(obj);
  }
  void inc_obj_rc(oop obj) {
    (*obj_rc_addr(obj))++;
  }
  void atomic_inc_obj_rc(oop obj);
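
  // A worked example of the address arithmetic above (the numbers are
  // illustrative, not taken from this file): with _rc_region_above == true
  // and _rc_region_diff == 0x400000, an object at address 0x1000 has its
  // reference-count slot at 0x1000 + 0x400000 == 0x401000. With
  // _rc_region_above == false, the slot would instead sit below the object,
  // at obj_addr - _rc_region_diff.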


  // Number of popular objects and bytes (latter is cheaper!).
  size_t pop_object_used_objs();
  size_t pop_object_used_bytes();

  // Index of the popular region in which allocation is currently being
  // done.
  int _cur_pop_hr_index;

  // List of regions which require zero filling.
  UncleanRegionList _unclean_region_list;
  bool _unclean_regions_coming;

  bool check_age_cohort_well_formed_work(int a, HeapRegion* hr);

public:
  void set_refine_cte_cl_concurrency(bool concurrent);

  RefToScanQueue *task_queue(int i);

  // Create a G1CollectedHeap with the specified policy.
  // Must call the initialize method afterwards.
  // May not return if something goes wrong.
  G1CollectedHeap(G1CollectorPolicy* policy);

  // Initialize the G1CollectedHeap to have the initial and
  // maximum sizes, permanent generation, and remembered and barrier sets
  // specified by the policy object.
  jint initialize();

  void ref_processing_init();

  void set_par_threads(int t) {
    SharedHeap::set_par_threads(t);
    _process_strong_tasks->set_par_threads(t);
  }

  virtual CollectedHeap::Name kind() const {
    return CollectedHeap::G1CollectedHeap;
  }

  // The current policy object for the collector.
  G1CollectorPolicy* g1_policy() const { return _g1_policy; }

  // Adaptive size policy. No such thing for g1.
  virtual AdaptiveSizePolicy* size_policy() { return NULL; }

  // The rem set and barrier set.
  G1RemSet* g1_rem_set() const { return _g1_rem_set; }
  ModRefBarrierSet* mr_bs() const { return _mr_bs; }

  // The rem set iterator.
  HeapRegionRemSetIterator* rem_set_iterator(int i) {
    return _rem_set_iterator[i];
  }

  HeapRegionRemSetIterator* rem_set_iterator() {
    return _rem_set_iterator[0];
  }

  unsigned get_gc_time_stamp() {
    return _gc_time_stamp;
  }

  void reset_gc_time_stamp() {
    _gc_time_stamp = 0;
  }

  void iterate_dirty_card_closure(bool concurrent, int worker_i);

  // The shared block offset table array.
  G1BlockOffsetSharedArray* bot_shared() const { return _bot_shared; }

  // Reference Processing accessor
  ReferenceProcessor* ref_processor() { return _ref_processor; }

  // Reserved (g1 only; super method includes perm), capacity and the used
  // portion in bytes.
  size_t g1_reserved_obj_bytes() { return _g1_reserved.byte_size(); }
  virtual size_t capacity() const;
  virtual size_t used() const;
  size_t recalculate_used() const;
#ifndef PRODUCT
  size_t recalculate_used_regions() const;
#endif // PRODUCT

  // These virtual functions do the actual allocation.
  virtual HeapWord* mem_allocate(size_t word_size,
                                 bool is_noref,
                                 bool is_tlab,
                                 bool* gc_overhead_limit_was_exceeded);

  // Some heaps may offer a contiguous region for shared non-blocking
  // allocation, via inlined code (by exporting the address of the top and
  // end fields defining the extent of the contiguous allocation region.)
  // But G1CollectedHeap doesn't yet support this.

  // Return an estimate of the maximum allocation that could be performed
  // without triggering any collection or expansion activity. In a
  // generational collector, for example, this is probably the largest
  // allocation that could be supported (without expansion) in the youngest
  // generation. It is "unsafe" because no locks are taken; the result
  // should be treated as an approximation, not a guarantee, for use in
  // heuristic resizing decisions.
  virtual size_t unsafe_max_alloc();

  virtual bool is_maximal_no_gc() const {
    return _g1_storage.uncommitted_size() == 0;
  }

  // The total number of regions in the heap.
  size_t n_regions();

  // The max number of regions in the heap.
  size_t max_regions();

  // The number of regions that are completely free.
  size_t free_regions();

  // The number of regions that are not completely free.
  size_t used_regions() { return n_regions() - free_regions(); }

  // True iff the ZF thread should run.
  bool should_zf();

  // The number of regions available for "regular" expansion.
  size_t expansion_regions() { return _expansion_regions; }

#ifndef PRODUCT
  bool regions_accounted_for();
  bool print_region_accounting_info();
  void print_region_counts();
#endif

  HeapRegion* alloc_region_from_unclean_list(bool zero_filled);
  HeapRegion* alloc_region_from_unclean_list_locked(bool zero_filled);

  void put_region_on_unclean_list(HeapRegion* r);
  void put_region_on_unclean_list_locked(HeapRegion* r);

  void prepend_region_list_on_unclean_list(UncleanRegionList* list);
  void prepend_region_list_on_unclean_list_locked(UncleanRegionList* list);

  void set_unclean_regions_coming(bool b);
  void set_unclean_regions_coming_locked(bool b);
  // Wait for cleanup to be complete.
  void wait_for_cleanup_complete();
  // Like above, but assumes that the calling thread owns the Heap_lock.
  void wait_for_cleanup_complete_locked();

  // Return the head of the unclean list.
  HeapRegion* peek_unclean_region_list_locked();
  // Remove and return the head of the unclean list.
  HeapRegion* pop_unclean_region_list_locked();

  // List of regions which are zero filled and ready for allocation.
  HeapRegion* _free_region_list;
  // Number of elements on the free list.
  size_t _free_region_list_size;

  // If the head of the unclean list is ZeroFilled, move it to the free
  // list.
  bool move_cleaned_region_to_free_list_locked();
  bool move_cleaned_region_to_free_list();

  void put_free_region_on_list_locked(HeapRegion* r);
  void put_free_region_on_list(HeapRegion* r);

  // Remove and return the head element of the free list.
  HeapRegion* pop_free_region_list_locked();

  // If "zero_filled" is true, we first try the free list, then we try the
  // unclean list, zero-filling the result. If "zero_filled" is false, we
  // first try the unclean list, then the zero-filled list.
  HeapRegion* alloc_free_region_from_lists(bool zero_filled);

  // Verify the integrity of the region lists.
  void remove_allocated_regions_from_lists();
  bool verify_region_lists();
  bool verify_region_lists_locked();
  size_t unclean_region_list_length();
  size_t free_region_list_length();

  // Perform a collection of the heap; intended for use in implementing
  // "System.gc". This probably implies as full a collection as the
  // "CollectedHeap" supports.
  virtual void collect(GCCause::Cause cause);

  // The same as above but assume that the caller holds the Heap_lock.
  void collect_locked(GCCause::Cause cause);

  // This interface assumes that it's being called by the
  // vm thread. It collects the heap assuming that the
  // heap lock is already held and that we are executing in
  // the context of the vm thread.
  virtual void collect_as_vm_thread(GCCause::Cause cause);

  // True iff an evacuation has failed in the most-recent collection.
  bool evacuation_failed() { return _evacuation_failed; }

  // Free a region if it is totally full of garbage. Returns the number of
  // bytes freed (0 ==> didn't free it).
  size_t free_region_if_totally_empty(HeapRegion *hr);
  void free_region_if_totally_empty_work(HeapRegion *hr,
                                         size_t& pre_used,
                                         size_t& cleared_h_regions,
                                         size_t& freed_regions,
                                         UncleanRegionList* list,
                                         bool par = false);

  // If we've done free region work that yields the given changes, update
  // the relevant global variables.
  void finish_free_region_work(size_t pre_used,
                               size_t cleared_h_regions,
                               size_t freed_regions,
                               UncleanRegionList* list);


  // Returns "TRUE" iff "p" points into the allocated area of the heap.
  virtual bool is_in(const void* p) const;

  // Return "TRUE" iff the given object address is within the collection
  // set.
  inline bool obj_in_cs(oop obj);

  // Return "TRUE" iff the given object address is in the reserved
  // region of g1 (excluding the permanent generation).
  bool is_in_g1_reserved(const void* p) const {
    return _g1_reserved.contains(p);
  }

  // Returns a MemRegion that corresponds to the space that has been
  // committed in the heap
  MemRegion g1_committed() {
    return _g1_committed;
  }

  NOT_PRODUCT( bool is_in_closed_subset(const void* p) const; )

  // Dirty card table entries covering a list of young regions.
  void dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list);

  // This resets the card table to all zeros. It is used after
  // a collection pause which used the card table to claim cards.
  void cleanUpCardTable();

  // Iteration functions.

  // Iterate over all the ref-containing fields of all objects, calling
  // "cl.do_oop" on each.
  virtual void oop_iterate(OopClosure* cl);

  // Same as above, restricted to a memory region.
  virtual void oop_iterate(MemRegion mr, OopClosure* cl);

  // Iterate over all objects, calling "cl.do_object" on each.
  virtual void object_iterate(ObjectClosure* cl);

  // Iterate over all objects allocated since the last collection, calling
  // "cl.do_object" on each. The heap must have been initialized properly
  // to support this function, or else this call will fail.
  virtual void object_iterate_since_last_GC(ObjectClosure* cl);

  // Iterate over all spaces in use in the heap, in ascending address order.
  virtual void space_iterate(SpaceClosure* cl);

  // Iterate over heap regions, in address order, terminating the
  // iteration early if the "doHeapRegion" method returns "true".
  void heap_region_iterate(HeapRegionClosure* blk);

  // Iterate over heap regions starting with r (or the first region if "r"
  // is NULL), in address order, terminating early if the "doHeapRegion"
  // method returns "true".
  void heap_region_iterate_from(HeapRegion* r, HeapRegionClosure* blk);

  // As above but starting from the region at index idx.
  void heap_region_iterate_from(int idx, HeapRegionClosure* blk);

  HeapRegion* region_at(size_t idx);


  // Divide the heap region sequence into "chunks" of some size (the number
  // of regions divided by the number of parallel threads times some
  // overpartition factor, currently 4). Assumes that this will be called
  // in parallel by ParallelGCThreads worker threads with distinct worker
  // ids in the range [0..max(ParallelGCThreads-1, 1)], that all parallel
  // calls will use the same "claim_value", and that that claim value is
  // different from the claim_value of any heap region before the start of
  // the iteration. Applies "blk->doHeapRegion" to each of the regions, by
  // attempting to claim the first region in each chunk, and, if
  // successful, applying the closure to each region in the chunk (and
  // setting the claim value of the second and subsequent regions of the
  // chunk.) For now requires that "doHeapRegion" always returns "false",
  // i.e., that a closure never attempt to abort a traversal.
  void heap_region_par_iterate_chunked(HeapRegionClosure* blk,
                                       int worker,
                                       jint claim_value);
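
  // A hypothetical worker-side sketch of the contract described above (the
  // closure type and variable names are illustrative only):
  //
  //   class CountUsedClosure : public HeapRegionClosure {
  //   public:
  //     size_t _used;
  //     CountUsedClosure() : _used(0) {}
  //     bool doHeapRegion(HeapRegion* hr) {
  //       _used += hr->used();
  //       return false;  // required: never abort the traversal
  //     }
  //   };
  //   // In each of the parallel workers, with a common claim value:
  //   CountUsedClosure cl;
  //   g1h->heap_region_par_iterate_chunked(&cl, worker_i, claim_value);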

  // Iterate over the regions (if any) in the current collection set.
  void collection_set_iterate(HeapRegionClosure* blk);

  // As above but starting from region r
  void collection_set_iterate_from(HeapRegion* r, HeapRegionClosure *blk);

  // Returns the first (lowest address) compactible space in the heap.
  virtual CompactibleSpace* first_compactible_space();

  // A CollectedHeap will contain some number of spaces. This finds the
  // space containing a given address, or else returns NULL.
  virtual Space* space_containing(const void* addr) const;

  // A G1CollectedHeap will contain some number of heap regions. This
  // finds the region containing a given address, or else returns NULL.
  HeapRegion* heap_region_containing(const void* addr) const;

  // Like the above, but requires "addr" to be in the heap (to avoid a
  // null-check), and unlike the above, may return a continuing humongous
  // region.
  HeapRegion* heap_region_containing_raw(const void* addr) const;

  // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
  // each address in the (reserved) heap is a member of exactly
  // one block. The defining characteristic of a block is that it is
  // possible to find its size, and thus to progress forward to the next
  // block. (Blocks may be of different sizes.) Thus, blocks may
  // represent Java objects, or they might be free blocks in a
  // free-list-based heap (or subheap), as long as the two kinds are
  // distinguishable and the size of each is determinable.

  // Returns the address of the start of the "block" that contains the
  // address "addr". We say "blocks" instead of "objects" since some heaps
  // may not pack objects densely; a chunk may either be an object or a
  // non-object.
  virtual HeapWord* block_start(const void* addr) const;

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const;

  // Does this heap support heap inspection? (+PrintClassHistogram)
  virtual bool supports_heap_inspection() const { return true; }

  // Section on thread-local allocation buffers (TLABs)
  // See CollectedHeap for semantics.

  virtual bool supports_tlab_allocation() const;
  virtual size_t tlab_capacity(Thread* thr) const;
  virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
  virtual HeapWord* allocate_new_tlab(size_t size);

  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint.
  virtual bool can_elide_tlab_store_barriers() const {
    // Since G1's TLABs may, on occasion, come from non-young regions
    // as well. (Is there a flag controlling that? XXX)
    return false;
  }

  // Can a compiler elide a store barrier when it writes
  // a permanent oop into the heap? Applies when the compiler
  // is storing x to the heap, where x->is_perm() is true.
  virtual bool can_elide_permanent_oop_store_barriers() const {
    // At least until perm gen collection is also G1-ified, at
    // which point this should return false.
    return true;
  }

  virtual bool allocs_are_zero_filled();

  // The boundary between a "large" and "small" array of primitives, in
  // words.
  virtual size_t large_typearray_limit();

  // All popular objects are guaranteed to have addresses below this
  // boundary.
  HeapWord* popular_object_boundary() {
    return _popular_object_boundary;
  }

  // Declare the region as one that should be evacuated because its
  // remembered set is too large.
  void schedule_popular_region_evac(HeapRegion* r);
  // If there is a popular region to evacuate, remove it from the list
  // and return it.
  HeapRegion* popular_region_to_evac();
  // Evacuate the given popular region.
  void evac_popular_region(HeapRegion* r);

  // Returns "true" iff the given word_size is "very large".
  static bool isHumongous(size_t word_size) {
    return word_size >= VeryLargeInWords;
  }
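
  // A worked example, assuming (hypothetically) HeapRegion::GrainBytes of
  // 1M on a 64-bit VM (HeapWordSize == 8): VeryLargeInBytes == 512K, so
  // VeryLargeInWords == 64K words, and any allocation of at least 64K words
  // (half a region) is treated as humongous.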

  // Update mod union table with the set of dirty cards.
  void updateModUnion();

  // Set the mod union bits corresponding to the given memRegion. Note
  // that this is always a safe operation, since it doesn't clear any
  // bits.
  void markModUnionRange(MemRegion mr);

  // Records the fact that a marking phase is no longer in progress.
  void set_marking_complete() {
    _mark_in_progress = false;
  }
  void set_marking_started() {
    _mark_in_progress = true;
  }
  bool mark_in_progress() {
    return _mark_in_progress;
  }

  // Return the maximum heap capacity.
  virtual size_t max_capacity() const;

  virtual jlong millis_since_last_gc();

  // Perform any cleanup actions necessary before allowing a verification.
  virtual void prepare_for_verify();

  // Perform verification.
  virtual void verify(bool allow_dirty, bool silent);
  virtual void print() const;
  virtual void print_on(outputStream* st) const;

  virtual void print_gc_threads_on(outputStream* st) const;
  virtual void gc_threads_do(ThreadClosure* tc) const;

  // Override
  void print_tracing_info() const;

  // If "addr" is a pointer into the (reserved?) heap, returns a positive
  // number indicating the "arena" within the heap in which "addr" falls.
  // Or else returns 0.
  virtual int addr_to_arena_id(void* addr) const;

  // Convenience function to be used in situations where the heap type can be
  // asserted to be this type.
  static G1CollectedHeap* heap();

  void empty_young_list();
  bool should_set_young_locked();

  void set_region_short_lived_locked(HeapRegion* hr);
  // add appropriate methods for any other surv rate groups

  void young_list_rs_length_sampling_init() {
    _young_list->rs_length_sampling_init();
  }
  bool young_list_rs_length_sampling_more() {
    return _young_list->rs_length_sampling_more();
  }
  void young_list_rs_length_sampling_next() {
    _young_list->rs_length_sampling_next();
  }
  size_t young_list_sampled_rs_lengths() {
    return _young_list->sampled_rs_lengths();
  }
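
  // A minimal sampling loop, as a hypothetical illustration of how the
  // wrappers above are intended to be driven (e.g., by a periodic sampling
  // thread):
  //
  //   g1h->young_list_rs_length_sampling_init();
  //   while (g1h->young_list_rs_length_sampling_more()) {
  //     g1h->young_list_rs_length_sampling_next();
  //   }
  //   size_t rs_lengths = g1h->young_list_sampled_rs_lengths();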

  size_t young_list_length() { return _young_list->length(); }
  size_t young_list_scan_only_length() {
    return _young_list->scan_only_length(); }

  HeapRegion* pop_region_from_young_list() {
    return _young_list->pop_region();
  }

  HeapRegion* young_list_first_region() {
    return _young_list->first_region();
  }

  // debugging
  bool check_young_list_well_formed() {
    return _young_list->check_list_well_formed();
  }
  bool check_young_list_empty(bool ignore_scan_only_list,
                              bool check_sample = true);

  // *** Stuff related to concurrent marking. It's not clear to me that so
  // many of these need to be public.

  // The functions below are helper functions that a subclass of
  // "CollectedHeap" can use in the implementation of its virtual
  // functions.
  // This performs a concurrent marking of the live objects in a
  // bitmap off to the side.
  void doConcurrentMark();

  // This is called from the marksweep collector which then does
  // a concurrent mark and verifies that the results agree with
  // the stop the world marking.
  void checkConcurrentMark();
  void do_sync_mark();

  bool isMarkedPrev(oop obj) const;
  bool isMarkedNext(oop obj) const;

  // Determine if an object is dead, given the object and also
  // the region to which the object belongs. An object is dead
  // iff a) it was not allocated since the last mark and b) it
  // is not marked.

  bool is_obj_dead(const oop obj, const HeapRegion* hr) const {
    return
      !hr->obj_allocated_since_prev_marking(obj) &&
      !isMarkedPrev(obj);
  }

  // This is used when copying an object to survivor space.
  // If the object is marked live, then we mark the copy live.
  // If the object is allocated since the start of this mark
  // cycle, then we mark the copy live.
  // If the object has been around since the previous mark
  // phase, and hasn't been marked yet during this phase,
  // then we don't mark it, we just wait for the
  // current marking cycle to get to it.

  // This function returns true when an object has been
  // around since the previous marking and hasn't yet
  // been marked during this marking.

  bool is_obj_ill(const oop obj, const HeapRegion* hr) const {
    return
      !hr->obj_allocated_since_next_marking(obj) &&
      !isMarkedNext(obj);
  }

  // Determine if an object is dead, given only the object itself.
  // This will find the region to which the object belongs and
  // then call the region version of the same function.

  // Added: if it is in the permanent gen, it isn't dead.
  // Added: if it is NULL, it isn't dead.

  bool is_obj_dead(oop obj) {
    HeapRegion* hr = heap_region_containing(obj);
    if (hr == NULL) {
      if (Universe::heap()->is_in_permanent(obj))
        return false;
      else if (obj == NULL) return false;
      else return true;
    }
    else return is_obj_dead(obj, hr);
  }

  bool is_obj_ill(oop obj) {
    HeapRegion* hr = heap_region_containing(obj);
    if (hr == NULL) {
      if (Universe::heap()->is_in_permanent(obj))
        return false;
      else if (obj == NULL) return false;
      else return true;
    }
    else return is_obj_ill(obj, hr);
  }

  // The following is just to alert the verification code
  // that a full collection has occurred and that the
  // remembered sets are no longer up to date.
  bool _full_collection;
  void set_full_collection() { _full_collection = true; }
  void clear_full_collection() { _full_collection = false; }
  bool full_collection() { return _full_collection; }

  ConcurrentMark* concurrent_mark() const { return _cm; }
  ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; }

public:
  void stop_conc_gc_threads();

  // <NEW PREDICTION>

  double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);
  void check_if_region_is_too_expensive(double predicted_time_ms);
  size_t pending_card_num();
  size_t max_pending_card_num();
  size_t cards_scanned();

  // </NEW PREDICTION>

protected:
  size_t _max_heap_capacity;

  // debug_only(static void check_for_valid_allocation_state();)

public:
  // Temporary: call to mark things unimplemented for the G1 heap (e.g.,
  // MemoryService). In productization, we can make this assert false
  // to catch such places (as well as searching for calls to this...)
  static void g1_unimplemented();

};

// Local Variables: ***
// c-indentation-style: gnu ***
// End: ***