comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp @ 10408:836a62f43af9

Merge with http://hg.openjdk.java.net/hsx/hsx25/hotspot/
author Doug Simon <doug.simon@oracle.com>
date Wed, 19 Jun 2013 10:45:56 +0200
parents 89e4d67fdd2a f2110083203d
children 6b0fd0964b87
comparing 10086:e0fb8a213650 with 10408:836a62f43af9
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.
@@ -24,14 +24,16 @@
 
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
 
 #include "gc_implementation/g1/concurrentMark.hpp"
+#include "gc_implementation/g1/evacuationInfo.hpp"
 #include "gc_implementation/g1/g1AllocRegion.hpp"
 #include "gc_implementation/g1/g1HRPrinter.hpp"
+#include "gc_implementation/g1/g1MonitoringSupport.hpp"
 #include "gc_implementation/g1/g1RemSet.hpp"
-#include "gc_implementation/g1/g1MonitoringSupport.hpp"
+#include "gc_implementation/g1/g1YCTypes.hpp"
 #include "gc_implementation/g1/heapRegionSeq.hpp"
 #include "gc_implementation/g1/heapRegionSets.hpp"
 #include "gc_implementation/shared/hSpaceCounters.hpp"
 #include "gc_implementation/shared/parGCAllocBuffer.hpp"
 #include "memory/barrierSet.hpp"
@@ -59,11 +61,16 @@
 class G1RemSet;
 class HeapRegionRemSetIterator;
 class ConcurrentMark;
 class ConcurrentMarkThread;
 class ConcurrentG1Refine;
+class ConcurrentGCTimer;
 class GenerationCounters;
+class STWGCTimer;
+class G1NewTracer;
+class G1OldTracer;
+class EvacuationFailedInfo;
 
 typedef OverflowTaskQueue<StarTask, mtGC> RefToScanQueue;
 typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
 
 typedef int RegionIdx_t;   // needs to hold [ 0..max_regions() )
@@ -158,16 +165,15 @@
 
 // The G1 STW is alive closure.
 // An instance is embedded into the G1CH and used as the
 // (optional) _is_alive_non_header closure in the STW
 // reference processor. It is also extensively used during
-// refence processing during STW evacuation pauses.
+// reference processing during STW evacuation pauses.
 class G1STWIsAliveClosure: public BoolObjectClosure {
   G1CollectedHeap* _g1;
 public:
   G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
-  void do_object(oop p) { assert(false, "Do not call."); }
   bool do_object_b(oop p);
 };
 
 class SurvivorGCAllocRegion : public G1AllocRegion {
 protected:
@@ -322,14 +328,14 @@
 
   // It releases the mutator alloc region.
   void release_mutator_alloc_region();
 
   // It initializes the GC alloc regions at the start of a GC.
-  void init_gc_alloc_regions();
+  void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
 
   // It releases the GC alloc regions at the end of a GC.
-  void release_gc_alloc_regions(uint no_of_gc_workers);
+  void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
 
   // It does any cleanup that needs to be done on the GC alloc regions
   // before a Full GC.
   void abandon_gc_alloc_regions();
 
@@ -387,10 +393,12 @@
   volatile unsigned int _old_marking_cycles_started;
 
   // Keeps track of how many "old marking cycles" (i.e., Full GCs or
   // concurrent cycles) we have completed.
   volatile unsigned int _old_marking_cycles_completed;
+
+  bool _concurrent_cycle_started;
 
   // This is a non-product method that is helpful for testing. It is
   // called at the end of a GC and artificially expands the heap by
   // allocating a number of dead regions. This way we can induce very
   // frequent marking cycles and stress the cleanup / concurrent
@@ -592,15 +600,10 @@
   // allocation region, either by picking one or expanding the
   // heap, and then allocate a block of the given size. The block
   // may not be a humongous - it must fit into a single heap region.
   HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size);
 
-  HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
-                                    HeapRegion*    alloc_region,
-                                    bool           par,
-                                    size_t         word_size);
-
   // Ensure that no further allocations can happen in "r", bearing in mind
   // that parallel threads might be attempting allocations.
   void par_allocate_remaining_space(HeapRegion* r);
 
   // Allocation attempt during GC for a survivor object / PLAB.
@@ -744,10 +747,16 @@
 
   unsigned int old_marking_cycles_completed() {
     return _old_marking_cycles_completed;
   }
 
+  void register_concurrent_cycle_start(jlong start_time);
+  void register_concurrent_cycle_end();
+  void trace_heap_after_concurrent_cycle();
+
+  G1YCType yc_type();
+
   G1HRPrinter* hr_printer() { return &_hr_printer; }
 
 protected:
 
   // Shrink the garbage-first heap by at most the given size (in bytes!).
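Note (editorial, not part of the changeset): these hooks pair with the _concurrent_cycle_started flag added at new line 399 and with the _gc_timer_cm/_gc_tracer_cm fields added further down. Together they appear to let the heap report the start and end of a concurrent marking cycle to the GC tracing framework, with yc_type() identifying which kind of young collection is in progress.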
@@ -779,23 +788,20 @@
   // thread. It returns false if it is unable to do the collection due
   // to the GC locker being active, true otherwise
   bool do_collection_pause_at_safepoint(double target_pause_time_ms);
 
   // Actually do the work of evacuating the collection set.
-  void evacuate_collection_set();
+  void evacuate_collection_set(EvacuationInfo& evacuation_info);
 
   // The g1 remembered set of the heap.
   G1RemSet* _g1_rem_set;
   // And it's mod ref barrier set, used to track updates for the above.
   ModRefBarrierSet* _mr_bs;
 
   // A set of cards that cover the objects for which the Rsets should be updated
   // concurrently after the collection.
   DirtyCardQueueSet _dirty_card_queue_set;
-
-  // The Heap Region Rem Set Iterator.
-  HeapRegionRemSetIterator** _rem_set_iterator;
 
   // The closure used to refine a single card.
   RefineCardTableEntryClosure* _refine_cte_cl;
 
   // A function to check the consistency of dirty card logs.
@@ -807,11 +813,11 @@
   // set in the event of an evacuation failure.
   DirtyCardQueueSet _into_cset_dirty_card_queue_set;
 
   // After a collection pause, make the regions in the CS into free
   // regions.
-  void free_collection_set(HeapRegion* cs_head);
+  void free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info);
 
   // Abandon the current collection set without recording policy
   // statistics or updating free lists.
   void abandon_collection_set(HeapRegion* cs_head);
 
@@ -831,12 +837,11 @@
                                int worker_i);
 
   // Apply "blk" to all the weak roots of the system. These include
   // JNI weak roots, the code cache, system dictionary, symbol table,
   // string table, and referents of reachable weak refs.
-  void g1_process_weak_roots(OopClosure* root_closure,
-                             OopClosure* non_root_closure);
+  void g1_process_weak_roots(OopClosure* root_closure);
 
   // Frees a non-humongous region by initializing its contents and
   // adding it to the free list that's passed as a parameter (this is
   // usually a local list which will be appended to the master free
   // list later). The used bytes of freed regions are accumulated in
@@ -877,13 +882,11 @@
   RefToScanQueueSet *_task_queues;
 
   // True iff a evacuation has failed in the current collection.
   bool _evacuation_failed;
 
-  // Set the attribute indicating whether evacuation has failed in the
-  // current collection.
-  void set_evacuation_failed(bool b) { _evacuation_failed = b; }
+  EvacuationFailedInfo* _evacuation_failed_info_array;
 
   // Failed evacuations cause some logical from-space objects to have
   // forwarding pointers to themselves. Reset them.
   void remove_self_forwarding_pointers();
 
@@ -921,11 +924,11 @@
   // Do any necessary cleanup for evacuation-failure handling data
   // structures.
   void finalize_for_evac_failure();
 
   // An attempt to evacuate "obj" has failed; take necessary steps.
-  oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj);
+  oop handle_evacuation_failure_par(G1ParScanThreadState* _par_scan_state, oop obj);
   void handle_evacuation_failure_common(oop obj, markOop m);
 
 #ifndef PRODUCT
   // Support for forcing evacuation failures. Analogous to
   // PromotionFailureALot for the other collectors.
@@ -953,17 +956,17 @@
 
   // Return true if it's time to cause an evacuation failure.
   inline bool evacuation_should_fail();
 
   // Reset the G1EvacuationFailureALot counters. Should be called at
-  // the end of an evacuation pause in which an evacuation failure ocurred.
+  // the end of an evacuation pause in which an evacuation failure occurred.
   inline void reset_evacuation_should_fail();
 #endif // !PRODUCT
 
   // ("Weak") Reference processing support.
   //
-  // G1 has 2 instances of the referece processor class. One
+  // G1 has 2 instances of the reference processor class. One
   // (_ref_processor_cm) handles reference object discovery
   // and subsequent processing during concurrent marking cycles.
   //
   // The other (_ref_processor_stw) handles reference object
   // discovery and processing during full GCs and incremental
@@ -1009,10 +1012,16 @@
   // lists (also checked as a precondition during initial marking).
 
   // The (stw) reference processor...
   ReferenceProcessor* _ref_processor_stw;
 
+  STWGCTimer* _gc_timer_stw;
+  ConcurrentGCTimer* _gc_timer_cm;
+
+  G1OldTracer* _gc_tracer_cm;
+  G1NewTracer* _gc_tracer_stw;
+
   // During reference object discovery, the _is_alive_non_header
   // closure (if non-null) is applied to the referent object to
   // determine whether the referent is live. If so then the
   // reference object does not need to be 'discovered' and can
   // be treated as a regular oop. This has the benefit of reducing
1119 // The rem set and barrier set. 1128 // The rem set and barrier set.
1120 G1RemSet* g1_rem_set() const { return _g1_rem_set; } 1129 G1RemSet* g1_rem_set() const { return _g1_rem_set; }
1121 ModRefBarrierSet* mr_bs() const { return _mr_bs; } 1130 ModRefBarrierSet* mr_bs() const { return _mr_bs; }
1122 1131
1123 // The rem set iterator.
1124 HeapRegionRemSetIterator* rem_set_iterator(int i) {
1125 return _rem_set_iterator[i];
1126 }
1127
1128 HeapRegionRemSetIterator* rem_set_iterator() {
1129 return _rem_set_iterator[0];
1130 }
1131
1132 unsigned get_gc_time_stamp() { 1132 unsigned get_gc_time_stamp() {
1133 return _gc_time_stamp; 1133 return _gc_time_stamp;
1134 } 1134 }
1135 1135
1136 void reset_gc_time_stamp() { 1136 void reset_gc_time_stamp() {
@@ -1163,12 +1163,15 @@
   // Reference Processing accessors
 
   // The STW reference processor....
   ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; }
 
-  // The Concurent Marking reference processor...
+  // The Concurrent Marking reference processor...
   ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }
+
+  ConcurrentGCTimer* gc_timer_cm() const { return _gc_timer_cm; }
+  G1OldTracer* gc_tracer_cm() const { return _gc_tracer_cm; }
 
   virtual size_t capacity() const;
   virtual size_t used() const;
   // This should be called when we're not holding the heap lock. The
   // result might be a bit inaccurate.
@@ -1223,11 +1226,11 @@
   // necessary (i.e., during heap verification).
   void verify_region_sets();
 
   // verify_region_sets_optional() is planted in the code for
   // list verification in non-product builds (and it can be enabled in
-  // product builds by definning HEAP_REGION_SET_FORCE_VERIFY to be 1).
+  // product builds by defining HEAP_REGION_SET_FORCE_VERIFY to be 1).
 #if HEAP_REGION_SET_FORCE_VERIFY
   void verify_region_sets_optional() {
     verify_region_sets();
   }
 #else // HEAP_REGION_SET_FORCE_VERIFY
@@ -1289,11 +1292,11 @@
   virtual void collect(GCCause::Cause cause);
 
   // The same as above but assume that the caller holds the Heap_lock.
   void collect_locked(GCCause::Cause cause);
 
-  // True iff a evacuation has failed in the most-recent collection.
+  // True iff an evacuation has failed in the most-recent collection.
   bool evacuation_failed() { return _evacuation_failed; }
 
   // It will free a region if it has allocated objects in it that are
   // all dead. It calls either free_region() or
   // free_humongous_region() depending on the type of the region that
@@ -1577,10 +1580,11 @@
   // full GC.
   void verify(bool silent, VerifyOption vo);
 
   // Override; it uses the "prev" marking information
   virtual void verify(bool silent);
+
   virtual void print_on(outputStream* st) const;
   virtual void print_extended_on(outputStream* st) const;
   virtual void print_on_error(outputStream* st) const;
 
   virtual void print_gc_threads_on(outputStream* st) const;
@@ -1751,33 +1755,122 @@
     if (_retired)
       return;
     ParGCAllocBuffer::retire(end_of_gc, retain);
     _retired = true;
   }
+
+  bool is_retired() {
+    return _retired;
+  }
+};
+
+class G1ParGCAllocBufferContainer {
+protected:
+  static int const _priority_max = 2;
+  G1ParGCAllocBuffer* _priority_buffer[_priority_max];
+
+public:
+  G1ParGCAllocBufferContainer(size_t gclab_word_size) {
+    for (int pr = 0; pr < _priority_max; ++pr) {
+      _priority_buffer[pr] = new G1ParGCAllocBuffer(gclab_word_size);
+    }
+  }
+
+  ~G1ParGCAllocBufferContainer() {
+    for (int pr = 0; pr < _priority_max; ++pr) {
+      assert(_priority_buffer[pr]->is_retired(), "alloc buffers should all retire at this point.");
+      delete _priority_buffer[pr];
+    }
+  }
+
+  HeapWord* allocate(size_t word_sz) {
+    HeapWord* obj;
+    for (int pr = 0; pr < _priority_max; ++pr) {
+      obj = _priority_buffer[pr]->allocate(word_sz);
+      if (obj != NULL) return obj;
+    }
+    return obj;
+  }
+
+  bool contains(void* addr) {
+    for (int pr = 0; pr < _priority_max; ++pr) {
+      if (_priority_buffer[pr]->contains(addr)) return true;
+    }
+    return false;
+  }
+
+  void undo_allocation(HeapWord* obj, size_t word_sz) {
+    bool finish_undo;
+    for (int pr = 0; pr < _priority_max; ++pr) {
+      if (_priority_buffer[pr]->contains(obj)) {
+        _priority_buffer[pr]->undo_allocation(obj, word_sz);
+        finish_undo = true;
+      }
+    }
+    if (!finish_undo) ShouldNotReachHere();
+  }
+
+  size_t words_remaining() {
+    size_t result = 0;
+    for (int pr = 0; pr < _priority_max; ++pr) {
+      result += _priority_buffer[pr]->words_remaining();
+    }
+    return result;
+  }
+
+  size_t words_remaining_in_retired_buffer() {
+    G1ParGCAllocBuffer* retired = _priority_buffer[0];
+    return retired->words_remaining();
+  }
+
+  void flush_stats_and_retire(PLABStats* stats, bool end_of_gc, bool retain) {
+    for (int pr = 0; pr < _priority_max; ++pr) {
+      _priority_buffer[pr]->flush_stats_and_retire(stats, end_of_gc, retain);
+    }
+  }
+
+  void update(bool end_of_gc, bool retain, HeapWord* buf, size_t word_sz) {
+    G1ParGCAllocBuffer* retired_and_set = _priority_buffer[0];
+    retired_and_set->retire(end_of_gc, retain);
+    retired_and_set->set_buf(buf);
+    retired_and_set->set_word_size(word_sz);
+    adjust_priority_order();
+  }
+
+private:
+  void adjust_priority_order() {
+    G1ParGCAllocBuffer* retired_and_set = _priority_buffer[0];
+
+    int last = _priority_max - 1;
+    for (int pr = 0; pr < last; ++pr) {
+      _priority_buffer[pr] = _priority_buffer[pr + 1];
+    }
+    _priority_buffer[last] = retired_and_set;
+  }
 };
 
 class G1ParScanThreadState : public StackObj {
 protected:
   G1CollectedHeap* _g1h;
   RefToScanQueue* _refs;
   DirtyCardQueue _dcq;
   CardTableModRefBS* _ct_bs;
   G1RemSet* _g1_rem;
 
-  G1ParGCAllocBuffer  _surviving_alloc_buffer;
-  G1ParGCAllocBuffer  _tenured_alloc_buffer;
-  G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
+  G1ParGCAllocBufferContainer  _surviving_alloc_buffer;
+  G1ParGCAllocBufferContainer  _tenured_alloc_buffer;
+  G1ParGCAllocBufferContainer* _alloc_buffers[GCAllocPurposeCount];
   ageTable _age_table;
 
   size_t _alloc_buffer_waste;
   size_t _undo_waste;
 
   OopsInHeapRegionClosure* _evac_failure_cl;
   G1ParScanHeapEvacClosure* _evac_cl;
   G1ParScanPartialArrayClosure* _partial_scan_cl;
 
   int _hash_seed;
   uint _queue_num;
 
   size_t _term_attempts;
 
   double _start;
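Note (editorial, not part of the changeset): with _priority_max == 2, update() always retires the most-drained buffer in slot 0, installs the fresh space there, and adjust_priority_order() rotates it to the back. allocate() therefore probes the older, partially filled buffer first, so its tail gets used instead of thrown away. Below is a minimal, self-contained sketch of that refill-and-rotate discipline, with hypothetical Buf/Container types standing in for G1ParGCAllocBuffer/G1ParGCAllocBufferContainer; refill() conflates retire() and set_buf() for brevity.

    #include <cstdio>
    #include <cstddef>

    // Hypothetical stand-in for G1ParGCAllocBuffer: a bump allocator
    // over a fixed array.
    struct Buf {
      static const size_t kWords = 8;
      size_t top;                      // next free word index
      long   words[kWords];
      Buf() : top(kWords) {}           // starts full, i.e. effectively retired
      long* allocate(size_t n) {
        if (top + n > kWords) return NULL;
        long* p = &words[top];
        top += n;
        return p;
      }
      void refill() { top = 0; }       // plays the role of retire + set_buf
    };

    // Mirrors the two-slot priority rotation of G1ParGCAllocBufferContainer.
    struct Container {
      static const int kMax = 2;
      Buf  bufs[kMax];
      Buf* prio[kMax];
      Container() { for (int i = 0; i < kMax; ++i) prio[i] = &bufs[i]; }

      // Probe the older buffer first, the freshest one last.
      long* allocate(size_t n) {
        for (int i = 0; i < kMax; ++i) {
          long* p = prio[i]->allocate(n);
          if (p != NULL) return p;
        }
        return NULL;
      }

      // Refill slot 0, then rotate it to the back, as update() plus
      // adjust_priority_order() do in the hunk above.
      void update() {
        Buf* fresh = prio[0];
        fresh->refill();
        for (int i = 0; i < kMax - 1; ++i) prio[i] = prio[i + 1];
        prio[kMax - 1] = fresh;
      }
    };

    int main() {
      Container c;
      if (c.allocate(4) == NULL) c.update();   // both buffers start retired
      long* a = c.allocate(4);                 // served by the fresh buffer
      long* b = c.allocate(6);                 // 6 > 4 words left: refill again
      if (b == NULL) { c.update(); b = c.allocate(6); }
      // The older buffer, with 4 words left, stays at the front, so a later
      // small allocation would still use its tail.
      std::printf("a=%p b=%p\n", (void*)a, (void*)b);
      return 0;
    }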
@@ -1827,11 +1920,11 @@
   }
 
   RefToScanQueue* refs() { return _refs; }
   ageTable* age_table() { return &_age_table; }
 
-  G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
+  G1ParGCAllocBufferContainer* alloc_buffer(GCAllocPurpose purpose) {
     return _alloc_buffers[purpose];
   }
 
   size_t alloc_buffer_waste() const { return _alloc_buffer_waste; }
   size_t undo_waste() const { return _undo_waste; }
@@ -1857,19 +1950,17 @@
 
   HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
     HeapWord* obj = NULL;
     size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
     if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
-      G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
-      add_to_alloc_buffer_waste(alloc_buf->words_remaining());
-      alloc_buf->retire(false /* end_of_gc */, false /* retain */);
+      G1ParGCAllocBufferContainer* alloc_buf = alloc_buffer(purpose);
 
       HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
       if (buf == NULL) return NULL; // Let caller handle allocation failure.
-      // Otherwise.
-      alloc_buf->set_word_size(gclab_word_size);
-      alloc_buf->set_buf(buf);
+
+      add_to_alloc_buffer_waste(alloc_buf->words_remaining_in_retired_buffer());
+      alloc_buf->update(false /* end_of_gc */, false /* retain */, buf, gclab_word_size);
 
       obj = alloc_buf->allocate(word_sz);
       assert(obj != NULL, "buffer was definitely big enough...");
     } else {
       obj = _g1h->par_allocate_during_gc(purpose, word_sz);
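Note (editorial, not part of the changeset): besides switching to the container type, this hunk reorders the slow path. The old code retired the current buffer, charging its remaining words to _alloc_buffer_waste, before trying to allocate the replacement GCLAB, so a failed refill still discarded the old buffer's tail. The new code obtains the replacement first and returns early on failure; only then is the retired slot's remainder counted as waste and the new space swapped in through update().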
@@ -1977,10 +2068,9 @@
     } else {
       deal_with_reference((oop*)ref);
     }
   }
 
-public:
   void trim_queue();
 };
 
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP