comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp @ 12355:cefad50507d8

Merge with hs25-b53
author Gilles Duboscq <duboscq@ssw.jku.at>
date Fri, 11 Oct 2013 10:38:03 +0200
parents 6b0fd0964b87 798522662fcd
children 359f7e70ae7f
--- src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp @ 12058:ccb4f2af2319
+++ src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp @ 12355:cefad50507d8
@@ -29,10 +29,11 @@
 #include "gc_implementation/g1/evacuationInfo.hpp"
 #include "gc_implementation/g1/g1AllocRegion.hpp"
 #include "gc_implementation/g1/g1HRPrinter.hpp"
 #include "gc_implementation/g1/g1MonitoringSupport.hpp"
 #include "gc_implementation/g1/g1RemSet.hpp"
+#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc_implementation/g1/g1YCTypes.hpp"
 #include "gc_implementation/g1/heapRegionSeq.hpp"
 #include "gc_implementation/g1/heapRegionSets.hpp"
 #include "gc_implementation/shared/hSpaceCounters.hpp"
 #include "gc_implementation/shared/parGCAllocBuffer.hpp"
@@ -44,10 +45,11 @@
 // A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
 // It uses the "Garbage First" heap organization and algorithm, which
 // may combine concurrent marking with parallel, incremental compaction of
 // heap subsets that will yield large amounts of garbage.

+// Forward declarations
 class HeapRegion;
 class HRRSCleanupTask;
 class GenerationSpec;
 class OopsInHeapRegionClosure;
 class G1KlassScanClosure;
@@ -67,10 +69,11 @@
 class GenerationCounters;
 class STWGCTimer;
 class G1NewTracer;
 class G1OldTracer;
 class EvacuationFailedInfo;
+class nmethod;

 typedef OverflowTaskQueue<StarTask, mtGC>         RefToScanQueue;
 typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;

 typedef int RegionIdx_t;   // needs to hold [ 0..max_regions() )
@@ -159,10 +162,28 @@
   virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
   virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
 public:
   MutatorAllocRegion()
   : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
+};
+
+class SurvivorGCAllocRegion : public G1AllocRegion {
+protected:
+  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
+  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
+public:
+  SurvivorGCAllocRegion()
+  : G1AllocRegion("Survivor GC Alloc Region", false /* bot_updates */) { }
+};
+
+class OldGCAllocRegion : public G1AllocRegion {
+protected:
+  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
+  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
+public:
+  OldGCAllocRegion()
+  : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
 };

 // The G1 STW is alive closure.
 // An instance is embedded into the G1CH and used as the
 // (optional) _is_alive_non_header closure in the STW
@@ -171,28 +192,10 @@
 class G1STWIsAliveClosure: public BoolObjectClosure {
   G1CollectedHeap* _g1;
 public:
   G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
   bool do_object_b(oop p);
-};
-
-class SurvivorGCAllocRegion : public G1AllocRegion {
-protected:
-  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
-  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
-public:
-  SurvivorGCAllocRegion()
-  : G1AllocRegion("Survivor GC Alloc Region", false /* bot_updates */) { }
-};
-
-class OldGCAllocRegion : public G1AllocRegion {
-protected:
-  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
-  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
-public:
-  OldGCAllocRegion()
-  : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
 };

 class RefineCardTableEntryClosure;

 class G1CollectedHeap : public SharedHeap {
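Editor's note: the three allocation-region flavors in the hunk above differ only in the label and the bot_updates flag they hand to the shared G1AllocRegion base; only old-generation allocation maintains the block offset table. A minimal standalone model of that pattern (stub types and hypothetical names, not HotSpot code):

#include <cstddef>
#include <cstdio>

struct HeapRegion;  // opaque stand-in for the real region type

class AllocRegionModel {
  const char* _name;
  bool        _bot_updates;
protected:
  AllocRegionModel(const char* name, bool bot_updates)
    : _name(name), _bot_updates(bot_updates) { }
  // Subclasses decide how regions are obtained and retired.
  virtual HeapRegion* allocate_new_region(size_t word_size, bool force) = 0;
  virtual void retire_region(HeapRegion* r, size_t allocated_bytes) = 0;
public:
  virtual ~AllocRegionModel() { }
  void describe() const {
    std::printf("%s (bot_updates=%s)\n", _name, _bot_updates ? "true" : "false");
  }
};

// Only the old-generation flavor is constructed with bot_updates = true,
// mirroring OldGCAllocRegion above.
class OldAllocRegionModel : public AllocRegionModel {
protected:
  virtual HeapRegion* allocate_new_region(size_t, bool) { return 0; }
  virtual void retire_region(HeapRegion*, size_t) { }
public:
  OldAllocRegionModel() : AllocRegionModel("Old GC Alloc Region", true) { }
};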
@@ -705,11 +708,11 @@
   bool in_cset_fast_test(oop obj) {
     assert(_in_cset_fast_test != NULL, "sanity");
     if (_g1_committed.contains((HeapWord*) obj)) {
       // no need to subtract the bottom of the heap from obj,
       // _in_cset_fast_test is biased
-      uintx index = (uintx) obj >> HeapRegion::LogOfHRGrainBytes;
+      uintx index = cast_from_oop<uintx>(obj) >> HeapRegion::LogOfHRGrainBytes;
       bool ret = _in_cset_fast_test[index];
       // let's make sure the result is consistent with what the slower
       // test returns
       assert( ret || !obj_in_cs(obj), "sanity");
       assert(!ret || obj_in_cs(obj), "sanity");
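Editor's note: the "biased" comment is the heart of this fast path. The table's base pointer is pre-offset by the heap bottom's region index, so membership is a shift plus an indexed load with no subtraction. A standalone sketch under assumed parameters (1 MB regions and a fixed heap base are made up here; the real table is set up by the collector):

#include <cassert>
#include <cstddef>
#include <cstdint>

const int       kLogRegionBytes = 20;           // assume 1 MB regions
const uintptr_t kHeapBottom     = 0x40000000UL; // stub heap base address
const size_t    kNumRegions     = 16;

static bool storage[kNumRegions];  // the real backing array

// Bias the base pointer once, at setup time. (Out-of-range pointer
// arithmetic, but it mirrors what the VM's biased array base does.)
static bool* in_cset_fast_test = storage - (kHeapBottom >> kLogRegionBytes);

bool in_cset(uintptr_t addr) {
  // No "addr - kHeapBottom" subtraction needed; the bias accounts for it.
  return in_cset_fast_test[addr >> kLogRegionBytes];
}

int main() {
  storage[3] = true;  // region 3 is in the collection set
  assert(in_cset(kHeapBottom + 3 * (1UL << kLogRegionBytes)));
  assert(!in_cset(kHeapBottom));
  return 0;
}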
@@ -778,13 +781,14 @@
   // Heap_lock when we enter this method, we will pass the
   // gc_count_before (i.e., total_collections()) as a parameter since
   // it has to be read while holding the Heap_lock. Currently, both
   // methods that call do_collection_pause() release the Heap_lock
   // before the call, so it's easy to read gc_count_before just before.
   HeapWord* do_collection_pause(size_t         word_size,
                                 unsigned int   gc_count_before,
-                                bool*          succeeded);
+                                bool*          succeeded,
+                                GCCause::Cause gc_cause);

   // The guts of the incremental collection pause, executed by the vm
   // thread. It returns false if it is unable to do the collection due
   // to the GC locker being active, true otherwise
   bool do_collection_pause_at_safepoint(double target_pause_time_ms);
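Editor's note: a sketch of the caller protocol the comment above describes, using C++11 primitives as stand-ins for Heap_lock and total_collections() (all names here are hypothetical): sample the collection count under the lock, release the lock, then pass the sample along so the pause can detect that another GC already ran in between.

#include <mutex>

static std::mutex heap_lock;               // stand-in for Heap_lock
static unsigned   total_collections_ = 0;  // stand-in for total_collections()

static bool pause_already_satisfied(unsigned gc_count_before) {
  // Any pause that completed after our sample bumped the counter; if so,
  // the allocation that motivated this request may now succeed without
  // triggering another collection.
  return total_collections_ != gc_count_before;
}

void request_collection_pause() {
  unsigned gc_count_before;
  {
    std::lock_guard<std::mutex> hl(heap_lock);
    gc_count_before = total_collections_;  // must be read under the lock
  }                                        // lock released before the pause
  if (!pause_already_satisfied(gc_count_before)) {
    // ... submit the VM operation, carrying gc_count_before with it ...
  }
}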
@@ -792,12 +796,10 @@
   // Actually do the work of evacuating the collection set.
   void evacuate_collection_set(EvacuationInfo& evacuation_info);

   // The g1 remembered set of the heap.
   G1RemSet* _g1_rem_set;
-  // And it's mod ref barrier set, used to track updates for the above.
-  ModRefBarrierSet* _mr_bs;

   // A set of cards that cover the objects for which the Rsets should be updated
   // concurrently after the collection.
   DirtyCardQueueSet _dirty_card_queue_set;

@@ -1093,10 +1095,13 @@
   // Initialize the G1CollectedHeap to have the initial and
   // maximum sizes and remembered and barrier sets
   // specified by the policy object.
   jint initialize();

+  // Return the (conservative) maximum heap alignment for any G1 heap
+  static size_t conservative_max_heap_alignment();
+
   // Initialize weak reference processing.
   virtual void ref_processing_init();

   void set_par_threads(uint t) {
     SharedHeap::set_par_threads(t);
@@ -1125,11 +1130,10 @@
   // Adaptive size policy.  No such thing for g1.
   virtual AdaptiveSizePolicy* size_policy() { return NULL; }

   // The rem set and barrier set.
   G1RemSet* g1_rem_set() const { return _g1_rem_set; }
-  ModRefBarrierSet* mr_bs() const { return _mr_bs; }

   unsigned get_gc_time_stamp() {
     return _gc_time_stamp;
   }

@@ -1343,10 +1347,14 @@
   MemRegion g1_committed() {
     return _g1_committed;
   }

   virtual bool is_in_closed_subset(const void* p) const;
+
+  G1SATBCardTableModRefBS* g1_barrier_set() {
+    return (G1SATBCardTableModRefBS*) barrier_set();
+  }

   // This resets the card table to all zeros.  It is used after
   // a collection pause which used the card table to claim cards.
   void cleanUpCardTable();

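Editor's note: the new g1_barrier_set() accessor centralizes the downcast from the generic barrier_set() so callers get the G1-specific interface in one place instead of repeating the cast. A standalone model of that accessor pattern (stub class names, not the real hierarchy):

class BarrierSetModel {
public:
  virtual ~BarrierSetModel() { }
};

class G1CardTableModel : public BarrierSetModel {
public:
  void mark_card_dirty(void* /*addr*/) { /* G1-specific operation */ }
};

class HeapModel {
  BarrierSetModel* _barrier_set;
public:
  HeapModel(BarrierSetModel* bs) : _barrier_set(bs) { }
  BarrierSetModel* barrier_set() { return _barrier_set; }
  // This heap only ever installs a G1CardTableModel, so the static
  // downcast is safe by construction, as with g1_barrier_set() above.
  G1CardTableModel* g1_barrier_set() {
    return static_cast<G1CardTableModel*>(barrier_set());
  }
};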
@@ -1553,10 +1561,139 @@
   // Print the maximum heap capacity.
   virtual size_t max_capacity() const;

   virtual jlong millis_since_last_gc();

+
+  // Convenience function to be used in situations where the heap type can be
+  // asserted to be this type.
+  static G1CollectedHeap* heap();
+
+  void set_region_short_lived_locked(HeapRegion* hr);
+  // add appropriate methods for any other surv rate groups
+
+  YoungList* young_list() { return _young_list; }
+
+  // debugging
+  bool check_young_list_well_formed() {
+    return _young_list->check_list_well_formed();
+  }
+
+  bool check_young_list_empty(bool check_heap,
+                              bool check_sample = true);
+
+  // *** Stuff related to concurrent marking.  It's not clear to me that so
+  // many of these need to be public.
+
+  // The functions below are helper functions that a subclass of
+  // "CollectedHeap" can use in the implementation of its virtual
+  // functions.
+  // This performs a concurrent marking of the live objects in a
+  // bitmap off to the side.
+  void doConcurrentMark();
+
+  bool isMarkedPrev(oop obj) const;
+  bool isMarkedNext(oop obj) const;
+
+  // Determine if an object is dead, given the object and also
+  // the region to which the object belongs. An object is dead
+  // iff a) it was not allocated since the last mark and b) it
+  // is not marked.
+
+  bool is_obj_dead(const oop obj, const HeapRegion* hr) const {
+    return
+      !hr->obj_allocated_since_prev_marking(obj) &&
+      !isMarkedPrev(obj);
+  }
+
+  // This function returns true when an object has been
+  // around since the previous marking and hasn't yet
+  // been marked during this marking.
+
+  bool is_obj_ill(const oop obj, const HeapRegion* hr) const {
+    return
+      !hr->obj_allocated_since_next_marking(obj) &&
+      !isMarkedNext(obj);
+  }
+
+  // Determine if an object is dead, given only the object itself.
+  // This will find the region to which the object belongs and
+  // then call the region version of the same function.
+
+  // Added if it is NULL it isn't dead.
+
+  bool is_obj_dead(const oop obj) const {
+    const HeapRegion* hr = heap_region_containing(obj);
+    if (hr == NULL) {
+      if (obj == NULL) return false;
+      else return true;
+    }
+    else return is_obj_dead(obj, hr);
+  }
+
+  bool is_obj_ill(const oop obj) const {
+    const HeapRegion* hr = heap_region_containing(obj);
+    if (hr == NULL) {
+      if (obj == NULL) return false;
+      else return true;
+    }
+    else return is_obj_ill(obj, hr);
+  }
+
+  bool allocated_since_marking(oop obj, HeapRegion* hr, VerifyOption vo);
+  HeapWord* top_at_mark_start(HeapRegion* hr, VerifyOption vo);
+  bool is_marked(oop obj, VerifyOption vo);
+  const char* top_at_mark_start_str(VerifyOption vo);
+
+  ConcurrentMark* concurrent_mark() const { return _cm; }
+
+  // Refinement
+
+  ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; }
+
+  // The dirty cards region list is used to record a subset of regions
+  // whose cards need clearing. The list is populated during the
+  // remembered set scanning and drained during the card table
+  // cleanup. Although the methods are reentrant, population/draining
+  // phases must not overlap. For synchronization purposes the last
+  // element on the list points to itself.
+  HeapRegion* _dirty_cards_region_list;
+  void push_dirty_cards_region(HeapRegion* hr);
+  HeapRegion* pop_dirty_cards_region();
+
+  // Optimized nmethod scanning support routines
+
+  // Register the given nmethod with the G1 heap
+  virtual void register_nmethod(nmethod* nm);
+
+  // Unregister the given nmethod from the G1 heap
+  virtual void unregister_nmethod(nmethod* nm);
+
+  // Migrate the nmethods in the code root lists of the regions
+  // in the collection set to regions in to-space. In the event
+  // of an evacuation failure, nmethods that reference objects
+  // that were not successfully evacuated are not migrated.
+  void migrate_strong_code_roots();
+
+  // During an initial mark pause, mark all the code roots that
+  // point into regions *not* in the collection set.
+  void mark_strong_code_roots(uint worker_id);
+
+  // Rebuild the strong code root lists for each region
+  // after a full GC
+  void rebuild_strong_code_roots();
+
+  // Verification
+
+  // The following is just to alert the verification code
+  // that a full collection has occurred and that the
+  // remembered sets are no longer up to date.
+  bool _full_collection;
+  void set_full_collection() { _full_collection = true;}
+  void clear_full_collection() {_full_collection = false;}
+  bool full_collection() {return _full_collection;}
+
   // Perform any cleanup actions necessary before allowing a verification.
   virtual void prepare_for_verify();

   // Perform verification.

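Editor's note: the self-pointing tail mentioned in the dirty-cards-region comments above is worth spelling out: a NULL link means "not on the list", so the last element cannot use NULL as its terminator. A single-threaded model of that invariant (stub region type; the real push is performed with atomic operations):

#include <cstddef>

struct RegionModel {
  RegionModel* _next;
  RegionModel() : _next(NULL) { }
  // NULL means "detached"; any non-NULL value means "on the list".
  bool on_dirty_cards_list() const { return _next != NULL; }
};

static RegionModel* dirty_cards_region_list = NULL;

void push_dirty_cards_region(RegionModel* r) {
  if (r->on_dirty_cards_list()) return;  // already recorded once
  // The tail points to itself rather than to NULL, so membership stays
  // distinguishable from detachment.
  r->_next = (dirty_cards_region_list == NULL) ? r : dirty_cards_region_list;
  dirty_cards_region_list = r;
}

RegionModel* pop_dirty_cards_region() {
  RegionModel* r = dirty_cards_region_list;
  if (r == NULL) return NULL;
  // A self-pointer marks the last element.
  dirty_cards_region_list = (r->_next == r) ? NULL : r->_next;
  r->_next = NULL;  // detached again
  return r;
}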
@@ -1576,106 +1713,14 @@
   void verify(bool silent, VerifyOption vo);

   // Override; it uses the "prev" marking information
   virtual void verify(bool silent);

-  virtual void print_on(outputStream* st) const;
-  virtual void print_extended_on(outputStream* st) const;
-  virtual void print_on_error(outputStream* st) const;
-
-  virtual void print_gc_threads_on(outputStream* st) const;
-  virtual void gc_threads_do(ThreadClosure* tc) const;
-
-  // Override
-  void print_tracing_info() const;
-
-  // The following two methods are helpful for debugging RSet issues.
-  void print_cset_rsets() PRODUCT_RETURN;
-  void print_all_rsets() PRODUCT_RETURN;
-
-  // Convenience function to be used in situations where the heap type can be
-  // asserted to be this type.
-  static G1CollectedHeap* heap();
-
-  void set_region_short_lived_locked(HeapRegion* hr);
-  // add appropriate methods for any other surv rate groups
-
-  YoungList* young_list() { return _young_list; }
-
-  // debugging
-  bool check_young_list_well_formed() {
-    return _young_list->check_list_well_formed();
-  }
-
-  bool check_young_list_empty(bool check_heap,
-                              bool check_sample = true);
-
-  // *** Stuff related to concurrent marking.  It's not clear to me that so
-  // many of these need to be public.
-
-  // The functions below are helper functions that a subclass of
-  // "CollectedHeap" can use in the implementation of its virtual
-  // functions.
-  // This performs a concurrent marking of the live objects in a
-  // bitmap off to the side.
-  void doConcurrentMark();
-
-  bool isMarkedPrev(oop obj) const;
-  bool isMarkedNext(oop obj) const;
-
-  // Determine if an object is dead, given the object and also
-  // the region to which the object belongs. An object is dead
-  // iff a) it was not allocated since the last mark and b) it
-  // is not marked.
-
-  bool is_obj_dead(const oop obj, const HeapRegion* hr) const {
-    return
-      !hr->obj_allocated_since_prev_marking(obj) &&
-      !isMarkedPrev(obj);
-  }
-
-  // This function returns true when an object has been
-  // around since the previous marking and hasn't yet
-  // been marked during this marking.
-
-  bool is_obj_ill(const oop obj, const HeapRegion* hr) const {
-    return
-      !hr->obj_allocated_since_next_marking(obj) &&
-      !isMarkedNext(obj);
-  }
-
-  // Determine if an object is dead, given only the object itself.
-  // This will find the region to which the object belongs and
-  // then call the region version of the same function.
-
-  // Added if it is NULL it isn't dead.
-
-  bool is_obj_dead(const oop obj) const {
-    const HeapRegion* hr = heap_region_containing(obj);
-    if (hr == NULL) {
-      if (obj == NULL) return false;
-      else return true;
-    }
-    else return is_obj_dead(obj, hr);
-  }
-
-  bool is_obj_ill(const oop obj) const {
-    const HeapRegion* hr = heap_region_containing(obj);
-    if (hr == NULL) {
-      if (obj == NULL) return false;
-      else return true;
-    }
-    else return is_obj_ill(obj, hr);
-  }
-
   // The methods below are here for convenience and dispatch the
   // appropriate method depending on value of the given VerifyOption
-  // parameter. The options for that parameter are:
-  //
-  // vo == UsePrevMarking -> use "prev" marking information,
-  // vo == UseNextMarking -> use "next" marking information,
-  // vo == UseMarkWord    -> use mark word from object header
+  // parameter. The values for that parameter, and their meanings,
+  // are the same as those above.

   bool is_obj_dead_cond(const oop obj,
                         const HeapRegion* hr,
                         const VerifyOption vo) const {
     switch (vo) {
@@ -1696,35 +1741,25 @@
       default:                            ShouldNotReachHere();
     }
     return false; // keep some compilers happy
   }

-  bool allocated_since_marking(oop obj, HeapRegion* hr, VerifyOption vo);
-  HeapWord* top_at_mark_start(HeapRegion* hr, VerifyOption vo);
-  bool is_marked(oop obj, VerifyOption vo);
-  const char* top_at_mark_start_str(VerifyOption vo);
-
-  // The following is just to alert the verification code
-  // that a full collection has occurred and that the
-  // remembered sets are no longer up to date.
-  bool _full_collection;
-  void set_full_collection() { _full_collection = true;}
-  void clear_full_collection() {_full_collection = false;}
-  bool full_collection() {return _full_collection;}
-
-  ConcurrentMark* concurrent_mark() const { return _cm; }
-  ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; }
-
-  // The dirty cards region list is used to record a subset of regions
-  // whose cards need clearing. The list if populated during the
-  // remembered set scanning and drained during the card table
-  // cleanup. Although the methods are reentrant, population/draining
-  // phases must not overlap. For synchronization purposes the last
-  // element on the list points to itself.
-  HeapRegion* _dirty_cards_region_list;
-  void push_dirty_cards_region(HeapRegion* hr);
-  HeapRegion* pop_dirty_cards_region();
+  // Printing
+
+  virtual void print_on(outputStream* st) const;
+  virtual void print_extended_on(outputStream* st) const;
+  virtual void print_on_error(outputStream* st) const;
+
+  virtual void print_gc_threads_on(outputStream* st) const;
+  virtual void gc_threads_do(ThreadClosure* tc) const;
+
+  // Override
+  void print_tracing_info() const;
+
+  // The following two methods are helpful for debugging RSet issues.
+  void print_cset_rsets() PRODUCT_RETURN;
+  void print_all_rsets() PRODUCT_RETURN;

 public:
   void stop_conc_gc_threads();

   size_t pending_card_num();
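Editor's note: the is_obj_dead_cond overloads in the hunk above all reduce to a three-way dispatch on VerifyOption. A standalone model of that dispatch (stub predicates stand in for the prev/next marking bitmaps and the mark word):

#include <cassert>

enum VerifyOptionModel { UsePrevMarking, UseNextMarking, UseMarkWord };

struct ObjModel {
  bool dead_wrt_prev;  // what the "prev" marking information would say
  bool dead_wrt_next;  // what the "next" marking information would say
  bool dead_wrt_mark;  // what the mark word would say (full-GC verification)
};

bool is_obj_dead_cond(const ObjModel& obj, VerifyOptionModel vo) {
  switch (vo) {
    case UsePrevMarking: return obj.dead_wrt_prev;
    case UseNextMarking: return obj.dead_wrt_next;
    case UseMarkWord:    return obj.dead_wrt_mark;
  }
  assert(false && "unreachable");  // mirrors ShouldNotReachHere()
  return false;                    // keep some compilers happy
}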
@@ -1846,11 +1881,11 @@
 class G1ParScanThreadState : public StackObj {
 protected:
   G1CollectedHeap* _g1h;
   RefToScanQueue*  _refs;
   DirtyCardQueue   _dcq;
-  CardTableModRefBS* _ct_bs;
+  G1SATBCardTableModRefBS* _ct_bs;
   G1RemSet* _g1_rem;

   G1ParGCAllocBufferContainer  _surviving_alloc_buffer;
   G1ParGCAllocBufferContainer  _tenured_alloc_buffer;
   G1ParGCAllocBufferContainer* _alloc_buffers[GCAllocPurposeCount];
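Editor's note: G1ParScanThreadState keeps one allocation buffer per GC purpose so each worker can evacuate objects without contending on shared allocation. A minimal model of such a buffer (stub code, not the real G1ParGCAllocBuffer); the unused tail of a retired buffer is what the waste counters in the next hunk account for:

#include <cstddef>

class ParGCAllocBufferModel {
  char* _top;
  char* _end;
public:
  ParGCAllocBufferModel(char* bottom, size_t byte_size)
    : _top(bottom), _end(bottom + byte_size) { }

  // Fast path: a bounds check and a pointer bump, no synchronization.
  void* allocate(size_t byte_size) {
    if (byte_size > (size_t)(_end - _top)) {
      return 0;  // exhausted: caller retires this buffer and gets a new one
    }
    char* obj = _top;
    _top += byte_size;
    return obj;
  }

  // Space left unclaimed when the buffer is retired counts as waste.
  size_t unused_bytes() const { return (size_t)(_end - _top); }
};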
@@ -1885,11 +1920,11 @@
   void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }

   void add_to_undo_waste(size_t waste)         { _undo_waste += waste; }

   DirtyCardQueue& dirty_card_queue()           { return _dcq; }
-  CardTableModRefBS* ctbs()                    { return _ct_bs; }
+  G1SATBCardTableModRefBS* ctbs()              { return _ct_bs; }

   template <class T> void immediate_rs_update(HeapRegion* from, T* p, int tid) {
     if (!from->is_survivor()) {
       _g1_rem->par_write_ref(from, p, tid);
     }