Comparison: src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp @ 12080:5888334c9c24
7145569: G1: optimize nmethods scanning
Summary: Add to the RSet of each region a list of the nmethods that contain references into the region. Skip scanning the code cache during root scanning and scan the nmethod lists during RSet scanning instead.
Reviewed-by: tschatzl, brutisso, mgerdin, twisti, kvn
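The idea in the summary, as a minimal C++ sketch (illustrative only: the list class, its members, and the growth policy below are assumptions, not the actual HotSpot implementation):

  // Sketch: each region's remembered set carries a list of the nmethods
  // that contain references into that region. Root scanning can then skip
  // the code cache entirely, and RSet scanning walks these short lists.
  class nmethod;                          // compiled method, as in HotSpot

  class StrongCodeRootList {              // hypothetical name
    nmethod** _roots;                     // growable array of code roots
    int       _length;
    int       _capacity;
   public:
    StrongCodeRootList() : _roots(new nmethod*[8]), _length(0), _capacity(8) {}
    void add(nmethod* nm) {               // nm now has a reference into the region
      if (_length == _capacity) {         // grow geometrically
        nmethod** grown = new nmethod*[_capacity * 2];
        for (int i = 0; i < _length; i++) grown[i] = _roots[i];
        delete[] _roots;
        _roots = grown;
        _capacity *= 2;
      }
      _roots[_length++] = nm;
    }
    void remove(nmethod* nm) {            // nm unloaded or no longer points here
      for (int i = 0; i < _length; i++) {
        if (_roots[i] == nm) { _roots[i] = _roots[--_length]; return; }
      }
    }
  };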
author    johnc
date      Thu, 15 Aug 2013 10:52:18 +0200
parents   71180a6e5080
children  f7d3b4387a16
12033:bd902affe102 | 12080:5888334c9c24 |
44 // A "G1CollectedHeap" is an implementation of a java heap for HotSpot. | 44 // A "G1CollectedHeap" is an implementation of a java heap for HotSpot. |
45 // It uses the "Garbage First" heap organization and algorithm, which | 45 // It uses the "Garbage First" heap organization and algorithm, which |
46 // may combine concurrent marking with parallel, incremental compaction of | 46 // may combine concurrent marking with parallel, incremental compaction of |
47 // heap subsets that will yield large amounts of garbage. | 47 // heap subsets that will yield large amounts of garbage. |
48 | 48 |
49 // Forward declarations | |
49 class HeapRegion; | 50 class HeapRegion; |
50 class HRRSCleanupTask; | 51 class HRRSCleanupTask; |
51 class GenerationSpec; | 52 class GenerationSpec; |
52 class OopsInHeapRegionClosure; | 53 class OopsInHeapRegionClosure; |
53 class G1KlassScanClosure; | 54 class G1KlassScanClosure; |
67 class GenerationCounters; | 68 class GenerationCounters; |
68 class STWGCTimer; | 69 class STWGCTimer; |
69 class G1NewTracer; | 70 class G1NewTracer; |
70 class G1OldTracer; | 71 class G1OldTracer; |
71 class EvacuationFailedInfo; | 72 class EvacuationFailedInfo; |
73 class nmethod; | |
72 | 74 |
73 typedef OverflowTaskQueue<StarTask, mtGC> RefToScanQueue; | 75 typedef OverflowTaskQueue<StarTask, mtGC> RefToScanQueue; |
74 typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet; | 76 typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet; |
75 | 77 |
76 typedef int RegionIdx_t; // needs to hold [ 0..max_regions() ) | 78 typedef int RegionIdx_t; // needs to hold [ 0..max_regions() ) |
159 virtual HeapRegion* allocate_new_region(size_t word_size, bool force); | 161 virtual HeapRegion* allocate_new_region(size_t word_size, bool force); |
160 virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes); | 162 virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes); |
161 public: | 163 public: |
162 MutatorAllocRegion() | 164 MutatorAllocRegion() |
163 : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { } | 165 : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { } |
166 }; | |
167 | |
168 class SurvivorGCAllocRegion : public G1AllocRegion { | |
169 protected: | |
170 virtual HeapRegion* allocate_new_region(size_t word_size, bool force); | |
171 virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes); | |
172 public: | |
173 SurvivorGCAllocRegion() | |
174 : G1AllocRegion("Survivor GC Alloc Region", false /* bot_updates */) { } | |
175 }; | |
176 | |
177 class OldGCAllocRegion : public G1AllocRegion { | |
178 protected: | |
179 virtual HeapRegion* allocate_new_region(size_t word_size, bool force); | |
180 virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes); | |
181 public: | |
182 OldGCAllocRegion() | |
183 : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { } | |
164 }; | 184 }; |
165 | 185 |
166 // The G1 STW is alive closure. | 186 // The G1 STW is alive closure. |
167 // An instance is embedded into the G1CH and used as the | 187 // An instance is embedded into the G1CH and used as the |
168 // (optional) _is_alive_non_header closure in the STW | 188 // (optional) _is_alive_non_header closure in the STW |
171 class G1STWIsAliveClosure: public BoolObjectClosure { | 191 class G1STWIsAliveClosure: public BoolObjectClosure { |
172 G1CollectedHeap* _g1; | 192 G1CollectedHeap* _g1; |
173 public: | 193 public: |
174 G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} | 194 G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} |
175 bool do_object_b(oop p); | 195 bool do_object_b(oop p); |
176 }; | |
177 | |
178 class SurvivorGCAllocRegion : public G1AllocRegion { | |
179 protected: | |
180 virtual HeapRegion* allocate_new_region(size_t word_size, bool force); | |
181 virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes); | |
182 public: | |
183 SurvivorGCAllocRegion() | |
184 : G1AllocRegion("Survivor GC Alloc Region", false /* bot_updates */) { } | |
185 }; | |
186 | |
187 class OldGCAllocRegion : public G1AllocRegion { | |
188 protected: | |
189 virtual HeapRegion* allocate_new_region(size_t word_size, bool force); | |
190 virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes); | |
191 public: | |
192 OldGCAllocRegion() | |
193 : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { } | |
194 }; | 196 }; |
195 | 197 |
196 class RefineCardTableEntryClosure; | 198 class RefineCardTableEntryClosure; |
197 | 199 |
198 class G1CollectedHeap : public SharedHeap { | 200 class G1CollectedHeap : public SharedHeap { |
1547 // Print the maximum heap capacity. | 1549 // Print the maximum heap capacity. |
1548 virtual size_t max_capacity() const; | 1550 virtual size_t max_capacity() const; |
1549 | 1551 |
1550 virtual jlong millis_since_last_gc(); | 1552 virtual jlong millis_since_last_gc(); |
1551 | 1553 |
1554 | |
1555 // Convenience function to be used in situations where the heap type can be | |
1556 // asserted to be this type. | |
1557 static G1CollectedHeap* heap(); | |
1558 | |
1559 void set_region_short_lived_locked(HeapRegion* hr); | |
1560 // add appropriate methods for any other surv rate groups | |
1561 | |
1562 YoungList* young_list() { return _young_list; } | |
1563 | |
1564 // debugging | |
1565 bool check_young_list_well_formed() { | |
1566 return _young_list->check_list_well_formed(); | |
1567 } | |
1568 | |
1569 bool check_young_list_empty(bool check_heap, | |
1570 bool check_sample = true); | |
1571 | |
1572 // *** Stuff related to concurrent marking. It's not clear to me that so | |
1573 // many of these need to be public. | |
1574 | |
1575 // The functions below are helper functions that a subclass of | |
1576 // "CollectedHeap" can use in the implementation of its virtual | |
1577 // functions. | |
1578 // This performs a concurrent marking of the live objects in a | |
1579 // bitmap off to the side. | |
1580 void doConcurrentMark(); | |
1581 | |
1582 bool isMarkedPrev(oop obj) const; | |
1583 bool isMarkedNext(oop obj) const; | |
1584 | |
1585 // Determine if an object is dead, given the object and also | |
1586 // the region to which the object belongs. An object is dead | |
1587 // iff a) it was not allocated since the last mark and b) it | |
1588 // is not marked. | |
1589 | |
1590 bool is_obj_dead(const oop obj, const HeapRegion* hr) const { | |
1591 return | |
1592 !hr->obj_allocated_since_prev_marking(obj) && | |
1593 !isMarkedPrev(obj); | |
1594 } | |
1595 | |
1596 // This function returns true when an object has been | |
1597 // around since the previous marking and hasn't yet | |
1598 // been marked during this marking. | |
1599 | |
1600 bool is_obj_ill(const oop obj, const HeapRegion* hr) const { | |
1601 return | |
1602 !hr->obj_allocated_since_next_marking(obj) && | |
1603 !isMarkedNext(obj); | |
1604 } | |
1605 | |
1606 // Determine if an object is dead, given only the object itself. | |
1607 // This will find the region to which the object belongs and | |
1608 // then call the region version of the same function. | |
1609 | |
1610 // Note: if it is NULL it isn't dead. |
1611 | |
1612 bool is_obj_dead(const oop obj) const { | |
1613 const HeapRegion* hr = heap_region_containing(obj); | |
1614 if (hr == NULL) { | |
1615 if (obj == NULL) return false; | |
1616 else return true; | |
1617 } | |
1618 else return is_obj_dead(obj, hr); | |
1619 } | |
1620 | |
1621 bool is_obj_ill(const oop obj) const { | |
1622 const HeapRegion* hr = heap_region_containing(obj); | |
1623 if (hr == NULL) { | |
1624 if (obj == NULL) return false; | |
1625 else return true; | |
1626 } | |
1627 else return is_obj_ill(obj, hr); | |
1628 } | |
1629 | |
1630 bool allocated_since_marking(oop obj, HeapRegion* hr, VerifyOption vo); | |
1631 HeapWord* top_at_mark_start(HeapRegion* hr, VerifyOption vo); | |
1632 bool is_marked(oop obj, VerifyOption vo); | |
1633 const char* top_at_mark_start_str(VerifyOption vo); | |
1634 | |
1635 ConcurrentMark* concurrent_mark() const { return _cm; } | |
1636 | |
1637 // Refinement | |
1638 | |
1639 ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; } | |
1640 | |
1641 // The dirty cards region list is used to record a subset of regions | |
1642 // whose cards need clearing. The list is populated during the |
1643 // remembered set scanning and drained during the card table | |
1644 // cleanup. Although the methods are reentrant, population/draining | |
1645 // phases must not overlap. For synchronization purposes the last | |
1646 // element on the list points to itself. | |
1647 HeapRegion* _dirty_cards_region_list; | |
1648 void push_dirty_cards_region(HeapRegion* hr); | |
1649 HeapRegion* pop_dirty_cards_region(); | |
1650 | |
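The self-linked protocol just described, as a compilable sketch (the struct and field names are illustrative, not the actual HeapRegion members):

  // A region is on the dirty-cards list iff its link field is non-NULL;
  // the last element points to itself, so list membership needs no flag.
  struct Region {
    Region* _next_dirty;                          // NULL => not on the list
    Region() : _next_dirty(0) {}
  };

  struct DirtyCardsRegionList {
    Region* _head;
    DirtyCardsRegionList() : _head(0) {}
    void push(Region* r) {
      if (r->_next_dirty != 0) return;            // already queued
      r->_next_dirty = (_head != 0) ? _head : r;  // tail self-links
      _head = r;
    }
    Region* pop() {
      Region* r = _head;
      if (r == 0) return 0;
      _head = (r->_next_dirty == r) ? 0 : r->_next_dirty;  // self-link => last
      r->_next_dirty = 0;                         // mark off-list
      return r;
    }
  };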
1651 // Optimized nmethod scanning support routines | |
1652 | |
1653 // Register the given nmethod with the G1 heap | |
1654 virtual void register_nmethod(nmethod* nm); | |
1655 | |
1656 // Unregister the given nmethod from the G1 heap | |
1657 virtual void unregister_nmethod(nmethod* nm); | |
1658 | |
1659 // Migrate the nmethods in the code root lists of the regions | |
1660 // in the collection set to regions in to-space. In the event | |
1661 // of an evacuation failure, nmethods that reference objects | |
1662 // that were not successfully evacuated are not migrated. |
1663 void migrate_strong_code_roots(); | |
1664 | |
1665 // During an initial mark pause, mark all the code roots that | |
1666 // point into regions *not* in the collection set. | |
1667 void mark_strong_code_roots(uint worker_id); | |
1668 | |
1669 // Rebuild the strong code root lists for each region |
1670 // after a full GC | |
1671 void rebuild_strong_code_roots(); | |
1672 | |
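A hedged sketch of how registration might tie into per-region code root lists: visit the oops embedded in the nmethod and add (or remove) the nmethod on the list of each region those oops point into. nmethod::oops_do is the real HotSpot traversal hook; the closure names and add_strong_code_root are assumptions for illustration, and the real bodies live in g1CollectedHeap.cpp:

  void G1CollectedHeap::register_nmethod(nmethod* nm) {
    // Hypothetical closure: for each oop in nm, roughly
    //   heap_region_containing(oop)->rem_set()->add_strong_code_root(nm);
    RegisterNMethodOopClosure reg_cl(this, nm);
    nm->oops_do(&reg_cl);
  }

  void G1CollectedHeap::unregister_nmethod(nmethod* nm) {
    // Mirror image: drop nm from each such region's list.
    UnregisterNMethodOopClosure unreg_cl(this, nm);
    nm->oops_do(&unreg_cl);
  }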
1673 // Verification | |
1674 | |
1675 // The following is just to alert the verification code | |
1676 // that a full collection has occurred and that the | |
1677 // remembered sets are no longer up to date. | |
1678 bool _full_collection; | |
1679 void set_full_collection() { _full_collection = true;} | |
1680 void clear_full_collection() {_full_collection = false;} | |
1681 bool full_collection() {return _full_collection;} | |
1682 | |
1552 // Perform any cleanup actions necessary before allowing a verification. | 1683 // Perform any cleanup actions necessary before allowing a verification. |
1553 virtual void prepare_for_verify(); | 1684 virtual void prepare_for_verify(); |
1554 | 1685 |
1555 // Perform verification. | 1686 // Perform verification. |
1556 | 1687 |
1570 void verify(bool silent, VerifyOption vo); | 1701 void verify(bool silent, VerifyOption vo); |
1571 | 1702 |
1572 // Override; it uses the "prev" marking information | 1703 // Override; it uses the "prev" marking information |
1573 virtual void verify(bool silent); | 1704 virtual void verify(bool silent); |
1574 | 1705 |
1575 virtual void print_on(outputStream* st) const; | |
1576 virtual void print_extended_on(outputStream* st) const; | |
1577 virtual void print_on_error(outputStream* st) const; | |
1578 | |
1579 virtual void print_gc_threads_on(outputStream* st) const; | |
1580 virtual void gc_threads_do(ThreadClosure* tc) const; | |
1581 | |
1582 // Override | |
1583 void print_tracing_info() const; | |
1584 | |
1585 // The following two methods are helpful for debugging RSet issues. | |
1586 void print_cset_rsets() PRODUCT_RETURN; | |
1587 void print_all_rsets() PRODUCT_RETURN; | |
1588 | |
1589 // Convenience function to be used in situations where the heap type can be | |
1590 // asserted to be this type. | |
1591 static G1CollectedHeap* heap(); | |
1592 | |
1593 void set_region_short_lived_locked(HeapRegion* hr); | |
1594 // add appropriate methods for any other surv rate groups | |
1595 | |
1596 YoungList* young_list() { return _young_list; } | |
1597 | |
1598 // debugging | |
1599 bool check_young_list_well_formed() { | |
1600 return _young_list->check_list_well_formed(); | |
1601 } | |
1602 | |
1603 bool check_young_list_empty(bool check_heap, | |
1604 bool check_sample = true); | |
1605 | |
1606 // *** Stuff related to concurrent marking. It's not clear to me that so | |
1607 // many of these need to be public. | |
1608 | |
1609 // The functions below are helper functions that a subclass of | |
1610 // "CollectedHeap" can use in the implementation of its virtual | |
1611 // functions. | |
1612 // This performs a concurrent marking of the live objects in a | |
1613 // bitmap off to the side. | |
1614 void doConcurrentMark(); | |
1615 | |
1616 bool isMarkedPrev(oop obj) const; | |
1617 bool isMarkedNext(oop obj) const; | |
1618 | |
1619 // Determine if an object is dead, given the object and also | |
1620 // the region to which the object belongs. An object is dead | |
1621 // iff a) it was not allocated since the last mark and b) it | |
1622 // is not marked. | |
1623 | |
1624 bool is_obj_dead(const oop obj, const HeapRegion* hr) const { | |
1625 return | |
1626 !hr->obj_allocated_since_prev_marking(obj) && | |
1627 !isMarkedPrev(obj); | |
1628 } | |
1629 | |
1630 // This function returns true when an object has been | |
1631 // around since the previous marking and hasn't yet | |
1632 // been marked during this marking. | |
1633 | |
1634 bool is_obj_ill(const oop obj, const HeapRegion* hr) const { | |
1635 return | |
1636 !hr->obj_allocated_since_next_marking(obj) && | |
1637 !isMarkedNext(obj); | |
1638 } | |
1639 | |
1640 // Determine if an object is dead, given only the object itself. | |
1641 // This will find the region to which the object belongs and | |
1642 // then call the region version of the same function. | |
1643 | |
1644 // Note: if it is NULL it isn't dead. |
1645 | |
1646 bool is_obj_dead(const oop obj) const { | |
1647 const HeapRegion* hr = heap_region_containing(obj); | |
1648 if (hr == NULL) { | |
1649 if (obj == NULL) return false; | |
1650 else return true; | |
1651 } | |
1652 else return is_obj_dead(obj, hr); | |
1653 } | |
1654 | |
1655 bool is_obj_ill(const oop obj) const { | |
1656 const HeapRegion* hr = heap_region_containing(obj); | |
1657 if (hr == NULL) { | |
1658 if (obj == NULL) return false; | |
1659 else return true; | |
1660 } | |
1661 else return is_obj_ill(obj, hr); | |
1662 } | |
1663 | |
1664 // The methods below are here for convenience and dispatch the | 1706 // The methods below are here for convenience and dispatch the |
1665 // appropriate method depending on value of the given VerifyOption | 1707 // appropriate method depending on value of the given VerifyOption |
1666 // parameter. The options for that parameter are: | 1708 // parameter. The values for that parameter, and their meanings, |
1667 // | 1709 // are the same as those above. |
1668 // vo == UsePrevMarking -> use "prev" marking information, | |
1669 // vo == UseNextMarking -> use "next" marking information, | |
1670 // vo == UseMarkWord -> use mark word from object header | |
1671 | 1710 |
1672 bool is_obj_dead_cond(const oop obj, | 1711 bool is_obj_dead_cond(const oop obj, |
1673 const HeapRegion* hr, | 1712 const HeapRegion* hr, |
1674 const VerifyOption vo) const { | 1713 const VerifyOption vo) const { |
1675 switch (vo) { | 1714 switch (vo) { |
1690 default: ShouldNotReachHere(); | 1729 default: ShouldNotReachHere(); |
1691 } | 1730 } |
1692 return false; // keep some compilers happy | 1731 return false; // keep some compilers happy |
1693 } | 1732 } |
1694 | 1733 |
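The dispatch that the comment above describes would look roughly like the following sketch (the VerifyOption enumerator names are assumptions from this era of HotSpot, not taken from the elided lines):

  switch (vo) {
    case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);   // "prev" info
    case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);    // "next" info
    case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();   // mark word
    default:                            ShouldNotReachHere();
  }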
1695 bool allocated_since_marking(oop obj, HeapRegion* hr, VerifyOption vo); | 1734 // Printing |
1696 HeapWord* top_at_mark_start(HeapRegion* hr, VerifyOption vo); | 1735 |
1697 bool is_marked(oop obj, VerifyOption vo); | 1736 virtual void print_on(outputStream* st) const; |
1698 const char* top_at_mark_start_str(VerifyOption vo); | 1737 virtual void print_extended_on(outputStream* st) const; |
1699 | 1738 virtual void print_on_error(outputStream* st) const; |
1700 // The following is just to alert the verification code | 1739 |
1701 // that a full collection has occurred and that the | 1740 virtual void print_gc_threads_on(outputStream* st) const; |
1702 // remembered sets are no longer up to date. | 1741 virtual void gc_threads_do(ThreadClosure* tc) const; |
1703 bool _full_collection; | 1742 |
1704 void set_full_collection() { _full_collection = true;} | 1743 // Override |
1705 void clear_full_collection() {_full_collection = false;} | 1744 void print_tracing_info() const; |
1706 bool full_collection() {return _full_collection;} | 1745 |
1707 | 1746 // The following two methods are helpful for debugging RSet issues. |
1708 ConcurrentMark* concurrent_mark() const { return _cm; } | 1747 void print_cset_rsets() PRODUCT_RETURN; |
1709 ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; } | 1748 void print_all_rsets() PRODUCT_RETURN; |
1710 | |
1711 // The dirty cards region list is used to record a subset of regions | |
1712 // whose cards need clearing. The list is populated during the |
1713 // remembered set scanning and drained during the card table | |
1714 // cleanup. Although the methods are reentrant, population/draining | |
1715 // phases must not overlap. For synchronization purposes the last | |
1716 // element on the list points to itself. | |
1717 HeapRegion* _dirty_cards_region_list; | |
1718 void push_dirty_cards_region(HeapRegion* hr); | |
1719 HeapRegion* pop_dirty_cards_region(); | |
1720 | 1749 |
1721 public: | 1750 public: |
1722 void stop_conc_gc_threads(); | 1751 void stop_conc_gc_threads(); |
1723 | 1752 |
1724 size_t pending_card_num(); | 1753 size_t pending_card_num(); |