comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp @ 14518:d8041d695d19

Merged with jdk9/dev/hotspot changeset 3812c088b945
author twisti
date Tue, 11 Mar 2014 18:45:59 -0700
parents 02f27ecb4f3a 60fd6d24f49f
children 4ca6dc0799b6
14141:f97c5ec83832 (left)   14518:d8041d695d19 (right)
207 friend class MutatorAllocRegion; 207 friend class MutatorAllocRegion;
208 friend class SurvivorGCAllocRegion; 208 friend class SurvivorGCAllocRegion;
209 friend class OldGCAllocRegion; 209 friend class OldGCAllocRegion;
210 210
211 // Closures used in implementation. 211 // Closures used in implementation.
212 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object> 212 template <G1Barrier barrier, bool do_mark_object>
213 friend class G1ParCopyClosure; 213 friend class G1ParCopyClosure;
214 friend class G1IsAliveClosure; 214 friend class G1IsAliveClosure;
215 friend class G1EvacuateFollowersClosure; 215 friend class G1EvacuateFollowersClosure;
216 friend class G1ParScanThreadState; 216 friend class G1ParScanThreadState;
217 friend class G1ParScanClosureSuper; 217 friend class G1ParScanClosureSuper;
604 // allocation region, either by picking one or expanding the 604 // allocation region, either by picking one or expanding the
605 // heap, and then allocate a block of the given size. The block 605 // heap, and then allocate a block of the given size. The block
606 // may not be humongous - it must fit into a single heap region. 606 // may not be humongous - it must fit into a single heap region.
607 HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size); 607 HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size);
608 608
609 HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
610 HeapRegion* alloc_region,
611 bool par,
612 size_t word_size);
613
609 // Ensure that no further allocations can happen in "r", bearing in mind 614 // Ensure that no further allocations can happen in "r", bearing in mind
610 // that parallel threads might be attempting allocations. 615 // that parallel threads might be attempting allocations.
611 void par_allocate_remaining_space(HeapRegion* r); 616 void par_allocate_remaining_space(HeapRegion* r);
612 617
613 // Allocation attempt during GC for a survivor object / PLAB. 618 // Allocation attempt during GC for a survivor object / PLAB.
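For orientation, the slow path declared above (allocate_during_gc_slow together with par_allocate_remaining_space) can be sketched outside HotSpot. Everything below is illustrative only: the Region type, the pick_or_expand hook, and the plain mutex are made-up stand-ins, not the real HeapRegion or G1 locking.

#include <cstddef>
#include <mutex>

// Hypothetical, heavily simplified stand-in for HeapRegion.
struct Region {
  char* top;
  char* end;
  char* allocate(size_t bytes) {              // bump-pointer fast path
    if (top + bytes > end) return nullptr;
    char* result = top;
    top += bytes;
    return result;
  }
};

// Sketch of the slow path: seal the current GC alloc region so racing
// threads cannot allocate into it (the par_allocate_remaining_space idea),
// pick a free region or expand the heap, then satisfy the request there.
// The block still has to fit into a single region.
char* allocate_during_gc_slow_sketch(Region*& current, size_t bytes,
                                     std::mutex& heap_lock,
                                     Region* (*pick_or_expand)()) {
  std::lock_guard<std::mutex> guard(heap_lock);
  if (current != nullptr) {
    current->top = current->end;              // no further allocations in the old region
  }
  Region* fresh = pick_or_expand();           // may return nullptr if the heap is full
  if (fresh == nullptr) return nullptr;
  current = fresh;
  return current->allocate(bytes);
}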
701 assert(!_in_cset_fast_test_base[index], "invariant"); 706 assert(!_in_cset_fast_test_base[index], "invariant");
702 _in_cset_fast_test_base[index] = true; 707 _in_cset_fast_test_base[index] = true;
703 } 708 }
704 709
705 // This is a fast test on whether a reference points into the 710 // This is a fast test on whether a reference points into the
706 // collection set or not. It does not assume that the reference 711 // collection set or not. Assume that the reference
707 // points into the heap; if it doesn't, it will return false. 712 // points into the heap.
708 bool in_cset_fast_test(oop obj) { 713 bool in_cset_fast_test(oop obj) {
709 assert(_in_cset_fast_test != NULL, "sanity"); 714 assert(_in_cset_fast_test != NULL, "sanity");
710 if (_g1_committed.contains((HeapWord*) obj)) { 715 assert(_g1_committed.contains((HeapWord*) obj), err_msg("Given reference outside of heap, is "PTR_FORMAT, (HeapWord*)obj));
711 // no need to subtract the bottom of the heap from obj, 716 // no need to subtract the bottom of the heap from obj,
712 // _in_cset_fast_test is biased 717 // _in_cset_fast_test is biased
713 uintx index = cast_from_oop<uintx>(obj) >> HeapRegion::LogOfHRGrainBytes; 718 uintx index = cast_from_oop<uintx>(obj) >> HeapRegion::LogOfHRGrainBytes;
714 bool ret = _in_cset_fast_test[index]; 719 bool ret = _in_cset_fast_test[index];
715 // let's make sure the result is consistent with what the slower 720 // let's make sure the result is consistent with what the slower
716 // test returns 721 // test returns
717 assert( ret || !obj_in_cs(obj), "sanity"); 722 assert( ret || !obj_in_cs(obj), "sanity");
718 assert(!ret || obj_in_cs(obj), "sanity"); 723 assert(!ret || obj_in_cs(obj), "sanity");
719 return ret; 724 return ret;
720 } else {
721 return false;
722 }
723 } 725 }
724 726
725 void clear_cset_fast_test() { 727 void clear_cset_fast_test() {
726 assert(_in_cset_fast_test_base != NULL, "sanity"); 728 assert(_in_cset_fast_test_base != NULL, "sanity");
727 memset(_in_cset_fast_test_base, false, 729 memset(_in_cset_fast_test_base, false,
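The fast test above is a biased lookup table: one flag per heap region, indexed by the object address shifted right by the region-size log. Because the table pointer is pre-biased by (heap bottom >> shift), the lookup needs no subtraction of the heap base, which is what the "no need to subtract the bottom of the heap" comment refers to. A standalone sketch of the same idea, with invented names and none of the VM's assertions (the biased pointer is formed by out-of-range pointer arithmetic, exactly as the technique requires):

#include <cstdint>
#include <cstring>

// One bool per region; 'biased' points (out of range) at base minus
// (heap_bottom >> shift), so biased[addr >> shift] hits the right slot
// without subtracting the heap bottom on every query.
struct InCSetFastTest {
  bool*    base   = nullptr;
  bool*    biased = nullptr;
  unsigned shift  = 0;        // log2 of the region size in bytes

  void init(uintptr_t heap_bottom, size_t num_regions, unsigned region_shift) {
    shift  = region_shift;
    base   = new bool[num_regions];
    std::memset(base, 0, num_regions * sizeof(bool));
    biased = base - (heap_bottom >> shift);
  }
  void set_in_cset(uintptr_t region_start) { biased[region_start >> shift] = true; }
  bool contains(const void* obj) const {
    return biased[reinterpret_cast<uintptr_t>(obj) >> shift];
  }
};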
835 ScanningOption so, 837 ScanningOption so,
836 OopClosure* scan_non_heap_roots, 838 OopClosure* scan_non_heap_roots,
837 OopsInHeapRegionClosure* scan_rs, 839 OopsInHeapRegionClosure* scan_rs,
838 G1KlassScanClosure* scan_klasses, 840 G1KlassScanClosure* scan_klasses,
839 int worker_i); 841 int worker_i);
840
841 // Apply "blk" to all the weak roots of the system. These include
842 // JNI weak roots, the code cache, system dictionary, symbol table,
843 // string table, and referents of reachable weak refs.
844 void g1_process_weak_roots(OopClosure* root_closure);
845 842
846 // Frees a non-humongous region by initializing its contents and 843 // Frees a non-humongous region by initializing its contents and
847 // adding it to the free list that's passed as a parameter (this is 844 // adding it to the free list that's passed as a parameter (this is
848 // usually a local list which will be appended to the master free 845 // usually a local list which will be appended to the master free
849 // list later). The used bytes of freed regions are accumulated in 846 // list later). The used bytes of freed regions are accumulated in
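The "local list appended to the master free list later" pattern mentioned above is worth spelling out: each worker accumulates freed regions privately and transfers them to the shared list in one step, so the shared lock is taken once per batch rather than once per region. A generic sketch with illustrative types (not the real FreeRegionList interface):

#include <list>
#include <mutex>

// Illustrative only: region indices stand in for HeapRegion*.
struct MasterFreeList {
  std::mutex     lock;
  std::list<int> regions;

  // Append a worker-local list in O(1); the lock is held once per batch.
  void append(std::list<int>& local) {
    std::lock_guard<std::mutex> g(lock);
    regions.splice(regions.end(), local);
  }
};

// Typical use (hypothetical names): free many regions into 'local', publish once.
// std::list<int> local;
// for (int r : freed_this_pause) local.push_back(r);
// master.append(local);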
1186 // Some heaps may offer a contiguous region for shared non-blocking 1183 // Some heaps may offer a contiguous region for shared non-blocking
1187 // allocation, via inlined code (by exporting the address of the top and 1184 // allocation, via inlined code (by exporting the address of the top and
1188 // end fields defining the extent of the contiguous allocation region.) 1185 // end fields defining the extent of the contiguous allocation region.)
1189 // But G1CollectedHeap doesn't yet support this. 1186 // But G1CollectedHeap doesn't yet support this.
1190 1187
1191 // Return an estimate of the maximum allocation that could be performed
1192 // without triggering any collection or expansion activity. In a
1193 // generational collector, for example, this is probably the largest
1194 // allocation that could be supported (without expansion) in the youngest
1195 // generation. It is "unsafe" because no locks are taken; the result
1196 // should be treated as an approximation, not a guarantee, for use in
1197 // heuristic resizing decisions.
1198 virtual size_t unsafe_max_alloc();
1199
1200 virtual bool is_maximal_no_gc() const { 1188 virtual bool is_maximal_no_gc() const {
1201 return _g1_storage.uncommitted_size() == 0; 1189 return _g1_storage.uncommitted_size() == 0;
1202 } 1190 }
1203 1191
1204 // The total number of regions in the heap. 1192 // The total number of regions in the heap.
1385 HeapRegion* region_at(uint index) const { return _hrs.at(index); } 1373 HeapRegion* region_at(uint index) const { return _hrs.at(index); }
1386 1374
1387 // Divide the heap region sequence into "chunks" of some size (the number 1375 // Divide the heap region sequence into "chunks" of some size (the number
1388 // of regions divided by the number of parallel threads times some 1376 // of regions divided by the number of parallel threads times some
1389 // overpartition factor, currently 4). Assumes that this will be called 1377 // overpartition factor, currently 4). Assumes that this will be called
1390 // in parallel by ParallelGCThreads worker threads with discinct worker 1378 // in parallel by ParallelGCThreads worker threads with distinct worker
1391 // ids in the range [0..max(ParallelGCThreads-1, 1)], that all parallel 1379 // ids in the range [0..max(ParallelGCThreads-1, 1)], that all parallel
1392 // calls will use the same "claim_value", and that that claim value is 1380 // calls will use the same "claim_value", and that that claim value is
1393 // different from the claim_value of any heap region before the start of 1381 // different from the claim_value of any heap region before the start of
1394 // the iteration. Applies "blk->doHeapRegion" to each of the regions, by 1382 // the iteration. Applies "blk->doHeapRegion" to each of the regions, by
1395 // attempting to claim the first region in each chunk, and, if 1383 // attempting to claim the first region in each chunk, and, if
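A rough, self-contained model of the claim scheme described above. This is not the HotSpot implementation: real workers start at different offsets, the chunk size is derived from ParallelGCThreads and the overpartition factor, and the claim values live in the regions themselves, but the CAS-to-claim idea is the same.

#include <atomic>
#include <cstddef>

struct RegionClaims {
  std::atomic<unsigned>* claims;       // one claim word per region
  size_t                 num_regions;

  // Every worker calls this with the same claim_value; a chunk is processed
  // by whichever worker wins the CAS on the chunk's first region, so each
  // region is visited exactly once overall.
  template <typename Closure>
  void par_iterate(unsigned claim_value, size_t chunk_size, Closure& blk) {
    for (size_t start = 0; start < num_regions; start += chunk_size) {
      unsigned seen = claims[start].load();
      if (seen == claim_value) continue;                               // someone else got it
      if (!claims[start].compare_exchange_strong(seen, claim_value)) continue;
      size_t end = start + chunk_size;
      if (end > num_regions) end = num_regions;
      for (size_t i = start; i < end; ++i) blk(i);                     // blk->doHeapRegion analogue
    }
  }
};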
1482 virtual bool supports_heap_inspection() const { return true; } 1470 virtual bool supports_heap_inspection() const { return true; }
1483 1471
1484 // Section on thread-local allocation buffers (TLABs) 1472 // Section on thread-local allocation buffers (TLABs)
1485 // See CollectedHeap for semantics. 1473 // See CollectedHeap for semantics.
1486 1474
1487 virtual bool supports_tlab_allocation() const; 1475 bool supports_tlab_allocation() const;
1488 virtual size_t tlab_capacity(Thread* thr) const; 1476 size_t tlab_capacity(Thread* ignored) const;
1489 virtual size_t unsafe_max_tlab_alloc(Thread* thr) const; 1477 size_t tlab_used(Thread* ignored) const;
1478 size_t max_tlab_size() const;
1479 size_t unsafe_max_tlab_alloc(Thread* ignored) const;
1490 1480
1491 // Can a compiler initialize a new object without store barriers? 1481 // Can a compiler initialize a new object without store barriers?
1492 // This permission only extends from the creation of a new object 1482 // This permission only extends from the creation of a new object
1493 // via a TLAB up to the first subsequent safepoint. If such permission 1483 // via a TLAB up to the first subsequent safepoint. If such permission
1494 // is granted for this heap type, the compiler promises to call 1484 // is granted for this heap type, the compiler promises to call
1530 } 1520 }
1531 1521
1532 // Returns "true" iff the given word_size is "very large". 1522 // Returns "true" iff the given word_size is "very large".
1533 static bool isHumongous(size_t word_size) { 1523 static bool isHumongous(size_t word_size) {
1534 // Note this has to be strictly greater-than as the TLABs 1524 // Note this has to be strictly greater-than as the TLABs
1535 // are capped at the humongous thresold and we want to 1525 // are capped at the humongous threshold and we want to
1536 // ensure that we don't try to allocate a TLAB as 1526 // ensure that we don't try to allocate a TLAB as
1537 // humongous and that we don't allocate a humongous 1527 // humongous and that we don't allocate a humongous
1538 // object in a TLAB. 1528 // object in a TLAB.
1539 return word_size > _humongous_object_threshold_in_words; 1529 return word_size > _humongous_object_threshold_in_words;
1540 } 1530 }
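A quick worked example of the strict inequality, assuming (as G1 does, to the best of my knowledge) that the humongous threshold is half a region, with 1 MB regions and 8-byte heap words:

#include <cstddef>
#include <cstdio>

int main() {
  const size_t region_words    = (1024 * 1024) / 8; // 131072 words in a 1 MB region
  const size_t threshold_words = region_words / 2;  //  65536 words, assumed threshold

  auto is_humongous = [&](size_t word_size) { return word_size > threshold_words; };

  // TLABs are capped at the threshold, so a max-size TLAB is *not* humongous,
  // while anything strictly larger is.
  std::printf("threshold-sized: %d\n", (int)is_humongous(threshold_words));     // 0
  std::printf("one word larger: %d\n", (int)is_humongous(threshold_words + 1)); // 1
  return 0;
}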
1569 static G1CollectedHeap* heap(); 1559 static G1CollectedHeap* heap();
1570 1560
1571 void set_region_short_lived_locked(HeapRegion* hr); 1561 void set_region_short_lived_locked(HeapRegion* hr);
1572 // add appropriate methods for any other surv rate groups 1562 // add appropriate methods for any other surv rate groups
1573 1563
1574 YoungList* young_list() { return _young_list; } 1564 YoungList* young_list() const { return _young_list; }
1575 1565
1576 // debugging 1566 // debugging
1577 bool check_young_list_well_formed() { 1567 bool check_young_list_well_formed() {
1578 return _young_list->check_list_well_formed(); 1568 return _young_list->check_list_well_formed();
1579 } 1569 }
1660 void push_dirty_cards_region(HeapRegion* hr); 1650 void push_dirty_cards_region(HeapRegion* hr);
1661 HeapRegion* pop_dirty_cards_region(); 1651 HeapRegion* pop_dirty_cards_region();
1662 1652
1663 // Optimized nmethod scanning support routines 1653 // Optimized nmethod scanning support routines
1664 1654
1665 // Register the given nmethod with the G1 heap 1655 // Register the given nmethod with the G1 heap.
1666 virtual void register_nmethod(nmethod* nm); 1656 virtual void register_nmethod(nmethod* nm);
1667 1657
1668 // Unregister the given nmethod from the G1 heap 1658 // Unregister the given nmethod from the G1 heap.
1669 virtual void unregister_nmethod(nmethod* nm); 1659 virtual void unregister_nmethod(nmethod* nm);
1670 1660
1671 // Migrate the nmethods in the code root lists of the regions 1661 // Migrate the nmethods in the code root lists of the regions
1672 // in the collection set to regions in to-space. In the event 1662 // in the collection set to regions in to-space. In the event
1673 // of an evacuation failure, nmethods that reference objects 1663 // of an evacuation failure, nmethods that reference objects
1674 // that were not successfullly evacuated are not migrated. 1664 // that were not successfully evacuated are not migrated.
1675 void migrate_strong_code_roots(); 1665 void migrate_strong_code_roots();
1676 1666
1677 // During an initial mark pause, mark all the code roots that 1667 // During an initial mark pause, mark all the code roots that
1678 // point into regions *not* in the collection set. 1668 // point into regions *not* in the collection set.
1679 void mark_strong_code_roots(uint worker_id); 1669 void mark_strong_code_roots(uint worker_id);
1680 1670
1681 // Rebuild the stong code root lists for each region 1671 // Rebuild the strong code root lists for each region
1682 // after a full GC 1672 // after a full GC.
1683 void rebuild_strong_code_roots(); 1673 void rebuild_strong_code_roots();
1674
1675 // Delete entries for dead interned string and clean up unreferenced symbols
1676 // in symbol table, possibly in parallel.
1677 void unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool unlink_strings = true, bool unlink_symbols = true);
1684 1678
1685 // Verification 1679 // Verification
1686 1680
1687 // The following is just to alert the verification code 1681 // The following is just to alert the verification code
1688 // that a full collection has occurred and that the 1682 // that a full collection has occurred and that the
1785 if (_retired) 1779 if (_retired)
1786 return; 1780 return;
1787 ParGCAllocBuffer::retire(end_of_gc, retain); 1781 ParGCAllocBuffer::retire(end_of_gc, retain);
1788 _retired = true; 1782 _retired = true;
1789 } 1783 }
1790
1791 bool is_retired() {
1792 return _retired;
1793 }
1794 };
1795
1796 class G1ParGCAllocBufferContainer {
1797 protected:
1798 static int const _priority_max = 2;
1799 G1ParGCAllocBuffer* _priority_buffer[_priority_max];
1800
1801 public:
1802 G1ParGCAllocBufferContainer(size_t gclab_word_size) {
1803 for (int pr = 0; pr < _priority_max; ++pr) {
1804 _priority_buffer[pr] = new G1ParGCAllocBuffer(gclab_word_size);
1805 }
1806 }
1807
1808 ~G1ParGCAllocBufferContainer() {
1809 for (int pr = 0; pr < _priority_max; ++pr) {
1810 assert(_priority_buffer[pr]->is_retired(), "alloc buffers should all retire at this point.");
1811 delete _priority_buffer[pr];
1812 }
1813 }
1814
1815 HeapWord* allocate(size_t word_sz) {
1816 HeapWord* obj;
1817 for (int pr = 0; pr < _priority_max; ++pr) {
1818 obj = _priority_buffer[pr]->allocate(word_sz);
1819 if (obj != NULL) return obj;
1820 }
1821 return obj;
1822 }
1823
1824 bool contains(void* addr) {
1825 for (int pr = 0; pr < _priority_max; ++pr) {
1826 if (_priority_buffer[pr]->contains(addr)) return true;
1827 }
1828 return false;
1829 }
1830
1831 void undo_allocation(HeapWord* obj, size_t word_sz) {
1832 bool finish_undo = false;
1833 for (int pr = 0; pr < _priority_max; ++pr) {
1834 if (_priority_buffer[pr]->contains(obj)) {
1835 _priority_buffer[pr]->undo_allocation(obj, word_sz);
1836 finish_undo = true;
1837 }
1838 }
1839 if (!finish_undo) ShouldNotReachHere();
1840 }
1841
1842 size_t words_remaining() {
1843 size_t result = 0;
1844 for (int pr = 0; pr < _priority_max; ++pr) {
1845 result += _priority_buffer[pr]->words_remaining();
1846 }
1847 return result;
1848 }
1849
1850 size_t words_remaining_in_retired_buffer() {
1851 G1ParGCAllocBuffer* retired = _priority_buffer[0];
1852 return retired->words_remaining();
1853 }
1854
1855 void flush_stats_and_retire(PLABStats* stats, bool end_of_gc, bool retain) {
1856 for (int pr = 0; pr < _priority_max; ++pr) {
1857 _priority_buffer[pr]->flush_stats_and_retire(stats, end_of_gc, retain);
1858 }
1859 }
1860
1861 void update(bool end_of_gc, bool retain, HeapWord* buf, size_t word_sz) {
1862 G1ParGCAllocBuffer* retired_and_set = _priority_buffer[0];
1863 retired_and_set->retire(end_of_gc, retain);
1864 retired_and_set->set_buf(buf);
1865 retired_and_set->set_word_size(word_sz);
1866 adjust_priority_order();
1867 }
1868
1869 private:
1870 void adjust_priority_order() {
1871 G1ParGCAllocBuffer* retired_and_set = _priority_buffer[0];
1872
1873 int last = _priority_max - 1;
1874 for (int pr = 0; pr < last; ++pr) {
1875 _priority_buffer[pr] = _priority_buffer[pr + 1];
1876 }
1877 _priority_buffer[last] = retired_and_set;
1878 }
1879 }; 1784 };
1880 1785
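For context on the class being removed above: it keeps two PLABs per purpose in priority order, so when a new buffer is installed the older, partially filled one moves up to first priority and its tail can still absorb small objects instead of being counted as waste immediately. A self-contained sketch of just that rotation, where Buffer is a made-up stand-in for G1ParGCAllocBuffer:

#include <cstddef>

// Minimal bump-pointer buffer, standing in for G1ParGCAllocBuffer.
struct Buffer {
  char* cur = nullptr;
  char* end = nullptr;
  void  set_buf(void* p, size_t bytes) { cur = static_cast<char*>(p); end = cur + bytes; }
  void  retire() { /* the real code flushes stats and fills the unused tail */ }
  void* allocate(size_t bytes) {
    if (cur + bytes > end) return nullptr;
    void* r = cur; cur += bytes; return r;
  }
};

// Two-slot priority scheme of the removed container: slot[0] is tried first;
// refill() retires slot[0], points it at the fresh memory, and rotates it to
// the back so the older, partially filled buffer keeps first priority.
struct TwoSlotPlab {
  Buffer* slot[2];

  void* allocate(size_t bytes) {
    for (int i = 0; i < 2; ++i) {
      if (void* p = slot[i]->allocate(bytes)) return p;
    }
    return nullptr;
  }

  void refill(void* fresh_buf, size_t fresh_bytes) {
    Buffer* refilled = slot[0];
    refilled->retire();
    refilled->set_buf(fresh_buf, fresh_bytes);
    slot[0] = slot[1];      // older buffer moves up to first priority
    slot[1] = refilled;     // fresh buffer becomes the fallback
  }
};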
1881 class G1ParScanThreadState : public StackObj { 1786 class G1ParScanThreadState : public StackObj {
1882 protected: 1787 protected:
1883 G1CollectedHeap* _g1h; 1788 G1CollectedHeap* _g1h;
1884 RefToScanQueue* _refs; 1789 RefToScanQueue* _refs;
1885 DirtyCardQueue _dcq; 1790 DirtyCardQueue _dcq;
1886 G1SATBCardTableModRefBS* _ct_bs; 1791 G1SATBCardTableModRefBS* _ct_bs;
1887 G1RemSet* _g1_rem; 1792 G1RemSet* _g1_rem;
1888 1793
1889 G1ParGCAllocBufferContainer _surviving_alloc_buffer; 1794 G1ParGCAllocBuffer _surviving_alloc_buffer;
1890 G1ParGCAllocBufferContainer _tenured_alloc_buffer; 1795 G1ParGCAllocBuffer _tenured_alloc_buffer;
1891 G1ParGCAllocBufferContainer* _alloc_buffers[GCAllocPurposeCount]; 1796 G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
1892 ageTable _age_table; 1797 ageTable _age_table;
1798
1799 G1ParScanClosure _scanner;
1893 1800
1894 size_t _alloc_buffer_waste; 1801 size_t _alloc_buffer_waste;
1895 size_t _undo_waste; 1802 size_t _undo_waste;
1896 1803
1897 OopsInHeapRegionClosure* _evac_failure_cl; 1804 OopsInHeapRegionClosure* _evac_failure_cl;
1941 } 1848 }
1942 } 1849 }
1943 } 1850 }
1944 1851
1945 public: 1852 public:
1946 G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num); 1853 G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp);
1947 1854
1948 ~G1ParScanThreadState() { 1855 ~G1ParScanThreadState() {
1949 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC); 1856 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
1950 } 1857 }
1951 1858
1952 RefToScanQueue* refs() { return _refs; } 1859 RefToScanQueue* refs() { return _refs; }
1953 ageTable* age_table() { return &_age_table; } 1860 ageTable* age_table() { return &_age_table; }
1954 1861
1955 G1ParGCAllocBufferContainer* alloc_buffer(GCAllocPurpose purpose) { 1862 G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
1956 return _alloc_buffers[purpose]; 1863 return _alloc_buffers[purpose];
1957 } 1864 }
1958 1865
1959 size_t alloc_buffer_waste() const { return _alloc_buffer_waste; } 1866 size_t alloc_buffer_waste() const { return _alloc_buffer_waste; }
1960 size_t undo_waste() const { return _undo_waste; } 1867 size_t undo_waste() const { return _undo_waste; }
1980 1887
1981 HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) { 1888 HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
1982 HeapWord* obj = NULL; 1889 HeapWord* obj = NULL;
1983 size_t gclab_word_size = _g1h->desired_plab_sz(purpose); 1890 size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
1984 if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) { 1891 if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
1985 G1ParGCAllocBufferContainer* alloc_buf = alloc_buffer(purpose); 1892 G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
1893 add_to_alloc_buffer_waste(alloc_buf->words_remaining());
1894 alloc_buf->retire(false /* end_of_gc */, false /* retain */);
1986 1895
1987 HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size); 1896 HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
1988 if (buf == NULL) return NULL; // Let caller handle allocation failure. 1897 if (buf == NULL) return NULL; // Let caller handle allocation failure.
1989 1898 // Otherwise.
1990 add_to_alloc_buffer_waste(alloc_buf->words_remaining_in_retired_buffer()); 1899 alloc_buf->set_word_size(gclab_word_size);
1991 alloc_buf->update(false /* end_of_gc */, false /* retain */, buf, gclab_word_size); 1900 alloc_buf->set_buf(buf);
1992 1901
1993 obj = alloc_buf->allocate(word_sz); 1902 obj = alloc_buf->allocate(word_sz);
1994 assert(obj != NULL, "buffer was definitely big enough..."); 1903 assert(obj != NULL, "buffer was definitely big enough...");
1995 } else { 1904 } else {
1996 obj = _g1h->par_allocate_during_gc(purpose, word_sz); 1905 obj = _g1h->par_allocate_during_gc(purpose, word_sz);
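The condition at the top of allocate_slow is the PLAB refill policy: only objects that are small relative to the PLAB justify discarding the old buffer's tail and starting a new one; larger objects are allocated directly in a GC alloc region. A worked example with an illustrative 4096-word PLAB, assuming the default ParallelGCBufferWastePct of 10:

#include <cstdio>
#include <initializer_list>

int main() {
  const unsigned long gclab_word_size          = 4096; // illustrative PLAB size
  const unsigned long ParallelGCBufferWastePct = 10;   // assumed default of the flag

  for (unsigned long word_sz : {64ul, 409ul, 410ul, 2048ul}) {
    bool refill = word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct;
    std::printf("word_sz=%4lu -> %s\n", word_sz,
                refill ? "retire PLAB, allocate a fresh one"
                       : "allocate directly in a GC alloc region");
  }
  return 0;   // objects up to 409 words (just under 10% of the PLAB) take the PLAB path
}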
2075 _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap), 1984 _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
2076 true /* end_of_gc */, 1985 true /* end_of_gc */,
2077 false /* retain */); 1986 false /* retain */);
2078 } 1987 }
2079 } 1988 }
1989
1990 oop copy_to_survivor_space(oop const obj);
2080 1991
2081 template <class T> void deal_with_reference(T* ref_to_scan) { 1992 template <class T> void deal_with_reference(T* ref_to_scan) {
2082 if (has_partial_array_mask(ref_to_scan)) { 1993 if (has_partial_array_mask(ref_to_scan)) {
2083 _partial_scan_cl->do_oop_nv(ref_to_scan); 1994 _partial_scan_cl->do_oop_nv(ref_to_scan);
2084 } else { 1995 } else {
2098 } else { 2009 } else {
2099 deal_with_reference((oop*)ref); 2010 deal_with_reference((oop*)ref);
2100 } 2011 }
2101 } 2012 }
2102 2013
2014 public:
2103 void trim_queue(); 2015 void trim_queue();
2104 }; 2016 };
2105 2017
2106 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP 2018 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP