comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp @ 14909:4ca6dc0799b6

Backout jdk9 merge
author Gilles Duboscq <duboscq@ssw.jku.at>
date Tue, 01 Apr 2014 13:57:07 +0200
parents d8041d695d19
children 52b4284cb496
14908:8db6e76cb658 14909:4ca6dc0799b6
207 friend class MutatorAllocRegion; 207 friend class MutatorAllocRegion;
208 friend class SurvivorGCAllocRegion; 208 friend class SurvivorGCAllocRegion;
209 friend class OldGCAllocRegion; 209 friend class OldGCAllocRegion;
210 210
211 // Closures used in implementation. 211 // Closures used in implementation.
212 template <G1Barrier barrier, bool do_mark_object> 212 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
213 friend class G1ParCopyClosure; 213 friend class G1ParCopyClosure;
214 friend class G1IsAliveClosure; 214 friend class G1IsAliveClosure;
215 friend class G1EvacuateFollowersClosure; 215 friend class G1EvacuateFollowersClosure;
216 friend class G1ParScanThreadState; 216 friend class G1ParScanThreadState;
217 friend class G1ParScanClosureSuper; 217 friend class G1ParScanClosureSuper;
604 // allocation region, either by picking one or expanding the 604 // allocation region, either by picking one or expanding the
605 // heap, and then allocate a block of the given size. The block 605 // heap, and then allocate a block of the given size. The block
606 // may not be humongous - it must fit into a single heap region. 606 // may not be humongous - it must fit into a single heap region.
607 HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size); 607 HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size);
608 608
609 HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
610 HeapRegion* alloc_region,
611 bool par,
612 size_t word_size);
613
614 // Ensure that no further allocations can happen in "r", bearing in mind 609 // Ensure that no further allocations can happen in "r", bearing in mind
615 // that parallel threads might be attempting allocations. 610 // that parallel threads might be attempting allocations.
616 void par_allocate_remaining_space(HeapRegion* r); 611 void par_allocate_remaining_space(HeapRegion* r);
617 612
618 // Allocation attempt during GC for a survivor object / PLAB. 613 // Allocation attempt during GC for a survivor object / PLAB.
706 assert(!_in_cset_fast_test_base[index], "invariant"); 701 assert(!_in_cset_fast_test_base[index], "invariant");
707 _in_cset_fast_test_base[index] = true; 702 _in_cset_fast_test_base[index] = true;
708 } 703 }
709 704
710 // This is a fast test on whether a reference points into the 705 // This is a fast test on whether a reference points into the
711 // collection set or not. Assume that the reference 706 // collection set or not. It does not assume that the reference
712 // points into the heap. 707 // points into the heap; if it doesn't, it will return false.
713 bool in_cset_fast_test(oop obj) { 708 bool in_cset_fast_test(oop obj) {
714 assert(_in_cset_fast_test != NULL, "sanity"); 709 assert(_in_cset_fast_test != NULL, "sanity");
715 assert(_g1_committed.contains((HeapWord*) obj), err_msg("Given reference outside of heap, is "PTR_FORMAT, (HeapWord*)obj)); 710 if (_g1_committed.contains((HeapWord*) obj)) {
716 // no need to subtract the bottom of the heap from obj, 711 // no need to subtract the bottom of the heap from obj,
717 // _in_cset_fast_test is biased 712 // _in_cset_fast_test is biased
718 uintx index = cast_from_oop<uintx>(obj) >> HeapRegion::LogOfHRGrainBytes; 713 uintx index = cast_from_oop<uintx>(obj) >> HeapRegion::LogOfHRGrainBytes;
719 bool ret = _in_cset_fast_test[index]; 714 bool ret = _in_cset_fast_test[index];
720 // let's make sure the result is consistent with what the slower 715 // let's make sure the result is consistent with what the slower
721 // test returns 716 // test returns
722 assert( ret || !obj_in_cs(obj), "sanity"); 717 assert( ret || !obj_in_cs(obj), "sanity");
723 assert(!ret || obj_in_cs(obj), "sanity"); 718 assert(!ret || obj_in_cs(obj), "sanity");
724 return ret; 719 return ret;
720 } else {
721 return false;
722 }
725 } 723 }
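The "biased" pointer mentioned in the comment above is what lets the fast test index the flag array with a single shift, without first subtracting the heap bottom. Below is a minimal, self-contained sketch of that trick, assuming 1 MB regions and a region-aligned heap bottom; the names (table_base, table_biased, kLogRegionBytes, setup_table, in_cset_fast) are invented for the illustration and are not the HotSpot code.

    #include <cstddef>
    #include <cstdint>

    static const int kLogRegionBytes = 20;   // assumed: 1 MB heap regions
    static bool*     table_base      = NULL; // one flag per region, index 0 = first region
    static bool*     table_biased    = NULL; // table_base shifted ("biased") by the heap bottom

    void setup_table(uintptr_t heap_bottom, size_t num_regions) {
      table_base = new bool[num_regions]();  // zero-initialized: nothing in the collection set yet
      // Bias the pointer once, so queries can index with (addr >> kLogRegionBytes)
      // directly instead of computing ((addr - heap_bottom) >> kLogRegionBytes).
      table_biased = table_base - (heap_bottom >> kLogRegionBytes);
    }

    bool in_cset_fast(uintptr_t addr) {
      return table_biased[addr >> kLogRegionBytes];  // no subtraction on the hot path
    }

Provided heap_bottom is region-aligned, table_biased[addr >> kLogRegionBytes] and table_base[(addr - heap_bottom) >> kLogRegionBytes] name the same entry, which is why the function above can skip the subtraction that its comment alludes to.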
726 724
727 void clear_cset_fast_test() { 725 void clear_cset_fast_test() {
728 assert(_in_cset_fast_test_base != NULL, "sanity"); 726 assert(_in_cset_fast_test_base != NULL, "sanity");
729 memset(_in_cset_fast_test_base, false, 727 memset(_in_cset_fast_test_base, false,
837 ScanningOption so, 835 ScanningOption so,
838 OopClosure* scan_non_heap_roots, 836 OopClosure* scan_non_heap_roots,
839 OopsInHeapRegionClosure* scan_rs, 837 OopsInHeapRegionClosure* scan_rs,
840 G1KlassScanClosure* scan_klasses, 838 G1KlassScanClosure* scan_klasses,
841 int worker_i); 839 int worker_i);
840
841 // Apply "blk" to all the weak roots of the system. These include
842 // JNI weak roots, the code cache, system dictionary, symbol table,
843 // string table, and referents of reachable weak refs.
844 void g1_process_weak_roots(OopClosure* root_closure);
842 845
843 // Frees a non-humongous region by initializing its contents and 846 // Frees a non-humongous region by initializing its contents and
844 // adding it to the free list that's passed as a parameter (this is 847 // adding it to the free list that's passed as a parameter (this is
845 // usually a local list which will be appended to the master free 848 // usually a local list which will be appended to the master free
846 // list later). The used bytes of freed regions are accumulated in 849 // list later). The used bytes of freed regions are accumulated in
1183 // Some heaps may offer a contiguous region for shared non-blocking 1186 // Some heaps may offer a contiguous region for shared non-blocking
1184 // allocation, via inlined code (by exporting the address of the top and 1187 // allocation, via inlined code (by exporting the address of the top and
1185 // end fields defining the extent of the contiguous allocation region.) 1188 // end fields defining the extent of the contiguous allocation region.)
1186 // But G1CollectedHeap doesn't yet support this. 1189 // But G1CollectedHeap doesn't yet support this.
1187 1190
1191 // Return an estimate of the maximum allocation that could be performed
1192 // without triggering any collection or expansion activity. In a
1193 // generational collector, for example, this is probably the largest
1194 // allocation that could be supported (without expansion) in the youngest
1195 // generation. It is "unsafe" because no locks are taken; the result
1196 // should be treated as an approximation, not a guarantee, for use in
1197 // heuristic resizing decisions.
1198 virtual size_t unsafe_max_alloc();
1199
1188 virtual bool is_maximal_no_gc() const { 1200 virtual bool is_maximal_no_gc() const {
1189 return _g1_storage.uncommitted_size() == 0; 1201 return _g1_storage.uncommitted_size() == 0;
1190 } 1202 }
1191 1203
1192 // The total number of regions in the heap. 1204 // The total number of regions in the heap.
1373 HeapRegion* region_at(uint index) const { return _hrs.at(index); } 1385 HeapRegion* region_at(uint index) const { return _hrs.at(index); }
1374 1386
1375 // Divide the heap region sequence into "chunks" of some size (the number 1387 // Divide the heap region sequence into "chunks" of some size (the number
1376 // of regions divided by the number of parallel threads times some 1388 // of regions divided by the number of parallel threads times some
1377 // overpartition factor, currently 4). Assumes that this will be called 1389 // overpartition factor, currently 4). Assumes that this will be called
1378 // in parallel by ParallelGCThreads worker threads with distinct worker 1390 // in parallel by ParallelGCThreads worker threads with distinct worker
1379 // ids in the range [0..max(ParallelGCThreads-1, 1)], that all parallel 1391 // ids in the range [0..max(ParallelGCThreads-1, 1)], that all parallel
1380 // calls will use the same "claim_value", and that that claim value is 1392 // calls will use the same "claim_value", and that that claim value is
1381 // different from the claim_value of any heap region before the start of 1393 // different from the claim_value of any heap region before the start of
1382 // the iteration. Applies "blk->doHeapRegion" to each of the regions, by 1394 // the iteration. Applies "blk->doHeapRegion" to each of the regions, by
1383 // attempting to claim the first region in each chunk, and, if 1395 // attempting to claim the first region in each chunk, and, if
1470 virtual bool supports_heap_inspection() const { return true; } 1482 virtual bool supports_heap_inspection() const { return true; }
1471 1483
1472 // Section on thread-local allocation buffers (TLABs) 1484 // Section on thread-local allocation buffers (TLABs)
1473 // See CollectedHeap for semantics. 1485 // See CollectedHeap for semantics.
1474 1486
1475 bool supports_tlab_allocation() const; 1487 virtual bool supports_tlab_allocation() const;
1476 size_t tlab_capacity(Thread* ignored) const; 1488 virtual size_t tlab_capacity(Thread* thr) const;
1477 size_t tlab_used(Thread* ignored) const; 1489 virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
1478 size_t max_tlab_size() const;
1479 size_t unsafe_max_tlab_alloc(Thread* ignored) const;
1480 1490
1481 // Can a compiler initialize a new object without store barriers? 1491 // Can a compiler initialize a new object without store barriers?
1482 // This permission only extends from the creation of a new object 1492 // This permission only extends from the creation of a new object
1483 // via a TLAB up to the first subsequent safepoint. If such permission 1493 // via a TLAB up to the first subsequent safepoint. If such permission
1484 // is granted for this heap type, the compiler promises to call 1494 // is granted for this heap type, the compiler promises to call
1520 } 1530 }
1521 1531
1522 // Returns "true" iff the given word_size is "very large". 1532 // Returns "true" iff the given word_size is "very large".
1523 static bool isHumongous(size_t word_size) { 1533 static bool isHumongous(size_t word_size) {
1524 // Note this has to be strictly greater-than as the TLABs 1534 // Note this has to be strictly greater-than as the TLABs
1525 // are capped at the humongous threshold and we want to 1535 // are capped at the humongous threshold and we want to
1526 // ensure that we don't try to allocate a TLAB as 1536 // ensure that we don't try to allocate a TLAB as
1527 // humongous and that we don't allocate a humongous 1537 // humongous and that we don't allocate a humongous
1528 // object in a TLAB. 1538 // object in a TLAB.
1529 return word_size > _humongous_object_threshold_in_words; 1539 return word_size > _humongous_object_threshold_in_words;
1530 } 1540 }
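A hedged worked example of the threshold, assuming the usual G1 setting where the humongous threshold is half a heap region and a 64-bit HeapWord of 8 bytes:

    region size          = 1 MB       = 131072 words
    humongous threshold  = 131072 / 2 =  65536 words (512 KB)
    isHumongous(65536)  -> false   (strictly greater-than: exactly half a region still fits in a TLAB)
    isHumongous(65537)  -> true    (allocated as a humongous object, never in a TLAB)

This is the interplay the comment describes: TLABs are capped at the threshold, so the strict comparison keeps the TLAB and humongous allocation paths disjoint.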
1559 static G1CollectedHeap* heap(); 1569 static G1CollectedHeap* heap();
1560 1570
1561 void set_region_short_lived_locked(HeapRegion* hr); 1571 void set_region_short_lived_locked(HeapRegion* hr);
1562 // add appropriate methods for any other surv rate groups 1572 // add appropriate methods for any other surv rate groups
1563 1573
1564 YoungList* young_list() const { return _young_list; } 1574 YoungList* young_list() { return _young_list; }
1565 1575
1566 // debugging 1576 // debugging
1567 bool check_young_list_well_formed() { 1577 bool check_young_list_well_formed() {
1568 return _young_list->check_list_well_formed(); 1578 return _young_list->check_list_well_formed();
1569 } 1579 }
1650 void push_dirty_cards_region(HeapRegion* hr); 1660 void push_dirty_cards_region(HeapRegion* hr);
1651 HeapRegion* pop_dirty_cards_region(); 1661 HeapRegion* pop_dirty_cards_region();
1652 1662
1653 // Optimized nmethod scanning support routines 1663 // Optimized nmethod scanning support routines
1654 1664
1655 // Register the given nmethod with the G1 heap. 1665 // Register the given nmethod with the G1 heap
1656 virtual void register_nmethod(nmethod* nm); 1666 virtual void register_nmethod(nmethod* nm);
1657 1667
1658 // Unregister the given nmethod from the G1 heap. 1668 // Unregister the given nmethod from the G1 heap
1659 virtual void unregister_nmethod(nmethod* nm); 1669 virtual void unregister_nmethod(nmethod* nm);
1660 1670
1661 // Migrate the nmethods in the code root lists of the regions 1671 // Migrate the nmethods in the code root lists of the regions
1662 // in the collection set to regions in to-space. In the event 1672 // in the collection set to regions in to-space. In the event
1663 // of an evacuation failure, nmethods that reference objects 1673 // of an evacuation failure, nmethods that reference objects
1664 // that were not successfully evacuated are not migrated. 1674 // that were not successfully evacuated are not migrated.
1665 void migrate_strong_code_roots(); 1675 void migrate_strong_code_roots();
1666 1676
1667 // During an initial mark pause, mark all the code roots that 1677 // During an initial mark pause, mark all the code roots that
1668 // point into regions *not* in the collection set. 1678 // point into regions *not* in the collection set.
1669 void mark_strong_code_roots(uint worker_id); 1679 void mark_strong_code_roots(uint worker_id);
1670 1680
1671 // Rebuild the strong code root lists for each region 1681 // Rebuild the strong code root lists for each region
1672 // after a full GC. 1682 // after a full GC.
1673 void rebuild_strong_code_roots(); 1683 void rebuild_strong_code_roots();
1674
1675 // Delete entries for dead interned string and clean up unreferenced symbols
1676 // in symbol table, possibly in parallel.
1677 void unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool unlink_strings = true, bool unlink_symbols = true);
1678 1684
1679 // Verification 1685 // Verification
1680 1686
1681 // The following is just to alert the verification code 1687 // The following is just to alert the verification code
1682 // that a full collection has occurred and that the 1688 // that a full collection has occurred and that the
1779 if (_retired) 1785 if (_retired)
1780 return; 1786 return;
1781 ParGCAllocBuffer::retire(end_of_gc, retain); 1787 ParGCAllocBuffer::retire(end_of_gc, retain);
1782 _retired = true; 1788 _retired = true;
1783 } 1789 }
1790
1791 bool is_retired() {
1792 return _retired;
1793 }
1794 };
1795
1796 class G1ParGCAllocBufferContainer {
1797 protected:
1798 static int const _priority_max = 2;
1799 G1ParGCAllocBuffer* _priority_buffer[_priority_max];
1800
1801 public:
1802 G1ParGCAllocBufferContainer(size_t gclab_word_size) {
1803 for (int pr = 0; pr < _priority_max; ++pr) {
1804 _priority_buffer[pr] = new G1ParGCAllocBuffer(gclab_word_size);
1805 }
1806 }
1807
1808 ~G1ParGCAllocBufferContainer() {
1809 for (int pr = 0; pr < _priority_max; ++pr) {
1810 assert(_priority_buffer[pr]->is_retired(), "alloc buffers should all retire at this point.");
1811 delete _priority_buffer[pr];
1812 }
1813 }
1814
1815 HeapWord* allocate(size_t word_sz) {
1816 HeapWord* obj;
1817 for (int pr = 0; pr < _priority_max; ++pr) {
1818 obj = _priority_buffer[pr]->allocate(word_sz);
1819 if (obj != NULL) return obj;
1820 }
1821 return obj;
1822 }
1823
1824 bool contains(void* addr) {
1825 for (int pr = 0; pr < _priority_max; ++pr) {
1826 if (_priority_buffer[pr]->contains(addr)) return true;
1827 }
1828 return false;
1829 }
1830
1831 void undo_allocation(HeapWord* obj, size_t word_sz) {
1832 bool finish_undo = false;  // must start false: set only when the owning buffer is found
1833 for (int pr = 0; pr < _priority_max; ++pr) {
1834 if (_priority_buffer[pr]->contains(obj)) {
1835 _priority_buffer[pr]->undo_allocation(obj, word_sz);
1836 finish_undo = true;
1837 }
1838 }
1839 if (!finish_undo) ShouldNotReachHere();
1840 }
1841
1842 size_t words_remaining() {
1843 size_t result = 0;
1844 for (int pr = 0; pr < _priority_max; ++pr) {
1845 result += _priority_buffer[pr]->words_remaining();
1846 }
1847 return result;
1848 }
1849
1850 size_t words_remaining_in_retired_buffer() {
1851 G1ParGCAllocBuffer* retired = _priority_buffer[0];
1852 return retired->words_remaining();
1853 }
1854
1855 void flush_stats_and_retire(PLABStats* stats, bool end_of_gc, bool retain) {
1856 for (int pr = 0; pr < _priority_max; ++pr) {
1857 _priority_buffer[pr]->flush_stats_and_retire(stats, end_of_gc, retain);
1858 }
1859 }
1860
1861 void update(bool end_of_gc, bool retain, HeapWord* buf, size_t word_sz) {
1862 G1ParGCAllocBuffer* retired_and_set = _priority_buffer[0];
1863 retired_and_set->retire(end_of_gc, retain);
1864 retired_and_set->set_buf(buf);
1865 retired_and_set->set_word_size(word_sz);
1866 adjust_priority_order();
1867 }
1868
1869 private:
1870 void adjust_priority_order() {
1871 G1ParGCAllocBuffer* retired_and_set = _priority_buffer[0];
1872
1873 int last = _priority_max - 1;
1874 for (int pr = 0; pr < last; ++pr) {
1875 _priority_buffer[pr] = _priority_buffer[pr + 1];
1876 }
1877 _priority_buffer[last] = retired_and_set;
1878 }
1784 }; 1879 };
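In this container allocate() probes the buffer at index 0 first, and update() retires that buffer, installs the freshly obtained memory into it, and rotates it to the back; the net effect is that the older, partially filled PLAB stays at the front where small objects can still use its tail before it is retired on the next refill. The following self-contained toy model illustrates just that rotation policy; PlabModel, TwoSlotContainer and all the sizes are invented for the example and are not HotSpot code.

    #include <cstdio>
    #include <cstddef>

    // Toy stand-in for a PLAB: fixed capacity, bump-pointer "used" count.
    struct PlabModel {
      size_t capacity;
      size_t used;
      explicit PlabModel(size_t cap) : capacity(cap), used(0) {}
      bool allocate(size_t word_sz) {            // true if word_sz words fit
        if (used + word_sz > capacity) return false;
        used += word_sz;
        return true;
      }
    };

    struct TwoSlotContainer {
      PlabModel* slot[2];                        // slot[0] is probed first, like _priority_buffer[0]

      bool allocate(size_t word_sz) {
        for (int pr = 0; pr < 2; ++pr) {
          if (slot[pr]->allocate(word_sz)) return true;
        }
        return false;
      }

      // Mirrors update(): "retire" and refill slot[0], then rotate it to the back
      // so the older, partially used buffer moves to the front.
      void refill_and_rotate(size_t fresh_capacity) {
        PlabModel* refreshed = slot[0];
        refreshed->capacity = fresh_capacity;
        refreshed->used = 0;
        slot[0] = slot[1];
        slot[1] = refreshed;
      }
    };

    int main() {
      PlabModel a(100), b(100);
      TwoSlotContainer c;
      c.slot[0] = &a;
      c.slot[1] = &b;

      c.allocate(90);              // fills most of buffer a
      c.allocate(90);              // does not fit in a, lands in buffer b
      c.refill_and_rotate(100);    // a is retired/refilled and rotated to the back
      c.allocate(8);               // small object fills b's remaining space first
      printf("b.used=%zu a.used=%zu\n", b.used, a.used);  // prints: b.used=98 a.used=0
      return 0;
    }

Keeping the nearly full buffer in front is the point of the priority ordering: its remaining words get a chance to be consumed by small survivors instead of being counted as waste the moment a new PLAB is requested.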
1785 1880
1786 class G1ParScanThreadState : public StackObj { 1881 class G1ParScanThreadState : public StackObj {
1787 protected: 1882 protected:
1788 G1CollectedHeap* _g1h; 1883 G1CollectedHeap* _g1h;
1789 RefToScanQueue* _refs; 1884 RefToScanQueue* _refs;
1790 DirtyCardQueue _dcq; 1885 DirtyCardQueue _dcq;
1791 G1SATBCardTableModRefBS* _ct_bs; 1886 G1SATBCardTableModRefBS* _ct_bs;
1792 G1RemSet* _g1_rem; 1887 G1RemSet* _g1_rem;
1793 1888
1794 G1ParGCAllocBuffer _surviving_alloc_buffer; 1889 G1ParGCAllocBufferContainer _surviving_alloc_buffer;
1795 G1ParGCAllocBuffer _tenured_alloc_buffer; 1890 G1ParGCAllocBufferContainer _tenured_alloc_buffer;
1796 G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount]; 1891 G1ParGCAllocBufferContainer* _alloc_buffers[GCAllocPurposeCount];
1797 ageTable _age_table; 1892 ageTable _age_table;
1798
1799 G1ParScanClosure _scanner;
1800 1893
1801 size_t _alloc_buffer_waste; 1894 size_t _alloc_buffer_waste;
1802 size_t _undo_waste; 1895 size_t _undo_waste;
1803 1896
1804 OopsInHeapRegionClosure* _evac_failure_cl; 1897 OopsInHeapRegionClosure* _evac_failure_cl;
1848 } 1941 }
1849 } 1942 }
1850 } 1943 }
1851 1944
1852 public: 1945 public:
1853 G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp); 1946 G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num);
1854 1947
1855 ~G1ParScanThreadState() { 1948 ~G1ParScanThreadState() {
1856 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC); 1949 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
1857 } 1950 }
1858 1951
1859 RefToScanQueue* refs() { return _refs; } 1952 RefToScanQueue* refs() { return _refs; }
1860 ageTable* age_table() { return &_age_table; } 1953 ageTable* age_table() { return &_age_table; }
1861 1954
1862 G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) { 1955 G1ParGCAllocBufferContainer* alloc_buffer(GCAllocPurpose purpose) {
1863 return _alloc_buffers[purpose]; 1956 return _alloc_buffers[purpose];
1864 } 1957 }
1865 1958
1866 size_t alloc_buffer_waste() const { return _alloc_buffer_waste; } 1959 size_t alloc_buffer_waste() const { return _alloc_buffer_waste; }
1867 size_t undo_waste() const { return _undo_waste; } 1960 size_t undo_waste() const { return _undo_waste; }
1887 1980
1888 HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) { 1981 HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
1889 HeapWord* obj = NULL; 1982 HeapWord* obj = NULL;
1890 size_t gclab_word_size = _g1h->desired_plab_sz(purpose); 1983 size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
1891 if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) { 1984 if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
1892 G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose); 1985 G1ParGCAllocBufferContainer* alloc_buf = alloc_buffer(purpose);
1893 add_to_alloc_buffer_waste(alloc_buf->words_remaining());
1894 alloc_buf->retire(false /* end_of_gc */, false /* retain */);
1895 1986
1896 HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size); 1987 HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
1897 if (buf == NULL) return NULL; // Let caller handle allocation failure. 1988 if (buf == NULL) return NULL; // Let caller handle allocation failure.
1898 // Otherwise. 1989
1899 alloc_buf->set_word_size(gclab_word_size); 1990 add_to_alloc_buffer_waste(alloc_buf->words_remaining_in_retired_buffer());
1900 alloc_buf->set_buf(buf); 1991 alloc_buf->update(false /* end_of_gc */, false /* retain */, buf, gclab_word_size);
1901 1992
1902 obj = alloc_buf->allocate(word_sz); 1993 obj = alloc_buf->allocate(word_sz);
1903 assert(obj != NULL, "buffer was definitely big enough..."); 1994 assert(obj != NULL, "buffer was definitely big enough...");
1904 } else { 1995 } else {
1905 obj = _g1h->par_allocate_during_gc(purpose, word_sz); 1996 obj = _g1h->par_allocate_during_gc(purpose, word_sz);
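The guard above (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) decides when refilling a PLAB is worthwhile. A hedged worked example, assuming the default ParallelGCBufferWastePct of 10 and a desired PLAB size of 4096 words:

    4096 * 10 / 100 = 409.6, so
    word_sz <= 409  -> request a fresh PLAB and allocate the object inside it
    word_sz >= 410  -> fall into the else branch and allocate directly in the region

Only objects small relative to the PLAB justify the refill; larger ones go straight to the region so that a PLAB is not retired (and its remaining words charged as waste) for a single large allocation.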
1984 _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap), 2075 _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
1985 true /* end_of_gc */, 2076 true /* end_of_gc */,
1986 false /* retain */); 2077 false /* retain */);
1987 } 2078 }
1988 } 2079 }
1989
1990 oop copy_to_survivor_space(oop const obj);
1991 2080
1992 template <class T> void deal_with_reference(T* ref_to_scan) { 2081 template <class T> void deal_with_reference(T* ref_to_scan) {
1993 if (has_partial_array_mask(ref_to_scan)) { 2082 if (has_partial_array_mask(ref_to_scan)) {
1994 _partial_scan_cl->do_oop_nv(ref_to_scan); 2083 _partial_scan_cl->do_oop_nv(ref_to_scan);
1995 } else { 2084 } else {
2009 } else { 2098 } else {
2010 deal_with_reference((oop*)ref); 2099 deal_with_reference((oop*)ref);
2011 } 2100 }
2012 } 2101 }
2013 2102
2014 public:
2015 void trim_queue(); 2103 void trim_queue();
2016 }; 2104 };
2017 2105
2018 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP 2106 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP