comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 355:0edda524b58c
6722565: G1: assert !r->is_on_unclean_list() fires
Summary: Under certain circumstances, two cleanup threads can claim and process the same region.
Reviewed-by: apetrusenko, ysr
author    tonyp
date      Wed, 06 Aug 2008 11:57:31 -0400
parents   c0f8f7790199
children  cc68c8e9b309
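Note on the change below: the old loop carved the heap into blocks, claimed only the first region of each block with claimHeapRegion(), and then marked the remaining regions of the block with a plain set_claim_value(); the new loop instead has every worker walk all regions from a staggered starting index and process a region only after winning the atomic claim on that region itself, so no region can be handled by two cleanup threads. The stand-alone sketch below illustrates just that claiming pattern, under the assumption that claimHeapRegion() behaves like a compare-and-swap on the region's claim value; it is not the HotSpot code, and the names (Region, claim_region, num_regions, num_workers) are illustrative only.

    #include <atomic>
    #include <cstdio>
    #include <thread>
    #include <vector>

    // One "heap region" with an atomically updated claim value (sketch only).
    struct Region {
      std::atomic<int> claim_value{0};   // 0 == unclaimed in this sketch
    };

    // A worker wins a region only if its compare-and-swap from `expected`
    // to `claim` succeeds; every other worker loses and skips the region.
    static bool claim_region(Region& r, int expected, int claim) {
      return r.claim_value.compare_exchange_strong(expected, claim);
    }

    int main() {
      const size_t num_regions = 64;
      const size_t num_workers = 4;
      const int    claim       = 1;

      std::vector<Region> regions(num_regions);
      std::atomic<size_t> processed{0};

      auto worker_body = [&](size_t worker) {
        // Stagger the starting points so workers begin in different parts
        // of the heap, but let each worker visit every region (mirrors the
        // new loop in heap_region_par_iterate_chunked).
        const size_t start = num_regions / num_workers * worker;
        for (size_t count = 0; count < num_regions; ++count) {
          const size_t index = (start + count) % num_regions;
          Region& r = regions[index];
          if (r.claim_value.load() == claim) {
            continue;                      // already claimed by some worker
          }
          if (claim_region(r, 0, claim)) {
            processed.fetch_add(1);        // stands in for cl->doHeapRegion(r)
          }
        }
      };

      std::vector<std::thread> workers;
      for (size_t w = 0; w < num_workers; ++w) {
        workers.emplace_back(worker_body, w);
      }
      for (std::thread& t : workers) {
        t.join();
      }

      // Every region is processed exactly once, however the workers race.
      std::printf("processed %zu of %zu regions\n",
                  processed.load(), num_regions);
      return 0;
    }

The diff's extra handling of "starts humongous" / "continues humongous" regions only affects the order in which claimed regions are passed to the closure; the per-region claim is what prevents double processing, so the sketch omits the humongous-region ordering.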
354:c0f8f7790199 | 355:0edda524b58c |
---|---|
1713 _hrs->iterate_from(idx, cl); | 1713 _hrs->iterate_from(idx, cl); |
1714 } | 1714 } |
1715 | 1715 |
1716 HeapRegion* G1CollectedHeap::region_at(size_t idx) { return _hrs->at(idx); } | 1716 HeapRegion* G1CollectedHeap::region_at(size_t idx) { return _hrs->at(idx); } |
1717 | 1717 |
1718 const int OverpartitionFactor = 4; | |
1719 void | 1718 void |
1720 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl, | 1719 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl, |
1721 int worker, | 1720 int worker, |
1722 jint claim_value) { | 1721 jint claim_value) { |
1723 // We break up the heap regions into blocks of size ParallelGCThreads (to | 1722 const size_t regions = n_regions(); |
1724 // decrease iteration costs). | 1723 const size_t worker_num = (ParallelGCThreads > 0 ? ParallelGCThreads : 1); |
1725 const size_t nregions = n_regions(); | 1724 // try to spread out the starting points of the workers |
1726 const size_t n_thrds = (ParallelGCThreads > 0 ? ParallelGCThreads : 1); | 1725 const size_t start_index = regions / worker_num * (size_t) worker; |
1727 const size_t partitions = n_thrds * OverpartitionFactor; | 1726 |
1728 const size_t BlkSize = MAX2(nregions/partitions, (size_t)1); | 1727 // each worker will actually look at all regions |
1729 const size_t n_blocks = (nregions + BlkSize - 1)/BlkSize; | 1728 for (size_t count = 0; count < regions; ++count) { |
1730 assert(ParallelGCThreads > 0 || worker == 0, "Precondition"); | 1729 const size_t index = (start_index + count) % regions; |
1731 const int init_idx = (int) (n_blocks/n_thrds * worker); | 1730 assert(0 <= index && index < regions, "sanity"); |
1732 for (size_t blk = 0; blk < n_blocks; blk++) { | 1731 HeapRegion* r = region_at(index); |
1733 size_t idx = init_idx + blk; | 1732 // we'll ignore "continues humongous" regions (we'll process them |
1734 if (idx >= n_blocks) idx = idx - n_blocks; | 1733 // when we come across their corresponding "starts humongous" |
1735 size_t reg_idx = idx * BlkSize; | 1734 // region) and regions already claimed |
1736 assert(reg_idx < nregions, "Because we rounded blk up."); | 1735 if (r->claim_value() == claim_value || r->continuesHumongous()) { |
1737 HeapRegion* r = region_at(reg_idx); | 1736 continue; |
1737 } | |
1738 // OK, try to claim it | |
1738 if (r->claimHeapRegion(claim_value)) { | 1739 if (r->claimHeapRegion(claim_value)) { |
1739 for (size_t j = 0; j < BlkSize; j++) { | 1740 // success! |
1740 size_t reg_idx2 = reg_idx + j; | 1741 assert(!r->continuesHumongous(), "sanity"); |
1741 if (reg_idx2 == nregions) break; | 1742 if (r->startsHumongous()) { |
1742 HeapRegion* r2 = region_at(reg_idx2); | 1743 // If the region is "starts humongous" we'll iterate over its |
1743 if (j > 0) r2->set_claim_value(claim_value); | 1744 // "continues humongous" regions; in fact we'll do them |
1744 bool res = cl->doHeapRegion(r2); | 1745 // first. The order is important. In one case, calling the |
1745 guarantee(!res, "Should not abort."); | 1746 // closure on the "starts humongous" region might de-allocate |
1747 // and clear all its "continues humongous" regions and, as a | |
1748 // result, we might end up processing them twice. So, we'll do | |
1749 // them first (notice: most closures will ignore them anyway) and | |
1750 // then we'll do the "starts humongous" region. | |
1751 for (size_t ch_index = index + 1; ch_index < regions; ++ch_index) { | |
1752 HeapRegion* chr = region_at(ch_index); | |
1753 | |
1754 // if the region has already been claimed or it's not | |
1755 // "continues humongous" we're done | |
1756 if (chr->claim_value() == claim_value || | |
1757 !chr->continuesHumongous()) { | |
1758 break; | |
1759 } | |
1760 | |
1761 // No one should have claimed it directly; we know this given |
1762 // that we claimed its "starts humongous" region. | |
1763 assert(chr->claim_value() != claim_value, "sanity"); | |
1764 assert(chr->humongous_start_region() == r, "sanity"); | |
1765 | |
1766 if (chr->claimHeapRegion(claim_value)) { | |
1767 // we should always be able to claim it; no one else should |
1768 // be trying to claim this region | |
1769 | |
1770 bool res2 = cl->doHeapRegion(chr); | |
1771 assert(!res2, "Should not abort"); | |
1772 | |
1773 // Right now, this holds (i.e., no closure that actually | |
1774 // does something with "continues humongous" regions | |
1775 // clears them). We might have to weaken it in the future, | |
1776 // but let's leave these two asserts here for extra safety. | |
1777 assert(chr->continuesHumongous(), "should still be the case"); | |
1778 assert(chr->humongous_start_region() == r, "sanity"); | |
1779 } else { | |
1780 guarantee(false, "we should not reach here"); | |
1781 } | |
1782 } | |
1746 } | 1783 } |
1747 } | 1784 |
1748 } | 1785 assert(!r->continuesHumongous(), "sanity"); |
1749 } | 1786 bool res = cl->doHeapRegion(r); |
1787 assert(!res, "Should not abort"); | |
1788 } | |
1789 } | |
1790 } | |
1791 | |
1792 #ifdef ASSERT | |
1793 // This checks whether all regions in the heap have the correct claim | |
1794 // value. It also piggy-backs a check to ensure that the |
1795 // humongous_start_region() information on "continues humongous" | |
1796 // regions is correct. | |
1797 | |
1798 class CheckClaimValuesClosure : public HeapRegionClosure { | |
1799 private: | |
1800 jint _claim_value; | |
1801 size_t _failures; | |
1802 HeapRegion* _sh_region; | |
1803 public: | |
1804 CheckClaimValuesClosure(jint claim_value) : | |
1805 _claim_value(claim_value), _failures(0), _sh_region(NULL) { } | |
1806 bool doHeapRegion(HeapRegion* r) { | |
1807 if (r->claim_value() != _claim_value) { | |
1808 gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), " | |
1809 "claim value = %d, should be %d", | |
1810 r->bottom(), r->end(), r->claim_value(), | |
1811 _claim_value); | |
1812 ++_failures; | |
1813 } | |
1814 if (!r->isHumongous()) { | |
1815 _sh_region = NULL; | |
1816 } else if (r->startsHumongous()) { | |
1817 _sh_region = r; | |
1818 } else if (r->continuesHumongous()) { | |
1819 if (r->humongous_start_region() != _sh_region) { | |
1820 gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), " | |
1821 "HS = "PTR_FORMAT", should be "PTR_FORMAT, | |
1822 r->bottom(), r->end(), | |
1823 r->humongous_start_region(), | |
1824 _sh_region); | |
1825 ++_failures; | |
1826 } | |
1827 } | |
1828 return false; | |
1829 } | |
1830 size_t failures() { | |
1831 return _failures; | |
1832 } | |
1833 }; | |
1834 | |
1835 bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) { | |
1836 CheckClaimValuesClosure cl(claim_value); | |
1837 heap_region_iterate(&cl); | |
1838 return cl.failures() == 0; | |
1839 } | |
1840 #endif // ASSERT | |
1750 | 1841 |
1751 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) { | 1842 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) { |
1752 HeapRegion* r = g1_policy()->collection_set(); | 1843 HeapRegion* r = g1_policy()->collection_set(); |
1753 while (r != NULL) { | 1844 while (r != NULL) { |
1754 HeapRegion* next = r->next_in_collection_set(); | 1845 HeapRegion* next = r->next_in_collection_set(); |