comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp @ 17833:bfdf528be8e8

8038498: Fix includes and C inlining after 8035330
Summary: Change 8035330: Remove G1ParScanPartialArrayClosure and G1ParScanHeapEvacClosure broke the debug build on AIX. The method do_oop_partial_array() is added in a header, but requires the inline function par_write_ref() through several inlined calls. In some cpp files, like arguments.cpp, par_write_ref() is not defined, as the corresponding inline header is not included. The AIX debug VM does not start because of the missing symbol. This change solves this by cleaning up include dependencies.
Reviewed-by: tschatzl, stefank
author tschatzl
date Fri, 04 Apr 2014 10:43:56 +0200
parents 8ee855b4e667
children 8847586c9037
17832:6df24530bf14 17833:bfdf528be8e8
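For context, the change applies HotSpot's usual split between a declaration header (.hpp) and an inline-definition header (.inline.hpp): a member function that transitively calls inline functions is only declared in the .hpp, and its body lives in the .inline.hpp, so translation units such as arguments.cpp that never call it do not pull in (or miss) the inline dependency chain. The following is a minimal sketch of that pattern with hypothetical names, not the actual G1 code:

    // foo.hpp -- declarations only; cheap and safe to include anywhere.
    class Foo {
     public:
      inline bool fast_test(void* obj) const;  // body in foo.inline.hpp
    };

    // foo.inline.hpp -- definitions; included only by the .cpp files
    // that actually call fast_test(), so a debug build never references
    // an inline symbol it cannot see.
    #include "foo.hpp"

    inline bool Foo::fast_test(void* obj) const {
      return obj != NULL;  // stands in for calls to further inline code
    }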
704 } 704 }
705 705
706 // This is a fast test on whether a reference points into the 706 // This is a fast test on whether a reference points into the
707 // collection set or not. Assume that the reference 707 // collection set or not. Assume that the reference
708 // points into the heap. 708 // points into the heap.
709 bool in_cset_fast_test(oop obj) { 709 inline bool in_cset_fast_test(oop obj);
710 assert(_in_cset_fast_test != NULL, "sanity");
711 assert(_g1_committed.contains((HeapWord*) obj), err_msg("Given reference outside of heap, is "PTR_FORMAT, (HeapWord*)obj));
712 // no need to subtract the bottom of the heap from obj,
713 // _in_cset_fast_test is biased
714 uintx index = cast_from_oop<uintx>(obj) >> HeapRegion::LogOfHRGrainBytes;
715 bool ret = _in_cset_fast_test[index];
716 // let's make sure the result is consistent with what the slower
717 // test returns
718 assert( ret || !obj_in_cs(obj), "sanity");
719 assert(!ret || obj_in_cs(obj), "sanity");
720 return ret;
721 }
722 710
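The body moved out above also documents a small trick worth noting: _in_cset_fast_test is a biased pointer, meaning the heap's bottom address (shifted by the region size) is pre-subtracted from the array base, so a shifted heap address can index the table directly. A self-contained sketch of the biasing, with hypothetical names and a fixed region size:

    #include <stdint.h>

    struct BiasedCSetTable {
      static const int LogOfGrainBytes = 20;  // 1 MB regions in this sketch
      bool* _base;    // one flag per region, covering the whole heap
      bool* _biased;  // _base - (heap_bottom >> LogOfGrainBytes)

      void init(bool* base, uintptr_t heap_bottom) {
        _base   = base;
        _biased = base - (heap_bottom >> LogOfGrainBytes);
      }

      // No "addr - heap_bottom" at lookup time: the bias is already
      // folded into the pointer, exactly as in in_cset_fast_test().
      bool contains(uintptr_t addr) const {
        return _biased[addr >> LogOfGrainBytes];
      }
    };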
723 void clear_cset_fast_test() { 711 void clear_cset_fast_test() {
724 assert(_in_cset_fast_test_base != NULL, "sanity"); 712 assert(_in_cset_fast_test_base != NULL, "sanity");
725 memset(_in_cset_fast_test_base, false, 713 memset(_in_cset_fast_test_base, false,
726 (size_t) _in_cset_fast_test_length * sizeof(bool)); 714 (size_t) _in_cset_fast_test_length * sizeof(bool));
1262 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); 1250 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
1263 append_secondary_free_list(); 1251 append_secondary_free_list();
1264 } 1252 }
1265 } 1253 }
1266 1254
1267 void old_set_remove(HeapRegion* hr) { 1255 inline void old_set_remove(HeapRegion* hr);
1268 _old_set.remove(hr);
1269 }
1270 1256
1271 size_t non_young_capacity_bytes() { 1257 size_t non_young_capacity_bytes() {
1272 return _old_set.total_capacity_bytes() + _humongous_set.total_capacity_bytes(); 1258 return _old_set.total_capacity_bytes() + _humongous_set.total_capacity_bytes();
1273 } 1259 }
1274 1260
1355 // Iterate over heap regions, in address order, terminating the 1341 // Iterate over heap regions, in address order, terminating the
1356 // iteration early if the "doHeapRegion" method returns "true". 1342 // iteration early if the "doHeapRegion" method returns "true".
1357 void heap_region_iterate(HeapRegionClosure* blk) const; 1343 void heap_region_iterate(HeapRegionClosure* blk) const;
1358 1344
1359 // Return the region with the given index. It assumes the index is valid. 1345 // Return the region with the given index. It assumes the index is valid.
1360 HeapRegion* region_at(uint index) const { return _hrs.at(index); } 1346 inline HeapRegion* region_at(uint index) const;
1361 1347
1362 // Divide the heap region sequence into "chunks" of some size (the number 1348 // Divide the heap region sequence into "chunks" of some size (the number
1363 // of regions divided by the number of parallel threads times some 1349 // of regions divided by the number of parallel threads times some
1364 // overpartition factor, currently 4). Assumes that this will be called 1350 // overpartition factor, currently 4). Assumes that this will be called
1365 // in parallel by ParallelGCThreads worker threads with distinct worker 1351 // in parallel by ParallelGCThreads worker threads with distinct worker
1484 1470
1485 virtual bool card_mark_must_follow_store() const { 1471 virtual bool card_mark_must_follow_store() const {
1486 return true; 1472 return true;
1487 } 1473 }
1488 1474
1489 bool is_in_young(const oop obj) { 1475 inline bool is_in_young(const oop obj);
1490 HeapRegion* hr = heap_region_containing(obj);
1491 return hr != NULL && hr->is_young();
1492 }
1493 1476
1494 #ifdef ASSERT 1477 #ifdef ASSERT
1495 virtual bool is_in_partial_collection(const void* p); 1478 virtual bool is_in_partial_collection(const void* p);
1496 #endif 1479 #endif
1497 1480
1500 // We don't need barriers for initializing stores to objects 1483 // We don't need barriers for initializing stores to objects
1501 // in the young gen: for the SATB pre-barrier, there is no 1484 // in the young gen: for the SATB pre-barrier, there is no
1502 // pre-value that needs to be remembered; for the remembered-set 1485 // pre-value that needs to be remembered; for the remembered-set
1503 // update logging post-barrier, we don't maintain remembered set 1486 // update logging post-barrier, we don't maintain remembered set
1504 // information for young gen objects. 1487 // information for young gen objects.
1505 virtual bool can_elide_initializing_store_barrier(oop new_obj) { 1488 virtual inline bool can_elide_initializing_store_barrier(oop new_obj);
1506 return is_in_young(new_obj);
1507 }
1508 1489
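The comment above compresses the reasoning for both G1 barriers. Below is a hypothetical, deliberately simplified sketch of how a caller could act on this predicate, with made-up helper names; in reality the decision is made by the JIT compiler when it proves the object is freshly allocated, not by a runtime branch like this:

    // Sketch only: store into a field of a just-allocated object.
    void initializing_store(G1CollectedHeap* g1h, oop new_obj,
                            oop* field, oop value) {
      if (g1h->can_elide_initializing_store_barrier(new_obj)) {
        *field = value;  // young-gen object: raw store, no barriers
      } else {
        // satb_pre_barrier(field);        // would record the pre-value
        *field = value;
        // rs_update_post_barrier(field);  // would log the dirtied card
      }
    }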
1509 // Returns "true" iff the given word_size is "very large". 1490 // Returns "true" iff the given word_size is "very large".
1510 static bool isHumongous(size_t word_size) { 1491 static bool isHumongous(size_t word_size) {
1511 // Note this has to be strictly greater-than as the TLABs 1492 // Note this has to be strictly greater-than as the TLABs
1512 // are capped at the humongous threshold and we want to 1493 // are capped at the humongous threshold and we want to
1596 // This will find the region to which the object belongs and 1577 // This will find the region to which the object belongs and
1597 // then call the region version of the same function. 1578 // then call the region version of the same function.
1598 1579
1599 // Note that if it is NULL it isn't dead. 1580 // Note that if it is NULL it isn't dead.
1600 1581
1601 bool is_obj_dead(const oop obj) const { 1582 inline bool is_obj_dead(const oop obj) const;
1602 const HeapRegion* hr = heap_region_containing(obj); 1583
1603 if (hr == NULL) { 1584 inline bool is_obj_ill(const oop obj) const;
1604 if (obj == NULL) return false;
1605 else return true;
1606 }
1607 else return is_obj_dead(obj, hr);
1608 }
1609
1610 bool is_obj_ill(const oop obj) const {
1611 const HeapRegion* hr = heap_region_containing(obj);
1612 if (hr == NULL) {
1613 if (obj == NULL) return false;
1614 else return true;
1615 }
1616 else return is_obj_ill(obj, hr);
1617 }
1618 1585
1619 bool allocated_since_marking(oop obj, HeapRegion* hr, VerifyOption vo); 1586 bool allocated_since_marking(oop obj, HeapRegion* hr, VerifyOption vo);
1620 HeapWord* top_at_mark_start(HeapRegion* hr, VerifyOption vo); 1587 HeapWord* top_at_mark_start(HeapRegion* hr, VerifyOption vo);
1621 bool is_marked(oop obj, VerifyOption vo); 1588 bool is_marked(oop obj, VerifyOption vo);
1622 const char* top_at_mark_start_str(VerifyOption vo); 1589 const char* top_at_mark_start_str(VerifyOption vo);
1706 // parameter. The values for that parameter, and their meanings, 1673 // parameter. The values for that parameter, and their meanings,
1707 // are the same as those above. 1674 // are the same as those above.
1708 1675
1709 bool is_obj_dead_cond(const oop obj, 1676 bool is_obj_dead_cond(const oop obj,
1710 const HeapRegion* hr, 1677 const HeapRegion* hr,
1711 const VerifyOption vo) const { 1678 const VerifyOption vo) const;
1712 switch (vo) {
1713 case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
1714 case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
1715 case VerifyOption_G1UseMarkWord: return !obj->is_gc_marked();
1716 default: ShouldNotReachHere();
1717 }
1718 return false; // keep some compilers happy
1719 }
1720 1679
1721 bool is_obj_dead_cond(const oop obj, 1680 bool is_obj_dead_cond(const oop obj,
1722 const VerifyOption vo) const { 1681 const VerifyOption vo) const;
1723 switch (vo) {
1724 case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
1725 case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
1726 case VerifyOption_G1UseMarkWord: return !obj->is_gc_marked();
1727 default: ShouldNotReachHere();
1728 }
1729 return false; // keep some compilers happy
1730 }
1731 1682
1732 // Printing 1683 // Printing
1733 1684
1734 virtual void print_on(outputStream* st) const; 1685 virtual void print_on(outputStream* st) const;
1735 virtual void print_extended_on(outputStream* st) const; 1686 virtual void print_extended_on(outputStream* st) const;
1819 void add_to_undo_waste(size_t waste) { _undo_waste += waste; } 1770 void add_to_undo_waste(size_t waste) { _undo_waste += waste; }
1820 1771
1821 DirtyCardQueue& dirty_card_queue() { return _dcq; } 1772 DirtyCardQueue& dirty_card_queue() { return _dcq; }
1822 G1SATBCardTableModRefBS* ctbs() { return _ct_bs; } 1773 G1SATBCardTableModRefBS* ctbs() { return _ct_bs; }
1823 1774
1824 template <class T> void immediate_rs_update(HeapRegion* from, T* p, int tid) { 1775 template <class T> inline void immediate_rs_update(HeapRegion* from, T* p, int tid);
1825 if (!from->is_survivor()) {
1826 _g1_rem->par_write_ref(from, p, tid);
1827 }
1828 }
1829 1776
1830 template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) { 1777 template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) {
1831 // If the new value of the field points to the same region or 1778 // If the new value of the field points to the same region or
1832 // is the to-space, we don't need to include it in the Rset updates. 1779 // is the to-space, we don't need to include it in the Rset updates.
1833 if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) { 1780 if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
1865 template <class T> void push_on_queue(T* ref) { 1812 template <class T> void push_on_queue(T* ref) {
1866 assert(verify_ref(ref), "sanity"); 1813 assert(verify_ref(ref), "sanity");
1867 refs()->push(ref); 1814 refs()->push(ref);
1868 } 1815 }
1869 1816
1870 template <class T> void update_rs(HeapRegion* from, T* p, int tid) { 1817 template <class T> inline void update_rs(HeapRegion* from, T* p, int tid);
1871 if (G1DeferredRSUpdate) {
1872 deferred_rs_update(from, p, tid);
1873 } else {
1874 immediate_rs_update(from, p, tid);
1875 }
1876 }
1877 1818
1878 HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) { 1819 HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
1879 HeapWord* obj = NULL; 1820 HeapWord* obj = NULL;
1880 size_t gclab_word_size = _g1h->desired_plab_sz(purpose); 1821 size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
1881 if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) { 1822 if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
1995 1936
1996 inline oop clear_partial_array_mask(oop* ref) const { 1937 inline oop clear_partial_array_mask(oop* ref) const {
1997 return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK); 1938 return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
1998 } 1939 }
1999 1940
2000 void do_oop_partial_array(oop* p) { 1941 inline void do_oop_partial_array(oop* p);
2001 assert(has_partial_array_mask(p), "invariant");
2002 oop from_obj = clear_partial_array_mask(p);
2003
2004 assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
2005 assert(from_obj->is_objArray(), "must be obj array");
2006 objArrayOop from_obj_array = objArrayOop(from_obj);
2007 // The from-space object contains the real length.
2008 int length = from_obj_array->length();
2009
2010 assert(from_obj->is_forwarded(), "must be forwarded");
2011 oop to_obj = from_obj->forwardee();
2012 assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
2013 objArrayOop to_obj_array = objArrayOop(to_obj);
2014 // We keep track of the next start index in the length field of the
2015 // to-space object.
2016 int next_index = to_obj_array->length();
2017 assert(0 <= next_index && next_index < length,
2018 err_msg("invariant, next index: %d, length: %d", next_index, length));
2019
2020 int start = next_index;
2021 int end = length;
2022 int remainder = end - start;
2023 // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
2024 if (remainder > 2 * ParGCArrayScanChunk) {
2025 end = start + ParGCArrayScanChunk;
2026 to_obj_array->set_length(end);
2027 // Push the remainder before we process the range in case another
2028 // worker has run out of things to do and can steal it.
2029 oop* from_obj_p = set_partial_array_mask(from_obj);
2030 push_on_queue(from_obj_p);
2031 } else {
2032 assert(length == end, "sanity");
2033 // We'll process the final range for this object. Restore the length
2034 // so that the heap remains parsable in case of evacuation failure.
2035 to_obj_array->set_length(end);
2036 }
2037 _scanner.set_region(_g1h->heap_region_containing_raw(to_obj));
2038 // Process indexes [start,end). It will also process the header
2039 // along with the first chunk (i.e., the chunk with start == 0).
2040 // Note that at this point the length field of to_obj_array is not
2041 // correct given that we are using it to keep track of the next
2042 // start index. oop_iterate_range() (thankfully!) ignores the length
2043 // field and only relies on the start / end parameters. It does
2044 // however return the size of the object which will be incorrect. So
2045 // we have to ignore it even if we wanted to use it.
2046 to_obj_array->oop_iterate_range(&_scanner, start, end);
2047 }
2048 1942
2049 // This method is applied to the fields of the objects that have just been copied. 1943 // This method is applied to the fields of the objects that have just been copied.
2050 template <class T> void do_oop_evac(T* p, HeapRegion* from) { 1944 template <class T> void do_oop_evac(T* p, HeapRegion* from) {
2051 assert(!oopDesc::is_null(oopDesc::load_decode_heap_oop(p)), 1945 assert(!oopDesc::is_null(oopDesc::load_decode_heap_oop(p)),
2052 "Reference should not be NULL here as such are never pushed to the task queue."); 1946 "Reference should not be NULL here as such are never pushed to the task queue.");
2072 } 1966 }
2073 public: 1967 public:
2074 1968
2075 oop copy_to_survivor_space(oop const obj); 1969 oop copy_to_survivor_space(oop const obj);
2076 1970
2077 template <class T> void deal_with_reference(T* ref_to_scan) { 1971 template <class T> inline void deal_with_reference(T* ref_to_scan);
2078 if (!has_partial_array_mask(ref_to_scan)) { 1972
2079 // Note: we can use "raw" versions of "region_containing" because 1973 inline void deal_with_reference(StarTask ref);
2080 // "obj_to_scan" is definitely in the heap, and is not in a
2081 // humongous region.
2082 HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
2083 do_oop_evac(ref_to_scan, r);
2084 } else {
2085 do_oop_partial_array((oop*)ref_to_scan);
2086 }
2087 }
2088
2089 void deal_with_reference(StarTask ref) {
2090 assert(verify_task(ref), "sanity");
2091 if (ref.is_narrow()) {
2092 deal_with_reference((narrowOop*)ref);
2093 } else {
2094 deal_with_reference((oop*)ref);
2095 }
2096 }
2097 1974
2098 public: 1975 public:
2099 void trim_queue(); 1976 void trim_queue();
2100 }; 1977 };
2101 1978