# HG changeset patch
# User brutisso
# Date 1408444746 -7200
# Node ID eec72fa4b108f8656c584ad5984c20bbf0b55a2d
# Parent  ff3169f2562100b932438c4f3d16fc83d90c770a
8040722: G1: Clean up usages of heap_region_containing
Reviewed-by: tschatzl, jmasa

diff -r ff3169f25621 -r eec72fa4b108 src/share/vm/gc_implementation/g1/concurrentMark.cpp
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Tue Apr 01 07:46:51 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Tue Aug 19 12:39:06 2014 +0200
@@ -2800,7 +2800,6 @@
       str = " O";
     } else {
       HeapRegion* hr = _g1h->heap_region_containing(obj);
-      guarantee(hr != NULL, "invariant");
       bool over_tams = _g1h->allocated_since_marking(obj, hr, _vo);
       bool marked = _g1h->is_marked(obj, _vo);
 
@@ -3565,9 +3564,8 @@
 }
 
 void CMTask::setup_for_region(HeapRegion* hr) {
-  // Separated the asserts so that we know which one fires.
   assert(hr != NULL,
-        "claim_region() should have filtered out continues humongous regions");
+        "claim_region() should have filtered out NULL regions");
   assert(!hr->continuesHumongous(),
         "claim_region() should have filtered out continues humongous regions");
 
diff -r ff3169f25621 -r eec72fa4b108 src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Apr 01 07:46:51 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Aug 19 12:39:06 2014 +0200
@@ -442,24 +442,18 @@
 // implementation of is_scavengable() for G1 will indicate that
 // all nmethods must be scanned during a partial collection.
 bool G1CollectedHeap::is_in_partial_collection(const void* p) {
-  HeapRegion* hr = heap_region_containing(p);
-  return hr != NULL && hr->in_collection_set();
+  if (p == NULL) {
+    return false;
+  }
+  return heap_region_containing(p)->in_collection_set();
 }
 #endif
 
 // Returns true if the reference points to an object that
 // can move in an incremental collection.
 bool G1CollectedHeap::is_scavengable(const void* p) {
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  G1CollectorPolicy* g1p = g1h->g1_policy();
   HeapRegion* hr = heap_region_containing(p);
-  if (hr == NULL) {
-     // null
-     assert(p == NULL, err_msg("Not NULL " PTR_FORMAT ,p));
-     return false;
-  } else {
-    return !hr->isHumongous();
-  }
+  return !hr->isHumongous();
 }
 
 void G1CollectedHeap::check_ct_logs_at_safepoint() {
@@ -2984,21 +2978,16 @@
 }
 
 Space* G1CollectedHeap::space_containing(const void* addr) const {
-  Space* res = heap_region_containing(addr);
-  return res;
+  return heap_region_containing(addr);
 }
 
 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
   Space* sp = space_containing(addr);
-  if (sp != NULL) {
-    return sp->block_start(addr);
-  }
-  return NULL;
+  return sp->block_start(addr);
}
 
 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
   Space* sp = space_containing(addr);
-  assert(sp != NULL, "block_size of address outside of heap");
   return sp->block_size(addr);
 }
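The recurring pattern in this patch is visible in is_in_partial_collection()
and is_scavengable() above: the NULL check moves from the region returned by
heap_region_containing() to the incoming pointer, and the lookup result is
then dereferenced directly. A minimal standalone sketch of that calling
convention; Heap, HeapRegion and the lookup body here are invented stand-ins
for the example, not the HotSpot classes:

    #include <cassert>
    #include <cstddef>

    struct HeapRegion {
      bool in_cset;
      bool in_collection_set() const { return in_cset; }
    };

    struct Heap {
      HeapRegion region;

      // Post-patch contract: addr must be a non-NULL heap address, and
      // the lookup never returns NULL, so callers may dereference it.
      HeapRegion* heap_region_containing(const void* addr) {
        assert(addr != NULL && "caller must filter NULL before the lookup");
        return &region;
      }

      bool is_in_partial_collection(const void* p) {
        if (p == NULL) {
          return false;  // the NULL case is handled here, not in the lookup
        }
        return heap_region_containing(p)->in_collection_set();
      }
    };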
@@ -4652,30 +4641,19 @@
   ParGCAllocBuffer(gclab_word_size), _retired(true) { }
 
 void G1ParCopyHelper::mark_object(oop obj) {
-#ifdef ASSERT
-  HeapRegion* hr = _g1->heap_region_containing(obj);
-  assert(hr != NULL, "sanity");
-  assert(!hr->in_collection_set(), "should not mark objects in the CSet");
-#endif // ASSERT
+  assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");
 
   // We know that the object is not moving so it's safe to read its size.
   _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
 }
 
 void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
-#ifdef ASSERT
   assert(from_obj->is_forwarded(), "from obj should be forwarded");
   assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
   assert(from_obj != to_obj, "should not be self-forwarded");
 
-  HeapRegion* from_hr = _g1->heap_region_containing(from_obj);
-  assert(from_hr != NULL, "sanity");
-  assert(from_hr->in_collection_set(), "from obj should be in the CSet");
-
-  HeapRegion* to_hr = _g1->heap_region_containing(to_obj);
-  assert(to_hr != NULL, "sanity");
-  assert(!to_hr->in_collection_set(), "should not mark objects in the CSet");
-#endif // ASSERT
+  assert(_g1->heap_region_containing(from_obj)->in_collection_set(), "from obj should be in the CSet");
+  assert(!_g1->heap_region_containing(to_obj)->in_collection_set(), "should not mark objects in the CSet");
 
   // The object might be in the process of being copied by another
   // worker so we cannot trust that its to-space image is
@@ -6912,11 +6890,7 @@
 
 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
   HeapRegion* hr = heap_region_containing(p);
-  if (hr == NULL) {
-    return false;
-  } else {
-    return hr->is_in(p);
-  }
+  return hr->is_in(p);
 }
 
 // Methods for the mutator alloc region
diff -r ff3169f25621 -r eec72fa4b108 src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Tue Apr 01 07:46:51 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Tue Aug 19 12:39:06 2014 +0200
@@ -1490,17 +1490,15 @@
   // space containing a given address, or else returns NULL.
   virtual Space* space_containing(const void* addr) const;
 
-  // A G1CollectedHeap will contain some number of heap regions. This
-  // finds the region containing a given address, or else returns NULL.
+  // Returns the HeapRegion that contains addr. addr must not be NULL.
+  template <class T>
+  inline HeapRegion* heap_region_containing_raw(const T addr) const;
+
+  // Returns the HeapRegion that contains addr. addr must not be NULL.
+  // If addr is within a humongous continues region, it returns its humongous start region.
   template <class T>
   inline HeapRegion* heap_region_containing(const T addr) const;
 
-  // Like the above, but requires "addr" to be in the heap (to avoid a
-  // null-check), and unlike the above, may return an continuing humongous
-  // region.
-  template <class T>
-  inline HeapRegion* heap_region_containing_raw(const T addr) const;
-
   // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
   // each address in the (reserved) heap is a member of exactly
   // one block. The defining characteristic of a block is that it is
@@ -1642,7 +1640,6 @@
   // the region to which the object belongs. An object is dead
   // iff a) it was not allocated since the last mark and b) it
   // is not marked.
-
   bool is_obj_dead(const oop obj, const HeapRegion* hr) const {
     return
       !hr->obj_allocated_since_prev_marking(obj) &&
@@ -1652,7 +1649,6 @@
   // This function returns true when an object has been
   // around since the previous marking and hasn't yet
   // been marked during this marking.
-
   bool is_obj_ill(const oop obj, const HeapRegion* hr) const {
     return
       !hr->obj_allocated_since_next_marking(obj) &&
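The new header comments pin down the split between the two lookups: both
require a non-NULL address inside the heap, and only the non-raw variant
remaps a "continues humongous" region to the region where the humongous
object starts. A sketch of that remapping over a toy region type; the
pointer-based model below is invented for the example and is not how
HotSpot stores the relationship:

    #include <cstddef>

    struct HeapRegion {
      HeapRegion* humongous_start;  // non-NULL iff a "continues" region
      bool continuesHumongous() const { return humongous_start != NULL; }
      HeapRegion* humongous_start_region() { return humongous_start; }
    };

    // heap_region_containing_raw would return raw_result unchanged;
    // heap_region_containing additionally remaps a "continues humongous"
    // region to the "starts humongous" region that owns the object.
    HeapRegion* remap_to_start_region(HeapRegion* raw_result) {
      if (raw_result->continuesHumongous()) {
        return raw_result->humongous_start_region();
      }
      return raw_result;
    }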
diff -r ff3169f25621 -r eec72fa4b108 src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Tue Apr 01 07:46:51 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Tue Aug 19 12:39:06 2014 +0200
@@ -49,21 +49,22 @@
 
 template <class T>
 inline HeapRegion*
-G1CollectedHeap::heap_region_containing(const T addr) const {
-  HeapRegion* hr = _hrs.addr_to_region((HeapWord*) addr);
-  // hr can be null if addr in perm_gen
-  if (hr != NULL && hr->continuesHumongous()) {
-    hr = hr->humongous_start_region();
-  }
-  return hr;
+G1CollectedHeap::heap_region_containing_raw(const T addr) const {
+  assert(addr != NULL, "invariant");
+  assert(_g1_reserved.contains((const void*) addr),
+      err_msg("Address "PTR_FORMAT" is outside of the heap ranging from ["PTR_FORMAT" to "PTR_FORMAT")",
+          p2i((void*)addr), p2i(_g1_reserved.start()), p2i(_g1_reserved.end())));
+  return _hrs.addr_to_region((HeapWord*) addr);
 }
 
 template <class T>
 inline HeapRegion*
-G1CollectedHeap::heap_region_containing_raw(const T addr) const {
-  assert(_g1_reserved.contains((const void*) addr), "invariant");
-  HeapRegion* res = _hrs.addr_to_region_unsafe((HeapWord*) addr);
-  return res;
+G1CollectedHeap::heap_region_containing(const T addr) const {
+  HeapRegion* hr = heap_region_containing_raw(addr);
+  if (hr->continuesHumongous()) {
+    return hr->humongous_start_region();
+  }
+  return hr;
 }
 
 inline void G1CollectedHeap::reset_gc_time_stamp() {
@@ -154,8 +155,7 @@
   // have to keep calling heap_region_containing_raw() in the
   // asserts below.
   DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);)
-  assert(containing_hr != NULL && start != NULL && word_size > 0,
-         "pre-condition");
+  assert(word_size > 0, "pre-condition");
   assert(containing_hr->is_in(start), "it should contain start");
   assert(containing_hr->is_young(), "it should be young");
   assert(!containing_hr->isHumongous(), "it should not be humongous");
@@ -277,8 +277,10 @@
 #endif // #ifndef PRODUCT
 
 inline bool G1CollectedHeap::is_in_young(const oop obj) {
-  HeapRegion* hr = heap_region_containing(obj);
-  return hr != NULL && hr->is_young();
+  if (obj == NULL) {
+    return false;
+  }
+  return heap_region_containing(obj)->is_young();
 }
 
 // We don't need barriers for initializing stores to objects
@@ -291,21 +293,17 @@
 }
 
 inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
-  const HeapRegion* hr = heap_region_containing(obj);
-  if (hr == NULL) {
-    if (obj == NULL) return false;
-    else return true;
+  if (obj == NULL) {
+    return false;
   }
-  else return is_obj_dead(obj, hr);
+  return is_obj_dead(obj, heap_region_containing(obj));
 }
 
 inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
-  const HeapRegion* hr = heap_region_containing(obj);
-  if (hr == NULL) {
-    if (obj == NULL) return false;
-    else return true;
+  if (obj == NULL) {
+    return false;
   }
-  else return is_obj_ill(obj, hr);
+  return is_obj_ill(obj, heap_region_containing(obj));
 }
 
 inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
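The rewritten is_obj_dead() and is_obj_ill() now answer NULL directly and
delegate everything else to the two-argument forms, whose logic the header
comments above spell out: dead iff not allocated since the last marking and
not marked. A toy version of that test; the word-granular bitmap and the
field names are invented for the example:

    #include <cstdint>
    #include <vector>

    struct Region {
      uintptr_t bottom;
      uintptr_t top_at_mark_start;       // allocations at or above are "new"
      std::vector<bool> prev_mark_bits;  // one bit per word

      bool obj_allocated_since_prev_marking(uintptr_t obj) const {
        return obj >= top_at_mark_start;
      }
      bool is_marked_prev(uintptr_t obj) const {
        return prev_mark_bits[(obj - bottom) / sizeof(uintptr_t)];
      }
    };

    bool is_obj_dead(uintptr_t obj, const Region& r) {
      return !r.obj_allocated_since_prev_marking(obj) &&
             !r.is_marked_prev(obj);
    }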
diff -r ff3169f25621 -r eec72fa4b108 src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp
--- a/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp	Tue Apr 01 07:46:51 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp	Tue Aug 19 12:39:06 2014 +0200
@@ -130,9 +130,7 @@
   if (!oopDesc::is_null(heap_oop)) {
     oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
     HeapRegion* hr = _g1h->heap_region_containing((HeapWord*) obj);
-    if (hr != NULL) {
-      _cm->grayRoot(obj, obj->size(), _worker_id, hr);
-    }
+    _cm->grayRoot(obj, obj->size(), _worker_id, hr);
   }
 }
 
@@ -159,57 +157,61 @@
 template <class T>
 inline void G1UpdateRSOrPushRefOopClosure::do_oop_nv(T* p) {
   oop obj = oopDesc::load_decode_heap_oop(p);
+  if (obj == NULL) {
+    return;
+  }
 #ifdef ASSERT
   // can't do because of races
   // assert(obj == NULL || obj->is_oop(), "expected an oop");
 
   // Do the safe subset of is_oop
-  if (obj != NULL) {
 #ifdef CHECK_UNHANDLED_OOPS
-    oopDesc* o = obj.obj();
+  oopDesc* o = obj.obj();
 #else
-    oopDesc* o = obj;
+  oopDesc* o = obj;
 #endif // CHECK_UNHANDLED_OOPS
-    assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
-    assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
-  }
+  assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
+  assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
 #endif // ASSERT
 
   assert(_from != NULL, "from region must be non-NULL");
   assert(_from->is_in_reserved(p), "p is not in from");
 
   HeapRegion* to = _g1->heap_region_containing(obj);
-  if (to != NULL && _from != to) {
-    // The _record_refs_into_cset flag is true during the RSet
-    // updating part of an evacuation pause. It is false at all
-    // other times:
-    //  * rebuilding the rembered sets after a full GC
-    //  * during concurrent refinement.
-    //  * updating the remembered sets of regions in the collection
-    //    set in the event of an evacuation failure (when deferred
-    //    updates are enabled).
+  if (_from == to) {
+    // Normally this closure should only be called with cross-region references.
+    // But since Java threads are manipulating the references concurrently and we
+    // reload the values things may have changed.
+    return;
+  }
+  // The _record_refs_into_cset flag is true during the RSet
+  // updating part of an evacuation pause. It is false at all
+  // other times:
+  //  * rebuilding the remembered sets after a full GC
+  //  * during concurrent refinement.
+  //  * updating the remembered sets of regions in the collection
+  //    set in the event of an evacuation failure (when deferred
+  //    updates are enabled).
 
-    if (_record_refs_into_cset && to->in_collection_set()) {
-      // We are recording references that point into the collection
-      // set and this particular reference does exactly that...
-      // If the referenced object has already been forwarded
-      // to itself, we are handling an evacuation failure and
-      // we have already visited/tried to copy this object
-      // there is no need to retry.
-      if (!self_forwarded(obj)) {
-        assert(_push_ref_cl != NULL, "should not be null");
-        // Push the reference in the refs queue of the G1ParScanThreadState
-        // instance for this worker thread.
-        _push_ref_cl->do_oop(p);
-      }
+  if (_record_refs_into_cset && to->in_collection_set()) {
+    // We are recording references that point into the collection
+    // set and this particular reference does exactly that...
+    // If the referenced object has already been forwarded
+    // to itself, we are handling an evacuation failure and
+    // we have already visited/tried to copy this object
+    // there is no need to retry.
+    if (!self_forwarded(obj)) {
+      assert(_push_ref_cl != NULL, "should not be null");
+      // Push the reference in the refs queue of the G1ParScanThreadState
+      // instance for this worker thread.
+      _push_ref_cl->do_oop(p);
+    }
 
-      // Deferred updates to the CSet are either discarded (in the normal case),
-      // or processed (if an evacuation failure occurs) at the end
-      // of the collection.
-      // See G1RemSet::cleanup_after_oops_into_collection_set_do().
-      return;
-    }
-
+    // Deferred updates to the CSet are either discarded (in the normal case),
+    // or processed (if an evacuation failure occurs) at the end
+    // of the collection.
+    // See G1RemSet::cleanup_after_oops_into_collection_set_do().
+  } else {
     // We either don't care about pushing references that point into the
     // collection set (i.e. we're not during an evacuation pause) _or_
    // the reference doesn't point into the collection set. Either way
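The rewrite above flattens do_oop_nv() into early returns: NULL and
same-region references leave immediately, and the remaining work splits
into the two real cases. Reduced to a toy classifier, with all types and
names illustrative rather than taken from HotSpot:

    #include <cstddef>

    struct Region {
      bool in_cset;
      bool in_collection_set() const { return in_cset; }
    };

    enum Action { Ignore, PushToCSetQueue, UpdateRemSet };

    Action classify(const void* obj, const Region* from, const Region* to,
                    bool record_refs_into_cset) {
      if (obj == NULL) {
        return Ignore;            // nothing to record for a null reference
      }
      if (from == to) {
        // Racing mutator threads can make a reference look region-local
        // by the time it is reloaded, so this is filtered, not asserted.
        return Ignore;
      }
      if (record_refs_into_cset && to->in_collection_set()) {
        return PushToCSetQueue;   // deferred until the end of the collection
      }
      return UpdateRemSet;        // ordinary cross-region reference
    }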
diff -r ff3169f25621 -r eec72fa4b108 src/share/vm/gc_implementation/g1/g1RemSet.cpp
--- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Tue Apr 01 07:46:51 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Tue Aug 19 12:39:06 2014 +0200
@@ -211,7 +211,6 @@
 #endif
 
       HeapRegion* card_region = _g1h->heap_region_containing(card_start);
-      assert(card_region != NULL, "Yielding cards not in the heap?");
       _cards++;
 
       if (!card_region->is_on_dirty_cards_region_list()) {
@@ -406,7 +405,6 @@
   HeapWord* start = _ct_bs->addr_for(card_ptr);
   // And find the region containing it.
   HeapRegion* r = _g1->heap_region_containing(start);
-  assert(r != NULL, "unexpected null");
 
   // Scan oops in the card looking for references into the collection set
   // Don't use addr_for(card_ptr + 1) which can ask for
@@ -568,11 +566,6 @@
   HeapWord* start = _ct_bs->addr_for(card_ptr);
   // And find the region containing it.
   HeapRegion* r = _g1->heap_region_containing(start);
-  if (r == NULL) {
-    // Again no need to return that this card contains refs that
-    // point into the collection set.
-    return false;  // Not in the G1 heap (might be in perm, for example.)
-  }
 
   // Why do we have to check here whether a card is on a young region,
   // given that we dirty young regions and, as a result, the
@@ -625,10 +618,6 @@
 
     start = _ct_bs->addr_for(card_ptr);
     r = _g1->heap_region_containing(start);
-    if (r == NULL) {
-      // Not in the G1 heap
-      return false;
-    }
 
     // Checking whether the region we got back from the cache
    // is young here is inappropriate. The region could have been
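The refinement paths above always reach the region lookup through
_ct_bs->addr_for(card_ptr), and the address of a card start is by
construction inside the heap, which is why the NULL paths could be deleted.
Toy arithmetic for that mapping; the 512-byte card size matches G1's
default, everything else (base pointers, region size) is made up:

    #include <cassert>
    #include <cstddef>

    const size_t kCardSize   = 512;            // bytes covered per card entry
    const size_t kRegionSize = 1024 * 1024;    // toy region size

    char* heap_base;    // assumed reserved heap base
    char* card_table;   // one byte per card, assumed to cover the whole heap

    // addr_for(card_ptr): first heap address covered by this card entry.
    char* addr_for(char* card_ptr) {
      return heap_base + (size_t)(card_ptr - card_table) * kCardSize;
    }

    // Region index for an address: every card start lies inside the heap,
    // so the lookup no longer needs a NULL path.
    size_t region_index_containing(char* addr) {
      assert(addr >= heap_base && "card starts always lie inside the heap");
      return (size_t)(addr - heap_base) / kRegionSize;
    }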
diff -r ff3169f25621 -r eec72fa4b108 src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp
--- a/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp	Tue Apr 01 07:46:51 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp	Tue Aug 19 12:39:06 2014 +0200
@@ -46,26 +46,28 @@
 template <class T>
 inline void G1RemSet::par_write_ref(HeapRegion* from, T* p, int tid) {
   oop obj = oopDesc::load_decode_heap_oop(p);
+  if (obj == NULL) {
+    return;
+  }
+
 #ifdef ASSERT
   // can't do because of races
   // assert(obj == NULL || obj->is_oop(), "expected an oop");
 
   // Do the safe subset of is_oop
-  if (obj != NULL) {
 #ifdef CHECK_UNHANDLED_OOPS
-    oopDesc* o = obj.obj();
+  oopDesc* o = obj.obj();
 #else
-    oopDesc* o = obj;
+  oopDesc* o = obj;
 #endif // CHECK_UNHANDLED_OOPS
-    assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
-    assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
-  }
+  assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
+  assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
 #endif // ASSERT
 
   assert(from == NULL || from->is_in_reserved(p), "p is not in from");
 
   HeapRegion* to = _g1->heap_region_containing(obj);
-  if (to != NULL && from != to) {
+  if (from != to) {
     assert(to->rem_set() != NULL, "Need per-region 'into' remsets.");
     to->rem_set()->add_reference(p, tid);
   }
diff -r ff3169f25621 -r eec72fa4b108 src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp
--- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Tue Apr 01 07:46:51 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Tue Aug 19 12:39:06 2014 +0200
@@ -802,7 +802,6 @@
 bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const {
   HeapRegion* hr = _g1h->heap_region_containing_raw(from);
-  if (hr == NULL) return false;
   RegionIdx_t hr_ind = (RegionIdx_t) hr->hrs_index();
   // Is this region in the coarse map?
   if (_coarse_map.at(hr_ind)) return true;
diff -r ff3169f25621 -r eec72fa4b108 src/share/vm/gc_implementation/g1/heapRegionSeq.cpp
--- a/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Tue Apr 01 07:46:51 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Tue Aug 19 12:39:06 2014 +0200
@@ -240,7 +240,6 @@
       // Asserts will fire if i is >= _length
       HeapWord* addr = hr->bottom();
       guarantee(addr_to_region(addr) == hr, "sanity");
-      guarantee(addr_to_region_unsafe(addr) == hr, "sanity");
     } else {
       guarantee(hr->is_empty(), "sanity");
       guarantee(!hr->isHumongous(), "sanity");
diff -r ff3169f25621 -r eec72fa4b108 src/share/vm/gc_implementation/g1/heapRegionSeq.hpp
--- a/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Tue Apr 01 07:46:51 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Tue Aug 19 12:39:06 2014 +0200
@@ -110,10 +110,6 @@
   // HeapRegion, otherwise return NULL.
   inline HeapRegion* addr_to_region(HeapWord* addr) const;
 
-  // Return the HeapRegion that corresponds to the given
-  // address. Assume the address is valid.
-  inline HeapRegion* addr_to_region_unsafe(HeapWord* addr) const;
-
   // Return the number of regions that have been committed in the heap.
   uint length() const { return _committed_length; }
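With the NULL and same-region cases filtered up front, par_write_ref()
reduces to one action: record the location of the pointer in the
destination region's remembered set. As a toy, with RemSet as a plain
vector standing in for G1's real per-region data structure:

    #include <cstddef>
    #include <vector>

    struct RemSet {
      std::vector<void**> refs;
      void add_reference(void** p) { refs.push_back(p); }
    };

    struct Region {
      RemSet rem_set;
    };

    void par_write_ref(Region* from, Region* to, void** p) {
      if (*p == NULL) {
        return;  // null references never need to be remembered
      }
      if (from != to) {
        to->rem_set.add_reference(p);  // cross-region: remember where p lives
      }
    }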
diff -r ff3169f25621 -r eec72fa4b108 src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp
--- a/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp	Tue Apr 01 07:46:51 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp	Tue Aug 19 12:39:06 2014 +0200
@@ -28,21 +28,17 @@
 #include "gc_implementation/g1/heapRegion.hpp"
 #include "gc_implementation/g1/heapRegionSeq.hpp"
 
-inline HeapRegion* HeapRegionSeq::addr_to_region_unsafe(HeapWord* addr) const {
+inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const {
+  assert(addr < heap_end(),
+        err_msg("addr: "PTR_FORMAT" end: "PTR_FORMAT, p2i(addr), p2i(heap_end())));
+  assert(addr >= heap_bottom(),
+        err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT, p2i(addr), p2i(heap_bottom())));
+
   HeapRegion* hr = _regions.get_by_address(addr);
   assert(hr != NULL, "invariant");
   return hr;
 }
 
-inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const {
-  if (addr != NULL && addr < heap_end()) {
-    assert(addr >= heap_bottom(),
-          err_msg("addr: " PTR_FORMAT " bottom: " PTR_FORMAT, p2i(addr), p2i(heap_bottom())));
-    return addr_to_region_unsafe(addr);
-  }
-  return NULL;
-}
-
 inline HeapRegion* HeapRegionSeq::at(uint index) const {
   assert(index < length(), "pre-condition");
   HeapRegion* hr = _regions.get_by_index(index);
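A self-contained sketch of the single addr_to_region() that remains after
this patch: callers guarantee the address lies in [bottom, end), the
function asserts that and indexes the region table directly. The flat
array below is a stand-in for G1's biased mapped array, and the field
names are invented for the example:

    #include <cassert>
    #include <cstddef>

    struct HeapRegion { int id; };

    struct HeapRegionSeq {
      char*       bottom;        // first heap address
      char*       end;           // one past the last heap address
      size_t      region_bytes;  // bytes per region
      HeapRegion* regions;       // one entry per region

      HeapRegion* addr_to_region(char* addr) const {
        assert(addr >= bottom && addr < end && "caller must pass a heap address");
        return &regions[(size_t)(addr - bottom) / region_bytes];
      }
    };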