diff src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp @ 20804:7848fc12602b
Merge with jdk8u40-b25
author | Gilles Duboscq <gilles.m.duboscq@oracle.com> |
---|---|
date | Tue, 07 Apr 2015 14:58:49 +0200 |
parents | 52b4284cb496 e5668dcf12e9 |
children | |
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Tue Apr 07 11:20:51 2015 +0200
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Tue Apr 07 14:58:49 2015 +0200
@@ -29,34 +29,61 @@
 #include "gc_implementation/g1/g1CollectedHeap.hpp"
 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
-#include "gc_implementation/g1/g1RemSet.inline.hpp"
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
+#include "gc_implementation/g1/heapRegionManager.inline.hpp"
 #include "gc_implementation/g1/heapRegionSet.inline.hpp"
-#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "utilities/taskqueue.hpp"
 
 // Inline functions for G1CollectedHeap
 
+inline AllocationContextStats& G1CollectedHeap::allocation_context_stats() {
+  return _allocation_context_stats;
+}
+
 // Return the region with the given index. It assumes the index is valid.
-inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrs.at(index); }
+inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); }
+
+inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
+  assert(is_in_reserved(addr),
+         err_msg("Cannot calculate region index for address "PTR_FORMAT" that is outside of the heap ["PTR_FORMAT", "PTR_FORMAT")",
+                 p2i(addr), p2i(_reserved.start()), p2i(_reserved.end())));
+  return (uint)(pointer_delta(addr, _reserved.start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
+}
+
+inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
+  return _hrm.reserved().start() + index * HeapRegion::GrainWords;
+}
 
 template <class T>
-inline HeapRegion*
-G1CollectedHeap::heap_region_containing(const T addr) const {
-  HeapRegion* hr = _hrs.addr_to_region((HeapWord*) addr);
-  // hr can be null if addr in perm_gen
-  if (hr != NULL && hr->continuesHumongous()) {
-    hr = hr->humongous_start_region();
+inline HeapRegion* G1CollectedHeap::heap_region_containing_raw(const T addr) const {
+  assert(addr != NULL, "invariant");
+  assert(is_in_g1_reserved((const void*) addr),
+         err_msg("Address "PTR_FORMAT" is outside of the heap ranging from ["PTR_FORMAT" to "PTR_FORMAT")",
+                 p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end())));
+  return _hrm.addr_to_region((HeapWord*) addr);
+}
+
+template <class T>
+inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
+  HeapRegion* hr = heap_region_containing_raw(addr);
+  if (hr->continuesHumongous()) {
+    return hr->humongous_start_region();
   }
   return hr;
 }
 
-template <class T>
-inline HeapRegion*
-G1CollectedHeap::heap_region_containing_raw(const T addr) const {
-  assert(_g1_reserved.contains((const void*) addr), "invariant");
-  HeapRegion* res = _hrs.addr_to_region_unsafe((HeapWord*) addr);
-  return res;
+inline void G1CollectedHeap::reset_gc_time_stamp() {
+  _gc_time_stamp = 0;
+  OrderAccess::fence();
+  // Clear the cached CSet starting regions and time stamps.
+  // Their validity is dependent on the GC timestamp.
+  clear_cset_start_regions();
+}
+
+inline void G1CollectedHeap::increment_gc_time_stamp() {
+  ++_gc_time_stamp;
+  OrderAccess::fence();
 }
 
 inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
@@ -64,22 +91,23 @@
 }
 
 inline bool G1CollectedHeap::obj_in_cs(oop obj) {
-  HeapRegion* r = _hrs.addr_to_region((HeapWord*) obj);
+  HeapRegion* r = _hrm.addr_to_region((HeapWord*) obj);
   return r != NULL && r->in_collection_set();
 }
 
-inline HeapWord*
-G1CollectedHeap::attempt_allocation(size_t word_size,
-                                    unsigned int* gc_count_before_ret,
-                                    int* gclocker_retry_count_ret) {
+inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
+                                                     unsigned int* gc_count_before_ret,
+                                                     int* gclocker_retry_count_ret) {
   assert_heap_not_locked_and_not_at_safepoint();
   assert(!isHumongous(word_size), "attempt_allocation() should not "
          "be called for humongous allocation requests");
 
-  HeapWord* result = _mutator_alloc_region.attempt_allocation(word_size,
-                                                              false /* bot_updates */);
+  AllocationContext_t context = AllocationContext::current();
+  HeapWord* result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
+                                                                                   false /* bot_updates */);
   if (result == NULL) {
     result = attempt_allocation_slow(word_size,
+                                     context,
                                      gc_count_before_ret,
                                      gclocker_retry_count_ret);
   }
@@ -90,17 +118,17 @@
   return result;
 }
 
-inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t
-                                                              word_size) {
+inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t word_size,
+                                                              AllocationContext_t context) {
   assert(!isHumongous(word_size),
          "we should not be seeing humongous-size allocations in this path");
 
-  HeapWord* result = _survivor_gc_alloc_region.attempt_allocation(word_size,
-                                                                  false /* bot_updates */);
+  HeapWord* result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation(word_size,
+                                                                                       false /* bot_updates */);
   if (result == NULL) {
     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
-    result = _survivor_gc_alloc_region.attempt_allocation_locked(word_size,
-                                                                 false /* bot_updates */);
+    result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation_locked(word_size,
+                                                                                      false /* bot_updates */);
   }
   if (result != NULL) {
     dirty_young_block(result, word_size);
@@ -108,16 +136,17 @@
   return result;
 }
 
-inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size) {
+inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size,
+                                                         AllocationContext_t context) {
   assert(!isHumongous(word_size),
         "we should not be seeing humongous-size allocations in this path");
 
-  HeapWord* result = _old_gc_alloc_region.attempt_allocation(word_size,
-                                                             true /* bot_updates */);
+  HeapWord* result = _allocator->old_gc_alloc_region(context)->attempt_allocation(word_size,
+                                                                                  true /* bot_updates */);
   if (result == NULL) {
     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
-    result = _old_gc_alloc_region.attempt_allocation_locked(word_size,
-                                                            true /* bot_updates */);
+    result = _allocator->old_gc_alloc_region(context)->attempt_allocation_locked(word_size,
+                                                                                 true /* bot_updates */);
  }
  return result;
 }
@@ -134,8 +163,7 @@
   // have to keep calling heap_region_containing_raw() in the
   // asserts below.
   DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);)
-  assert(containing_hr != NULL && start != NULL && word_size > 0,
-         "pre-condition");
+  assert(word_size > 0, "pre-condition");
   assert(containing_hr->is_in(start), "it should contain start");
   assert(containing_hr->is_young(), "it should be young");
   assert(!containing_hr->isHumongous(), "it should not be humongous");
@@ -159,17 +187,11 @@
   return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj);
 }
 
-
 // This is a fast test on whether a reference points into the
 // collection set or not. Assume that the reference
 // points into the heap.
-inline bool G1CollectedHeap::in_cset_fast_test(oop obj) {
-  assert(_in_cset_fast_test != NULL, "sanity");
-  assert(_g1_committed.contains((HeapWord*) obj), err_msg("Given reference outside of heap, is "PTR_FORMAT, p2i((HeapWord*)obj)));
-  // no need to subtract the bottom of the heap from obj,
-  // _in_cset_fast_test is biased
-  uintx index = cast_from_oop<uintx>(obj) >> HeapRegion::LogOfHRGrainBytes;
-  bool ret = _in_cset_fast_test[index];
+inline bool G1CollectedHeap::is_in_cset(oop obj) {
+  bool ret = _in_cset_fast_test.is_in_cset((HeapWord*)obj);
   // let's make sure the result is consistent with what the slower
   // test returns
   assert( ret || !obj_in_cs(obj), "sanity");
@@ -177,6 +199,18 @@
   return ret;
 }
 
+bool G1CollectedHeap::is_in_cset_or_humongous(const oop obj) {
+  return _in_cset_fast_test.is_in_cset_or_humongous((HeapWord*)obj);
+}
+
+G1CollectedHeap::in_cset_state_t G1CollectedHeap::in_cset_state(const oop obj) {
+  return _in_cset_fast_test.at((HeapWord*)obj);
+}
+
+void G1CollectedHeap::register_humongous_region_with_in_cset_fast_test(uint index) {
+  _in_cset_fast_test.set_humongous(index);
+}
+
 #ifndef PRODUCT
 // Support for G1EvacuationFailureALot
 
@@ -226,8 +260,7 @@
   }
 }
 
-inline bool
-G1CollectedHeap::evacuation_should_fail() {
+inline bool G1CollectedHeap::evacuation_should_fail() {
   if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) {
     return false;
   }
@@ -251,8 +284,10 @@
 #endif  // #ifndef PRODUCT
 
 inline bool G1CollectedHeap::is_in_young(const oop obj) {
-  HeapRegion* hr = heap_region_containing(obj);
-  return hr != NULL && hr->is_young();
+  if (obj == NULL) {
+    return false;
+  }
+  return heap_region_containing(obj)->is_young();
 }
 
 // We don't need barriers for initializing stores to objects
@@ -265,105 +300,34 @@
 }
 
 inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
-  const HeapRegion* hr = heap_region_containing(obj);
-  if (hr == NULL) {
-    if (obj == NULL) return false;
-    else return true;
+  if (obj == NULL) {
+    return false;
   }
-  else return is_obj_dead(obj, hr);
+  return is_obj_dead(obj, heap_region_containing(obj));
 }
 
 inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
-  const HeapRegion* hr = heap_region_containing(obj);
-  if (hr == NULL) {
-    if (obj == NULL) return false;
-    else return true;
+  if (obj == NULL) {
+    return false;
   }
-  else return is_obj_ill(obj, hr);
-}
-
-template <class T> inline void G1ParScanThreadState::immediate_rs_update(HeapRegion* from, T* p, int tid) {
-  if (!from->is_survivor()) {
-    _g1_rem->par_write_ref(from, p, tid);
-  }
-}
-
-template <class T> void G1ParScanThreadState::update_rs(HeapRegion* from, T* p, int tid) {
-  if (G1DeferredRSUpdate) {
-    deferred_rs_update(from, p, tid);
-  } else {
-    immediate_rs_update(from, p, tid);
-  }
+  return is_obj_ill(obj, heap_region_containing(obj));
 }
-
-inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
-  assert(has_partial_array_mask(p),
-         "invariant");
-  oop from_obj = clear_partial_array_mask(p);
-
-  assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
-  assert(from_obj->is_objArray(), "must be obj array");
-  objArrayOop from_obj_array = objArrayOop(from_obj);
-  // The from-space object contains the real length.
-  int length = from_obj_array->length();
-
-  assert(from_obj->is_forwarded(), "must be forwarded");
-  oop to_obj = from_obj->forwardee();
-  assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
-  objArrayOop to_obj_array = objArrayOop(to_obj);
-  // We keep track of the next start index in the length field of the
-  // to-space object.
-  int next_index = to_obj_array->length();
-  assert(0 <= next_index && next_index < length,
-         err_msg("invariant, next index: %d, length: %d", next_index, length));
-
-  int start = next_index;
-  int end = length;
-  int remainder = end - start;
-  // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
-  if (remainder > 2 * ParGCArrayScanChunk) {
-    end = start + ParGCArrayScanChunk;
-    to_obj_array->set_length(end);
-    // Push the remainder before we process the range in case another
-    // worker has run out of things to do and can steal it.
-    oop* from_obj_p = set_partial_array_mask(from_obj);
-    push_on_queue(from_obj_p);
-  } else {
-    assert(length == end, "sanity");
-    // We'll process the final range for this object. Restore the length
-    // so that the heap remains parsable in case of evacuation failure.
-    to_obj_array->set_length(end);
-  }
-  _scanner.set_region(_g1h->heap_region_containing_raw(to_obj));
-  // Process indexes [start,end). It will also process the header
-  // along with the first chunk (i.e., the chunk with start == 0).
-  // Note that at this point the length field of to_obj_array is not
-  // correct given that we are using it to keep track of the next
-  // start index. oop_iterate_range() (thankfully!) ignores the length
-  // field and only relies on the start / end parameters. It does
-  // however return the size of the object which will be incorrect. So
-  // we have to ignore it even if we wanted to use it.
-  to_obj_array->oop_iterate_range(&_scanner, start, end);
-}
-
-template <class T> inline void G1ParScanThreadState::deal_with_reference(T* ref_to_scan) {
-  if (!has_partial_array_mask(ref_to_scan)) {
-    // Note: we can use "raw" versions of "region_containing" because
-    // "obj_to_scan" is definitely in the heap, and is not in a
-    // humongous region.
-    HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
-    do_oop_evac(ref_to_scan, r);
-  } else {
-    do_oop_partial_array((oop*)ref_to_scan);
-  }
-}
-
-inline void G1ParScanThreadState::deal_with_reference(StarTask ref) {
-  assert(verify_task(ref), "sanity");
-  if (ref.is_narrow()) {
-    deal_with_reference((narrowOop*)ref);
-  } else {
-    deal_with_reference((oop*)ref);
+inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
+  uint region = addr_to_region((HeapWord*)obj);
+  // We not only set the "live" flag in the humongous_is_live table, but also
+  // reset the entry in the _in_cset_fast_test table so that subsequent references
+  // to the same humongous object do not go into the slow path again.
+  // This is racy, as multiple threads may at the same time enter here, but this
+  // is benign.
+  // During collection we only ever set the "live" flag, and only ever clear the
+  // entry in the in_cset_fast_table.
+  // We only ever evaluate the contents of these tables (in the VM thread) after
+  // having synchronized the worker threads with the VM thread, or in the same
  // thread (i.e. within the VM thread).
+  if (!_humongous_is_live.is_live(region)) {
+    _humongous_is_live.set_live(region);
+    _in_cset_fast_test.clear_humongous(region);
   }
 }
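
Note on the fast collection-set test reworked above: the old code indexed a hand-rolled biased array (`_in_cset_fast_test[index]`, with the index derived by shifting the object address by `HeapRegion::LogOfHRGrainBytes`), while the new code delegates to a table object (`_in_cset_fast_test.is_in_cset(...)`), and the new `addr_to_region()` computes the same shift explicitly. The following minimal sketch illustrates only that indexing scheme; it is not HotSpot code, and the class name `RegionTable` and the fixed 1 MiB region size are invented for the example:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>

// Hypothetical per-region membership table. Region index =
// (addr - heap_base) >> log2(region size), so the in-cset test is one
// shift, one subtraction, and one byte load -- no search.
class RegionTable {
public:
  static const size_t kLogRegionBytes = 20;  // assumed 1 MiB regions

  RegionTable(const char* heap_base, uint32_t num_regions)
      : _base(heap_base), _num_regions(num_regions),
        _flags(new uint8_t[num_regions]) {
    memset(_flags, 0, num_regions);
  }
  ~RegionTable() { delete[] _flags; }

  uint32_t region_index(const void* addr) const {
    size_t offset = (size_t)((const char*)addr - _base);
    uint32_t index = (uint32_t)(offset >> kLogRegionBytes);
    assert(index < _num_regions && "address must be inside the heap");
    return index;
  }

  void set_in_cset(uint32_t index) { _flags[index] = 1; }
  void clear(uint32_t index)       { _flags[index] = 0; }

  bool is_in_cset(const void* addr) const {
    return _flags[region_index(addr)] != 0;
  }

private:
  const char* _base;
  uint32_t    _num_regions;
  uint8_t*    _flags;
};

The real table in this change additionally distinguishes humongous regions (`is_in_cset_or_humongous`, `in_cset_state`), which is why `set_humongous_is_live` can clear an entry racily: per the diff's own comment, worker threads only ever set the live flag and clear the fast-test entry during a collection, and the VM thread examines the tables only after synchronizing with the workers, so the race is benign.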
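Likewise, the new `reset_gc_time_stamp()` / `increment_gc_time_stamp()` pair a plain update of `_gc_time_stamp` with `OrderAccess::fence()` (hence the added `runtime/orderAccess.inline.hpp` include). A loose C++11 analogue of that update-then-fence pattern, using `std::atomic_thread_fence` in place of HotSpot's `OrderAccess`; the class and method names are invented for illustration:

#include <atomic>

// Hypothetical sketch: publish a timestamp change with a full fence so
// that any thread observing later work also observes the new stamp.
class GcTimeStamp {
public:
  void reset() {
    _stamp.store(0, std::memory_order_relaxed);
    std::atomic_thread_fence(std::memory_order_seq_cst);
    // ...then invalidate caches whose validity depends on the stamp,
    // as reset_gc_time_stamp() does via clear_cset_start_regions().
  }
  void increment() {
    _stamp.fetch_add(1, std::memory_order_relaxed);
    std::atomic_thread_fence(std::memory_order_seq_cst);
  }
  unsigned current() const { return _stamp.load(std::memory_order_relaxed); }
private:
  std::atomic<unsigned> _stamp{0};
};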