comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp @ 20336:6701abbc4441

8054818: Refactor HeapRegionSeq to manage heap region and auxiliary data
Summary: Let HeapRegionSeq manage the heap region and auxiliary data to decrease the amount of responsibilities of G1CollectedHeap, and encapsulate this work from other code.
Reviewed-by: jwilhelm, jmasa, mgerdin, brutisso
author tschatzl
date Tue, 19 Aug 2014 10:50:27 +0200
parents eec72fa4b108
children a8ea2f110d87
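To make the intent of the summary concrete, here is a minimal sketch under assumed names and simplified types (HeapRegionSeqSketch and G1CollectedHeapSketch are placeholders for illustration, not the declarations this change actually touches): a region-sequence object owns the reserved range and the address-to-region mapping, and the heap merely delegates to it.

// Minimal sketch (assumed names, simplified types; not HotSpot declarations).
// The sequence object owns the reserved range and the addr -> HeapRegion
// lookup, so the heap delegates instead of duplicating that bookkeeping.
class HeapRegionSeqSketch {
 public:
  MemRegion reserved() const { return _reserved; }       // whole reserved heap range
  HeapRegion* addr_to_region(HeapWord* addr) const;      // NULL if no region there
 private:
  MemRegion _reserved;
  // the region table and auxiliary data would live here
};

class G1CollectedHeapSketch {
 public:
  bool is_in_g1_reserved(const void* p) const {
    return _hrs.reserved().contains(p);                  // backed by the sequence
  }
  HeapRegion* heap_region_containing_raw(HeapWord* addr) const {
    return _hrs.addr_to_region(addr);
  }
 private:
  HeapRegionSeqSketch _hrs;
};

The hunks below follow this shape: direct uses of the heap's own _g1_reserved field give way to accessors, and address-to-region lookups go through _hrs.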
@@ -45,23 +45,25 @@
            err_msg("Cannot calculate region index for address "PTR_FORMAT" that is outside of the heap ["PTR_FORMAT", "PTR_FORMAT")",
                    p2i(addr), p2i(_reserved.start()), p2i(_reserved.end())));
   return (uint)(pointer_delta(addr, _reserved.start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
 }
 
+inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
+  return _hrs.reserved().start() + index * HeapRegion::GrainWords;
+}
+
 template <class T>
-inline HeapRegion*
-G1CollectedHeap::heap_region_containing_raw(const T addr) const {
+inline HeapRegion* G1CollectedHeap::heap_region_containing_raw(const T addr) const {
   assert(addr != NULL, "invariant");
-  assert(_g1_reserved.contains((const void*) addr),
+  assert(is_in_g1_reserved((const void*) addr),
          err_msg("Address "PTR_FORMAT" is outside of the heap ranging from ["PTR_FORMAT" to "PTR_FORMAT")",
-                 p2i((void*)addr), p2i(_g1_reserved.start()), p2i(_g1_reserved.end())));
+                 p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end())));
   return _hrs.addr_to_region((HeapWord*) addr);
 }
 
 template <class T>
-inline HeapRegion*
-G1CollectedHeap::heap_region_containing(const T addr) const {
+inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
   HeapRegion* hr = heap_region_containing_raw(addr);
   if (hr->continuesHumongous()) {
     return hr->humongous_start_region();
   }
   return hr;
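The new bottom_addr_for_region() is the inverse of the index computation above. As a worked illustration with assumed constants (a 1 MB region size and 8-byte heap words are assumptions for this example, not values taken from the change), the two directions of the mapping look like this:

// Illustrative sketch of the index <-> address arithmetic used above; the
// constants and helper names are assumptions for the example, not the actual
// HotSpot definitions.
#include <cstddef>
#include <cstdint>

static const size_t LogOfGrainBytes = 20;                 // assume 1 MB regions
static const size_t GrainBytes      = (size_t)1 << LogOfGrainBytes;
static const size_t HeapWordSize    = sizeof(void*);      // assume 8 bytes per heap word
static const size_t GrainWords      = GrainBytes / HeapWordSize;

// addr -> region index: byte offset from the reserved start, divided by the
// region size (a shift, because the region size is a power of two).
static uint32_t region_index(const char* reserved_start, const char* addr) {
  return (uint32_t)((size_t)(addr - reserved_start) >> LogOfGrainBytes);
}

// region index -> bottom address: the reserved start plus index regions; the
// word count appears because bottom_addr_for_region() does HeapWord* arithmetic.
static const char* bottom_addr(const char* reserved_start, uint32_t index) {
  return reserved_start + (size_t)index * GrainWords * HeapWordSize;
}

With these numbers, an address 5 MB past the reserved start falls in region 5, and bottom_addr(start, 5) returns the first word of that region. heap_region_containing() adds one step on top of the raw lookup: if the region found is a humongous continuation region, the result is redirected to its humongous start region.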
@@ -87,14 +89,13 @@
 inline bool G1CollectedHeap::obj_in_cs(oop obj) {
   HeapRegion* r = _hrs.addr_to_region((HeapWord*) obj);
   return r != NULL && r->in_collection_set();
 }
 
-inline HeapWord*
-G1CollectedHeap::attempt_allocation(size_t word_size,
-                                    unsigned int* gc_count_before_ret,
-                                    int* gclocker_retry_count_ret) {
+inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
+                                                     unsigned int* gc_count_before_ret,
+                                                     int* gclocker_retry_count_ret) {
   assert_heap_not_locked_and_not_at_safepoint();
   assert(!isHumongous(word_size), "attempt_allocation() should not "
          "be called for humongous allocation requests");
 
   HeapWord* result = _mutator_alloc_region.attempt_allocation(word_size,
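obj_in_cs() reports whether an object currently lives in a collection-set region. As a rough, hypothetical usage sketch (the function name and surrounding logic are illustrative only, not part of this patch), a copying phase could use it to decide whether a reference's target needs to be evacuated:

// Hypothetical illustration only: not the actual G1 evacuation closures.
// References whose target is inside the collection set must be copied out of
// the regions being reclaimed (and the reference updated); other targets stay put.
inline void example_maybe_evacuate(G1CollectedHeap* g1h, oop* p) {
  oop obj = *p;
  if (obj == NULL) {
    return;                         // nothing to do for a NULL reference
  }
  if (g1h->obj_in_cs(obj)) {
    // target lives in a region being evacuated: copy/forward it and
    // update *p with the new address (elided in this sketch)
  }
}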
@@ -250,12 +251,11 @@
                                during_im,
                                during_marking);
     }
   }
 
-inline bool
-G1CollectedHeap::evacuation_should_fail() {
+inline bool G1CollectedHeap::evacuation_should_fail() {
   if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) {
     return false;
   }
   // G1EvacuationFailureALot is in effect for current GC
   // Access to _evacuation_failure_alot_count is not atomic;