comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp @ 20804:7848fc12602b
Merge with jdk8u40-b25
author | Gilles Duboscq <gilles.m.duboscq@oracle.com> |
---|---|
date | Tue, 07 Apr 2015 14:58:49 +0200 |
parents | 52b4284cb496 e5668dcf12e9 |
children | |
20184:84105dcdb05b | 20804:7848fc12602b |
---|---|
27 | 27 |
28 #include "gc_implementation/g1/concurrentMark.hpp" | 28 #include "gc_implementation/g1/concurrentMark.hpp" |
29 #include "gc_implementation/g1/g1CollectedHeap.hpp" | 29 #include "gc_implementation/g1/g1CollectedHeap.hpp" |
30 #include "gc_implementation/g1/g1AllocRegion.inline.hpp" | 30 #include "gc_implementation/g1/g1AllocRegion.inline.hpp" |
31 #include "gc_implementation/g1/g1CollectorPolicy.hpp" | 31 #include "gc_implementation/g1/g1CollectorPolicy.hpp" |
32 #include "gc_implementation/g1/g1RemSet.inline.hpp" | |
33 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" | 32 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" |
| 33 #include "gc_implementation/g1/heapRegionManager.inline.hpp" |
34 #include "gc_implementation/g1/heapRegionSet.inline.hpp" | 34 #include "gc_implementation/g1/heapRegionSet.inline.hpp" |
35 #include "gc_implementation/g1/heapRegionSeq.inline.hpp" | 35 #include "runtime/orderAccess.inline.hpp" |
36 #include "utilities/taskqueue.hpp" | 36 #include "utilities/taskqueue.hpp" |
37 | 37 |
38 // Inline functions for G1CollectedHeap | 38 // Inline functions for G1CollectedHeap |
39 | 39 |
| 40 inline AllocationContextStats& G1CollectedHeap::allocation_context_stats() { |
| 41 return _allocation_context_stats; |
| 42 } |
| 43 |
40 // Return the region with the given index. It assumes the index is valid. | 44 // Return the region with the given index. It assumes the index is valid. |
41 inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrs.at(index); } | 45 inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); } |
| 46 |
| 47 inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const { |
| 48 assert(is_in_reserved(addr), |
| 49 err_msg("Cannot calculate region index for address "PTR_FORMAT" that is outside of the heap ["PTR_FORMAT", "PTR_FORMAT")", |
| 50 p2i(addr), p2i(_reserved.start()), p2i(_reserved.end()))); |
| 51 return (uint)(pointer_delta(addr, _reserved.start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes); |
| 52 } |
| 53 |
| 54 inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const { |
| 55 return _hrm.reserved().start() + index * HeapRegion::GrainWords; |
| 56 } |
42 | 57 |
43 template <class T> | 58 template <class T> |
44 inline HeapRegion* | 59 inline HeapRegion* G1CollectedHeap::heap_region_containing_raw(const T addr) const { |
45 G1CollectedHeap::heap_region_containing(const T addr) const { | 60 assert(addr != NULL, "invariant"); |
46 HeapRegion* hr = _hrs.addr_to_region((HeapWord*) addr); | 61 assert(is_in_g1_reserved((const void*) addr), |
47 // hr can be null if addr in perm_gen | 62 err_msg("Address "PTR_FORMAT" is outside of the heap ranging from ["PTR_FORMAT" to "PTR_FORMAT")", |
48 if (hr != NULL && hr->continuesHumongous()) { | 63 p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end()))); |
49 hr = hr->humongous_start_region(); | 64 return _hrm.addr_to_region((HeapWord*) addr); |
| 65 } |
| 66 |
| 67 template <class T> |
| 68 inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const { |
| 69 HeapRegion* hr = heap_region_containing_raw(addr); |
| 70 if (hr->continuesHumongous()) { |
| 71 return hr->humongous_start_region(); |
50 } | 72 } |
51 return hr; | 73 return hr; |
52 } | 74 } |
53 | 75 |
54 template <class T> | 76 inline void G1CollectedHeap::reset_gc_time_stamp() { |
55 inline HeapRegion* | 77 _gc_time_stamp = 0; |
56 G1CollectedHeap::heap_region_containing_raw(const T addr) const { | 78 OrderAccess::fence(); |
57 assert(_g1_reserved.contains((const void*) addr), "invariant"); | 79 // Clear the cached CSet starting regions and time stamps. |
58 HeapRegion* res = _hrs.addr_to_region_unsafe((HeapWord*) addr); | 80 // Their validity is dependent on the GC timestamp. |
59 return res; | 81 clear_cset_start_regions(); |
| 82 } |
| 83 |
| 84 inline void G1CollectedHeap::increment_gc_time_stamp() { |
| 85 ++_gc_time_stamp; |
| 86 OrderAccess::fence(); |
60 } | 87 } |
61 | 88 |
62 inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) { | 89 inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) { |
63 _old_set.remove(hr); | 90 _old_set.remove(hr); |
64 } | 91 } |
65 | 92 |
66 inline bool G1CollectedHeap::obj_in_cs(oop obj) { | 93 inline bool G1CollectedHeap::obj_in_cs(oop obj) { |
67 HeapRegion* r = _hrs.addr_to_region((HeapWord*) obj); | 94 HeapRegion* r = _hrm.addr_to_region((HeapWord*) obj); |
68 return r != NULL && r->in_collection_set(); | 95 return r != NULL && r->in_collection_set(); |
69 } | 96 } |
70 | 97 |
71 inline HeapWord* | 98 inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size, |
72 G1CollectedHeap::attempt_allocation(size_t word_size, | 99 unsigned int* gc_count_before_ret, |
73 unsigned int* gc_count_before_ret, | 100 int* gclocker_retry_count_ret) { |
74 int* gclocker_retry_count_ret) { | |
75 assert_heap_not_locked_and_not_at_safepoint(); | 101 assert_heap_not_locked_and_not_at_safepoint(); |
76 assert(!isHumongous(word_size), "attempt_allocation() should not " | 102 assert(!isHumongous(word_size), "attempt_allocation() should not " |
77 "be called for humongous allocation requests"); | 103 "be called for humongous allocation requests"); |
78 | 104 |
79 HeapWord* result = _mutator_alloc_region.attempt_allocation(word_size, | 105 AllocationContext_t context = AllocationContext::current(); |
80 false /* bot_updates */); | 106 HeapWord* result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size, |
| 107 false /* bot_updates */); |
81 if (result == NULL) { | 108 if (result == NULL) { |
82 result = attempt_allocation_slow(word_size, | 109 result = attempt_allocation_slow(word_size, |
| 110 context, |
83 gc_count_before_ret, | 111 gc_count_before_ret, |
84 gclocker_retry_count_ret); | 112 gclocker_retry_count_ret); |
85 } | 113 } |
86 assert_heap_not_locked(); | 114 assert_heap_not_locked(); |
87 if (result != NULL) { | 115 if (result != NULL) { |
88 dirty_young_block(result, word_size); | 116 dirty_young_block(result, word_size); |
89 } | 117 } |
90 return result; | 118 return result; |
91 } | 119 } |
92 | 120 |
93 inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t | 121 inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t word_size, |
94 word_size) { | 122 AllocationContext_t context) { |
95 assert(!isHumongous(word_size), | 123 assert(!isHumongous(word_size), |
96 "we should not be seeing humongous-size allocations in this path"); | 124 "we should not be seeing humongous-size allocations in this path"); |
97 | 125 |
98 HeapWord* result = _survivor_gc_alloc_region.attempt_allocation(word_size, | 126 HeapWord* result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation(word_size, |
99 false /* bot_updates */); | 127 false /* bot_updates */); |
100 if (result == NULL) { | 128 if (result == NULL) { |
101 MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag); | 129 MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag); |
102 result = _survivor_gc_alloc_region.attempt_allocation_locked(word_size, | 130 result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation_locked(word_size, |
103 false /* bot_updates */); | 131 false /* bot_updates */); |
104 } | 132 } |
105 if (result != NULL) { | 133 if (result != NULL) { |
106 dirty_young_block(result, word_size); | 134 dirty_young_block(result, word_size); |
107 } | 135 } |
108 return result; | 136 return result; |
109 } | 137 } |
110 | 138 |
111 inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size) { | 139 inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size, |
| 140 AllocationContext_t context) { |
112 assert(!isHumongous(word_size), | 141 assert(!isHumongous(word_size), |
113 "we should not be seeing humongous-size allocations in this path"); | 142 "we should not be seeing humongous-size allocations in this path"); |
114 | 143 |
115 HeapWord* result = _old_gc_alloc_region.attempt_allocation(word_size, | 144 HeapWord* result = _allocator->old_gc_alloc_region(context)->attempt_allocation(word_size, |
116 true /* bot_updates */); | 145 true /* bot_updates */); |
117 if (result == NULL) { | 146 if (result == NULL) { |
118 MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag); | 147 MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag); |
119 result = _old_gc_alloc_region.attempt_allocation_locked(word_size, | 148 result = _allocator->old_gc_alloc_region(context)->attempt_allocation_locked(word_size, |
120 true /* bot_updates */); | 149 true /* bot_updates */); |
121 } | 150 } |
122 return result; | 151 return result; |
123 } | 152 } |
124 | 153 |
125 // It dirties the cards that cover the block so that so that the post | 154 // It dirties the cards that cover the block so that so that the post |
132 | 161 |
133 // Assign the containing region to containing_hr so that we don't | 162 // Assign the containing region to containing_hr so that we don't |
134 // have to keep calling heap_region_containing_raw() in the | 163 // have to keep calling heap_region_containing_raw() in the |
135 // asserts below. | 164 // asserts below. |
136 DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);) | 165 DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);) |
137 assert(containing_hr != NULL && start != NULL && word_size > 0, | 166 assert(word_size > 0, "pre-condition"); |
138 "pre-condition"); | |
139 assert(containing_hr->is_in(start), "it should contain start"); | 167 assert(containing_hr->is_in(start), "it should contain start"); |
140 assert(containing_hr->is_young(), "it should be young"); | 168 assert(containing_hr->is_young(), "it should be young"); |
141 assert(!containing_hr->isHumongous(), "it should not be humongous"); | 169 assert(!containing_hr->isHumongous(), "it should not be humongous"); |
142 | 170 |
143 HeapWord* end = start + word_size; | 171 HeapWord* end = start + word_size; |
157 | 185 |
158 inline bool G1CollectedHeap::isMarkedNext(oop obj) const { | 186 inline bool G1CollectedHeap::isMarkedNext(oop obj) const { |
159 return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj); | 187 return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj); |
160 } | 188 } |
161 | 189 |
162 | |
163 // This is a fast test on whether a reference points into the | 190 // This is a fast test on whether a reference points into the |
164 // collection set or not. Assume that the reference | 191 // collection set or not. Assume that the reference |
165 // points into the heap. | 192 // points into the heap. |
166 inline bool G1CollectedHeap::in_cset_fast_test(oop obj) { | 193 inline bool G1CollectedHeap::is_in_cset(oop obj) { |
167 assert(_in_cset_fast_test != NULL, "sanity"); | 194 bool ret = _in_cset_fast_test.is_in_cset((HeapWord*)obj); |
168 assert(_g1_committed.contains((HeapWord*) obj), err_msg("Given reference outside of heap, is "PTR_FORMAT, p2i((HeapWord*)obj))); | |
169 // no need to subtract the bottom of the heap from obj, | |
170 // _in_cset_fast_test is biased | |
171 uintx index = cast_from_oop<uintx>(obj) >> HeapRegion::LogOfHRGrainBytes; | |
172 bool ret = _in_cset_fast_test[index]; | |
173 // let's make sure the result is consistent with what the slower | 195 // let's make sure the result is consistent with what the slower |
174 // test returns | 196 // test returns |
175 assert( ret || !obj_in_cs(obj), "sanity"); | 197 assert( ret || !obj_in_cs(obj), "sanity"); |
176 assert(!ret || obj_in_cs(obj), "sanity"); | 198 assert(!ret || obj_in_cs(obj), "sanity"); |
177 return ret; | 199 return ret; |
| 200 } |
| 201 |
| 202 bool G1CollectedHeap::is_in_cset_or_humongous(const oop obj) { |
| 203 return _in_cset_fast_test.is_in_cset_or_humongous((HeapWord*)obj); |
| 204 } |
| 205 |
| 206 G1CollectedHeap::in_cset_state_t G1CollectedHeap::in_cset_state(const oop obj) { |
| 207 return _in_cset_fast_test.at((HeapWord*)obj); |
| 208 } |
| 209 |
| 210 void G1CollectedHeap::register_humongous_region_with_in_cset_fast_test(uint index) { |
| 211 _in_cset_fast_test.set_humongous(index); |
178 } | 212 } |
179 | 213 |
180 #ifndef PRODUCT | 214 #ifndef PRODUCT |
181 // Support for G1EvacuationFailureALot | 215 // Support for G1EvacuationFailureALot |
182 | 216 |
224 during_im, | 258 during_im, |
225 during_marking); | 259 during_marking); |
226 } | 260 } |
227 } | 261 } |
228 | 262 |
229 inline bool | 263 inline bool G1CollectedHeap::evacuation_should_fail() { |
230 G1CollectedHeap::evacuation_should_fail() { | |
231 if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) { | 264 if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) { |
232 return false; | 265 return false; |
233 } | 266 } |
234 // G1EvacuationFailureALot is in effect for current GC | 267 // G1EvacuationFailureALot is in effect for current GC |
235 // Access to _evacuation_failure_alot_count is not atomic; | 268 // Access to _evacuation_failure_alot_count is not atomic; |
249 } | 282 } |
250 } | 283 } |
251 #endif // #ifndef PRODUCT | 284 #endif // #ifndef PRODUCT |
252 | 285 |
253 inline bool G1CollectedHeap::is_in_young(const oop obj) { | 286 inline bool G1CollectedHeap::is_in_young(const oop obj) { |
254 HeapRegion* hr = heap_region_containing(obj); | 287 if (obj == NULL) { |
255 return hr != NULL && hr->is_young(); | 288 return false; |
| 289 } |
| 290 return heap_region_containing(obj)->is_young(); |
256 } | 291 } |
257 | 292 |
258 // We don't need barriers for initializing stores to objects | 293 // We don't need barriers for initializing stores to objects |
259 // in the young gen: for the SATB pre-barrier, there is no | 294 // in the young gen: for the SATB pre-barrier, there is no |
260 // pre-value that needs to be remembered; for the remembered-set | 295 // pre-value that needs to be remembered; for the remembered-set |
263 inline bool G1CollectedHeap::can_elide_initializing_store_barrier(oop new_obj) { | 298 inline bool G1CollectedHeap::can_elide_initializing_store_barrier(oop new_obj) { |
264 return is_in_young(new_obj); | 299 return is_in_young(new_obj); |
265 } | 300 } |
266 | 301 |
267 inline bool G1CollectedHeap::is_obj_dead(const oop obj) const { | 302 inline bool G1CollectedHeap::is_obj_dead(const oop obj) const { |
268 const HeapRegion* hr = heap_region_containing(obj); | 303 if (obj == NULL) { |
269 if (hr == NULL) { | 304 return false; |
270 if (obj == NULL) return false; | 305 } |
271 else return true; | 306 return is_obj_dead(obj, heap_region_containing(obj)); |
272 } | |
273 else return is_obj_dead(obj, hr); | |
274 } | 307 } |
275 | 308 |
276 inline bool G1CollectedHeap::is_obj_ill(const oop obj) const { | 309 inline bool G1CollectedHeap::is_obj_ill(const oop obj) const { |
277 const HeapRegion* hr = heap_region_containing(obj); | 310 if (obj == NULL) { |
278 if (hr == NULL) { | 311 return false; |
279 if (obj == NULL) return false; | 312 } |
280 else return true; | 313 return is_obj_ill(obj, heap_region_containing(obj)); |
281 } | 314 } |
282 else return is_obj_ill(obj, hr); | 315 |
283 } | 316 inline void G1CollectedHeap::set_humongous_is_live(oop obj) { |
284 | 317 uint region = addr_to_region((HeapWord*)obj); |
285 template <class T> inline void G1ParScanThreadState::immediate_rs_update(HeapRegion* from, T* p, int tid) { | 318 // We not only set the "live" flag in the humongous_is_live table, but also |
286 if (!from->is_survivor()) { | 319 // reset the entry in the _in_cset_fast_test table so that subsequent references |
287 _g1_rem->par_write_ref(from, p, tid); | 320 // to the same humongous object do not go into the slow path again. |
288 } | 321 // This is racy, as multiple threads may at the same time enter here, but this |
289 } | 322 // is benign. |
290 | 323 // During collection we only ever set the "live" flag, and only ever clear the |
291 template <class T> void G1ParScanThreadState::update_rs(HeapRegion* from, T* p, int tid) { | 324 // entry in the in_cset_fast_table. |
292 if (G1DeferredRSUpdate) { | 325 // We only ever evaluate the contents of these tables (in the VM thread) after |
293 deferred_rs_update(from, p, tid); | 326 // having synchronized the worker threads with the VM thread, or in the same |
294 } else { | 327 // thread (i.e. within the VM thread). |
295 immediate_rs_update(from, p, tid); | 328 if (!_humongous_is_live.is_live(region)) { |
296 } | 329 _humongous_is_live.set_live(region); |
297 } | 330 _in_cset_fast_test.clear_humongous(region); |
298 | |
299 | |
300 inline void G1ParScanThreadState::do_oop_partial_array(oop* p) { | |
301 assert(has_partial_array_mask(p), "invariant"); | |
302 oop from_obj = clear_partial_array_mask(p); | |
303 | |
304 assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap."); | |
305 assert(from_obj->is_objArray(), "must be obj array"); | |
306 objArrayOop from_obj_array = objArrayOop(from_obj); | |
307 // The from-space object contains the real length. | |
308 int length = from_obj_array->length(); | |
309 | |
310 assert(from_obj->is_forwarded(), "must be forwarded"); | |
311 oop to_obj = from_obj->forwardee(); | |
312 assert(from_obj != to_obj, "should not be chunking self-forwarded objects"); | |
313 objArrayOop to_obj_array = objArrayOop(to_obj); | |
314 // We keep track of the next start index in the length field of the | |
315 // to-space object. | |
316 int next_index = to_obj_array->length(); | |
317 assert(0 <= next_index && next_index < length, | |
318 err_msg("invariant, next index: %d, length: %d", next_index, length)); | |
319 | |
320 int start = next_index; | |
321 int end = length; | |
322 int remainder = end - start; | |
323 // We'll try not to push a range that's smaller than ParGCArrayScanChunk. | |
324 if (remainder > 2 * ParGCArrayScanChunk) { | |
325 end = start + ParGCArrayScanChunk; | |
326 to_obj_array->set_length(end); | |
327 // Push the remainder before we process the range in case another | |
328 // worker has run out of things to do and can steal it. | |
329 oop* from_obj_p = set_partial_array_mask(from_obj); | |
330 push_on_queue(from_obj_p); | |
331 } else { | |
332 assert(length == end, "sanity"); | |
333 // We'll process the final range for this object. Restore the length | |
334 // so that the heap remains parsable in case of evacuation failure. | |
335 to_obj_array->set_length(end); | |
336 } | |
337 _scanner.set_region(_g1h->heap_region_containing_raw(to_obj)); | |
338 // Process indexes [start,end). It will also process the header | |
339 // along with the first chunk (i.e., the chunk with start == 0). | |
340 // Note that at this point the length field of to_obj_array is not | |
341 // correct given that we are using it to keep track of the next | |
342 // start index. oop_iterate_range() (thankfully!) ignores the length | |
343 // field and only relies on the start / end parameters. It does | |
344 // however return the size of the object which will be incorrect. So | |
345 // we have to ignore it even if we wanted to use it. | |
346 to_obj_array->oop_iterate_range(&_scanner, start, end); | |
347 } | |
348 | |
349 template <class T> inline void G1ParScanThreadState::deal_with_reference(T* ref_to_scan) { | |
350 if (!has_partial_array_mask(ref_to_scan)) { | |
351 // Note: we can use "raw" versions of "region_containing" because | |
352 // "obj_to_scan" is definitely in the heap, and is not in a | |
353 // humongous region. | |
354 HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan); | |
355 do_oop_evac(ref_to_scan, r); | |
356 } else { | |
357 do_oop_partial_array((oop*)ref_to_scan); | |
358 } | |
359 } | |
360 | |
361 inline void G1ParScanThreadState::deal_with_reference(StarTask ref) { | |
362 assert(verify_task(ref), "sanity"); | |
363 if (ref.is_narrow()) { | |
364 deal_with_reference((narrowOop*)ref); | |
365 } else { | |
366 deal_with_reference((oop*)ref); | |
367 } | 331 } |
368 } | 332 } |
369 | 333 |
370 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP | 334 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP |
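
The new `addr_to_region()` (new lines 47-52) and the reworked `_in_cset_fast_test` accessors (new lines 193-211) in the right-hand column rely on the same idea: a flat side table with one entry per heap region, indexed by `(addr - heap_bottom) >> log2(region size)`. The following standalone C++ sketch shows that shape; `RegionTable`, `RegionState`, and the parameter names are hypothetical stand-ins for illustration, not HotSpot's actual types.

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// One entry per heap region; the real table distinguishes more states.
enum class RegionState : uint8_t { NotInCSet, InCSet, Humongous };

class RegionTable {
 public:
  RegionTable(const char* heap_start, size_t heap_bytes, unsigned log_region_bytes)
      : _start(heap_start),
        _log(log_region_bytes),
        _states(heap_bytes >> log_region_bytes, RegionState::NotInCSet) {}

  // Mirrors the new addr_to_region(): index = byte offset >> log2(grain size).
  size_t index_for(const void* addr) const {
    assert(static_cast<const char*>(addr) >= _start && "address below heap bottom");
    return static_cast<size_t>(static_cast<const char*>(addr) - _start) >> _log;
  }

  // Mirrors is_in_cset() / is_in_cset_or_humongous(): a single table load,
  // no HeapRegion object dereference on the hot path.
  bool is_in_cset(const void* addr) const {
    return _states[index_for(addr)] == RegionState::InCSet;
  }
  bool is_in_cset_or_humongous(const void* addr) const {
    return _states[index_for(addr)] != RegionState::NotInCSet;
  }

  void set_humongous(size_t i)   { _states[i] = RegionState::Humongous; }
  void clear_humongous(size_t i) { _states[i] = RegionState::NotInCSet; }

 private:
  const char* _start;                // bottom of the reserved heap
  unsigned _log;                     // log2 of the region (grain) size
  std::vector<RegionState> _states;  // one state byte per region
};
```

For example, with 1 MB regions (`log_region_bytes` = 20), an address 5 MB above the heap bottom maps to region index 5.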
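The comment block added inside `set_humongous_is_live()` (new lines 318-327) explains why racing worker threads are tolerated there. Below is a single-threaded sketch of just the control flow, reusing the hypothetical `RegionTable` above; `humongous_live` stands in for the `_humongous_is_live` table. The real code's safety argument (each table is only ever written in one direction during a collection, and is only read after the workers have been synchronized with the VM thread) is not modeled here.

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative flow of set_humongous_is_live(); names are not HotSpot's.
void set_humongous_is_live_sketch(std::vector<uint8_t>& humongous_live,
                                  RegionTable& table,
                                  size_t region) {
  if (!humongous_live[region]) {
    humongous_live[region] = 1;     // only ever flips dead -> live
    table.clear_humongous(region);  // later refs skip the slow path
  }
}
```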
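Throughout the allocation paths, the diff also replaces direct fields such as `_mutator_alloc_region` and `_survivor_gc_alloc_region` with lookups through `_allocator`, keyed by an `AllocationContext_t`. A minimal sketch of that dispatch shape, with illustrative names only (the real `G1Allocator` interface is defined elsewhere in this changeset, not here):

```cpp
#include <array>
#include <cstddef>

struct AllocRegion { /* per-region bump-pointer allocator */ };

using AllocationContext = size_t;  // stands in for AllocationContext_t

// Hypothetical allocator: one mutator alloc region per allocation
// context instead of a single shared _mutator_alloc_region field.
class Allocator {
 public:
  AllocRegion* mutator_alloc_region(AllocationContext ctx) {
    return &_mutator_regions[ctx];  // caller supplies a valid context id
  }
 private:
  std::array<AllocRegion, 2> _mutator_regions;  // e.g. default + tenant
};
```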