Mercurial > hg > truffle
comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp @ 20404:227a9e5e4b4a
8057536: Refactor G1 to allow context specific allocations
Summary: Splitting out a g1 allocator class to simplify specialized allocators which can associate each allocation with a given context.
Reviewed-by: mgerdin, brutisso
author | sjohanss |
---|---|
date | Fri, 05 Sep 2014 09:49:19 +0200 |
parents | a8ea2f110d87 |
children | e5668dcf12e9 |
comparison
equal
deleted
inserted
replaced
20403:8ec8971f511a | 20404:227a9e5e4b4a |
---|---|
96 int* gclocker_retry_count_ret) { | 96 int* gclocker_retry_count_ret) { |
97 assert_heap_not_locked_and_not_at_safepoint(); | 97 assert_heap_not_locked_and_not_at_safepoint(); |
98 assert(!isHumongous(word_size), "attempt_allocation() should not " | 98 assert(!isHumongous(word_size), "attempt_allocation() should not " |
99 "be called for humongous allocation requests"); | 99 "be called for humongous allocation requests"); |
100 | 100 |
101 HeapWord* result = _mutator_alloc_region.attempt_allocation(word_size, | 101 AllocationContext_t context = AllocationContext::current(); |
102 false /* bot_updates */); | 102 HeapWord* result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size, |
103 false /* bot_updates */); | |
103 if (result == NULL) { | 104 if (result == NULL) { |
104 result = attempt_allocation_slow(word_size, | 105 result = attempt_allocation_slow(word_size, |
106 context, | |
105 gc_count_before_ret, | 107 gc_count_before_ret, |
106 gclocker_retry_count_ret); | 108 gclocker_retry_count_ret); |
107 } | 109 } |
108 assert_heap_not_locked(); | 110 assert_heap_not_locked(); |
109 if (result != NULL) { | 111 if (result != NULL) { |
110 dirty_young_block(result, word_size); | 112 dirty_young_block(result, word_size); |
111 } | 113 } |
112 return result; | 114 return result; |
113 } | 115 } |
114 | 116 |
115 inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t | 117 inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t word_size, |
116 word_size) { | 118 AllocationContext_t context) { |
117 assert(!isHumongous(word_size), | 119 assert(!isHumongous(word_size), |
118 "we should not be seeing humongous-size allocations in this path"); | 120 "we should not be seeing humongous-size allocations in this path"); |
119 | 121 |
120 HeapWord* result = _survivor_gc_alloc_region.attempt_allocation(word_size, | 122 HeapWord* result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation(word_size, |
121 false /* bot_updates */); | 123 false /* bot_updates */); |
122 if (result == NULL) { | 124 if (result == NULL) { |
123 MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag); | 125 MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag); |
124 result = _survivor_gc_alloc_region.attempt_allocation_locked(word_size, | 126 result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation_locked(word_size, |
125 false /* bot_updates */); | 127 false /* bot_updates */); |
126 } | 128 } |
127 if (result != NULL) { | 129 if (result != NULL) { |
128 dirty_young_block(result, word_size); | 130 dirty_young_block(result, word_size); |
129 } | 131 } |
130 return result; | 132 return result; |
131 } | 133 } |
132 | 134 |
133 inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size) { | 135 inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size, |
136 AllocationContext_t context) { | |
134 assert(!isHumongous(word_size), | 137 assert(!isHumongous(word_size), |
135 "we should not be seeing humongous-size allocations in this path"); | 138 "we should not be seeing humongous-size allocations in this path"); |
136 | 139 |
137 HeapWord* result = _old_gc_alloc_region.attempt_allocation(word_size, | 140 HeapWord* result = _allocator->old_gc_alloc_region(context)->attempt_allocation(word_size, |
138 true /* bot_updates */); | 141 true /* bot_updates */); |
139 if (result == NULL) { | 142 if (result == NULL) { |
140 MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag); | 143 MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag); |
141 result = _old_gc_alloc_region.attempt_allocation_locked(word_size, | 144 result = _allocator->old_gc_alloc_region(context)->attempt_allocation_locked(word_size, |
142 true /* bot_updates */); | 145 true /* bot_updates */); |
143 } | 146 } |
144 return result; | 147 return result; |
145 } | 148 } |
146 | 149 |
147 // It dirties the cards that cover the block so that so that the post | 150 // It dirties the cards that cover the block so that so that the post |