Mercurial > hg > graal-compiler
comparison src/share/vm/memory/sharedHeap.cpp @ 20804:7848fc12602b
Merge with jdk8u40-b25
| field | value |
|---|---|
| author | Gilles Duboscq <gilles.m.duboscq@oracle.com> |
| date | Tue, 07 Apr 2015 14:58:49 +0200 |
| parents | 52b4284cb496 f3aeae1f9fc5 |
| children | (none) |
comparison
equal
deleted
inserted
replaced
20184:84105dcdb05b | 20804:7848fc12602b |
---|---|
27 #include "classfile/systemDictionary.hpp" | 27 #include "classfile/systemDictionary.hpp" |
28 #include "code/codeCache.hpp" | 28 #include "code/codeCache.hpp" |
29 #include "gc_interface/collectedHeap.inline.hpp" | 29 #include "gc_interface/collectedHeap.inline.hpp" |
30 #include "memory/sharedHeap.hpp" | 30 #include "memory/sharedHeap.hpp" |
31 #include "oops/oop.inline.hpp" | 31 #include "oops/oop.inline.hpp" |
32 #include "runtime/atomic.inline.hpp" | |
32 #include "runtime/fprofiler.hpp" | 33 #include "runtime/fprofiler.hpp" |
33 #include "runtime/java.hpp" | 34 #include "runtime/java.hpp" |
34 #include "services/management.hpp" | 35 #include "services/management.hpp" |
35 #include "utilities/copy.hpp" | 36 #include "utilities/copy.hpp" |
36 #include "utilities/workgroup.hpp" | 37 #include "utilities/workgroup.hpp" |
37 | 38 |
38 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC | 39 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC |
39 | 40 |
40 SharedHeap* SharedHeap::_sh; | 41 SharedHeap* SharedHeap::_sh; |
41 | 42 |
42 // The set of potentially parallel tasks in strong root scanning. | 43 // The set of potentially parallel tasks in root scanning. |
43 enum SH_process_strong_roots_tasks { | 44 enum SH_process_roots_tasks { |
44 SH_PS_Universe_oops_do, | 45 SH_PS_Universe_oops_do, |
45 SH_PS_JNIHandles_oops_do, | 46 SH_PS_JNIHandles_oops_do, |
46 SH_PS_ObjectSynchronizer_oops_do, | 47 SH_PS_ObjectSynchronizer_oops_do, |
47 SH_PS_FlatProfiler_oops_do, | 48 SH_PS_FlatProfiler_oops_do, |
48 SH_PS_Management_oops_do, | 49 SH_PS_Management_oops_do, |
56 | 57 |
57 SharedHeap::SharedHeap(CollectorPolicy* policy_) : | 58 SharedHeap::SharedHeap(CollectorPolicy* policy_) : |
58 CollectedHeap(), | 59 CollectedHeap(), |
59 _collector_policy(policy_), | 60 _collector_policy(policy_), |
60 _rem_set(NULL), | 61 _rem_set(NULL), |
62 _strong_roots_scope(NULL), | |
61 _strong_roots_parity(0), | 63 _strong_roots_parity(0), |
62 _process_strong_tasks(new SubTasksDone(SH_PS_NumElements)), | 64 _process_strong_tasks(new SubTasksDone(SH_PS_NumElements)), |
63 _workers(NULL) | 65 _workers(NULL) |
64 { | 66 { |
65 if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) { | 67 if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) { |
112 virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); } | 114 virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); } |
113 }; | 115 }; |
114 static AssertNonScavengableClosure assert_is_non_scavengable_closure; | 116 static AssertNonScavengableClosure assert_is_non_scavengable_closure; |
115 #endif | 117 #endif |
116 | 118 |
119 SharedHeap::StrongRootsScope* SharedHeap::active_strong_roots_scope() const { | |
120 return _strong_roots_scope; | |
121 } | |
122 void SharedHeap::register_strong_roots_scope(SharedHeap::StrongRootsScope* scope) { | |
123 assert(_strong_roots_scope == NULL, "Should only have one StrongRootsScope active"); | |
124 assert(scope != NULL, "Illegal argument"); | |
125 _strong_roots_scope = scope; | |
126 } | |
127 void SharedHeap::unregister_strong_roots_scope(SharedHeap::StrongRootsScope* scope) { | |
128 assert(_strong_roots_scope == scope, "Wrong scope unregistered"); | |
129 _strong_roots_scope = NULL; | |
130 } | |
131 | |
117 void SharedHeap::change_strong_roots_parity() { | 132 void SharedHeap::change_strong_roots_parity() { |
118 // Also set the new collection parity. | 133 // Also set the new collection parity. |
119 assert(_strong_roots_parity >= 0 && _strong_roots_parity <= 2, | 134 assert(_strong_roots_parity >= 0 && _strong_roots_parity <= 2, |
120 "Not in range."); | 135 "Not in range."); |
121 _strong_roots_parity++; | 136 _strong_roots_parity++; |
122 if (_strong_roots_parity == 3) _strong_roots_parity = 1; | 137 if (_strong_roots_parity == 3) _strong_roots_parity = 1; |
123 assert(_strong_roots_parity >= 1 && _strong_roots_parity <= 2, | 138 assert(_strong_roots_parity >= 1 && _strong_roots_parity <= 2, |
124 "Not in range."); | 139 "Not in range."); |
125 } | 140 } |
126 | 141 |
127 SharedHeap::StrongRootsScope::StrongRootsScope(SharedHeap* outer, bool activate) | 142 SharedHeap::StrongRootsScope::StrongRootsScope(SharedHeap* heap, bool activate) |
128 : MarkScope(activate) | 143 : MarkScope(activate), _sh(heap), _n_workers_done_with_threads(0) |
129 { | 144 { |
130 if (_active) { | 145 if (_active) { |
131 outer->change_strong_roots_parity(); | 146 _sh->register_strong_roots_scope(this); |
147 _sh->change_strong_roots_parity(); | |
132 // Zero the claimed high water mark in the StringTable | 148 // Zero the claimed high water mark in the StringTable |
133 StringTable::clear_parallel_claimed_index(); | 149 StringTable::clear_parallel_claimed_index(); |
134 } | 150 } |
135 } | 151 } |
136 | 152 |
137 SharedHeap::StrongRootsScope::~StrongRootsScope() { | 153 SharedHeap::StrongRootsScope::~StrongRootsScope() { |
138 // nothing particular | 154 if (_active) { |
139 } | 155 _sh->unregister_strong_roots_scope(this); |
140 | 156 } |
141 void SharedHeap::process_strong_roots(bool activate_scope, | 157 } |
142 bool is_scavenging, | 158 |
143 ScanningOption so, | 159 Monitor* SharedHeap::StrongRootsScope::_lock = new Monitor(Mutex::leaf, "StrongRootsScope lock", false); |
144 OopClosure* roots, | 160 |
145 CodeBlobClosure* code_roots, | 161 void SharedHeap::StrongRootsScope::mark_worker_done_with_threads(uint n_workers) { |
146 KlassClosure* klass_closure) { | 162 // The Thread work barrier is only needed by G1 Class Unloading. |
163 // No need to use the barrier if this is single-threaded code. | |
164 if (UseG1GC && ClassUnloadingWithConcurrentMark && n_workers > 0) { | |
165 uint new_value = (uint)Atomic::add(1, &_n_workers_done_with_threads); | |
166 if (new_value == n_workers) { | |
167 // This thread is last. Notify the others. | |
168 MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag); | |
169 _lock->notify_all(); | |
170 } | |
171 } | |
172 } | |
173 | |
174 void SharedHeap::StrongRootsScope::wait_until_all_workers_done_with_threads(uint n_workers) { | |
175 assert(UseG1GC, "Currently only used by G1"); | |
176 assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading"); | |
177 | |
178 // No need to use the barrier if this is single-threaded code. | |
179 if (n_workers > 0 && (uint)_n_workers_done_with_threads != n_workers) { | |
180 MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag); | |
181 while ((uint)_n_workers_done_with_threads != n_workers) { | |
182 _lock->wait(Mutex::_no_safepoint_check_flag, 0, false); | |
183 } | |
184 } | |
185 } | |
186 | |
187 void SharedHeap::process_roots(bool activate_scope, | |
188 ScanningOption so, | |
189 OopClosure* strong_roots, | |
190 OopClosure* weak_roots, | |
191 CLDClosure* strong_cld_closure, | |
192 CLDClosure* weak_cld_closure, | |
193 CodeBlobClosure* code_roots) { | |
147 StrongRootsScope srs(this, activate_scope); | 194 StrongRootsScope srs(this, activate_scope); |
148 | 195 |
149 // General strong roots. | 196 // General roots. |
150 assert(_strong_roots_parity != 0, "must have called prologue code"); | 197 assert(_strong_roots_parity != 0, "must have called prologue code"); |
198 assert(code_roots != NULL, "code root closure should always be set"); | |
151 // _n_termination for _process_strong_tasks should be set up stream | 199 // _n_termination for _process_strong_tasks should be set up stream |
152 // in a method not running in a GC worker. Otherwise the GC worker | 200 // in a method not running in a GC worker. Otherwise the GC worker |
153 // could be trying to change the termination condition while the task | 201 // could be trying to change the termination condition while the task |
154 // is executing in another GC worker. | 202 // is executing in another GC worker. |
203 | |
204 // Iterating over the CLDG and the Threads are done early to allow G1 to | |
205 // first process the strong CLDs and nmethods and then, after a barrier, | |
206 // let the thread process the weak CLDs and nmethods. | |
207 | |
208 if (!_process_strong_tasks->is_task_claimed(SH_PS_ClassLoaderDataGraph_oops_do)) { | |
209 ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure); | |
210 } | |
211 | |
212 // Some CLDs contained in the thread frames should be considered strong. | |
213 // Don't process them if they will be processed during the ClassLoaderDataGraph phase. | |
214 CLDClosure* roots_from_clds_p = (strong_cld_closure != weak_cld_closure) ? strong_cld_closure : NULL; | |
215 // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway | |
216 CodeBlobClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots; | |
217 | |
218 Threads::possibly_parallel_oops_do(strong_roots, roots_from_clds_p, roots_from_code_p); | |
219 | |
220 // This is the point where this worker thread will not find more strong CLDs/nmethods. | |
221 // Report this so G1 can synchronize the strong and weak CLDs/nmethods processing. | |
222 active_strong_roots_scope()->mark_worker_done_with_threads(n_par_threads()); | |
223 | |
155 if (!_process_strong_tasks->is_task_claimed(SH_PS_Universe_oops_do)) { | 224 if (!_process_strong_tasks->is_task_claimed(SH_PS_Universe_oops_do)) { |
156 Universe::oops_do(roots); | 225 Universe::oops_do(strong_roots); |
157 } | 226 } |
158 // Global (strong) JNI handles | 227 // Global (strong) JNI handles |
159 if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do)) | 228 if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do)) |
160 JNIHandles::oops_do(roots); | 229 JNIHandles::oops_do(strong_roots); |
161 | |
162 // All threads execute this; the individual threads are task groups. | |
163 CLDToOopClosure roots_from_clds(roots); | |
164 CLDToOopClosure* roots_from_clds_p = (is_scavenging ? NULL : &roots_from_clds); | |
165 if (CollectedHeap::use_parallel_gc_threads()) { | |
166 Threads::possibly_parallel_oops_do(roots, roots_from_clds_p, code_roots); | |
167 } else { | |
168 Threads::oops_do(roots, roots_from_clds_p, code_roots); | |
169 } | |
170 | 230 |
171 if (!_process_strong_tasks-> is_task_claimed(SH_PS_ObjectSynchronizer_oops_do)) | 231 if (!_process_strong_tasks-> is_task_claimed(SH_PS_ObjectSynchronizer_oops_do)) |
172 ObjectSynchronizer::oops_do(roots); | 232 ObjectSynchronizer::oops_do(strong_roots); |
173 if (!_process_strong_tasks->is_task_claimed(SH_PS_FlatProfiler_oops_do)) | 233 if (!_process_strong_tasks->is_task_claimed(SH_PS_FlatProfiler_oops_do)) |
174 FlatProfiler::oops_do(roots); | 234 FlatProfiler::oops_do(strong_roots); |
175 if (!_process_strong_tasks->is_task_claimed(SH_PS_Management_oops_do)) | 235 if (!_process_strong_tasks->is_task_claimed(SH_PS_Management_oops_do)) |
176 Management::oops_do(roots); | 236 Management::oops_do(strong_roots); |
177 if (!_process_strong_tasks->is_task_claimed(SH_PS_jvmti_oops_do)) | 237 if (!_process_strong_tasks->is_task_claimed(SH_PS_jvmti_oops_do)) |
178 JvmtiExport::oops_do(roots); | 238 JvmtiExport::oops_do(strong_roots); |
179 | 239 |
180 if (!_process_strong_tasks->is_task_claimed(SH_PS_SystemDictionary_oops_do)) { | 240 if (!_process_strong_tasks->is_task_claimed(SH_PS_SystemDictionary_oops_do)) { |
181 if (so & SO_AllClasses) { | 241 SystemDictionary::roots_oops_do(strong_roots, weak_roots); |
182 SystemDictionary::oops_do(roots); | |
183 } else if (so & SO_SystemClasses) { | |
184 SystemDictionary::always_strong_oops_do(roots); | |
185 } else { | |
186 fatal("We should always have selected either SO_AllClasses or SO_SystemClasses"); | |
187 } | |
188 } | |
189 | |
190 if (!_process_strong_tasks->is_task_claimed(SH_PS_ClassLoaderDataGraph_oops_do)) { | |
191 if (so & SO_AllClasses) { | |
192 ClassLoaderDataGraph::oops_do(roots, klass_closure, !is_scavenging); | |
193 } else if (so & SO_SystemClasses) { | |
194 ClassLoaderDataGraph::always_strong_oops_do(roots, klass_closure, !is_scavenging); | |
195 } | |
196 } | 242 } |
197 | 243 |
198 // All threads execute the following. A specific chunk of buckets | 244 // All threads execute the following. A specific chunk of buckets |
199 // from the StringTable are the individual tasks. | 245 // from the StringTable are the individual tasks. |
200 if (so & SO_Strings) { | 246 if (weak_roots != NULL) { |
201 if (CollectedHeap::use_parallel_gc_threads()) { | 247 if (CollectedHeap::use_parallel_gc_threads()) { |
202 StringTable::possibly_parallel_oops_do(roots); | 248 StringTable::possibly_parallel_oops_do(weak_roots); |
203 } else { | 249 } else { |
204 StringTable::oops_do(roots); | 250 StringTable::oops_do(weak_roots); |
205 } | 251 } |
206 } | 252 } |
207 | 253 |
208 if (!_process_strong_tasks->is_task_claimed(SH_PS_CodeCache_oops_do)) { | 254 if (!_process_strong_tasks->is_task_claimed(SH_PS_CodeCache_oops_do)) { |
209 if (so & SO_CodeCache) { | 255 if (so & SO_ScavengeCodeCache) { |
210 assert(code_roots != NULL, "must supply closure for code cache"); | 256 assert(code_roots != NULL, "must supply closure for code cache"); |
211 | 257 |
212 if (is_scavenging) { | 258 // We only visit parts of the CodeCache when scavenging. |
213 // We only visit parts of the CodeCache when scavenging. | 259 CodeCache::scavenge_root_nmethods_do(code_roots); |
214 CodeCache::scavenge_root_nmethods_do(code_roots); | 260 } |
215 } else { | 261 if (so & SO_AllCodeCache) { |
216 // CMSCollector uses this to do intermediate-strength collections. | 262 assert(code_roots != NULL, "must supply closure for code cache"); |
217 // We scan the entire code cache, since CodeCache::do_unloading is not called. | 263 |
218 CodeCache::blobs_do(code_roots); | 264 // CMSCollector uses this to do intermediate-strength collections. |
219 } | 265 // We scan the entire code cache, since CodeCache::do_unloading is not called. |
266 CodeCache::blobs_do(code_roots); | |
220 } | 267 } |
221 // Verify that the code cache contents are not subject to | 268 // Verify that the code cache contents are not subject to |
222 // movement by a scavenging collection. | 269 // movement by a scavenging collection. |
223 DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, /*do_marking=*/ false)); | 270 DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations)); |
224 DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable)); | 271 DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable)); |
225 } | 272 } |
226 | 273 |
227 _process_strong_tasks->all_tasks_completed(); | 274 _process_strong_tasks->all_tasks_completed(); |
228 } | 275 } |
276 | |
277 void SharedHeap::process_all_roots(bool activate_scope, | |
278 ScanningOption so, | |
279 OopClosure* roots, | |
280 CLDClosure* cld_closure, | |
281 CodeBlobClosure* code_closure) { | |
282 process_roots(activate_scope, so, | |
283 roots, roots, | |
284 cld_closure, cld_closure, | |
285 code_closure); | |
286 } | |
287 | |
288 void SharedHeap::process_strong_roots(bool activate_scope, | |
289 ScanningOption so, | |
290 OopClosure* roots, | |
291 CLDClosure* cld_closure, | |
292 CodeBlobClosure* code_closure) { | |
293 process_roots(activate_scope, so, | |
294 roots, NULL, | |
295 cld_closure, NULL, | |
296 code_closure); | |
297 } | |
298 | |
229 | 299 |
230 class AlwaysTrueClosure: public BoolObjectClosure { | 300 class AlwaysTrueClosure: public BoolObjectClosure { |
231 public: | 301 public: |
232 bool do_object_b(oop p) { return true; } | 302 bool do_object_b(oop p) { return true; } |
233 }; | 303 }; |
234 static AlwaysTrueClosure always_true; | 304 static AlwaysTrueClosure always_true; |
235 | 305 |
236 void SharedHeap::process_weak_roots(OopClosure* root_closure, | 306 void SharedHeap::process_weak_roots(OopClosure* root_closure) { |
237 CodeBlobClosure* code_roots) { | |
238 // Global (weak) JNI handles | 307 // Global (weak) JNI handles |
239 JNIHandles::weak_oops_do(&always_true, root_closure); | 308 JNIHandles::weak_oops_do(&always_true, root_closure); |
240 | |
241 CodeCache::blobs_do(code_roots); | |
242 StringTable::oops_do(root_closure); | |
243 } | 309 } |
244 | 310 |
245 void SharedHeap::set_barrier_set(BarrierSet* bs) { | 311 void SharedHeap::set_barrier_set(BarrierSet* bs) { |
246 _barrier_set = bs; | 312 _barrier_set = bs; |
247 // Cached barrier set for fast access in oops | 313 // Cached barrier set for fast access in oops |