Mercurial > hg > graal-jvmci-8
comparison src/share/vm/runtime/sweeper.cpp @ 1538:bfe29ec02863
6950075: nmethod sweeper should operate concurrently
Reviewed-by: never, kvn
Contributed-by: eric.caspole@amd.com
author | never |
---|---|
date | Mon, 17 May 2010 16:50:07 -0700 |
parents | 5f24d0319e54 |
children | c18cbe5936b8 |
comparison
equal
deleted
inserted
replaced
1537:79bf863697eb | 1538:bfe29ec02863 |
---|---|
31 int NMethodSweeper::_invocations = 0; // No. of invocations left until we are completed with this pass | 31 int NMethodSweeper::_invocations = 0; // No. of invocations left until we are completed with this pass |
32 | 32 |
33 jint NMethodSweeper::_locked_seen = 0; | 33 jint NMethodSweeper::_locked_seen = 0; |
34 jint NMethodSweeper::_not_entrant_seen_on_stack = 0; | 34 jint NMethodSweeper::_not_entrant_seen_on_stack = 0; |
35 bool NMethodSweeper::_rescan = false; | 35 bool NMethodSweeper::_rescan = false; |
36 bool NMethodSweeper::_do_sweep = false; | |
37 jint NMethodSweeper::_sweep_started = 0; | |
36 bool NMethodSweeper::_was_full = false; | 38 bool NMethodSweeper::_was_full = false; |
37 jint NMethodSweeper::_advise_to_sweep = 0; | 39 jint NMethodSweeper::_advise_to_sweep = 0; |
38 jlong NMethodSweeper::_last_was_full = 0; | 40 jlong NMethodSweeper::_last_was_full = 0; |
39 uint NMethodSweeper::_highest_marked = 0; | 41 uint NMethodSweeper::_highest_marked = 0; |
40 long NMethodSweeper::_was_full_traversal = 0; | 42 long NMethodSweeper::_was_full_traversal = 0; |
48 } | 50 } |
49 } | 51 } |
50 }; | 52 }; |
51 static MarkActivationClosure mark_activation_closure; | 53 static MarkActivationClosure mark_activation_closure; |
52 | 54 |
53 void NMethodSweeper::sweep() { | 55 void NMethodSweeper::scan_stacks() { |
54 assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint"); | 56 assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint"); |
55 if (!MethodFlushing) return; | 57 if (!MethodFlushing) return; |
58 _do_sweep = true; | |
56 | 59 |
57 // No need to synchronize access, since this is always executed at a | 60 // No need to synchronize access, since this is always executed at a |
58 // safepoint. If we aren't in the middle of scan and a rescan | 61 // safepoint. If we aren't in the middle of scan and a rescan |
59 // hasn't been requested then just return. | 62 // hasn't been requested then just return. If UseCodeCacheFlushing is on and |
60 if (_current == NULL && !_rescan) return; | 63 // code cache flushing is in progress, don't skip sweeping to help make progress |
64 // clearing space in the code cache. | |
65 if ((_current == NULL && !_rescan) && !(UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs())) { | |
66 _do_sweep = false; | |
67 return; | |
68 } | |
61 | 69 |
62 // Make sure CompiledIC_lock is unlocked, since we might update some | 70 // Make sure CompiledIC_lock is unlocked, since we might update some |
63 // inline caches. If it is, we just bail-out and try later. | 71 // inline caches. If it is, we just bail-out and try later. |
64 if (CompiledIC_lock->is_locked() || Patching_lock->is_locked()) return; | 72 if (CompiledIC_lock->is_locked() || Patching_lock->is_locked()) return; |
65 | 73 |
66 // Check for restart | 74 // Check for restart |
67 assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid"); | 75 assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid"); |
68 if (_current == NULL) { | 76 if (_current == NULL) { |
69 _seen = 0; | 77 _seen = 0; |
70 _invocations = NmethodSweepFraction; | 78 _invocations = NmethodSweepFraction; |
71 _current = CodeCache::first(); | 79 _current = CodeCache::first_nmethod(); |
72 _traversals += 1; | 80 _traversals += 1; |
73 if (PrintMethodFlushing) { | 81 if (PrintMethodFlushing) { |
74 tty->print_cr("### Sweep: stack traversal %d", _traversals); | 82 tty->print_cr("### Sweep: stack traversal %d", _traversals); |
75 } | 83 } |
76 Threads::nmethods_do(&mark_activation_closure); | 84 Threads::nmethods_do(&mark_activation_closure); |
79 _rescan = false; | 87 _rescan = false; |
80 _locked_seen = 0; | 88 _locked_seen = 0; |
81 _not_entrant_seen_on_stack = 0; | 89 _not_entrant_seen_on_stack = 0; |
82 } | 90 } |
83 | 91 |
84 if (PrintMethodFlushing && Verbose) { | |
85 tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_blobs(), _invocations); | |
86 } | |
87 | |
88 // We want to visit all nmethods after NmethodSweepFraction invocations. | |
89 // If invocation is 1 we do the rest | |
90 int todo = CodeCache::nof_blobs(); | |
91 if (_invocations != 1) { | |
92 todo = (CodeCache::nof_blobs() - _seen) / _invocations; | |
93 _invocations--; | |
94 } | |
95 | |
96 for(int i = 0; i < todo && _current != NULL; i++) { | |
97 CodeBlob* next = CodeCache::next(_current); // Read next before we potentially delete current | |
98 if (_current->is_nmethod()) { | |
99 process_nmethod((nmethod *)_current); | |
100 } | |
101 _seen++; | |
102 _current = next; | |
103 } | |
104 // Because we could stop on a codeBlob other than an nmethod we skip forward | |
105 // to the next nmethod (if any). codeBlobs other than nmethods can be freed | |
106 // async to us and make _current invalid while we sleep. | |
107 while (_current != NULL && !_current->is_nmethod()) { | |
108 _current = CodeCache::next(_current); | |
109 } | |
110 | |
111 if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) { | |
112 // we've completed a scan without making progress but there were | |
113 // nmethods we were unable to process either because they were | |
114 // locked or were still on stack. We don't have to aggressively | |
115 // clean them up so just stop scanning. We could scan once more | |
116 // but that complicates the control logic and it's unlikely to | |
117 // matter much. | |
118 if (PrintMethodFlushing) { | |
119 tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep"); | |
120 } | |
121 } | |
122 | |
123 if (UseCodeCacheFlushing) { | 92 if (UseCodeCacheFlushing) { |
124 if (!CodeCache::needs_flushing()) { | 93 if (!CodeCache::needs_flushing()) { |
125 // In a safepoint, no race with setters | 94 // scan_stacks() runs during a safepoint, no race with setters |
126 _advise_to_sweep = 0; | 95 _advise_to_sweep = 0; |
127 } | 96 } |
128 | 97 |
129 if (was_full()) { | 98 if (was_full()) { |
130 // There was some progress so attempt to restart the compiler | 99 // There was some progress so attempt to restart the compiler |
153 } | 122 } |
154 } | 123 } |
155 } | 124 } |
156 } | 125 } |
157 | 126 |
127 void NMethodSweeper::possibly_sweep() { | |
128 if ((!MethodFlushing) || (!_do_sweep)) return; | |
129 | |
130 if (_invocations > 0) { | |
131 // Only one thread at a time will sweep | |
132 jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 ); | |
133 if (old != 0) { | |
134 return; | |
135 } | |
136 sweep_code_cache(); | |
137 } | |
138 _sweep_started = 0; | |
139 } | |
140 | |
141 void NMethodSweeper::sweep_code_cache() { | |
142 #ifdef ASSERT | |
143 jlong sweep_start; | |
144 if(PrintMethodFlushing) { | |
145 sweep_start = os::javaTimeMillis(); | |
146 } | |
147 #endif | |
148 if (PrintMethodFlushing && Verbose) { | |
149 tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_blobs(), _invocations); | |
150 } | |
151 | |
152 // We want to visit all nmethods after NmethodSweepFraction invocations. | |
153 // If invocation is 1 we do the rest | |
154 int todo = CodeCache::nof_blobs(); | |
155 if (_invocations > 1) { | |
156 todo = (CodeCache::nof_blobs() - _seen) / _invocations; | |
157 } | |
158 | |
159 // Compilers may check to sweep more often than stack scans happen, | |
160 // don't keep trying once it is all scanned | |
161 _invocations--; | |
162 | |
163 assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here"); | |
164 assert(!CodeCache_lock->owned_by_self(), "just checking"); | |
165 | |
166 { | |
167 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); | |
168 | |
169 for(int i = 0; i < todo && _current != NULL; i++) { | |
170 | |
171 // Since we will give up the CodeCache_lock, always skip ahead to an nmethod. | |
172 // Other blobs can be deleted by other threads | |
173 // Read next before we potentially delete current | |
174 CodeBlob* next = CodeCache::next_nmethod(_current); | |
175 | |
176 // Now ready to process nmethod and give up CodeCache_lock | |
177 { | |
178 MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); | |
179 process_nmethod((nmethod *)_current); | |
180 } | |
181 _seen++; | |
182 _current = next; | |
183 } | |
184 | |
185 // Skip forward to the next nmethod (if any). Code blobs other than nmethods | |
186 // can be freed async to us and make _current invalid while we sleep. | |
187 _current = CodeCache::next_nmethod(_current); | |
188 } | |
189 | |
190 if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) { | |
191 // we've completed a scan without making progress but there were | |
192 // nmethods we were unable to process either because they were | |
193 // locked or were still on stack. We don't have to aggressively | |
194 // clean them up so just stop scanning. We could scan once more | |
195 // but that complicates the control logic and it's unlikely to | |
196 // matter much. | |
197 if (PrintMethodFlushing) { | |
198 tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep"); | |
199 } | |
200 } | |
201 | |
202 #ifdef ASSERT | |
203 if(PrintMethodFlushing) { | |
204 jlong sweep_end = os::javaTimeMillis(); | |
205 tty->print_cr("### sweeper: sweep time(%d): " INT64_FORMAT, _invocations, sweep_end - sweep_start); | |
206 } | |
207 #endif | |
208 } | |
209 | |
158 | 210 |
159 void NMethodSweeper::process_nmethod(nmethod *nm) { | 211 void NMethodSweeper::process_nmethod(nmethod *nm) { |
212 assert(!CodeCache_lock->owned_by_self(), "just checking"); | |
213 | |
160 // Skip methods that are currently referenced by the VM | 214 // Skip methods that are currently referenced by the VM |
161 if (nm->is_locked_by_vm()) { | 215 if (nm->is_locked_by_vm()) { |
162 // But still remember to clean-up inline caches for alive nmethods | 216 // But still remember to clean-up inline caches for alive nmethods |
163 if (nm->is_alive()) { | 217 if (nm->is_alive()) { |
164 // Clean-up all inline caches that point to zombie/non-reentrant methods | 218 // Clean-up all inline caches that point to zombie/non-reentrant methods |
219 MutexLocker cl(CompiledIC_lock); | |
165 nm->cleanup_inline_caches(); | 220 nm->cleanup_inline_caches(); |
166 } else { | 221 } else { |
167 _locked_seen++; | 222 _locked_seen++; |
168 } | 223 } |
169 return; | 224 return; |
176 if (nm->is_marked_for_reclamation()) { | 231 if (nm->is_marked_for_reclamation()) { |
177 assert(!nm->is_locked_by_vm(), "must not flush locked nmethods"); | 232 assert(!nm->is_locked_by_vm(), "must not flush locked nmethods"); |
178 if (PrintMethodFlushing && Verbose) { | 233 if (PrintMethodFlushing && Verbose) { |
179 tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm); | 234 tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm); |
180 } | 235 } |
236 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); | |
181 nm->flush(); | 237 nm->flush(); |
182 } else { | 238 } else { |
183 if (PrintMethodFlushing && Verbose) { | 239 if (PrintMethodFlushing && Verbose) { |
184 tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm); | 240 tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm); |
185 } | 241 } |
195 } | 251 } |
196 nm->make_zombie(); | 252 nm->make_zombie(); |
197 _rescan = true; | 253 _rescan = true; |
198 } else { | 254 } else { |
199 // Still alive, clean up its inline caches | 255 // Still alive, clean up its inline caches |
256 MutexLocker cl(CompiledIC_lock); | |
200 nm->cleanup_inline_caches(); | 257 nm->cleanup_inline_caches(); |
201 // we couldn't transition this nmethod so don't immediately | 258 // we couldn't transition this nmethod so don't immediately |
202 // request a rescan. If this method stays on the stack for a | 259 // request a rescan. If this method stays on the stack for a |
203 // long time we don't want to keep rescanning at every safepoint. | 260 // long time we don't want to keep rescanning the code cache. |
204 _not_entrant_seen_on_stack++; | 261 _not_entrant_seen_on_stack++; |
205 } | 262 } |
206 } else if (nm->is_unloaded()) { | 263 } else if (nm->is_unloaded()) { |
207 // Unloaded code, just make it a zombie | 264 // Unloaded code, just make it a zombie |
208 if (PrintMethodFlushing && Verbose) | 265 if (PrintMethodFlushing && Verbose) |
209 tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm); | 266 tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm); |
210 if (nm->is_osr_method()) { | 267 if (nm->is_osr_method()) { |
211 // No inline caches will ever point to osr methods, so we can just remove it | 268 // No inline caches will ever point to osr methods, so we can just remove it |
269 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); | |
212 nm->flush(); | 270 nm->flush(); |
213 } else { | 271 } else { |
214 nm->make_zombie(); | 272 nm->make_zombie(); |
215 _rescan = true; | 273 _rescan = true; |
216 } | 274 } |
225 nm->make_not_entrant(); | 283 nm->make_not_entrant(); |
226 } | 284 } |
227 } | 285 } |
228 | 286 |
229 // Clean-up all inline caches that point to zombie/non-reentrant methods | 287 // Clean-up all inline caches that point to zombie/non-reentrant methods |
288 MutexLocker cl(CompiledIC_lock); | |
230 nm->cleanup_inline_caches(); | 289 nm->cleanup_inline_caches(); |
231 } | 290 } |
232 } | 291 } |
233 | 292 |
234 // Code cache unloading: when compilers notice the code cache is getting full, | 293 // Code cache unloading: when compilers notice the code cache is getting full, |
235 // they will call a vm op that comes here. This code attempts to speculatively | 294 // they will call a vm op that comes here. This code attempts to speculatively |
236 // unload the oldest half of the nmethods (based on the compile job id) by | 295 // unload the oldest half of the nmethods (based on the compile job id) by |
237 // saving the old code in a list in the CodeCache. Then | 296 // saving the old code in a list in the CodeCache. Then |
238 // execution resumes. If a method so marked is not called by the second | 297 // execution resumes. If a method so marked is not called by the second sweeper |
239 // safepoint from the current one, the nmethod will be marked non-entrant and | 298 // stack traversal after the current one, the nmethod will be marked non-entrant and |
240 // got rid of by normal sweeping. If the method is called, the methodOop's | 299 // got rid of by normal sweeping. If the method is called, the methodOop's |
241 // _code field is restored and the methodOop/nmethod | 300 // _code field is restored and the methodOop/nmethod |
242 // go back to their normal state. | 301 // go back to their normal state. |
243 void NMethodSweeper::handle_full_code_cache(bool is_full) { | 302 void NMethodSweeper::handle_full_code_cache(bool is_full) { |
244 // Only the first one to notice can advise us to start early cleaning | 303 // Only the first one to notice can advise us to start early cleaning |
362 disconnected, made_not_entrant, CodeCache::nof_blobs(), CodeCache::unallocated_capacity()); | 421 disconnected, made_not_entrant, CodeCache::nof_blobs(), CodeCache::unallocated_capacity()); |
363 xtty->stamp(); | 422 xtty->stamp(); |
364 xtty->end_elem(); | 423 xtty->end_elem(); |
365 } | 424 } |
366 | 425 |
367 // Shut off compiler. Sweeper will run exiting from this safepoint | 426 // Shut off compiler. Sweeper will start over with a new stack scan and |
368 // and turn it back on if it clears enough space | 427 // traversal cycle and turn it back on if it clears enough space. |
369 if (was_full()) { | 428 if (was_full()) { |
370 _last_was_full = os::javaTimeMillis(); | 429 _last_was_full = os::javaTimeMillis(); |
371 CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation); | 430 CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation); |
372 } | 431 } |
373 | 432 |