comparison src/share/vm/runtime/sweeper.cpp @ 12324:510fbd28919c

8020151: PSR:PERF Large performance regressions when code cache is filled
Summary: Code cache sweeping based on method hotness; removed speculative disconnect
Reviewed-by: kvn, iveresov
author anoll
date Fri, 27 Sep 2013 10:50:55 +0200
parents ab274453d37f
children cefad50507d8 78da3894b86f
comparing 12323:c9ccd7b85f20 with 12324:510fbd28919c
--- a/src/share/vm/runtime/sweeper.cpp
+++ b/src/share/vm/runtime/sweeper.cpp
@@ -125,45 +125,60 @@
 }
 #else
 #define SWEEP(nm)
 #endif
 
-
-long NMethodSweeper::_traversals = 0; // No. of stack traversals performed
-nmethod* NMethodSweeper::_current = NULL; // Current nmethod
-int NMethodSweeper::_seen = 0 ; // No. of nmethods we have currently processed in current pass of CodeCache
-int NMethodSweeper::_flushed_count = 0; // Nof. nmethods flushed in current sweep
-int NMethodSweeper::_zombified_count = 0; // Nof. nmethods made zombie in current sweep
-int NMethodSweeper::_marked_count = 0; // Nof. nmethods marked for reclaim in current sweep
-
-volatile int NMethodSweeper::_invocations = 0; // No. of invocations left until we are completed with this pass
+nmethod* NMethodSweeper::_current = NULL; // Current nmethod
+long NMethodSweeper::_traversals = 0; // Nof. stack traversals performed
+int NMethodSweeper::_seen = 0; // Nof. nmethods we have currently processed in current pass of CodeCache
+int NMethodSweeper::_flushed_count = 0; // Nof. nmethods flushed in current sweep
+int NMethodSweeper::_zombified_count = 0; // Nof. nmethods made zombie in current sweep
+int NMethodSweeper::_marked_count = 0; // Nof. nmethods marked for reclaim in current sweep
+
+volatile int NMethodSweeper::_invocations = 0; // Nof. invocations left until we are completed with this pass
 volatile int NMethodSweeper::_sweep_started = 0; // Whether a sweep is in progress.
 
 jint NMethodSweeper::_locked_seen = 0;
 jint NMethodSweeper::_not_entrant_seen_on_stack = 0;
-bool NMethodSweeper::_resweep = false;
-jint NMethodSweeper::_flush_token = 0;
-jlong NMethodSweeper::_last_full_flush_time = 0;
-int NMethodSweeper::_highest_marked = 0;
-int NMethodSweeper::_dead_compile_ids = 0;
-long NMethodSweeper::_last_flush_traversal_id = 0;
-
-int NMethodSweeper::_number_of_flushes = 0; // Total of full traversals caused by full cache
+bool NMethodSweeper::_request_mark_phase = false;
+
 int NMethodSweeper::_total_nof_methods_reclaimed = 0;
 jlong NMethodSweeper::_total_time_sweeping = 0;
 jlong NMethodSweeper::_total_time_this_sweep = 0;
 jlong NMethodSweeper::_peak_sweep_time = 0;
 jlong NMethodSweeper::_peak_sweep_fraction_time = 0;
-jlong NMethodSweeper::_total_disconnect_time = 0;
-jlong NMethodSweeper::_peak_disconnect_time = 0;
+int NMethodSweeper::_hotness_counter_reset_val = 0;
+
 
 class MarkActivationClosure: public CodeBlobClosure {
 public:
   virtual void do_code_blob(CodeBlob* cb) {
-    // If we see an activation belonging to a non_entrant nmethod, we mark it.
-    if (cb->is_nmethod() && ((nmethod*)cb)->is_not_entrant()) {
-      ((nmethod*)cb)->mark_as_seen_on_stack();
+    if (cb->is_nmethod()) {
+      nmethod* nm = (nmethod*)cb;
+      nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
+      // If we see an activation belonging to a non_entrant nmethod, we mark it.
+      if (nm->is_not_entrant()) {
+        nm->mark_as_seen_on_stack();
+      }
     }
   }
 };
 static MarkActivationClosure mark_activation_closure;
 
+class SetHotnessClosure: public CodeBlobClosure {
+public:
+  virtual void do_code_blob(CodeBlob* cb) {
+    if (cb->is_nmethod()) {
+      nmethod* nm = (nmethod*)cb;
+      nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
+    }
+  }
+};
+static SetHotnessClosure set_hotness_closure;
+
+
+int NMethodSweeper::hotness_counter_reset_val() {
+  if (_hotness_counter_reset_val == 0) {
+    _hotness_counter_reset_val = (ReservedCodeCacheSize < M) ? 1 : (ReservedCodeCacheSize / M) * 2;
+  }
+  return _hotness_counter_reset_val;
+}
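The reset value added above scales linearly with the code cache size. A minimal standalone sketch of the formula (a hypothetical helper, assuming M = 1024*1024 as in HotSpot's globalDefinitions.hpp; the cache sizes are illustrative, not prescribed by the change):

#include <cstdio>

static const long M = 1024 * 1024;

// Mirrors the computation in NMethodSweeper::hotness_counter_reset_val().
static long hotness_counter_reset_val_sketch(long reserved_code_cache_size) {
  return (reserved_code_cache_size < M) ? 1 : (reserved_code_cache_size / M) * 2;
}

int main() {
  printf("%ld\n", hotness_counter_reset_val_sketch(48 * M));   // 96 for a 48M cache
  printf("%ld\n", hotness_counter_reset_val_sketch(240 * M));  // 480 for a 240M cache
  return 0;
}

Methods compiled into a larger cache therefore start with a larger hotness budget before they can become sweep candidates.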
@@ -170,23 +185,23 @@
 bool NMethodSweeper::sweep_in_progress() {
   return (_current != NULL);
 }
 
-void NMethodSweeper::scan_stacks() {
+// Scans the stacks of all Java threads and marks activations of not-entrant methods.
+// No need to synchronize access, since 'mark_active_nmethods' is always executed at a
+// safepoint.
+void NMethodSweeper::mark_active_nmethods() {
   assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
-  if (!MethodFlushing) return;
-
-  // No need to synchronize access, since this is always executed at a
-  // safepoint.
-
-  // Make sure CompiledIC_lock in unlocked, since we might update some
-  // inline caches. If it is, we just bail-out and try later.
-  if (CompiledIC_lock->is_locked() || Patching_lock->is_locked()) return;
+  // If we do not want to reclaim not-entrant or zombie methods there is no need
+  // to scan stacks
+  if (!MethodFlushing) {
+    return;
+  }
 
   // Check for restart
   assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
-  if (!sweep_in_progress() && _resweep) {
+  if (!sweep_in_progress() && need_marking_phase()) {
     _seen = 0;
     _invocations = NmethodSweepFraction;
     _current = CodeCache::first_nmethod();
     _traversals += 1;
     _total_time_this_sweep = 0;
@@ -195,25 +210,15 @@
       tty->print_cr("### Sweep: stack traversal %d", _traversals);
     }
     Threads::nmethods_do(&mark_activation_closure);
 
     // reset the flags since we started a scan from the beginning.
-    _resweep = false;
+    reset_nmethod_marking();
     _locked_seen = 0;
     _not_entrant_seen_on_stack = 0;
-  }
-
-  if (UseCodeCacheFlushing) {
-    // only allow new flushes after the interval is complete.
-    jlong now = os::javaTimeMillis();
-    jlong max_interval = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
-    jlong curr_interval = now - _last_full_flush_time;
-    if (curr_interval > max_interval) {
-      _flush_token = 0;
-    }
-
-    if (!CodeCache::needs_flushing() && !CompileBroker::should_compile_new_jobs()) {
-      CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
-      log_sweep("restart_compiler");
-    }
-  }
+  } else {
+    // Only set hotness counter
+    Threads::nmethods_do(&set_hotness_closure);
+  }
+
+  OrderAccess::storestore();
 }
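The new OrderAccess::storestore() at the end of mark_active_nmethods() publishes the counter resets performed at the safepoint before any later stores by this thread. A hedged C++11 analogy, not the HotSpot primitive itself (a release fence is slightly stronger, since it also orders prior loads against later stores):

#include <atomic>

inline void storestore_sketch() {
  // Stores issued before this fence cannot be reordered past stores issued after it.
  std::atomic_thread_fence(std::memory_order_release);
}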
@@ -220,9 +225,11 @@
 
 void NMethodSweeper::possibly_sweep() {
   assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
-  if (!MethodFlushing || !sweep_in_progress()) return;
+  if (!MethodFlushing || !sweep_in_progress()) {
+    return;
+  }
 
   if (_invocations > 0) {
     // Only one thread at a time will sweep
     jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 );
     if (old != 0) {
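The cmpxchg above is the usual compare-and-swap try-lock: exactly one thread swings _sweep_started from 0 to 1 and proceeds, while every other caller sees a non-zero old value and backs off. A self-contained sketch of the same idiom in standard C++ (hypothetical names, not the HotSpot Atomic API):

#include <atomic>

static std::atomic<int> sweep_started{0};

void possibly_sweep_sketch() {
  int expected = 0;
  // Only the thread that swaps 0 -> 1 wins the guard.
  if (!sweep_started.compare_exchange_strong(expected, 1)) {
    return;  // another thread is already sweeping
  }
  // ... sweep one fraction of the code cache ...
  sweep_started.store(0);  // release the guard for the next sweep
}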
@@ -256,12 +263,11 @@
   }
 
   if (!CompileBroker::should_compile_new_jobs()) {
     // If we have turned off compilations we might as well do full sweeps
     // in order to reach the clean state faster. Otherwise the sleeping compiler
-    // threads will slow down sweeping. After a few iterations the cache
-    // will be clean and sweeping stops (_resweep will not be set)
+    // threads will slow down sweeping.
     _invocations = 1;
   }
 
   // We want to visit all nmethods after NmethodSweepFraction
   // invocations so divide the remaining number of nmethods by the
@@ -269,5 +275,6 @@
   // the number of nmethods changes during the sweep so the final
   // stage must iterate until there are no more nmethods.
   int todo = (CodeCache::nof_nmethods() - _seen) / _invocations;
   int swept_count = 0;
 
+
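With NmethodSweepFraction at 16 (an assumption based on the flag's historical default), each fractional sweep visits roughly one sixteenth of the remaining nmethods. A worked sketch of the partitioning:

// Hypothetical model of the 'todo' computation in the code above.
int todo_for_fraction(int nof_nmethods, int seen, int invocations_left) {
  return (nof_nmethods - seen) / invocations_left;
}
// E.g., 6400 nmethods: 1st fraction (6400 - 0) / 16 = 400, 2nd (6400 - 400) / 15 = 400, ...
// The final fraction (_invocations == 1) ignores 'todo' and iterates until _current
// is NULL, so nmethods added or removed mid-sweep are still covered.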
@@ -274,8 +281,9 @@
   assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
   assert(!CodeCache_lock->owned_by_self(), "just checking");
 
+  int freed_memory = 0;
   {
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 
     // The last invocation iterates until there are no more nmethods
     for (int i = 0; (i < todo || _invocations == 1) && _current != NULL; i++) {
@@ -297,24 +305,24 @@
       nmethod* next = CodeCache::next_nmethod(_current);
 
       // Now ready to process nmethod and give up CodeCache_lock
       {
         MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-        process_nmethod(_current);
+        freed_memory += process_nmethod(_current);
       }
       _seen++;
       _current = next;
     }
   }
 
   assert(_invocations > 1 || _current == NULL, "must have scanned the whole cache");
 
-  if (!sweep_in_progress() && !_resweep && (_locked_seen || _not_entrant_seen_on_stack)) {
+  if (!sweep_in_progress() && !need_marking_phase() && (_locked_seen || _not_entrant_seen_on_stack)) {
     // we've completed a scan without making progress but there were
     // nmethods we were unable to process either because they were
-    // locked or were still on stack. We don't have to aggresively
+    // locked or were still on stack. We don't have to aggressively
     // clean them up so just stop scanning. We could scan once more
     // but that complicates the control logic and it's unlikely to
     // matter much.
     if (PrintMethodFlushing) {
       tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
     }
@@ -349,13 +357,20 @@
   if (_invocations == 1) {
     _peak_sweep_time = MAX2(_peak_sweep_time, _total_time_this_sweep);
     log_sweep("finished");
   }
 
-  // Sweeper is the only case where memory is released,
-  // check here if it is time to restart the compiler.
-  if (UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs() && !CodeCache::needs_flushing()) {
+  // Sweeper is the only case where memory is released, check here if it
+  // is time to restart the compiler. Only checking if there is a certain
+  // amount of free memory in the code cache might lead to re-enabling
+  // compilation although no memory has been released. For example, there are
+  // cases when compilation was disabled although there is 4MB (or more) free
+  // memory in the code cache. The reason is code cache fragmentation. Therefore,
+  // it only makes sense to re-enable compilation if we have actually freed memory.
+  // Note that typically several kB are released for sweeping 16MB of the code
+  // cache. As a result, we require 'freed_memory' > 0 to restart the compiler.
+  if (UseCodeCacheFlushing && (!CompileBroker::should_compile_new_jobs() && (freed_memory > 0))) {
     CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
     log_sweep("restart_compiler");
   }
 }
 
@@ -365,12 +380,12 @@
 public:
   NMethodMarker(nmethod* nm) {
     _thread = CompilerThread::current();
     if (!nm->is_zombie() && !nm->is_unloaded()) {
       // Only expose live nmethods for scanning
       _thread->set_scanned_nmethod(nm);
     }
   }
   ~NMethodMarker() {
     _thread->set_scanned_nmethod(NULL);
   }
 };
@@ -390,63 +405,64 @@
 
   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
   nm->flush();
 }
 
-void NMethodSweeper::process_nmethod(nmethod *nm) {
+int NMethodSweeper::process_nmethod(nmethod *nm) {
   assert(!CodeCache_lock->owned_by_self(), "just checking");
 
+  int freed_memory = 0;
   // Make sure this nmethod doesn't get unloaded during the scan,
-  // since the locks acquired below might safepoint.
+  // since safepoints may happen while acquiring the locks below.
   NMethodMarker nmm(nm);
-
   SWEEP(nm);
 
   // Skip methods that are currently referenced by the VM
   if (nm->is_locked_by_vm()) {
     // But still remember to clean-up inline caches for alive nmethods
     if (nm->is_alive()) {
-      // Clean-up all inline caches that points to zombie/non-reentrant methods
+      // Clean inline caches that point to zombie/non-entrant methods
       MutexLocker cl(CompiledIC_lock);
       nm->cleanup_inline_caches();
       SWEEP(nm);
     } else {
       _locked_seen++;
       SWEEP(nm);
     }
-    return;
+    return freed_memory;
   }
 
   if (nm->is_zombie()) {
-    // If it is first time, we see nmethod then we mark it. Otherwise,
-    // we reclame it. When we have seen a zombie method twice, we know that
+    // If it is the first time we see this nmethod, we mark it. Otherwise,
+    // we reclaim it. When we have seen a zombie method twice, we know that
     // there are no inline caches that refer to it.
     if (nm->is_marked_for_reclamation()) {
       assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
       if (PrintMethodFlushing && Verbose) {
         tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
       }
+      freed_memory = nm->total_size();
       release_nmethod(nm);
       _flushed_count++;
     } else {
       if (PrintMethodFlushing && Verbose) {
         tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
       }
       nm->mark_for_reclamation();
-      _resweep = true;
+      request_nmethod_marking();
       _marked_count++;
       SWEEP(nm);
     }
   } else if (nm->is_not_entrant()) {
-    // If there is no current activations of this method on the
+    // If there are no current activations of this method on the
     // stack we can safely convert it to a zombie method
     if (nm->can_not_entrant_be_converted()) {
       if (PrintMethodFlushing && Verbose) {
         tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
       }
       nm->make_zombie();
-      _resweep = true;
+      request_nmethod_marking();
       _zombified_count++;
       SWEEP(nm);
     } else {
       // Still alive, clean up its inline caches
       MutexLocker cl(CompiledIC_lock);
@@ -457,37 +473,55 @@
       _not_entrant_seen_on_stack++;
       SWEEP(nm);
     }
   } else if (nm->is_unloaded()) {
     // Unloaded code, just make it a zombie
-    if (PrintMethodFlushing && Verbose)
+    if (PrintMethodFlushing && Verbose) {
       tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);
-
+    }
     if (nm->is_osr_method()) {
       SWEEP(nm);
       // No inline caches will ever point to osr methods, so we can just remove it
+      freed_memory = nm->total_size();
       release_nmethod(nm);
       _flushed_count++;
     } else {
       nm->make_zombie();
-      _resweep = true;
+      request_nmethod_marking();
       _zombified_count++;
       SWEEP(nm);
     }
   } else {
-    assert(nm->is_alive(), "should be alive");
-
     if (UseCodeCacheFlushing) {
-      if (nm->is_speculatively_disconnected() && !nm->is_locked_by_vm() && !nm->is_osr_method() &&
-          (_traversals > _last_flush_traversal_id + 2) && (nm->compile_id() < _highest_marked)) {
-        // This method has not been called since the forced cleanup happened
-        nm->make_not_entrant();
-      }
-    }
-
-    // Clean-up all inline caches that points to zombie/non-reentrant methods
+      if (!nm->is_locked_by_vm() && !nm->is_osr_method() && !nm->is_native_method()) {
+        // Do not make native methods and OSR-methods not-entrant
+        nm->dec_hotness_counter();
+        // Get the initial value of the hotness counter. This value depends on the
+        // ReservedCodeCacheSize
+        int reset_val = hotness_counter_reset_val();
+        int time_since_reset = reset_val - nm->hotness_counter();
+        double threshold = -reset_val + (CodeCache::reverse_free_ratio() * NmethodSweepActivity);
+        // The less free space in the code cache we have - the bigger reverse_free_ratio() is.
+        // I.e., 'threshold' increases with lower available space in the code cache and a higher
+        // NmethodSweepActivity. If the current hotness counter - which decreases from its initial
+        // value until it is reset by stack walking - is smaller than the computed threshold, the
+        // corresponding nmethod is considered for removal.
+        if ((NmethodSweepActivity > 0) && (nm->hotness_counter() < threshold) && (time_since_reset > 10)) {
+          // A method is marked as not-entrant if the method is
+          // 1) 'old enough': nm->hotness_counter() < threshold
+          // 2) The method was in_use for a minimum amount of time: (time_since_reset > 10)
+          // The second condition is necessary if we are dealing with very small code cache
+          // sizes (e.g., <10m) and the code cache size is too small to hold all hot methods.
+          // The second condition ensures that methods are not immediately made not-entrant
+          // after compilation.
+          nm->make_not_entrant();
+          request_nmethod_marking();
+        }
+      }
+    }
+    // Clean-up all inline caches that point to zombie/non-reentrant methods
    MutexLocker cl(CompiledIC_lock);
    nm->cleanup_inline_caches();
    SWEEP(nm);
  }
-}
-
+  return freed_memory;
+}
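To make the aging policy concrete, a numeric sketch of the threshold computation above. It assumes a 64M cache (so reset_val = 128), NmethodSweepActivity at its default of 10, and models CodeCache::reverse_free_ratio() as max capacity divided by unallocated capacity; both the default and the ratio definition are assumptions about this release rather than something this diff states:

#include <cstdio>

int main() {
  const int reset_val = 128;     // hotness_counter_reset_val() for a 64M cache
  const double activity = 10.0;  // assumed NmethodSweepActivity default
  // reverse_free_ratio() is roughly 2 with the cache half full and 32 at ~97% full.
  for (double reverse_free_ratio = 2.0; reverse_free_ratio <= 32.0; reverse_free_ratio *= 2.0) {
    double threshold = -reset_val + reverse_free_ratio * activity;
    printf("reverse_free_ratio = %4.1f -> threshold = %6.1f\n", reverse_free_ratio, threshold);
  }
  return 0;
}

At half capacity the threshold is -108, so only methods whose counter has decayed far below zero are discarded; near a full cache it is +192, which exceeds reset_val, so any method whose counter has dropped by more than 10 since its last stack reset becomes a candidate.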
@@ -494,125 +528,5 @@
-// Code cache unloading: when compilers notice the code cache is getting full,
-// they will call a vm op that comes here. This code attempts to speculatively
-// unload the oldest half of the nmethods (based on the compile job id) by
-// saving the old code in a list in the CodeCache. Then
-// execution resumes. If a method so marked is not called by the second sweeper
-// stack traversal after the current one, the nmethod will be marked non-entrant and
-// got rid of by normal sweeping. If the method is called, the Method*'s
-// _code field is restored and the Method*/nmethod
-// go back to their normal state.
-void NMethodSweeper::handle_full_code_cache(bool is_full) {
-
-  if (is_full) {
-    // Since code cache is full, immediately stop new compiles
-    if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) {
-      log_sweep("disable_compiler");
-    }
-  }
-
-  // Make sure only one thread can flush
-  // The token is reset after CodeCacheMinimumFlushInterval in scan stacks,
-  // no need to check the timeout here.
-  jint old = Atomic::cmpxchg( 1, &_flush_token, 0 );
-  if (old != 0) {
-    return;
-  }
-
-  VM_HandleFullCodeCache op(is_full);
-  VMThread::execute(&op);
-
-  // resweep again as soon as possible
-  _resweep = true;
-}
-
-void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
-  // If there was a race in detecting full code cache, only run
-  // one vm op for it or keep the compiler shut off
-
-  jlong disconnect_start_counter = os::elapsed_counter();
-
-  // Traverse the code cache trying to dump the oldest nmethods
-  int curr_max_comp_id = CompileBroker::get_compilation_id();
-  int flush_target = ((curr_max_comp_id - _dead_compile_ids) / CodeCacheFlushingFraction) + _dead_compile_ids;
-
-  log_sweep("start_cleaning");
-
-  nmethod* nm = CodeCache::alive_nmethod(CodeCache::first());
-  jint disconnected = 0;
-  jint made_not_entrant = 0;
-  jint nmethod_count = 0;
-
-  while ((nm != NULL)){
-    int curr_comp_id = nm->compile_id();
-
-    // OSR methods cannot be flushed like this. Also, don't flush native methods
-    // since they are part of the JDK in most cases
-    if (!nm->is_osr_method() && !nm->is_locked_by_vm() && !nm->is_native_method()) {
-
-      // only count methods that can be speculatively disconnected
-      nmethod_count++;
-
-      if (nm->is_in_use() && (curr_comp_id < flush_target)) {
-        if ((nm->method()->code() == nm)) {
-          // This method has not been previously considered for
-          // unloading or it was restored already
-          CodeCache::speculatively_disconnect(nm);
-          disconnected++;
-        } else if (nm->is_speculatively_disconnected()) {
-          // This method was previously considered for preemptive unloading and was not called since then
-          CompilationPolicy::policy()->delay_compilation(nm->method());
-          nm->make_not_entrant();
-          made_not_entrant++;
-        }
-
-        if (curr_comp_id > _highest_marked) {
-          _highest_marked = curr_comp_id;
-        }
-      }
-    }
-    nm = CodeCache::alive_nmethod(CodeCache::next(nm));
-  }
-
-  // remember how many compile_ids wheren't seen last flush.
-  _dead_compile_ids = curr_max_comp_id - nmethod_count;
-
-  log_sweep("stop_cleaning",
-            "disconnected='" UINT32_FORMAT "' made_not_entrant='" UINT32_FORMAT "'",
-            disconnected, made_not_entrant);
-
-  // Shut off compiler. Sweeper will start over with a new stack scan and
-  // traversal cycle and turn it back on if it clears enough space.
-  if (is_full) {
-    _last_full_flush_time = os::javaTimeMillis();
-  }
-
-  jlong disconnect_end_counter = os::elapsed_counter();
-  jlong disconnect_time = disconnect_end_counter - disconnect_start_counter;
-  _total_disconnect_time += disconnect_time;
-  _peak_disconnect_time = MAX2(disconnect_time, _peak_disconnect_time);
-
-  EventCleanCodeCache event(UNTIMED);
-  if (event.should_commit()) {
-    event.set_starttime(disconnect_start_counter);
-    event.set_endtime(disconnect_end_counter);
-    event.set_disconnectedCount(disconnected);
-    event.set_madeNonEntrantCount(made_not_entrant);
-    event.commit();
-  }
-  _number_of_flushes++;
-
-  // After two more traversals the sweeper will get rid of unrestored nmethods
-  _last_flush_traversal_id = _traversals;
-  _resweep = true;
-#ifdef ASSERT
-
-  if(PrintMethodFlushing && Verbose) {
-    tty->print_cr("### sweeper: unload time: " INT64_FORMAT, (jlong)disconnect_time);
-  }
-#endif
-}
-
 
 // Print out some state information about the current sweep and the
 // state of the code cache if it's requested.
 void NMethodSweeper::log_sweep(const char* msg, const char* format, ...) {
   if (PrintMethodFlushing) {