comparison src/share/vm/runtime/sweeper.cpp @ 10206:0cfa93c2fcc4

8012547: Code cache flushing can get stuck reclaiming memory
Summary: Keep sweeping regardless of whether we are flushing
Reviewed-by: kvn, twisti
author neliasso
date Mon, 29 Apr 2013 13:20:19 +0200
parents da91efe96a93
children f2110083203d
--- a/src/share/vm/runtime/sweeper.cpp	(parent 10205:62b683108582)
+++ b/src/share/vm/runtime/sweeper.cpp	(this changeset 10206:0cfa93c2fcc4)
@@ -134,17 +134,16 @@
 volatile int NMethodSweeper::_invocations = 0;   // No. of invocations left until we are completed with this pass
 volatile int NMethodSweeper::_sweep_started = 0; // Whether a sweep is in progress.
 
 jint NMethodSweeper::_locked_seen = 0;
 jint NMethodSweeper::_not_entrant_seen_on_stack = 0;
-bool  NMethodSweeper::_rescan = false;
-bool  NMethodSweeper::_do_sweep = false;
-bool  NMethodSweeper::_was_full = false;
-jint  NMethodSweeper::_advise_to_sweep = 0;
-jlong NMethodSweeper::_last_was_full = 0;
-uint  NMethodSweeper::_highest_marked = 0;
-long  NMethodSweeper::_was_full_traversal = 0;
+bool  NMethodSweeper::_resweep = false;
+jint  NMethodSweeper::_flush_token = 0;
+jlong NMethodSweeper::_last_full_flush_time = 0;
+int   NMethodSweeper::_highest_marked = 0;
+int   NMethodSweeper::_dead_compile_ids = 0;
+long  NMethodSweeper::_last_flush_traversal_id = 0;
 
 class MarkActivationClosure: public CodeBlobClosure {
 public:
   virtual void do_code_blob(CodeBlob* cb) {
     // If we see an activation belonging to a non_entrant nmethod, we mark it.
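
Taken together, the field changes above telegraph the fix: the _rescan/_do_sweep pair collapses into a single _resweep flag, the was-full bookkeeping (_was_full, _advise_to_sweep, _last_was_full, _was_full_traversal) is replaced by a flush token plus timestamps (_flush_token, _last_full_flush_time, _last_flush_traversal_id), and the new _dead_compile_ids counter remembers how many compile ids had no nmethod left in the cache at the last flush, which feeds the flush-target computation further down.
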
@@ -153,66 +152,53 @@
     }
   }
 };
 static MarkActivationClosure mark_activation_closure;
 
+bool NMethodSweeper::sweep_in_progress() {
+  return (_current != NULL);
+}
+
 void NMethodSweeper::scan_stacks() {
   assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
   if (!MethodFlushing) return;
-  _do_sweep = true;
 
   // No need to synchronize access, since this is always executed at a
-  // safepoint. If we aren't in the middle of scan and a rescan
-  // hasn't been requested then just return. If UseCodeCacheFlushing is on and
-  // code cache flushing is in progress, don't skip sweeping to help make progress
-  // clearing space in the code cache.
-  if ((_current == NULL && !_rescan) && !(UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs())) {
-    _do_sweep = false;
-    return;
-  }
+  // safepoint.
 
   // Make sure CompiledIC_lock in unlocked, since we might update some
   // inline caches. If it is, we just bail-out and try later.
   if (CompiledIC_lock->is_locked() || Patching_lock->is_locked()) return;
 
   // Check for restart
   assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
-  if (_current == NULL) {
+  if (!sweep_in_progress() && _resweep) {
     _seen = 0;
     _invocations = NmethodSweepFraction;
     _current = CodeCache::first_nmethod();
     _traversals += 1;
     if (PrintMethodFlushing) {
       tty->print_cr("### Sweep: stack traversal %d", _traversals);
     }
     Threads::nmethods_do(&mark_activation_closure);
 
     // reset the flags since we started a scan from the beginning.
-    _rescan = false;
+    _resweep = false;
     _locked_seen = 0;
     _not_entrant_seen_on_stack = 0;
   }
 
   if (UseCodeCacheFlushing) {
-    if (!CodeCache::needs_flushing()) {
-      // scan_stacks() runs during a safepoint, no race with setters
-      _advise_to_sweep = 0;
-    }
-
-    if (was_full()) {
-      // There was some progress so attempt to restart the compiler
-      jlong now = os::javaTimeMillis();
-      jlong max_interval = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
-      jlong curr_interval = now - _last_was_full;
-      if ((!CodeCache::needs_flushing()) && (curr_interval > max_interval)) {
-        CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
-        set_was_full(false);
-
-        // Update the _last_was_full time so we can tell how fast the
-        // code cache is filling up
-        _last_was_full = os::javaTimeMillis();
-
-        log_sweep("restart_compiler");
-      }
+    // only allow new flushes after the interval is complete.
+    jlong now = os::javaTimeMillis();
+    jlong max_interval = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
+    jlong curr_interval = now - _last_full_flush_time;
+    if (curr_interval > max_interval) {
+      _flush_token = 0;
+    }
+
+    if (!CodeCache::needs_flushing() && !CompileBroker::should_compile_new_jobs()) {
+      CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
+      log_sweep("restart_compiler");
     }
   }
 }
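
The token reset just added to scan_stacks() pairs with the cmpxchg acquire in handle_full_code_cache() further down: a new flush may start only if _flush_token is free, and the token is freed purely by elapsed time. A minimal stand-alone sketch of that protocol, with std::atomic standing in for HotSpot's Atomic class and an assumed 30-second MinCodeCacheFlushingInterval (hypothetical names throughout):

    #include <atomic>
    #include <chrono>

    // Hypothetical model of the _flush_token protocol in this change.
    static std::atomic<int> flush_token{0};
    static long long last_full_flush_ms = 0;
    static const long long kMinFlushingIntervalSec = 30;  // assumed MinCodeCacheFlushingInterval

    static long long now_ms() {
      using namespace std::chrono;
      return duration_cast<milliseconds>(steady_clock::now().time_since_epoch()).count();
    }

    // Runs at a safepoint (scan_stacks): free the token once the interval expired.
    void maybe_reset_token() {
      if (now_ms() - last_full_flush_ms > kMinFlushingIntervalSec * 1000) {
        flush_token.store(0);
      }
    }

    // Runs when the cache fills up (handle_full_code_cache): only the thread
    // that wins the token actually triggers a flush; latecomers return early.
    bool try_acquire_flush() {
      int expected = 0;
      return flush_token.compare_exchange_strong(expected, 1);
    }
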
@@ -219,9 +205,9 @@
 
 void NMethodSweeper::possibly_sweep() {
   assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
-  if ((!MethodFlushing) || (!_do_sweep)) return;
+  if (!MethodFlushing || !sweep_in_progress()) return;
 
   if (_invocations > 0) {
     // Only one thread at a time will sweep
     jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 );
     if (old != 0) {
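
With _do_sweep gone, possibly_sweep() now keys off sweep_in_progress(), i.e. whether _current actually points at an nmethod, instead of a flag that scan_stacks() handed over. This is the heart of the fix named in the summary: sweeping no longer depends on a handshake that the flushing logic could stall, so a sweep that has started always runs to completion and keeps reclaiming memory.
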
@@ -249,10 +235,18 @@
     sweep_start = os::javaTimeMillis();
   }
 #endif
   if (PrintMethodFlushing && Verbose) {
     tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _invocations);
+  }
+
+  if (!CompileBroker::should_compile_new_jobs()) {
+    // If we have turned off compilations we might as well do full sweeps
+    // in order to reach the clean state faster. Otherwise the sleeping compiler
+    // threads will slow down sweeping. After a few iterations the cache
+    // will be clean and sweeping stops (_resweep will not be set)
+    _invocations = 1;
   }
 
   // We want to visit all nmethods after NmethodSweepFraction
   // invocations so divide the remaining number of nmethods by the
   // remaining number of invocations. This is only an estimate since
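
The pacing comment above is easiest to see with numbers: each invocation sweeps roughly remaining-nmethods / remaining-invocations, and the new `_invocations = 1` shortcut makes the last call take everything that is left. A toy model of one traversal (the cache population is made up; NmethodSweepFraction defaulted to 16 in HotSpot of this era):

    #include <cstdio>

    int main() {
      int nmethods    = 3200;  // assumed number of nmethods in the code cache
      int invocations = 16;    // NmethodSweepFraction-sized budget for one traversal
      for (int call = 1; invocations > 0; ++call, --invocations) {
        // The final invocation sweeps whatever remains (mirrors _invocations == 1).
        int todo = (invocations == 1) ? nmethods : nmethods / invocations;
        std::printf("call %2d: sweep %4d of %4d nmethods\n", call, todo, nmethods);
        nmethods -= todo;
      }
      return 0;
    }
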
@@ -294,11 +288,11 @@
     }
   }
 
   assert(_invocations > 1 || _current == NULL, "must have scanned the whole cache");
 
-  if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
+  if (!sweep_in_progress() && !_resweep && (_locked_seen || _not_entrant_seen_on_stack)) {
     // we've completed a scan without making progress but there were
     // nmethods we were unable to process either because they were
     // locked or were still on stack. We don't have to aggresively
     // clean them up so just stop scanning. We could scan once more
     // but that complicates the control logic and it's unlikely to
@@ -315,10 +309,17 @@
   }
 #endif
 
   if (_invocations == 1) {
     log_sweep("finished");
+  }
+
+  // Sweeper is the only case where memory is released,
+  // check here if it is time to restart the compiler.
+  if (UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs() && !CodeCache::needs_flushing()) {
+    CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
+    log_sweep("restart_compiler");
   }
 }
 
 class NMethodMarker: public StackObj {
  private:
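
Note where the restart check landed: besides the safepoint-time check in scan_stacks(), the end of a sweep pass now re-enables compilation as soon as the cache no longer needs flushing. As the new comment says, the sweeper is the only place memory is released, so this is the earliest point at which the condition can become true; the compiler is no longer left off until the next stack scan happens to notice.
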
@@ -390,22 +391,22 @@
     } else {
       if (PrintMethodFlushing && Verbose) {
         tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
       }
       nm->mark_for_reclamation();
-      _rescan = true;
+      _resweep = true;
       SWEEP(nm);
     }
   } else if (nm->is_not_entrant()) {
     // If there is no current activations of this method on the
     // stack we can safely convert it to a zombie method
     if (nm->can_not_entrant_be_converted()) {
       if (PrintMethodFlushing && Verbose) {
         tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
       }
       nm->make_zombie();
-      _rescan = true;
+      _resweep = true;
       SWEEP(nm);
     } else {
       // Still alive, clean up its inline caches
       MutexLocker cl(CompiledIC_lock);
       nm->cleanup_inline_caches();
@@ -423,20 +424,19 @@
       SWEEP(nm);
       // No inline caches will ever point to osr methods, so we can just remove it
       release_nmethod(nm);
     } else {
       nm->make_zombie();
-      _rescan = true;
+      _resweep = true;
       SWEEP(nm);
     }
   } else {
     assert(nm->is_alive(), "should be alive");
 
     if (UseCodeCacheFlushing) {
-      if ((nm->method()->code() != nm) && !(nm->is_locked_by_vm()) && !(nm->is_osr_method()) &&
-          (_traversals > _was_full_traversal+2) && (((uint)nm->compile_id()) < _highest_marked) &&
-          CodeCache::needs_flushing()) {
+      if (nm->is_speculatively_disconnected() && !nm->is_locked_by_vm() && !nm->is_osr_method() &&
+          (_traversals > _last_flush_traversal_id + 2) && (nm->compile_id() < _highest_marked)) {
         // This method has not been called since the forced cleanup happened
         nm->make_not_entrant();
       }
     }
 
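
A concrete reading of the rewritten condition: suppose a flush ran during stack traversal 40 and had marked methods up to compile id 9000 (_last_flush_traversal_id = 40, _highest_marked = 9000; both values hypothetical). A speculatively disconnected nmethod with compile id 8500 that still has not been called by traversal 43 is made not-entrant here, two full traversals having confirmed it is cold. Asking nm->is_speculatively_disconnected() directly, rather than inferring it from nm->method()->code() != nm, and dropping the CodeCache::needs_flushing() guard means this cleanup now completes even after cache pressure has eased.
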
@@ -455,41 +455,27 @@
 // stack traversal after the current one, the nmethod will be marked non-entrant and
 // got rid of by normal sweeping. If the method is called, the Method*'s
 // _code field is restored and the Method*/nmethod
 // go back to their normal state.
 void NMethodSweeper::handle_full_code_cache(bool is_full) {
-  // Only the first one to notice can advise us to start early cleaning
-  if (!is_full){
-    jint old = Atomic::cmpxchg( 1, &_advise_to_sweep, 0 );
-    if (old != 0) {
-      return;
-    }
-  }
 
   if (is_full) {
     // Since code cache is full, immediately stop new compiles
-    bool did_set = CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
-    if (!did_set) {
-      // only the first to notice can start the cleaning,
-      // others will go back and block
-      return;
-    }
-    set_was_full(true);
-
-    // If we run out within MinCodeCacheFlushingInterval of the last unload time, give up
-    jlong now = os::javaTimeMillis();
-    jlong max_interval = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
-    jlong curr_interval = now - _last_was_full;
-    if (curr_interval < max_interval) {
-      _rescan = true;
-      log_sweep("disable_compiler", "flushing_interval='" UINT64_FORMAT "'",
-                curr_interval/1000);
-      return;
-    }
+    if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) {
+      log_sweep("disable_compiler");
+    }
+  }
+
+  // Make sure only one thread can flush
+  // The token is reset after CodeCacheMinimumFlushInterval in scan stacks,
+  // no need to check the timeout here.
+  jint old = Atomic::cmpxchg( 1, &_flush_token, 0 );
+  if (old != 0) {
+    return;
   }
 
   VM_HandleFullCodeCache op(is_full);
   VMThread::execute(&op);
 
-  // rescan again as soon as possible
-  _rescan = true;
+  // resweep again as soon as possible
+  _resweep = true;
 }
@@ -496,49 +482,48 @@
 
 void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
   // If there was a race in detecting full code cache, only run
   // one vm op for it or keep the compiler shut off
 
   debug_only(jlong start = os::javaTimeMillis();)
 
-  if ((!was_full()) && (is_full)) {
-    if (!CodeCache::needs_flushing()) {
-      log_sweep("restart_compiler");
-      CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
-      return;
-    }
-  }
-
   // Traverse the code cache trying to dump the oldest nmethods
-  uint curr_max_comp_id = CompileBroker::get_compilation_id();
-  uint flush_target = ((curr_max_comp_id - _highest_marked) >> 1) + _highest_marked;
+  int curr_max_comp_id = CompileBroker::get_compilation_id();
+  int flush_target = ((curr_max_comp_id - _dead_compile_ids) / CodeCacheFlushingFraction) + _dead_compile_ids;
+
   log_sweep("start_cleaning");
 
   nmethod* nm = CodeCache::alive_nmethod(CodeCache::first());
   jint disconnected = 0;
   jint made_not_entrant = 0;
+  jint nmethod_count = 0;
+
   while ((nm != NULL)){
-    uint curr_comp_id = nm->compile_id();
+    int curr_comp_id = nm->compile_id();
 
     // OSR methods cannot be flushed like this. Also, don't flush native methods
     // since they are part of the JDK in most cases
-    if (nm->is_in_use() && (!nm->is_osr_method()) && (!nm->is_locked_by_vm()) &&
-        (!nm->is_native_method()) && ((curr_comp_id < flush_target))) {
-
-      if ((nm->method()->code() == nm)) {
-        // This method has not been previously considered for
-        // unloading or it was restored already
-        CodeCache::speculatively_disconnect(nm);
-        disconnected++;
-      } else if (nm->is_speculatively_disconnected()) {
-        // This method was previously considered for preemptive unloading and was not called since then
-        CompilationPolicy::policy()->delay_compilation(nm->method());
-        nm->make_not_entrant();
-        made_not_entrant++;
-      }
-
-      if (curr_comp_id > _highest_marked) {
-        _highest_marked = curr_comp_id;
+    if (!nm->is_osr_method() && !nm->is_locked_by_vm() && !nm->is_native_method()) {
+
+      // only count methods that can be speculatively disconnected
+      nmethod_count++;
+
+      if (nm->is_in_use() && (curr_comp_id < flush_target)) {
+        if ((nm->method()->code() == nm)) {
+          // This method has not been previously considered for
+          // unloading or it was restored already
+          CodeCache::speculatively_disconnect(nm);
+          disconnected++;
+        } else if (nm->is_speculatively_disconnected()) {
+          // This method was previously considered for preemptive unloading and was not called since then
+          CompilationPolicy::policy()->delay_compilation(nm->method());
+          nm->make_not_entrant();
+          made_not_entrant++;
+        }
+
+        if (curr_comp_id > _highest_marked) {
+          _highest_marked = curr_comp_id;
+        }
       }
     }
     nm = CodeCache::alive_nmethod(CodeCache::next(nm));
   }
@@ -545,19 +530,22 @@
+
+  // remember how many compile_ids wheren't seen last flush.
+  _dead_compile_ids = curr_max_comp_id - nmethod_count;
 
   log_sweep("stop_cleaning",
             "disconnected='" UINT32_FORMAT "' made_not_entrant='" UINT32_FORMAT "'",
             disconnected, made_not_entrant);
 
   // Shut off compiler. Sweeper will start over with a new stack scan and
   // traversal cycle and turn it back on if it clears enough space.
-  if (was_full()) {
-    _last_was_full = os::javaTimeMillis();
-    CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
+  if (is_full) {
+    _last_full_flush_time = os::javaTimeMillis();
   }
 
   // After two more traversals the sweeper will get rid of unrestored nmethods
-  _was_full_traversal = _traversals;
+  _last_flush_traversal_id = _traversals;
+  _resweep = true;
 #ifdef ASSERT
   jlong end = os::javaTimeMillis();
   if(PrintMethodFlushing && Verbose) {
     tty->print_cr("### sweeper: unload time: " INT64_FORMAT, end-start);
   }
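
The new flush-target arithmetic is also clearer with numbers. The old target sat halfway between _highest_marked and the newest compile id; the new one divides only the ids that still have code behind them by CodeCacheFlushingFraction (a flag this changeset introduces; its default is assumed to be 2 in the sketch below), then skips past the dead ids:

    #include <cstdio>

    // Hypothetical re-implementation of the new flush_target computation;
    // names mirror the diff, the numbers in main() are made up.
    int flush_target(int curr_max_comp_id, int dead_compile_ids, int flushing_fraction) {
      return (curr_max_comp_id - dead_compile_ids) / flushing_fraction + dead_compile_ids;
    }

    int main() {
      // Compilation has reached id 10000, and the previous flush counted 6000
      // live nmethods, so 4000 ids no longer exist: _dead_compile_ids == 4000.
      int target = flush_target(10000, 4000, 2);
      std::printf("disconnect in-use nmethods with compile_id < %d\n", target);  // prints 7000
      return 0;
    }

Afterwards _dead_compile_ids = curr_max_comp_id - nmethod_count records the new gap, so each flush measures progress against methods that actually exist instead of repeatedly halving a range full of ids that are already gone.
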