comparison src/share/vm/runtime/sweeper.cpp @ 10408:836a62f43af9

Merge with http://hg.openjdk.java.net/hsx/hsx25/hotspot/
author Doug Simon <doug.simon@oracle.com>
date Wed, 19 Jun 2013 10:45:56 +0200
parents e522a00b91aa f2110083203d
children cefad50507d8
comparing 10086:e0fb8a213650 with 10408:836a62f43af9
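The change below renames the sweeper's rescan state (_rescan, _do_sweep, _was_full, _advise_to_sweep) into a single _resweep flag plus a CAS-guarded _flush_token, adds per-sweep counters (_flushed_count, _zombified_count, _marked_count) and timing statistics, and emits trace events for sweeps and code cache cleanings. Both the existing _sweep_started guard and the new _flush_token rely on the same claim-a-token-with-compare-and-swap idiom. The following is a minimal stand-alone C++ sketch of that idiom only; std::atomic stands in for HotSpot's Atomic::cmpxchg and the names are placeholders, not HotSpot code.

    #include <atomic>

    // Placeholder for jint fields such as _sweep_started or _flush_token.
    static std::atomic<int> token{0};

    bool try_claim() {
      int expected = 0;
      // Only the thread that flips 0 -> 1 proceeds, mirroring
      //   jint old = Atomic::cmpxchg( 1, &_flush_token, 0 );
      //   if (old != 0) { return; }
      return token.compare_exchange_strong(expected, 1);
    }

    void release() {
      // The winner resets the token once the guarded work is done
      // (scan_stacks() resets _flush_token after the flushing interval elapses).
      token.store(0);
    }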
@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.
@@ -34,10 +34,11 @@
 #include "runtime/compilationPolicy.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/os.hpp"
 #include "runtime/sweeper.hpp"
 #include "runtime/vm_operations.hpp"
+#include "trace/tracing.hpp"
 #include "utilities/events.hpp"
 #include "utilities/xmlstream.hpp"

 #ifdef ASSERT

@@ -128,23 +129,34 @@


 long NMethodSweeper::_traversals = 0;     // No. of stack traversals performed
 nmethod* NMethodSweeper::_current = NULL; // Current nmethod
 int NMethodSweeper::_seen = 0 ;           // No. of nmethods we have currently processed in current pass of CodeCache
+int NMethodSweeper::_flushed_count = 0;   // Nof. nmethods flushed in current sweep
+int NMethodSweeper::_zombified_count = 0; // Nof. nmethods made zombie in current sweep
+int NMethodSweeper::_marked_count = 0;    // Nof. nmethods marked for reclaim in current sweep

 volatile int NMethodSweeper::_invocations = 0;   // No. of invocations left until we are completed with this pass
 volatile int NMethodSweeper::_sweep_started = 0; // Whether a sweep is in progress.

 jint NMethodSweeper::_locked_seen = 0;
 jint NMethodSweeper::_not_entrant_seen_on_stack = 0;
-bool NMethodSweeper::_rescan = false;
-bool NMethodSweeper::_do_sweep = false;
-bool NMethodSweeper::_was_full = false;
-jint NMethodSweeper::_advise_to_sweep = 0;
-jlong NMethodSweeper::_last_was_full = 0;
-uint NMethodSweeper::_highest_marked = 0;
-long NMethodSweeper::_was_full_traversal = 0;
+bool NMethodSweeper::_resweep = false;
+jint NMethodSweeper::_flush_token = 0;
+jlong NMethodSweeper::_last_full_flush_time = 0;
+int NMethodSweeper::_highest_marked = 0;
+int NMethodSweeper::_dead_compile_ids = 0;
+long NMethodSweeper::_last_flush_traversal_id = 0;
+
+int NMethodSweeper::_number_of_flushes = 0; // Total of full traversals caused by full cache
+int NMethodSweeper::_total_nof_methods_reclaimed = 0;
+jlong NMethodSweeper::_total_time_sweeping = 0;
+jlong NMethodSweeper::_total_time_this_sweep = 0;
+jlong NMethodSweeper::_peak_sweep_time = 0;
+jlong NMethodSweeper::_peak_sweep_fraction_time = 0;
+jlong NMethodSweeper::_total_disconnect_time = 0;
+jlong NMethodSweeper::_peak_disconnect_time = 0;

 class MarkActivationClosure: public CodeBlobClosure {
 public:
   virtual void do_code_blob(CodeBlob* cb) {
     // If we see an activation belonging to a non_entrant nmethod, we mark it.
@@ -153,75 +165,64 @@
     }
   }
 };
 static MarkActivationClosure mark_activation_closure;

+bool NMethodSweeper::sweep_in_progress() {
+  return (_current != NULL);
+}
+
 void NMethodSweeper::scan_stacks() {
   assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
   if (!MethodFlushing) return;
-  _do_sweep = true;

   // No need to synchronize access, since this is always executed at a
-  // safepoint. If we aren't in the middle of scan and a rescan
-  // hasn't been requested then just return. If UseCodeCacheFlushing is on and
-  // code cache flushing is in progress, don't skip sweeping to help make progress
-  // clearing space in the code cache.
-  if ((_current == NULL && !_rescan) && !(UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs())) {
-    _do_sweep = false;
-    return;
-  }
+  // safepoint.

   // Make sure CompiledIC_lock in unlocked, since we might update some
   // inline caches. If it is, we just bail-out and try later.
   if (CompiledIC_lock->is_locked() || Patching_lock->is_locked()) return;

   // Check for restart
   assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
-  if (_current == NULL) {
+  if (!sweep_in_progress() && _resweep) {
     _seen = 0;
     _invocations = NmethodSweepFraction;
     _current = CodeCache::first_nmethod();
     _traversals += 1;
+    _total_time_this_sweep = 0;
+
     if (PrintMethodFlushing) {
       tty->print_cr("### Sweep: stack traversal %d", _traversals);
     }
     Threads::nmethods_do(&mark_activation_closure);

     // reset the flags since we started a scan from the beginning.
-    _rescan = false;
+    _resweep = false;
     _locked_seen = 0;
     _not_entrant_seen_on_stack = 0;
   }

   if (UseCodeCacheFlushing) {
-    if (!CodeCache::needs_flushing()) {
-      // scan_stacks() runs during a safepoint, no race with setters
-      _advise_to_sweep = 0;
-    }
-
-    if (was_full()) {
-      // There was some progress so attempt to restart the compiler
-      jlong now = os::javaTimeMillis();
-      jlong max_interval = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
-      jlong curr_interval = now - _last_was_full;
-      if ((!CodeCache::needs_flushing()) && (curr_interval > max_interval)) {
-        CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
-        set_was_full(false);
-
-        // Update the _last_was_full time so we can tell how fast the
-        // code cache is filling up
-        _last_was_full = os::javaTimeMillis();
-
-        log_sweep("restart_compiler");
-      }
+    // only allow new flushes after the interval is complete.
+    jlong now = os::javaTimeMillis();
+    jlong max_interval = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
+    jlong curr_interval = now - _last_full_flush_time;
+    if (curr_interval > max_interval) {
+      _flush_token = 0;
+    }
+
+    if (!CodeCache::needs_flushing() && !CompileBroker::should_compile_new_jobs()) {
+      CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
+      log_sweep("restart_compiler");
     }
   }
 }

 void NMethodSweeper::possibly_sweep() {
   assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
-  if ((!MethodFlushing) || (!_do_sweep)) return;
+  if (!MethodFlushing || !sweep_in_progress()) return;

   if (_invocations > 0) {
     // Only one thread at a time will sweep
     jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 );
     if (old != 0) {
@@ -241,18 +242,27 @@
     _sweep_started = 0;
   }
 }

 void NMethodSweeper::sweep_code_cache() {
-#ifdef ASSERT
-  jlong sweep_start;
-  if (PrintMethodFlushing) {
-    sweep_start = os::javaTimeMillis();
-  }
-#endif
+
+  jlong sweep_start_counter = os::elapsed_counter();
+
+  _flushed_count = 0;
+  _zombified_count = 0;
+  _marked_count = 0;
+
   if (PrintMethodFlushing && Verbose) {
     tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _invocations);
+  }
+
+  if (!CompileBroker::should_compile_new_jobs()) {
+    // If we have turned off compilations we might as well do full sweeps
+    // in order to reach the clean state faster. Otherwise the sleeping compiler
+    // threads will slow down sweeping. After a few iterations the cache
+    // will be clean and sweeping stops (_resweep will not be set)
+    _invocations = 1;
   }

   // We want to visit all nmethods after NmethodSweepFraction
   // invocations so divide the remaining number of nmethods by the
   // remaining number of invocations. This is only an estimate since
@@ -294,11 +304,11 @@
     }
   }

   assert(_invocations > 1 || _current == NULL, "must have scanned the whole cache");

-  if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
+  if (!sweep_in_progress() && !_resweep && (_locked_seen || _not_entrant_seen_on_stack)) {
     // we've completed a scan without making progress but there were
     // nmethods we were unable to process either because they were
     // locked or were still on stack. We don't have to aggresively
     // clean them up so just stop scanning. We could scan once more
     // but that complicates the control logic and it's unlikely to
@@ -306,19 +316,46 @@
     if (PrintMethodFlushing) {
       tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
     }
   }

+  jlong sweep_end_counter = os::elapsed_counter();
+  jlong sweep_time = sweep_end_counter - sweep_start_counter;
+  _total_time_sweeping += sweep_time;
+  _total_time_this_sweep += sweep_time;
+  _peak_sweep_fraction_time = MAX2(sweep_time, _peak_sweep_fraction_time);
+  _total_nof_methods_reclaimed += _flushed_count;
+
+  EventSweepCodeCache event(UNTIMED);
+  if (event.should_commit()) {
+    event.set_starttime(sweep_start_counter);
+    event.set_endtime(sweep_end_counter);
+    event.set_sweepIndex(_traversals);
+    event.set_sweepFractionIndex(NmethodSweepFraction - _invocations + 1);
+    event.set_sweptCount(todo);
+    event.set_flushedCount(_flushed_count);
+    event.set_markedCount(_marked_count);
+    event.set_zombifiedCount(_zombified_count);
+    event.commit();
+  }
+
 #ifdef ASSERT
   if(PrintMethodFlushing) {
-    jlong sweep_end = os::javaTimeMillis();
-    tty->print_cr("### sweeper: sweep time(%d): " INT64_FORMAT, _invocations, sweep_end - sweep_start);
+    tty->print_cr("### sweeper: sweep time(%d): " INT64_FORMAT, _invocations, (jlong)sweep_time);
   }
 #endif

   if (_invocations == 1) {
+    _peak_sweep_time = MAX2(_peak_sweep_time, _total_time_this_sweep);
     log_sweep("finished");
+  }
+
+  // Sweeper is the only case where memory is released,
+  // check here if it is time to restart the compiler.
+  if (UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs() && !CodeCache::needs_flushing()) {
+    CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
+    log_sweep("restart_compiler");
   }
 }

 class NMethodMarker: public StackObj {
 private:
@@ -393,27 +430,30 @@
       assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
       if (PrintMethodFlushing && Verbose) {
         tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
       }
       release_nmethod(nm);
+      _flushed_count++;
     } else {
       if (PrintMethodFlushing && Verbose) {
         tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
       }
       nm->mark_for_reclamation();
-      _rescan = true;
+      _resweep = true;
+      _marked_count++;
       SWEEP(nm);
     }
   } else if (nm->is_not_entrant()) {
     // If there is no current activations of this method on the
     // stack we can safely convert it to a zombie method
     if (nm->can_not_entrant_be_converted()) {
       if (PrintMethodFlushing && Verbose) {
         tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
       }
       nm->make_zombie();
-      _rescan = true;
+      _resweep = true;
+      _zombified_count++;
       SWEEP(nm);
     } else {
       // Still alive, clean up its inline caches
       MutexLocker cl(CompiledIC_lock);
       nm->cleanup_inline_caches();
@@ -425,26 +465,28 @@
     }
   } else if (nm->is_unloaded()) {
     // Unloaded code, just make it a zombie
     if (PrintMethodFlushing && Verbose)
       tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);
+
     if (nm->is_osr_method()) {
       SWEEP(nm);
       // No inline caches will ever point to osr methods, so we can just remove it
       release_nmethod(nm);
+      _flushed_count++;
     } else {
       nm->make_zombie();
-      _rescan = true;
+      _resweep = true;
+      _zombified_count++;
       SWEEP(nm);
     }
   } else {
     assert(nm->is_alive(), "should be alive");

     if (UseCodeCacheFlushing) {
-      if ((nm->method()->code() != nm) && !(nm->is_locked_by_vm()) && !(nm->is_osr_method()) &&
-          (_traversals > _was_full_traversal+2) && (((uint)nm->compile_id()) < _highest_marked) &&
-          CodeCache::needs_flushing()) {
+      if (nm->is_speculatively_disconnected() && !nm->is_locked_by_vm() && !nm->is_osr_method() &&
+          (_traversals > _last_flush_traversal_id + 2) && (nm->compile_id() < _highest_marked)) {
         // This method has not been called since the forced cleanup happened
         nm->make_not_entrant();
       }
     }

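Note on the hunk below: speculative_disconnect_nmethods() now derives its flush target from _dead_compile_ids and the CodeCacheFlushingFraction flag instead of from _highest_marked. A rough worked example of the new arithmetic, with made-up numbers (the flag's value is not shown in this changeset), is sketched here.

    #include <cstdio>

    int main() {
      // Illustrative values only.
      int curr_max_comp_id = 10000;         // newest compile id handed out so far
      int dead_compile_ids = 4000;          // compile ids no longer backed by a live nmethod
      int code_cache_flushing_fraction = 2; // stand-in for the CodeCacheFlushingFraction flag

      // In-use nmethods with a compile id below this target become
      // candidates for speculative disconnection.
      int flush_target = ((curr_max_comp_id - dead_compile_ids) / code_cache_flushing_fraction)
                         + dead_compile_ids;
      std::printf("flush_target = %d\n", flush_target); // prints 7000
      return 0;
    }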
@@ -463,113 +505,116 @@
 // stack traversal after the current one, the nmethod will be marked non-entrant and
 // got rid of by normal sweeping. If the method is called, the Method*'s
 // _code field is restored and the Method*/nmethod
 // go back to their normal state.
 void NMethodSweeper::handle_full_code_cache(bool is_full) {
-  // Only the first one to notice can advise us to start early cleaning
-  if (!is_full){
-    jint old = Atomic::cmpxchg( 1, &_advise_to_sweep, 0 );
-    if (old != 0) {
-      return;
-    }
-  }

   if (is_full) {
     // Since code cache is full, immediately stop new compiles
-    bool did_set = CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
-    if (!did_set) {
-      // only the first to notice can start the cleaning,
-      // others will go back and block
-      return;
-    }
-    set_was_full(true);
-
-    // If we run out within MinCodeCacheFlushingInterval of the last unload time, give up
-    jlong now = os::javaTimeMillis();
-    jlong max_interval = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
-    jlong curr_interval = now - _last_was_full;
-    if (curr_interval < max_interval) {
-      _rescan = true;
-      log_sweep("disable_compiler", "flushing_interval='" UINT64_FORMAT "'",
-                curr_interval/1000);
-      return;
-    }
+    if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) {
+      log_sweep("disable_compiler");
+    }
+  }
+
+  // Make sure only one thread can flush
+  // The token is reset after CodeCacheMinimumFlushInterval in scan stacks,
+  // no need to check the timeout here.
+  jint old = Atomic::cmpxchg( 1, &_flush_token, 0 );
+  if (old != 0) {
+    return;
   }

   VM_HandleFullCodeCache op(is_full);
   VMThread::execute(&op);

-  // rescan again as soon as possible
-  _rescan = true;
+  // resweep again as soon as possible
+  _resweep = true;
 }

 void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
   // If there was a race in detecting full code cache, only run
   // one vm op for it or keep the compiler shut off

-  debug_only(jlong start = os::javaTimeMillis();)
-
-  if ((!was_full()) && (is_full)) {
-    if (!CodeCache::needs_flushing()) {
-      log_sweep("restart_compiler");
-      CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
-      return;
-    }
-  }
+  jlong disconnect_start_counter = os::elapsed_counter();

   // Traverse the code cache trying to dump the oldest nmethods
-  uint curr_max_comp_id = CompileBroker::get_compilation_id();
-  uint flush_target = ((curr_max_comp_id - _highest_marked) >> 1) + _highest_marked;
+  int curr_max_comp_id = CompileBroker::get_compilation_id();
+  int flush_target = ((curr_max_comp_id - _dead_compile_ids) / CodeCacheFlushingFraction) + _dead_compile_ids;
+
   log_sweep("start_cleaning");

   nmethod* nm = CodeCache::alive_nmethod(CodeCache::first());
   jint disconnected = 0;
   jint made_not_entrant = 0;
+  jint nmethod_count = 0;
+
   while ((nm != NULL)){
-    uint curr_comp_id = nm->compile_id();
+    int curr_comp_id = nm->compile_id();

     // OSR methods cannot be flushed like this. Also, don't flush native methods
     // since they are part of the JDK in most cases
-    if (nm->is_in_use() && (!nm->is_osr_method()) && (!nm->is_locked_by_vm()) &&
-        (!nm->is_native_method()) && ((curr_comp_id < flush_target))) {
-
-      if ((nm->method()->code() == nm)) {
-        // This method has not been previously considered for
-        // unloading or it was restored already
-        CodeCache::speculatively_disconnect(nm);
-        disconnected++;
-      } else if (nm->is_speculatively_disconnected()) {
-        // This method was previously considered for preemptive unloading and was not called since then
-        CompilationPolicy::policy()->delay_compilation(nm->method());
-        nm->make_not_entrant();
-        made_not_entrant++;
-      }
-
-      if (curr_comp_id > _highest_marked) {
-        _highest_marked = curr_comp_id;
+    if (!nm->is_osr_method() && !nm->is_locked_by_vm() && !nm->is_native_method()) {
+
+      // only count methods that can be speculatively disconnected
+      nmethod_count++;
+
+      if (nm->is_in_use() && (curr_comp_id < flush_target)) {
+        if ((nm->method()->code() == nm)) {
+          // This method has not been previously considered for
+          // unloading or it was restored already
+          CodeCache::speculatively_disconnect(nm);
+          disconnected++;
+        } else if (nm->is_speculatively_disconnected()) {
+          // This method was previously considered for preemptive unloading and was not called since then
+          CompilationPolicy::policy()->delay_compilation(nm->method());
+          nm->make_not_entrant();
+          made_not_entrant++;
+        }
+
+        if (curr_comp_id > _highest_marked) {
+          _highest_marked = curr_comp_id;
+        }
       }
     }
     nm = CodeCache::alive_nmethod(CodeCache::next(nm));
   }
+
+  // remember how many compile_ids wheren't seen last flush.
+  _dead_compile_ids = curr_max_comp_id - nmethod_count;

   log_sweep("stop_cleaning",
             "disconnected='" UINT32_FORMAT "' made_not_entrant='" UINT32_FORMAT "'",
             disconnected, made_not_entrant);

   // Shut off compiler. Sweeper will start over with a new stack scan and
   // traversal cycle and turn it back on if it clears enough space.
-  if (was_full()) {
-    _last_was_full = os::javaTimeMillis();
-    CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
-  }
+  if (is_full) {
+    _last_full_flush_time = os::javaTimeMillis();
+  }
+
+  jlong disconnect_end_counter = os::elapsed_counter();
+  jlong disconnect_time = disconnect_end_counter - disconnect_start_counter;
+  _total_disconnect_time += disconnect_time;
+  _peak_disconnect_time = MAX2(disconnect_time, _peak_disconnect_time);
+
+  EventCleanCodeCache event(UNTIMED);
+  if (event.should_commit()) {
+    event.set_starttime(disconnect_start_counter);
+    event.set_endtime(disconnect_end_counter);
+    event.set_disconnectedCount(disconnected);
+    event.set_madeNonEntrantCount(made_not_entrant);
+    event.commit();
+  }
+  _number_of_flushes++;

   // After two more traversals the sweeper will get rid of unrestored nmethods
-  _was_full_traversal = _traversals;
+  _last_flush_traversal_id = _traversals;
+  _resweep = true;
 #ifdef ASSERT
-  jlong end = os::javaTimeMillis();
+
   if(PrintMethodFlushing && Verbose) {
-    tty->print_cr("### sweeper: unload time: " INT64_FORMAT, end-start);
+    tty->print_cr("### sweeper: unload time: " INT64_FORMAT, (jlong)disconnect_time);
   }
 #endif
 }

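For reference, the timing added around sweeping and disconnecting in this change follows a simple pattern: take an elapsed-counter delta, add it to a running total, and track the peak with MAX2. Below is a minimal stand-alone sketch of that pattern under stated assumptions; std::chrono replaces os::elapsed_counter() and the names are illustrative, not HotSpot code.

    #include <algorithm>
    #include <chrono>
    #include <cstdint>

    static int64_t total_time_sweeping = 0;      // stands in for _total_time_sweeping
    static int64_t peak_sweep_fraction_time = 0; // stands in for _peak_sweep_fraction_time

    template <typename F>
    void timed_sweep_fraction(F do_fraction) {
      auto start = std::chrono::steady_clock::now();
      do_fraction();
      auto end = std::chrono::steady_clock::now();
      int64_t elapsed =
          std::chrono::duration_cast<std::chrono::microseconds>(end - start).count();
      total_time_sweeping += elapsed;                                         // running total
      peak_sweep_fraction_time = std::max(peak_sweep_fraction_time, elapsed); // peak so far
    }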