comparison src/share/vm/runtime/sweeper.cpp @ 13074:78da3894b86f

8027593: performance drop with constrained codecache starting with hs25 b111
Summary: Fixed proper sweeping of small code cache sizes
Reviewed-by: kvn, iveresov
author anoll
date Tue, 12 Nov 2013 09:32:50 +0100
parents 510fbd28919c
children 096c224171c4 938e1e64e28f
comparing 13073:1dcea64e9f00 with 13074:78da3894b86f
--- a/src/share/vm/runtime/sweeper.cpp
+++ b/src/share/vm/runtime/sweeper.cpp
@@ -110,20 +110,19 @@
 
 void NMethodSweeper::record_sweep(nmethod* nm, int line) {
   if (_records != NULL) {
     _records[_sweep_index].traversal = _traversals;
     _records[_sweep_index].traversal_mark = nm->_stack_traversal_mark;
-    _records[_sweep_index].invocation = _invocations;
+    _records[_sweep_index].invocation = _sweep_fractions_left;
     _records[_sweep_index].compile_id = nm->compile_id();
     _records[_sweep_index].kind = nm->compile_kind();
     _records[_sweep_index].state = nm->_state;
     _records[_sweep_index].vep = nm->verified_entry_point();
     _records[_sweep_index].uep = nm->entry_point();
     _records[_sweep_index].line = line;
-
     _sweep_index = (_sweep_index + 1) % SweeperLogEntries;
   }
 }
 #else
 #define SWEEP(nm)
 #endif
 
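Note: `record_sweep` above is debug-only instrumentation; it appends into a fixed-size ring buffer whose write index wraps modulo `SweeperLogEntries`, so the buffer always holds the most recent entries (the `#else` branch compiles the `SWEEP(nm)` hook away entirely). A minimal standalone sketch of the same pattern; `Record`, `RingLog`, and the capacity of 128 are illustrative names and values, not HotSpot's:

```cpp
#include <cstddef>
#include <vector>

// Minimal ring-buffer log, mirroring the modulo-increment used by
// NMethodSweeper::record_sweep. Capacity is fixed; the write index wraps,
// so the buffer always holds the most recent 'capacity' entries.
struct Record {
  long traversal;   // which sweep the event belongs to
  int  invocation;  // sweep fractions left when the event was logged
  int  line;        // source line that logged the event
};

class RingLog {
 public:
  explicit RingLog(size_t capacity) : _entries(capacity), _index(0) {}

  void record(const Record& r) {
    _entries[_index] = r;
    _index = (_index + 1) % _entries.size();  // wrap; oldest entry is overwritten
  }

 private:
  std::vector<Record> _entries;
  size_t _index;
};

int main() {
  RingLog log(128);                  // stands in for SweeperLogEntries
  Record r = { 1, 16, __LINE__ };    // stands in for one SWEEP(nm) event
  log.record(r);
  return 0;
}
```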
@@ -130,25 +129,28 @@
 nmethod*  NMethodSweeper::_current = NULL; // Current nmethod
-long      NMethodSweeper::_traversals = 0; // Nof. stack traversals performed
-int       NMethodSweeper::_seen = 0;       // Nof. nmethods we have currently processed in current pass of CodeCache
-int       NMethodSweeper::_flushed_count = 0;   // Nof. nmethods flushed in current sweep
-int       NMethodSweeper::_zombified_count = 0; // Nof. nmethods made zombie in current sweep
-int       NMethodSweeper::_marked_count = 0;    // Nof. nmethods marked for reclaim in current sweep
-
-volatile int NMethodSweeper::_invocations = 0;   // Nof. invocations left until we are completed with this pass
-volatile int NMethodSweeper::_sweep_started = 0; // Whether a sweep is in progress.
-
-jint  NMethodSweeper::_locked_seen = 0;
-jint  NMethodSweeper::_not_entrant_seen_on_stack = 0;
-bool  NMethodSweeper::_request_mark_phase = false;
-
-int   NMethodSweeper::_total_nof_methods_reclaimed = 0;
-jlong NMethodSweeper::_total_time_sweeping = 0;
-jlong NMethodSweeper::_total_time_this_sweep = 0;
-jlong NMethodSweeper::_peak_sweep_time = 0;
-jlong NMethodSweeper::_peak_sweep_fraction_time = 0;
-int   NMethodSweeper::_hotness_counter_reset_val = 0;
+long      NMethodSweeper::_traversals = 0;   // Stack scan count, also sweep ID.
+long      NMethodSweeper::_time_counter = 0; // Virtual time used to periodically invoke sweeper
+long      NMethodSweeper::_last_sweep = 0;   // Value of _time_counter when the last sweep happened
+int       NMethodSweeper::_seen = 0;         // Nof. nmethods we have currently processed in current pass of CodeCache
+int       NMethodSweeper::_flushed_count = 0;   // Nof. nmethods flushed in current sweep
+int       NMethodSweeper::_zombified_count = 0; // Nof. nmethods made zombie in current sweep
+int       NMethodSweeper::_marked_for_reclamation_count = 0; // Nof. nmethods marked for reclaim in current sweep
+
+volatile bool NMethodSweeper::_should_sweep = true;      // Indicates if we should invoke the sweeper
+volatile int  NMethodSweeper::_sweep_fractions_left = 0; // Nof. invocations left until we are completed with this pass
+volatile int  NMethodSweeper::_sweep_started = 0;        // Flag to control conc sweeper
+volatile int  NMethodSweeper::_bytes_changed = 0;        // Counts the total nmethod size if the nmethod changed from:
+                                                         //   1) alive -> not_entrant
+                                                         //   2) not_entrant -> zombie
+                                                         //   3) zombie -> marked_for_reclamation
+
+int   NMethodSweeper::_total_nof_methods_reclaimed = 0; // Accumulated nof methods flushed
+jlong NMethodSweeper::_total_time_sweeping = 0;         // Accumulated time sweeping
+jlong NMethodSweeper::_total_time_this_sweep = 0;       // Total time this sweep
+jlong NMethodSweeper::_peak_sweep_time = 0;             // Peak time for a full sweep
+jlong NMethodSweeper::_peak_sweep_fraction_time = 0;    // Peak time sweeping one fraction
 int   NMethodSweeper::_hotness_counter_reset_val = 0;
 
 
 class MarkActivationClosure: public CodeBlobClosure {
  public:
   virtual void do_code_blob(CodeBlob* cb) {
@@ -195,30 +197,29 @@
   // to scan stacks
   if (!MethodFlushing) {
     return;
   }
 
+  // Increase time so that we can estimate when to invoke the sweeper again.
+  _time_counter++;
+
   // Check for restart
   assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
-  if (!sweep_in_progress() && need_marking_phase()) {
+  if (!sweep_in_progress()) {
     _seen = 0;
-    _invocations = NmethodSweepFraction;
+    _sweep_fractions_left = NmethodSweepFraction;
     _current = CodeCache::first_nmethod();
     _traversals += 1;
     _total_time_this_sweep = 0;
 
     if (PrintMethodFlushing) {
       tty->print_cr("### Sweep: stack traversal %d", _traversals);
     }
     Threads::nmethods_do(&mark_activation_closure);
 
-    // reset the flags since we started a scan from the beginning.
-    reset_nmethod_marking();
-    _locked_seen = 0;
-    _not_entrant_seen_on_stack = 0;
   } else {
     // Only set hotness counter
     Threads::nmethods_do(&set_hotness_closure);
   }
 
   OrderAccess::storestore();
 }
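Note: `mark_active_nmethods()` drives everything through closures. On a sweep restart it marks every nmethod found on a thread stack with the current traversal; between restarts it only refreshes hotness counters. A sketch of that visitor shape, with stand-in types rather than HotSpot's `CodeBlobClosure` hierarchy:

```cpp
#include <vector>

// Stand-ins for the HotSpot classes; only the pattern is the point here:
// Threads::nmethods_do(&closure) applies one virtual call to every nmethod
// found on the thread stacks.
struct Blob {
  long stack_traversal_mark;  // sweep in which the blob was last seen on a stack
  int  hotness_counter;       // refreshed while the method is still executing
};

struct BlobClosure {
  virtual ~BlobClosure() {}
  virtual void do_blob(Blob* b) = 0;
};

// Used on a full restart: remember the traversal in which each active blob
// was seen, so a later pass can prove a not-entrant method off-stack.
struct MarkActivation : BlobClosure {
  long traversal;
  explicit MarkActivation(long t) : traversal(t) {}
  void do_blob(Blob* b) { b->stack_traversal_mark = traversal; }
};

// Used between restarts: only refresh hotness, like set_hotness_closure.
struct SetHotness : BlobClosure {
  int reset_val;
  explicit SetHotness(int v) : reset_val(v) {}
  void do_blob(Blob* b) { b->hotness_counter = reset_val; }
};

void for_each_blob_on_stacks(std::vector<Blob*>& blobs, BlobClosure* cl) {
  for (size_t i = 0; i < blobs.size(); i++) {
    cl->do_blob(blobs[i]);
  }
}

int main() {
  Blob b = { 0, 0 };
  std::vector<Blob*> stacks(1, &b);
  MarkActivation mark(42);
  for_each_blob_on_stacks(stacks, &mark);
  return b.stack_traversal_mark == 42 ? 0 : 1;
}
```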
@@ -225,13 +226,47 @@
-
+/**
+ * This function invokes the sweeper if at least one of the three conditions is met:
+ * (1) The code cache is getting full
+ * (2) There are sufficient state changes in/since the last sweep.
+ * (3) We have not been sweeping for 'some time'
+ */
 void NMethodSweeper::possibly_sweep() {
   assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
   if (!MethodFlushing || !sweep_in_progress()) {
     return;
   }
 
-  if (_invocations > 0) {
+  // If there was no state change while nmethod sweeping, 'should_sweep' will be false.
+  // This is one of the two places where should_sweep can be set to true. The general
+  // idea is as follows: If there is enough free space in the code cache, there is no
+  // need to invoke the sweeper. The following formula (which determines whether to invoke
+  // the sweeper or not) depends on the assumption that for larger ReservedCodeCacheSizes
+  // we need less frequent sweeps than for smaller ReservedCodeCacheSizes. Furthermore,
+  // the formula considers how much space in the code cache is currently used. Here are
+  // some examples that will (hopefully) help in understanding.
+  //
+  // Small ReservedCodeCacheSizes (e.g., < 16M):  We invoke the sweeper every time, since
+  //                                              the result of the division is 0. This
+  //                                              keeps the used code cache size small
+  //                                              (important for embedded Java).
+  // Large ReservedCodeCacheSize (e.g., 256M, code cache is 10% full): The formula
+  //                                              computes: (256 / 16) - 1 = 15.
+  //                                              As a result, we invoke the sweeper after
+  //                                              15 invocations of 'mark_active_nmethods()'.
+  // Large ReservedCodeCacheSize (e.g., 256M, code cache is 90% full): The formula
+  //                                              computes: (256 / 16) - 10 = 6.
+  if (!_should_sweep) {
+    int time_since_last_sweep = _time_counter - _last_sweep;
+    double wait_until_next_sweep = (ReservedCodeCacheSize / (16 * M)) - time_since_last_sweep -
+                                   CodeCache::reverse_free_ratio();
+
+    if ((wait_until_next_sweep <= 0.0) || !CompileBroker::should_compile_new_jobs()) {
+      _should_sweep = true;
+    }
+  }
+
+  if (_should_sweep && _sweep_fractions_left > 0) {
     // Only one thread at a time will sweep
     jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 );
     if (old != 0) {
       return;
     }
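Note: to make the interval arithmetic above concrete, here is a compilable sketch of the heuristic. `reverse_free_ratio()` is approximated as capacity divided by free space, which reproduces the values in the comment's examples (about 1 at 10% occupancy, 10 at 90%); the exact HotSpot definition lives in codeCache.cpp, so treat that part as an assumption:

```cpp
#include <cstdio>

// Worked example of the sweep-interval heuristic. The heuristic is taken
// from the hunk above; reverse_free_ratio() here is an approximation.
static const long M = 1024 * 1024;

double reverse_free_ratio(double used_fraction) {
  return 1.0 / (1.0 - used_fraction);  // ~1 when nearly empty, ~10 at 90% full
}

double wait_until_next_sweep(long reserved_code_cache_size,
                             int time_since_last_sweep,
                             double used_fraction) {
  return (reserved_code_cache_size / (16 * M)) - time_since_last_sweep -
         reverse_free_ratio(used_fraction);
}

int main() {
  // 8M cache: the division yields 0, so the result is always <= 0 and the
  // sweeper runs on every invocation (the small-cache case in the comment).
  printf("%.1f\n", wait_until_next_sweep(8 * M, 0, 0.10));
  // 256M cache, 10% full: 16 - 0 - ~1.1, i.e. sweep after ~15 ticks.
  printf("%.1f\n", wait_until_next_sweep(256 * M, 0, 0.10));
  // 256M cache, 90% full: 16 - 0 - 10, i.e. sweep after ~6 ticks.
  printf("%.1f\n", wait_until_next_sweep(256 * M, 0, 0.90));
  return 0;
}
```

Each call to `mark_active_nmethods()` advances the virtual clock by one tick, so "ticks" here are stack-scan safepoints, not wall-clock time.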
@@ -240,12 +275,28 @@
       // Create the ring buffer for the logging code
       _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries, mtGC);
       memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries);
     }
 #endif
-    if (_invocations > 0) {
+
+    if (_sweep_fractions_left > 0) {
       sweep_code_cache();
-      _invocations--;
+      _sweep_fractions_left--;
+    }
+
+    // We are done with sweeping the code cache once.
+    if (_sweep_fractions_left == 0) {
+      _last_sweep = _time_counter;
+      // Reset flag; temporarily disables sweeper
+      _should_sweep = false;
+      // If there was enough state change, 'possibly_enable_sweeper()'
+      // sets '_should_sweep' to true
+      possibly_enable_sweeper();
+      // Reset _bytes_changed only if there was enough state change. _bytes_changed
+      // can further increase by calls to 'report_state_change'.
+      if (_should_sweep) {
+        _bytes_changed = 0;
+      }
     }
     _sweep_started = 0;
   }
 }
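Note: the `Atomic::cmpxchg(1, &_sweep_started, 0)` above is a try-lock. Whichever thread first flips the flag from 0 to 1 sweeps the fraction; every other thread returns instead of blocking. A standalone sketch of the same gate using `std::atomic` (C++11) rather than HotSpot's `Atomic` class:

```cpp
#include <atomic>

// One-sweeper-at-a-time gating: the thread that wins the compare-and-swap
// proceeds, losers simply return. The flag is reset when the fraction is done.
static std::atomic<int> sweep_started(0);

void possibly_sweep_fraction() {
  int expected = 0;
  if (!sweep_started.compare_exchange_strong(expected, 1)) {
    return;  // somebody else won the race and is sweeping
  }
  // ... sweep one fraction of the code cache ...
  sweep_started.store(0);  // reopen the gate for the next invocation
}

int main() {
  possibly_sweep_fraction();  // single-threaded demo; in the VM many threads race here
  return 0;
}
```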
@@ -252,31 +303,30 @@
 
 void NMethodSweeper::sweep_code_cache() {
-
   jlong sweep_start_counter = os::elapsed_counter();
 
   _flushed_count   = 0;
   _zombified_count = 0;
-  _marked_count = 0;
+  _marked_for_reclamation_count = 0;
 
   if (PrintMethodFlushing && Verbose) {
-    tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _invocations);
+    tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _sweep_fractions_left);
   }
 
   if (!CompileBroker::should_compile_new_jobs()) {
     // If we have turned off compilations we might as well do full sweeps
     // in order to reach the clean state faster. Otherwise the sleeping compiler
     // threads will slow down sweeping.
-    _invocations = 1;
+    _sweep_fractions_left = 1;
   }
 
   // We want to visit all nmethods after NmethodSweepFraction
   // invocations so divide the remaining number of nmethods by the
   // remaining number of invocations. This is only an estimate since
   // the number of nmethods changes during the sweep so the final
   // stage must iterate until there are no more nmethods.
-  int todo = (CodeCache::nof_nmethods() - _seen) / _invocations;
+  int todo = (CodeCache::nof_nmethods() - _seen) / _sweep_fractions_left;
   int swept_count = 0;
 
 
   assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
   assert(!CodeCache_lock->owned_by_self(), "just checking");
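Note: the `todo` division partitions the remaining work evenly across the remaining invocations. A worked example under the simplifying assumption of a constant nmethod count:

```cpp
#include <cstdio>

// With NmethodSweepFraction = 16 and a constant 3200 nmethods, each fraction
// sweeps (remaining / fractions_left), i.e. about 200 nmethods per call. The
// last fraction ignores 'todo' and runs to the end of the code cache, which
// absorbs rounding error and any nmethods added while the sweep was running.
int main() {
  int nof_nmethods = 3200;  // assumed stable for the example
  int seen = 0;
  for (int fractions_left = 16; fractions_left >= 1; fractions_left--) {
    int todo = (nof_nmethods - seen) / fractions_left;
    printf("fractions_left=%2d todo=%d\n", fractions_left, todo);
    seen += todo;
  }
  return 0;
}
```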
@@ -284,15 +334,15 @@
   int freed_memory = 0;
   {
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 
     // The last invocation iterates until there are no more nmethods
-    for (int i = 0; (i < todo || _invocations == 1) && _current != NULL; i++) {
+    for (int i = 0; (i < todo || _sweep_fractions_left == 1) && _current != NULL; i++) {
       swept_count++;
       if (SafepointSynchronize::is_synchronizing()) { // Safepoint request
         if (PrintMethodFlushing && Verbose) {
-          tty->print_cr("### Sweep at %d out of %d, invocation: %d, yielding to safepoint", _seen, CodeCache::nof_nmethods(), _invocations);
+          tty->print_cr("### Sweep at %d out of %d, invocation: %d, yielding to safepoint", _seen, CodeCache::nof_nmethods(), _sweep_fractions_left);
         }
         MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 
         assert(Thread::current()->is_Java_thread(), "should be java thread");
         JavaThread* thread = (JavaThread*)Thread::current();
@@ -312,23 +362,11 @@
       _seen++;
       _current = next;
     }
   }
 
-  assert(_invocations > 1 || _current == NULL, "must have scanned the whole cache");
-
-  if (!sweep_in_progress() && !need_marking_phase() && (_locked_seen || _not_entrant_seen_on_stack)) {
-    // we've completed a scan without making progress but there were
-    // nmethods we were unable to process either because they were
-    // locked or were still on stack. We don't have to aggressively
-    // clean them up so just stop scanning. We could scan once more
-    // but that complicates the control logic and it's unlikely to
-    // matter much.
-    if (PrintMethodFlushing) {
-      tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
-    }
-  }
+  assert(_sweep_fractions_left > 1 || _current == NULL, "must have scanned the whole cache");
 
   jlong sweep_end_counter = os::elapsed_counter();
   jlong sweep_time = sweep_end_counter - sweep_start_counter;
   _total_time_sweeping  += sweep_time;
   _total_time_this_sweep += sweep_time;
@@ -338,25 +376,25 @@
   EventSweepCodeCache event(UNTIMED);
   if (event.should_commit()) {
     event.set_starttime(sweep_start_counter);
     event.set_endtime(sweep_end_counter);
     event.set_sweepIndex(_traversals);
-    event.set_sweepFractionIndex(NmethodSweepFraction - _invocations + 1);
+    event.set_sweepFractionIndex(NmethodSweepFraction - _sweep_fractions_left + 1);
     event.set_sweptCount(swept_count);
     event.set_flushedCount(_flushed_count);
-    event.set_markedCount(_marked_count);
+    event.set_markedCount(_marked_for_reclamation_count);
     event.set_zombifiedCount(_zombified_count);
     event.commit();
   }
 
 #ifdef ASSERT
   if(PrintMethodFlushing) {
-    tty->print_cr("### sweeper: sweep time(%d): " INT64_FORMAT, _invocations, (jlong)sweep_time);
+    tty->print_cr("### sweeper: sweep time(%d): " INT64_FORMAT, _sweep_fractions_left, (jlong)sweep_time);
   }
 #endif
 
-  if (_invocations == 1) {
+  if (_sweep_fractions_left == 1) {
     _peak_sweep_time = MAX2(_peak_sweep_time, _total_time_this_sweep);
     log_sweep("finished");
   }
 
   // Sweeper is the only case where memory is released, check here if it
@@ -366,13 +404,38 @@
   // cases when compilation was disabled although there is 4MB (or more) free
   // memory in the code cache. The reason is code cache fragmentation. Therefore,
   // it only makes sense to re-enable compilation if we have actually freed memory.
   // Note that typically several kB are released for sweeping 16MB of the code
   // cache. As a result, 'freed_memory' > 0 to restart the compiler.
-  if (UseCodeCacheFlushing && (!CompileBroker::should_compile_new_jobs() && (freed_memory > 0))) {
+  if (!CompileBroker::should_compile_new_jobs() && (freed_memory > 0)) {
     CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
     log_sweep("restart_compiler");
+  }
+}
+
+/**
+ * This function updates the sweeper statistics that keep track of nmethod
+ * state changes. If there is 'enough' state change, the sweeper is invoked
+ * as soon as possible. There can be data races on _bytes_changed. The data
+ * races are benign, since it does not matter if we lose a couple of bytes.
+ * In the worst case we call the sweeper a little later. Also, we are guaranteed
+ * to invoke the sweeper if the code cache gets full.
+ */
+void NMethodSweeper::report_state_change(nmethod* nm) {
+  _bytes_changed += nm->total_size();
+  possibly_enable_sweeper();
+}
+
+/**
+ * Function determines if there was 'enough' state change in the code cache to invoke
+ * the sweeper again. Currently, we determine 'enough' as more than 1% state change in
+ * the code cache since the last sweep.
+ */
+void NMethodSweeper::possibly_enable_sweeper() {
+  double percent_changed = ((double)_bytes_changed / (double)ReservedCodeCacheSize) * 100;
+  if (percent_changed > 1.0) {
+    _should_sweep = true;
   }
 }
 
 class NMethodMarker: public StackObj {
  private:
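Note: concretely, the 1% rule means the sweeper only re-arms after a noticeable fraction of the reserved cache has changed state. A standalone sketch of the two new functions above (globals stand in for the static class members; the 240M size is just an example value):

```cpp
#include <cstdio>

static const long reserved_code_cache_size = 240L * 1024 * 1024;
static long bytes_changed = 0;
static bool should_sweep = false;

// Same threshold test as possibly_enable_sweeper(): more than 1% of the
// reserved code cache must have changed state since the last sweep.
void possibly_enable_sweeper() {
  double percent_changed = ((double)bytes_changed / (double)reserved_code_cache_size) * 100;
  if (percent_changed > 1.0) {
    should_sweep = true;
  }
}

void report_state_change(long nmethod_total_size) {
  // Racy on purpose in HotSpot: losing an update only delays the sweep.
  bytes_changed += nmethod_total_size;
  possibly_enable_sweeper();
}

int main() {
  report_state_change(2L * 1024 * 1024);  // 2M changed: 0.83% of 240M, sweeper stays off
  printf("%d\n", should_sweep);           // prints 0
  report_state_change(1L * 1024 * 1024);  // cumulative 3M: 1.25% of 240M
  printf("%d\n", should_sweep);           // prints 1
  return 0;
}
```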
@@ -422,13 +485,10 @@
     if (nm->is_alive()) {
       // Clean inline caches that point to zombie/non-entrant methods
       MutexLocker cl(CompiledIC_lock);
       nm->cleanup_inline_caches();
       SWEEP(nm);
-    } else {
-      _locked_seen++;
-      SWEEP(nm);
     }
     return freed_memory;
   }
 
   if (nm->is_zombie()) {
@@ -446,33 +506,30 @@
     } else {
       if (PrintMethodFlushing && Verbose) {
         tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
       }
       nm->mark_for_reclamation();
-      request_nmethod_marking();
-      _marked_count++;
+      // Keep track of code cache state change
+      _bytes_changed += nm->total_size();
+      _marked_for_reclamation_count++;
       SWEEP(nm);
     }
   } else if (nm->is_not_entrant()) {
     // If there are no current activations of this method on the
     // stack we can safely convert it to a zombie method
     if (nm->can_not_entrant_be_converted()) {
       if (PrintMethodFlushing && Verbose) {
         tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
       }
+      // Code cache state change is tracked in make_zombie()
       nm->make_zombie();
-      request_nmethod_marking();
       _zombified_count++;
       SWEEP(nm);
     } else {
       // Still alive, clean up its inline caches
       MutexLocker cl(CompiledIC_lock);
       nm->cleanup_inline_caches();
-      // we couldn't transition this nmethod so don't immediately
-      // request a rescan. If this method stays on the stack for a
-      // long time we don't want to keep rescanning the code cache.
-      _not_entrant_seen_on_stack++;
       SWEEP(nm);
     }
   } else if (nm->is_unloaded()) {
     // Unloaded code, just make it a zombie
     if (PrintMethodFlushing && Verbose) {
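Note: the branch ladder above is the sweeper's half of the nmethod life cycle: not-entrant methods become zombies once proven off-stack, zombies are marked for reclamation one pass later, and unloaded code is zombied directly. A compact model of those transitions; the enum and helper are illustrative, since HotSpot encodes the state in `nmethod::_state` and performs the transitions in `make_not_entrant()`/`make_zombie()`/`mark_for_reclamation()`:

```cpp
#include <cstdio>

// Illustrative model of the transitions process_nmethod() drives.
enum NMethodState {
  alive,                  // entry points enabled; inline caches kept clean
  not_entrant,            // no new activations; may still be live on a stack
  zombie,                 // proven off-stack; reclaimed in a later pass
  marked_for_reclamation, // will be flushed by the next sweep fraction
  unloaded                // class loader is gone; zombied (or freed at once if OSR)
};

NMethodState next_state(NMethodState s, bool on_stack) {
  switch (s) {
    case not_entrant: return on_stack ? not_entrant : zombie;
    case zombie:      return marked_for_reclamation;
    case unloaded:    return zombie;  // OSR methods are released immediately instead
    default:          return s;       // alive / already marked: no transition here
  }
}

int main() {
  NMethodState s = not_entrant;
  s = next_state(s, false);  // off-stack: becomes a zombie
  s = next_state(s, false);  // next pass: marked for reclamation
  printf("%d\n", s == marked_for_reclamation);  // prints 1
  return 0;
}
```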
@@ -483,12 +540,12 @@
       // No inline caches will ever point to osr methods, so we can just remove it
       freed_memory = nm->total_size();
       release_nmethod(nm);
       _flushed_count++;
     } else {
+      // Code cache state change is tracked in make_zombie()
       nm->make_zombie();
-      request_nmethod_marking();
       _zombified_count++;
       SWEEP(nm);
     }
   } else {
     if (UseCodeCacheFlushing) {
@@ -512,11 +569,15 @@
           // The second condition is necessary if we are dealing with very small code cache
           // sizes (e.g., <10m) and the code cache size is too small to hold all hot methods.
           // The second condition ensures that methods are not immediately made not-entrant
           // after compilation.
           nm->make_not_entrant();
-          request_nmethod_marking();
+          // Code cache state change is tracked in make_not_entrant()
+          if (PrintMethodFlushing && Verbose) {
+            tty->print_cr("### Nmethod %d/" PTR_FORMAT " made not-entrant: hotness counter %d/%d threshold %f",
+                          nm->compile_id(), nm, nm->hotness_counter(), reset_val, threshold);
+          }
         }
       }
     }
     // Clean-up all inline caches that point to zombie/non-reentrant methods
     MutexLocker cl(CompiledIC_lock);
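Note: the guard that decides when `make_not_entrant()` fires sits just above this hunk and is elided from the comparison. As a hedged illustration of the two conditions the comment describes, assuming the threshold grows with code-cache pressure (via the reverse free ratio) and that a minimum age check prevents flushing freshly compiled methods; names and the threshold formula here are assumptions, not the elided HotSpot code:

```cpp
// Condition 1: the method looks cold (hotness counter below a threshold that
//              rises as the code cache fills up and with higher sweep activity).
// Condition 2: the method is old enough since its last hotness reset, which
//              keeps small code caches from flushing methods right after
//              compilation (this is the "second condition" in the comment).
bool should_make_not_entrant(int hotness_counter, int reset_val,
                             double reverse_free_ratio, int sweep_activity,
                             int time_since_reset, int min_passes_before_flush) {
  double threshold = -reset_val + (reverse_free_ratio * sweep_activity);
  bool looks_cold = hotness_counter < threshold;
  bool old_enough = time_since_reset > min_passes_before_flush;
  return (sweep_activity > 0) && looks_cold && old_enough;
}

int main() {
  // 90% full cache (ratio ~10) and purely illustrative parameter values.
  return should_make_not_entrant(/*hotness*/ -600, /*reset_val*/ 512,
                                 /*reverse_free_ratio*/ 10.0, /*sweep_activity*/ 10,
                                 /*time_since_reset*/ 20, /*min_passes*/ 10) ? 0 : 1;
}
```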