comparison src/share/vm/code/nmethod.cpp @ 2360:fc5ebbb2d1a8

Merge
author twisti
date Fri, 18 Mar 2011 01:44:15 -0700
parents 1c0cf339481b 65f880e2869b
children d673ef06fe96
comparing 2359:d2134498fd3f with 2360:fc5ebbb2d1a8
@@ -168,11 +168,11 @@
   }
 
   int pc_desc_resets;   // number of resets (= number of caches)
   int pc_desc_queries;  // queries to nmethod::find_pc_desc
   int pc_desc_approx;   // number of those which have approximate true
-  int pc_desc_repeats;  // number of _last_pc_desc hits
+  int pc_desc_repeats;  // number of _pc_descs[0] hits
   int pc_desc_hits;     // number of LRU cache hits
   int pc_desc_tests;    // total number of PcDesc examinations
   int pc_desc_searches; // total number of quasi-binary search steps
   int pc_desc_adds;     // number of LRU cache insertions
 
@@ -276,58 +276,61 @@
   return (pc-1)->pc_offset() < pc_offset && pc_offset <= pc->pc_offset();
 }
 
 void PcDescCache::reset_to(PcDesc* initial_pc_desc) {
   if (initial_pc_desc == NULL) {
-    _last_pc_desc = NULL;  // native method
+    _pc_descs[0] = NULL;  // native method; no PcDescs at all
     return;
   }
   NOT_PRODUCT(++nmethod_stats.pc_desc_resets);
   // reset the cache by filling it with benign (non-null) values
   assert(initial_pc_desc->pc_offset() < 0, "must be sentinel");
-  _last_pc_desc = initial_pc_desc + 1;  // first valid one is after sentinel
   for (int i = 0; i < cache_size; i++)
     _pc_descs[i] = initial_pc_desc;
 }
 
 PcDesc* PcDescCache::find_pc_desc(int pc_offset, bool approximate) {
   NOT_PRODUCT(++nmethod_stats.pc_desc_queries);
   NOT_PRODUCT(if (approximate) ++nmethod_stats.pc_desc_approx);
+
+  // Note: one might think that caching the most recently
+  // read value separately would be a win, but one would be
+  // wrong.  When many threads are updating it, the cache
+  // line it's in would bounce between caches, negating
+  // any benefit.
 
   // In order to prevent race conditions do not load cache elements
   // repeatedly, but use a local copy:
   PcDesc* res;
 
-  // Step one:  Check the most recently returned value.
-  res = _last_pc_desc;
+  // Step one:  Check the most recently added value.
+  res = _pc_descs[0];
   if (res == NULL)  return NULL;  // native method; no PcDescs at all
   if (match_desc(res, pc_offset, approximate)) {
     NOT_PRODUCT(++nmethod_stats.pc_desc_repeats);
     return res;
   }
 
-  // Step two:  Check the LRU cache.
-  for (int i = 0; i < cache_size; i++) {
+  // Step two:  Check the rest of the LRU cache.
+  for (int i = 1; i < cache_size; ++i) {
     res = _pc_descs[i];
     if (res->pc_offset() < 0)  break;  // optimization: skip empty cache
     if (match_desc(res, pc_offset, approximate)) {
       NOT_PRODUCT(++nmethod_stats.pc_desc_hits);
-      _last_pc_desc = res;  // record this cache hit in case of repeat
       return res;
     }
   }
 
   // Report failure.
   return NULL;
 }
 
 void PcDescCache::add_pc_desc(PcDesc* pc_desc) {
   NOT_PRODUCT(++nmethod_stats.pc_desc_adds);
-  // Update the LRU cache by shifting pc_desc forward:
+  // Update the LRU cache by shifting pc_desc forward.
   for (int i = 0; i < cache_size; i++) {
     PcDesc* next = _pc_descs[i];
     _pc_descs[i] = pc_desc;
     pc_desc = next;
   }
-  // Note: Do not update _last_pc_desc.  It fronts for the LRU cache.
 }
 
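The PcDescCache change above drops the separate _last_pc_desc field and lets slot 0 of the array front for the most recently added entry, so there is no extra shared field for many threads to bounce between CPU caches. A minimal standalone sketch of the same shift-insert cache pattern follows; Entry, TinyPcCache, CACHE_SIZE and main() are illustrative names, not HotSpot's, and the real PcDescCache additionally supports approximate matching and statistics counting.

// Minimal, self-contained sketch of the shift-insert cache pattern above.
// Entry, CACHE_SIZE and the sentinel are illustrative, not HotSpot's types.
#include <cstdio>

struct Entry {
  int pc_offset;            // key, analogous to PcDesc::pc_offset()
  int payload;              // stand-in for the rest of a PcDesc
};

class TinyPcCache {
  static const int CACHE_SIZE = 4;
  Entry* _slots[CACHE_SIZE];
 public:
  // Fill every slot with a benign sentinel, as reset_to() does.
  void reset_to(Entry* sentinel) {
    for (int i = 0; i < CACHE_SIZE; i++) _slots[i] = sentinel;
  }
  // Probe slot 0 first (most recently added), then the rest.
  Entry* find(int pc_offset) {
    for (int i = 0; i < CACHE_SIZE; i++) {
      Entry* e = _slots[i];
      if (e->pc_offset < 0) break;          // hit the sentinel: rest is empty
      if (e->pc_offset == pc_offset) return e;
    }
    return NULL;
  }
  // Shift everything down one slot and put the new entry at index 0,
  // the same move add_pc_desc() performs.
  void add(Entry* e) {
    for (int i = 0; i < CACHE_SIZE; i++) {
      Entry* next = _slots[i];
      _slots[i] = e;
      e = next;
    }
  }
};

int main() {
  Entry sentinel = { -1, 0 };
  Entry a = { 8, 1 }, b = { 16, 2 };
  TinyPcCache cache;
  cache.reset_to(&sentinel);
  cache.add(&a);
  cache.add(&b);                            // b now occupies slot 0
  std::printf("find(16) -> %d\n", cache.find(16) ? cache.find(16)->payload : -1);
  std::printf("find(99) -> %p\n", (void*)cache.find(99));
  return 0;
}

Because add() always lands the newest entry in slot 0, the "step one" probe in find_pc_desc is simply a check of that slot, which is why pc_desc_repeats is now described as the number of _pc_descs[0] hits.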
@@ -334,14 +337,14 @@
 // adjust pcs_size so that it is a multiple of both oopSize and
 // sizeof(PcDesc) (assumes that if sizeof(PcDesc) is not a multiple
 // of oopSize, then 2*sizeof(PcDesc) is)
 static int adjust_pcs_size(int pcs_size) {
   int nsize = round_to(pcs_size, oopSize);
   if ((nsize % sizeof(PcDesc)) != 0) {
     nsize = pcs_size + sizeof(PcDesc);
   }
   assert((nsize % oopSize) == 0, "correct alignment");
   return nsize;
 }
 
 //-----------------------------------------------------------------------------
 
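A worked example may help with the rounding in adjust_pcs_size above. The sizes used here (an oop size of 8 and a PcDesc size of 12) are made-up values chosen only to satisfy the stated assumption that 2*sizeof(PcDesc) is a multiple of oopSize; they are not HotSpot's actual sizes on any particular platform.

// Worked example of the adjust_pcs_size rounding, with hypothetical sizes.
#include <cassert>

static int round_to(int x, int unit) { return ((x + unit - 1) / unit) * unit; }

static int adjust_pcs_size_demo(int pcs_size, int oop_size, int pc_desc_size) {
  int nsize = round_to(pcs_size, oop_size);
  if ((nsize % pc_desc_size) != 0) {
    nsize = pcs_size + pc_desc_size;        // add room for one more descriptor
  }
  assert((nsize % oop_size) == 0 && "correct alignment");
  return nsize;
}

int main() {
  // 3 descriptors of 12 bytes = 36 bytes: rounding up to 8 gives 40, which is
  // not a multiple of 12, so one extra descriptor slot is added -> 48,
  // a multiple of both 8 and 12.
  assert(adjust_pcs_size_demo(36, 8, 12) == 48);
  // 4 descriptors = 48 bytes is already a multiple of both sizes.
  assert(adjust_pcs_size_demo(48, 8, 12) == 48);
  return 0;
}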
@@ -1178,18 +1181,21 @@
   // Set the traversal mark to ensure that the sweeper does 2
   // cleaning passes before moving to zombie.
   set_stack_traversal_mark(NMethodSweeper::traversal_count());
 }
 
-// Tell if a non-entrant method can be converted to a zombie (i.e., there is no activations on the stack)
+// Tell if a non-entrant method can be converted to a zombie (i.e.,
+// there are no activations on the stack, not in use by the VM,
+// and not in use by the ServiceThread)
 bool nmethod::can_not_entrant_be_converted() {
   assert(is_not_entrant(), "must be a non-entrant method");
 
   // Since the nmethod sweeper only does partial sweep the sweeper's traversal
   // count can be greater than the stack traversal count before it hits the
   // nmethod for the second time.
-  return stack_traversal_mark()+1 < NMethodSweeper::traversal_count();
+  return stack_traversal_mark()+1 < NMethodSweeper::traversal_count() &&
+         !is_locked_by_vm();
 }
 
 void nmethod::inc_decompile_count() {
   if (!is_compiled_by_c2()) return;
   // Could be gated by ProfileTraps, but do not bother...
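The new condition above adds !is_locked_by_vm() to the sweep check. A small self-contained illustration of the predicate follows; can_convert and its arguments are illustrative stand-ins with made-up counts, not HotSpot's API.

// Illustrative check only: the shape mirrors the return expression above.
#include <cassert>

static bool can_convert(long stack_traversal_mark,
                        long sweeper_traversal_count,
                        bool locked_by_vm) {
  // At least two cleaning passes must have happened since the mark was set,
  // and nothing in the VM may still be holding the nmethod locked.
  return stack_traversal_mark + 1 < sweeper_traversal_count && !locked_by_vm;
}

int main() {
  assert(!can_convert(5, 6, false));  // only one pass since the mark
  assert( can_convert(5, 7, false));  // two passes: safe to zombify
  assert(!can_convert(5, 7, true));   // still locked by the VM/ServiceThread
  return 0;
}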
@@ -1292,22 +1298,18 @@
 }
 
 // Common functionality for both make_not_entrant and make_zombie
 bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
   assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
+  assert(!is_zombie(), "should not already be a zombie");
 
   // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
   nmethodLocker nml(this);
   methodHandle the_method(method());
   No_Safepoint_Verifier nsv;
 
   {
-    // If the method is already zombie there is nothing to do
-    if (is_zombie()) {
-      return false;
-    }
-
     // invalidate osr nmethod before acquiring the patching lock since
     // they both acquire leaf locks and we don't want a deadlock.
     // This logic is equivalent to the logic below for patching the
     // verified entry point of regular methods.
     if (is_osr_method()) {
@@ -1373,17 +1375,16 @@
     // dependency logic could have become stale.
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
     flush_dependencies(NULL);
   }
 
-  {
-    // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload event
-    // and it hasn't already been reported for this nmethod then report it now.
-    // (the event may have been reported earlier if the GC marked it for unloading).
-    Pause_No_Safepoint_Verifier pnsv(&nsv);
-    post_compiled_method_unload();
-  }
+  // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload
+  // event and it hasn't already been reported for this nmethod then
+  // report it now.  (The event may have been reported earlier if the GC
+  // marked it for unloading.)  JvmtiDeferredEventQueue support means
+  // we no longer go to a safepoint here.
+  post_compiled_method_unload();
 
 #ifdef ASSERT
   // It's no longer safe to access the oops section since zombie
   // nmethods aren't scanned for GC.
   _oops_are_stale = true;
@@ -1564,11 +1565,11 @@
   // it's being unloaded there's no way to look it up since the weak
   // ref will have been cleared.
   if (_jmethod_id != NULL && JvmtiExport::should_post_compiled_method_unload()) {
     assert(!unload_reported(), "already unloaded");
     JvmtiDeferredEvent event =
-      JvmtiDeferredEvent::compiled_method_unload_event(
+      JvmtiDeferredEvent::compiled_method_unload_event(this,
           _jmethod_id, insts_begin());
     if (SafepointSynchronize::is_at_safepoint()) {
       // Don't want to take the queueing lock.  Add it as pending and
       // it will get enqueued later.
       JvmtiDeferredEventQueue::add_pending_event(event);
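The branch above avoids taking the queueing lock while at a safepoint by parking the event on a pending list that gets folded into the real queue later. A standalone sketch of that idea follows, with illustrative names; it is not the JvmtiDeferredEventQueue implementation (which, in HotSpot, is drained by the ServiceThread).

// Sketch of "defer instead of locking": when the caller must not block on
// the queueing lock, the event is parked and enqueued later.  In HotSpot the
// pending path runs at a safepoint, so no other thread races on it.
#include <cstddef>
#include <cstdio>
#include <deque>
#include <mutex>

struct Event { const char* what; };

class DeferredEventQueue {
  std::mutex        _lock;      // the "queueing lock" the comment refers to
  std::deque<Event> _events;    // drained later by a service thread
  std::deque<Event> _pending;   // only touched when normal enqueueing is unsafe

 public:
  // Safe path: take the lock and enqueue directly.
  void enqueue(const Event& e) {
    std::lock_guard<std::mutex> g(_lock);
    _events.push_back(e);
  }

  // Unsafe-point path: do not take _lock; just record the event as pending.
  void add_pending_event(const Event& e) {
    _pending.push_back(e);
  }

  // Called later from a context where locking is fine.
  void flush_pending() {
    std::lock_guard<std::mutex> g(_lock);
    while (!_pending.empty()) {
      _events.push_back(_pending.front());
      _pending.pop_front();
    }
  }

  std::size_t size() {
    std::lock_guard<std::mutex> g(_lock);
    return _events.size();
  }
};

int main() {
  DeferredEventQueue q;
  q.enqueue(Event{"compiled_method_load"});
  q.add_pending_event(Event{"compiled_method_unload"});  // e.g. at a safepoint
  q.flush_pending();                                      // after the safepoint
  std::printf("%zu events queued\n", q.size());
  return 0;
}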
@@ -2169,14 +2170,16 @@
   guarantee(cb != NULL && cb->is_nmethod(), "bad pc for a nmethod found");
   _nm = (nmethod*)cb;
   lock_nmethod(_nm);
 }
 
-void nmethodLocker::lock_nmethod(nmethod* nm) {
+// Only JvmtiDeferredEvent::compiled_method_unload_event()
+// should pass zombie_ok == true.
+void nmethodLocker::lock_nmethod(nmethod* nm, bool zombie_ok) {
   if (nm == NULL) return;
   Atomic::inc(&nm->_lock_count);
-  guarantee(!nm->is_zombie(), "cannot lock a zombie method");
+  guarantee(zombie_ok || !nm->is_zombie(), "cannot lock a zombie method");
 }
 
 void nmethodLocker::unlock_nmethod(nmethod* nm) {
   if (nm == NULL) return;
   Atomic::dec(&nm->_lock_count);
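nmethodLocker pairs lock_nmethod/unlock_nmethod around an atomic _lock_count so a locked nmethod is not flushed, and the new zombie_ok parameter relaxes the zombie guarantee for the one JVMTI unload-event caller. A minimal sketch of the same counted-guard idea follows; FakeNmethod and ScopedCodeLocker are illustrative stand-ins, not HotSpot types.

// Counted-guard sketch: bump an atomic count while a guard object is live,
// drop it when the guard goes out of scope.
#include <atomic>
#include <cassert>

struct FakeNmethod {
  std::atomic<int> lock_count{0};
  bool zombie = false;
};

class ScopedCodeLocker {
  FakeNmethod* _nm;
 public:
  explicit ScopedCodeLocker(FakeNmethod* nm, bool zombie_ok = false) : _nm(nm) {
    if (_nm == nullptr) return;
    _nm->lock_count.fetch_add(1);            // like Atomic::inc(&nm->_lock_count)
    assert(zombie_ok || !_nm->zombie);       // mirrors the guarantee() above
  }
  ~ScopedCodeLocker() {
    if (_nm == nullptr) return;
    _nm->lock_count.fetch_sub(1);            // like Atomic::dec(&nm->_lock_count)
  }
};

int main() {
  FakeNmethod nm;
  {
    ScopedCodeLocker guard(&nm);             // locked while in scope
    assert(nm.lock_count.load() == 1);
  }
  assert(nm.lock_count.load() == 0);         // released on scope exit
  return 0;
}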