Mercurial > hg > graal-compiler
comparison src/share/vm/code/nmethod.cpp @ 2343:65f880e2869b
Merge
author | dcubed |
---|---|
date | Tue, 15 Mar 2011 06:50:01 -0700 |
parents | 3d5a546351ef 46a56fac55c7 |
children | fc5ebbb2d1a8 |
comparison
equal
deleted
inserted
replaced
2340:4775a1e3e923 | 2343:65f880e2869b |
---|---|
1181 // Set the traversal mark to ensure that the sweeper does 2 | 1181 // Set the traversal mark to ensure that the sweeper does 2 |
1182 // cleaning passes before moving to zombie. | 1182 // cleaning passes before moving to zombie. |
1183 set_stack_traversal_mark(NMethodSweeper::traversal_count()); | 1183 set_stack_traversal_mark(NMethodSweeper::traversal_count()); |
1184 } | 1184 } |
1185 | 1185 |
1186 // Tell if a non-entrant method can be converted to a zombie (i.e., there is no activations on the stack) | 1186 // Tell if a non-entrant method can be converted to a zombie (i.e., |
1187 // there are no activations on the stack, not in use by the VM, | |
1188 // and not in use by the ServiceThread) | |
1187 bool nmethod::can_not_entrant_be_converted() { | 1189 bool nmethod::can_not_entrant_be_converted() { |
1188 assert(is_not_entrant(), "must be a non-entrant method"); | 1190 assert(is_not_entrant(), "must be a non-entrant method"); |
1189 | 1191 |
1190 // Since the nmethod sweeper only does partial sweep the sweeper's traversal | 1192 // Since the nmethod sweeper only does partial sweep the sweeper's traversal |
1191 // count can be greater than the stack traversal count before it hits the | 1193 // count can be greater than the stack traversal count before it hits the |
1192 // nmethod for the second time. | 1194 // nmethod for the second time. |
1193 return stack_traversal_mark()+1 < NMethodSweeper::traversal_count(); | 1195 return stack_traversal_mark()+1 < NMethodSweeper::traversal_count() && |
1196 !is_locked_by_vm(); | |
1194 } | 1197 } |
1195 | 1198 |
1196 void nmethod::inc_decompile_count() { | 1199 void nmethod::inc_decompile_count() { |
1197 if (!is_compiled_by_c2()) return; | 1200 if (!is_compiled_by_c2()) return; |
1198 // Could be gated by ProfileTraps, but do not bother... | 1201 // Could be gated by ProfileTraps, but do not bother... |
1295 } | 1298 } |
1296 | 1299 |
1297 // Common functionality for both make_not_entrant and make_zombie | 1300 // Common functionality for both make_not_entrant and make_zombie |
1298 bool nmethod::make_not_entrant_or_zombie(unsigned int state) { | 1301 bool nmethod::make_not_entrant_or_zombie(unsigned int state) { |
1299 assert(state == zombie || state == not_entrant, "must be zombie or not_entrant"); | 1302 assert(state == zombie || state == not_entrant, "must be zombie or not_entrant"); |
1303 assert(!is_zombie(), "should not already be a zombie"); | |
1300 | 1304 |
1301 // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below. | 1305 // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below. |
1302 nmethodLocker nml(this); | 1306 nmethodLocker nml(this); |
1303 methodHandle the_method(method()); | 1307 methodHandle the_method(method()); |
1304 No_Safepoint_Verifier nsv; | 1308 No_Safepoint_Verifier nsv; |
1305 | 1309 |
1306 { | 1310 { |
1307 // If the method is already zombie there is nothing to do | |
1308 if (is_zombie()) { | |
1309 return false; | |
1310 } | |
1311 | |
1312 // invalidate osr nmethod before acquiring the patching lock since | 1311 // invalidate osr nmethod before acquiring the patching lock since |
1313 // they both acquire leaf locks and we don't want a deadlock. | 1312 // they both acquire leaf locks and we don't want a deadlock. |
1314 // This logic is equivalent to the logic below for patching the | 1313 // This logic is equivalent to the logic below for patching the |
1315 // verified entry point of regular methods. | 1314 // verified entry point of regular methods. |
1316 if (is_osr_method()) { | 1315 if (is_osr_method()) { |
1376 // dependency logic could have become stale. | 1375 // dependency logic could have become stale. |
1377 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); | 1376 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); |
1378 flush_dependencies(NULL); | 1377 flush_dependencies(NULL); |
1379 } | 1378 } |
1380 | 1379 |
1381 { | 1380 // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload |
1382 // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload event | 1381 // event and it hasn't already been reported for this nmethod then |
1383 // and it hasn't already been reported for this nmethod then report it now. | 1382 // report it now. The event may have been reported earlier if the GC |
1384 // (the event may have been reported earlier if the GC marked it for unloading). | 1383 // marked it for unloading. JvmtiDeferredEventQueue support means |
1385 Pause_No_Safepoint_Verifier pnsv(&nsv); | 1384 // we no longer go to a safepoint here. |
1386 post_compiled_method_unload(); | 1385 post_compiled_method_unload(); |
1387 } | |
1388 | 1386 |
1389 #ifdef ASSERT | 1387 #ifdef ASSERT |
1390 // It's no longer safe to access the oops section since zombie | 1388 // It's no longer safe to access the oops section since zombie |
1391 // nmethods aren't scanned for GC. | 1389 // nmethods aren't scanned for GC. |
1392 _oops_are_stale = true; | 1390 _oops_are_stale = true; |
1567 // it's being unloaded there's no way to look it up since the weak | 1565 // it's being unloaded there's no way to look it up since the weak |
1568 // ref will have been cleared. | 1566 // ref will have been cleared. |
1569 if (_jmethod_id != NULL && JvmtiExport::should_post_compiled_method_unload()) { | 1567 if (_jmethod_id != NULL && JvmtiExport::should_post_compiled_method_unload()) { |
1570 assert(!unload_reported(), "already unloaded"); | 1568 assert(!unload_reported(), "already unloaded"); |
1571 JvmtiDeferredEvent event = | 1569 JvmtiDeferredEvent event = |
1572 JvmtiDeferredEvent::compiled_method_unload_event( | 1570 JvmtiDeferredEvent::compiled_method_unload_event(this, |
1573 _jmethod_id, insts_begin()); | 1571 _jmethod_id, insts_begin()); |
1574 if (SafepointSynchronize::is_at_safepoint()) { | 1572 if (SafepointSynchronize::is_at_safepoint()) { |
1575 // Don't want to take the queueing lock. Add it as pending and | 1573 // Don't want to take the queueing lock. Add it as pending and |
1576 // it will get enqueued later. | 1574 // it will get enqueued later. |
1577 JvmtiDeferredEventQueue::add_pending_event(event); | 1575 JvmtiDeferredEventQueue::add_pending_event(event); |
2172 guarantee(cb != NULL && cb->is_nmethod(), "bad pc for a nmethod found"); | 2170 guarantee(cb != NULL && cb->is_nmethod(), "bad pc for a nmethod found"); |
2173 _nm = (nmethod*)cb; | 2171 _nm = (nmethod*)cb; |
2174 lock_nmethod(_nm); | 2172 lock_nmethod(_nm); |
2175 } | 2173 } |
2176 | 2174 |
2177 void nmethodLocker::lock_nmethod(nmethod* nm) { | 2175 // Only JvmtiDeferredEvent::compiled_method_unload_event() |
2176 // should pass zombie_ok == true. | |
2177 void nmethodLocker::lock_nmethod(nmethod* nm, bool zombie_ok) { | |
2178 if (nm == NULL) return; | 2178 if (nm == NULL) return; |
2179 Atomic::inc(&nm->_lock_count); | 2179 Atomic::inc(&nm->_lock_count); |
2180 guarantee(!nm->is_zombie(), "cannot lock a zombie method"); | 2180 guarantee(zombie_ok || !nm->is_zombie(), "cannot lock a zombie method"); |
2181 } | 2181 } |
2182 | 2182 |
2183 void nmethodLocker::unlock_nmethod(nmethod* nm) { | 2183 void nmethodLocker::unlock_nmethod(nmethod* nm) { |
2184 if (nm == NULL) return; | 2184 if (nm == NULL) return; |
2185 Atomic::dec(&nm->_lock_count); | 2185 Atomic::dec(&nm->_lock_count); |