comparison src/share/vm/code/nmethod.cpp @ 2491:0654ee04b214

Merge with OpenJDK.
author Thomas Wuerthinger <thomas.wuerthinger@oracle.com>
date Fri, 22 Apr 2011 15:30:53 +0200
parents 1cfdec4e7f07 3d58a4983660
children 75a99b4f1c98
comparing 2490:29246b1d2d3c with 2491:0654ee04b214
26 #include "code/codeCache.hpp" 26 #include "code/codeCache.hpp"
27 #include "code/compiledIC.hpp" 27 #include "code/compiledIC.hpp"
28 #include "code/nmethod.hpp" 28 #include "code/nmethod.hpp"
29 #include "code/scopeDesc.hpp" 29 #include "code/scopeDesc.hpp"
30 #include "compiler/abstractCompiler.hpp" 30 #include "compiler/abstractCompiler.hpp"
31 #include "compiler/compileBroker.hpp"
31 #include "compiler/compileLog.hpp" 32 #include "compiler/compileLog.hpp"
32 #include "compiler/compilerOracle.hpp" 33 #include "compiler/compilerOracle.hpp"
33 #include "compiler/disassembler.hpp" 34 #include "compiler/disassembler.hpp"
34 #include "interpreter/bytecode.hpp" 35 #include "interpreter/bytecode.hpp"
35 #include "oops/methodDataOop.hpp" 36 #include "oops/methodDataOop.hpp"
168 } 169 }
169 170
170 int pc_desc_resets; // number of resets (= number of caches) 171 int pc_desc_resets; // number of resets (= number of caches)
171 int pc_desc_queries; // queries to nmethod::find_pc_desc 172 int pc_desc_queries; // queries to nmethod::find_pc_desc
172 int pc_desc_approx; // number of those which have approximate true 173 int pc_desc_approx; // number of those which have approximate true
173 int pc_desc_repeats; // number of _last_pc_desc hits 174 int pc_desc_repeats; // number of _pc_descs[0] hits
174 int pc_desc_hits; // number of LRU cache hits 175 int pc_desc_hits; // number of LRU cache hits
175 int pc_desc_tests; // total number of PcDesc examinations 176 int pc_desc_tests; // total number of PcDesc examinations
176 int pc_desc_searches; // total number of quasi-binary search steps 177 int pc_desc_searches; // total number of quasi-binary search steps
177 int pc_desc_adds; // number of LRU cache insertions 178 int pc_desc_adds; // number of LRU cache insertions
178 179
188 pc_desc_tests, pc_desc_searches, pc_desc_adds); 189 pc_desc_tests, pc_desc_searches, pc_desc_adds);
189 } 190 }
190 } nmethod_stats; 191 } nmethod_stats;
191 #endif //PRODUCT 192 #endif //PRODUCT
192 193
194
193 //--------------------------------------------------------------------------------- 195 //---------------------------------------------------------------------------------
194
195
196 // The _unwind_handler is a special marker address, which says that
197 // for given exception oop and address, the frame should be removed
198 // as the tuple cannot be caught in the nmethod
199 address ExceptionCache::_unwind_handler = (address) -1;
200 196
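For orientation (not part of the diff): the marker works because (address) -1 can never be a real handler address. A minimal sketch of the check a user of the exception cache would make, with simplified types and an illustrative helper name:

    // Sketch only; 'address' and 'must_unwind' are stand-ins, not HotSpot declarations.
    typedef unsigned char* address;
    static address const unwind_marker = (address) -1;   // all-ones, never a valid code address

    inline bool must_unwind(address cached_handler) {
      // A cached handler equal to the marker means the (exception, pc) pair
      // cannot be handled inside this nmethod, so the frame is removed instead.
      return cached_handler == unwind_marker;
    }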
201 197
202 ExceptionCache::ExceptionCache(Handle exception, address pc, address handler) { 198 ExceptionCache::ExceptionCache(Handle exception, address pc, address handler) {
203 assert(pc != NULL, "Must be non null"); 199 assert(pc != NULL, "Must be non null");
204 assert(exception.not_null(), "Must be non null"); 200 assert(exception.not_null(), "Must be non null");
281 return (pc-1)->pc_offset() < pc_offset && pc_offset <= pc->pc_offset(); 277 return (pc-1)->pc_offset() < pc_offset && pc_offset <= pc->pc_offset();
282 } 278 }
283 279
284 void PcDescCache::reset_to(PcDesc* initial_pc_desc) { 280 void PcDescCache::reset_to(PcDesc* initial_pc_desc) {
285 if (initial_pc_desc == NULL) { 281 if (initial_pc_desc == NULL) {
286 _last_pc_desc = NULL; // native method 282 _pc_descs[0] = NULL; // native method; no PcDescs at all
287 return; 283 return;
288 } 284 }
289 NOT_PRODUCT(++nmethod_stats.pc_desc_resets); 285 NOT_PRODUCT(++nmethod_stats.pc_desc_resets);
290 // reset the cache by filling it with benign (non-null) values 286 // reset the cache by filling it with benign (non-null) values
291 assert(initial_pc_desc->pc_offset() < 0, "must be sentinel"); 287 assert(initial_pc_desc->pc_offset() < 0, "must be sentinel");
292 _last_pc_desc = initial_pc_desc + 1; // first valid one is after sentinel
293 for (int i = 0; i < cache_size; i++) 288 for (int i = 0; i < cache_size; i++)
294 _pc_descs[i] = initial_pc_desc; 289 _pc_descs[i] = initial_pc_desc;
295 } 290 }
296 291
297 PcDesc* PcDescCache::find_pc_desc(int pc_offset, bool approximate) { 292 PcDesc* PcDescCache::find_pc_desc(int pc_offset, bool approximate) {
298 NOT_PRODUCT(++nmethod_stats.pc_desc_queries); 293 NOT_PRODUCT(++nmethod_stats.pc_desc_queries);
299 NOT_PRODUCT(if (approximate) ++nmethod_stats.pc_desc_approx); 294 NOT_PRODUCT(if (approximate) ++nmethod_stats.pc_desc_approx);
295
296 // Note: one might think that caching the most recently
297 // read value separately would be a win, but one would be
298 // wrong. When many threads are updating it, the cache
299 // line it's in would bounce between caches, negating
300 // any benefit.
300 301
301 // In order to prevent race conditions do not load cache elements 302 // In order to prevent race conditions do not load cache elements
302 // repeatedly, but use a local copy: 303 // repeatedly, but use a local copy:
303 PcDesc* res; 304 PcDesc* res;
304 305
305 // Step one: Check the most recently returned value. 306 // Step one: Check the most recently added value.
306 res = _last_pc_desc; 307 res = _pc_descs[0];
307 if (res == NULL) return NULL; // native method; no PcDescs at all 308 if (res == NULL) return NULL; // native method; no PcDescs at all
308 if (match_desc(res, pc_offset, approximate)) { 309 if (match_desc(res, pc_offset, approximate)) {
309 NOT_PRODUCT(++nmethod_stats.pc_desc_repeats); 310 NOT_PRODUCT(++nmethod_stats.pc_desc_repeats);
310 return res; 311 return res;
311 } 312 }
312 313
313 // Step two: Check the LRU cache. 314 // Step two: Check the rest of the LRU cache.
314 for (int i = 0; i < cache_size; i++) { 315 for (int i = 1; i < cache_size; ++i) {
315 res = _pc_descs[i]; 316 res = _pc_descs[i];
316 if (res->pc_offset() < 0) break; // optimization: skip empty cache 317 if (res->pc_offset() < 0) break; // optimization: skip empty cache
317 if (match_desc(res, pc_offset, approximate)) { 318 if (match_desc(res, pc_offset, approximate)) {
318 NOT_PRODUCT(++nmethod_stats.pc_desc_hits); 319 NOT_PRODUCT(++nmethod_stats.pc_desc_hits);
319 _last_pc_desc = res; // record this cache hit in case of repeat
320 return res; 320 return res;
321 } 321 }
322 } 322 }
323 323
324 // Report failure. 324 // Report failure.
325 return NULL; 325 return NULL;
326 } 326 }
327 327
328 void PcDescCache::add_pc_desc(PcDesc* pc_desc) { 328 void PcDescCache::add_pc_desc(PcDesc* pc_desc) {
329 NOT_PRODUCT(++nmethod_stats.pc_desc_adds); 329 NOT_PRODUCT(++nmethod_stats.pc_desc_adds);
330 // Update the LRU cache by shifting pc_desc forward: 330 // Update the LRU cache by shifting pc_desc forward.
331 for (int i = 0; i < cache_size; i++) { 331 for (int i = 0; i < cache_size; i++) {
332 PcDesc* next = _pc_descs[i]; 332 PcDesc* next = _pc_descs[i];
333 _pc_descs[i] = pc_desc; 333 _pc_descs[i] = pc_desc;
334 pc_desc = next; 334 pc_desc = next;
335 } 335 }
336 // Note: Do not update _last_pc_desc. It fronts for the LRU cache.
337 } 336 }
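Taken together, reset_to, find_pc_desc and add_pc_desc form a tiny shift-to-front cache, and the change in this revision folds the old _last_pc_desc field into slot 0 of the array so that a hit no longer writes to a separately contended word (the cache-line bouncing described in the new comment). A standalone sketch of that shape, with a simplified PcDesc, matching by exact offset only, the statistics counters omitted, and an assumed cache_size of 4:

    #include <cstddef>

    struct FakePcDesc { int pc_offset; };        // stand-in for PcDesc

    class ShiftLruCache {                        // stand-in for PcDescCache
      static const int cache_size = 4;           // assumption; the real value is in the PcDescCache declaration
      FakePcDesc* _pc_descs[cache_size];
    public:
      void reset_to(FakePcDesc* sentinel) {
        for (int i = 0; i < cache_size; i++) _pc_descs[i] = sentinel;  // benign non-NULL fill
      }
      FakePcDesc* find(int pc_offset) {
        for (int i = 0; i < cache_size; i++) {
          FakePcDesc* res = _pc_descs[i];                // slot 0 is the most recently added entry
          if (res == NULL || res->pc_offset < 0) break;  // native method / sentinel: cache is empty
          if (res->pc_offset == pc_offset) return res;   // hit
        }
        return NULL;                                     // miss; the caller falls back to binary search
      }
      void add(FakePcDesc* pc_desc) {
        for (int i = 0; i < cache_size; i++) {   // shift every entry down one slot,
          FakePcDesc* next = _pc_descs[i];       // dropping the oldest one off the end
          _pc_descs[i] = pc_desc;
          pc_desc = next;
        }
      }
    };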
338 337
339 // adjust pcs_size so that it is a multiple of both oopSize and 338 // adjust pcs_size so that it is a multiple of both oopSize and
340 // sizeof(PcDesc) (assumes that if sizeof(PcDesc) is not a multiple 339 // sizeof(PcDesc) (assumes that if sizeof(PcDesc) is not a multiple
341 // of oopSize, then 2*sizeof(PcDesc) is) 340 // of oopSize, then 2*sizeof(PcDesc) is)
342 static int adjust_pcs_size(int pcs_size) { 341 static int adjust_pcs_size(int pcs_size) {
343 int nsize = round_to(pcs_size, oopSize); 342 int nsize = round_to(pcs_size, oopSize);
344 if ((nsize % sizeof(PcDesc)) != 0) { 343 if ((nsize % sizeof(PcDesc)) != 0) {
345 nsize = pcs_size + sizeof(PcDesc); 344 nsize = pcs_size + sizeof(PcDesc);
346 } 345 }
347 assert((nsize % oopSize) == 0, "correct alignment"); 346 assert((nsize % oopSize) == 0, "correct alignment");
348 return nsize; 347 return nsize;
349 } 348 }
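A worked example of the invariant adjust_pcs_size maintains, using made-up sizes (the real oopSize and sizeof(PcDesc) are platform dependent): with oopSize = 8 and sizeof(PcDesc) = 12, five descriptors give pcs_size = 60; round_to(60, 8) = 64 is not a multiple of 12, so the function returns 60 + 12 = 72 instead, which is a multiple of both because an even count of 12-byte descriptors is always 8-byte aligned. A self-contained sketch of that arithmetic:

    #include <cassert>

    // Illustration only: hypothetical sizes, not the real platform values.
    static int round_to(int x, int unit) { return ((x + unit - 1) / unit) * unit; }

    static int adjust(int pcs_size, int oop_size, int pcdesc_size) {
      int nsize = round_to(pcs_size, oop_size);
      if (nsize % pcdesc_size != 0) nsize = pcs_size + pcdesc_size;  // add one spare PcDesc
      return nsize;
    }

    int main() {
      assert(adjust(5 * 12, 8, 12) == 72);  // 72 = 6 * 12 = 9 * 8
      assert(adjust(5 * 16, 8, 16) == 80);  // already a multiple of both: unchanged
      return 0;
    }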
350 349
351 //----------------------------------------------------------------------------- 350 //-----------------------------------------------------------------------------
352 351
469 #endif // def HAVE_DTRACE_H 468 #endif // def HAVE_DTRACE_H
470 } 469 }
471 470
472 471
473 nmethod* nmethod::new_native_nmethod(methodHandle method, 472 nmethod* nmethod::new_native_nmethod(methodHandle method,
473 int compile_id,
474 CodeBuffer *code_buffer, 474 CodeBuffer *code_buffer,
475 int vep_offset, 475 int vep_offset,
476 int frame_complete, 476 int frame_complete,
477 int frame_size, 477 int frame_size,
478 ByteSize basic_lock_owner_sp_offset, 478 ByteSize basic_lock_owner_sp_offset,
485 int native_nmethod_size = allocation_size(code_buffer, sizeof(nmethod)); 485 int native_nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
486 CodeOffsets offsets; 486 CodeOffsets offsets;
487 offsets.set_value(CodeOffsets::Verified_Entry, vep_offset); 487 offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
488 offsets.set_value(CodeOffsets::Frame_Complete, frame_complete); 488 offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
489 nm = new (native_nmethod_size) 489 nm = new (native_nmethod_size)
490 nmethod(method(), native_nmethod_size, &offsets, 490 nmethod(method(), native_nmethod_size, compile_id, &offsets,
491 code_buffer, frame_size, 491 code_buffer, frame_size,
492 basic_lock_owner_sp_offset, basic_lock_sp_offset, 492 basic_lock_owner_sp_offset, basic_lock_sp_offset,
493 oop_maps); 493 oop_maps);
494 NOT_PRODUCT(if (nm != NULL) nmethod_stats.note_native_nmethod(nm)); 494 NOT_PRODUCT(if (nm != NULL) nmethod_stats.note_native_nmethod(nm));
495 if (PrintAssembly && nm != NULL) 495 if (PrintAssembly && nm != NULL)
610 610
611 // For native wrappers 611 // For native wrappers
612 nmethod::nmethod( 612 nmethod::nmethod(
613 methodOop method, 613 methodOop method,
614 int nmethod_size, 614 int nmethod_size,
615 int compile_id,
615 CodeOffsets* offsets, 616 CodeOffsets* offsets,
616 CodeBuffer* code_buffer, 617 CodeBuffer* code_buffer,
617 int frame_size, 618 int frame_size,
618 ByteSize basic_lock_owner_sp_offset, 619 ByteSize basic_lock_owner_sp_offset,
619 ByteSize basic_lock_sp_offset, 620 ByteSize basic_lock_sp_offset,
644 _scopes_pcs_offset = _scopes_data_offset; 645 _scopes_pcs_offset = _scopes_data_offset;
645 _dependencies_offset = _scopes_pcs_offset; 646 _dependencies_offset = _scopes_pcs_offset;
646 _handler_table_offset = _dependencies_offset; 647 _handler_table_offset = _dependencies_offset;
647 _nul_chk_table_offset = _handler_table_offset; 648 _nul_chk_table_offset = _handler_table_offset;
648 _nmethod_end_offset = _nul_chk_table_offset; 649 _nmethod_end_offset = _nul_chk_table_offset;
649 _compile_id = 0; // default 650 _compile_id = compile_id;
650 _comp_level = CompLevel_none; 651 _comp_level = CompLevel_none;
651 _entry_point = code_begin() + offsets->value(CodeOffsets::Entry); 652 _entry_point = code_begin() + offsets->value(CodeOffsets::Entry);
652 _verified_entry_point = code_begin() + offsets->value(CodeOffsets::Verified_Entry); 653 _verified_entry_point = code_begin() + offsets->value(CodeOffsets::Verified_Entry);
653 _osr_entry_point = NULL; 654 _osr_entry_point = NULL;
654 _exception_cache = NULL; 655 _exception_cache = NULL;
655 _pc_desc_cache.reset_to(NULL); 656 _pc_desc_cache.reset_to(NULL);
656 657
657 code_buffer->copy_oops_to(this); 658 code_buffer->copy_oops_to(this);
659 if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
660 CodeCache::add_scavenge_root_nmethod(this);
661 }
658 debug_only(verify_scavenge_root_oops()); 662 debug_only(verify_scavenge_root_oops());
659 CodeCache::commit(this); 663 CodeCache::commit(this);
660 } 664 }
661 665
662 if (PrintNativeNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) { 666 if (PrintNativeNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
765 } 769 }
766 #endif // def HAVE_DTRACE_H 770 #endif // def HAVE_DTRACE_H
767 771
768 void* nmethod::operator new(size_t size, int nmethod_size) { 772 void* nmethod::operator new(size_t size, int nmethod_size) {
769 // Always leave some room in the CodeCache for I2C/C2I adapters 773 // Always leave some room in the CodeCache for I2C/C2I adapters
770 if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) return NULL; 774 if (CodeCache::largest_free_block() < CodeCacheMinimumFreeSpace) return NULL;
771 return CodeCache::allocate(nmethod_size); 775 return CodeCache::allocate(nmethod_size);
772 } 776 }
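The switch from unallocated_capacity() to largest_free_block() makes the headroom check fragmentation-aware: the total free space can be large while no single hole is big enough for an adapter. A sketch of the guard with assumed parameter names (these are not the real CodeCache entry points):

    #include <cstddef>
    #include <new>

    // Sketch only; in HotSpot the check lives in nmethod::operator new and the
    // allocation goes through CodeCache::allocate().
    void* allocate_nmethod(std::size_t nmethod_size,
                           std::size_t largest_free_block,   // biggest contiguous hole in the code cache
                           std::size_t minimum_free_space) { // headroom reserved for I2C/C2I adapters
      if (largest_free_block < minimum_free_space) return NULL;  // refuse rather than eat the headroom
      return ::operator new(nmethod_size, std::nothrow);
    }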
773 777
774 778
775 nmethod::nmethod( 779 nmethod::nmethod(
938 } 942 }
939 943
940 #undef LOG_OFFSET 944 #undef LOG_OFFSET
941 945
942 946
943 void nmethod::print_compilation(outputStream *st, const char *method_name, const char *title,
944 methodOop method, bool is_blocking, int compile_id, int bci, int comp_level) {
945 bool is_synchronized = false, has_xhandler = false, is_native = false;
946 int code_size = -1;
947 if (method != NULL) {
948 is_synchronized = method->is_synchronized();
949 has_xhandler = method->has_exception_handler();
950 is_native = method->is_native();
951 code_size = method->code_size();
952 }
953 // print compilation number
954 st->print("%7d %3d", (int)tty->time_stamp().milliseconds(), compile_id);
955
956 // print method attributes
957 const bool is_osr = bci != InvocationEntryBci;
958 const char blocking_char = is_blocking ? 'b' : ' ';
959 const char compile_type = is_osr ? '%' : ' ';
960 const char sync_char = is_synchronized ? 's' : ' ';
961 const char exception_char = has_xhandler ? '!' : ' ';
962 const char native_char = is_native ? 'n' : ' ';
963 st->print("%c%c%c%c%c ", compile_type, sync_char, exception_char, blocking_char, native_char);
964 if (TieredCompilation) {
965 st->print("%d ", comp_level);
966 }
967
968 // print optional title
969 bool do_nl = false;
970 if (title != NULL) {
971 int tlen = (int) strlen(title);
972 bool do_nl = false;
973 if (tlen > 0 && title[tlen-1] == '\n') { tlen--; do_nl = true; }
974 st->print("%.*s", tlen, title);
975 } else {
976 do_nl = true;
977 }
978
979 // print method name string if given
980 if (method_name != NULL) {
981 st->print(method_name);
982 } else {
983 // otherwise ask the method to print itself
984 if (method != NULL && !Universe::heap()->is_gc_active()) {
985 method->print_short_name(st);
986 } else {
987 st->print("(method)");
988 }
989 }
990
991 if (method != NULL) {
992 // print osr_bci if any
993 if (is_osr) st->print(" @ %d", bci);
994 // print method size
995 st->print(" (%d bytes)", code_size);
996 }
997 if (do_nl) st->cr();
998 }
999
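For reference, this removed helper (its replacement lives in CompileTask::print_compilation, per the compileBroker.hpp include added at the top of the file) produced the familiar PrintCompilation lines. Pieced together from the format strings above, a tiered OSR compile would render roughly as follows; the values and method name are purely illustrative:

      87209 162%s!   3 SomeClass::someMethod @ 14 (166 bytes)

Here '%' marks an OSR compilation, 's' a synchronized method, '!' a method with exception handlers, 'b' a blocking compile, 'n' a native wrapper, the trailing digit is the tier level when TieredCompilation is on, '@ 14' is the OSR bci, and '(166 bytes)' is the bytecode size.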
1000 // Print out more verbose output usually for a newly created nmethod. 947 // Print out more verbose output usually for a newly created nmethod.
1001 void nmethod::print_on(outputStream* st, const char* title) const { 948 void nmethod::print_on(outputStream* st, const char* msg) const {
1002 if (st != NULL) { 949 if (st != NULL) {
1003 ttyLocker ttyl; 950 ttyLocker ttyl;
1004 print_compilation(st, /*method_name*/NULL, title, 951 CompileTask::print_compilation(st, this, msg);
1005 method(), /*is_blocking*/false,
1006 compile_id(),
1007 is_osr_method() ? osr_entry_bci() : InvocationEntryBci,
1008 comp_level());
1009 if (WizardMode) st->print(" (" INTPTR_FORMAT ")", this); 952 if (WizardMode) st->print(" (" INTPTR_FORMAT ")", this);
1010 } 953 }
1011 } 954 }
1012 955
1013 956
1116 "no active breakpoint"); 1059 "no active breakpoint");
1117 } 1060 }
1118 } 1061 }
1119 1062
1120 1063
1064 void nmethod::verify_oop_relocations() {
1065 // Ensure that the code matches the current oop values
1066 RelocIterator iter(this, NULL, NULL);
1067 while (iter.next()) {
1068 if (iter.type() == relocInfo::oop_type) {
1069 oop_Relocation* reloc = iter.oop_reloc();
1070 if (!reloc->oop_is_immediate()) {
1071 reloc->verify_oop_relocation();
1072 }
1073 }
1074 }
1075 }
1076
1077
1121 ScopeDesc* nmethod::scope_desc_at(address pc) { 1078 ScopeDesc* nmethod::scope_desc_at(address pc) {
1122 PcDesc* pd = pc_desc_at(pc); 1079 PcDesc* pd = pc_desc_at(pc);
1123 #ifdef ASSERT
1124 if (pd == NULL) {
1125 tty->print_cr(err_msg("Missing scope at relative pc %d of method %s", pc - code_begin(), this->method()->name()->as_C_string()));
1126 print_pcs();
1127 }
1128 #endif
1129 guarantee(pd != NULL, "scope must be present"); 1080 guarantee(pd != NULL, "scope must be present");
1130 return new ScopeDesc(this, pd->scope_decode_offset(), 1081 return new ScopeDesc(this, pd->scope_decode_offset(),
1131 pd->obj_decode_offset(), pd->should_reexecute(), 1082 pd->obj_decode_offset(), pd->should_reexecute(),
1132 pd->return_oop()); 1083 pd->return_oop());
1133 } 1084 }
1200 // Set the traversal mark to ensure that the sweeper does 2 1151 // Set the traversal mark to ensure that the sweeper does 2
1201 // cleaning passes before moving to zombie. 1152 // cleaning passes before moving to zombie.
1202 set_stack_traversal_mark(NMethodSweeper::traversal_count()); 1153 set_stack_traversal_mark(NMethodSweeper::traversal_count());
1203 } 1154 }
1204 1155
1205 // Tell if a non-entrant method can be converted to a zombie (i.e., there is no activations on the stack) 1156 // Tell if a non-entrant method can be converted to a zombie (i.e.,
1157 // there are no activations on the stack, not in use by the VM,
1158 // and not in use by the ServiceThread)
1206 bool nmethod::can_not_entrant_be_converted() { 1159 bool nmethod::can_not_entrant_be_converted() {
1207 assert(is_not_entrant(), "must be a non-entrant method"); 1160 assert(is_not_entrant(), "must be a non-entrant method");
1208 1161
1209 // Since the nmethod sweeper only does partial sweep the sweeper's traversal 1162 // Since the nmethod sweeper only does partial sweep the sweeper's traversal
1210 // count can be greater than the stack traversal count before it hits the 1163 // count can be greater than the stack traversal count before it hits the
1211 // nmethod for the second time. 1164 // nmethod for the second time.
1212 return stack_traversal_mark()+1 < NMethodSweeper::traversal_count(); 1165 return stack_traversal_mark()+1 < NMethodSweeper::traversal_count() &&
1166 !is_locked_by_vm();
1213 } 1167 }
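The rewritten comment and the extra conjunct tighten the rule: besides two further sweeper cleaning passes having started since the nmethod was last seen on a stack, the nmethod must not be locked by the VM or ServiceThread. A standalone illustration of the arithmetic, with assumed values (not HotSpot code):

    #include <cassert>

    // stack_traversal_mark records the sweeper traversal at which the nmethod was
    // last marked as seen on a stack; is_locked_by_vm() is modelled as a flag here.
    static bool convertible(long stack_traversal_mark, long traversal_count, bool locked_by_vm) {
      return stack_traversal_mark + 1 < traversal_count && !locked_by_vm;
    }

    int main() {
      assert(!convertible(10, 11, false));  // only one pass since the mark: not yet
      assert( convertible(10, 12, false));  // two passes, no activations found: zombie-able
      assert(!convertible(10, 12, true));   // still in use by the VM (the new condition)
      return 0;
    }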
1214 1168
1215 void nmethod::inc_decompile_count() { 1169 void nmethod::inc_decompile_count() {
1216 if (!is_compiled_by_c2()) return; 1170 if (!is_compiled_by_c2()) return;
1217 // Could be gated by ProfileTraps, but do not bother... 1171 // Could be gated by ProfileTraps, but do not bother...
1306 xtty->stamp(); 1260 xtty->stamp();
1307 xtty->end_elem(); 1261 xtty->end_elem();
1308 } 1262 }
1309 } 1263 }
1310 if (PrintCompilation && _state != unloaded) { 1264 if (PrintCompilation && _state != unloaded) {
1311 print_on(tty, _state == zombie ? "made zombie " : "made not entrant "); 1265 print_on(tty, _state == zombie ? "made zombie" : "made not entrant");
1312 tty->cr();
1313 } 1266 }
1314 } 1267 }
1315 1268
1316 // Common functionality for both make_not_entrant and make_zombie 1269 // Common functionality for both make_not_entrant and make_zombie
1317 bool nmethod::make_not_entrant_or_zombie(unsigned int state) { 1270 bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
1318 assert(state == zombie || state == not_entrant, "must be zombie or not_entrant"); 1271 assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
1272 assert(!is_zombie(), "should not already be a zombie");
1319 1273
1320 // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below. 1274 // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
1321 nmethodLocker nml(this); 1275 nmethodLocker nml(this);
1322 methodHandle the_method(method()); 1276 methodHandle the_method(method());
1323 No_Safepoint_Verifier nsv; 1277 No_Safepoint_Verifier nsv;
1324 1278
1325 { 1279 {
1326 // If the method is already zombie there is nothing to do
1327 if (is_zombie()) {
1328 return false;
1329 }
1330
1331 // invalidate osr nmethod before acquiring the patching lock since 1280 // invalidate osr nmethod before acquiring the patching lock since
1332 // they both acquire leaf locks and we don't want a deadlock. 1281 // they both acquire leaf locks and we don't want a deadlock.
1333 // This logic is equivalent to the logic below for patching the 1282 // This logic is equivalent to the logic below for patching the
1334 // verified entry point of regular methods. 1283 // verified entry point of regular methods.
1335 if (is_osr_method()) { 1284 if (is_osr_method()) {
1395 // dependency logic could have become stale. 1344 // dependency logic could have become stale.
1396 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 1345 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1397 flush_dependencies(NULL); 1346 flush_dependencies(NULL);
1398 } 1347 }
1399 1348
1400 { 1349 // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload
1401 // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload event 1350 // event and it hasn't already been reported for this nmethod then
1402 // and it hasn't already been reported for this nmethod then report it now. 1351 // report it now. The event may have been reported earlier if the GC
1403 // (the event may have been reported earlier if the GC marked it for unloading). 1352 // marked it for unloading). JvmtiDeferredEventQueue support means
1404 Pause_No_Safepoint_Verifier pnsv(&nsv); 1353 // we no longer go to a safepoint here.
1405 post_compiled_method_unload(); 1354 post_compiled_method_unload();
1406 }
1407 1355
1408 #ifdef ASSERT 1356 #ifdef ASSERT
1409 // It's no longer safe to access the oops section since zombie 1357 // It's no longer safe to access the oops section since zombie
1410 // nmethods aren't scanned for GC. 1358 // nmethods aren't scanned for GC.
1411 _oops_are_stale = true; 1359 _oops_are_stale = true;
1586 // it's being unloaded there's no way to look it up since the weak 1534 // it's being unloaded there's no way to look it up since the weak
1587 // ref will have been cleared. 1535 // ref will have been cleared.
1588 if (_jmethod_id != NULL && JvmtiExport::should_post_compiled_method_unload()) { 1536 if (_jmethod_id != NULL && JvmtiExport::should_post_compiled_method_unload()) {
1589 assert(!unload_reported(), "already unloaded"); 1537 assert(!unload_reported(), "already unloaded");
1590 JvmtiDeferredEvent event = 1538 JvmtiDeferredEvent event =
1591 JvmtiDeferredEvent::compiled_method_unload_event( 1539 JvmtiDeferredEvent::compiled_method_unload_event(this,
1592 _jmethod_id, insts_begin()); 1540 _jmethod_id, insts_begin());
1593 if (SafepointSynchronize::is_at_safepoint()) { 1541 if (SafepointSynchronize::is_at_safepoint()) {
1594 // Don't want to take the queueing lock. Add it as pending and 1542 // Don't want to take the queueing lock. Add it as pending and
1595 // it will get enqueued later. 1543 // it will get enqueued later.
1596 JvmtiDeferredEventQueue::add_pending_event(event); 1544 JvmtiDeferredEventQueue::add_pending_event(event);
1818 Atomic::cmpxchg_ptr(this, &_oops_do_mark_nmethods, required_mark_nmethods); 1766 Atomic::cmpxchg_ptr(this, &_oops_do_mark_nmethods, required_mark_nmethods);
1819 if (observed_mark_nmethods == required_mark_nmethods) 1767 if (observed_mark_nmethods == required_mark_nmethods)
1820 break; 1768 break;
1821 } 1769 }
1822 // Mark was clear when we first saw this guy. 1770 // Mark was clear when we first saw this guy.
1823 NOT_PRODUCT(if (TraceScavenge) print_on(tty, "oops_do, mark\n")); 1771 NOT_PRODUCT(if (TraceScavenge) print_on(tty, "oops_do, mark"));
1824 return false; 1772 return false;
1825 } 1773 }
1826 } 1774 }
1827 // On fall through, another racing thread marked this nmethod before we did. 1775 // On fall through, another racing thread marked this nmethod before we did.
1828 return true; 1776 return true;
1842 nmethod* cur = _oops_do_mark_nmethods; 1790 nmethod* cur = _oops_do_mark_nmethods;
1843 while (cur != NMETHOD_SENTINEL) { 1791 while (cur != NMETHOD_SENTINEL) {
1844 assert(cur != NULL, "not NULL-terminated"); 1792 assert(cur != NULL, "not NULL-terminated");
1845 nmethod* next = cur->_oops_do_mark_link; 1793 nmethod* next = cur->_oops_do_mark_link;
1846 cur->_oops_do_mark_link = NULL; 1794 cur->_oops_do_mark_link = NULL;
1847 NOT_PRODUCT(if (TraceScavenge) cur->print_on(tty, "oops_do, unmark\n")); 1795 cur->fix_oop_relocations();
1796 NOT_PRODUCT(if (TraceScavenge) cur->print_on(tty, "oops_do, unmark"));
1848 cur = next; 1797 cur = next;
1849 } 1798 }
1850 void* required = _oops_do_mark_nmethods; 1799 void* required = _oops_do_mark_nmethods;
1851 void* observed = Atomic::cmpxchg_ptr(NULL, &_oops_do_mark_nmethods, required); 1800 void* observed = Atomic::cmpxchg_ptr(NULL, &_oops_do_mark_nmethods, required);
1852 guarantee(observed == required, "no races in this sequential code"); 1801 guarantee(observed == required, "no races in this sequential code");
1901 #endif // !SHARK 1850 #endif // !SHARK
1902 } 1851 }
1903 1852
1904 1853
1905 oop nmethod::embeddedOop_at(u_char* p) { 1854 oop nmethod::embeddedOop_at(u_char* p) {
1906 RelocIterator iter(this, p, MIN2(p + oopSize, code_end())); 1855 RelocIterator iter(this, p, p + 1);
1907 while (iter.next()) 1856 while (iter.next())
1908 if (iter.type() == relocInfo::oop_type) { 1857 if (iter.type() == relocInfo::oop_type) {
1909 return iter.oop_reloc()->oop_value(); 1858 return iter.oop_reloc()->oop_value();
1910 } 1859 }
1911 return NULL; 1860 return NULL;
2191 guarantee(cb != NULL && cb->is_nmethod(), "bad pc for a nmethod found"); 2140 guarantee(cb != NULL && cb->is_nmethod(), "bad pc for a nmethod found");
2192 _nm = (nmethod*)cb; 2141 _nm = (nmethod*)cb;
2193 lock_nmethod(_nm); 2142 lock_nmethod(_nm);
2194 } 2143 }
2195 2144
2196 void nmethodLocker::lock_nmethod(nmethod* nm) { 2145 // Only JvmtiDeferredEvent::compiled_method_unload_event()
2146 // should pass zombie_ok == true.
2147 void nmethodLocker::lock_nmethod(nmethod* nm, bool zombie_ok) {
2197 if (nm == NULL) return; 2148 if (nm == NULL) return;
2198 Atomic::inc(&nm->_lock_count); 2149 Atomic::inc(&nm->_lock_count);
2199 guarantee(!nm->is_zombie(), "cannot lock a zombie method"); 2150 guarantee(zombie_ok || !nm->is_zombie(), "cannot lock a zombie method");
2200 } 2151 }
2201 2152
2202 void nmethodLocker::unlock_nmethod(nmethod* nm) { 2153 void nmethodLocker::unlock_nmethod(nmethod* nm) {
2203 if (nm == NULL) return; 2154 if (nm == NULL) return;
2204 Atomic::dec(&nm->_lock_count); 2155 Atomic::dec(&nm->_lock_count);
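The locker is a plain atomic reference count with a zombie guard; the new zombie_ok parameter exists only so the JVMTI deferred CompiledMethodUnload path can keep a zombie pinned while the event is delivered. A reduced RAII sketch with stand-in names (FakeNMethod and ScopedNMethodLock are illustrative, not HotSpot types):

    #include <atomic>
    #include <cassert>

    struct FakeNMethod {
      std::atomic<int> lock_count{0};   // > 0 keeps the sweeper from flushing the code
      bool zombie = false;
    };

    class ScopedNMethodLock {
      FakeNMethod* _nm;
    public:
      explicit ScopedNMethodLock(FakeNMethod* nm, bool zombie_ok = false) : _nm(nm) {
        if (_nm == nullptr) return;
        _nm->lock_count.fetch_add(1);
        assert(zombie_ok || !_nm->zombie);  // mirrors the guarantee() in lock_nmethod()
      }
      ~ScopedNMethodLock() {
        if (_nm != nullptr) _nm->lock_count.fetch_sub(1);
      }
    };

    void example(FakeNMethod* nm) {
      ScopedNMethodLock lock(nm);   // pinned for the scope, released on exit
      // ... safe to walk nm's code and metadata here ...
    }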
2395 2346
2396 void nmethod::print() const { 2347 void nmethod::print() const {
2397 ResourceMark rm; 2348 ResourceMark rm;
2398 ttyLocker ttyl; // keep the following output all in one block 2349 ttyLocker ttyl; // keep the following output all in one block
2399 2350
2400 tty->print("Compiled "); 2351 tty->print("Compiled method ");
2401 2352
2402 if (is_compiled_by_c1()) { 2353 if (is_compiled_by_c1()) {
2403 tty->print("(c1) "); 2354 tty->print("(c1) ");
2404 } else if (is_compiled_by_c2()) { 2355 } else if (is_compiled_by_c2()) {
2405 tty->print("(c2) "); 2356 tty->print("(c2) ");
2407 tty->print("(shark) "); 2358 tty->print("(shark) ");
2408 } else { 2359 } else {
2409 tty->print("(nm) "); 2360 tty->print("(nm) ");
2410 } 2361 }
2411 2362
2412 print_on(tty, "nmethod"); 2363 print_on(tty, NULL);
2413 tty->cr(); 2364
2414 if (WizardMode) { 2365 if (WizardMode) {
2415 tty->print("((nmethod*) "INTPTR_FORMAT ") ", this); 2366 tty->print("((nmethod*) "INTPTR_FORMAT ") ", this);
2416 tty->print(" for method " INTPTR_FORMAT , (address)method()); 2367 tty->print(" for method " INTPTR_FORMAT , (address)method());
2417 tty->print(" { "); 2368 tty->print(" { ");
2418 if (is_in_use()) tty->print("in_use "); 2369 if (is_in_use()) tty->print("in_use ");
2795 } 2746 }
2796 2747
2797 #ifndef PRODUCT 2748 #ifndef PRODUCT
2798 2749
2799 void nmethod::print_value_on(outputStream* st) const { 2750 void nmethod::print_value_on(outputStream* st) const {
2800 print_on(st, "nmethod"); 2751 st->print("nmethod");
2752 print_on(st, NULL);
2801 } 2753 }
2802 2754
2803 void nmethod::print_calls(outputStream* st) { 2755 void nmethod::print_calls(outputStream* st) {
2804 RelocIterator iter(this); 2756 RelocIterator iter(this);
2805 while (iter.next()) { 2757 while (iter.next()) {