diff src/share/vm/code/nmethod.cpp @ 2491:0654ee04b214

Merge with OpenJDK.
author Thomas Wuerthinger <thomas.wuerthinger@oracle.com>
date Fri, 22 Apr 2011 15:30:53 +0200
parents 1cfdec4e7f07 3d58a4983660
children 75a99b4f1c98
--- a/src/share/vm/code/nmethod.cpp	Fri Apr 22 15:22:45 2011 +0200
+++ b/src/share/vm/code/nmethod.cpp	Fri Apr 22 15:30:53 2011 +0200
@@ -28,6 +28,7 @@
 #include "code/nmethod.hpp"
 #include "code/scopeDesc.hpp"
 #include "compiler/abstractCompiler.hpp"
+#include "compiler/compileBroker.hpp"
 #include "compiler/compileLog.hpp"
 #include "compiler/compilerOracle.hpp"
 #include "compiler/disassembler.hpp"
@@ -170,7 +171,7 @@
   int pc_desc_resets;   // number of resets (= number of caches)
   int pc_desc_queries;  // queries to nmethod::find_pc_desc
   int pc_desc_approx;   // number of those which have approximate true
-  int pc_desc_repeats;  // number of _last_pc_desc hits
+  int pc_desc_repeats;  // number of _pc_descs[0] hits
   int pc_desc_hits;     // number of LRU cache hits
   int pc_desc_tests;    // total number of PcDesc examinations
   int pc_desc_searches; // total number of quasi-binary search steps
@@ -190,15 +191,10 @@
 } nmethod_stats;
 #endif //PRODUCT
 
+
 //---------------------------------------------------------------------------------
 
 
-// The _unwind_handler is a special marker address, which says that
-// for given exception oop and address, the frame should be removed
-// as the tuple cannot be caught in the nmethod
-address ExceptionCache::_unwind_handler = (address) -1;
-
-
 ExceptionCache::ExceptionCache(Handle exception, address pc, address handler) {
   assert(pc != NULL, "Must be non null");
   assert(exception.not_null(), "Must be non null");
@@ -283,40 +279,44 @@
 
 void PcDescCache::reset_to(PcDesc* initial_pc_desc) {
   if (initial_pc_desc == NULL) {
-    _last_pc_desc = NULL;  // native method
+    _pc_descs[0] = NULL; // native method; no PcDescs at all
     return;
   }
   NOT_PRODUCT(++nmethod_stats.pc_desc_resets);
   // reset the cache by filling it with benign (non-null) values
   assert(initial_pc_desc->pc_offset() < 0, "must be sentinel");
-  _last_pc_desc = initial_pc_desc + 1;  // first valid one is after sentinel
   for (int i = 0; i < cache_size; i++)
     _pc_descs[i] = initial_pc_desc;
 }
 
 PcDesc* PcDescCache::find_pc_desc(int pc_offset, bool approximate) {
   NOT_PRODUCT(++nmethod_stats.pc_desc_queries);
-  NOT_PRODUCT(if (approximate)  ++nmethod_stats.pc_desc_approx);
+  NOT_PRODUCT(if (approximate) ++nmethod_stats.pc_desc_approx);
+
+  // Note: one might think that caching the most recently
+  // read value separately would be a win, but one would be
+  // wrong.  When many threads are updating it, the cache
+  // line it's in would bounce between caches, negating
+  // any benefit.
 
   // In order to prevent race conditions do not load cache elements
   // repeatedly, but use a local copy:
   PcDesc* res;
 
-  // Step one:  Check the most recently returned value.
-  res = _last_pc_desc;
-  if (res == NULL)  return NULL;  // native method; no PcDescs at all
+  // Step one:  Check the most recently added value.
+  res = _pc_descs[0];
+  if (res == NULL) return NULL;  // native method; no PcDescs at all
   if (match_desc(res, pc_offset, approximate)) {
     NOT_PRODUCT(++nmethod_stats.pc_desc_repeats);
     return res;
   }
 
-  // Step two:  Check the LRU cache.
-  for (int i = 0; i < cache_size; i++) {
+  // Step two:  Check the rest of the LRU cache.
+  for (int i = 1; i < cache_size; ++i) {
     res = _pc_descs[i];
-    if (res->pc_offset() < 0)  break;  // optimization: skip empty cache
+    if (res->pc_offset() < 0) break;  // optimization: skip empty cache
     if (match_desc(res, pc_offset, approximate)) {
       NOT_PRODUCT(++nmethod_stats.pc_desc_hits);
-      _last_pc_desc = res;  // record this cache hit in case of repeat
       return res;
     }
   }
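
With _last_pc_desc removed, slot 0 of _pc_descs doubles as the most-recently-added entry: step one checks it, step two scans the remaining slots, and a hit no longer writes anything back (the old "_last_pc_desc = res" store on every hit is exactly the cache-line ping-pong the new comment warns about). A minimal standalone sketch of the resulting shift-to-front cache, using a hypothetical Desc type and cache size rather than the real HotSpot classes:

    #include <cstddef>

    // Hypothetical stand-in for PcDesc; a negative pc_offset marks the sentinel.
    struct Desc { int pc_offset; };

    static const int cache_size = 4;     // small fixed window, as in the diff

    struct DescCache {
      Desc* slots[cache_size];           // slots[0] is the most recently added

      void reset_to(Desc* sentinel) {    // fill with a benign non-null sentinel
        for (int i = 0; i < cache_size; ++i) slots[i] = sentinel;
      }

      Desc* find(int pc_offset) {
        // Load each slot once into a local; re-reading a slot could race
        // with a concurrent add().
        for (int i = 0; i < cache_size; ++i) {
          Desc* d = slots[i];
          if (d->pc_offset < 0) break;             // sentinel: rest is empty
          if (d->pc_offset == pc_offset) return d; // hit, no write-back needed
        }
        return NULL;                               // miss: caller does a full search
      }

      void add(Desc* d) {
        // Shift entries down one slot: the newest lands at index 0,
        // the oldest falls off the end.
        for (int i = 0; i < cache_size; ++i) {
          Desc* next = slots[i];
          slots[i] = d;
          d = next;
        }
      }
    };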
@@ -327,24 +327,23 @@
 
 void PcDescCache::add_pc_desc(PcDesc* pc_desc) {
   NOT_PRODUCT(++nmethod_stats.pc_desc_adds);
-  // Update the LRU cache by shifting pc_desc forward:
+  // Update the LRU cache by shifting pc_desc forward.
   for (int i = 0; i < cache_size; i++)  {
     PcDesc* next = _pc_descs[i];
     _pc_descs[i] = pc_desc;
     pc_desc = next;
   }
-  // Note:  Do not update _last_pc_desc.  It fronts for the LRU cache.
 }
 
 // adjust pcs_size so that it is a multiple of both oopSize and
 // sizeof(PcDesc) (assumes that if sizeof(PcDesc) is not a multiple
 // of oopSize, then 2*sizeof(PcDesc) is)
-static int  adjust_pcs_size(int pcs_size) {
+static int adjust_pcs_size(int pcs_size) {
   int nsize = round_to(pcs_size,   oopSize);
   if ((nsize % sizeof(PcDesc)) != 0) {
     nsize = pcs_size + sizeof(PcDesc);
   }
-  assert((nsize %  oopSize) == 0, "correct alignment");
+  assert((nsize % oopSize) == 0, "correct alignment");
   return nsize;
 }
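
The arithmetic relies on the stated precondition: pcs_size is already a multiple of sizeof(PcDesc), and if sizeof(PcDesc) is not itself a multiple of oopSize then 2*sizeof(PcDesc) is. Rounding up to oopSize can then only break PcDesc alignment when the desc count is odd, and appending one more PcDesc's worth fixes both alignments at once. A worked example under assumed sizes (oopSize == 8, sizeof(PcDesc) == 12):

    #include <cassert>
    #include <cstdio>

    // Assumed sizes: 12 is not a multiple of 8, but 2 * 12 == 24 is.
    static const int oopSize  = 8;
    static const int descSize = 12;              // stand-in for sizeof(PcDesc)

    static int round_to(int x, int align) {      // round x up to a multiple of align
      return (x + align - 1) / align * align;
    }

    static int adjust_pcs_size(int pcs_size) {   // mirrors the function above
      int nsize = round_to(pcs_size, oopSize);
      if ((nsize % descSize) != 0) {
        nsize = pcs_size + descSize;             // note: adds to pcs_size, not nsize
      }
      assert((nsize % oopSize) == 0);
      return nsize;
    }

    int main() {
      // 36 bytes = 3 descs: rounding to 8 gives 40, which is not a multiple
      // of 12, so one more desc is added; 48 is a multiple of both 8 and 12.
      std::printf("%d\n", adjust_pcs_size(36));  // prints 48
      std::printf("%d\n", adjust_pcs_size(24));  // already aligned: prints 24
      return 0;
    }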
 
@@ -471,6 +470,7 @@
 
 
 nmethod* nmethod::new_native_nmethod(methodHandle method,
+  int compile_id,
   CodeBuffer *code_buffer,
   int vep_offset,
   int frame_complete,
@@ -487,7 +487,7 @@
     offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
     offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
     nm = new (native_nmethod_size)
-      nmethod(method(), native_nmethod_size, &offsets,
+      nmethod(method(), native_nmethod_size, compile_id, &offsets,
               code_buffer, frame_size,
               basic_lock_owner_sp_offset, basic_lock_sp_offset,
               oop_maps);
@@ -612,6 +612,7 @@
 nmethod::nmethod(
   methodOop method,
   int nmethod_size,
+  int compile_id,
   CodeOffsets* offsets,
   CodeBuffer* code_buffer,
   int frame_size,
@@ -646,7 +647,7 @@
     _handler_table_offset    = _dependencies_offset;
     _nul_chk_table_offset    = _handler_table_offset;
     _nmethod_end_offset      = _nul_chk_table_offset;
-    _compile_id              = 0;  // default
+    _compile_id              = compile_id;
     _comp_level              = CompLevel_none;
     _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
     _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
@@ -655,6 +656,9 @@
     _pc_desc_cache.reset_to(NULL);
 
     code_buffer->copy_oops_to(this);
+    if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
+      CodeCache::add_scavenge_root_nmethod(this);
+    }
     debug_only(verify_scavenge_root_oops());
     CodeCache::commit(this);
   }
@@ -767,7 +771,7 @@
 
 void* nmethod::operator new(size_t size, int nmethod_size) {
   // Always leave some room in the CodeCache for I2C/C2I adapters
-  if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) return NULL;
+  if (CodeCache::largest_free_block() < CodeCacheMinimumFreeSpace) return NULL;
   return CodeCache::allocate(nmethod_size);
 }
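
The guard now keys off the largest contiguous free block rather than the total unallocated capacity: a fragmented code cache can report plenty of free space in aggregate while no single block is big enough to honor the adapter reserve. A toy illustration with made-up block sizes (minimum_free standing in for CodeCacheMinimumFreeSpace):

    #include <cstddef>
    #include <cstdio>

    int main() {
      // Hypothetical free list of a fragmented code heap, sizes in bytes.
      size_t free_blocks[] = { 4096, 1024, 512, 4096 };
      const size_t n = sizeof(free_blocks) / sizeof(free_blocks[0]);

      size_t total = 0, largest = 0;
      for (size_t i = 0; i < n; ++i) {
        total += free_blocks[i];
        if (free_blocks[i] > largest) largest = free_blocks[i];
      }

      const size_t minimum_free = 8192;
      std::printf("total=%zu largest=%zu\n", total, largest);  // total=9728 largest=4096
      std::printf("old check passes: %d\n", (int)(total   >= minimum_free));  // 1
      std::printf("new check passes: %d\n", (int)(largest >= minimum_free));  // 0
      return 0;
    }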
 
@@ -940,72 +944,11 @@
 #undef LOG_OFFSET
 
 
-void nmethod::print_compilation(outputStream *st, const char *method_name, const char *title,
-                                methodOop method, bool is_blocking, int compile_id, int bci, int comp_level) {
-  bool is_synchronized = false, has_xhandler = false, is_native = false;
-  int code_size = -1;
-  if (method != NULL) {
-    is_synchronized = method->is_synchronized();
-    has_xhandler    = method->has_exception_handler();
-    is_native       = method->is_native();
-    code_size       = method->code_size();
-  }
-  // print compilation number
-  st->print("%7d %3d", (int)tty->time_stamp().milliseconds(), compile_id);
-
-  // print method attributes
-  const bool is_osr = bci != InvocationEntryBci;
-  const char blocking_char  = is_blocking     ? 'b' : ' ';
-  const char compile_type   = is_osr          ? '%' : ' ';
-  const char sync_char      = is_synchronized ? 's' : ' ';
-  const char exception_char = has_xhandler    ? '!' : ' ';
-  const char native_char    = is_native       ? 'n' : ' ';
-  st->print("%c%c%c%c%c ", compile_type, sync_char, exception_char, blocking_char, native_char);
-  if (TieredCompilation) {
-    st->print("%d ", comp_level);
-  }
-
-  // print optional title
-  bool do_nl = false;
-  if (title != NULL) {
-    int tlen = (int) strlen(title);
-    bool do_nl = false;
-    if (tlen > 0 && title[tlen-1] == '\n') { tlen--; do_nl = true; }
-    st->print("%.*s", tlen, title);
-  } else {
-    do_nl = true;
-  }
-
-  // print method name string if given
-  if (method_name != NULL) {
-    st->print(method_name);
-  } else {
-    // otherwise as the method to print itself
-    if (method != NULL && !Universe::heap()->is_gc_active()) {
-      method->print_short_name(st);
-    } else {
-      st->print("(method)");
-    }
-  }
-
-  if (method != NULL) {
-    // print osr_bci if any
-    if (is_osr) st->print(" @ %d", bci);
-    // print method size
-    st->print(" (%d bytes)", code_size);
-  }
-  if (do_nl) st->cr();
-}
-
 // Print out more verbose output usually for a newly created nmethod.
-void nmethod::print_on(outputStream* st, const char* title) const {
+void nmethod::print_on(outputStream* st, const char* msg) const {
   if (st != NULL) {
     ttyLocker ttyl;
-    print_compilation(st, /*method_name*/NULL, title,
-                      method(), /*is_blocking*/false,
-                      compile_id(),
-                      is_osr_method() ? osr_entry_bci() : InvocationEntryBci,
-                      comp_level());
+    CompileTask::print_compilation(st, this, msg);
     if (WizardMode) st->print(" (" INTPTR_FORMAT ")", this);
   }
 }
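
The hand-rolled formatter above is replaced by the shared CompileTask::print_compilation(), so nmethods and queued compile tasks print identically. For reference, the removed code emitted: a millisecond timestamp, the compile id, five attribute flags ('%' OSR, 's' synchronized, '!' has exception handlers, 'b' blocking, 'n' native), the tier level when TieredCompilation is on, then the method, the OSR bci if any, and the bytecode size. A hypothetical line in that format (method name and numbers invented for illustration):

      87209 4032 %s!   3 com.example.Foo::bar @ 10 (342 bytes)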
@@ -1118,14 +1061,22 @@
 }
 
 
+void nmethod::verify_oop_relocations() {
+  // Ensure that the code matches the current oop values
+  RelocIterator iter(this, NULL, NULL);
+  while (iter.next()) {
+    if (iter.type() == relocInfo::oop_type) {
+      oop_Relocation* reloc = iter.oop_reloc();
+      if (!reloc->oop_is_immediate()) {
+        reloc->verify_oop_relocation();
+      }
+    }
+  }
+}
+
+
 ScopeDesc* nmethod::scope_desc_at(address pc) {
   PcDesc* pd = pc_desc_at(pc);
-#ifdef ASSERT
-  if (pd == NULL) {
-    tty->print_cr(err_msg("Missing scope at relative pc %d of method %s", pc - code_begin(), this->method()->name()->as_C_string()));
-    print_pcs();
-  }
-#endif
   guarantee(pd != NULL, "scope must be present");
   return new ScopeDesc(this, pd->scope_decode_offset(),
                        pd->obj_decode_offset(), pd->should_reexecute(),
@@ -1202,14 +1153,17 @@
   set_stack_traversal_mark(NMethodSweeper::traversal_count());
 }
 
-// Tell if a non-entrant method can be converted to a zombie (i.e., there is no activations on the stack)
+// Tell if a non-entrant method can be converted to a zombie (i.e.,
+// there are no activations on the stack, and it is not in use by
+// the VM or by the ServiceThread)
 bool nmethod::can_not_entrant_be_converted() {
   assert(is_not_entrant(), "must be a non-entrant method");
 
   // Since the nmethod sweeper only does partial sweep the sweeper's traversal
   // count can be greater than the stack traversal count before it hits the
   // nmethod for the second time.
-  return stack_traversal_mark()+1 < NMethodSweeper::traversal_count();
+  return stack_traversal_mark()+1 < NMethodSweeper::traversal_count() &&
+         !is_locked_by_vm();
 }
 
 void nmethod::inc_decompile_count() {
@@ -1308,14 +1262,14 @@
     }
   }
   if (PrintCompilation && _state != unloaded) {
-    print_on(tty, _state == zombie ? "made zombie " : "made not entrant ");
-    tty->cr();
+    print_on(tty, _state == zombie ? "made zombie" : "made not entrant");
   }
 }
 
 // Common functionality for both make_not_entrant and make_zombie
 bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
   assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
+  assert(!is_zombie(), "should not already be a zombie");
 
   // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
   nmethodLocker nml(this);
@@ -1323,11 +1277,6 @@
   No_Safepoint_Verifier nsv;
 
   {
-    // If the method is already zombie there is nothing to do
-    if (is_zombie()) {
-      return false;
-    }
-
     // invalidate osr nmethod before acquiring the patching lock since
     // they both acquire leaf locks and we don't want a deadlock.
     // This logic is equivalent to the logic below for patching the
@@ -1397,13 +1346,12 @@
       flush_dependencies(NULL);
     }
 
-    {
-      // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload event
-      // and it hasn't already been reported for this nmethod then report it now.
-      // (the event may have been reported earilier if the GC marked it for unloading).
-      Pause_No_Safepoint_Verifier pnsv(&nsv);
-      post_compiled_method_unload();
-    }
+    // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload
+    // event and it hasn't already been reported for this nmethod then
+    // report it now (the event may have been reported earlier if the GC
+    // marked it for unloading). JvmtiDeferredEventQueue support means
+    // we no longer go to a safepoint here.
+    post_compiled_method_unload();
 
 #ifdef ASSERT
     // It's no longer safe to access the oops section since zombie
@@ -1588,7 +1536,7 @@
   if (_jmethod_id != NULL && JvmtiExport::should_post_compiled_method_unload()) {
     assert(!unload_reported(), "already unloaded");
     JvmtiDeferredEvent event =
-      JvmtiDeferredEvent::compiled_method_unload_event(
+      JvmtiDeferredEvent::compiled_method_unload_event(this,
           _jmethod_id, insts_begin());
     if (SafepointSynchronize::is_at_safepoint()) {
       // Don't want to take the queueing lock. Add it as pending and
@@ -1820,7 +1768,7 @@
           break;
       }
       // Mark was clear when we first saw this guy.
-      NOT_PRODUCT(if (TraceScavenge)  print_on(tty, "oops_do, mark\n"));
+      NOT_PRODUCT(if (TraceScavenge)  print_on(tty, "oops_do, mark"));
       return false;
     }
   }
@@ -1844,7 +1792,8 @@
     assert(cur != NULL, "not NULL-terminated");
     nmethod* next = cur->_oops_do_mark_link;
     cur->_oops_do_mark_link = NULL;
-    NOT_PRODUCT(if (TraceScavenge)  cur->print_on(tty, "oops_do, unmark\n"));
+    cur->fix_oop_relocations();
+    NOT_PRODUCT(if (TraceScavenge)  cur->print_on(tty, "oops_do, unmark"));
     cur = next;
   }
   void* required = _oops_do_mark_nmethods;
@@ -1903,7 +1852,7 @@
 
 
 oop nmethod::embeddedOop_at(u_char* p) {
-  RelocIterator iter(this, p, MIN2(p + oopSize, code_end()));
+  RelocIterator iter(this, p, p + 1);
   while (iter.next())
     if (iter.type() == relocInfo::oop_type) {
       return iter.oop_reloc()->oop_value();
@@ -2193,10 +2142,12 @@
   lock_nmethod(_nm);
 }
 
-void nmethodLocker::lock_nmethod(nmethod* nm) {
+// Only JvmtiDeferredEvent::compiled_method_unload_event()
+// should pass zombie_ok == true.
+void nmethodLocker::lock_nmethod(nmethod* nm, bool zombie_ok) {
   if (nm == NULL)  return;
   Atomic::inc(&nm->_lock_count);
-  guarantee(!nm->is_zombie(), "cannot lock a zombie method");
+  guarantee(zombie_ok || !nm->is_zombie(), "cannot lock a zombie method");
 }
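
nmethodLocker is a pin rather than a mutex: an atomic count that keeps the sweeper from flushing or zombifying the method while anyone holds it, with the new zombie_ok flag as a narrow escape hatch for the one JVMTI caller named above. A simplified sketch of the pattern, using std::atomic stand-ins instead of HotSpot's Atomic class:

    #include <atomic>
    #include <cassert>
    #include <cstddef>

    // Simplified stand-in for nmethod: a lock count plus a state flag.
    struct Method {
      std::atomic<int> lock_count;
      bool zombie;
      Method() : lock_count(0), zombie(false) {}
    };

    // Pin a method; only a caller that may legitimately see a zombie
    // (per the diff, JvmtiDeferredEvent::compiled_method_unload_event())
    // passes zombie_ok == true.
    void lock_method(Method* m, bool zombie_ok = false) {
      if (m == NULL) return;
      m->lock_count.fetch_add(1);
      assert(zombie_ok || !m->zombie);   // cannot lock a zombie otherwise
    }

    void unlock_method(Method* m) {
      if (m == NULL) return;
      int prev = m->lock_count.fetch_sub(1);
      assert(prev > 0);                  // unmatched unlock
    }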
 
 void nmethodLocker::unlock_nmethod(nmethod* nm) {
@@ -2397,7 +2348,7 @@
   ResourceMark rm;
   ttyLocker ttyl;   // keep the following output all in one block
 
-  tty->print("Compiled ");
+  tty->print("Compiled method ");
 
   if (is_compiled_by_c1()) {
     tty->print("(c1) ");
@@ -2409,8 +2360,8 @@
     tty->print("(nm) ");
   }
 
-  print_on(tty, "nmethod");
-  tty->cr();
+  print_on(tty, NULL);
+
   if (WizardMode) {
     tty->print("((nmethod*) "INTPTR_FORMAT ") ", this);
     tty->print(" for method " INTPTR_FORMAT , (address)method());
@@ -2797,7 +2748,8 @@
 #ifndef PRODUCT
 
 void nmethod::print_value_on(outputStream* st) const {
-  print_on(st, "nmethod");
+  st->print("nmethod");
+  print_on(st, NULL);
 }
 
 void nmethod::print_calls(outputStream* st) {