changeset 7976:ff5401ad5635

Merge
author minqi
date Sat, 02 Feb 2013 03:51:01 -0800
parents 9fe95b01ad32 (diff) 65b632b77a97 (current diff)
children 879c6de913d6
diffstat 15 files changed, 190 insertions(+), 32 deletions(-)
--- a/src/share/tools/whitebox/sun/hotspot/WhiteBox.java	Fri Feb 01 22:41:34 2013 -0800
+++ b/src/share/tools/whitebox/sun/hotspot/WhiteBox.java	Sat Feb 02 03:51:01 2013 -0800
@@ -76,4 +76,9 @@
   public native long    g1NumFreeRegions();
   public native int     g1RegionSize();
   public native Object[]    parseCommandLine(String commandline, DiagnosticCommand[] args);
+
+  // NMT
+  public native boolean NMTAllocTest();
+  public native boolean NMTFreeTestMemory();
+  public native boolean NMTWaitForDataMerge();
 }
--- a/src/share/vm/memory/allocation.hpp	Fri Feb 01 22:41:34 2013 -0800
+++ b/src/share/vm/memory/allocation.hpp	Sat Feb 02 03:51:01 2013 -0800
@@ -145,9 +145,10 @@
   mtChunk             = 0x0B00,  // chunk that holds content of arenas
   mtJavaHeap          = 0x0C00,  // Java heap
   mtClassShared       = 0x0D00,  // class data sharing
-  mt_number_of_types  = 0x000D,  // number of memory types (mtDontTrack
+  mtTest              = 0x0E00,  // Test type for verifying NMT
+  mt_number_of_types  = 0x000E,  // number of memory types (mtDontTrack
                                  // is not included as a valid type)
-  mtDontTrack         = 0x0E00,  // memory we do not or cannot track
+  mtDontTrack         = 0x0F00,  // memory we do not or cannot track
   mt_masks            = 0x7F00,
 
   // object type mask
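
The memory type tag lives in the high byte of a MEMFLAGS value (mt_masks = 0x7F00), which is why adding mtTest at 0x0E00 also means bumping mt_number_of_types to 0x000E and sliding mtDontTrack up to 0x0F00. A minimal sketch of how such a tag can be extracted and mapped to a table index, using only the constants visible above (the helper names are hypothetical, not HotSpot's):

    #include <cassert>

    // Constants from the enum above.
    const int mt_masks           = 0x7F00;
    const int mtTest             = 0x0E00;
    const int mt_number_of_types = 0x000E;

    // Hypothetical helpers illustrating the encoding.
    inline int flags_to_type(int flags) { return flags & mt_masks; }
    inline int type_to_index(int type)  { return (type >> 8) - 1; }

    int main() {
      // mtTest is the 14th tracked type, so it maps to index 13 and
      // stays below mt_number_of_types.
      assert(flags_to_type(mtTest | 0x03) == mtTest);  // low bits: object type
      assert(type_to_index(mtTest) == 13);
      assert(type_to_index(mtTest) < mt_number_of_types);
      return 0;
    }
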
--- a/src/share/vm/oops/instanceKlass.hpp	Fri Feb 01 22:41:34 2013 -0800
+++ b/src/share/vm/oops/instanceKlass.hpp	Sat Feb 02 03:51:01 2013 -0800
@@ -256,6 +256,16 @@
   // JVMTI fields can be moved to their own structure - see 6315920
   unsigned char * _cached_class_file_bytes;       // JVMTI: cached class file, before retransformable agent modified it in CFLH
   jint            _cached_class_file_len;         // JVMTI: length of above
+
+  volatile u2     _idnum_allocated_count;         // JNI/JVMTI: increments with the addition of methods, old ids don't change
+
+  // Class states are defined as ClassState (see above).
+  // Place _init_state here to utilize the unused 2-byte gap after
+  // _idnum_allocated_count.
+  u1              _init_state;                    // state of class
+  u1              _reference_type;                // reference type
+
+
   JvmtiCachedClassFieldMap* _jvmti_cached_class_field_map;  // JVMTI: used during heap iteration
 
   // Method array.
@@ -281,15 +291,6 @@
   //     ...
   Array<u2>*      _fields;
 
-  volatile u2     _idnum_allocated_count;         // JNI/JVMTI: increments with the addition of methods, old ids don't change
-
-  // Class states are defined as ClassState (see above).
-  // Place the _init_state here to utilize the unused 2-byte after
-  // _idnum_allocated_count.
-  u1              _init_state;                    // state of class
-
-  u1              _reference_type;                // reference type
-
   // embedded Java vtable follows here
   // embedded Java itables follows here
   // embedded static fields follows here
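
The move above is a packing fix: _cached_class_file_len is a jint, which on LP64 leaves a 4-byte hole before the next pointer-sized field, and the relocated u2 + u1 + u1 trio fills that hole exactly. An illustrative (non-HotSpot) sketch of the effect; the byte counts in the comments assume an LP64 ABI:

    #include <stdint.h>

    // Before: the lone 4-byte length field leaves 4 bytes of padding,
    // and the u2 stranded between pointers costs another padded slot.
    struct Before {
      void*    cached_bytes;   // _cached_class_file_bytes
      int32_t  cached_len;     // _cached_class_file_len  (+4 padding)
      void*    field_map;      // _jvmti_cached_class_field_map
      uint16_t idnum;          // _idnum_allocated_count  (+6 padding)
      void*    methods;        // _methods
    };                         // 40 bytes on LP64

    // After: u2 + u1 + u1 slot in directly behind the jint.
    struct After {
      void*    cached_bytes;
      int32_t  cached_len;
      uint16_t idnum;
      uint8_t  init_state;     // _init_state
      uint8_t  ref_type;       // _reference_type
      void*    field_map;
      void*    methods;
    };                         // 32 bytes on LP64

    static_assert(sizeof(After) <= sizeof(Before), "packing never loses");
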
--- a/src/share/vm/oops/klass.hpp	Fri Feb 01 22:41:34 2013 -0800
+++ b/src/share/vm/oops/klass.hpp	Sat Feb 02 03:51:01 2013 -0800
@@ -75,11 +75,11 @@
 //    [class_loader_data]
 //    [modifier_flags]
 //    [access_flags  ]
-//    [verify_count  ] - not in product
-//    [alloc_count   ]
 //    [last_biased_lock_bulk_revocation_time] (64 bits)
 //    [prototype_header]
 //    [biased_lock_revocation_count]
+//    [verify_count  ] - not in product
+//    [alloc_count   ]
 //    [_modified_oops]
 //    [_accumulated_modified_oops]
 //    [trace_id]
@@ -165,18 +165,18 @@
   jint        _modifier_flags;  // Processed access flags, for use by Class.getModifiers.
   AccessFlags _access_flags;    // Access flags. The class/interface distinction is stored here.
 
+  // Biased locking implementation and statistics
+  // (the 64-bit chunk goes first, to avoid some fragmentation)
+  jlong    _last_biased_lock_bulk_revocation_time;
+  markOop  _prototype_header;   // Used when biased locking is both enabled and disabled for this type
+  jint     _biased_lock_revocation_count;
+
 #ifndef PRODUCT
   int           _verify_count;  // to avoid redundant verifies
 #endif
 
   juint    _alloc_count;        // allocation profiling support
 
-  // Biased locking implementation and statistics
-  // (the 64-bit chunk goes first, to avoid some fragmentation)
-  jlong    _last_biased_lock_bulk_revocation_time;
-  markOop  _prototype_header;   // Used when biased locking is both enabled and disabled for this type
-  jint     _biased_lock_revocation_count;
-
   TRACE_DEFINE_KLASS_TRACE_ID;
 
   // Remembered sets support for the oops in the klasses.
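
The klass.hpp reorder serves the same goal, per the moved comment: placing the 64-bit biased-locking chunk first keeps the jlong naturally aligned, and the two trailing ints (_verify_count, present only in non-product builds, and _alloc_count) can then share one 8-byte slot. A compact sketch of the alignment effect, again assuming LP64:

    #include <stdint.h>

    // A lone int before an 8-byte-aligned jlong forces 4 bytes of padding.
    struct Fragmented {
      int32_t verify_count;      // +4 padding before the jlong
      int64_t revocation_time;
      int32_t alloc_count;       // +4 trailing padding
    };                           // 24 bytes on LP64

    // With the 64-bit field first, the two ints share one slot.
    struct Packed {
      int64_t revocation_time;
      int32_t verify_count;
      int32_t alloc_count;
    };                           // 16 bytes on LP64

    static_assert(sizeof(Packed) <= sizeof(Fragmented), "64-bit chunk first");
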
--- a/src/share/vm/oops/method.hpp	Fri Feb 01 22:41:34 2013 -0800
+++ b/src/share/vm/oops/method.hpp	Sat Feb 02 03:51:01 2013 -0800
@@ -128,8 +128,8 @@
  InvocationCounter _backedge_counter;           // Incremented before each backedge taken - used to trigger frequency-based optimizations
 
 #ifdef TIERED
+  float             _rate;                        // Events (invocation and backedge counter increments) per millisecond
   jlong             _prev_time;                   // Previous time the rate was acquired
-  float             _rate;                        // Events (invocation and backedge counter increments) per millisecond
 #endif
 
 #ifndef PRODUCT
--- a/src/share/vm/prims/whitebox.cpp	Fri Feb 01 22:41:34 2013 -0800
+++ b/src/share/vm/prims/whitebox.cpp	Sat Feb 02 03:51:01 2013 -0800
@@ -43,6 +43,10 @@
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #endif // !SERIALGC
 
+#if INCLUDE_NMT
+#include "services/memTracker.hpp"
+#endif // INCLUDE_NMT
+
 bool WhiteBox::_used = false;
 
 WB_ENTRY(jlong, WB_GetObjectAddress(JNIEnv* env, jobject o, jobject obj))
@@ -110,6 +114,60 @@
 WB_END
 #endif // !SERIALGC
 
+#if INCLUDE_NMT
+// Keep track of the 3 allocations in NMTAllocTest so we can free them later
+// on and verify that they're not visible anymore
+static void* nmtMtTest1 = NULL, *nmtMtTest2 = NULL, *nmtMtTest3 = NULL;
+
+// Allocate memory using the test memory type so that we can verify that
+// NMT tracks it correctly
+WB_ENTRY(jboolean, WB_NMTAllocTest(JNIEnv* env))
+  void *mem;
+
+  if (!MemTracker::is_on() || MemTracker::shutdown_in_progress()) {
+    return false;
+  }
+
+  // Allocate 2 * 128k + 256k + 1024k and free the 1024k one to make sure we track
+  // everything correctly. Total should be 512k held alive.
+  nmtMtTest1 = os::malloc(128 * 1024, mtTest);
+  mem = os::malloc(1024 * 1024, mtTest);
+  nmtMtTest2 = os::malloc(256 * 1024, mtTest);
+  os::free(mem, mtTest);
+  nmtMtTest3 = os::malloc(128 * 1024, mtTest);
+
+  return true;
+WB_END
+
+// Free the memory allocated by NMTAllocTest
+WB_ENTRY(jboolean, WB_NMTFreeTestMemory(JNIEnv* env))
+
+  if (nmtMtTest1 == NULL || nmtMtTest2 == NULL || nmtMtTest3 == NULL) {
+    return false;
+  }
+
+  os::free(nmtMtTest1, mtTest);
+  nmtMtTest1 = NULL;
+  os::free(nmtMtTest2, mtTest);
+  nmtMtTest2 = NULL;
+  os::free(nmtMtTest3, mtTest);
+  nmtMtTest3 = NULL;
+
+  return true;
+WB_END
+
+// Block until the current generation of NMT data has been merged; used to reliably test the NMT feature
+WB_ENTRY(jboolean, WB_NMTWaitForDataMerge(JNIEnv* env))
+
+  if (!MemTracker::is_on() || MemTracker::shutdown_in_progress()) {
+    return false;
+  }
+
+  return MemTracker::wbtest_wait_for_data_merge();
+WB_END
+
+#endif // INCLUDE_NMT
+
 // Some convenience methods to deal with objects from Java
 int WhiteBox::offset_for_field(const char* field_name, oop object,
     Symbol* signature_symbol) {
@@ -177,6 +235,11 @@
   {CC"g1NumFreeRegions",   CC"()J",                   (void*)&WB_G1NumFreeRegions  },
   {CC"g1RegionSize",       CC"()I",                   (void*)&WB_G1RegionSize      },
 #endif // !SERIALGC
+#if INCLUDE_NMT
+  {CC"NMTAllocTest",       CC"()Z",                   (void*)&WB_NMTAllocTest      },
+  {CC"NMTFreeTestMemory",  CC"()Z",                   (void*)&WB_NMTFreeTestMemory },
+  {CC"NMTWaitForDataMerge",CC"()Z",                   (void*)&WB_NMTWaitForDataMerge},
+#endif // INCLUDE_NMT
 };
 
 #undef CC
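
Each entry of the table above pairs a Java-visible name from WhiteBox.java with its JNI signature and C++ entry point; "()Z" declares a no-argument method returning jboolean. A minimal standalone sketch of the same registration pattern, using only standard JNI calls (the class and method names here are illustrative, not HotSpot's):

    #include <jni.h>

    static jboolean JNICALL MyAllocTest(JNIEnv* env, jobject wb) {
      return JNI_TRUE;
    }

    // Same shape as the dispatch table above: {name, signature, fnPtr}.
    static JNINativeMethod methods[] = {
      {(char*)"NMTAllocTest", (char*)"()Z", (void*)&MyAllocTest},
    };

    // Typically invoked from JNI_OnLoad with the target class resolved.
    jint register_methods(JNIEnv* env, jclass wb_class) {
      return env->RegisterNatives(wb_class, methods,
                                  sizeof(methods) / sizeof(methods[0]));
    }
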
--- a/src/share/vm/services/memBaseline.cpp	Fri Feb 01 22:41:34 2013 -0800
+++ b/src/share/vm/services/memBaseline.cpp	Sat Feb 02 03:51:01 2013 -0800
@@ -40,6 +40,7 @@
   {mtNMT,        "Memory Tracking"},
   {mtChunk,      "Pooled Free Chunks"},
   {mtClassShared,"Shared spaces for classes"},
+  {mtTest,       "Test"},
   {mtNone,       "Unknown"}  // It can happen when type tagging records are lagging
                              // behind
 };
--- a/src/share/vm/services/memPtr.cpp	Fri Feb 01 22:41:34 2013 -0800
+++ b/src/share/vm/services/memPtr.cpp	Sat Feb 02 03:51:01 2013 -0800
@@ -27,8 +27,8 @@
 #include "services/memTracker.hpp"
 
 volatile jint SequenceGenerator::_seq_number = 1;
+volatile unsigned long SequenceGenerator::_generation = 1;
 NOT_PRODUCT(jint SequenceGenerator::_max_seq_number = 1;)
-DEBUG_ONLY(volatile unsigned long SequenceGenerator::_generation = 0;)
 
 jint SequenceGenerator::next() {
   jint seq = Atomic::add(1, &_seq_number);
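
This change (together with the matching memPtr.hpp hunk below) promotes the generation counter from debug-only to product code, since the worker thread and the new whitebox API now rely on it: sequence numbers restart at 1 on every reset(), while the generation number only grows. A simplified model of that protocol, with std::atomic standing in for HotSpot's Atomic class:

    #include <atomic>

    class SeqGen {                        // simplified SequenceGenerator
     public:
      // Atomic::add returns the new value; fetch_add returns the old one.
      static int next() { return _seq.fetch_add(1) + 1; }
      static unsigned long current_generation() { return _gen.load(); }
      // In HotSpot, reset() runs only at a safepoint.
      static void reset() {
        _seq.store(1);
        _gen.fetch_add(1);                // a new generation begins
      }
     private:
      static std::atomic<int>           _seq;
      static std::atomic<unsigned long> _gen;
    };

    std::atomic<int>           SeqGen::_seq(1);
    std::atomic<unsigned long> SeqGen::_gen(1);

    int main() {
      SeqGen::next();                     // sequence advances to 2
      SeqGen::reset();                    // sequence restarts, generation 2
      return SeqGen::current_generation() == 2 ? 0 : 1;
    }
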
--- a/src/share/vm/services/memPtr.hpp	Fri Feb 01 22:41:34 2013 -0800
+++ b/src/share/vm/services/memPtr.hpp	Sat Feb 02 03:51:01 2013 -0800
@@ -47,16 +47,16 @@
   static void reset() {
     assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required");
     _seq_number = 1;
-    DEBUG_ONLY(_generation ++;)
+    _generation ++;
   };
 
-  DEBUG_ONLY(static unsigned long current_generation() { return (unsigned long)_generation; })
+  static unsigned long current_generation() { return _generation; }
   NOT_PRODUCT(static jint max_seq_num() { return _max_seq_number; })
 
  private:
-  static volatile jint _seq_number;
-  NOT_PRODUCT(static jint _max_seq_number; )
-  DEBUG_ONLY(static volatile unsigned long _generation; )
+  static volatile jint             _seq_number;
+  static volatile unsigned long    _generation;
+  NOT_PRODUCT(static jint          _max_seq_number; )
 };
 
 /*
--- a/src/share/vm/services/memRecorder.cpp	Fri Feb 01 22:41:34 2013 -0800
+++ b/src/share/vm/services/memRecorder.cpp	Sat Feb 02 03:51:01 2013 -0800
@@ -55,7 +55,7 @@
 MemRecorder::MemRecorder() {
   assert(MemTracker::is_on(), "Native memory tracking is off");
   Atomic::inc(&_instance_count);
-  debug_only(set_generation();)
+  set_generation();
 
   if (MemTracker::track_callsite()) {
     _pointer_records = new (std::nothrow)FixedSizeMemPointerArray<SeqMemPointerRecordEx,
@@ -151,11 +151,12 @@
 }
 
 
-#ifdef ASSERT
 void MemRecorder::set_generation() {
   _generation = SequenceGenerator::current_generation();
 }
 
+#ifdef ASSERT
+
 void MemRecorder::check_dup_seq(jint seq) const {
   MemPointerArrayIteratorImpl itr(_pointer_records);
   MemPointerRecord* rc = (MemPointerRecord*)itr.current();
--- a/src/share/vm/services/memRecorder.hpp	Fri Feb 01 22:41:34 2013 -0800
+++ b/src/share/vm/services/memRecorder.hpp	Sat Feb 02 03:51:01 2013 -0800
@@ -213,7 +213,7 @@
   // used for linked list
   MemRecorder*             _next;
   // active recorder can only record a certain generation data
-  debug_only(unsigned long _generation;)
+  unsigned long            _generation;
 
  protected:
   _NOINLINE_ MemRecorder();
@@ -251,6 +251,8 @@
 
   SequencedRecordIterator pointer_itr();
 
+  // return the generation to which this recorder belongs
+  unsigned long get_generation() const { return _generation; }
  protected:
   // number of MemRecorder instance
   static volatile jint _instance_count;
@@ -263,7 +265,7 @@
   static int sort_record_fn(const void* e1, const void* e2);
 
   debug_only(void check_dup_seq(jint seq) const;)
-  debug_only(void set_generation();)
+  void set_generation();
 };
 
 #endif // SHARE_VM_SERVICES_MEM_RECORDER_HPP
--- a/src/share/vm/services/memTrackWorker.cpp	Fri Feb 01 22:41:34 2013 -0800
+++ b/src/share/vm/services/memTrackWorker.cpp	Sat Feb 02 03:51:01 2013 -0800
@@ -91,6 +91,8 @@
   MemSnapshot* snapshot = MemTracker::get_snapshot();
   assert(snapshot != NULL, "Worker should not be started");
   MemRecorder* rec;
+  unsigned long processing_generation = 0;
+  bool          worker_idle = false;
 
   while (!MemTracker::shutdown_in_progress()) {
     NOT_PRODUCT(_last_gen_in_use = generations_in_use();)
@@ -100,6 +102,12 @@
       rec = _gen[_head].next_recorder();
     }
     if (rec != NULL) {
+      if (rec->get_generation() != processing_generation || worker_idle) {
+        processing_generation = rec->get_generation();
+        worker_idle = false;
+        MemTracker::set_current_processing_generation(processing_generation);
+      }
+
       // merge the recorder into staging area
       if (!snapshot->merge(rec)) {
         MemTracker::shutdown(MemTracker::NMT_out_of_memory);
@@ -129,6 +137,9 @@
           MemTracker::shutdown(MemTracker::NMT_out_of_memory);
         }
       } else {
+        // worker thread is idle
+        worker_idle = true;
+        MemTracker::report_worker_idle();
         snapshot->wait(1000);
         ThreadCritical tc;
         // check if more data arrived
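
The hunk above is the worker-side half of the new handshake: the thread publishes which generation it is currently merging and flags itself idle when no recorder is queued, and the whitebox waiter in memTracker.cpp (below) polls both. A simplified standalone model of one iteration, with std::atomic in place of HotSpot's volatile statics and the recorder queue stubbed out:

    #include <atomic>
    #include <cstddef>

    struct Recorder { unsigned long generation; };

    static std::atomic<unsigned long> processing_generation(0);
    static std::atomic<bool>          worker_idle(false);

    static Recorder* next_recorder() { return NULL; }  // stub: empty queue
    static void merge(Recorder* rec) { /* merge into snapshot */ }

    void worker_step() {
      Recorder* rec = next_recorder();
      if (rec != NULL) {
        // entering a new generation (or resuming after idling):
        // publish it before merging, as the patch above does
        if (rec->generation != processing_generation.load() ||
            worker_idle.load()) {
          worker_idle.store(false);
          processing_generation.store(rec->generation);
        }
        merge(rec);
      } else {
        worker_idle.store(true);   // report idle, then wait for data
      }
    }

    int main() { worker_step(); return 0; }
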
--- a/src/share/vm/services/memTrackWorker.hpp	Fri Feb 01 22:41:34 2013 -0800
+++ b/src/share/vm/services/memTrackWorker.hpp	Sat Feb 02 03:51:01 2013 -0800
@@ -107,6 +107,7 @@
   NOT_PRODUCT(int _merge_count;)
   NOT_PRODUCT(int _last_gen_in_use;)
 
+  // how many generations are queued
   inline int generations_in_use() const {
     return (_tail >= _head ? (_tail - _head + 1) : (MAX_GENERATIONS - (_head - _tail) + 1));
   }
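
generations_in_use() computes the occupancy of the circular generation buffer: the inclusive range from _head to _tail, with wraparound. A small check of the arithmetic (the MAX_GENERATIONS value here is illustrative; HotSpot defines its own):

    #include <cassert>

    const int MAX_GENERATIONS = 512;   // illustrative value

    int generations_in_use(int head, int tail) {
      return tail >= head ? (tail - head + 1)
                          : (MAX_GENERATIONS - (head - tail) + 1);
    }

    int main() {
      assert(generations_in_use(0, 0) == 1);      // one generation queued
      assert(generations_in_use(3, 7) == 5);      // no wrap: 3..7
      assert(generations_in_use(510, 1) == 4);    // wrapped: 510, 511, 0, 1
      return 0;
    }
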
--- a/src/share/vm/services/memTracker.cpp	Fri Feb 01 22:41:34 2013 -0800
+++ b/src/share/vm/services/memTracker.cpp	Sat Feb 02 03:51:01 2013 -0800
@@ -29,6 +29,7 @@
 #include "runtime/mutexLocker.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/threadCritical.hpp"
+#include "runtime/vm_operations.hpp"
 #include "services/memPtr.hpp"
 #include "services/memReporter.hpp"
 #include "services/memTracker.hpp"
@@ -65,6 +66,8 @@
 MemTracker::ShutdownReason      MemTracker::_reason = NMT_shutdown_none;
 int                             MemTracker::_thread_count = 255;
 volatile jint                   MemTracker::_pooled_recorder_count = 0;
+volatile unsigned long          MemTracker::_processing_generation = 0;
+volatile bool                   MemTracker::_worker_thread_idle = false;
 debug_only(intx                 MemTracker::_main_thread_tid = 0;)
 NOT_PRODUCT(volatile jint       MemTracker::_pending_recorder_count = 0;)
 
@@ -279,7 +282,7 @@
      }
      cur_head->set_next(NULL);
      Atomic::dec(&_pooled_recorder_count);
-     debug_only(cur_head->set_generation();)
+     cur_head->set_generation();
      return cur_head;
   }
 }
@@ -570,6 +573,51 @@
   return false;
 }
 
+// Whitebox API for blocking until the current generation of NMT data has been merged
+bool MemTracker::wbtest_wait_for_data_merge() {
+  // NMT can't be shut down while we're holding _query_lock
+  MutexLockerEx lock(_query_lock, true);
+  assert(_worker_thread != NULL, "Invalid query");
+  // the generation at query time; NMT will spin until this generation has been processed
+  unsigned long generation_at_query_time = SequenceGenerator::current_generation();
+  unsigned long current_processing_generation = _processing_generation;
+  // true if the generation counter wrapped around (overflowed)
+  bool generation_overflown = (generation_at_query_time < current_processing_generation);
+  long generations_to_wrap = MAX_UNSIGNED_LONG - current_processing_generation;
+  // spin
+  while (!shutdown_in_progress()) {
+    if (!generation_overflown) {
+      if (current_processing_generation > generation_at_query_time) {
+        return true;
+      }
+    } else {
+      assert(generations_to_wrap >= 0, "Sanity check");
+      long current_generations_to_wrap = MAX_UNSIGNED_LONG - current_processing_generation;
+      assert(current_generations_to_wrap >= 0, "Sanity check");
+      // overflowing an unsigned long should take a long time, so the to_wrap check should be sufficient
+      if (current_generations_to_wrap > generations_to_wrap &&
+          current_processing_generation > generation_at_query_time) {
+        return true;
+      }
+    }
+
+    // if the worker thread is idle but the generation is not advancing,
+    // there is no safepoint to let NMT advance the generation; force one.
+    if (_worker_thread_idle) {
+      VM_ForceSafepoint vfs;
+      VMThread::execute(&vfs);
+    }
+    MemSnapshot* snapshot = get_snapshot();
+    if (snapshot == NULL) {
+      return false;
+    }
+    snapshot->wait(1000);
+    current_processing_generation = _processing_generation;
+  }
+  // We end up here if NMT is shutting down before our data has been merged
+  return false;
+}
+
 // compare memory usage between current snapshot and baseline
 bool MemTracker::compare_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
   MutexLockerEx lock(_query_lock, true);
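
The trickiest part of wbtest_wait_for_data_merge() is the overflow branch: if the sequence generation wrapped around before the query, the waiter must first see the processing generation wrap too (its distance to the wrap point grows) before the plain greater-than comparison becomes meaningful again. A standalone sketch of that predicate, using the same MAX_UNSIGNED_LONG convention as the patch:

    #include <cassert>

    const unsigned long MAX_UNSIGNED_LONG = (unsigned long)(-1);

    // Has the processing generation moved past the generation observed
    // at query time, allowing for one wrap of the counter in between?
    bool merged_past(unsigned long gen_at_query,
                     unsigned long initial_processing,
                     unsigned long current_processing) {
      bool overflown = gen_at_query < initial_processing;
      if (!overflown) {
        return current_processing > gen_at_query;
      }
      // processing must wrap (distance to the wrap point grows) and
      // then pass the query generation
      unsigned long to_wrap_then = MAX_UNSIGNED_LONG - initial_processing;
      unsigned long to_wrap_now  = MAX_UNSIGNED_LONG - current_processing;
      return to_wrap_now > to_wrap_then && current_processing > gen_at_query;
    }

    int main() {
      assert( merged_past(5, 3, 6));                      // normal progress
      assert(!merged_past(5, 3, 5));                      // not yet past
      assert( merged_past(2, MAX_UNSIGNED_LONG - 1, 3));  // across a wrap
      return 0;
    }
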
--- a/src/share/vm/services/memTracker.hpp	Fri Feb 01 22:41:34 2013 -0800
+++ b/src/share/vm/services/memTracker.hpp	Sat Feb 02 03:51:01 2013 -0800
@@ -91,9 +91,10 @@
    static bool compare_memory_usage(BaselineOutputer& out, size_t unit,
             bool summary_only = true) { }
 
+   static bool wbtest_wait_for_data_merge() { return false; }
+
    static inline void sync() { }
    static inline void thread_exiting(JavaThread* thread) { }
-
 };
 
 
@@ -111,6 +112,10 @@
 
 extern bool NMT_track_callsite;
 
+#ifndef MAX_UNSIGNED_LONG
+#define MAX_UNSIGNED_LONG    (unsigned long)(-1)
+#endif
+
 #ifdef ASSERT
   #define DEBUG_CALLER_PC  (NMT_track_callsite ? os::get_caller_pc(2) : 0)
 #else
@@ -380,6 +385,11 @@
   static bool compare_memory_usage(BaselineOutputer& out, size_t unit,
            bool summary_only = true);
 
+  // whitebox testing support: ensures that all memory activities that
+  // happened before this method call are reflected in the snapshot
+  // database.
+  static bool wbtest_wait_for_data_merge();
+
   // sync is called within global safepoint to synchronize nmt data
   static void sync();
 
@@ -432,6 +442,15 @@
   static void create_record_in_recorder(address addr, MEMFLAGS type,
                    size_t size, address pc, JavaThread* thread);
 
+  static void set_current_processing_generation(unsigned long generation) {
+    _worker_thread_idle = false;
+    _processing_generation = generation;
+  }
+
+  static void report_worker_idle() {
+    _worker_thread_idle = true;
+  }
+
  private:
   // global memory snapshot
   static MemSnapshot*     _snapshot;
@@ -483,6 +502,11 @@
   static volatile enum NMTStates   _state;
   // the reason for shutting down nmt
   static enum ShutdownReason       _reason;
+  // the generation that NMT is processing
+  static volatile unsigned long    _processing_generation;
+  // NMT is still processing the current generation, but there are no
+  // more recorders to process, so the worker reports the idle state
+  static volatile bool             _worker_thread_idle;
 };
 
 #endif // !INCLUDE_NMT