# HG changeset patch
# User zgu
# Date 1357688843 28800
# Node ID 0c93d4818214c9407ca9c855ec21016be00f716d
# Parent 185a2c979a0e11150d9312759695c80049e3ab5a
# Parent 37a3e8b7a1e9a96f3fddb927bffaaac4b5d798a2
Merge

diff -r 185a2c979a0e -r 0c93d4818214 src/share/vm/oops/instanceKlass.cpp
--- a/src/share/vm/oops/instanceKlass.cpp Tue Jan 08 13:44:10 2013 -0800
+++ b/src/share/vm/oops/instanceKlass.cpp Tue Jan 08 15:47:23 2013 -0800
@@ -161,6 +161,8 @@
 
 #endif // ndef DTRACE_ENABLED
 
+volatile int InstanceKlass::_total_instanceKlass_count = 0;
+
 Klass* InstanceKlass::allocate_instance_klass(ClassLoaderData* loader_data,
                                               int vtable_len,
                                               int itable_len,
@@ -204,6 +206,7 @@
                                    access_flags,
                                    !host_klass.is_null());
   }
+  Atomic::inc(&_total_instanceKlass_count);
   return ik;
 }
 
@@ -2331,6 +2334,9 @@
   if (_array_name != NULL)  _array_name->decrement_refcount();
   if (_source_file_name != NULL) _source_file_name->decrement_refcount();
   if (_source_debug_extension != NULL) FREE_C_HEAP_ARRAY(char, _source_debug_extension, mtClass);
+
+  assert(_total_instanceKlass_count >= 1, "Sanity check");
+  Atomic::dec(&_total_instanceKlass_count);
 }
 
 void InstanceKlass::set_source_file_name(Symbol* n) {
diff -r 185a2c979a0e -r 0c93d4818214 src/share/vm/oops/instanceKlass.hpp
--- a/src/share/vm/oops/instanceKlass.hpp Tue Jan 08 13:44:10 2013 -0800
+++ b/src/share/vm/oops/instanceKlass.hpp Tue Jan 08 15:47:23 2013 -0800
@@ -31,6 +31,7 @@
 #include "oops/fieldInfo.hpp"
 #include "oops/instanceOop.hpp"
 #include "oops/klassVtable.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/handles.hpp"
 #include "runtime/os.hpp"
 #include "utilities/accessFlags.hpp"
@@ -170,6 +171,11 @@
     initialization_error                 // error happened during initialization
   };
 
+  static int number_of_instance_classes() { return _total_instanceKlass_count; }
+
+ private:
+  static volatile int _total_instanceKlass_count;
+
  protected:
   // Protection domain.
   oop             _protection_domain;
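
The instanceKlass changes above make InstanceKlass maintain its own lifetime counter: allocate_instance_klass() atomically increments _total_instanceKlass_count, the deallocation path asserts and decrements it, and number_of_instance_classes() exposes the value so NMT can report the number of defined instance classes without going through SystemDictionary. The following is an illustrative sketch only, not HotSpot code: the same counter pattern expressed with std::atomic, with TrackedClass standing in for InstanceKlass and fetch_add/fetch_sub standing in for Atomic::inc/Atomic::dec.

// Illustrative sketch only, not HotSpot code.
#include <atomic>
#include <cassert>

class TrackedClass {
  static std::atomic<int> _count;   // analogous to _total_instanceKlass_count
 public:
  TrackedClass()  { _count.fetch_add(1, std::memory_order_relaxed); }
  ~TrackedClass() {
    assert(_count.load() >= 1 && "Sanity check");
    _count.fetch_sub(1, std::memory_order_relaxed);
  }
  // analogous to InstanceKlass::number_of_instance_classes()
  static int number_of_instances() { return _count.load(); }
};

std::atomic<int> TrackedClass::_count{0};
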
diff -r 185a2c979a0e -r 0c93d4818214 src/share/vm/services/memBaseline.cpp
--- a/src/share/vm/services/memBaseline.cpp Tue Jan 08 13:44:10 2013 -0800
+++ b/src/share/vm/services/memBaseline.cpp Tue Jan 08 15:47:23 2013 -0800
@@ -22,7 +22,6 @@
  *
  */
 #include "precompiled.hpp"
-#include "classfile/systemDictionary.hpp"
 #include "memory/allocation.hpp"
 #include "services/memBaseline.hpp"
 #include "services/memTracker.hpp"
@@ -349,7 +348,7 @@
   reset();
   _baselined = baseline_malloc_summary(snapshot._alloc_ptrs) &&
                baseline_vm_summary(snapshot._vm_ptrs);
-  _number_of_classes = SystemDictionary::number_of_classes();
+  _number_of_classes = snapshot.number_of_classes();
 
   if (!summary_only && MemTracker::track_callsite() && _baselined) {
     _baselined = baseline_malloc_details(snapshot._alloc_ptrs) &&
diff -r 185a2c979a0e -r 0c93d4818214 src/share/vm/services/memRecorder.cpp
--- a/src/share/vm/services/memRecorder.cpp Tue Jan 08 13:44:10 2013 -0800
+++ b/src/share/vm/services/memRecorder.cpp Tue Jan 08 15:47:23 2013 -0800
@@ -84,10 +84,13 @@
     }
     delete _pointer_records;
   }
-  if (_next != NULL) {
-    delete _next;
+  // delete all linked recorders
+  while (_next != NULL) {
+    MemRecorder* tmp = _next;
+    _next = _next->next();
+    tmp->set_next(NULL);
+    delete tmp;
   }
-
   Atomic::dec(&_instance_count);
 }
 
diff -r 185a2c979a0e -r 0c93d4818214 src/share/vm/services/memRecorder.hpp
--- a/src/share/vm/services/memRecorder.hpp Tue Jan 08 13:44:10 2013 -0800
+++ b/src/share/vm/services/memRecorder.hpp Tue Jan 08 15:47:23 2013 -0800
@@ -203,6 +203,7 @@
   friend class MemSnapshot;
   friend class MemTracker;
   friend class MemTrackWorker;
+  friend class GenerationData;
 
  protected:
   // the array that holds memory records
diff -r 185a2c979a0e -r 0c93d4818214 src/share/vm/services/memSnapshot.cpp
--- a/src/share/vm/services/memSnapshot.cpp Tue Jan 08 13:44:10 2013 -0800
+++ b/src/share/vm/services/memSnapshot.cpp Tue Jan 08 15:47:23 2013 -0800
@@ -384,6 +384,7 @@
   _staging_area.init();
   _lock = new (std::nothrow) Mutex(Monitor::max_nonleaf - 1, "memSnapshotLock");
   NOT_PRODUCT(_untracked_count = 0;)
+  _number_of_classes = 0;
 }
 
 MemSnapshot::~MemSnapshot() {
@@ -479,7 +480,7 @@
 
 
 // promote data to next generation
-bool MemSnapshot::promote() {
+bool MemSnapshot::promote(int number_of_classes) {
   assert(_alloc_ptrs != NULL && _vm_ptrs != NULL, "Just check");
   assert(_staging_area.malloc_data() != NULL && _staging_area.vm_data() != NULL,
     "Just check");
@@ -496,6 +497,7 @@
 
   NOT_PRODUCT(check_malloc_pointers();)
   _staging_area.clear();
+  _number_of_classes = number_of_classes;
   return promoted;
 }
 
diff -r 185a2c979a0e -r 0c93d4818214 src/share/vm/services/memSnapshot.hpp
--- a/src/share/vm/services/memSnapshot.hpp Tue Jan 08 13:44:10 2013 -0800
+++ b/src/share/vm/services/memSnapshot.hpp Tue Jan 08 15:47:23 2013 -0800
@@ -355,6 +355,9 @@
   // the lock to protect this snapshot
   Monitor* _lock;
 
+  // the number of instance classes
+  int _number_of_classes;
+
   NOT_PRODUCT(size_t _untracked_count;)
 
   friend class MemBaseline;
@@ -375,8 +378,9 @@
 
   // merge a per-thread memory recorder into staging area
   bool merge(MemRecorder* rec);
   // promote staged data to snapshot
-  bool promote();
+  bool promote(int number_of_classes);
 
+  int number_of_classes() const { return _number_of_classes; }
   void wait(long timeout) {
     assert(_lock != NULL, "Just check");
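
Two things happen in the services files above: MemBaseline now takes the class count from the snapshot it is baselining instead of querying SystemDictionary at baseline time, and the MemRecorder destructor no longer deletes _next recursively; it walks the chain iteratively, unlinking each recorder before deleting it, so a long chain of recorders cannot recurse once per element. A minimal sketch of that teardown pattern, using a hypothetical Node type rather than MemRecorder:

// Minimal sketch with a hypothetical Node type, not the HotSpot class.
#include <cstddef>

struct Node {
  Node* _next;
  explicit Node(Node* next = NULL) : _next(next) {}
  ~Node() {
    while (_next != NULL) {
      Node* tmp = _next;
      _next = tmp->_next;   // advance past the node about to be deleted
      tmp->_next = NULL;    // so deleting tmp cannot walk the rest of the chain
      delete tmp;
    }
  }
};
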
diff -r 185a2c979a0e -r 0c93d4818214 src/share/vm/services/memTrackWorker.cpp
--- a/src/share/vm/services/memTrackWorker.cpp Tue Jan 08 13:44:10 2013 -0800
+++ b/src/share/vm/services/memTrackWorker.cpp Tue Jan 08 15:47:23 2013 -0800
@@ -29,6 +29,16 @@
 #include "utilities/decoder.hpp"
 #include "utilities/vmError.hpp"
 
+
+void GenerationData::reset() {
+  _number_of_classes = 0;
+  while (_recorder_list != NULL) {
+    MemRecorder* tmp = _recorder_list;
+    _recorder_list = _recorder_list->next();
+    MemTracker::release_thread_recorder(tmp);
+  }
+}
+
 MemTrackWorker::MemTrackWorker() {
   // create thread uses cgc thread type for now. We should revisit
   // the option, or create new thread type.
@@ -39,7 +49,7 @@
   if (!has_error()) {
     _head = _tail = 0;
     for(int index = 0; index < MAX_GENERATIONS; index ++) {
-      _gen[index] = NULL;
+      ::new ((void*)&_gen[index]) GenerationData();
     }
   }
   NOT_PRODUCT(_sync_point_count = 0;)
@@ -49,10 +59,7 @@
 
 MemTrackWorker::~MemTrackWorker() {
   for (int index = 0; index < MAX_GENERATIONS; index ++) {
-    MemRecorder* rc = _gen[index];
-    if (rc != NULL) {
-      delete rc;
-    }
+    _gen[index].reset();
   }
 }
 
@@ -90,12 +97,7 @@
     {
       // take a recorder from earliest generation in buffer
      ThreadCritical tc;
-      rec = _gen[_head];
-      if (rec != NULL) {
-        _gen[_head] = rec->next();
-      }
-      assert(count_recorder(_gen[_head]) <= MemRecorder::_instance_count,
-        "infinite loop after dequeue");
+      rec = _gen[_head].next_recorder();
     }
     if (rec != NULL) {
       // merge the recorder into staging area
@@ -109,16 +111,20 @@
       // no more recorder to merge, promote staging area
       // to snapshot
       if (_head != _tail) {
+        long number_of_classes;
        {
          ThreadCritical tc;
-          if (_gen[_head] != NULL || _head == _tail) {
+          if (_gen[_head].has_more_recorder() || _head == _tail) {
            continue;
          }
+          number_of_classes = _gen[_head].number_of_classes();
+          _gen[_head].reset();
+
          // done with this generation, increment _head pointer
          _head = (_head + 1) % MAX_GENERATIONS;
        }
        // promote this generation data to snapshot
-        if (!snapshot->promote()) {
+        if (!snapshot->promote(number_of_classes)) {
          // failed to promote, means out of memory
          MemTracker::shutdown(MemTracker::NMT_out_of_memory);
        }
@@ -126,8 +132,8 @@
        snapshot->wait(1000);
        ThreadCritical tc;
        // check if more data arrived
-        if (_gen[_head] == NULL) {
-          _gen[_head] = MemTracker::get_pending_recorders();
+        if (!_gen[_head].has_more_recorder()) {
+          _gen[_head].add_recorders(MemTracker::get_pending_recorders());
        }
      }
    }
@@ -147,7 +153,7 @@
 
 // 1. add all recorders in pending queue to current generation
 // 2. increase generation
-void MemTrackWorker::at_sync_point(MemRecorder* rec) {
+void MemTrackWorker::at_sync_point(MemRecorder* rec, int number_of_classes) {
   NOT_PRODUCT(_sync_point_count ++;)
   assert(count_recorder(rec) <= MemRecorder::_instance_count,
     "pending queue has infinite loop");
@@ -155,23 +161,15 @@
   bool out_of_generation_buffer = false;
   // check shutdown state inside ThreadCritical
   if (MemTracker::shutdown_in_progress()) return;
+
+  _gen[_tail].set_number_of_classes(number_of_classes);
   // append the recorders to the end of the generation
-  if( rec != NULL) {
-    MemRecorder* cur_head = _gen[_tail];
-    if (cur_head == NULL) {
-      _gen[_tail] = rec;
-    } else {
-      while (cur_head->next() != NULL) {
-        cur_head = cur_head->next();
-      }
-      cur_head->set_next(rec);
-    }
-  }
-  assert(count_recorder(rec) <= MemRecorder::_instance_count,
+  _gen[_tail].add_recorders(rec);
+  assert(count_recorder(_gen[_tail].peek()) <= MemRecorder::_instance_count,
     "after add to current generation has infinite loop");
   // we have collected all recorders for this generation. If there is data,
   // we need to increment _tail to start a new generation.
-  if (_gen[_tail] != NULL || _head == _tail) {
+  if (_gen[_tail].has_more_recorder() || _head == _tail) {
     _tail = (_tail + 1) % MAX_GENERATIONS;
     out_of_generation_buffer = (_tail == _head);
   }
@@ -194,7 +192,7 @@
 int MemTrackWorker::count_pending_recorders() const {
   int count = 0;
   for (int index = 0; index < MAX_GENERATIONS; index ++) {
-    MemRecorder* head = _gen[index];
+    MemRecorder* head = _gen[index].peek();
     if (head != NULL) {
       count += count_recorder(head);
     }
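
The worker loop above drains recorders from the generation at _head; once that generation is empty it captures the generation's class count, resets the slot, advances _head, and promotes the staged data together with the count, while the sync point fills the generation at _tail and advances _tail. A simplified sketch of that head/tail discipline, with Ring and kMaxGenerations as stand-ins for the worker's fields and MAX_GENERATIONS (illustration only, not the HotSpot code):

// Simplified stand-in for the worker's circular generation buffer.
static const int kMaxGenerations = 512;   // stands in for MAX_GENERATIONS

struct Ring {
  int _head;   // oldest generation, consumed by the worker
  int _tail;   // current generation, filled at sync points

  Ring() : _head(0), _tail(0) {}

  bool has_generations() const { return _head != _tail; }

  // the worker finished draining the generation at _head: move to the next slot
  void consume_head() { _head = (_head + 1) % kMaxGenerations; }

  // a sync point closed the current generation: open a new one at _tail;
  // returns false when the ring is out of slots (_tail caught up with _head)
  bool open_tail() {
    _tail = (_tail + 1) % kMaxGenerations;
    return _tail != _head;
  }
};
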
diff -r 185a2c979a0e -r 0c93d4818214 src/share/vm/services/memTrackWorker.hpp
--- a/src/share/vm/services/memTrackWorker.hpp Tue Jan 08 13:44:10 2013 -0800
+++ b/src/share/vm/services/memTrackWorker.hpp Tue Jan 08 15:47:23 2013 -0800
@@ -32,17 +32,58 @@
 // Maximum MAX_GENERATIONS generation data can be tracked.
 #define MAX_GENERATIONS 512
+class GenerationData : public _ValueObj {
+ private:
+  int _number_of_classes;
+  MemRecorder* _recorder_list;
+
+ public:
+  GenerationData(): _number_of_classes(0), _recorder_list(NULL) { }
+
+  inline int number_of_classes() const { return _number_of_classes; }
+  inline void set_number_of_classes(long num) { _number_of_classes = num; }
+
+  inline MemRecorder* next_recorder() {
+    if (_recorder_list == NULL) {
+      return NULL;
+    } else {
+      MemRecorder* tmp = _recorder_list;
+      _recorder_list = _recorder_list->next();
+      return tmp;
+    }
+  }
+
+  inline bool has_more_recorder() const {
+    return (_recorder_list != NULL);
+  }
+
+  // add recorders to this generation
+  void add_recorders(MemRecorder* head) {
+    if (head != NULL) {
+      if (_recorder_list == NULL) {
+        _recorder_list = head;
+      } else {
+        MemRecorder* tmp = _recorder_list;
+        for (; tmp->next() != NULL; tmp = tmp->next());
+        tmp->set_next(head);
+      }
+    }
+  }
+
+  void reset();
+
+  NOT_PRODUCT(MemRecorder* peek() const { return _recorder_list; })
+};
 
 class MemTrackWorker : public NamedThread {
  private:
-  // circular buffer. This buffer contains recorders to be merged into global
+  // circular buffer. This buffer contains generation data to be merged into global
   // snaphsot.
-  // Each slot holds a linked list of memory recorders, that contains one
-  // generation of memory data.
-  MemRecorder*  _gen[MAX_GENERATIONS];
-  int           _head, _tail; // head and tail pointers to above circular buffer
+  // Each slot holds a generation
+  GenerationData  _gen[MAX_GENERATIONS];
+  int             _head, _tail; // head and tail pointers to above circular buffer
 
-  bool          _has_error;
+  bool            _has_error;
 
  public:
   MemTrackWorker();
 
@@ -56,7 +97,7 @@
   inline bool has_error() const { return _has_error; }
 
   // task at synchronization point
-  void at_sync_point(MemRecorder* pending_recorders);
+  void at_sync_point(MemRecorder* pending_recorders, int number_of_classes);
 
   // for debugging purpose, they are not thread safe.
   NOT_PRODUCT(static int count_recorder(const MemRecorder* head);)
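
GenerationData bundles what used to be a bare recorder list with the class count recorded for that generation. The sketch below uses stand-in types (Rec, Slot), not the HotSpot classes, and simplifies add_recorders to the empty-slot case; it only shows the intended per-slot protocol: stamp a slot at a sync point, drain it one recorder at a time in the worker, then reset the slot before it is reused.

// Stand-in sketch of the per-slot protocol; Rec and Slot are illustrations.
#include <cstddef>

struct Rec {                     // stands in for MemRecorder
  Rec* next;
  explicit Rec(Rec* n = NULL) : next(n) {}
};

struct Slot {                    // stands in for GenerationData
  int  classes;
  Rec* list;
  Slot() : classes(0), list(NULL) {}

  // sync-point side: record the class count and the pending recorders
  void stamp(int number_of_classes, Rec* pending) {
    classes = number_of_classes;
    list = pending;              // the real code appends to the existing list
  }

  // worker side: detach and return one recorder, or NULL when drained
  Rec* take_one() {
    Rec* r = list;
    if (r != NULL) {
      list = r->next;
      r->next = NULL;
    }
    return r;
  }

  // called once the generation has been promoted
  void reset() {
    classes = 0;
    while (Rec* r = take_one()) delete r;
  }
};
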
diff -r 185a2c979a0e -r 0c93d4818214 src/share/vm/services/memTracker.cpp
--- a/src/share/vm/services/memTracker.cpp Tue Jan 08 13:44:10 2013 -0800
+++ b/src/share/vm/services/memTracker.cpp Tue Jan 08 15:47:23 2013 -0800
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "oops/instanceKlass.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/mutexLocker.hpp"
@@ -485,7 +486,7 @@
       }
       // check _worker_thread with lock to avoid racing condition
       if (_worker_thread != NULL) {
-        _worker_thread->at_sync_point(pending_recorders);
+        _worker_thread->at_sync_point(pending_recorders, InstanceKlass::number_of_instance_classes());
       }
       assert(SequenceGenerator::peek() == 1,
         "Should not have memory activities during sync-point");
diff -r 185a2c979a0e -r 0c93d4818214 src/share/vm/services/memTracker.hpp
--- a/src/share/vm/services/memTracker.hpp Tue Jan 08 13:44:10 2013 -0800
+++ b/src/share/vm/services/memTracker.hpp Tue Jan 08 15:47:23 2013 -0800
@@ -142,6 +142,7 @@
  * MemTracker is the 'gate' class to native memory tracking runtime.
 */
 class MemTracker : AllStatic {
+  friend class GenerationData;
   friend class MemTrackWorker;
   friend class MemSnapshot;
   friend class SyncThreadRecorderClosure;
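
Taken together, the memTracker changes route InstanceKlass::number_of_instance_classes() into at_sync_point(), the count rides along with its generation, MemSnapshot::promote() stores it, and MemBaseline reads it back from the snapshot rather than from the class-loading subsystem. A stand-in sketch of that hand-off; Snapshot and Baseline here are illustrations, not the HotSpot types:

// Stand-in sketch: the class count captured at the sync point is stored when
// the generation is promoted and read back when a baseline is taken, so it
// always matches the promoted memory data.
struct Snapshot {
  int _number_of_classes;
  Snapshot() : _number_of_classes(0) {}
  void promote(int number_of_classes) { _number_of_classes = number_of_classes; }
  int  number_of_classes() const { return _number_of_classes; }
};

struct Baseline {
  int _number_of_classes;
  Baseline() : _number_of_classes(0) {}
  // reads the count recorded with the snapshot instead of querying
  // the class-loading subsystem at baseline time
  void baseline(const Snapshot& snapshot) {
    _number_of_classes = snapshot.number_of_classes();
  }
};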