comparison src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp @ 6197:d2a62e0f25eb

6995781: Native Memory Tracking (Phase 1)
7151532: DCmd for hotspot native memory tracking
Summary: Implementation of native memory tracking phase 1, which tracks VM native memory usage, and related DCmd
Reviewed-by: acorn, coleenp, fparain
author zgu
date Thu, 28 Jun 2012 17:03:16 -0400
parents c84ee870e0b9
children 3a431b605145
comparing 6174:74533f63b116 with 6197:d2a62e0f25eb
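The change in every hunk below is the same mechanical one: each C-heap allocation and its matching free gain a memory-type argument (mtGC for this GC file) so that native memory tracking can attribute the bytes to a VM subsystem. As a rough illustration of the idea only -- the MemType enum, Header struct, and tracked_* wrappers here are hypothetical stand-ins, not the HotSpot os::malloc/os::free or NMT implementation -- a per-type accounting wrapper might look like this:

#include <cstdio>
#include <cstdlib>

// Hypothetical memory-type tags, mirroring the mtGC/mtThread/... flags that
// this changeset threads through every C-heap allocation and free.
enum MemType { mtNone, mtGC, mtThread, mtInternal, mtTypeCount };

static size_t g_used[mtTypeCount] = {0};  // bytes currently attributed to each type

struct Header { size_t size; MemType type; };

// malloc/free wrappers that take an extra MemType argument -- the same call
// shape as the os::malloc(size, mtGC) / os::free(ptr, mtGC) calls in the hunks below.
static void* tracked_malloc(size_t size, MemType t) {
  Header* h = static_cast<Header*>(std::malloc(sizeof(Header) + size));
  if (h == NULL) return NULL;              // caller decides how to handle failure
  h->size = size;
  h->type = t;
  g_used[t] += size;                       // charge the requesting subsystem
  return h + 1;                            // usable memory starts past the header
}

static void tracked_free(void* p, MemType t) {
  if (p == NULL) return;
  Header* h = static_cast<Header*>(p) - 1;
  (void)t;                                 // kept to match the call shape; the header is authoritative
  g_used[h->type] -= h->size;              // credit the same bucket on free
  std::free(h);
}

int main() {
  void* hot_cache = tracked_malloc(1024, mtGC);     // e.g. a GC-side cache
  std::printf("mtGC native memory: %zu bytes\n", g_used[mtGC]);
  tracked_free(hot_cache, mtGC);
  return 0;
}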
@@ -77,11 +77,11 @@
   _n_worker_threads = thread_num();
   // We need one extra thread to do the young gen rset size sampling.
   _n_threads = _n_worker_threads + 1;
   reset_threshold_step();
 
-  _threads = NEW_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _n_threads);
+  _threads = NEW_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _n_threads, mtGC);
   int worker_id_offset = (int)DirtyCardQueueSet::num_par_ids();
   ConcurrentG1RefineThread *next = NULL;
   for (int i = _n_threads - 1; i >= 0; i--) {
     ConcurrentG1RefineThread* t = new ConcurrentG1RefineThread(this, next, worker_id_offset, i);
     assert(t != NULL, "Conc refine should have been created");
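In the same spirit, the NEW_C_HEAP_ARRAY / FREE_C_HEAP_ARRAY calls in this file now pair an element type with a memory-type tag. A small sketch of that shape, reusing the hypothetical tracked_* wrappers from above (these templates are illustrative, not the HotSpot macros themselves):

// Illustrative analogue of NEW_C_HEAP_ARRAY(type, length, mtGC) and
// FREE_C_HEAP_ARRAY(type, ptr, mtGC): the element type fixes the byte count,
// the MemType fixes the accounting bucket.
template <typename T>
static T* new_c_heap_array(size_t length, MemType t) {
  return static_cast<T*>(tracked_malloc(length * sizeof(T), t));
}

template <typename T>
static void free_c_heap_array(T* arr, MemType t) {
  tracked_free(arr, t);
}

// Usage in the shape of new line 82 above (ConcurrentG1RefineThread named
// only for illustration):
//   _threads = new_c_heap_array<ConcurrentG1RefineThread*>(_n_threads, mtGC);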
@@ -155,11 +155,11 @@
     _ct_bot = _ct_bs->byte_for_const(_g1h->reserved_region().start());
 
     _def_use_cache = true;
     _use_cache = true;
     _hot_cache_size = (1 << G1ConcRSLogCacheSize);
-    _hot_cache = NEW_C_HEAP_ARRAY(jbyte*, _hot_cache_size);
+    _hot_cache = NEW_C_HEAP_ARRAY(jbyte*, _hot_cache_size, mtGC);
     _n_hot = 0;
     _hot_cache_idx = 0;
 
     // For refining the cards in the hot cache in parallel
     int n_workers = (ParallelGCThreads > 0 ?
@@ -189,22 +189,22 @@
 ConcurrentG1Refine::~ConcurrentG1Refine() {
   if (G1ConcRSLogCacheSize > 0) {
     // Please see the comment in allocate_card_count_cache
     // for why we call os::malloc() and os::free() directly.
     assert(_card_counts != NULL, "Logic");
-    os::free(_card_counts);
+    os::free(_card_counts, mtGC);
     assert(_card_epochs != NULL, "Logic");
-    os::free(_card_epochs);
+    os::free(_card_epochs, mtGC);
 
     assert(_hot_cache != NULL, "Logic");
-    FREE_C_HEAP_ARRAY(jbyte*, _hot_cache);
+    FREE_C_HEAP_ARRAY(jbyte*, _hot_cache, mtGC);
   }
   if (_threads != NULL) {
     for (int i = 0; i < _n_threads; i++) {
       delete _threads[i];
     }
-    FREE_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _threads);
+    FREE_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _threads, mtGC);
   }
 }
 
 void ConcurrentG1Refine::threads_do(ThreadClosure *tc) {
   if (_threads != NULL) {
@@ -434,21 +434,21 @@
   assert(*epochs == NULL, "out param");
 
   size_t counts_size = n * sizeof(CardCountCacheEntry);
   size_t epochs_size = n * sizeof(CardEpochCacheEntry);
 
-  *counts = (CardCountCacheEntry*) os::malloc(counts_size);
+  *counts = (CardCountCacheEntry*) os::malloc(counts_size, mtGC);
   if (*counts == NULL) {
     // allocation was unsuccessful
     return false;
   }
 
-  *epochs = (CardEpochCacheEntry*) os::malloc(epochs_size);
+  *epochs = (CardEpochCacheEntry*) os::malloc(epochs_size, mtGC);
   if (*epochs == NULL) {
     // allocation was unsuccessful - free counts array
     assert(*counts != NULL, "must be");
-    os::free(*counts);
+    os::free(*counts, mtGC);
     *counts = NULL;
     return false;
   }
 
   // We successfully allocated both counts and epochs
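The NULL checks in this hunk show why raw os::malloc is used here rather than the allocation macros: a failed allocation is reported to the caller (return false) instead of being treated as fatal, and a half-built pair is rolled back first. A standalone sketch of that allocate-both-or-neither pattern, again using the hypothetical tracked_* wrappers from the first example; the entry types and helper name are made up for illustration, not the HotSpot CardCountCacheEntry/CardEpochCacheEntry definitions:

struct CountEntry { unsigned _count; };
struct EpochEntry { unsigned long long _epoch; };

static bool allocate_pair(size_t n, CountEntry** counts, EpochEntry** epochs) {
  *counts = static_cast<CountEntry*>(tracked_malloc(n * sizeof(CountEntry), mtGC));
  if (*counts == NULL) {
    return false;                          // nothing allocated yet, nothing to undo
  }
  *epochs = static_cast<EpochEntry*>(tracked_malloc(n * sizeof(EpochEntry), mtGC));
  if (*epochs == NULL) {
    tracked_free(*counts, mtGC);           // roll back the first array
    *counts = NULL;
    return false;
  }
  return true;                             // both arrays live, both charged to mtGC
}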
@@ -477,12 +477,12 @@
 
   if (allocate_card_count_cache(cache_size, &counts, &epochs)) {
     // Allocation was successful.
     // We can just free the old arrays; we're
     // not interested in preserving the contents
-    if (_card_counts != NULL) os::free(_card_counts);
-    if (_card_epochs != NULL) os::free(_card_epochs);
+    if (_card_counts != NULL) os::free(_card_counts, mtGC);
+    if (_card_epochs != NULL) os::free(_card_epochs, mtGC);
 
     // Cache the size of the arrays and the index that got us there.
     _n_card_counts = cache_size;
     _cache_size_index = cache_size_idx;
 