comparison src/share/vm/gc_implementation/parNew/parNewGeneration.cpp @ 6197:d2a62e0f25eb

6995781: Native Memory Tracking (Phase 1)
7151532: DCmd for hotspot native memory tracking
Summary: Implementation of native memory tracking phase 1, which tracks VM native memory usage, and related DCmd
Reviewed-by: acorn, coleenp, fparain
author zgu
date Thu, 28 Jun 2012 17:03:16 -0400
parents 9d679effd28c
children aaf61e68b255
comparing 6174:74533f63b116 with 6197:d2a62e0f25eb
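The pattern this changeset applies throughout the file is visible in every hunk below: each C-heap allocation and free gains a memory-type tag (mtGC here) so the new native memory tracker can attribute usage to a subsystem. The following is a minimal sketch of that idea, assuming a hypothetical AllocateHeap/FreeHeap pair and illustrative tag names; only the NEW_C_HEAP_ARRAY(type, count, tag) shape is taken from the diff itself.

// Illustrative sketch only -- not HotSpot's allocator. The tag enum and the
// AllocateHeap/FreeHeap helpers are assumptions; the macro shape
// NEW_C_HEAP_ARRAY(type, count, tag) matches the hunks below.
#include <cstdlib>
#include <cstddef>

enum MemoryType { mtNone, mtGC, mtThread, mtTypes };  // assumed tag enum

static size_t g_used_bytes[mtTypes] = {0};            // per-tag running totals

inline void* AllocateHeap(size_t bytes, MemoryType tag) {
  g_used_bytes[tag] += bytes;      // book the allocation to its tag
  return std::malloc(bytes);
}

inline void FreeHeap(void* p, size_t bytes, MemoryType tag) {
  g_used_bytes[tag] -= bytes;      // the matching tag keeps totals balanced
  std::free(p);
}

#define NEW_C_HEAP_ARRAY(type, count, tag) \
  ((type*) AllocateHeap((count) * sizeof(type), tag))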
@@ -57,11 +57,11 @@
 ParScanThreadState::ParScanThreadState(Space* to_space_,
                                        ParNewGeneration* gen_,
                                        Generation* old_gen_,
                                        int thread_num_,
                                        ObjToScanQueueSet* work_queue_set_,
-                                       Stack<oop>* overflow_stacks_,
+                                       Stack<oop, mtGC>* overflow_stacks_,
                                        size_t desired_plab_sz_,
                                        ParallelTaskTerminator& term_) :
   _to_space(to_space_), _old_gen(old_gen_), _young_gen(gen_), _thread_num(thread_num_),
   _work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
   _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
@@ -182,11 +182,11 @@
 
 bool ParScanThreadState::take_from_overflow_stack() {
   assert(ParGCUseLocalOverflow, "Else should not call");
   assert(young_gen()->overflow_list() == NULL, "Error");
   ObjToScanQueue* queue = work_queue();
-  Stack<oop>* const of_stack = overflow_stack();
+  Stack<oop, mtGC>* const of_stack = overflow_stack();
   const size_t num_overflow_elems = of_stack->size();
   const size_t space_available = queue->max_elems() - queue->size();
   const size_t num_take_elems = MIN3(space_available / 4,
                                      ParGCDesiredObjsFromOverflowList,
                                      num_overflow_elems);
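The num_take_elems computation above throttles how much is drained from the overflow stack in one pass. Here is a standalone sketch of the same arithmetic, assuming MIN3 is simply the minimum of three values (in HotSpot it is built from nested MIN2 macros):

#include <algorithm>
#include <cstddef>

// How many elements to move from the overflow stack into the work queue:
// at most a quarter of the queue's free space, capped by the configured
// desire and by what the stack actually holds.
size_t num_take_elems(size_t max_elems, size_t queue_size,
                      size_t desired_from_overflow, size_t overflow_elems) {
  const size_t space_available = max_elems - queue_size;
  return std::min({space_available / 4,
                   desired_from_overflow,
                   overflow_elems});
}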
@@ -295,11 +295,11 @@
   ParScanThreadStateSet(int num_threads,
                         Space& to_space,
                         ParNewGeneration& gen,
                         Generation& old_gen,
                         ObjToScanQueueSet& queue_set,
-                        Stack<oop>* overflow_stacks_,
+                        Stack<oop, mtGC>* overflow_stacks_,
                         size_t desired_plab_sz,
                         ParallelTaskTerminator& term);
 
   ~ParScanThreadStateSet() { TASKQUEUE_STATS_ONLY(reset_stats()); }
 
@@ -329,11 +329,11 @@
 
 
 ParScanThreadStateSet::ParScanThreadStateSet(
   int num_threads, Space& to_space, ParNewGeneration& gen,
   Generation& old_gen, ObjToScanQueueSet& queue_set,
-  Stack<oop>* overflow_stacks,
+  Stack<oop, mtGC>* overflow_stacks,
   size_t desired_plab_sz, ParallelTaskTerminator& term)
   : ResourceArray(sizeof(ParScanThreadState), num_threads),
     _gen(gen), _next_gen(old_gen), _term(term)
 {
   assert(num_threads > 0, "sanity check!");
@@ -647,13 +647,18 @@
   for (uint i2 = 0; i2 < ParallelGCThreads; i2++)
     _task_queues->queue(i2)->initialize();
 
   _overflow_stacks = NULL;
   if (ParGCUseLocalOverflow) {
-    _overflow_stacks = NEW_C_HEAP_ARRAY(Stack<oop>, ParallelGCThreads);
+
+    // typedef to workaround NEW_C_HEAP_ARRAY macro, which can not deal
+    // with ','
+    typedef Stack<oop, mtGC> GCOopStack;
+
+    _overflow_stacks = NEW_C_HEAP_ARRAY(GCOopStack, ParallelGCThreads, mtGC);
     for (size_t i = 0; i < ParallelGCThreads; ++i) {
-      new (_overflow_stacks + i) Stack<oop>();
+      new (_overflow_stacks + i) Stack<oop, mtGC>();
     }
   }
 
   if (UsePerfData) {
     EXCEPTION_MARK;
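The typedef added in this hunk works around a preprocessor limitation: function-like macros split their argument list on top-level commas before the C++ parser ever sees template angle brackets, so Stack<oop, mtGC> would reach NEW_C_HEAP_ARRAY as two separate arguments. A standalone illustration with a hypothetical ALLOC_ARRAY macro:

#include <cstdlib>

template <typename T, int Tag> struct Stack {};

// Two-argument macro in the same shape as the allocation macros above.
#define ALLOC_ARRAY(type, count) \
  ((type*) std::malloc((count) * sizeof(type)))

int main() {
  // ALLOC_ARRAY(Stack<int, 1>, 4);        // error: the preprocessor sees
  //                                       // THREE arguments: "Stack<int",
  //                                       // " 1>", and " 4"
  typedef Stack<int, 1> IntStack;          // fold the type into one token
  IntStack* a = ALLOC_ARRAY(IntStack, 4);  // exactly two arguments again
  std::free(a);
  return 0;
}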
@@ -1399,11 +1404,11 @@
 #ifndef PRODUCT
   Atomic::inc_ptr(&_num_par_pushes);
   assert(_num_par_pushes > 0, "Tautology");
 #endif
   if (from_space_obj->forwardee() == from_space_obj) {
-    oopDesc* listhead = NEW_C_HEAP_ARRAY(oopDesc, 1);
+    oopDesc* listhead = NEW_C_HEAP_ARRAY(oopDesc, 1, mtGC);
     listhead->forward_to(from_space_obj);
     from_space_obj = listhead;
   }
   oop observed_overflow_list = _overflow_list;
   oop cur_overflow_list;
@@ -1551,11 +1556,11 @@
   // space, cur, is not in the Java heap, but rather in the C-heap and should be freed.
   if (!is_in_reserved(cur)) {
     // This can become a scaling bottleneck when there is work queue overflow coincident
     // with promotion failure.
     oopDesc* f = cur;
-    FREE_C_HEAP_ARRAY(oopDesc, f);
+    FREE_C_HEAP_ARRAY(oopDesc, f, mtGC);
   } else if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) {
     assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
     obj_to_push = cur;
   }
   bool ok = work_q->push(obj_to_push);
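The last two hunks are two ends of the same pattern: the push side allocates an off-heap dummy header for a self-forwarded object, and the pop side recognizes such entries because they fall outside the reserved Java heap, freeing them with the same mtGC tag they were allocated under so the tracker's totals stay balanced. A rough sketch of that lifecycle with illustrative stand-in names (none of these are HotSpot's):

#include <cstdlib>
#include <cstddef>

struct FakeHeader { void* forwardee; };   // stands in for the dummy oopDesc

struct Heap {
  char*  base;
  size_t size;
  bool is_in_reserved(const void* p) const {
    // The real check is against the GC's reserved address range.
    const char* c = static_cast<const char*>(p);
    return c >= base && c < base + size;
  }
};

// Push side: a self-forwarded object gets an off-heap sentinel header,
// as NEW_C_HEAP_ARRAY(oopDesc, 1, mtGC) does above.
void* make_sentinel(void* from_space_obj) {
  FakeHeader* listhead = (FakeHeader*) std::malloc(sizeof(FakeHeader));
  listhead->forwardee = from_space_obj;   // ~ listhead->forward_to(from_space_obj)
  return listhead;
}

// Pop side: anything not inside the reserved heap must be such a sentinel,
// freed with the matching tag (FREE_C_HEAP_ARRAY(oopDesc, f, mtGC) above).
void release_if_sentinel(const Heap& heap, void* cur) {
  if (!heap.is_in_reserved(cur)) {
    std::free(cur);
  }
}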