comparison src/share/vm/gc_implementation/g1/concurrentMark.hpp @ 8506:c3657d00e343

Merge with tip
author Christos Kotselidis <christos.kotselidis@oracle.com>
date Thu, 21 Mar 2013 14:11:13 +0100
parents f90b9bceb8e5
children 9def4075da6d
comparing 8505:dee7c8b578c7 with 8506:c3657d00e343
369 369      friend class CMConcurrentMarkingTask;
370 370      friend class G1ParNoteEndTask;
371 371      friend class CalcLiveObjectsClosure;
372 372      friend class G1CMRefProcTaskProxy;
373 373      friend class G1CMRefProcTaskExecutor;
374     -    friend class G1CMParKeepAliveAndDrainClosure;
    374 +    friend class G1CMKeepAliveAndDrainClosure;
375     -    friend class G1CMParDrainMarkingStackClosure;
    375 +    friend class G1CMDrainMarkingStackClosure;
376 376
377 377    protected:
378 378      ConcurrentMarkThread* _cmThread; // the thread doing the work
379 379      G1CollectedHeap* _g1h; // the heap.
380 380      uint _parallel_marking_threads; // the number of marking

497 497      bool cleanup_list_is_empty() {
498 498        return _cleanup_list.is_empty();
499 499      }
500 500
501 501      // accessor methods
502     -    uint parallel_marking_threads() { return _parallel_marking_threads; }
    502 +    uint parallel_marking_threads() const { return _parallel_marking_threads; }
503     -    uint max_parallel_marking_threads() { return _max_parallel_marking_threads;}
    503 +    uint max_parallel_marking_threads() const { return _max_parallel_marking_threads;}
504 504      double sleep_factor() { return _sleep_factor; }
505 505      double marking_task_overhead() { return _marking_task_overhead;}
506 506      double cleanup_sleep_factor() { return _cleanup_sleep_factor; }
507 507      double cleanup_task_overhead() { return _cleanup_task_overhead;}
508 508
    509 +    bool use_parallel_marking_threads() const {
    510 +      assert(parallel_marking_threads() <=
    511 +             max_parallel_marking_threads(), "sanity");
    512 +      assert((_parallel_workers == NULL && parallel_marking_threads() == 0) ||
    513 +             parallel_marking_threads() > 0,
    514 +             "parallel workers not set up correctly");
    515 +      return _parallel_workers != NULL;
    516 +    }
    517 +
509 518      HeapWord* finger() { return _finger; }
510 519      bool concurrent() { return _concurrent; }
511 520      uint active_tasks() { return _active_tasks; }
512 521      ParallelTaskTerminator* terminator() { return &_terminator; }
513 522
514 523      // It claims the next available region to be scanned by a marking
515 524      // task/thread. It might return NULL if the next region is empty or
516 525      // we have run out of regions. In the latter case, out_of_regions()
517 526      // determines whether we've really run out of regions or the task
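
The new use_parallel_marking_threads() predicate (new lines 509-516 above) lets callers decide between handing a marking task to the parallel work gang and running it serially on the current thread when no gang was created. Below is a minimal, self-contained C++ sketch of that pattern, not HotSpot code: WorkGang, MarkingTask, ConcurrentMarkSketch and mark_from_roots are illustrative stand-ins for this sketch only, and the assertion mirrors only the second invariant checked in the patch.

#include <cassert>
#include <cstddef>
#include <cstdio>

// Illustrative stand-in for HotSpot's work gang: runs a task once per worker.
struct MarkingTask {
  void work(unsigned worker_id) {
    std::printf("marking step on worker %u\n", worker_id);
  }
};

struct WorkGang {
  unsigned _active_workers;
  explicit WorkGang(unsigned n) : _active_workers(n) {}
  void run_task(MarkingTask* t) {
    // A real gang dispatches to separate threads; a loop is enough here.
    for (unsigned i = 0; i < _active_workers; ++i) {
      t->work(i);
    }
  }
};

class ConcurrentMarkSketch {
  WorkGang* _parallel_workers;         // NULL when marking is single-threaded
  unsigned  _parallel_marking_threads;
public:
  ConcurrentMarkSketch(WorkGang* gang, unsigned n)
    : _parallel_workers(gang), _parallel_marking_threads(n) {}

  // Either no gang and a zero thread count, or a gang and a positive count.
  bool use_parallel_marking_threads() const {
    assert((_parallel_workers == NULL && _parallel_marking_threads == 0) ||
           _parallel_marking_threads > 0);
    return _parallel_workers != NULL;
  }

  void mark_from_roots() {
    MarkingTask task;
    if (use_parallel_marking_threads()) {
      _parallel_workers->run_task(&task); // spread the work over the gang
    } else {
      task.work(0);                       // serial fallback on this thread
    }
  }
};

int main() {
  WorkGang gang(4);
  ConcurrentMarkSketch parallel_cm(&gang, 4);
  ConcurrentMarkSketch serial_cm(NULL, 0);
  parallel_cm.mark_from_roots();
  serial_cm.mark_from_roots();
  return 0;
}

Keeping the check in a single const predicate means every call site branches on the same condition and re-asserts the gang/thread-count invariants, instead of each caller testing _parallel_workers against NULL on its own.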