comparison src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp @ 4095:bca17e38de00

6593758: RFE: Enhance GC ergonomics to dynamically choose ParallelGCThreads
Summary: Select number of GC threads dynamically based on heap usage and number of Java threads
Reviewed-by: johnc, ysr, jcoomes
author jmasa
date Tue, 09 Aug 2011 10:16:01 -0700
parents f95d63e2154a
children 7913e93dca52
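
The summary line describes sizing the parallel GC worker pool from current heap usage and the number of Java threads. As a rough illustration of that kind of ergonomic heuristic (the function, its parameters, and the constants below are assumptions made for exposition, not code introduced by this changeset, whose header diff only adds per-worker region-stack plumbing), such a calculation might look like:

// Illustrative sketch only: a heuristic in the spirit of the summary line.
// Names and constants are assumptions, not part of this changeset.
#include <algorithm>
#include <cstddef>

static unsigned choose_gc_thread_count(size_t used_heap_bytes,
                                       size_t heap_bytes_per_gc_thread,
                                       unsigned java_thread_count,
                                       unsigned parallel_gc_threads_max) {
  // More live heap or more mutator threads justifies more GC workers,
  // but never exceed the configured maximum.
  unsigned by_heap    = (unsigned)(used_heap_bytes / heap_bytes_per_gc_thread) + 1u;
  unsigned by_mutator = std::max(1u, java_thread_count / 4u);
  unsigned wanted     = std::max(by_heap, by_mutator);
  return std::max(1u, std::min(wanted, parallel_gc_threads_max));
}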
comparing 4094:3a298e04d914 with 4095:bca17e38de00
@@ -46,10 +46,11 @@
   friend class ParMarkBitMap;
   friend class PSParallelCompact;
   friend class StealRegionCompactionTask;
   friend class UpdateAndFillClosure;
   friend class RefProcTaskExecutor;
+  friend class IdleGCTask;
 
  public:
 
   // ------------------------ Don't putback if not needed
   // Actions that the compaction manager should take.
@@ -83,11 +84,35 @@
   ObjArrayTaskQueue _objarray_stack;
 
   // Is there a way to reuse the _marking_stack for the
   // saving of empty regions?  For now just create a different
   // type of TaskQueue.
-  RegionTaskQueue _region_stack;
+  RegionTaskQueue* _region_stack;
+
+  static RegionTaskQueue** _region_list;
+  // Index in _region_list for current _region_stack.
+  uint _region_stack_index;
+
+  // Indexes of recycled region stacks/overflow stacks.
+  // Stacks of regions to be compacted are embedded in the tasks doing
+  // the compaction.  A thread that executes the task extracts the
+  // region stack and drains it.  These threads keep these region
+  // stacks for use during compaction task stealing.  If a thread
+  // gets a second draining task, it pushes its current region stack
+  // index into the array _recycled_stack_index and gets a new
+  // region stack from the task.  A thread that is executing a
+  // compaction stealing task without ever having executed a
+  // draining task will get a region stack from _recycled_stack_index.
+  //
+  // Array of indexes into the array of region stacks.
+  static uint* _recycled_stack_index;
+  // The index into _recycled_stack_index of the last region stack index
+  // pushed.  If -1, there are no entries in _recycled_stack_index.
+  static int _recycled_top;
+  // The index into _recycled_stack_index of the last region stack index
+  // popped.  If -1, no entry has been popped yet.
+  static int _recycled_bottom;
 
   Stack<Klass*> _revisit_klass_stack;
   Stack<DataLayout*> _revisit_mdo_stack;
 
   static ParMarkBitMap* _mark_bitmap;
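
The comment block in the hunk above describes how region-stack indices are recycled between draining tasks and stealing tasks. As a sketch of those semantics only, and not the actual bodies declared below (which are defined elsewhere, presumably in psCompactionManager.cpp, and need atomic updates that are omitted here), the push and pop of recycled indices could be modeled like this; kMaxRegionStacks is an assumed capacity:

#include <cassert>

// Sketch of the recycled-index protocol described above (assumption:
// capacity and synchronization details differ in the real code).
static const int kMaxRegionStacks = 32;          // assumed capacity
static unsigned  recycled_stack_index[kMaxRegionStacks];
static int       recycled_top    = -1;           // last slot pushed; -1 means empty
static int       recycled_bottom = -1;           // last slot popped; -1 means none yet

static void push_recycled_stack_index(unsigned region_stack_index) {
  // A worker that gets a second draining task parks the index of the
  // stack it already holds so a later stealing task can pick it up.
  assert(recycled_top + 1 < kMaxRegionStacks && "recycled index overflow");
  recycled_stack_index[++recycled_top] = region_stack_index;
}

static int pop_recycled_stack_index() {
  // A stealing task that never ran a draining task takes an index from
  // here; -1 means nothing has been recycled yet.
  if (recycled_bottom >= recycled_top) {
    return -1;
  }
  return (int) recycled_stack_index[++recycled_bottom];
}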
@@ -102,11 +127,10 @@
 
  protected:
   // Array of tasks.  Needed by the ParallelTaskTerminator.
   static RegionTaskQueueSet* region_array() { return _region_array; }
   OverflowTaskQueue<oop>* marking_stack() { return &_marking_stack; }
-  RegionTaskQueue* region_stack() { return &_region_stack; }
 
   // Pushes onto the marking stack.  If the marking stack is full,
   // pushes onto the overflow stack.
   void stack_push(oop obj);
   // Do not implement an equivalent stack_pop.  Deal with the
@@ -114,14 +138,37 @@
 
  public:
   Action action() { return _action; }
   void set_action(Action v) { _action = v; }
 
+  RegionTaskQueue* region_stack() { return _region_stack; }
+  void set_region_stack(RegionTaskQueue* v) { _region_stack = v; }
+
   inline static ParCompactionManager* manager_array(int index);
 
+  inline static RegionTaskQueue* region_list(int index) {
+    return _region_list[index];
+  }
+
+  uint region_stack_index() { return _region_stack_index; }
+  void set_region_stack_index(uint v) { _region_stack_index = v; }
+
+  // Pop and push unique reusable stack index
+  static int pop_recycled_stack_index();
+  static void push_recycled_stack_index(uint v);
+  static void reset_recycled_stack_index() {
+    _recycled_bottom = _recycled_top = -1;
+  }
+
   ParCompactionManager();
+  ~ParCompactionManager();
 
+  // Pushes onto the region stack at the given index.  If the
+  // region stack is full,
+  // pushes onto the region overflow stack.
+  static void region_list_push(uint stack_index, size_t region_index);
+  static void verify_region_list_empty(uint stack_index);
   ParMarkBitMap* mark_bitmap() { return _mark_bitmap; }
 
   // Take actions in preparation for a compaction.
   static void reset();
 
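
Taken together, the new accessors suggest how a worker binds to one of the pre-built region stacks before compacting: the task carries a stack index, and the worker looks the queue up via region_list() and attaches it with set_region_stack()/set_region_stack_index(). The following is only a usage sketch under that reading; fill_region() is a hypothetical placeholder, and the pop calls are assumed to be those of the OverflowTaskQueue-based RegionTaskQueue:

#include "psCompactionManager.hpp"   // this header (path abbreviated)

// Hypothetical placeholder for the per-region compaction work.
static void fill_region(ParCompactionManager* cm, size_t region_index);

// Usage sketch only: bind a compaction manager to the region stack
// selected for this draining task, then empty it.
static void run_draining_task(ParCompactionManager* cm, uint which_stack_index) {
  cm->set_region_stack_index(which_stack_index);
  cm->set_region_stack(ParCompactionManager::region_list(which_stack_index));

  size_t region_index;
  // Drain the overflow portion first, then the local queue.
  while (cm->region_stack()->pop_overflow(region_index)) {
    fill_region(cm, region_index);
  }
  while (cm->region_stack()->pop_local(region_index)) {
    fill_region(cm, region_index);
  }
}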