Mercurial > hg > graal-jvmci-8
comparison src/share/vm/gc_implementation/g1/concurrentMark.hpp @ 7397:442f942757c0
8000244: G1: Ergonomically set MarkStackSize and use virtual space for global marking stack
Summary: Set the value of MarkStackSize to a value based on the number of parallel marking threads, with a reasonable minimum. Expand the marking stack, up to a reasonable maximum, if we have to restart marking due to an overflow. Allocate the underlying space for the marking stack from virtual memory.
Reviewed-by: jmasa, brutisso
author: johnc
date: Mon, 01 Oct 2012 09:28:13 -0700
parents | 8a5ea0a9ccc4 |
children | d275c3dc73e6 |
comparison legend: equal | deleted | inserted | replaced
7208:eade6b2e4782 | 7397:442f942757c0 |
---|---|
61 VirtualSpace _virtual_space; // underlying the bit map | 61 VirtualSpace _virtual_space; // underlying the bit map |
62 BitMap _bm; // the bit map itself | 62 BitMap _bm; // the bit map itself |
63 | 63 |
64 public: | 64 public: |
65 // constructor | 65 // constructor |
66 CMBitMapRO(ReservedSpace rs, int shifter); | 66 CMBitMapRO(int shifter); |
67 | 67 |
68 enum { do_yield = true }; | 68 enum { do_yield = true }; |
69 | 69 |
70 // inquiries | 70 // inquiries |
71 HeapWord* startWord() const { return _bmStartWord; } | 71 HeapWord* startWord() const { return _bmStartWord; } |
115 | 115 |
116 class CMBitMap : public CMBitMapRO { | 116 class CMBitMap : public CMBitMapRO { |
117 | 117 |
118 public: | 118 public: |
119 // constructor | 119 // constructor |
120 CMBitMap(ReservedSpace rs, int shifter) : | 120 CMBitMap(int shifter) : |
121 CMBitMapRO(rs, shifter) {} | 121 CMBitMapRO(shifter) {} |
122 | |
123 // Allocates the back store for the marking bitmap | |
124 bool allocate(ReservedSpace heap_rs); | |
122 | 125 |
123 // write marks | 126 // write marks |
124 void mark(HeapWord* addr) { | 127 void mark(HeapWord* addr) { |
125 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize), | 128 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize), |
126 "outside underlying space?"); | 129 "outside underlying space?"); |
153 // the run. If there is no "1" bit at or after "addr", return an empty | 156 // the run. If there is no "1" bit at or after "addr", return an empty |
154 // MemRegion. | 157 // MemRegion. |
155 MemRegion getAndClearMarkedRegion(HeapWord* addr, HeapWord* end_addr); | 158 MemRegion getAndClearMarkedRegion(HeapWord* addr, HeapWord* end_addr); |
156 }; | 159 }; |
157 | 160 |
158 // Represents a marking stack used by the CM collector. | 161 // Represents a marking stack used by ConcurrentMarking in the G1 collector. |
159 // Ideally this should be GrowableArray<> just like MSC's marking stack(s). | |
160 class CMMarkStack VALUE_OBJ_CLASS_SPEC { | 162 class CMMarkStack VALUE_OBJ_CLASS_SPEC { |
163 VirtualSpace _virtual_space; // Underlying backing store for actual stack | |
161 ConcurrentMark* _cm; | 164 ConcurrentMark* _cm; |
162 oop* _base; // bottom of stack | 165 oop* _base; // bottom of stack |
163 jint _index; // one more than last occupied index | 166 jint _index; // one more than last occupied index |
164 jint _capacity; // max #elements | 167 jint _capacity; // max #elements |
165 jint _saved_index; // value of _index saved at start of GC | 168 jint _saved_index; // value of _index saved at start of GC |
166 NOT_PRODUCT(jint _max_depth;) // max depth plumbed during run | 169 NOT_PRODUCT(jint _max_depth;) // max depth plumbed during run |
167 | 170 |
168 bool _overflow; | 171 bool _overflow; |
172 bool _should_expand; | |
169 DEBUG_ONLY(bool _drain_in_progress;) | 173 DEBUG_ONLY(bool _drain_in_progress;) |
170 DEBUG_ONLY(bool _drain_in_progress_yields;) | 174 DEBUG_ONLY(bool _drain_in_progress_yields;) |
171 | 175 |
172 public: | 176 public: |
173 CMMarkStack(ConcurrentMark* cm); | 177 CMMarkStack(ConcurrentMark* cm); |
174 ~CMMarkStack(); | 178 ~CMMarkStack(); |
175 | 179 |
176 void allocate(size_t size); | 180 #ifndef PRODUCT |
181 jint max_depth() const { | |
182 return _max_depth; | |
183 } | |
184 #endif | |
185 | |
186 bool allocate(size_t capacity); | |
177 | 187 |
178 oop pop() { | 188 oop pop() { |
179 if (!isEmpty()) { | 189 if (!isEmpty()) { |
180 return _base[--_index] ; | 190 return _base[--_index] ; |
181 } | 191 } |
229 template<class OopClosureClass> | 239 template<class OopClosureClass> |
230 bool drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after = false); | 240 bool drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after = false); |
231 | 241 |
232 bool isEmpty() { return _index == 0; } | 242 bool isEmpty() { return _index == 0; } |
233 bool isFull() { return _index == _capacity; } | 243 bool isFull() { return _index == _capacity; } |
234 int maxElems() { return _capacity; } | 244 int maxElems() { return _capacity; } |
235 | 245 |
236 bool overflow() { return _overflow; } | 246 bool overflow() { return _overflow; } |
237 void clear_overflow() { _overflow = false; } | 247 void clear_overflow() { _overflow = false; } |
248 | |
249 bool should_expand() const { return _should_expand; } | |
250 void set_should_expand(); | |
251 | |
252 // Expand the stack, typically in response to an overflow condition | |
253 void expand(); | |
238 | 254 |
239 int size() { return _index; } | 255 int size() { return _index; } |
240 | 256 |
241 void setEmpty() { _index = 0; clear_overflow(); } | 257 void setEmpty() { _index = 0; clear_overflow(); } |
242 | 258 |
342 }; | 358 }; |
343 | 359 |
344 class ConcurrentMarkThread; | 360 class ConcurrentMarkThread; |
345 | 361 |
346 class ConcurrentMark: public CHeapObj<mtGC> { | 362 class ConcurrentMark: public CHeapObj<mtGC> { |
363 friend class CMMarkStack; | |
347 friend class ConcurrentMarkThread; | 364 friend class ConcurrentMarkThread; |
348 friend class CMTask; | 365 friend class CMTask; |
349 friend class CMBitMapClosure; | 366 friend class CMBitMapClosure; |
350 friend class CMGlobalObjectClosure; | 367 friend class CMGlobalObjectClosure; |
351 friend class CMRemarkTask; | 368 friend class CMRemarkTask; |
575 | 592 |
576 // Card index of the bottom of the G1 heap. Used for biasing indices into | 593 // Card index of the bottom of the G1 heap. Used for biasing indices into |
577 // the card bitmaps. | 594 // the card bitmaps. |
578 intptr_t _heap_bottom_card_num; | 595 intptr_t _heap_bottom_card_num; |
579 | 596 |
597 // Set to true when initialization is complete | |
598 bool _completed_initialization; | |
599 | |
580 public: | 600 public: |
581 // Manipulation of the global mark stack. | 601 // Manipulation of the global mark stack. |
582 // Notice that the first mark_stack_push is CAS-based, whereas the | 602 // Notice that the first mark_stack_push is CAS-based, whereas the |
583 // two below are Mutex-based. This is OK since the first one is only | 603 // two below are Mutex-based. This is OK since the first one is only |
584 // called during evacuation pauses and doesn't compete with the | 604 // called during evacuation pauses and doesn't compete with the |
634 // Attempts to steal an object from the task queues of other tasks | 654 // Attempts to steal an object from the task queues of other tasks |
635 bool try_stealing(uint worker_id, int* hash_seed, oop& obj) { | 655 bool try_stealing(uint worker_id, int* hash_seed, oop& obj) { |
636 return _task_queues->steal(worker_id, hash_seed, obj); | 656 return _task_queues->steal(worker_id, hash_seed, obj); |
637 } | 657 } |
638 | 658 |
639 ConcurrentMark(ReservedSpace rs, uint max_regions); | 659 ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs); |
640 ~ConcurrentMark(); | 660 ~ConcurrentMark(); |
641 | 661 |
642 ConcurrentMarkThread* cmThread() { return _cmThread; } | 662 ConcurrentMarkThread* cmThread() { return _cmThread; } |
643 | 663 |
644 CMBitMapRO* prevMarkBitMap() const { return _prevMarkBitMap; } | 664 CMBitMapRO* prevMarkBitMap() const { return _prevMarkBitMap; } |
905 // Similar to the above routine but we don't know the heap region that | 925 // Similar to the above routine but we don't know the heap region that |
906 // contains the object to be marked/counted, which this routine looks up. | 926 // contains the object to be marked/counted, which this routine looks up. |
907 // Should *not* be called from parallel code. | 927 // Should *not* be called from parallel code. |
908 inline bool mark_and_count(oop obj); | 928 inline bool mark_and_count(oop obj); |
909 | 929 |
930 // Returns true if initialization was successfully completed. | |
931 bool completed_initialization() const { | |
932 return _completed_initialization; | |
933 } | |
934 | |
910 protected: | 935 protected: |
911 // Clear all the per-task bitmaps and arrays used to store the | 936 // Clear all the per-task bitmaps and arrays used to store the |
912 // counting data. | 937 // counting data. |
913 void clear_all_count_data(); | 938 void clear_all_count_data(); |
914 | 939 |