Mercurial > hg > graal-jvmci-8
comparison src/share/vm/gc_implementation/g1/g1RemSet.hpp @ 1861:c32059ef4dc0
6971296: G1: simplify G1RemSet class hierarchy
Summary: Remove G1RemSet base class and StupidG1RemSet class; rename HRInto_G1RemSet to just G1RemSet.
Reviewed-by: ysr, tonyp
author | johnc |
---|---|
date | Tue, 12 Oct 2010 09:36:48 -0700 |
parents | a03ae377b2e8 |
children | 878b57474103 |
comparison
equal
deleted
inserted
replaced
1843:0715f0cf171d | 1861:c32059ef4dc0 |
---|---|
25 // A G1RemSet provides ways of iterating over pointers into a selected | 25 // A G1RemSet provides ways of iterating over pointers into a selected |
26 // collection set. | 26 // collection set. |
27 | 27 |
28 class G1CollectedHeap; | 28 class G1CollectedHeap; |
29 class CardTableModRefBarrierSet; | 29 class CardTableModRefBarrierSet; |
30 class HRInto_G1RemSet; | |
31 class ConcurrentG1Refine; | 30 class ConcurrentG1Refine; |
31 | |
32 // A G1RemSet in which each heap region has a rem set that records the | |
33 // external heap references into it. Uses a mod ref bs to track updates, | |
34 // so that they can be used to update the individual region remsets. | |
32 | 35 |
33 class G1RemSet: public CHeapObj { | 36 class G1RemSet: public CHeapObj { |
34 protected: | 37 protected: |
35 G1CollectedHeap* _g1; | 38 G1CollectedHeap* _g1; |
36 unsigned _conc_refine_cards; | 39 unsigned _conc_refine_cards; |
37 size_t n_workers(); | 40 size_t n_workers(); |
38 | 41 |
39 public: | |
40 G1RemSet(G1CollectedHeap* g1) : | |
41 _g1(g1), _conc_refine_cards(0) | |
42 {} | |
43 | |
44 // Invoke "blk->do_oop" on all pointers into the CS in object in regions | |
45 // outside the CS (having invoked "blk->set_region" to set the "from" | |
46 // region correctly beforehand.) The "worker_i" param is for the | |
47 // parallel case where the number of the worker thread calling this | |
48 // function can be helpful in partitioning the work to be done. It | |
49 // should be the same as the "i" passed to the calling thread's | |
50 // work(i) function. In the sequential case this param will be ingored. | |
51 virtual void oops_into_collection_set_do(OopsInHeapRegionClosure* blk, | |
52 int worker_i) = 0; | |
53 | |
54 // Prepare for and cleanup after an oops_into_collection_set_do | |
55 // call. Must call each of these once before and after (in sequential | |
56 // code) any threads call oops into collection set do. (This offers an | |
57 // opportunity to sequential setup and teardown of structures needed by a | |
58 // parallel iteration over the CS's RS.) | |
59 virtual void prepare_for_oops_into_collection_set_do() = 0; | |
60 virtual void cleanup_after_oops_into_collection_set_do() = 0; | |
61 | |
62 // If "this" is of the given subtype, return "this", else "NULL". | |
63 virtual HRInto_G1RemSet* as_HRInto_G1RemSet() { return NULL; } | |
64 | |
65 // Record, if necessary, the fact that *p (where "p" is in region "from", | |
66 // and is, a fortiori, required to be non-NULL) has changed to its new value. | |
67 virtual void write_ref(HeapRegion* from, oop* p) = 0; | |
68 virtual void write_ref(HeapRegion* from, narrowOop* p) = 0; | |
69 virtual void par_write_ref(HeapRegion* from, oop* p, int tid) = 0; | |
70 virtual void par_write_ref(HeapRegion* from, narrowOop* p, int tid) = 0; | |
71 | |
72 // Requires "region_bm" and "card_bm" to be bitmaps with 1 bit per region | |
73 // or card, respectively, such that a region or card with a corresponding | |
74 // 0 bit contains no part of any live object. Eliminates any remembered | |
75 // set entries that correspond to dead heap ranges. | |
76 virtual void scrub(BitMap* region_bm, BitMap* card_bm) = 0; | |
77 // Like the above, but assumes is called in parallel: "worker_num" is the | |
78 // parallel thread id of the current thread, and "claim_val" is the | |
79 // value that should be used to claim heap regions. | |
80 virtual void scrub_par(BitMap* region_bm, BitMap* card_bm, | |
81 int worker_num, int claim_val) = 0; | |
82 | |
83 // Refine the card corresponding to "card_ptr". If "sts" is non-NULL, | |
84 // join and leave around parts that must be atomic wrt GC. (NULL means | |
85 // being done at a safepoint.) | |
86 // With some implementations of this routine, when check_for_refs_into_cset | |
87 // is true, a true result may be returned if the given card contains oops | |
88 // that have references into the current collection set. | |
89 virtual bool concurrentRefineOneCard(jbyte* card_ptr, int worker_i, | |
90 bool check_for_refs_into_cset) { | |
91 return false; | |
92 } | |
93 | |
94 // Print any relevant summary info. | |
95 virtual void print_summary_info() {} | |
96 | |
97 // Prepare remembered set for verification. | |
98 virtual void prepare_for_verify() {}; | |
99 }; | |
100 | |
101 | |
102 // The simplest possible G1RemSet: iterates over all objects in non-CS | |
103 // regions, searching for pointers into the CS. | |
104 class StupidG1RemSet: public G1RemSet { | |
105 public: | |
106 StupidG1RemSet(G1CollectedHeap* g1) : G1RemSet(g1) {} | |
107 | |
108 void oops_into_collection_set_do(OopsInHeapRegionClosure* blk, | |
109 int worker_i); | |
110 | |
111 void prepare_for_oops_into_collection_set_do() {} | |
112 void cleanup_after_oops_into_collection_set_do() {} | |
113 | |
114 // Nothing is necessary in the version below. | |
115 void write_ref(HeapRegion* from, oop* p) {} | |
116 void write_ref(HeapRegion* from, narrowOop* p) {} | |
117 void par_write_ref(HeapRegion* from, oop* p, int tid) {} | |
118 void par_write_ref(HeapRegion* from, narrowOop* p, int tid) {} | |
119 | |
120 void scrub(BitMap* region_bm, BitMap* card_bm) {} | |
121 void scrub_par(BitMap* region_bm, BitMap* card_bm, | |
122 int worker_num, int claim_val) {} | |
123 | |
124 }; | |
125 | |
126 // A G1RemSet in which each heap region has a rem set that records the | |
127 // external heap references into it. Uses a mod ref bs to track updates, | |
128 // so that they can be used to update the individual region remsets. | |
129 | |
130 class HRInto_G1RemSet: public G1RemSet { | |
131 protected: | 42 protected: |
132 enum SomePrivateConstants { | 43 enum SomePrivateConstants { |
133 UpdateRStoMergeSync = 0, | 44 UpdateRStoMergeSync = 0, |
134 MergeRStoDoDirtySync = 1, | 45 MergeRStoDoDirtySync = 1, |
135 DoDirtySync = 2, | 46 DoDirtySync = 2, |
173 // This is called to reset dual hash tables after the gc pause | 84 // This is called to reset dual hash tables after the gc pause |
174 // is finished and the initial hash table is no longer being | 85 // is finished and the initial hash table is no longer being |
175 // scanned. | 86 // scanned. |
176 void cleanupHRRS(); | 87 void cleanupHRRS(); |
177 | 88 |
178 HRInto_G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs); | 89 G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs); |
179 ~HRInto_G1RemSet(); | 90 ~G1RemSet(); |
180 | 91 |
92 // Invoke "blk->do_oop" on all pointers into the CS in objects in regions | |
93 // outside the CS (having invoked "blk->set_region" to set the "from" | |
94 // region correctly beforehand.) The "worker_i" param is for the | |
95 // parallel case where the number of the worker thread calling this | |
96 // function can be helpful in partitioning the work to be done. It | |
97 // should be the same as the "i" passed to the calling thread's | |
98 // work(i) function. In the sequential case this param will be ignored. | |
181 void oops_into_collection_set_do(OopsInHeapRegionClosure* blk, | 99 void oops_into_collection_set_do(OopsInHeapRegionClosure* blk, |
182 int worker_i); | 100 int worker_i); |
183 | 101 |
102 // Prepare for and cleanup after an oops_into_collection_set_do | |
103 // call. Must call each of these once before and after (in sequential | |
104 // code) any threads call oops_into_collection_set_do. (This offers an | |
105 // opportunity to sequential setup and teardown of structures needed by a | |
106 // parallel iteration over the CS's RS.) | |
184 void prepare_for_oops_into_collection_set_do(); | 107 void prepare_for_oops_into_collection_set_do(); |
185 void cleanup_after_oops_into_collection_set_do(); | 108 void cleanup_after_oops_into_collection_set_do(); |
109 | |
186 void scanRS(OopsInHeapRegionClosure* oc, int worker_i); | 110 void scanRS(OopsInHeapRegionClosure* oc, int worker_i); |
187 template <class T> void scanNewRefsRS_work(OopsInHeapRegionClosure* oc, int worker_i); | |
188 void scanNewRefsRS(OopsInHeapRegionClosure* oc, int worker_i) { | |
189 if (UseCompressedOops) { | |
190 scanNewRefsRS_work<narrowOop>(oc, worker_i); | |
191 } else { | |
192 scanNewRefsRS_work<oop>(oc, worker_i); | |
193 } | |
194 } | |
195 void updateRS(DirtyCardQueue* into_cset_dcq, int worker_i); | 111 void updateRS(DirtyCardQueue* into_cset_dcq, int worker_i); |
112 | |
196 HeapRegion* calculateStartRegion(int i); | 113 HeapRegion* calculateStartRegion(int i); |
197 | |
198 HRInto_G1RemSet* as_HRInto_G1RemSet() { return this; } | |
199 | 114 |
200 CardTableModRefBS* ct_bs() { return _ct_bs; } | 115 CardTableModRefBS* ct_bs() { return _ct_bs; } |
201 size_t cardsScanned() { return _total_cards_scanned; } | 116 size_t cardsScanned() { return _total_cards_scanned; } |
202 | 117 |
203 // Record, if necessary, the fact that *p (where "p" is in region "from", | 118 // Record, if necessary, the fact that *p (where "p" is in region "from", |
217 par_write_ref_nv(from, p, tid); | 132 par_write_ref_nv(from, p, tid); |
218 } | 133 } |
219 | 134 |
220 bool self_forwarded(oop obj); | 135 bool self_forwarded(oop obj); |
221 | 136 |
137 // Requires "region_bm" and "card_bm" to be bitmaps with 1 bit per region | |
138 // or card, respectively, such that a region or card with a corresponding | |
139 // 0 bit contains no part of any live object. Eliminates any remembered | |
140 // set entries that correspond to dead heap ranges. | |
222 void scrub(BitMap* region_bm, BitMap* card_bm); | 141 void scrub(BitMap* region_bm, BitMap* card_bm); |
142 | |
143 // Like the above, but assumes is called in parallel: "worker_num" is the | |
144 // parallel thread id of the current thread, and "claim_val" is the | |
145 // value that should be used to claim heap regions. | |
223 void scrub_par(BitMap* region_bm, BitMap* card_bm, | 146 void scrub_par(BitMap* region_bm, BitMap* card_bm, |
224 int worker_num, int claim_val); | 147 int worker_num, int claim_val); |
225 | 148 |
226 // If check_for_refs_into_cset is true then a true result is returned | 149 // Refine the card corresponding to "card_ptr". If "sts" is non-NULL, |
227 // if the card contains oops that have references into the current | 150 // join and leave around parts that must be atomic wrt GC. (NULL means |
228 // collection set. | 151 // being done at a safepoint.) |
152 // If check_for_refs_into_cset is true, a true result is returned | |
153 // if the given card contains oops that have references into the | |
154 // current collection set. | |
229 virtual bool concurrentRefineOneCard(jbyte* card_ptr, int worker_i, | 155 virtual bool concurrentRefineOneCard(jbyte* card_ptr, int worker_i, |
230 bool check_for_refs_into_cset); | 156 bool check_for_refs_into_cset); |
231 | 157 |
158 // Print any relevant summary info. | |
232 virtual void print_summary_info(); | 159 virtual void print_summary_info(); |
160 | |
161 // Prepare remembered set for verification. | |
233 virtual void prepare_for_verify(); | 162 virtual void prepare_for_verify(); |
234 }; | 163 }; |
235 | 164 |
236 #define G1_REM_SET_LOGGING 0 | 165 #define G1_REM_SET_LOGGING 0 |
237 | 166 |
248 HeapWord* start_first() { return _start_first; } | 177 HeapWord* start_first() { return _start_first; } |
249 }; | 178 }; |
250 | 179 |
251 class UpdateRSOopClosure: public OopClosure { | 180 class UpdateRSOopClosure: public OopClosure { |
252 HeapRegion* _from; | 181 HeapRegion* _from; |
253 HRInto_G1RemSet* _rs; | 182 G1RemSet* _rs; |
254 int _worker_i; | 183 int _worker_i; |
255 | 184 |
256 template <class T> void do_oop_work(T* p); | 185 template <class T> void do_oop_work(T* p); |
257 | 186 |
258 public: | 187 public: |
259 UpdateRSOopClosure(HRInto_G1RemSet* rs, int worker_i = 0) : | 188 UpdateRSOopClosure(G1RemSet* rs, int worker_i = 0) : |
260 _from(NULL), _rs(rs), _worker_i(worker_i) { | 189 _from(NULL), _rs(rs), _worker_i(worker_i) { |
261 guarantee(_rs != NULL, "Requires an HRIntoG1RemSet"); | 190 guarantee(_rs != NULL, "Requires an HRIntoG1RemSet"); |
262 } | 191 } |
263 | 192 |
264 void set_from(HeapRegion* from) { | 193 void set_from(HeapRegion* from) { |