Mercurial > hg > graal-compiler
annotate src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp @ 807:d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
Summary: For heaps larger than 32Gb, the number of heap regions overflows the data type used to hold the region index in the SparsePRT structure. Changed the region indexes, card indexes, and RSet hash table buckets to ints and added some size overflow guarantees.
Reviewed-by: ysr, tonyp
author | johnc |
---|---|
date | Thu, 11 Jun 2009 17:19:33 -0700 |
parents | 315a5d70b295 |
children | df6caf649ff7 |
rev | line source |
---|---|
342 | 1 /* |
579 | 2 * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. |
342 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, | |
20 * CA 95054 USA or visit www.sun.com if you need additional information or | |
21 * have any questions. | |
22 * | |
23 */ | |
24 | |
25 // Remembered set for a heap region. Represent a set of "cards" that | |
26 // contain pointers into the owner heap region. Cards are defined somewhat | |
27 // abstractly, in terms of what the "BlockOffsetTable" in use can parse. | |
28 | |
29 class G1CollectedHeap; | |
30 class G1BlockOffsetSharedArray; | |
31 class HeapRegion; | |
32 class HeapRegionRemSetIterator; | |
33 class PosParPRT; | |
34 class SparsePRT; | |
35 | |
36 | |
37 // The "_coarse_map" is a bitmap with one bit for each region, where set | |
38 // bits indicate that the corresponding region may contain some pointer | |
39 // into the owning region. | |
40 | |
41 // The "_fine_grain_entries" array is an open hash table of PerRegionTables | |
42 // (PRTs), indicating regions for which we're keeping the RS as a set of | |
43 // cards. The strategy is to cap the size of the fine-grain table, | |
44 // deleting an entry and setting the corresponding coarse-grained bit when | |
45 // we would overflow this cap. | |
46 | |
47 // We use a mixture of locking and lock-free techniques here. We allow | |
48 // threads to locate PRTs without locking, but threads attempting to alter | |
49 // a bucket list obtain a lock. This means that any failing attempt to | |
50 // find a PRT must be retried with the lock. It might seem dangerous that | |
51 // a read can find a PRT that is concurrently deleted. This is all right, | |
52 // because: | |
53 // | |
54 // 1) We only actually free PRT's at safe points (though we reuse them at | |
55 // other times). | |
// 2) We find PRT's in an attempt to add entries.  If a PRT is deleted,
//    its _coarse_map bit is set, so the card that we were attempting to add
//    is represented.  If a deleted PRT is re-used, a thread adding a bit,
//    thinking the PRT is for a different region, does no harm.
60 | |
549
fe3d7c11b4b7
6700941: G1: allocation spec missing for some G1 classes
apetrusenko
parents:
342
diff
changeset
|
// OtherRegionsTable: the "points-into" side of one heap region's remembered
// set.  Records, at three granularities, which cards elsewhere in the heap
// may contain pointers into the owning region:
//   - _sparse_table : a few cards per source region (SparsePRT),
//   - _fine_grain_regions : full per-region card tables (PosParPRT),
//   - _coarse_map : one bit per source region once a PRT is evicted.
class OtherRegionsTable VALUE_OBJ_CLASS_SPEC {
  friend class HeapRegionRemSetIterator;

  G1CollectedHeap* _g1h;
  Mutex            _m;    // Protects the coarse map and fine-grain table below.
  HeapRegion*      _hr;   // The region whose remembered set this is.

  // These are protected by "_m".
  BitMap      _coarse_map;        // One bit per region; set => that region may point into _hr.
  size_t      _n_coarse_entries;  // Number of bits currently set in _coarse_map.
  static jint _n_coarsenings;     // Global count of fine -> coarse transitions.

  PosParPRT** _fine_grain_regions;  // Open hash table of per-region card tables (PRTs).
  size_t      _n_fine_entries;

#define SAMPLE_FOR_EVICTION 1
#if SAMPLE_FOR_EVICTION
  // State for picking a fine-grain eviction victim by sampling entries
  // (start index plus a stride over a fixed sample size).
  size_t        _fine_eviction_start;
  static size_t _fine_eviction_stride;
  static size_t _fine_eviction_sample_size;
#endif

  SparsePRT   _sparse_table;  // Compact table for source regions with few cards.

  // These are static after init.
  static size_t _max_fine_entries;           // Cap on the fine-grain table size.
  static size_t _mod_max_fine_entries_mask;  // Hash -> bucket mask; presumably
                                             // _max_fine_entries - 1 (power of
                                             // two) -- confirm in the .cpp.

  // Requires "prt" to be the first element of the bucket list appropriate
  // for "hr".  If this list contains an entry for "hr", return it,
  // otherwise return "NULL".
  PosParPRT* find_region_table(size_t ind, HeapRegion* hr) const;

  // Find, delete, and return a candidate PosParPRT, if any exists,
  // adding the deleted region to the coarse bitmap.  Requires the caller
  // to hold _m, and the fine-grain table to be full.
  PosParPRT* delete_region_table();

  // If a PRT for "hr" is in the bucket list indicated by "ind" (which must
  // be the correct index for "hr"), delete it and return true; else return
  // false.
  bool del_single_region_table(size_t ind, HeapRegion* hr);

  // Hit-rate statistics for the from-card cache below.
  static jint _cache_probes;
  static jint _cache_hits;

  // Last card added per (thread, region); indexed by thread X heap region,
  // to minimize thread contention.
  static int** _from_card_cache;
  static size_t _from_card_cache_max_regions;
  static size_t _from_card_cache_mem_size;

public:
  OtherRegionsTable(HeapRegion* hr);

  // The region whose remembered set this table represents.
  HeapRegion* hr() const { return _hr; }

  // Record that the location "from" may contain a pointer into hr().
  // "tid" selects the calling thread's row of the from-card cache.
  // For now.  Could "expand" some tables in the future, so that this made
  // sense.
  void add_reference(oop* from, int tid);

  // Single-threaded convenience form: records via thread id 0.
  void add_reference(oop* from) {
    return add_reference(from, 0);
  }

  // Removes any entries shown by the given bitmaps to contain only dead
  // objects.
  void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm);

  // Number of recorded cards, in total and broken down per granularity.
  // NOTE(review): the original comment said "Not const because it takes a
  // lock", yet the declarations are const -- confirm how _m is acquired.
  size_t occupied() const;
  size_t occ_fine() const;
  size_t occ_coarse() const;
  size_t occ_sparse() const;

  // Global count of fine -> coarse evictions so far.
  static jint n_coarsenings() { return _n_coarsenings; }

  // Returns size in bytes.
  // Not const because it takes a lock.
  size_t mem_size() const;
  static size_t static_mem_size();  // Bytes in static (shared) structures.
  static size_t fl_mem_size();      // Bytes held in free lists.

  // Whether an entry for "from" is present.  The "_locked" variant
  // presumably expects the caller to hold _m -- confirm in the .cpp.
  bool contains_reference(oop* from) const;
  bool contains_reference_locked(oop* from) const;

  // Remove all entries.
  void clear();

  // Specifically clear the from_card_cache.
  void clear_fcc();

  // "from_hr" is being cleared; remove any entries from it.
  void clear_incoming_entry(HeapRegion* from_hr);

  // Declare the heap size (in # of regions) to the OtherRegionsTable.
  // (Uses it to initialize from_card_cache).
  static void init_from_card_cache(size_t max_regions);

  // Declares that only regions i s.t. 0 <= i < new_n_regs are in use.
  // Make sure any entries for higher regions are invalid.
  static void shrink_from_card_cache(size_t new_n_regs);

  static void print_from_card_cache();

};
165 | |
166 | |
167 class HeapRegionRemSet : public CHeapObj { | |
168 friend class VMStructs; | |
169 friend class HeapRegionRemSetIterator; | |
170 | |
171 public: | |
172 enum Event { | |
173 Event_EvacStart, Event_EvacEnd, Event_RSUpdateEnd | |
174 }; | |
175 | |
176 private: | |
177 G1BlockOffsetSharedArray* _bosa; | |
178 G1BlockOffsetSharedArray* bosa() const { return _bosa; } | |
179 | |
180 OtherRegionsTable _other_regions; | |
181 | |
182 // One set bit for every region that has an entry for this one. | |
183 BitMap _outgoing_region_map; | |
184 | |
185 // Clear entries for the current region in any rem sets named in | |
186 // the _outgoing_region_map. | |
187 void clear_outgoing_entries(); | |
188 | |
189 enum ParIterState { Unclaimed, Claimed, Complete }; | |
190 ParIterState _iter_state; | |
191 | |
192 // Unused unless G1RecordHRRSOops is true. | |
193 | |
194 static const int MaxRecorded = 1000000; | |
195 static oop** _recorded_oops; | |
196 static HeapWord** _recorded_cards; | |
197 static HeapRegion** _recorded_regions; | |
198 static int _n_recorded; | |
199 | |
200 static const int MaxRecordedEvents = 1000; | |
201 static Event* _recorded_events; | |
202 static int* _recorded_event_index; | |
203 static int _n_recorded_events; | |
204 | |
205 static void print_event(outputStream* str, Event evnt); | |
206 | |
207 public: | |
208 HeapRegionRemSet(G1BlockOffsetSharedArray* bosa, | |
209 HeapRegion* hr); | |
210 | |
211 static int num_par_rem_sets(); | |
212 | |
213 HeapRegion* hr() const { | |
214 return _other_regions.hr(); | |
215 } | |
216 | |
217 size_t occupied() const { | |
218 return _other_regions.occupied(); | |
219 } | |
220 size_t occ_fine() const { | |
221 return _other_regions.occ_fine(); | |
222 } | |
223 size_t occ_coarse() const { | |
224 return _other_regions.occ_coarse(); | |
225 } | |
226 size_t occ_sparse() const { | |
227 return _other_regions.occ_sparse(); | |
228 } | |
229 | |
230 static jint n_coarsenings() { return OtherRegionsTable::n_coarsenings(); } | |
231 | |
232 /* Used in the sequential case. Returns "true" iff this addition causes | |
233 the size limit to be reached. */ | |
677 | 234 void add_reference(oop* from) { |
342 | 235 _other_regions.add_reference(from); |
236 } | |
237 | |
238 /* Used in the parallel case. Returns "true" iff this addition causes | |
239 the size limit to be reached. */ | |
677 | 240 void add_reference(oop* from, int tid) { |
342 | 241 _other_regions.add_reference(from, tid); |
242 } | |
243 | |
244 // Records the fact that the current region contains an outgoing | |
245 // reference into "to_hr". | |
246 void add_outgoing_reference(HeapRegion* to_hr); | |
247 | |
248 // Removes any entries shown by the given bitmaps to contain only dead | |
249 // objects. | |
250 void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm); | |
251 | |
252 // The region is being reclaimed; clear its remset, and any mention of | |
253 // entries for this region in other remsets. | |
254 void clear(); | |
255 | |
256 // Forget any entries due to pointers from "from_hr". | |
257 void clear_incoming_entry(HeapRegion* from_hr) { | |
258 _other_regions.clear_incoming_entry(from_hr); | |
259 } | |
260 | |
261 #if 0 | |
262 virtual void cleanup() = 0; | |
263 #endif | |
264 | |
265 // Should be called from single-threaded code. | |
266 void init_for_par_iteration(); | |
267 // Attempt to claim the region. Returns true iff this call caused an | |
268 // atomic transition from Unclaimed to Claimed. | |
269 bool claim_iter(); | |
270 // Sets the iteration state to "complete". | |
271 void set_iter_complete(); | |
272 // Returns "true" iff the region's iteration is complete. | |
273 bool iter_is_complete(); | |
274 | |
275 // Initialize the given iterator to iterate over this rem set. | |
276 void init_iterator(HeapRegionRemSetIterator* iter) const; | |
277 | |
278 #if 0 | |
279 // Apply the "do_card" method to the start address of every card in the | |
280 // rem set. Returns false if some application of the closure aborted. | |
281 virtual bool card_iterate(CardClosure* iter) = 0; | |
282 #endif | |
283 | |
284 // The actual # of bytes this hr_remset takes up. | |
285 size_t mem_size() { | |
286 return _other_regions.mem_size() | |
287 // This correction is necessary because the above includes the second | |
288 // part. | |
289 + sizeof(this) - sizeof(OtherRegionsTable); | |
290 } | |
291 | |
292 // Returns the memory occupancy of all static data structures associated | |
293 // with remembered sets. | |
294 static size_t static_mem_size() { | |
295 return OtherRegionsTable::static_mem_size(); | |
296 } | |
297 | |
298 // Returns the memory occupancy of all free_list data structures associated | |
299 // with remembered sets. | |
300 static size_t fl_mem_size() { | |
301 return OtherRegionsTable::fl_mem_size(); | |
302 } | |
303 | |
304 bool contains_reference(oop* from) const { | |
305 return _other_regions.contains_reference(from); | |
306 } | |
307 void print() const; | |
308 | |
309 // Called during a stop-world phase to perform any deferred cleanups. | |
310 // The second version may be called by parallel threads after then finish | |
311 // collection work. | |
312 static void cleanup(); | |
313 static void par_cleanup(); | |
314 | |
315 // Declare the heap size (in # of regions) to the HeapRegionRemSet(s). | |
316 // (Uses it to initialize from_card_cache). | |
317 static void init_heap(size_t max_regions) { | |
318 OtherRegionsTable::init_from_card_cache(max_regions); | |
319 } | |
320 | |
321 // Declares that only regions i s.t. 0 <= i < new_n_regs are in use. | |
322 static void shrink_heap(size_t new_n_regs) { | |
323 OtherRegionsTable::shrink_from_card_cache(new_n_regs); | |
324 } | |
325 | |
326 #ifndef PRODUCT | |
327 static void print_from_card_cache() { | |
328 OtherRegionsTable::print_from_card_cache(); | |
329 } | |
330 #endif | |
331 | |
332 static void record(HeapRegion* hr, oop* f); | |
333 static void print_recorded(); | |
334 static void record_event(Event evnt); | |
335 | |
336 // Run unit tests. | |
337 #ifndef PRODUCT | |
338 static void test(); | |
339 #endif | |
340 | |
341 }; | |
342 | |
// Iterator over every card index recorded in one region's remembered set,
// walking all three granularities (sparse, fine, coarse) in turn.
// Must be initialize()d against a HeapRegionRemSet before use.
class HeapRegionRemSetIterator : public CHeapObj {

  // The region over which we're iterating.
  const HeapRegionRemSet* _hrrs;

  // Local caching of HRRS fields.
  const BitMap* _coarse_map;
  PosParPRT**   _fine_grain_regions;

  G1BlockOffsetSharedArray* _bosa;
  G1CollectedHeap*          _g1h;

  // The number yielded since initialization, per granularity.
  size_t _n_yielded_fine;
  size_t _n_yielded_coarse;
  size_t _n_yielded_sparse;

  // Which of the three tables is currently being iterated.
  // (The declaration order suggests the traversal order is sparse, then
  // fine, then coarse -- confirm against has_next() in the .cpp.)
  enum IterState {
    Sparse,
    Fine,
    Coarse
  };
  IterState _is;

  // In both kinds of iteration, heap offset of first card of current
  // region.
  size_t _cur_region_card_offset;
  // Card offset within cur region.
  size_t _cur_region_cur_card;

  // Coarse table iteration fields:

  // Current region index;
  int _coarse_cur_region_index;
  int _coarse_cur_region_cur_card;

  // Advance coarse iteration; on success sets "card_index" and returns true.
  bool coarse_has_next(size_t& card_index);

  // Fine table iteration fields:

  // Index of bucket-list we're working on.
  int _fine_array_index;
  // Per Region Table we're doing within current bucket list.
  PosParPRT* _fine_cur_prt;

  // Sub-iterator for the sparse table.
  /* SparsePRT:: */ SparsePRTIter _sparse_iter;

  // Advance _fine_array_index/_fine_cur_prt to the next non-empty bucket.
  void fine_find_next_non_null_prt();

  bool fine_has_next();
  bool fine_has_next(size_t& card_index);

public:
  // We require an iterator to be initialized before use, so the
  // constructor does little.
  HeapRegionRemSetIterator();

  void initialize(const HeapRegionRemSet* hrrs);

  // If there remains one or more cards to be yielded, returns true and
  // sets "card_index" to one of those cards (which is then considered
  // yielded.)  Otherwise, returns false (and leaves "card_index"
  // undefined.)
  bool has_next(size_t& card_index);

  // Counts of cards yielded so far, per granularity and in total.
  size_t n_yielded_fine() { return _n_yielded_fine; }
  size_t n_yielded_coarse() { return _n_yielded_coarse; }
  size_t n_yielded_sparse() { return _n_yielded_sparse; }
  size_t n_yielded() {
    return n_yielded_fine() + n_yielded_coarse() + n_yielded_sparse();
  }
};
417 | |
#if 0
// Disabled along with the card_iterate() declaration it would serve:
// abstract closure applied to the start address of each card in a rem set.
class CardClosure: public Closure {
public:
  virtual void do_card(HeapWord* card_start) = 0;
};

#endif
424 #endif |