Mercurial > hg > truffle
annotate src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp @ 652:4aaa9f5e02a8
4766230: Hotspot vtable inconsistencies cause core dumps. 6579515. 6582242.
Reviewed-by: kamg, coleenp
author | acorn |
---|---|
date | Wed, 18 Mar 2009 17:20:57 -0400 |
parents | 0fbdb4381b99 |
children | 96b229c54d1e |
rev | line source |
---|---|
342 | 1 /* |
579 | 2 * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. |
342 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, | |
20 * CA 95054 USA or visit www.sun.com if you need additional information or | |
21 * have any questions. | |
22 * | |
23 */ | |
24 | |
25 // Remembered set for a heap region. Represent a set of "cards" that | |
26 // contain pointers into the owner heap region. Cards are defined somewhat | |
27 // abstractly, in terms of what the "BlockOffsetTable" in use can parse. | |
28 | |
29 class G1CollectedHeap; | |
30 class G1BlockOffsetSharedArray; | |
31 class HeapRegion; | |
32 class HeapRegionRemSetIterator; | |
33 class PosParPRT; | |
34 class SparsePRT; | |
35 | |
36 | |
37 // The "_coarse_map" is a bitmap with one bit for each region, where set | |
38 // bits indicate that the corresponding region may contain some pointer | |
39 // into the owning region. | |
40 | |
41 // The "_fine_grain_entries" array is an open hash table of PerRegionTables | |
42 // (PRTs), indicating regions for which we're keeping the RS as a set of | |
43 // cards. The strategy is to cap the size of the fine-grain table, | |
44 // deleting an entry and setting the corresponding coarse-grained bit when | |
45 // we would overflow this cap. | |
46 | |
47 // We use a mixture of locking and lock-free techniques here. We allow | |
48 // threads to locate PRTs without locking, but threads attempting to alter | |
49 // a bucket list obtain a lock. This means that any failing attempt to | |
50 // find a PRT must be retried with the lock. It might seem dangerous that | |
51 // a read can find a PRT that is concurrently deleted. This is all right, | |
52 // because: | |
53 // | |
54 // 1) We only actually free PRT's at safe points (though we reuse them at | |
55 // other times). | |
// 2) We find PRT's in an attempt to add entries.  If a PRT is deleted,
//    its _coarse_map bit is set, so the entry that we were attempting to
//    add is represented.  If a deleted PRT is re-used, a thread adding a
//    bit, thinking the PRT is for a different region, does no harm.
60 | |
549
fe3d7c11b4b7
6700941: G1: allocation spec missing for some G1 classes
apetrusenko
parents:
342
diff
changeset
|
// Tracks, for one owning heap region, the set of cards in *other* regions
// that may contain pointers into it.  Three representations of increasing
// coarseness are used: a sparse per-region table (_sparse_table), an open
// hash table of fine-grain PerRegionTables (_fine_grain_regions), and a
// one-bit-per-region coarse bitmap (_coarse_map).  See the comments above
// this class for the locking / lock-free access protocol.
class OtherRegionsTable VALUE_OBJ_CLASS_SPEC {
  friend class HeapRegionRemSetIterator;

  G1CollectedHeap* _g1h;
  Mutex _m;
  HeapRegion* _hr;      // The region owning this table.

  // These are protected by "_m".
  BitMap _coarse_map;          // One bit per region; set => that region may
                               // contain pointers into the owning region.
  size_t _n_coarse_entries;
  static jint _n_coarsenings;  // Global count of fine->coarse transitions.

  PosParPRT** _fine_grain_regions;  // Open hash table of PRT bucket lists.
  size_t _n_fine_entries;

#define SAMPLE_FOR_EVICTION 1
#if SAMPLE_FOR_EVICTION
  // State for sampled eviction of fine-grain entries when the table fills.
  size_t _fine_eviction_start;
  static size_t _fine_eviction_stride;
  static size_t _fine_eviction_sample_size;
#endif

  SparsePRT _sparse_table;

  // These are static after init.
  static size_t _max_fine_entries;           // Cap on fine-grain table size.
  static size_t _mod_max_fine_entries_mask;  // presumably _max_fine_entries-1,
                                             // used to mask hash values — confirm.

  // Requires "prt" to be the first element of the bucket list appropriate
  // for "hr".  If this list contains an entry for "hr", return it,
  // otherwise return "NULL".
  PosParPRT* find_region_table(size_t ind, HeapRegion* hr) const;

  // Find, delete, and return a candidate PosParPRT, if any exists,
  // adding the deleted region to the coarse bitmap.  Requires the caller
  // to hold _m, and the fine-grain table to be full.
  PosParPRT* delete_region_table();

  // If a PRT for "hr" is in the bucket list indicated by "ind" (which must
  // be the correct index for "hr"), delete it and return true; else return
  // false.
  bool del_single_region_table(size_t ind, HeapRegion* hr);

  static jint _cache_probes;
  static jint _cache_hits;

  // Indexed by thread X heap region, to minimize thread contention.
  static int** _from_card_cache;
  static size_t _from_card_cache_max_regions;
  static size_t _from_card_cache_mem_size;

public:
  OtherRegionsTable(HeapRegion* hr);

  // The region this table belongs to.
  HeapRegion* hr() const { return _hr; }

  // Record that the card containing "from" may hold a pointer into the
  // owning region.  For now.  Could "expand" some tables in the future,
  // so that this made sense.
  void add_reference(oop* from, int tid);

  // Single-threaded variant: delegates to the parallel version with tid 0.
  void add_reference(oop* from) {
    return add_reference(from, 0);
  }

  // Removes any entries shown by the given bitmaps to contain only dead
  // objects.
  void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm);

  // Occupancy counts (total and per-representation).
  // NOTE(review): the original comment read "Not const because it takes a
  // lock", yet these are declared const — presumably the lock acquisition
  // is considered logically const; confirm against the implementation.
  size_t occupied() const;
  size_t occ_fine() const;
  size_t occ_coarse() const;
  size_t occ_sparse() const;

  static jint n_coarsenings() { return _n_coarsenings; }

  // Returns size in bytes.
  // NOTE(review): original comment "Not const because it takes a lock"
  // contradicts the const declaration — see note above.
  size_t mem_size() const;
  static size_t static_mem_size();
  static size_t fl_mem_size();

  bool contains_reference(oop* from) const;
  bool contains_reference_locked(oop* from) const;

  void clear();

  // Specifically clear the from_card_cache.
  void clear_fcc();

  // "from_hr" is being cleared; remove any entries from it.
  void clear_incoming_entry(HeapRegion* from_hr);

  // Declare the heap size (in # of regions) to the OtherRegionsTable.
  // (Uses it to initialize from_card_cache).
  static void init_from_card_cache(size_t max_regions);

  // Declares that only regions i s.t. 0 <= i < new_n_regs are in use.
  // Make sure any entries for higher regions are invalid.
  static void shrink_from_card_cache(size_t new_n_regs);

  static void print_from_card_cache();

};
165 | |
166 | |
167 class HeapRegionRemSet : public CHeapObj { | |
168 friend class VMStructs; | |
169 friend class HeapRegionRemSetIterator; | |
170 | |
171 public: | |
172 enum Event { | |
173 Event_EvacStart, Event_EvacEnd, Event_RSUpdateEnd | |
174 }; | |
175 | |
176 private: | |
177 G1BlockOffsetSharedArray* _bosa; | |
178 G1BlockOffsetSharedArray* bosa() const { return _bosa; } | |
179 | |
180 static bool _par_traversal; | |
181 | |
182 OtherRegionsTable _other_regions; | |
183 | |
184 // One set bit for every region that has an entry for this one. | |
185 BitMap _outgoing_region_map; | |
186 | |
187 // Clear entries for the current region in any rem sets named in | |
188 // the _outgoing_region_map. | |
189 void clear_outgoing_entries(); | |
190 | |
191 #if MAYBE | |
192 // Audit the given card index. | |
193 void audit_card(size_t card_num, HeapRegion* hr, u2* rc_arr, | |
194 HeapRegionRemSet* empty_cards, size_t* one_obj_cards); | |
195 | |
196 // Assumes that "audit_stage1" has been called for "hr", to set up | |
197 // "shadow" and "new_rs" appropriately. Identifies individual popular | |
198 // objects; returns "true" if any are found. | |
199 bool audit_find_pop(HeapRegion* hr, u2* rc_arr); | |
200 | |
201 // Assumes that "audit_stage1" has been called for "hr", to set up | |
202 // "shadow" and "new_rs" appropriately. Identifies individual popular | |
203 // objects, and determines the number of entries in "new_rs" if any such | |
204 // popular objects are ignored. If this is sufficiently small, returns | |
205 // "false" to indicate that a constraint should not be introduced. | |
206 // Otherwise, returns "true" to indicate that we should go ahead with | |
207 // adding the constraint. | |
208 bool audit_stag(HeapRegion* hr, u2* rc_arr); | |
209 | |
210 | |
211 u2* alloc_rc_array(); | |
212 | |
213 SeqHeapRegionRemSet* audit_post(u2* rc_arr, size_t multi_obj_crds, | |
214 SeqHeapRegionRemSet* empty_cards); | |
215 #endif | |
216 | |
217 enum ParIterState { Unclaimed, Claimed, Complete }; | |
218 ParIterState _iter_state; | |
219 | |
220 // Unused unless G1RecordHRRSOops is true. | |
221 | |
222 static const int MaxRecorded = 1000000; | |
223 static oop** _recorded_oops; | |
224 static HeapWord** _recorded_cards; | |
225 static HeapRegion** _recorded_regions; | |
226 static int _n_recorded; | |
227 | |
228 static const int MaxRecordedEvents = 1000; | |
229 static Event* _recorded_events; | |
230 static int* _recorded_event_index; | |
231 static int _n_recorded_events; | |
232 | |
233 static void print_event(outputStream* str, Event evnt); | |
234 | |
235 public: | |
236 HeapRegionRemSet(G1BlockOffsetSharedArray* bosa, | |
237 HeapRegion* hr); | |
238 | |
239 static int num_par_rem_sets(); | |
240 static bool par_traversal() { return _par_traversal; } | |
241 static void set_par_traversal(bool b); | |
242 | |
243 HeapRegion* hr() const { | |
244 return _other_regions.hr(); | |
245 } | |
246 | |
247 size_t occupied() const { | |
248 return _other_regions.occupied(); | |
249 } | |
250 size_t occ_fine() const { | |
251 return _other_regions.occ_fine(); | |
252 } | |
253 size_t occ_coarse() const { | |
254 return _other_regions.occ_coarse(); | |
255 } | |
256 size_t occ_sparse() const { | |
257 return _other_regions.occ_sparse(); | |
258 } | |
259 | |
260 static jint n_coarsenings() { return OtherRegionsTable::n_coarsenings(); } | |
261 | |
262 /* Used in the sequential case. Returns "true" iff this addition causes | |
263 the size limit to be reached. */ | |
264 bool add_reference(oop* from) { | |
265 _other_regions.add_reference(from); | |
266 return false; | |
267 } | |
268 | |
269 /* Used in the parallel case. Returns "true" iff this addition causes | |
270 the size limit to be reached. */ | |
271 bool add_reference(oop* from, int tid) { | |
272 _other_regions.add_reference(from, tid); | |
273 return false; | |
274 } | |
275 | |
276 // Records the fact that the current region contains an outgoing | |
277 // reference into "to_hr". | |
278 void add_outgoing_reference(HeapRegion* to_hr); | |
279 | |
280 // Removes any entries shown by the given bitmaps to contain only dead | |
281 // objects. | |
282 void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm); | |
283 | |
284 // The region is being reclaimed; clear its remset, and any mention of | |
285 // entries for this region in other remsets. | |
286 void clear(); | |
287 | |
288 // Forget any entries due to pointers from "from_hr". | |
289 void clear_incoming_entry(HeapRegion* from_hr) { | |
290 _other_regions.clear_incoming_entry(from_hr); | |
291 } | |
292 | |
293 #if 0 | |
294 virtual void cleanup() = 0; | |
295 #endif | |
296 | |
297 // Should be called from single-threaded code. | |
298 void init_for_par_iteration(); | |
299 // Attempt to claim the region. Returns true iff this call caused an | |
300 // atomic transition from Unclaimed to Claimed. | |
301 bool claim_iter(); | |
302 // Sets the iteration state to "complete". | |
303 void set_iter_complete(); | |
304 // Returns "true" iff the region's iteration is complete. | |
305 bool iter_is_complete(); | |
306 | |
307 // Initialize the given iterator to iterate over this rem set. | |
308 void init_iterator(HeapRegionRemSetIterator* iter) const; | |
309 | |
310 #if 0 | |
311 // Apply the "do_card" method to the start address of every card in the | |
312 // rem set. Returns false if some application of the closure aborted. | |
313 virtual bool card_iterate(CardClosure* iter) = 0; | |
314 #endif | |
315 | |
316 // The actual # of bytes this hr_remset takes up. | |
317 size_t mem_size() { | |
318 return _other_regions.mem_size() | |
319 // This correction is necessary because the above includes the second | |
320 // part. | |
321 + sizeof(this) - sizeof(OtherRegionsTable); | |
322 } | |
323 | |
324 // Returns the memory occupancy of all static data structures associated | |
325 // with remembered sets. | |
326 static size_t static_mem_size() { | |
327 return OtherRegionsTable::static_mem_size(); | |
328 } | |
329 | |
330 // Returns the memory occupancy of all free_list data structures associated | |
331 // with remembered sets. | |
332 static size_t fl_mem_size() { | |
333 return OtherRegionsTable::fl_mem_size(); | |
334 } | |
335 | |
336 bool contains_reference(oop* from) const { | |
337 return _other_regions.contains_reference(from); | |
338 } | |
339 void print() const; | |
340 | |
341 #if MAYBE | |
342 // We are about to introduce a constraint, requiring the collection time | |
343 // of the region owning this RS to be <= "hr", and forgetting pointers | |
344 // from the owning region to "hr." Before doing so, examines this rem | |
345 // set for pointers to "hr", possibly identifying some popular objects., | |
346 // and possibly finding some cards to no longer contain pointers to "hr", | |
347 // | |
348 // These steps may prevent the the constraint from being necessary; in | |
349 // which case returns a set of cards now thought to contain no pointers | |
350 // into HR. In the normal (I assume) case, returns NULL, indicating that | |
351 // we should go ahead and add the constraint. | |
352 virtual SeqHeapRegionRemSet* audit(HeapRegion* hr) = 0; | |
353 #endif | |
354 | |
355 // Called during a stop-world phase to perform any deferred cleanups. | |
356 // The second version may be called by parallel threads after then finish | |
357 // collection work. | |
358 static void cleanup(); | |
359 static void par_cleanup(); | |
360 | |
361 // Declare the heap size (in # of regions) to the HeapRegionRemSet(s). | |
362 // (Uses it to initialize from_card_cache). | |
363 static void init_heap(size_t max_regions) { | |
364 OtherRegionsTable::init_from_card_cache(max_regions); | |
365 } | |
366 | |
367 // Declares that only regions i s.t. 0 <= i < new_n_regs are in use. | |
368 static void shrink_heap(size_t new_n_regs) { | |
369 OtherRegionsTable::shrink_from_card_cache(new_n_regs); | |
370 } | |
371 | |
372 #ifndef PRODUCT | |
373 static void print_from_card_cache() { | |
374 OtherRegionsTable::print_from_card_cache(); | |
375 } | |
376 #endif | |
377 | |
378 static void record(HeapRegion* hr, oop* f); | |
379 static void print_recorded(); | |
380 static void record_event(Event evnt); | |
381 | |
382 // Run unit tests. | |
383 #ifndef PRODUCT | |
384 static void test(); | |
385 #endif | |
386 | |
387 }; | |
388 | |
// Iterator over the card indices recorded in a HeapRegionRemSet.
// Presumably yields entries in enum order (Sparse, then Fine, then
// Coarse) — confirm against has_next() in the implementation.
class HeapRegionRemSetIterator : public CHeapObj {

  // The region over which we're iterating.
  const HeapRegionRemSet* _hrrs;

  // Local caching of HRRS fields.
  const BitMap* _coarse_map;
  PosParPRT**   _fine_grain_regions;

  G1BlockOffsetSharedArray* _bosa;
  G1CollectedHeap*          _g1h;

  // The number yielded since initialization, per representation.
  size_t _n_yielded_fine;
  size_t _n_yielded_coarse;
  size_t _n_yielded_sparse;

  // Which table we're currently iterating over.
  // NOTE(review): the original comment described only a coarse/fine
  // choice ("If true ... coarse ... if false ... fine"), but the enum has
  // three states including Sparse.
  enum IterState {
    Sparse,
    Fine,
    Coarse
  };
  IterState _is;

  // In both kinds of iteration, heap offset of first card of current
  // region.
  size_t _cur_region_card_offset;
  // Card offset within cur region.
  size_t _cur_region_cur_card;

  // Coarse table iteration fields:

  // Current region index;
  int _coarse_cur_region_index;
  int _coarse_cur_region_cur_card;

  bool coarse_has_next(size_t& card_index);

  // Fine table iteration fields:

  // Index of bucket-list we're working on.
  int _fine_array_index;
  // Per Region Table we're doing within current bucket list.
  PosParPRT* _fine_cur_prt;

  /* SparsePRT::*/ SparsePRTIter _sparse_iter;

  void fine_find_next_non_null_prt();

  bool fine_has_next();
  bool fine_has_next(size_t& card_index);

public:
  // We require an iterator to be initialized before use, so the
  // constructor does little.
  HeapRegionRemSetIterator();

  // Bind this iterator to "hrrs" and reset all iteration state.
  void initialize(const HeapRegionRemSet* hrrs);

  // If there remains one or more cards to be yielded, returns true and
  // sets "card_index" to one of those cards (which is then considered
  // yielded.)  Otherwise, returns false (and leaves "card_index"
  // undefined.)
  bool has_next(size_t& card_index);

  size_t n_yielded_fine() { return _n_yielded_fine; }
  size_t n_yielded_coarse() { return _n_yielded_coarse; }
  size_t n_yielded_sparse() { return _n_yielded_sparse; }
  // Total cards yielded so far, across all three representations.
  size_t n_yielded() {
    return n_yielded_fine() + n_yielded_coarse() + n_yielded_sparse();
  }
};
463 | |
#if 0
// Closure interface applied to the start address of each card in a rem
// set (currently compiled out; see the also-disabled
// HeapRegionRemSet::card_iterate above).
class CardClosure: public Closure {
public:
  virtual void do_card(HeapWord* card_start) = 0;
};

#endif