Mercurial > hg > graal-compiler
annotate src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp @ 807:d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
Summary: For heaps larger than 32Gb, the number of heap regions overflows the data type used to hold the region index in the SparsePRT structure. Changed the region indexes, card indexes, and RSet hash table buckets to ints and added some size overflow guarantees.
Reviewed-by: ysr, tonyp
author | johnc |
---|---|
date | Thu, 11 Jun 2009 17:19:33 -0700 |
parents | 215f81b4d9b3 |
children | df6caf649ff7 |
rev | line source |
---|---|
342 | 1 /* |
579 | 2 * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. |
342 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, | |
20 * CA 95054 USA or visit www.sun.com if you need additional information or | |
21 * have any questions. | |
22 * | |
23 */ | |
24 | |
25 #include "incls/_precompiled.incl" | |
26 #include "incls/_heapRegionRemSet.cpp.incl" | |
27 | |
28 #define HRRS_VERBOSE 0 | |
29 | |
30 #define PRT_COUNT_OCCUPIED 1 | |
31 | |
32 // OtherRegionsTable | |
33 | |
34 class PerRegionTable: public CHeapObj { | |
35 friend class OtherRegionsTable; | |
36 friend class HeapRegionRemSetIterator; | |
37 | |
38 HeapRegion* _hr; | |
39 BitMap _bm; | |
40 #if PRT_COUNT_OCCUPIED | |
41 jint _occupied; | |
42 #endif | |
43 PerRegionTable* _next_free; | |
44 | |
45 PerRegionTable* next_free() { return _next_free; } | |
46 void set_next_free(PerRegionTable* prt) { _next_free = prt; } | |
47 | |
48 | |
49 static PerRegionTable* _free_list; | |
50 | |
51 #ifdef _MSC_VER | |
52 // For some reason even though the classes are marked as friend they are unable | |
53 // to access CardsPerRegion when private/protected. Only the windows c++ compiler | |
54 // says this Sun CC and linux gcc don't have a problem with access when private | |
55 | |
56 public: | |
57 | |
58 #endif // _MSC_VER | |
59 | |
60 enum SomePrivateConstants { | |
61 CardsPerRegion = HeapRegion::GrainBytes >> CardTableModRefBS::card_shift | |
62 }; | |
63 | |
64 protected: | |
65 // We need access in order to union things into the base table. | |
66 BitMap* bm() { return &_bm; } | |
67 | |
545 | 68 #if PRT_COUNT_OCCUPIED |
342 | 69 void recount_occupied() { |
70 _occupied = (jint) bm()->count_one_bits(); | |
71 } | |
545 | 72 #endif |
342 | 73 |
74 PerRegionTable(HeapRegion* hr) : | |
75 _hr(hr), | |
76 #if PRT_COUNT_OCCUPIED | |
77 _occupied(0), | |
78 #endif | |
79 _bm(CardsPerRegion, false /* in-resource-area */) | |
80 {} | |
81 | |
82 static void free(PerRegionTable* prt) { | |
83 while (true) { | |
84 PerRegionTable* fl = _free_list; | |
85 prt->set_next_free(fl); | |
86 PerRegionTable* res = | |
87 (PerRegionTable*) | |
88 Atomic::cmpxchg_ptr(prt, &_free_list, fl); | |
89 if (res == fl) return; | |
90 } | |
91 ShouldNotReachHere(); | |
92 } | |
93 | |
94 static PerRegionTable* alloc(HeapRegion* hr) { | |
95 PerRegionTable* fl = _free_list; | |
96 while (fl != NULL) { | |
97 PerRegionTable* nxt = fl->next_free(); | |
98 PerRegionTable* res = | |
99 (PerRegionTable*) | |
100 Atomic::cmpxchg_ptr(nxt, &_free_list, fl); | |
101 if (res == fl) { | |
102 fl->init(hr); | |
103 return fl; | |
104 } else { | |
105 fl = _free_list; | |
106 } | |
107 } | |
108 assert(fl == NULL, "Loop condition."); | |
109 return new PerRegionTable(hr); | |
110 } | |
111 | |
807
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
795
diff
changeset
|
112 void add_card_work(CardIdx_t from_card, bool par) { |
342 | 113 if (!_bm.at(from_card)) { |
114 if (par) { | |
115 if (_bm.par_at_put(from_card, 1)) { | |
116 #if PRT_COUNT_OCCUPIED | |
117 Atomic::inc(&_occupied); | |
118 #endif | |
119 } | |
120 } else { | |
121 _bm.at_put(from_card, 1); | |
122 #if PRT_COUNT_OCCUPIED | |
123 _occupied++; | |
124 #endif | |
125 } | |
126 } | |
127 } | |
128 | |
129 void add_reference_work(oop* from, bool par) { | |
130 // Must make this robust in case "from" is not in "_hr", because of | |
131 // concurrency. | |
132 | |
133 #if HRRS_VERBOSE | |
134 gclog_or_tty->print_cr(" PRT::Add_reference_work(" PTR_FORMAT "->" PTR_FORMAT").", | |
135 from, *from); | |
136 #endif | |
137 | |
138 HeapRegion* loc_hr = hr(); | |
139 // If the test below fails, then this table was reused concurrently | |
140 // with this operation. This is OK, since the old table was coarsened, | |
141 // and adding a bit to the new table is never incorrect. | |
142 if (loc_hr->is_in_reserved(from)) { | |
143 size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom()); | |
807
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
795
diff
changeset
|
144 CardIdx_t from_card = (CardIdx_t) |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
795
diff
changeset
|
145 hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize); |
342 | 146 |
807
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
795
diff
changeset
|
147 assert(0 <= from_card && from_card < CardsPerRegion, "Must be in range."); |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
795
diff
changeset
|
148 add_card_work(from_card, par); |
342 | 149 } |
150 } | |
151 | |
152 public: | |
153 | |
154 HeapRegion* hr() const { return _hr; } | |
155 | |
156 #if PRT_COUNT_OCCUPIED | |
157 jint occupied() const { | |
158 // Overkill, but if we ever need it... | |
159 // guarantee(_occupied == _bm.count_one_bits(), "Check"); | |
160 return _occupied; | |
161 } | |
162 #else | |
163 jint occupied() const { | |
164 return _bm.count_one_bits(); | |
165 } | |
166 #endif | |
167 | |
168 void init(HeapRegion* hr) { | |
169 _hr = hr; | |
170 #if PRT_COUNT_OCCUPIED | |
171 _occupied = 0; | |
172 #endif | |
173 _bm.clear(); | |
174 } | |
175 | |
176 void add_reference(oop* from) { | |
177 add_reference_work(from, /*parallel*/ true); | |
178 } | |
179 | |
180 void seq_add_reference(oop* from) { | |
181 add_reference_work(from, /*parallel*/ false); | |
182 } | |
183 | |
184 void scrub(CardTableModRefBS* ctbs, BitMap* card_bm) { | |
185 HeapWord* hr_bot = hr()->bottom(); | |
489
2494ab195856
6653214: MemoryPoolMXBean.setUsageThreshold() does not support large heap sizes.
swamyv
parents:
342
diff
changeset
|
186 size_t hr_first_card_index = ctbs->index_for(hr_bot); |
342 | 187 bm()->set_intersection_at_offset(*card_bm, hr_first_card_index); |
188 #if PRT_COUNT_OCCUPIED | |
189 recount_occupied(); | |
190 #endif | |
191 } | |
192 | |
807
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
795
diff
changeset
|
193 void add_card(CardIdx_t from_card_index) { |
342 | 194 add_card_work(from_card_index, /*parallel*/ true); |
195 } | |
196 | |
807
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
795
diff
changeset
|
197 void seq_add_card(CardIdx_t from_card_index) { |
342 | 198 add_card_work(from_card_index, /*parallel*/ false); |
199 } | |
200 | |
201 // (Destructively) union the bitmap of the current table into the given | |
202 // bitmap (which is assumed to be of the same size.) | |
203 void union_bitmap_into(BitMap* bm) { | |
204 bm->set_union(_bm); | |
205 } | |
206 | |
207 // Mem size in bytes. | |
208 size_t mem_size() const { | |
209 return sizeof(this) + _bm.size_in_words() * HeapWordSize; | |
210 } | |
211 | |
212 static size_t fl_mem_size() { | |
213 PerRegionTable* cur = _free_list; | |
214 size_t res = 0; | |
215 while (cur != NULL) { | |
216 res += sizeof(PerRegionTable); | |
217 cur = cur->next_free(); | |
218 } | |
219 return res; | |
220 } | |
221 | |
222 // Requires "from" to be in "hr()". | |
223 bool contains_reference(oop* from) const { | |
224 assert(hr()->is_in_reserved(from), "Precondition."); | |
225 size_t card_ind = pointer_delta(from, hr()->bottom(), | |
226 CardTableModRefBS::card_size); | |
227 return _bm.at(card_ind); | |
228 } | |
229 }; | |
230 | |
231 PerRegionTable* PerRegionTable::_free_list = NULL; | |
232 | |
233 | |
234 #define COUNT_PAR_EXPANDS 0 | |
235 | |
236 #if COUNT_PAR_EXPANDS | |
237 static jint n_par_expands = 0; | |
238 static jint n_par_contracts = 0; | |
239 static jint par_expand_list_len = 0; | |
240 static jint max_par_expand_list_len = 0; | |
241 | |
242 static void print_par_expand() { | |
243 Atomic::inc(&n_par_expands); | |
244 Atomic::inc(&par_expand_list_len); | |
245 if (par_expand_list_len > max_par_expand_list_len) { | |
246 max_par_expand_list_len = par_expand_list_len; | |
247 } | |
248 if ((n_par_expands % 10) == 0) { | |
249 gclog_or_tty->print_cr("\n\n%d par expands: %d contracts, " | |
250 "len = %d, max_len = %d\n.", | |
251 n_par_expands, n_par_contracts, par_expand_list_len, | |
252 max_par_expand_list_len); | |
253 } | |
254 } | |
255 #endif | |
256 | |
257 class PosParPRT: public PerRegionTable { | |
258 PerRegionTable** _par_tables; | |
259 | |
260 enum SomePrivateConstants { | |
261 ReserveParTableExpansion = 1 | |
262 }; | |
263 | |
264 void par_expand() { | |
265 int n = HeapRegionRemSet::num_par_rem_sets()-1; | |
266 if (n <= 0) return; | |
267 if (_par_tables == NULL) { | |
268 PerRegionTable* res = | |
269 (PerRegionTable*) | |
270 Atomic::cmpxchg_ptr((PerRegionTable*)ReserveParTableExpansion, | |
271 &_par_tables, NULL); | |
272 if (res != NULL) return; | |
273 // Otherwise, we reserved the right to do the expansion. | |
274 | |
275 PerRegionTable** ptables = NEW_C_HEAP_ARRAY(PerRegionTable*, n); | |
276 for (int i = 0; i < n; i++) { | |
277 PerRegionTable* ptable = PerRegionTable::alloc(hr()); | |
278 ptables[i] = ptable; | |
279 } | |
280 // Here we do not need an atomic. | |
281 _par_tables = ptables; | |
282 #if COUNT_PAR_EXPANDS | |
283 print_par_expand(); | |
284 #endif | |
285 // We must put this table on the expanded list. | |
286 PosParPRT* exp_head = _par_expanded_list; | |
287 while (true) { | |
288 set_next_par_expanded(exp_head); | |
289 PosParPRT* res = | |
290 (PosParPRT*) | |
291 Atomic::cmpxchg_ptr(this, &_par_expanded_list, exp_head); | |
292 if (res == exp_head) return; | |
293 // Otherwise. | |
294 exp_head = res; | |
295 } | |
296 ShouldNotReachHere(); | |
297 } | |
298 } | |
299 | |
300 void par_contract() { | |
301 assert(_par_tables != NULL, "Precondition."); | |
302 int n = HeapRegionRemSet::num_par_rem_sets()-1; | |
303 for (int i = 0; i < n; i++) { | |
304 _par_tables[i]->union_bitmap_into(bm()); | |
305 PerRegionTable::free(_par_tables[i]); | |
306 _par_tables[i] = NULL; | |
307 } | |
308 #if PRT_COUNT_OCCUPIED | |
309 // We must recount the "occupied." | |
310 recount_occupied(); | |
311 #endif | |
312 FREE_C_HEAP_ARRAY(PerRegionTable*, _par_tables); | |
313 _par_tables = NULL; | |
314 #if COUNT_PAR_EXPANDS | |
315 Atomic::inc(&n_par_contracts); | |
316 Atomic::dec(&par_expand_list_len); | |
317 #endif | |
318 } | |
319 | |
320 static PerRegionTable** _par_table_fl; | |
321 | |
322 PosParPRT* _next; | |
323 | |
324 static PosParPRT* _free_list; | |
325 | |
326 PerRegionTable** par_tables() const { | |
327 assert(uintptr_t(NULL) == 0, "Assumption."); | |
328 if (uintptr_t(_par_tables) <= ReserveParTableExpansion) | |
329 return NULL; | |
330 else | |
331 return _par_tables; | |
332 } | |
333 | |
334 PosParPRT* _next_par_expanded; | |
335 PosParPRT* next_par_expanded() { return _next_par_expanded; } | |
336 void set_next_par_expanded(PosParPRT* ppprt) { _next_par_expanded = ppprt; } | |
337 static PosParPRT* _par_expanded_list; | |
338 | |
339 public: | |
340 | |
341 PosParPRT(HeapRegion* hr) : PerRegionTable(hr), _par_tables(NULL) {} | |
342 | |
343 jint occupied() const { | |
344 jint res = PerRegionTable::occupied(); | |
345 if (par_tables() != NULL) { | |
346 for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets()-1; i++) { | |
347 res += par_tables()[i]->occupied(); | |
348 } | |
349 } | |
350 return res; | |
351 } | |
352 | |
353 void init(HeapRegion* hr) { | |
354 PerRegionTable::init(hr); | |
355 _next = NULL; | |
356 if (par_tables() != NULL) { | |
357 for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets()-1; i++) { | |
358 par_tables()[i]->init(hr); | |
359 } | |
360 } | |
361 } | |
362 | |
363 static void free(PosParPRT* prt) { | |
364 while (true) { | |
365 PosParPRT* fl = _free_list; | |
366 prt->set_next(fl); | |
367 PosParPRT* res = | |
368 (PosParPRT*) | |
369 Atomic::cmpxchg_ptr(prt, &_free_list, fl); | |
370 if (res == fl) return; | |
371 } | |
372 ShouldNotReachHere(); | |
373 } | |
374 | |
375 static PosParPRT* alloc(HeapRegion* hr) { | |
376 PosParPRT* fl = _free_list; | |
377 while (fl != NULL) { | |
378 PosParPRT* nxt = fl->next(); | |
379 PosParPRT* res = | |
380 (PosParPRT*) | |
381 Atomic::cmpxchg_ptr(nxt, &_free_list, fl); | |
382 if (res == fl) { | |
383 fl->init(hr); | |
384 return fl; | |
385 } else { | |
386 fl = _free_list; | |
387 } | |
388 } | |
389 assert(fl == NULL, "Loop condition."); | |
390 return new PosParPRT(hr); | |
391 } | |
392 | |
393 PosParPRT* next() const { return _next; } | |
394 void set_next(PosParPRT* nxt) { _next = nxt; } | |
395 PosParPRT** next_addr() { return &_next; } | |
396 | |
397 void add_reference(oop* from, int tid) { | |
398 // Expand if necessary. | |
399 PerRegionTable** pt = par_tables(); | |
400 if (par_tables() == NULL && tid > 0 && hr()->is_gc_alloc_region()) { | |
401 par_expand(); | |
402 pt = par_tables(); | |
403 } | |
404 if (pt != NULL) { | |
405 // We always have to assume that mods to table 0 are in parallel, | |
406 // because of the claiming scheme in parallel expansion. A thread | |
407 // with tid != 0 that finds the table to be NULL, but doesn't succeed | |
408 // in claiming the right of expanding it, will end up in the else | |
409 // clause of the above if test. That thread could be delayed, and a | |
410 // thread 0 add reference could see the table expanded, and come | |
411 // here. Both threads would be adding in parallel. But we get to | |
412 // not use atomics for tids > 0. | |
413 if (tid == 0) { | |
414 PerRegionTable::add_reference(from); | |
415 } else { | |
416 pt[tid-1]->seq_add_reference(from); | |
417 } | |
418 } else { | |
419 // Not expanded -- add to the base table. | |
420 PerRegionTable::add_reference(from); | |
421 } | |
422 } | |
423 | |
424 void scrub(CardTableModRefBS* ctbs, BitMap* card_bm) { | |
425 assert(_par_tables == NULL, "Precondition"); | |
426 PerRegionTable::scrub(ctbs, card_bm); | |
427 } | |
428 | |
429 size_t mem_size() const { | |
430 size_t res = | |
431 PerRegionTable::mem_size() + sizeof(this) - sizeof(PerRegionTable); | |
432 if (_par_tables != NULL) { | |
433 for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets()-1; i++) { | |
434 res += _par_tables[i]->mem_size(); | |
435 } | |
436 } | |
437 return res; | |
438 } | |
439 | |
440 static size_t fl_mem_size() { | |
441 PosParPRT* cur = _free_list; | |
442 size_t res = 0; | |
443 while (cur != NULL) { | |
444 res += sizeof(PosParPRT); | |
445 cur = cur->next(); | |
446 } | |
447 return res; | |
448 } | |
449 | |
450 bool contains_reference(oop* from) const { | |
451 if (PerRegionTable::contains_reference(from)) return true; | |
452 if (_par_tables != NULL) { | |
453 for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets()-1; i++) { | |
454 if (_par_tables[i]->contains_reference(from)) return true; | |
455 } | |
456 } | |
457 return false; | |
458 } | |
459 | |
460 static void par_contract_all(); | |
461 | |
462 }; | |
463 | |
464 void PosParPRT::par_contract_all() { | |
465 PosParPRT* hd = _par_expanded_list; | |
466 while (hd != NULL) { | |
467 PosParPRT* nxt = hd->next_par_expanded(); | |
468 PosParPRT* res = | |
469 (PosParPRT*) | |
470 Atomic::cmpxchg_ptr(nxt, &_par_expanded_list, hd); | |
471 if (res == hd) { | |
472 // We claimed the right to contract this table. | |
473 hd->set_next_par_expanded(NULL); | |
474 hd->par_contract(); | |
475 hd = _par_expanded_list; | |
476 } else { | |
477 hd = res; | |
478 } | |
479 } | |
480 } | |
481 | |
482 PosParPRT* PosParPRT::_free_list = NULL; | |
483 PosParPRT* PosParPRT::_par_expanded_list = NULL; | |
484 | |
485 jint OtherRegionsTable::_cache_probes = 0; | |
486 jint OtherRegionsTable::_cache_hits = 0; | |
487 | |
488 size_t OtherRegionsTable::_max_fine_entries = 0; | |
489 size_t OtherRegionsTable::_mod_max_fine_entries_mask = 0; | |
490 #if SAMPLE_FOR_EVICTION | |
491 size_t OtherRegionsTable::_fine_eviction_stride = 0; | |
492 size_t OtherRegionsTable::_fine_eviction_sample_size = 0; | |
493 #endif | |
494 | |
495 OtherRegionsTable::OtherRegionsTable(HeapRegion* hr) : | |
496 _g1h(G1CollectedHeap::heap()), | |
497 _m(Mutex::leaf, "An OtherRegionsTable lock", true), | |
498 _hr(hr), | |
499 _coarse_map(G1CollectedHeap::heap()->max_regions(), | |
500 false /* in-resource-area */), | |
501 _fine_grain_regions(NULL), | |
502 _n_fine_entries(0), _n_coarse_entries(0), | |
503 #if SAMPLE_FOR_EVICTION | |
504 _fine_eviction_start(0), | |
505 #endif | |
506 _sparse_table(hr) | |
507 { | |
508 typedef PosParPRT* PosParPRTPtr; | |
509 if (_max_fine_entries == 0) { | |
510 assert(_mod_max_fine_entries_mask == 0, "Both or none."); | |
645
c3a720eefe82
6816308: Changes to allow builds with latest Windows SDK 6.1 on 64bit Windows 2003
kvn
parents:
545
diff
changeset
|
511 _max_fine_entries = (size_t)(1 << G1LogRSRegionEntries); |
342 | 512 _mod_max_fine_entries_mask = _max_fine_entries - 1; |
513 #if SAMPLE_FOR_EVICTION | |
514 assert(_fine_eviction_sample_size == 0 | |
515 && _fine_eviction_stride == 0, "All init at same time."); | |
516 _fine_eviction_sample_size = MAX2((size_t)4, (size_t)G1LogRSRegionEntries); | |
517 _fine_eviction_stride = _max_fine_entries / _fine_eviction_sample_size; | |
518 #endif | |
519 } | |
520 _fine_grain_regions = new PosParPRTPtr[_max_fine_entries]; | |
521 if (_fine_grain_regions == NULL) | |
522 vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries, | |
523 "Failed to allocate _fine_grain_entries."); | |
524 for (size_t i = 0; i < _max_fine_entries; i++) { | |
525 _fine_grain_regions[i] = NULL; | |
526 } | |
527 } | |
528 | |
529 int** OtherRegionsTable::_from_card_cache = NULL; | |
530 size_t OtherRegionsTable::_from_card_cache_max_regions = 0; | |
531 size_t OtherRegionsTable::_from_card_cache_mem_size = 0; | |
532 | |
533 void OtherRegionsTable::init_from_card_cache(size_t max_regions) { | |
534 _from_card_cache_max_regions = max_regions; | |
535 | |
536 int n_par_rs = HeapRegionRemSet::num_par_rem_sets(); | |
537 _from_card_cache = NEW_C_HEAP_ARRAY(int*, n_par_rs); | |
538 for (int i = 0; i < n_par_rs; i++) { | |
539 _from_card_cache[i] = NEW_C_HEAP_ARRAY(int, max_regions); | |
540 for (size_t j = 0; j < max_regions; j++) { | |
541 _from_card_cache[i][j] = -1; // An invalid value. | |
542 } | |
543 } | |
544 _from_card_cache_mem_size = n_par_rs * max_regions * sizeof(int); | |
545 } | |
546 | |
547 void OtherRegionsTable::shrink_from_card_cache(size_t new_n_regs) { | |
548 for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) { | |
549 assert(new_n_regs <= _from_card_cache_max_regions, "Must be within max."); | |
550 for (size_t j = new_n_regs; j < _from_card_cache_max_regions; j++) { | |
551 _from_card_cache[i][j] = -1; // An invalid value. | |
552 } | |
553 } | |
554 } | |
555 | |
556 #ifndef PRODUCT | |
557 void OtherRegionsTable::print_from_card_cache() { | |
558 for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) { | |
559 for (size_t j = 0; j < _from_card_cache_max_regions; j++) { | |
560 gclog_or_tty->print_cr("_from_card_cache[%d][%d] = %d.", | |
561 i, j, _from_card_cache[i][j]); | |
562 } | |
563 } | |
564 } | |
565 #endif | |
566 | |
567 void OtherRegionsTable::add_reference(oop* from, int tid) { | |
568 size_t cur_hrs_ind = hr()->hrs_index(); | |
569 | |
570 #if HRRS_VERBOSE | |
571 gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").", | |
572 from, *from); | |
573 #endif | |
574 | |
575 int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift); | |
576 | |
577 #if HRRS_VERBOSE | |
578 gclog_or_tty->print_cr("Table for [" PTR_FORMAT "...): card %d (cache = %d)", | |
579 hr()->bottom(), from_card, | |
580 _from_card_cache[tid][cur_hrs_ind]); | |
581 #endif | |
582 | |
583 #define COUNT_CACHE 0 | |
584 #if COUNT_CACHE | |
585 jint p = Atomic::add(1, &_cache_probes); | |
586 if ((p % 10000) == 0) { | |
587 jint hits = _cache_hits; | |
588 gclog_or_tty->print_cr("%d/%d = %5.2f%% RS cache hits.", | |
589 _cache_hits, p, 100.0* (float)hits/(float)p); | |
590 } | |
591 #endif | |
592 if (from_card == _from_card_cache[tid][cur_hrs_ind]) { | |
593 #if HRRS_VERBOSE | |
594 gclog_or_tty->print_cr(" from-card cache hit."); | |
595 #endif | |
596 #if COUNT_CACHE | |
597 Atomic::inc(&_cache_hits); | |
598 #endif | |
599 assert(contains_reference(from), "We just added it!"); | |
600 return; | |
601 } else { | |
602 _from_card_cache[tid][cur_hrs_ind] = from_card; | |
603 } | |
604 | |
605 // Note that this may be a continued H region. | |
606 HeapRegion* from_hr = _g1h->heap_region_containing_raw(from); | |
807
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
795
diff
changeset
|
607 RegionIdx_t from_hrs_ind = (RegionIdx_t) from_hr->hrs_index(); |
342 | 608 |
609 // If the region is already coarsened, return. | |
610 if (_coarse_map.at(from_hrs_ind)) { | |
611 #if HRRS_VERBOSE | |
612 gclog_or_tty->print_cr(" coarse map hit."); | |
613 #endif | |
614 assert(contains_reference(from), "We just added it!"); | |
615 return; | |
616 } | |
617 | |
618 // Otherwise find a per-region table to add it to. | |
619 size_t ind = from_hrs_ind & _mod_max_fine_entries_mask; | |
620 PosParPRT* prt = find_region_table(ind, from_hr); | |
621 if (prt == NULL) { | |
622 MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag); | |
623 // Confirm that it's really not there... | |
624 prt = find_region_table(ind, from_hr); | |
625 if (prt == NULL) { | |
626 | |
627 uintptr_t from_hr_bot_card_index = | |
628 uintptr_t(from_hr->bottom()) | |
629 >> CardTableModRefBS::card_shift; | |
807
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
795
diff
changeset
|
630 CardIdx_t card_index = from_card - from_hr_bot_card_index; |
342 | 631 assert(0 <= card_index && card_index < PosParPRT::CardsPerRegion, |
632 "Must be in range."); | |
633 if (G1HRRSUseSparseTable && | |
807
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
795
diff
changeset
|
634 _sparse_table.add_card(from_hrs_ind, card_index)) { |
342 | 635 if (G1RecordHRRSOops) { |
636 HeapRegionRemSet::record(hr(), from); | |
637 #if HRRS_VERBOSE | |
638 gclog_or_tty->print(" Added card " PTR_FORMAT " to region " | |
639 "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n", | |
640 align_size_down(uintptr_t(from), | |
641 CardTableModRefBS::card_size), | |
642 hr()->bottom(), from); | |
643 #endif | |
644 } | |
645 #if HRRS_VERBOSE | |
646 gclog_or_tty->print_cr(" added card to sparse table."); | |
647 #endif | |
648 assert(contains_reference_locked(from), "We just added it!"); | |
649 return; | |
650 } else { | |
651 #if HRRS_VERBOSE | |
652 gclog_or_tty->print_cr(" [tid %d] sparse table entry " | |
653 "overflow(f: %d, t: %d)", | |
654 tid, from_hrs_ind, cur_hrs_ind); | |
655 #endif | |
656 } | |
657 | |
658 // Otherwise, transfer from sparse to fine-grain. | |
807
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
795
diff
changeset
|
659 CardIdx_t cards[SparsePRTEntry::CardsPerEntry]; |
342 | 660 if (G1HRRSUseSparseTable) { |
807
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
795
diff
changeset
|
661 bool res = _sparse_table.get_cards(from_hrs_ind, &cards[0]); |
342 | 662 assert(res, "There should have been an entry"); |
663 } | |
664 | |
665 if (_n_fine_entries == _max_fine_entries) { | |
666 prt = delete_region_table(); | |
667 } else { | |
668 prt = PosParPRT::alloc(from_hr); | |
669 } | |
670 prt->init(from_hr); | |
671 // Record the outgoing pointer in the from_region's outgoing bitmap. | |
672 from_hr->rem_set()->add_outgoing_reference(hr()); | |
673 | |
674 PosParPRT* first_prt = _fine_grain_regions[ind]; | |
675 prt->set_next(first_prt); // XXX Maybe move to init? | |
676 _fine_grain_regions[ind] = prt; | |
677 _n_fine_entries++; | |
678 | |
679 // Add in the cards from the sparse table. | |
680 if (G1HRRSUseSparseTable) { | |
681 for (int i = 0; i < SparsePRTEntry::CardsPerEntry; i++) { | |
807
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
795
diff
changeset
|
682 CardIdx_t c = cards[i]; |
342 | 683 if (c != SparsePRTEntry::NullEntry) { |
684 prt->add_card(c); | |
685 } | |
686 } | |
687 // Now we can delete the sparse entry. | |
807
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
795
diff
changeset
|
688 bool res = _sparse_table.delete_entry(from_hrs_ind); |
342 | 689 assert(res, "It should have been there."); |
690 } | |
691 } | |
692 assert(prt != NULL && prt->hr() == from_hr, "consequence"); | |
693 } | |
694 // Note that we can't assert "prt->hr() == from_hr", because of the | |
695 // possibility of concurrent reuse. But see head comment of | |
696 // OtherRegionsTable for why this is OK. | |
697 assert(prt != NULL, "Inv"); | |
698 | |
699 prt->add_reference(from, tid); | |
700 if (G1RecordHRRSOops) { | |
701 HeapRegionRemSet::record(hr(), from); | |
702 #if HRRS_VERBOSE | |
703 gclog_or_tty->print("Added card " PTR_FORMAT " to region " | |
704 "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n", | |
705 align_size_down(uintptr_t(from), | |
706 CardTableModRefBS::card_size), | |
707 hr()->bottom(), from); | |
708 #endif | |
709 } | |
710 assert(contains_reference(from), "We just added it!"); | |
711 } | |
712 | |
713 PosParPRT* | |
714 OtherRegionsTable::find_region_table(size_t ind, HeapRegion* hr) const { | |
715 assert(0 <= ind && ind < _max_fine_entries, "Preconditions."); | |
716 PosParPRT* prt = _fine_grain_regions[ind]; | |
717 while (prt != NULL && prt->hr() != hr) { | |
718 prt = prt->next(); | |
719 } | |
720 // Loop postcondition is the method postcondition. | |
721 return prt; | |
722 } | |
723 | |
724 | |
725 #define DRT_CENSUS 0 | |
726 | |
727 #if DRT_CENSUS | |
728 static const int HistoSize = 6; | |
729 static int global_histo[HistoSize] = { 0, 0, 0, 0, 0, 0 }; | |
730 static int coarsenings = 0; | |
731 static int occ_sum = 0; | |
732 #endif | |
733 | |
734 jint OtherRegionsTable::_n_coarsenings = 0; | |
735 | |
736 PosParPRT* OtherRegionsTable::delete_region_table() { | |
737 #if DRT_CENSUS | |
738 int histo[HistoSize] = { 0, 0, 0, 0, 0, 0 }; | |
739 const int histo_limits[] = { 1, 4, 16, 64, 256, 2048 }; | |
740 #endif | |
741 | |
742 assert(_m.owned_by_self(), "Precondition"); | |
743 assert(_n_fine_entries == _max_fine_entries, "Precondition"); | |
744 PosParPRT* max = NULL; | |
745 jint max_occ = 0; | |
746 PosParPRT** max_prev; | |
747 size_t max_ind; | |
748 | |
749 #if SAMPLE_FOR_EVICTION | |
750 size_t i = _fine_eviction_start; | |
751 for (size_t k = 0; k < _fine_eviction_sample_size; k++) { | |
752 size_t ii = i; | |
753 // Make sure we get a non-NULL sample. | |
754 while (_fine_grain_regions[ii] == NULL) { | |
755 ii++; | |
756 if (ii == _max_fine_entries) ii = 0; | |
757 guarantee(ii != i, "We must find one."); | |
758 } | |
759 PosParPRT** prev = &_fine_grain_regions[ii]; | |
760 PosParPRT* cur = *prev; | |
761 while (cur != NULL) { | |
762 jint cur_occ = cur->occupied(); | |
763 if (max == NULL || cur_occ > max_occ) { | |
764 max = cur; | |
765 max_prev = prev; | |
766 max_ind = i; | |
767 max_occ = cur_occ; | |
768 } | |
769 prev = cur->next_addr(); | |
770 cur = cur->next(); | |
771 } | |
772 i = i + _fine_eviction_stride; | |
773 if (i >= _n_fine_entries) i = i - _n_fine_entries; | |
774 } | |
775 _fine_eviction_start++; | |
776 if (_fine_eviction_start >= _n_fine_entries) | |
777 _fine_eviction_start -= _n_fine_entries; | |
778 #else | |
779 for (int i = 0; i < _max_fine_entries; i++) { | |
780 PosParPRT** prev = &_fine_grain_regions[i]; | |
781 PosParPRT* cur = *prev; | |
782 while (cur != NULL) { | |
783 jint cur_occ = cur->occupied(); | |
784 #if DRT_CENSUS | |
785 for (int k = 0; k < HistoSize; k++) { | |
786 if (cur_occ <= histo_limits[k]) { | |
787 histo[k]++; global_histo[k]++; break; | |
788 } | |
789 } | |
790 #endif | |
791 if (max == NULL || cur_occ > max_occ) { | |
792 max = cur; | |
793 max_prev = prev; | |
794 max_ind = i; | |
795 max_occ = cur_occ; | |
796 } | |
797 prev = cur->next_addr(); | |
798 cur = cur->next(); | |
799 } | |
800 } | |
801 #endif | |
802 // XXX | |
803 guarantee(max != NULL, "Since _n_fine_entries > 0"); | |
804 #if DRT_CENSUS | |
805 gclog_or_tty->print_cr("In a coarsening: histo of occs:"); | |
806 for (int k = 0; k < HistoSize; k++) { | |
807 gclog_or_tty->print_cr(" <= %4d: %5d.", histo_limits[k], histo[k]); | |
808 } | |
809 coarsenings++; | |
810 occ_sum += max_occ; | |
811 if ((coarsenings % 100) == 0) { | |
812 gclog_or_tty->print_cr("\ncoarsenings = %d; global summary:", coarsenings); | |
813 for (int k = 0; k < HistoSize; k++) { | |
814 gclog_or_tty->print_cr(" <= %4d: %5d.", histo_limits[k], global_histo[k]); | |
815 } | |
816 gclog_or_tty->print_cr("Avg occ of deleted region = %6.2f.", | |
817 (float)occ_sum/(float)coarsenings); | |
818 } | |
819 #endif | |
820 | |
821 // Set the corresponding coarse bit. | |
822 int max_hrs_index = max->hr()->hrs_index(); | |
823 if (!_coarse_map.at(max_hrs_index)) { | |
824 _coarse_map.at_put(max_hrs_index, true); | |
825 _n_coarse_entries++; | |
826 #if 0 | |
827 gclog_or_tty->print("Coarsened entry in region [" PTR_FORMAT "...] " | |
828 "for region [" PTR_FORMAT "...] (%d coarse entries).\n", | |
829 hr()->bottom(), | |
830 max->hr()->bottom(), | |
831 _n_coarse_entries); | |
832 #endif | |
833 } | |
834 | |
835 // Unsplice. | |
836 *max_prev = max->next(); | |
837 Atomic::inc(&_n_coarsenings); | |
838 _n_fine_entries--; | |
839 return max; | |
840 } | |
841 | |
842 | |
// At present, this must be called stop-world single-threaded.
//
// Removes entries for regions/cards that the liveness bitmaps say are
// dead: first intersects the coarse map with the live-region bitmap,
// then walks every fine-grain bucket, deleting whole tables for dead
// regions and scrubbing individual cards in live ones.
void OtherRegionsTable::scrub(CardTableModRefBS* ctbs,
                              BitMap* region_bm, BitMap* card_bm) {
  // First eliminate garbage regions from the coarse map.
  if (G1RSScrubVerbose)
    gclog_or_tty->print_cr("Scrubbing region %d:", hr()->hrs_index());

  assert(_coarse_map.size() == region_bm->size(), "Precondition");
  if (G1RSScrubVerbose)
    gclog_or_tty->print(" Coarse map: before = %d...", _n_coarse_entries);
  // Keep only coarse bits whose region is still live.
  _coarse_map.set_intersection(*region_bm);
  _n_coarse_entries = _coarse_map.count_one_bits();
  if (G1RSScrubVerbose)
    gclog_or_tty->print_cr("  after = %d.", _n_coarse_entries);

  // Now do the fine-grained maps.
  for (size_t i = 0; i < _max_fine_entries; i++) {
    PosParPRT* cur = _fine_grain_regions[i];
    PosParPRT** prev = &_fine_grain_regions[i];
    while (cur != NULL) {
      PosParPRT* nxt = cur->next();
      // If the entire region is dead, eliminate.
      if (G1RSScrubVerbose)
        gclog_or_tty->print_cr("    For other region %d:", cur->hr()->hrs_index());
      if (!region_bm->at(cur->hr()->hrs_index())) {
        // Unsplice 'cur' from the bucket list; 'prev' still points at the
        // link that referenced it.
        *prev = nxt;
        cur->set_next(NULL);
        _n_fine_entries--;
        if (G1RSScrubVerbose)
          gclog_or_tty->print_cr("          deleted via region map.");
        PosParPRT::free(cur);
      } else {
        // Do fine-grain elimination.
        if (G1RSScrubVerbose)
          gclog_or_tty->print("          occ: before = %4d.", cur->occupied());
        cur->scrub(ctbs, card_bm);
        if (G1RSScrubVerbose)
          gclog_or_tty->print_cr("          after = %4d.", cur->occupied());
        // Did that empty the table completely?
        if (cur->occupied() == 0) {
          *prev = nxt;
          cur->set_next(NULL);
          _n_fine_entries--;
          PosParPRT::free(cur);
        } else {
          // Keep the node: advance the unsplice link past it.
          prev = cur->next_addr();
        }
      }
      cur = nxt;
    }
  }
  // Since we may have deleted a from_card_cache entry from the RS, clear
  // the FCC.
  clear_fcc();
}
898 | |
899 | |
900 size_t OtherRegionsTable::occupied() const { | |
901 // Cast away const in this case. | |
902 MutexLockerEx x((Mutex*)&_m, Mutex::_no_safepoint_check_flag); | |
903 size_t sum = occ_fine(); | |
904 sum += occ_sparse(); | |
905 sum += occ_coarse(); | |
906 return sum; | |
907 } | |
908 | |
909 size_t OtherRegionsTable::occ_fine() const { | |
910 size_t sum = 0; | |
911 for (size_t i = 0; i < _max_fine_entries; i++) { | |
912 PosParPRT* cur = _fine_grain_regions[i]; | |
913 while (cur != NULL) { | |
914 sum += cur->occupied(); | |
915 cur = cur->next(); | |
916 } | |
917 } | |
918 return sum; | |
919 } | |
920 | |
921 size_t OtherRegionsTable::occ_coarse() const { | |
922 return (_n_coarse_entries * PosParPRT::CardsPerRegion); | |
923 } | |
924 | |
925 size_t OtherRegionsTable::occ_sparse() const { | |
926 return _sparse_table.occupied(); | |
927 } | |
928 | |
929 size_t OtherRegionsTable::mem_size() const { | |
930 // Cast away const in this case. | |
931 MutexLockerEx x((Mutex*)&_m, Mutex::_no_safepoint_check_flag); | |
932 size_t sum = 0; | |
933 for (size_t i = 0; i < _max_fine_entries; i++) { | |
934 PosParPRT* cur = _fine_grain_regions[i]; | |
935 while (cur != NULL) { | |
936 sum += cur->mem_size(); | |
937 cur = cur->next(); | |
938 } | |
939 } | |
940 sum += (sizeof(PosParPRT*) * _max_fine_entries); | |
941 sum += (_coarse_map.size_in_words() * HeapWordSize); | |
942 sum += (_sparse_table.mem_size()); | |
943 sum += sizeof(*this) - sizeof(_sparse_table); // Avoid double counting above. | |
944 return sum; | |
945 } | |
946 | |
947 size_t OtherRegionsTable::static_mem_size() { | |
948 return _from_card_cache_mem_size; | |
949 } | |
950 | |
951 size_t OtherRegionsTable::fl_mem_size() { | |
952 return PerRegionTable::fl_mem_size() + PosParPRT::fl_mem_size(); | |
953 } | |
954 | |
955 void OtherRegionsTable::clear_fcc() { | |
956 for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) { | |
957 _from_card_cache[i][hr()->hrs_index()] = -1; | |
958 } | |
959 } | |
960 | |
961 void OtherRegionsTable::clear() { | |
962 MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag); | |
963 for (size_t i = 0; i < _max_fine_entries; i++) { | |
964 PosParPRT* cur = _fine_grain_regions[i]; | |
965 while (cur != NULL) { | |
966 PosParPRT* nxt = cur->next(); | |
967 PosParPRT::free(cur); | |
968 cur = nxt; | |
969 } | |
970 _fine_grain_regions[i] = NULL; | |
971 } | |
972 _sparse_table.clear(); | |
973 _coarse_map.clear(); | |
974 _n_fine_entries = 0; | |
975 _n_coarse_entries = 0; | |
976 | |
977 clear_fcc(); | |
978 } | |
979 | |
// Remove any record of references coming from 'from_hr': delete its
// fine-grain table if present, otherwise clear its coarse bit, and
// invalidate any from-card-cache entries that point into this region.
void OtherRegionsTable::clear_incoming_entry(HeapRegion* from_hr) {
  MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
  size_t hrs_ind = (size_t)from_hr->hrs_index();
  // Hash the region index into its fine-grain bucket.
  size_t ind = hrs_ind & _mod_max_fine_entries_mask;
  if (del_single_region_table(ind, from_hr)) {
    // A fine-grain table existed; it must not also be coarse.
    assert(!_coarse_map.at(hrs_ind), "Inv");
  } else {
    // No fine table: clear the coarse bit (a no-op if it wasn't set).
    _coarse_map.par_at_put(hrs_ind, 0);
  }
  // Check to see if any of the fcc entries come from here.
  int hr_ind = hr()->hrs_index();
  for (int tid = 0; tid < HeapRegionRemSet::num_par_rem_sets(); tid++) {
    int fcc_ent = _from_card_cache[tid][hr_ind];
    if (fcc_ent != -1) {
      // Reconstruct the card's address from its cached index.
      HeapWord* card_addr = (HeapWord*)
        (uintptr_t(fcc_ent) << CardTableModRefBS::card_shift);
      if (hr()->is_in_reserved(card_addr)) {
        // Clear the from card cache.
        _from_card_cache[tid][hr_ind] = -1;
      }
    }
  }
}
1003 | |
1004 bool OtherRegionsTable::del_single_region_table(size_t ind, | |
1005 HeapRegion* hr) { | |
1006 assert(0 <= ind && ind < _max_fine_entries, "Preconditions."); | |
1007 PosParPRT** prev_addr = &_fine_grain_regions[ind]; | |
1008 PosParPRT* prt = *prev_addr; | |
1009 while (prt != NULL && prt->hr() != hr) { | |
1010 prev_addr = prt->next_addr(); | |
1011 prt = prt->next(); | |
1012 } | |
1013 if (prt != NULL) { | |
1014 assert(prt->hr() == hr, "Loop postcondition."); | |
1015 *prev_addr = prt->next(); | |
1016 PosParPRT::free(prt); | |
1017 _n_fine_entries--; | |
1018 return true; | |
1019 } else { | |
1020 return false; | |
1021 } | |
1022 } | |
1023 | |
1024 bool OtherRegionsTable::contains_reference(oop* from) const { | |
1025 // Cast away const in this case. | |
1026 MutexLockerEx x((Mutex*)&_m, Mutex::_no_safepoint_check_flag); | |
1027 return contains_reference_locked(from); | |
1028 } | |
1029 | |
// Caller must hold _m. Checks the three representations in order:
// coarse map (whole region recorded), fine-grain PRT (per-card bitmap),
// then the sparse table (short list of card indices per region).
bool OtherRegionsTable::contains_reference_locked(oop* from) const {
  HeapRegion* hr = _g1h->heap_region_containing_raw(from);
  if (hr == NULL) return false;
  RegionIdx_t hr_ind = (RegionIdx_t) hr->hrs_index();
  // Is this region in the coarse map?
  if (_coarse_map.at(hr_ind)) return true;

  PosParPRT* prt = find_region_table(hr_ind & _mod_max_fine_entries_mask,
                                     hr);
  if (prt != NULL) {
    return prt->contains_reference(from);

  } else {
    // Compute the card index of 'from' relative to the bottom of its
    // region, as the sparse table stores region-relative card indices.
    uintptr_t from_card =
      (uintptr_t(from) >> CardTableModRefBS::card_shift);
    uintptr_t hr_bot_card_index =
      uintptr_t(hr->bottom()) >> CardTableModRefBS::card_shift;
    assert(from_card >= hr_bot_card_index, "Inv");
    CardIdx_t card_index = from_card - hr_bot_card_index;
    assert(0 <= card_index && card_index < PosParPRT::CardsPerRegion, "Must be in range.");
    return _sparse_table.contains_card(hr_ind, card_index);
  }


}
1055 | |
795
215f81b4d9b3
6841831: G1: assert(contains_reference(from),"We just added it!") fires
iveresov
parents:
794
diff
changeset
|
// Determines how many threads can add records to an rset in parallel.
// This can be done by either mutator threads together with the
// concurrent refinement threads or GC threads.
int HeapRegionRemSet::num_par_rem_sets() {
  return (int)MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), ParallelGCThreads);
}
1062 | |
// Construct the remembered set for region 'hr'. The outgoing-region map
// is sized for the maximum number of regions and allocated on the C heap
// (not in a resource area); iteration starts unclaimed.
HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
                                   HeapRegion* hr)
  : _bosa(bosa), _other_regions(hr),
    _outgoing_region_map(G1CollectedHeap::heap()->max_regions(),
                         false /* in-resource-area */),
    _iter_state(Unclaimed)
{}
1070 | |
1071 | |
1072 void HeapRegionRemSet::init_for_par_iteration() { | |
1073 _iter_state = Unclaimed; | |
1074 } | |
1075 | |
// Attempt to claim this rem set for iteration; returns true for exactly
// one caller per iteration round.
bool HeapRegionRemSet::claim_iter() {
  // Cheap pre-check: already claimed or completed.
  if (_iter_state != Unclaimed) return false;
  // Race via CAS; only the thread that observes Unclaimed wins.
  jint res = Atomic::cmpxchg(Claimed, (jint*)(&_iter_state), Unclaimed);
  return (res == Unclaimed);
}
1081 | |
1082 void HeapRegionRemSet::set_iter_complete() { | |
1083 _iter_state = Complete; | |
1084 } | |
1085 | |
1086 bool HeapRegionRemSet::iter_is_complete() { | |
1087 return _iter_state == Complete; | |
1088 } | |
1089 | |
1090 | |
1091 void HeapRegionRemSet::init_iterator(HeapRegionRemSetIterator* iter) const { | |
1092 iter->initialize(this); | |
1093 } | |
1094 | |
1095 #ifndef PRODUCT | |
// Debug-only: print every card in the rem set and verify that iteration
// yields exactly as many cards as occupied() reports.
void HeapRegionRemSet::print() const {
  HeapRegionRemSetIterator iter;
  init_iterator(&iter);
  size_t card_index;
  while (iter.has_next(card_index)) {
    HeapWord* card_start =
      G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index);
    gclog_or_tty->print_cr("  Card " PTR_FORMAT ".", card_start);
  }
  // XXX
  // Diagnostic breakdown before the guarantee below fires.
  if (iter.n_yielded() != occupied()) {
    gclog_or_tty->print_cr("Yielded disagrees with occupied:");
    gclog_or_tty->print_cr("  %6d yielded (%6d coarse, %6d fine).",
                  iter.n_yielded(),
                  iter.n_yielded_coarse(), iter.n_yielded_fine());
    gclog_or_tty->print_cr("  %6d occ     (%6d coarse, %6d fine).",
                  occupied(), occ_coarse(), occ_fine());
  }
  guarantee(iter.n_yielded() == occupied(),
            "We should have yielded all the represented cards.");
}
1117 #endif | |
1118 | |
1119 void HeapRegionRemSet::cleanup() { | |
1120 SparsePRT::cleanup_all(); | |
1121 } | |
1122 | |
1123 void HeapRegionRemSet::par_cleanup() { | |
1124 PosParPRT::par_contract_all(); | |
1125 } | |
1126 | |
1127 void HeapRegionRemSet::add_outgoing_reference(HeapRegion* to_hr) { | |
1128 _outgoing_region_map.par_at_put(to_hr->hrs_index(), 1); | |
1129 } | |
1130 | |
// Empty this rem set. Outgoing entries must be cleared first, since that
// walks _outgoing_region_map before it is itself cleared.
void HeapRegionRemSet::clear() {
  clear_outgoing_entries();
  _outgoing_region_map.clear();
  _other_regions.clear();
  assert(occupied() == 0, "Should be clear.");
}
1137 | |
1138 void HeapRegionRemSet::clear_outgoing_entries() { | |
1139 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
1140 size_t i = _outgoing_region_map.get_next_one_offset(0); | |
1141 while (i < _outgoing_region_map.size()) { | |
1142 HeapRegion* to_region = g1h->region_at(i); | |
545 | 1143 if (!to_region->in_collection_set()) { |
1144 to_region->rem_set()->clear_incoming_entry(hr()); | |
1145 } | |
342 | 1146 i = _outgoing_region_map.get_next_one_offset(i+1); |
1147 } | |
1148 } | |
1149 | |
1150 | |
1151 void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs, | |
1152 BitMap* region_bm, BitMap* card_bm) { | |
1153 _other_regions.scrub(ctbs, region_bm, card_bm); | |
1154 } | |
1155 | |
1156 //-------------------- Iteration -------------------- | |
1157 | |
// Default-construct an unbound iterator; initialize() must be called
// before use. The sparse sub-iterator is seeded with the card index of
// the start of the reserved heap.
HeapRegionRemSetIterator::
HeapRegionRemSetIterator() :
  _hrrs(NULL),
  _g1h(G1CollectedHeap::heap()),
  _bosa(NULL),
  _sparse_iter(size_t(G1CollectedHeap::heap()->reserved_region().start())
               >> CardTableModRefBS::card_shift)
{}
1166 | |
1167 void HeapRegionRemSetIterator::initialize(const HeapRegionRemSet* hrrs) { | |
1168 _hrrs = hrrs; | |
1169 _coarse_map = &_hrrs->_other_regions._coarse_map; | |
1170 _fine_grain_regions = _hrrs->_other_regions._fine_grain_regions; | |
1171 _bosa = _hrrs->bosa(); | |
1172 | |
1173 _is = Sparse; | |
1174 // Set these values so that we increment to the first region. | |
1175 _coarse_cur_region_index = -1; | |
1176 _coarse_cur_region_cur_card = (PosParPRT::CardsPerRegion-1);; | |
1177 | |
1178 _cur_region_cur_card = 0; | |
1179 | |
1180 _fine_array_index = -1; | |
1181 _fine_cur_prt = NULL; | |
1182 | |
1183 _n_yielded_coarse = 0; | |
1184 _n_yielded_fine = 0; | |
1185 _n_yielded_sparse = 0; | |
1186 | |
1187 _sparse_iter.init(&hrrs->_other_regions._sparse_table); | |
1188 } | |
1189 | |
// Yield the next card index represented by the coarse map, advancing
// through every card of each coarsened region in turn. Returns false
// when all coarse regions are exhausted.
bool HeapRegionRemSetIterator::coarse_has_next(size_t& card_index) {
  if (_hrrs->_other_regions._n_coarse_entries == 0) return false;
  // Go to the next card.
  _coarse_cur_region_cur_card++;
  // Was the last the last card in the current region?
  if (_coarse_cur_region_cur_card == PosParPRT::CardsPerRegion) {
    // Yes: find the next region.  This may leave _coarse_cur_region_index
    // Set to the last index, in which case there are no more coarse
    // regions.
    _coarse_cur_region_index =
      (int) _coarse_map->get_next_one_offset(_coarse_cur_region_index + 1);
    if ((size_t)_coarse_cur_region_index < _coarse_map->size()) {
      _coarse_cur_region_cur_card = 0;
      // Cache the card offset of the new region's bottom so yielded
      // indices can be computed by simple addition.
      HeapWord* r_bot =
        _g1h->region_at(_coarse_cur_region_index)->bottom();
      _cur_region_card_offset = _bosa->index_for(r_bot);
    } else {
      return false;
    }
  }
  // If we didn't return false above, then we can yield a card.
  card_index = _cur_region_card_offset + _coarse_cur_region_cur_card;
  return true;
}
1214 | |
1215 void HeapRegionRemSetIterator::fine_find_next_non_null_prt() { | |
1216 // Otherwise, find the next bucket list in the array. | |
1217 _fine_array_index++; | |
1218 while (_fine_array_index < (int) OtherRegionsTable::_max_fine_entries) { | |
1219 _fine_cur_prt = _fine_grain_regions[_fine_array_index]; | |
1220 if (_fine_cur_prt != NULL) return; | |
1221 else _fine_array_index++; | |
1222 } | |
1223 assert(_fine_cur_prt == NULL, "Loop post"); | |
1224 } | |
1225 | |
// Yield the next card index from the fine-grain tables: scan the current
// PRT's bitmap, then chain to the next PRT in the bucket, then to the
// next non-empty bucket. Returns false when all fine tables are spent.
bool HeapRegionRemSetIterator::fine_has_next(size_t& card_index) {
  if (fine_has_next()) {
    // Advance to the next set bit in the current PRT's bitmap.
    _cur_region_cur_card =
      _fine_cur_prt->_bm.get_next_one_offset(_cur_region_cur_card + 1);
  }
  while (!fine_has_next()) {
    // Exhausted the current PRT's bitmap: move to its successor.
    if (_cur_region_cur_card == PosParPRT::CardsPerRegion) {
      _cur_region_cur_card = 0;
      _fine_cur_prt = _fine_cur_prt->next();
    }
    if (_fine_cur_prt == NULL) {
      fine_find_next_non_null_prt();
      if (_fine_cur_prt == NULL) return false;
    }
    assert(_fine_cur_prt != NULL && _cur_region_cur_card == 0,
           "inv.");
    // New PRT: recompute the base card offset for its region and find
    // its first set bit. (The bitmap may be empty; the loop re-tests.)
    HeapWord* r_bot =
      _fine_cur_prt->hr()->bottom();
    _cur_region_card_offset = _bosa->index_for(r_bot);
    _cur_region_cur_card = _fine_cur_prt->_bm.get_next_one_offset(0);
  }
  assert(fine_has_next(), "Or else we exited the loop via the return.");
  card_index = _cur_region_card_offset + _cur_region_cur_card;
  return true;
}
1251 | |
1252 bool HeapRegionRemSetIterator::fine_has_next() { | |
1253 return | |
1254 _fine_cur_prt != NULL && | |
1255 _cur_region_cur_card < PosParPRT::CardsPerRegion; | |
1256 } | |
1257 | |
// Yield the next card index, exhausting the sparse table first, then the
// fine-grain tables, then the coarse map. The switch cases deliberately
// fall through so one call can advance across representations.
bool HeapRegionRemSetIterator::has_next(size_t& card_index) {
  switch (_is) {
  case Sparse:
    if (_sparse_iter.has_next(card_index)) {
      _n_yielded_sparse++;
      return true;
    }
    // Otherwise, deliberate fall-through
    _is = Fine;
  case Fine:
    if (fine_has_next(card_index)) {
      _n_yielded_fine++;
      return true;
    }
    // Otherwise, deliberate fall-through
    _is = Coarse;
  case Coarse:
    if (coarse_has_next(card_index)) {
      _n_yielded_coarse++;
      return true;
    }
    // Otherwise...
    break;
  }
  // In the single-threaded case a full traversal must account for every
  // occupied card; with parallel GC threads the rem set may mutate.
  assert(ParallelGCThreads > 1 ||
         n_yielded() == _hrrs->occupied(),
         "Should have yielded all the cards in the rem set "
         "(in the non-par case).");
  return false;
}
1288 | |
1289 | |
1290 | |
// Static state for the debugging/tracing support below: three parallel
// arrays of recorded (oop location, card, target region) triples, plus a
// parallel event log indexed into the recording stream. All are lazily
// allocated on first use (see record() / record_event()).
oop**        HeapRegionRemSet::_recorded_oops = NULL;
HeapWord**   HeapRegionRemSet::_recorded_cards = NULL;
HeapRegion** HeapRegionRemSet::_recorded_regions = NULL;
int          HeapRegionRemSet::_n_recorded = 0;

HeapRegionRemSet::Event* HeapRegionRemSet::_recorded_events = NULL;
int* HeapRegionRemSet::_recorded_event_index = NULL;
int HeapRegionRemSet::_n_recorded_events = 0;
1299 | |
// Debugging aid: remember the referencing slot 'f', its card, and the
// target region 'hr' for later dumping by print_recorded().
void HeapRegionRemSet::record(HeapRegion* hr, oop* f) {
  if (_recorded_oops == NULL) {
    // Lazily allocate the three parallel arrays on first use.
    assert(_n_recorded == 0
           && _recorded_cards == NULL
           && _recorded_regions == NULL,
           "Inv");
    _recorded_oops    = NEW_C_HEAP_ARRAY(oop*,        MaxRecorded);
    _recorded_cards   = NEW_C_HEAP_ARRAY(HeapWord*,   MaxRecorded);
    _recorded_regions = NEW_C_HEAP_ARRAY(HeapRegion*, MaxRecorded);
  }
  if (_n_recorded == MaxRecorded) {
    // Buffer full: note the overflow but keep running.
    gclog_or_tty->print_cr("Filled up 'recorded' (%d).", MaxRecorded);
  } else {
    // Round the slot address down to its card boundary.
    _recorded_cards[_n_recorded] =
      (HeapWord*)align_size_down(uintptr_t(f),
                                 CardTableModRefBS::card_size);
    _recorded_oops[_n_recorded] = f;
    _recorded_regions[_n_recorded] = hr;
    _n_recorded++;
  }
}
1321 | |
// Debugging aid: log event 'evnt' together with the current recording
// position, so print_recorded() can interleave events with records.
// No-op unless G1RecordHRRSEvents is enabled.
void HeapRegionRemSet::record_event(Event evnt) {
  if (!G1RecordHRRSEvents) return;

  if (_recorded_events == NULL) {
    // Lazily allocate the event arrays on first use.
    assert(_n_recorded_events == 0
           && _recorded_event_index == NULL,
           "Inv");
    _recorded_events = NEW_C_HEAP_ARRAY(Event, MaxRecordedEvents);
    _recorded_event_index = NEW_C_HEAP_ARRAY(int, MaxRecordedEvents);
  }
  if (_n_recorded_events == MaxRecordedEvents) {
    // Buffer full: note the overflow but keep running.
    gclog_or_tty->print_cr("Filled up 'recorded_events' (%d).", MaxRecordedEvents);
  } else {
    _recorded_events[_n_recorded_events] = evnt;
    // Tie this event to the current position in the record stream.
    _recorded_event_index[_n_recorded_events] = _n_recorded;
    _n_recorded_events++;
  }
}
1340 | |
1341 void HeapRegionRemSet::print_event(outputStream* str, Event evnt) { | |
1342 switch (evnt) { | |
1343 case Event_EvacStart: | |
1344 str->print("Evac Start"); | |
1345 break; | |
1346 case Event_EvacEnd: | |
1347 str->print("Evac End"); | |
1348 break; | |
1349 case Event_RSUpdateEnd: | |
1350 str->print("RS Update End"); | |
1351 break; | |
1352 } | |
1353 } | |
1354 | |
1355 void HeapRegionRemSet::print_recorded() { | |
1356 int cur_evnt = 0; | |
1357 Event cur_evnt_kind; | |
1358 int cur_evnt_ind = 0; | |
1359 if (_n_recorded_events > 0) { | |
1360 cur_evnt_kind = _recorded_events[cur_evnt]; | |
1361 cur_evnt_ind = _recorded_event_index[cur_evnt]; | |
1362 } | |
1363 | |
1364 for (int i = 0; i < _n_recorded; i++) { | |
1365 while (cur_evnt < _n_recorded_events && i == cur_evnt_ind) { | |
1366 gclog_or_tty->print("Event: "); | |
1367 print_event(gclog_or_tty, cur_evnt_kind); | |
1368 gclog_or_tty->print_cr(""); | |
1369 cur_evnt++; | |
1370 if (cur_evnt < MaxRecordedEvents) { | |
1371 cur_evnt_kind = _recorded_events[cur_evnt]; | |
1372 cur_evnt_ind = _recorded_event_index[cur_evnt]; | |
1373 } | |
1374 } | |
1375 gclog_or_tty->print("Added card " PTR_FORMAT " to region [" PTR_FORMAT "...]" | |
1376 " for ref " PTR_FORMAT ".\n", | |
1377 _recorded_cards[i], _recorded_regions[i]->bottom(), | |
1378 _recorded_oops[i]); | |
1379 } | |
1380 } | |
1381 | |
1382 #ifndef PRODUCT | |
// Debug-only self-test of rem-set recording, coarsening, and iteration.
// Run with "-XX:G1LogRSRegionEntries=2", so that 1 and 5 end up in same
// hash bucket (forcing a coarsening when more regions are added).
void HeapRegionRemSet::test() {
  os::sleep(Thread::current(), (jlong)5000, false);
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Pick a recording region (0) and several target regions; 1 and 5
  // share a hash bucket under the flag setting noted above.
  HeapRegion* hr0 = g1h->region_at(0);
  HeapRegion* hr1 = g1h->region_at(1);
  HeapRegion* hr2 = g1h->region_at(5);
  HeapRegion* hr3 = g1h->region_at(6);
  HeapRegion* hr4 = g1h->region_at(7);
  HeapRegion* hr5 = g1h->region_at(8);

  // First, middle, and last addresses of each target region.
  HeapWord* hr1_start = hr1->bottom();
  HeapWord* hr1_mid = hr1_start + HeapRegion::GrainWords/2;
  HeapWord* hr1_last = hr1->end() - 1;

  HeapWord* hr2_start = hr2->bottom();
  HeapWord* hr2_mid = hr2_start + HeapRegion::GrainWords/2;
  HeapWord* hr2_last = hr2->end() - 1;

  HeapWord* hr3_start = hr3->bottom();
  HeapWord* hr3_mid = hr3_start + HeapRegion::GrainWords/2;
  HeapWord* hr3_last = hr3->end() - 1;

  HeapRegionRemSet* hrrs = hr0->rem_set();

  // Make three references from region 0x101...
  hrrs->add_reference((oop*)hr1_start);
  hrrs->add_reference((oop*)hr1_mid);
  hrrs->add_reference((oop*)hr1_last);

  hrrs->add_reference((oop*)hr2_start);
  hrrs->add_reference((oop*)hr2_mid);
  hrrs->add_reference((oop*)hr2_last);

  hrrs->add_reference((oop*)hr3_start);
  hrrs->add_reference((oop*)hr3_mid);
  hrrs->add_reference((oop*)hr3_last);

  // Now cause a coarsening.
  hrrs->add_reference((oop*)hr4->bottom());
  hrrs->add_reference((oop*)hr5->bottom());

  // Now, does iteration yield these three?
  HeapRegionRemSetIterator iter;
  hrrs->init_iterator(&iter);
  size_t sum = 0;
  size_t card_index;
  while (iter.has_next(card_index)) {
    HeapWord* card_start =
      G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index);
    gclog_or_tty->print_cr("  Card " PTR_FORMAT ".", card_start);
    sum++;
  }
  // 11 cards were added; 3 collapse into one coarsened region whose
  // full card count is 2048 — presumably PosParPRT::CardsPerRegion for
  // this configuration (TODO confirm against the flag settings).
  guarantee(sum == 11 - 3 + 2048, "Failure");
  guarantee(sum == hrrs->occupied(), "Failure");
}
1441 #endif |