Mercurial > hg > truffle
annotate src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp @ 1161:1fc01a2425ce
Merge
author | iveresov |
---|---|
date | Tue, 12 Jan 2010 13:54:40 -0800 |
parents | 2c79770d1f6e |
children | 9eee977dd1a9 |
rev | line source |
---|---|
342 | 1 /* |
579 | 2 * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. |
342 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, | |
20 * CA 95054 USA or visit www.sun.com if you need additional information or | |
21 * have any questions. | |
22 * | |
23 */ | |
24 | |
25 #include "incls/_precompiled.incl" | |
26 #include "incls/_heapRegionRemSet.cpp.incl" | |
27 | |
28 #define HRRS_VERBOSE 0 | |
29 | |
30 #define PRT_COUNT_OCCUPIED 1 | |
31 | |
32 // OtherRegionsTable | |
33 | |
34 class PerRegionTable: public CHeapObj { | |
35 friend class OtherRegionsTable; | |
36 friend class HeapRegionRemSetIterator; | |
37 | |
38 HeapRegion* _hr; | |
39 BitMap _bm; | |
40 #if PRT_COUNT_OCCUPIED | |
41 jint _occupied; | |
42 #endif | |
43 PerRegionTable* _next_free; | |
44 | |
45 PerRegionTable* next_free() { return _next_free; } | |
46 void set_next_free(PerRegionTable* prt) { _next_free = prt; } | |
47 | |
48 | |
49 static PerRegionTable* _free_list; | |
50 | |
51 #ifdef _MSC_VER | |
52 // For some reason even though the classes are marked as friend they are unable | |
53 // to access CardsPerRegion when private/protected. Only the windows c++ compiler | |
54 // says this Sun CC and linux gcc don't have a problem with access when private | |
55 | |
56 public: | |
57 | |
58 #endif // _MSC_VER | |
59 | |
60 protected: | |
61 // We need access in order to union things into the base table. | |
62 BitMap* bm() { return &_bm; } | |
63 | |
545 | 64 #if PRT_COUNT_OCCUPIED |
342 | 65 void recount_occupied() { |
66 _occupied = (jint) bm()->count_one_bits(); | |
67 } | |
545 | 68 #endif |
342 | 69 |
70 PerRegionTable(HeapRegion* hr) : | |
71 _hr(hr), | |
72 #if PRT_COUNT_OCCUPIED | |
73 _occupied(0), | |
74 #endif | |
942
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
845
diff
changeset
|
75 _bm(HeapRegion::CardsPerRegion, false /* in-resource-area */) |
342 | 76 {} |
77 | |
78 static void free(PerRegionTable* prt) { | |
79 while (true) { | |
80 PerRegionTable* fl = _free_list; | |
81 prt->set_next_free(fl); | |
82 PerRegionTable* res = | |
83 (PerRegionTable*) | |
84 Atomic::cmpxchg_ptr(prt, &_free_list, fl); | |
85 if (res == fl) return; | |
86 } | |
87 ShouldNotReachHere(); | |
88 } | |
89 | |
90 static PerRegionTable* alloc(HeapRegion* hr) { | |
91 PerRegionTable* fl = _free_list; | |
92 while (fl != NULL) { | |
93 PerRegionTable* nxt = fl->next_free(); | |
94 PerRegionTable* res = | |
95 (PerRegionTable*) | |
96 Atomic::cmpxchg_ptr(nxt, &_free_list, fl); | |
97 if (res == fl) { | |
98 fl->init(hr); | |
99 return fl; | |
100 } else { | |
101 fl = _free_list; | |
102 } | |
103 } | |
104 assert(fl == NULL, "Loop condition."); | |
105 return new PerRegionTable(hr); | |
106 } | |
107 | |
807
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
795
diff
changeset
|
108 void add_card_work(CardIdx_t from_card, bool par) { |
342 | 109 if (!_bm.at(from_card)) { |
110 if (par) { | |
111 if (_bm.par_at_put(from_card, 1)) { | |
112 #if PRT_COUNT_OCCUPIED | |
113 Atomic::inc(&_occupied); | |
114 #endif | |
115 } | |
116 } else { | |
117 _bm.at_put(from_card, 1); | |
118 #if PRT_COUNT_OCCUPIED | |
119 _occupied++; | |
120 #endif | |
121 } | |
122 } | |
123 } | |
124 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
807
diff
changeset
|
125 void add_reference_work(OopOrNarrowOopStar from, bool par) { |
342 | 126 // Must make this robust in case "from" is not in "_hr", because of |
127 // concurrency. | |
128 | |
129 #if HRRS_VERBOSE | |
130 gclog_or_tty->print_cr(" PRT::Add_reference_work(" PTR_FORMAT "->" PTR_FORMAT").", | |
131 from, *from); | |
132 #endif | |
133 | |
134 HeapRegion* loc_hr = hr(); | |
135 // If the test below fails, then this table was reused concurrently | |
136 // with this operation. This is OK, since the old table was coarsened, | |
137 // and adding a bit to the new table is never incorrect. | |
138 if (loc_hr->is_in_reserved(from)) { | |
139 size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom()); | |
807
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
795
diff
changeset
|
140 CardIdx_t from_card = (CardIdx_t) |
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
795
diff
changeset
|
141 hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize); |
342 | 142 |
942
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
845
diff
changeset
|
143 assert(0 <= from_card && from_card < HeapRegion::CardsPerRegion, |
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
845
diff
changeset
|
144 "Must be in range."); |
807
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
795
diff
changeset
|
145 add_card_work(from_card, par); |
342 | 146 } |
147 } | |
148 | |
149 public: | |
150 | |
151 HeapRegion* hr() const { return _hr; } | |
152 | |
153 #if PRT_COUNT_OCCUPIED | |
154 jint occupied() const { | |
155 // Overkill, but if we ever need it... | |
156 // guarantee(_occupied == _bm.count_one_bits(), "Check"); | |
157 return _occupied; | |
158 } | |
159 #else | |
160 jint occupied() const { | |
161 return _bm.count_one_bits(); | |
162 } | |
163 #endif | |
164 | |
165 void init(HeapRegion* hr) { | |
166 _hr = hr; | |
167 #if PRT_COUNT_OCCUPIED | |
168 _occupied = 0; | |
169 #endif | |
170 _bm.clear(); | |
171 } | |
172 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
807
diff
changeset
|
173 void add_reference(OopOrNarrowOopStar from) { |
342 | 174 add_reference_work(from, /*parallel*/ true); |
175 } | |
176 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
807
diff
changeset
|
177 void seq_add_reference(OopOrNarrowOopStar from) { |
342 | 178 add_reference_work(from, /*parallel*/ false); |
179 } | |
180 | |
181 void scrub(CardTableModRefBS* ctbs, BitMap* card_bm) { | |
182 HeapWord* hr_bot = hr()->bottom(); | |
489
2494ab195856
6653214: MemoryPoolMXBean.setUsageThreshold() does not support large heap sizes.
swamyv
parents:
342
diff
changeset
|
183 size_t hr_first_card_index = ctbs->index_for(hr_bot); |
342 | 184 bm()->set_intersection_at_offset(*card_bm, hr_first_card_index); |
185 #if PRT_COUNT_OCCUPIED | |
186 recount_occupied(); | |
187 #endif | |
188 } | |
189 | |
807
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
795
diff
changeset
|
190 void add_card(CardIdx_t from_card_index) { |
342 | 191 add_card_work(from_card_index, /*parallel*/ true); |
192 } | |
193 | |
807
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
795
diff
changeset
|
194 void seq_add_card(CardIdx_t from_card_index) { |
342 | 195 add_card_work(from_card_index, /*parallel*/ false); |
196 } | |
197 | |
198 // (Destructively) union the bitmap of the current table into the given | |
199 // bitmap (which is assumed to be of the same size.) | |
200 void union_bitmap_into(BitMap* bm) { | |
201 bm->set_union(_bm); | |
202 } | |
203 | |
204 // Mem size in bytes. | |
205 size_t mem_size() const { | |
206 return sizeof(this) + _bm.size_in_words() * HeapWordSize; | |
207 } | |
208 | |
209 static size_t fl_mem_size() { | |
210 PerRegionTable* cur = _free_list; | |
211 size_t res = 0; | |
212 while (cur != NULL) { | |
213 res += sizeof(PerRegionTable); | |
214 cur = cur->next_free(); | |
215 } | |
216 return res; | |
217 } | |
218 | |
219 // Requires "from" to be in "hr()". | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
807
diff
changeset
|
220 bool contains_reference(OopOrNarrowOopStar from) const { |
342 | 221 assert(hr()->is_in_reserved(from), "Precondition."); |
222 size_t card_ind = pointer_delta(from, hr()->bottom(), | |
223 CardTableModRefBS::card_size); | |
224 return _bm.at(card_ind); | |
225 } | |
226 }; | |
227 | |
228 PerRegionTable* PerRegionTable::_free_list = NULL; | |
229 | |
230 | |
#define COUNT_PAR_EXPANDS 0

#if COUNT_PAR_EXPANDS
// Debug-only counters for parallel expansion/contraction of PosParPRTs.
static jint n_par_expands = 0;
static jint n_par_contracts = 0;
static jint par_expand_list_len = 0;
static jint max_par_expand_list_len = 0;

// Record one expansion and periodically dump the running statistics.
static void print_par_expand() {
  Atomic::inc(&n_par_expands);
  Atomic::inc(&par_expand_list_len);
  if (par_expand_list_len > max_par_expand_list_len) {
    max_par_expand_list_len = par_expand_list_len;
  }
  if ((n_par_expands % 10) == 0) {
    gclog_or_tty->print_cr("\n\n%d par expands: %d contracts, "
                           "len = %d, max_len = %d\n.",
                           n_par_expands, n_par_contracts, par_expand_list_len,
                           max_par_expand_list_len);
  }
}
#endif
253 | |
254 class PosParPRT: public PerRegionTable { | |
255 PerRegionTable** _par_tables; | |
256 | |
257 enum SomePrivateConstants { | |
258 ReserveParTableExpansion = 1 | |
259 }; | |
260 | |
261 void par_expand() { | |
262 int n = HeapRegionRemSet::num_par_rem_sets()-1; | |
263 if (n <= 0) return; | |
264 if (_par_tables == NULL) { | |
265 PerRegionTable* res = | |
266 (PerRegionTable*) | |
267 Atomic::cmpxchg_ptr((PerRegionTable*)ReserveParTableExpansion, | |
268 &_par_tables, NULL); | |
269 if (res != NULL) return; | |
270 // Otherwise, we reserved the right to do the expansion. | |
271 | |
272 PerRegionTable** ptables = NEW_C_HEAP_ARRAY(PerRegionTable*, n); | |
273 for (int i = 0; i < n; i++) { | |
274 PerRegionTable* ptable = PerRegionTable::alloc(hr()); | |
275 ptables[i] = ptable; | |
276 } | |
277 // Here we do not need an atomic. | |
278 _par_tables = ptables; | |
279 #if COUNT_PAR_EXPANDS | |
280 print_par_expand(); | |
281 #endif | |
282 // We must put this table on the expanded list. | |
283 PosParPRT* exp_head = _par_expanded_list; | |
284 while (true) { | |
285 set_next_par_expanded(exp_head); | |
286 PosParPRT* res = | |
287 (PosParPRT*) | |
288 Atomic::cmpxchg_ptr(this, &_par_expanded_list, exp_head); | |
289 if (res == exp_head) return; | |
290 // Otherwise. | |
291 exp_head = res; | |
292 } | |
293 ShouldNotReachHere(); | |
294 } | |
295 } | |
296 | |
297 void par_contract() { | |
298 assert(_par_tables != NULL, "Precondition."); | |
299 int n = HeapRegionRemSet::num_par_rem_sets()-1; | |
300 for (int i = 0; i < n; i++) { | |
301 _par_tables[i]->union_bitmap_into(bm()); | |
302 PerRegionTable::free(_par_tables[i]); | |
303 _par_tables[i] = NULL; | |
304 } | |
305 #if PRT_COUNT_OCCUPIED | |
306 // We must recount the "occupied." | |
307 recount_occupied(); | |
308 #endif | |
309 FREE_C_HEAP_ARRAY(PerRegionTable*, _par_tables); | |
310 _par_tables = NULL; | |
311 #if COUNT_PAR_EXPANDS | |
312 Atomic::inc(&n_par_contracts); | |
313 Atomic::dec(&par_expand_list_len); | |
314 #endif | |
315 } | |
316 | |
317 static PerRegionTable** _par_table_fl; | |
318 | |
319 PosParPRT* _next; | |
320 | |
321 static PosParPRT* _free_list; | |
322 | |
323 PerRegionTable** par_tables() const { | |
324 assert(uintptr_t(NULL) == 0, "Assumption."); | |
325 if (uintptr_t(_par_tables) <= ReserveParTableExpansion) | |
326 return NULL; | |
327 else | |
328 return _par_tables; | |
329 } | |
330 | |
331 PosParPRT* _next_par_expanded; | |
332 PosParPRT* next_par_expanded() { return _next_par_expanded; } | |
333 void set_next_par_expanded(PosParPRT* ppprt) { _next_par_expanded = ppprt; } | |
334 static PosParPRT* _par_expanded_list; | |
335 | |
336 public: | |
337 | |
338 PosParPRT(HeapRegion* hr) : PerRegionTable(hr), _par_tables(NULL) {} | |
339 | |
340 jint occupied() const { | |
341 jint res = PerRegionTable::occupied(); | |
342 if (par_tables() != NULL) { | |
343 for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets()-1; i++) { | |
344 res += par_tables()[i]->occupied(); | |
345 } | |
346 } | |
347 return res; | |
348 } | |
349 | |
350 void init(HeapRegion* hr) { | |
351 PerRegionTable::init(hr); | |
352 _next = NULL; | |
353 if (par_tables() != NULL) { | |
354 for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets()-1; i++) { | |
355 par_tables()[i]->init(hr); | |
356 } | |
357 } | |
358 } | |
359 | |
360 static void free(PosParPRT* prt) { | |
361 while (true) { | |
362 PosParPRT* fl = _free_list; | |
363 prt->set_next(fl); | |
364 PosParPRT* res = | |
365 (PosParPRT*) | |
366 Atomic::cmpxchg_ptr(prt, &_free_list, fl); | |
367 if (res == fl) return; | |
368 } | |
369 ShouldNotReachHere(); | |
370 } | |
371 | |
372 static PosParPRT* alloc(HeapRegion* hr) { | |
373 PosParPRT* fl = _free_list; | |
374 while (fl != NULL) { | |
375 PosParPRT* nxt = fl->next(); | |
376 PosParPRT* res = | |
377 (PosParPRT*) | |
378 Atomic::cmpxchg_ptr(nxt, &_free_list, fl); | |
379 if (res == fl) { | |
380 fl->init(hr); | |
381 return fl; | |
382 } else { | |
383 fl = _free_list; | |
384 } | |
385 } | |
386 assert(fl == NULL, "Loop condition."); | |
387 return new PosParPRT(hr); | |
388 } | |
389 | |
390 PosParPRT* next() const { return _next; } | |
391 void set_next(PosParPRT* nxt) { _next = nxt; } | |
392 PosParPRT** next_addr() { return &_next; } | |
393 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
807
diff
changeset
|
394 void add_reference(OopOrNarrowOopStar from, int tid) { |
342 | 395 // Expand if necessary. |
396 PerRegionTable** pt = par_tables(); | |
397 if (par_tables() == NULL && tid > 0 && hr()->is_gc_alloc_region()) { | |
398 par_expand(); | |
399 pt = par_tables(); | |
400 } | |
401 if (pt != NULL) { | |
402 // We always have to assume that mods to table 0 are in parallel, | |
403 // because of the claiming scheme in parallel expansion. A thread | |
404 // with tid != 0 that finds the table to be NULL, but doesn't succeed | |
405 // in claiming the right of expanding it, will end up in the else | |
406 // clause of the above if test. That thread could be delayed, and a | |
407 // thread 0 add reference could see the table expanded, and come | |
408 // here. Both threads would be adding in parallel. But we get to | |
409 // not use atomics for tids > 0. | |
410 if (tid == 0) { | |
411 PerRegionTable::add_reference(from); | |
412 } else { | |
413 pt[tid-1]->seq_add_reference(from); | |
414 } | |
415 } else { | |
416 // Not expanded -- add to the base table. | |
417 PerRegionTable::add_reference(from); | |
418 } | |
419 } | |
420 | |
421 void scrub(CardTableModRefBS* ctbs, BitMap* card_bm) { | |
422 assert(_par_tables == NULL, "Precondition"); | |
423 PerRegionTable::scrub(ctbs, card_bm); | |
424 } | |
425 | |
426 size_t mem_size() const { | |
427 size_t res = | |
428 PerRegionTable::mem_size() + sizeof(this) - sizeof(PerRegionTable); | |
429 if (_par_tables != NULL) { | |
430 for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets()-1; i++) { | |
431 res += _par_tables[i]->mem_size(); | |
432 } | |
433 } | |
434 return res; | |
435 } | |
436 | |
437 static size_t fl_mem_size() { | |
438 PosParPRT* cur = _free_list; | |
439 size_t res = 0; | |
440 while (cur != NULL) { | |
441 res += sizeof(PosParPRT); | |
442 cur = cur->next(); | |
443 } | |
444 return res; | |
445 } | |
446 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
807
diff
changeset
|
447 bool contains_reference(OopOrNarrowOopStar from) const { |
342 | 448 if (PerRegionTable::contains_reference(from)) return true; |
449 if (_par_tables != NULL) { | |
450 for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets()-1; i++) { | |
451 if (_par_tables[i]->contains_reference(from)) return true; | |
452 } | |
453 } | |
454 return false; | |
455 } | |
456 | |
457 static void par_contract_all(); | |
458 | |
459 }; | |
460 | |
461 void PosParPRT::par_contract_all() { | |
462 PosParPRT* hd = _par_expanded_list; | |
463 while (hd != NULL) { | |
464 PosParPRT* nxt = hd->next_par_expanded(); | |
465 PosParPRT* res = | |
466 (PosParPRT*) | |
467 Atomic::cmpxchg_ptr(nxt, &_par_expanded_list, hd); | |
468 if (res == hd) { | |
469 // We claimed the right to contract this table. | |
470 hd->set_next_par_expanded(NULL); | |
471 hd->par_contract(); | |
472 hd = _par_expanded_list; | |
473 } else { | |
474 hd = res; | |
475 } | |
476 } | |
477 } | |
478 | |
479 PosParPRT* PosParPRT::_free_list = NULL; | |
480 PosParPRT* PosParPRT::_par_expanded_list = NULL; | |
481 | |
482 jint OtherRegionsTable::_cache_probes = 0; | |
483 jint OtherRegionsTable::_cache_hits = 0; | |
484 | |
485 size_t OtherRegionsTable::_max_fine_entries = 0; | |
486 size_t OtherRegionsTable::_mod_max_fine_entries_mask = 0; | |
487 #if SAMPLE_FOR_EVICTION | |
488 size_t OtherRegionsTable::_fine_eviction_stride = 0; | |
489 size_t OtherRegionsTable::_fine_eviction_sample_size = 0; | |
490 #endif | |
491 | |
492 OtherRegionsTable::OtherRegionsTable(HeapRegion* hr) : | |
493 _g1h(G1CollectedHeap::heap()), | |
494 _m(Mutex::leaf, "An OtherRegionsTable lock", true), | |
495 _hr(hr), | |
496 _coarse_map(G1CollectedHeap::heap()->max_regions(), | |
497 false /* in-resource-area */), | |
498 _fine_grain_regions(NULL), | |
499 _n_fine_entries(0), _n_coarse_entries(0), | |
500 #if SAMPLE_FOR_EVICTION | |
501 _fine_eviction_start(0), | |
502 #endif | |
503 _sparse_table(hr) | |
504 { | |
505 typedef PosParPRT* PosParPRTPtr; | |
506 if (_max_fine_entries == 0) { | |
507 assert(_mod_max_fine_entries_mask == 0, "Both or none."); | |
645
c3a720eefe82
6816308: Changes to allow builds with latest Windows SDK 6.1 on 64bit Windows 2003
kvn
parents:
545
diff
changeset
|
508 _max_fine_entries = (size_t)(1 << G1LogRSRegionEntries); |
342 | 509 _mod_max_fine_entries_mask = _max_fine_entries - 1; |
510 #if SAMPLE_FOR_EVICTION | |
511 assert(_fine_eviction_sample_size == 0 | |
512 && _fine_eviction_stride == 0, "All init at same time."); | |
513 _fine_eviction_sample_size = MAX2((size_t)4, (size_t)G1LogRSRegionEntries); | |
514 _fine_eviction_stride = _max_fine_entries / _fine_eviction_sample_size; | |
515 #endif | |
516 } | |
517 _fine_grain_regions = new PosParPRTPtr[_max_fine_entries]; | |
518 if (_fine_grain_regions == NULL) | |
519 vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries, | |
520 "Failed to allocate _fine_grain_entries."); | |
521 for (size_t i = 0; i < _max_fine_entries; i++) { | |
522 _fine_grain_regions[i] = NULL; | |
523 } | |
524 } | |
525 | |
526 int** OtherRegionsTable::_from_card_cache = NULL; | |
527 size_t OtherRegionsTable::_from_card_cache_max_regions = 0; | |
528 size_t OtherRegionsTable::_from_card_cache_mem_size = 0; | |
529 | |
530 void OtherRegionsTable::init_from_card_cache(size_t max_regions) { | |
531 _from_card_cache_max_regions = max_regions; | |
532 | |
533 int n_par_rs = HeapRegionRemSet::num_par_rem_sets(); | |
534 _from_card_cache = NEW_C_HEAP_ARRAY(int*, n_par_rs); | |
535 for (int i = 0; i < n_par_rs; i++) { | |
536 _from_card_cache[i] = NEW_C_HEAP_ARRAY(int, max_regions); | |
537 for (size_t j = 0; j < max_regions; j++) { | |
538 _from_card_cache[i][j] = -1; // An invalid value. | |
539 } | |
540 } | |
541 _from_card_cache_mem_size = n_par_rs * max_regions * sizeof(int); | |
542 } | |
543 | |
544 void OtherRegionsTable::shrink_from_card_cache(size_t new_n_regs) { | |
545 for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) { | |
546 assert(new_n_regs <= _from_card_cache_max_regions, "Must be within max."); | |
547 for (size_t j = new_n_regs; j < _from_card_cache_max_regions; j++) { | |
548 _from_card_cache[i][j] = -1; // An invalid value. | |
549 } | |
550 } | |
551 } | |
552 | |
553 #ifndef PRODUCT | |
554 void OtherRegionsTable::print_from_card_cache() { | |
555 for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) { | |
556 for (size_t j = 0; j < _from_card_cache_max_regions; j++) { | |
557 gclog_or_tty->print_cr("_from_card_cache[%d][%d] = %d.", | |
558 i, j, _from_card_cache[i][j]); | |
559 } | |
560 } | |
561 } | |
562 #endif | |
563 | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
807
diff
changeset
|
564 void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) { |
342 | 565 size_t cur_hrs_ind = hr()->hrs_index(); |
566 | |
567 #if HRRS_VERBOSE | |
568 gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").", | |
845
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
807
diff
changeset
|
569 from, |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
807
diff
changeset
|
570 UseCompressedOops |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
807
diff
changeset
|
571 ? oopDesc::load_decode_heap_oop((narrowOop*)from) |
df6caf649ff7
6700789: G1: Enable use of compressed oops with G1 heaps
ysr
parents:
807
diff
changeset
|
572 : oopDesc::load_decode_heap_oop((oop*)from)); |
342 | 573 #endif |
574 | |
575 int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift); | |
576 | |
577 #if HRRS_VERBOSE | |
578 gclog_or_tty->print_cr("Table for [" PTR_FORMAT "...): card %d (cache = %d)", | |
579 hr()->bottom(), from_card, | |
580 _from_card_cache[tid][cur_hrs_ind]); | |
581 #endif | |
582 | |
583 #define COUNT_CACHE 0 | |
584 #if COUNT_CACHE | |
585 jint p = Atomic::add(1, &_cache_probes); | |
586 if ((p % 10000) == 0) { | |
587 jint hits = _cache_hits; | |
588 gclog_or_tty->print_cr("%d/%d = %5.2f%% RS cache hits.", | |
589 _cache_hits, p, 100.0* (float)hits/(float)p); | |
590 } | |
591 #endif | |
592 if (from_card == _from_card_cache[tid][cur_hrs_ind]) { | |
593 #if HRRS_VERBOSE | |
594 gclog_or_tty->print_cr(" from-card cache hit."); | |
595 #endif | |
596 #if COUNT_CACHE | |
597 Atomic::inc(&_cache_hits); | |
598 #endif | |
599 assert(contains_reference(from), "We just added it!"); | |
600 return; | |
601 } else { | |
602 _from_card_cache[tid][cur_hrs_ind] = from_card; | |
603 } | |
604 | |
605 // Note that this may be a continued H region. | |
606 HeapRegion* from_hr = _g1h->heap_region_containing_raw(from); | |
807
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
795
diff
changeset
|
607 RegionIdx_t from_hrs_ind = (RegionIdx_t) from_hr->hrs_index(); |
342 | 608 |
609 // If the region is already coarsened, return. | |
610 if (_coarse_map.at(from_hrs_ind)) { | |
611 #if HRRS_VERBOSE | |
612 gclog_or_tty->print_cr(" coarse map hit."); | |
613 #endif | |
614 assert(contains_reference(from), "We just added it!"); | |
615 return; | |
616 } | |
617 | |
618 // Otherwise find a per-region table to add it to. | |
619 size_t ind = from_hrs_ind & _mod_max_fine_entries_mask; | |
620 PosParPRT* prt = find_region_table(ind, from_hr); | |
621 if (prt == NULL) { | |
622 MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag); | |
623 // Confirm that it's really not there... | |
624 prt = find_region_table(ind, from_hr); | |
625 if (prt == NULL) { | |
626 | |
627 uintptr_t from_hr_bot_card_index = | |
628 uintptr_t(from_hr->bottom()) | |
629 >> CardTableModRefBS::card_shift; | |
807
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
795
diff
changeset
|
630 CardIdx_t card_index = from_card - from_hr_bot_card_index; |
942
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
845
diff
changeset
|
631 assert(0 <= card_index && card_index < HeapRegion::CardsPerRegion, |
342 | 632 "Must be in range."); |
633 if (G1HRRSUseSparseTable && | |
807
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
795
diff
changeset
|
634 _sparse_table.add_card(from_hrs_ind, card_index)) { |
342 | 635 if (G1RecordHRRSOops) { |
636 HeapRegionRemSet::record(hr(), from); | |
637 #if HRRS_VERBOSE | |
638 gclog_or_tty->print(" Added card " PTR_FORMAT " to region " | |
639 "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n", | |
640 align_size_down(uintptr_t(from), | |
641 CardTableModRefBS::card_size), | |
642 hr()->bottom(), from); | |
643 #endif | |
644 } | |
645 #if HRRS_VERBOSE | |
646 gclog_or_tty->print_cr(" added card to sparse table."); | |
647 #endif | |
648 assert(contains_reference_locked(from), "We just added it!"); | |
649 return; | |
650 } else { | |
651 #if HRRS_VERBOSE | |
652 gclog_or_tty->print_cr(" [tid %d] sparse table entry " | |
653 "overflow(f: %d, t: %d)", | |
654 tid, from_hrs_ind, cur_hrs_ind); | |
655 #endif | |
656 } | |
657 | |
658 // Otherwise, transfer from sparse to fine-grain. | |
807
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
795
diff
changeset
|
659 CardIdx_t cards[SparsePRTEntry::CardsPerEntry]; |
342 | 660 if (G1HRRSUseSparseTable) { |
807
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
795
diff
changeset
|
661 bool res = _sparse_table.get_cards(from_hrs_ind, &cards[0]); |
342 | 662 assert(res, "There should have been an entry"); |
663 } | |
664 | |
665 if (_n_fine_entries == _max_fine_entries) { | |
666 prt = delete_region_table(); | |
667 } else { | |
668 prt = PosParPRT::alloc(from_hr); | |
669 } | |
670 prt->init(from_hr); | |
671 // Record the outgoing pointer in the from_region's outgoing bitmap. | |
672 from_hr->rem_set()->add_outgoing_reference(hr()); | |
673 | |
674 PosParPRT* first_prt = _fine_grain_regions[ind]; | |
675 prt->set_next(first_prt); // XXX Maybe move to init? | |
676 _fine_grain_regions[ind] = prt; | |
677 _n_fine_entries++; | |
678 | |
679 // Add in the cards from the sparse table. | |
680 if (G1HRRSUseSparseTable) { | |
681 for (int i = 0; i < SparsePRTEntry::CardsPerEntry; i++) { | |
807
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
795
diff
changeset
|
682 CardIdx_t c = cards[i]; |
342 | 683 if (c != SparsePRTEntry::NullEntry) { |
684 prt->add_card(c); | |
685 } | |
686 } | |
687 // Now we can delete the sparse entry. | |
807
d44bdab1c03d
6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
johnc
parents:
795
diff
changeset
|
688 bool res = _sparse_table.delete_entry(from_hrs_ind); |
342 | 689 assert(res, "It should have been there."); |
690 } | |
691 } | |
692 assert(prt != NULL && prt->hr() == from_hr, "consequence"); | |
693 } | |
694 // Note that we can't assert "prt->hr() == from_hr", because of the | |
695 // possibility of concurrent reuse. But see head comment of | |
696 // OtherRegionsTable for why this is OK. | |
697 assert(prt != NULL, "Inv"); | |
698 | |
699 prt->add_reference(from, tid); | |
700 if (G1RecordHRRSOops) { | |
701 HeapRegionRemSet::record(hr(), from); | |
702 #if HRRS_VERBOSE | |
703 gclog_or_tty->print("Added card " PTR_FORMAT " to region " | |
704 "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n", | |
705 align_size_down(uintptr_t(from), | |
706 CardTableModRefBS::card_size), | |
707 hr()->bottom(), from); | |
708 #endif | |
709 } | |
710 assert(contains_reference(from), "We just added it!"); | |
711 } | |
712 | |
713 PosParPRT* | |
714 OtherRegionsTable::find_region_table(size_t ind, HeapRegion* hr) const { | |
715 assert(0 <= ind && ind < _max_fine_entries, "Preconditions."); | |
716 PosParPRT* prt = _fine_grain_regions[ind]; | |
717 while (prt != NULL && prt->hr() != hr) { | |
718 prt = prt->next(); | |
719 } | |
720 // Loop postcondition is the method postcondition. | |
721 return prt; | |
722 } | |
723 | |
724 | |
725 #define DRT_CENSUS 0 | |
726 | |
727 #if DRT_CENSUS | |
728 static const int HistoSize = 6; | |
729 static int global_histo[HistoSize] = { 0, 0, 0, 0, 0, 0 }; | |
730 static int coarsenings = 0; | |
731 static int occ_sum = 0; | |
732 #endif | |
733 | |
734 jint OtherRegionsTable::_n_coarsenings = 0; | |
735 | |
736 PosParPRT* OtherRegionsTable::delete_region_table() { | |
737 #if DRT_CENSUS | |
738 int histo[HistoSize] = { 0, 0, 0, 0, 0, 0 }; | |
739 const int histo_limits[] = { 1, 4, 16, 64, 256, 2048 }; | |
740 #endif | |
741 | |
742 assert(_m.owned_by_self(), "Precondition"); | |
743 assert(_n_fine_entries == _max_fine_entries, "Precondition"); | |
744 PosParPRT* max = NULL; | |
745 jint max_occ = 0; | |
746 PosParPRT** max_prev; | |
747 size_t max_ind; | |
748 | |
749 #if SAMPLE_FOR_EVICTION | |
750 size_t i = _fine_eviction_start; | |
751 for (size_t k = 0; k < _fine_eviction_sample_size; k++) { | |
752 size_t ii = i; | |
753 // Make sure we get a non-NULL sample. | |
754 while (_fine_grain_regions[ii] == NULL) { | |
755 ii++; | |
756 if (ii == _max_fine_entries) ii = 0; | |
757 guarantee(ii != i, "We must find one."); | |
758 } | |
759 PosParPRT** prev = &_fine_grain_regions[ii]; | |
760 PosParPRT* cur = *prev; | |
761 while (cur != NULL) { | |
762 jint cur_occ = cur->occupied(); | |
763 if (max == NULL || cur_occ > max_occ) { | |
764 max = cur; | |
765 max_prev = prev; | |
766 max_ind = i; | |
767 max_occ = cur_occ; | |
768 } | |
769 prev = cur->next_addr(); | |
770 cur = cur->next(); | |
771 } | |
772 i = i + _fine_eviction_stride; | |
773 if (i >= _n_fine_entries) i = i - _n_fine_entries; | |
774 } | |
775 _fine_eviction_start++; | |
776 if (_fine_eviction_start >= _n_fine_entries) | |
777 _fine_eviction_start -= _n_fine_entries; | |
778 #else | |
779 for (int i = 0; i < _max_fine_entries; i++) { | |
780 PosParPRT** prev = &_fine_grain_regions[i]; | |
781 PosParPRT* cur = *prev; | |
782 while (cur != NULL) { | |
783 jint cur_occ = cur->occupied(); | |
784 #if DRT_CENSUS | |
785 for (int k = 0; k < HistoSize; k++) { | |
786 if (cur_occ <= histo_limits[k]) { | |
787 histo[k]++; global_histo[k]++; break; | |
788 } | |
789 } | |
790 #endif | |
791 if (max == NULL || cur_occ > max_occ) { | |
792 max = cur; | |
793 max_prev = prev; | |
794 max_ind = i; | |
795 max_occ = cur_occ; | |
796 } | |
797 prev = cur->next_addr(); | |
798 cur = cur->next(); | |
799 } | |
800 } | |
801 #endif | |
802 // XXX | |
803 guarantee(max != NULL, "Since _n_fine_entries > 0"); | |
804 #if DRT_CENSUS | |
805 gclog_or_tty->print_cr("In a coarsening: histo of occs:"); | |
806 for (int k = 0; k < HistoSize; k++) { | |
807 gclog_or_tty->print_cr(" <= %4d: %5d.", histo_limits[k], histo[k]); | |
808 } | |
809 coarsenings++; | |
810 occ_sum += max_occ; | |
811 if ((coarsenings % 100) == 0) { | |
812 gclog_or_tty->print_cr("\ncoarsenings = %d; global summary:", coarsenings); | |
813 for (int k = 0; k < HistoSize; k++) { | |
814 gclog_or_tty->print_cr(" <= %4d: %5d.", histo_limits[k], global_histo[k]); | |
815 } | |
816 gclog_or_tty->print_cr("Avg occ of deleted region = %6.2f.", | |
817 (float)occ_sum/(float)coarsenings); | |
818 } | |
819 #endif | |
820 | |
821 // Set the corresponding coarse bit. | |
822 int max_hrs_index = max->hr()->hrs_index(); | |
823 if (!_coarse_map.at(max_hrs_index)) { | |
824 _coarse_map.at_put(max_hrs_index, true); | |
825 _n_coarse_entries++; | |
826 #if 0 | |
827 gclog_or_tty->print("Coarsened entry in region [" PTR_FORMAT "...] " | |
828 "for region [" PTR_FORMAT "...] (%d coarse entries).\n", | |
829 hr()->bottom(), | |
830 max->hr()->bottom(), | |
831 _n_coarse_entries); | |
832 #endif | |
833 } | |
834 | |
835 // Unsplice. | |
836 *max_prev = max->next(); | |
837 Atomic::inc(&_n_coarsenings); | |
838 _n_fine_entries--; | |
839 return max; | |
840 } | |
841 | |
842 | |
// At present, this must be called stop-world single-threaded.
// Removes remembered-set entries that refer to dead regions (not set in
// "region_bm") or dead cards (not set in "card_bm"), across all three
// representations (coarse map, fine-grain tables, sparse table via the
// per-PRT scrub), then invalidates the from-card cache.
void OtherRegionsTable::scrub(CardTableModRefBS* ctbs,
                              BitMap* region_bm, BitMap* card_bm) {
  // First eliminate garbage regions from the coarse map.
  if (G1RSScrubVerbose)
    gclog_or_tty->print_cr("Scrubbing region %d:", hr()->hrs_index());

  assert(_coarse_map.size() == region_bm->size(), "Precondition");
  if (G1RSScrubVerbose)
    gclog_or_tty->print(" Coarse map: before = %d...", _n_coarse_entries);
  // Only regions still marked live in region_bm keep their coarse bit.
  _coarse_map.set_intersection(*region_bm);
  _n_coarse_entries = _coarse_map.count_one_bits();
  if (G1RSScrubVerbose)
    gclog_or_tty->print_cr(" after = %d.", _n_coarse_entries);

  // Now do the fine-grained maps.
  for (size_t i = 0; i < _max_fine_entries; i++) {
    PosParPRT* cur = _fine_grain_regions[i];
    PosParPRT** prev = &_fine_grain_regions[i];   // unlink point for cur
    while (cur != NULL) {
      PosParPRT* nxt = cur->next();
      // If the entire region is dead, eliminate.
      if (G1RSScrubVerbose)
        gclog_or_tty->print_cr(" For other region %d:", cur->hr()->hrs_index());
      if (!region_bm->at(cur->hr()->hrs_index())) {
        // Whole source region is dead: unlink and free its table.
        *prev = nxt;
        cur->set_next(NULL);
        _n_fine_entries--;
        if (G1RSScrubVerbose)
          gclog_or_tty->print_cr(" deleted via region map.");
        PosParPRT::free(cur);
      } else {
        // Do fine-grain elimination.
        if (G1RSScrubVerbose)
          gclog_or_tty->print(" occ: before = %4d.", cur->occupied());
        cur->scrub(ctbs, card_bm);
        if (G1RSScrubVerbose)
          gclog_or_tty->print_cr(" after = %4d.", cur->occupied());
        // Did that empty the table completely?
        if (cur->occupied() == 0) {
          *prev = nxt;
          cur->set_next(NULL);
          _n_fine_entries--;
          PosParPRT::free(cur);
        } else {
          // Keep this entry; advance the unlink point past it.
          prev = cur->next_addr();
        }
      }
      cur = nxt;
    }
  }
  // Since we may have deleted a from_card_cache entry from the RS, clear
  // the FCC.
  clear_fcc();
}
898 | |
899 | |
900 size_t OtherRegionsTable::occupied() const { | |
901 // Cast away const in this case. | |
902 MutexLockerEx x((Mutex*)&_m, Mutex::_no_safepoint_check_flag); | |
903 size_t sum = occ_fine(); | |
904 sum += occ_sparse(); | |
905 sum += occ_coarse(); | |
906 return sum; | |
907 } | |
908 | |
909 size_t OtherRegionsTable::occ_fine() const { | |
910 size_t sum = 0; | |
911 for (size_t i = 0; i < _max_fine_entries; i++) { | |
912 PosParPRT* cur = _fine_grain_regions[i]; | |
913 while (cur != NULL) { | |
914 sum += cur->occupied(); | |
915 cur = cur->next(); | |
916 } | |
917 } | |
918 return sum; | |
919 } | |
920 | |
921 size_t OtherRegionsTable::occ_coarse() const { | |
942
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
845
diff
changeset
|
922 return (_n_coarse_entries * HeapRegion::CardsPerRegion); |
342 | 923 } |
924 | |
// Number of cards recorded in the sparse table.
size_t OtherRegionsTable::occ_sparse() const {
  return _sparse_table.occupied();
}
928 | |
929 size_t OtherRegionsTable::mem_size() const { | |
930 // Cast away const in this case. | |
931 MutexLockerEx x((Mutex*)&_m, Mutex::_no_safepoint_check_flag); | |
932 size_t sum = 0; | |
933 for (size_t i = 0; i < _max_fine_entries; i++) { | |
934 PosParPRT* cur = _fine_grain_regions[i]; | |
935 while (cur != NULL) { | |
936 sum += cur->mem_size(); | |
937 cur = cur->next(); | |
938 } | |
939 } | |
940 sum += (sizeof(PosParPRT*) * _max_fine_entries); | |
941 sum += (_coarse_map.size_in_words() * HeapWordSize); | |
942 sum += (_sparse_table.mem_size()); | |
943 sum += sizeof(*this) - sizeof(_sparse_table); // Avoid double counting above. | |
944 return sum; | |
945 } | |
946 | |
// Memory used by the class-static from-card cache (shared by all
// instances).
size_t OtherRegionsTable::static_mem_size() {
  return _from_card_cache_mem_size;
}
950 | |
// Memory held on the PRT free lists (serial and parallel variants).
size_t OtherRegionsTable::fl_mem_size() {
  return PerRegionTable::fl_mem_size() + PosParPRT::fl_mem_size();
}
954 | |
955 void OtherRegionsTable::clear_fcc() { | |
956 for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) { | |
957 _from_card_cache[i][hr()->hrs_index()] = -1; | |
958 } | |
959 } | |
960 | |
961 void OtherRegionsTable::clear() { | |
962 MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag); | |
963 for (size_t i = 0; i < _max_fine_entries; i++) { | |
964 PosParPRT* cur = _fine_grain_regions[i]; | |
965 while (cur != NULL) { | |
966 PosParPRT* nxt = cur->next(); | |
967 PosParPRT::free(cur); | |
968 cur = nxt; | |
969 } | |
970 _fine_grain_regions[i] = NULL; | |
971 } | |
972 _sparse_table.clear(); | |
973 _coarse_map.clear(); | |
974 _n_fine_entries = 0; | |
975 _n_coarse_entries = 0; | |
976 | |
977 clear_fcc(); | |
978 } | |
979 | |
// Remove all cards recorded as coming in from "from_hr": delete its
// fine-grain table if present, otherwise clear its coarse-map bit, then
// purge any from-card-cache entries that point into this region.
void OtherRegionsTable::clear_incoming_entry(HeapRegion* from_hr) {
  MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
  size_t hrs_ind = (size_t)from_hr->hrs_index();
  size_t ind = hrs_ind & _mod_max_fine_entries_mask;
  if (del_single_region_table(ind, from_hr)) {
    // A region is tracked fine-grained or coarsely, never both.
    assert(!_coarse_map.at(hrs_ind), "Inv");
  } else {
    // No fine-grain table: clear the coarse bit (it may already be 0).
    // NOTE(review): _n_coarse_entries is not decremented here, so it can
    // over-count after this call — presumably tolerated; confirm.
    _coarse_map.par_at_put(hrs_ind, 0);
  }
  // Check to see if any of the fcc entries come from here.
  int hr_ind = hr()->hrs_index();
  for (int tid = 0; tid < HeapRegionRemSet::num_par_rem_sets(); tid++) {
    int fcc_ent = _from_card_cache[tid][hr_ind];
    if (fcc_ent != -1) {
      // Reconstruct the cached card's address from its index.
      HeapWord* card_addr = (HeapWord*)
        (uintptr_t(fcc_ent) << CardTableModRefBS::card_shift);
      if (hr()->is_in_reserved(card_addr)) {
        // Clear the from card cache.
        _from_card_cache[tid][hr_ind] = -1;
      }
    }
  }
}
1003 | |
1004 bool OtherRegionsTable::del_single_region_table(size_t ind, | |
1005 HeapRegion* hr) { | |
1006 assert(0 <= ind && ind < _max_fine_entries, "Preconditions."); | |
1007 PosParPRT** prev_addr = &_fine_grain_regions[ind]; | |
1008 PosParPRT* prt = *prev_addr; | |
1009 while (prt != NULL && prt->hr() != hr) { | |
1010 prev_addr = prt->next_addr(); | |
1011 prt = prt->next(); | |
1012 } | |
1013 if (prt != NULL) { | |
1014 assert(prt->hr() == hr, "Loop postcondition."); | |
1015 *prev_addr = prt->next(); | |
1016 PosParPRT::free(prt); | |
1017 _n_fine_entries--; | |
1018 return true; | |
1019 } else { | |
1020 return false; | |
1021 } | |
1022 } | |
1023 | |
// Locking wrapper around contains_reference_locked().
bool OtherRegionsTable::contains_reference(OopOrNarrowOopStar from) const {
  // Cast away const in this case.
  MutexLockerEx x((Mutex*)&_m, Mutex::_no_safepoint_check_flag);
  return contains_reference_locked(from);
}
1029 | |
// Answers whether the card containing reference location "from" is
// present in this table, checking the coarse map, then the fine-grain
// table for the source region, then the sparse table.  Caller holds _m.
bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const {
  HeapRegion* hr = _g1h->heap_region_containing_raw(from);
  if (hr == NULL) return false;
  RegionIdx_t hr_ind = (RegionIdx_t) hr->hrs_index();
  // Is this region in the coarse map?
  if (_coarse_map.at(hr_ind)) return true;

  PosParPRT* prt = find_region_table(hr_ind & _mod_max_fine_entries_mask,
                                     hr);
  if (prt != NULL) {
    return prt->contains_reference(from);
  } else {
    // No fine-grain table: compute the card's index within its region
    // and consult the sparse table.
    uintptr_t from_card =
      (uintptr_t(from) >> CardTableModRefBS::card_shift);
    uintptr_t hr_bot_card_index =
      uintptr_t(hr->bottom()) >> CardTableModRefBS::card_shift;
    assert(from_card >= hr_bot_card_index, "Inv");
    CardIdx_t card_index = from_card - hr_bot_card_index;
    assert(0 <= card_index && card_index < HeapRegion::CardsPerRegion,
           "Must be in range.");
    return _sparse_table.contains_card(hr_ind, card_index);
  }
}
1056 | |
// Determines how many threads can add records to an rset in parallel.
// This can be done by either mutator threads together with the
// concurrent refinement threads or GC threads.
int HeapRegionRemSet::num_par_rem_sets() {
  return (int)MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), ParallelGCThreads);
}
1063 | |
// Construct the remembered set for region "hr": an OtherRegionsTable for
// incoming references, plus a heap-sized bitmap of outgoing regions
// (C-heap allocated, not resource area); iteration starts unclaimed.
HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
                                   HeapRegion* hr)
  : _bosa(bosa), _other_regions(hr),
    _outgoing_region_map(G1CollectedHeap::heap()->max_regions(),
                         false /* in-resource-area */),
    _iter_state(Unclaimed)
{}
1071 | |
1072 | |
// Reset the claim state so a new round of parallel iteration can begin.
void HeapRegionRemSet::init_for_par_iteration() {
  _iter_state = Unclaimed;
}
1076 | |
// Atomically claim this remembered set for iteration.  Returns true for
// exactly one caller: the one whose cmpxchg moves Unclaimed -> Claimed.
bool HeapRegionRemSet::claim_iter() {
  if (_iter_state != Unclaimed) return false;  // cheap pre-check before the CAS
  jint res = Atomic::cmpxchg(Claimed, (jint*)(&_iter_state), Unclaimed);
  return (res == Unclaimed);
}
1082 | |
// Mark iteration over this remembered set as finished.
void HeapRegionRemSet::set_iter_complete() {
  _iter_state = Complete;
}
1086 | |
// Has iteration over this remembered set finished?
bool HeapRegionRemSet::iter_is_complete() {
  return _iter_state == Complete;
}
1090 | |
1091 | |
// Bind "iter" to this remembered set.
void HeapRegionRemSet::init_iterator(HeapRegionRemSetIterator* iter) const {
  iter->initialize(this);
}
1095 | |
#ifndef PRODUCT
// Debug dump: print the start address of every card in this remembered
// set, then verify that iteration yielded exactly occupied() cards.
void HeapRegionRemSet::print() const {
  HeapRegionRemSetIterator iter;
  init_iterator(&iter);
  size_t card_index;
  while (iter.has_next(card_index)) {
    HeapWord* card_start =
      G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index);
    gclog_or_tty->print_cr(" Card " PTR_FORMAT ".", card_start);
  }
  // XXX
  if (iter.n_yielded() != occupied()) {
    gclog_or_tty->print_cr("Yielded disagrees with occupied:");
    gclog_or_tty->print_cr(" %6d yielded (%6d coarse, %6d fine).",
                           iter.n_yielded(),
                           iter.n_yielded_coarse(), iter.n_yielded_fine());
    gclog_or_tty->print_cr(" %6d occ (%6d coarse, %6d fine).",
                           occupied(), occ_coarse(), occ_fine());
  }
  guarantee(iter.n_yielded() == occupied(),
            "We should have yielded all the represented cards.");
}
#endif
1119 | |
// Cleanup hook: delegate to the sparse-table machinery.
void HeapRegionRemSet::cleanup() {
  SparsePRT::cleanup_all();
}
1123 | |
// Parallel cleanup hook: contract the parallel per-region tables.
void HeapRegionRemSet::par_cleanup() {
  PosParPRT::par_contract_all();
}
1127 | |
// Note (atomically) that this region has a reference into "to_hr".
void HeapRegionRemSet::add_outgoing_reference(HeapRegion* to_hr) {
  _outgoing_region_map.par_at_put(to_hr->hrs_index(), 1);
}
1131 | |
// Empty the remembered set: first tell the regions we reference to drop
// their entries that came from here, then clear the outgoing map and
// the incoming table.
void HeapRegionRemSet::clear() {
  clear_outgoing_entries();
  _outgoing_region_map.clear();
  _other_regions.clear();
  assert(occupied() == 0, "Should be clear.");
}
1138 | |
1139 void HeapRegionRemSet::clear_outgoing_entries() { | |
1140 G1CollectedHeap* g1h = G1CollectedHeap::heap(); | |
1141 size_t i = _outgoing_region_map.get_next_one_offset(0); | |
1142 while (i < _outgoing_region_map.size()) { | |
1143 HeapRegion* to_region = g1h->region_at(i); | |
545 | 1144 if (!to_region->in_collection_set()) { |
1145 to_region->rem_set()->clear_incoming_entry(hr()); | |
1146 } | |
342 | 1147 i = _outgoing_region_map.get_next_one_offset(i+1); |
1148 } | |
1149 } | |
1150 | |
1151 | |
// Remove entries for dead regions/cards; see OtherRegionsTable::scrub.
void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs,
                             BitMap* region_bm, BitMap* card_bm) {
  _other_regions.scrub(ctbs, region_bm, card_bm);
}
1156 | |
1157 //-------------------- Iteration -------------------- | |
1158 | |
// Construct an unbound iterator; initialize() must be called before use.
// The sparse sub-iterator is seeded with the card index of the start of
// the reserved heap.
HeapRegionRemSetIterator::
HeapRegionRemSetIterator() :
  _hrrs(NULL),
  _g1h(G1CollectedHeap::heap()),
  _bosa(NULL),
  _sparse_iter(size_t(G1CollectedHeap::heap()->reserved_region().start())
               >> CardTableModRefBS::card_shift)
{}
1167 | |
1168 void HeapRegionRemSetIterator::initialize(const HeapRegionRemSet* hrrs) { | |
1169 _hrrs = hrrs; | |
1170 _coarse_map = &_hrrs->_other_regions._coarse_map; | |
1171 _fine_grain_regions = _hrrs->_other_regions._fine_grain_regions; | |
1172 _bosa = _hrrs->bosa(); | |
1173 | |
1174 _is = Sparse; | |
1175 // Set these values so that we increment to the first region. | |
1176 _coarse_cur_region_index = -1; | |
942
2c79770d1f6e
6819085: G1: use larger and/or user settable region size
tonyp
parents:
845
diff
changeset
|
1177 _coarse_cur_region_cur_card = (HeapRegion::CardsPerRegion-1);; |
342 | 1178 |
1179 _cur_region_cur_card = 0; | |
1180 | |
1181 _fine_array_index = -1; | |
1182 _fine_cur_prt = NULL; | |
1183 | |
1184 _n_yielded_coarse = 0; | |
1185 _n_yielded_fine = 0; | |
1186 _n_yielded_sparse = 0; | |
1187 | |
1188 _sparse_iter.init(&hrrs->_other_regions._sparse_table); | |
1189 } | |
1190 | |
// Advance to the next card represented by the coarse map; returns false
// when coarse entries are exhausted.  On success stores the card's
// global index in "card_index".
bool HeapRegionRemSetIterator::coarse_has_next(size_t& card_index) {
  if (_hrrs->_other_regions._n_coarse_entries == 0) return false;
  // Go to the next card.
  _coarse_cur_region_cur_card++;
  // Was the last the last card in the current region?
  if (_coarse_cur_region_cur_card == HeapRegion::CardsPerRegion) {
    // Yes: find the next region. This may leave _coarse_cur_region_index
    // Set to the last index, in which case there are no more coarse
    // regions.
    _coarse_cur_region_index =
      (int) _coarse_map->get_next_one_offset(_coarse_cur_region_index + 1);
    if ((size_t)_coarse_cur_region_index < _coarse_map->size()) {
      _coarse_cur_region_cur_card = 0;
      HeapWord* r_bot =
        _g1h->region_at(_coarse_cur_region_index)->bottom();
      // Card offset of the new region's bottom, for global card indices.
      _cur_region_card_offset = _bosa->index_for(r_bot);
    } else {
      return false;
    }
  }
  // If we didn't return false above, then we can yield a card.
  card_index = _cur_region_card_offset + _coarse_cur_region_cur_card;
  return true;
}
1215 | |
// Advance _fine_array_index to the next non-empty hash bucket and point
// _fine_cur_prt at its first PRT; leaves _fine_cur_prt NULL if no
// buckets remain.
void HeapRegionRemSetIterator::fine_find_next_non_null_prt() {
  // Otherwise, find the next bucket list in the array.
  _fine_array_index++;
  while (_fine_array_index < (int) OtherRegionsTable::_max_fine_entries) {
    _fine_cur_prt = _fine_grain_regions[_fine_array_index];
    if (_fine_cur_prt != NULL) return;
    else _fine_array_index++;
  }
  assert(_fine_cur_prt == NULL, "Loop post");
}
1226 | |
// Advance to the next card in the fine-grain tables; returns false when
// they are exhausted.  On success stores the card's global index in
// "card_index".
bool HeapRegionRemSetIterator::fine_has_next(size_t& card_index) {
  if (fine_has_next()) {
    // Look for the next set bit in the current PRT's bitmap.
    _cur_region_cur_card =
      _fine_cur_prt->_bm.get_next_one_offset(_cur_region_cur_card + 1);
  }
  while (!fine_has_next()) {
    // Current PRT exhausted: try the next PRT in this bucket's chain...
    if (_cur_region_cur_card == (size_t) HeapRegion::CardsPerRegion) {
      _cur_region_cur_card = 0;
      _fine_cur_prt = _fine_cur_prt->next();
    }
    // ...and if the chain is done, the next non-empty hash bucket.
    if (_fine_cur_prt == NULL) {
      fine_find_next_non_null_prt();
      if (_fine_cur_prt == NULL) return false;
    }
    assert(_fine_cur_prt != NULL && _cur_region_cur_card == 0,
           "inv.");
    HeapWord* r_bot =
      _fine_cur_prt->hr()->bottom();
    _cur_region_card_offset = _bosa->index_for(r_bot);
    _cur_region_cur_card = _fine_cur_prt->_bm.get_next_one_offset(0);
  }
  assert(fine_has_next(), "Or else we exited the loop via the return.");
  card_index = _cur_region_card_offset + _cur_region_cur_card;
  return true;
}
1252 | |
// True while there is a current PRT whose bitmap cursor is still within
// the region's card range.
bool HeapRegionRemSetIterator::fine_has_next() {
  return
    _fine_cur_prt != NULL &&
    _cur_region_cur_card < (size_t) HeapRegion::CardsPerRegion;
}
1258 | |
// Yield the next card index, draining the sparse, then fine, then
// coarse representations in order.  The switch falls through
// deliberately as each representation is exhausted.
bool HeapRegionRemSetIterator::has_next(size_t& card_index) {
  switch (_is) {
  case Sparse:
    if (_sparse_iter.has_next(card_index)) {
      _n_yielded_sparse++;
      return true;
    }
    // Otherwise, deliberate fall-through
    _is = Fine;
  case Fine:
    if (fine_has_next(card_index)) {
      _n_yielded_fine++;
      return true;
    }
    // Otherwise, deliberate fall-through
    _is = Coarse;
  case Coarse:
    if (coarse_has_next(card_index)) {
      _n_yielded_coarse++;
      return true;
    }
    // Otherwise...
    break;
  }
  assert(ParallelGCThreads > 1 ||
         n_yielded() == _hrrs->occupied(),
         "Should have yielded all the cards in the rem set "
         "(in the non-par case).");
  return false;
}
1289 | |
1290 | |
1291 | |
// Static debug-recording state: parallel arrays of recorded reference
// locations, the cards containing them, and the regions recorded into
// (allocated lazily in record()).
OopOrNarrowOopStar* HeapRegionRemSet::_recorded_oops = NULL;
HeapWord** HeapRegionRemSet::_recorded_cards = NULL;
HeapRegion** HeapRegionRemSet::_recorded_regions = NULL;
int HeapRegionRemSet::_n_recorded = 0;

// Recorded events and, for each, the _n_recorded position at which it
// occurred (allocated lazily in record_event()).
HeapRegionRemSet::Event* HeapRegionRemSet::_recorded_events = NULL;
int* HeapRegionRemSet::_recorded_event_index = NULL;
int HeapRegionRemSet::_n_recorded_events = 0;
1300 | |
// Record (for debugging) that reference location "f" was added to the
// remembered set of region "hr".  The recording arrays are allocated
// lazily on first use and hold at most MaxRecorded entries.
void HeapRegionRemSet::record(HeapRegion* hr, OopOrNarrowOopStar f) {
  if (_recorded_oops == NULL) {
    assert(_n_recorded == 0
           && _recorded_cards == NULL
           && _recorded_regions == NULL,
           "Inv");
    _recorded_oops    = NEW_C_HEAP_ARRAY(OopOrNarrowOopStar, MaxRecorded);
    _recorded_cards   = NEW_C_HEAP_ARRAY(HeapWord*, MaxRecorded);
    _recorded_regions = NEW_C_HEAP_ARRAY(HeapRegion*, MaxRecorded);
  }
  if (_n_recorded == MaxRecorded) {
    gclog_or_tty->print_cr("Filled up 'recorded' (%d).", MaxRecorded);
  } else {
    // Store the card containing the reference, the reference itself,
    // and the destination region.
    _recorded_cards[_n_recorded] =
      (HeapWord*)align_size_down(uintptr_t(f),
                                 CardTableModRefBS::card_size);
    _recorded_oops[_n_recorded] = f;
    _recorded_regions[_n_recorded] = hr;
    _n_recorded++;
  }
}
1322 | |
// Record (for debugging) that "evnt" occurred; no-op unless
// G1RecordHRRSEvents is set.  Arrays are allocated lazily and hold at
// most MaxRecordedEvents entries.
void HeapRegionRemSet::record_event(Event evnt) {
  if (!G1RecordHRRSEvents) return;

  if (_recorded_events == NULL) {
    assert(_n_recorded_events == 0
           && _recorded_event_index == NULL,
           "Inv");
    _recorded_events = NEW_C_HEAP_ARRAY(Event, MaxRecordedEvents);
    _recorded_event_index = NEW_C_HEAP_ARRAY(int, MaxRecordedEvents);
  }
  if (_n_recorded_events == MaxRecordedEvents) {
    gclog_or_tty->print_cr("Filled up 'recorded_events' (%d).", MaxRecordedEvents);
  } else {
    _recorded_events[_n_recorded_events] = evnt;
    // Associate the event with the index of the next recorded reference.
    _recorded_event_index[_n_recorded_events] = _n_recorded;
    _n_recorded_events++;
  }
}
1341 | |
1342 void HeapRegionRemSet::print_event(outputStream* str, Event evnt) { | |
1343 switch (evnt) { | |
1344 case Event_EvacStart: | |
1345 str->print("Evac Start"); | |
1346 break; | |
1347 case Event_EvacEnd: | |
1348 str->print("Evac End"); | |
1349 break; | |
1350 case Event_RSUpdateEnd: | |
1351 str->print("RS Update End"); | |
1352 break; | |
1353 } | |
1354 } | |
1355 | |
// Print every recorded reference, interleaving recorded events at the
// positions (_recorded_event_index) where they occurred.
void HeapRegionRemSet::print_recorded() {
  int cur_evnt = 0;
  Event cur_evnt_kind;
  int cur_evnt_ind = 0;
  if (_n_recorded_events > 0) {
    cur_evnt_kind = _recorded_events[cur_evnt];
    cur_evnt_ind = _recorded_event_index[cur_evnt];
  }

  for (int i = 0; i < _n_recorded; i++) {
    // Emit any events attached to this position first.
    while (cur_evnt < _n_recorded_events && i == cur_evnt_ind) {
      gclog_or_tty->print("Event: ");
      print_event(gclog_or_tty, cur_evnt_kind);
      gclog_or_tty->print_cr("");
      cur_evnt++;
      if (cur_evnt < MaxRecordedEvents) {
        cur_evnt_kind = _recorded_events[cur_evnt];
        cur_evnt_ind = _recorded_event_index[cur_evnt];
      }
    }
    gclog_or_tty->print("Added card " PTR_FORMAT " to region [" PTR_FORMAT "...]"
                        " for ref " PTR_FORMAT ".\n",
                        _recorded_cards[i], _recorded_regions[i]->bottom(),
                        _recorded_oops[i]);
  }
}
1382 | |
#ifndef PRODUCT
// Self-test (non-product builds): add references from several regions
// into region 0's remembered set, force a coarsening, then verify that
// iteration yields exactly the expected number of cards.
void HeapRegionRemSet::test() {
  os::sleep(Thread::current(), (jlong)5000, false);
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Run with "-XX:G1LogRSRegionEntries=2", so that 1 and 5 end up in same
  // hash bucket.
  HeapRegion* hr0 = g1h->region_at(0);
  HeapRegion* hr1 = g1h->region_at(1);
  HeapRegion* hr2 = g1h->region_at(5);
  HeapRegion* hr3 = g1h->region_at(6);
  HeapRegion* hr4 = g1h->region_at(7);
  HeapRegion* hr5 = g1h->region_at(8);

  // First, middle, and last addresses of three source regions.
  HeapWord* hr1_start = hr1->bottom();
  HeapWord* hr1_mid = hr1_start + HeapRegion::GrainWords/2;
  HeapWord* hr1_last = hr1->end() - 1;

  HeapWord* hr2_start = hr2->bottom();
  HeapWord* hr2_mid = hr2_start + HeapRegion::GrainWords/2;
  HeapWord* hr2_last = hr2->end() - 1;

  HeapWord* hr3_start = hr3->bottom();
  HeapWord* hr3_mid = hr3_start + HeapRegion::GrainWords/2;
  HeapWord* hr3_last = hr3->end() - 1;

  HeapRegionRemSet* hrrs = hr0->rem_set();

  // Make three references from region 0x101...
  hrrs->add_reference((OopOrNarrowOopStar)hr1_start);
  hrrs->add_reference((OopOrNarrowOopStar)hr1_mid);
  hrrs->add_reference((OopOrNarrowOopStar)hr1_last);

  hrrs->add_reference((OopOrNarrowOopStar)hr2_start);
  hrrs->add_reference((OopOrNarrowOopStar)hr2_mid);
  hrrs->add_reference((OopOrNarrowOopStar)hr2_last);

  hrrs->add_reference((OopOrNarrowOopStar)hr3_start);
  hrrs->add_reference((OopOrNarrowOopStar)hr3_mid);
  hrrs->add_reference((OopOrNarrowOopStar)hr3_last);

  // Now cause a coarsening.
  hrrs->add_reference((OopOrNarrowOopStar)hr4->bottom());
  hrrs->add_reference((OopOrNarrowOopStar)hr5->bottom());

  // Now, does iteration yield these three?
  HeapRegionRemSetIterator iter;
  hrrs->init_iterator(&iter);
  size_t sum = 0;
  size_t card_index;
  while (iter.has_next(card_index)) {
    HeapWord* card_start =
      G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index);
    gclog_or_tty->print_cr(" Card " PTR_FORMAT ".", card_start);
    sum++;
  }
  // 11 references were added; per the arithmetic below, coarsening
  // replaces one region's 3 distinct cards with 2048 cards.
  guarantee(sum == 11 - 3 + 2048, "Failure");
  guarantee(sum == hrrs->occupied(), "Failure");
}
#endif