Mercurial > hg > truffle
comparison src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp @ 6188:7994a5a35fcf
6921087: G1: remove per-GC-thread expansion tables from the fine-grain remembered sets
Summary: Remove the per-thread expansion tables (PosParPRT) and associated expansion and compaction from the fine grain RSet entries. This code has been unused for a while.
Reviewed-by: johnc, brutisso
Contributed-by: Thomas Schatzl <thomas.schatzl@jku.at>
author | johnc |
---|---|
date | Mon, 25 Jun 2012 16:00:55 -0700 |
parents | 5c8bd7c16119 |
children | 24b9c7f4cae6 |
comparison
equal
deleted
inserted
replaced
6161:1c280e5b8d31 | 6188:7994a5a35fcf |
---|---|
28 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" | 28 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" |
29 #include "gc_implementation/g1/heapRegionRemSet.hpp" | 29 #include "gc_implementation/g1/heapRegionRemSet.hpp" |
30 #include "gc_implementation/g1/heapRegionSeq.inline.hpp" | 30 #include "gc_implementation/g1/heapRegionSeq.inline.hpp" |
31 #include "memory/allocation.hpp" | 31 #include "memory/allocation.hpp" |
32 #include "memory/space.inline.hpp" | 32 #include "memory/space.inline.hpp" |
33 #include "oops/oop.inline.hpp" | |
33 #include "utilities/bitMap.inline.hpp" | 34 #include "utilities/bitMap.inline.hpp" |
34 #include "utilities/globalDefinitions.hpp" | 35 #include "utilities/globalDefinitions.hpp" |
35 | |
36 #define HRRS_VERBOSE 0 | |
37 | |
38 #define PRT_COUNT_OCCUPIED 1 | |
39 | 36 |
40 // OtherRegionsTable | 37 // OtherRegionsTable |
41 | 38 |
42 class PerRegionTable: public CHeapObj { | 39 class PerRegionTable: public CHeapObj { |
43 friend class OtherRegionsTable; | 40 friend class OtherRegionsTable; |
44 friend class HeapRegionRemSetIterator; | 41 friend class HeapRegionRemSetIterator; |
45 | 42 |
46 HeapRegion* _hr; | 43 HeapRegion* _hr; |
47 BitMap _bm; | 44 BitMap _bm; |
48 #if PRT_COUNT_OCCUPIED | |
49 jint _occupied; | 45 jint _occupied; |
50 #endif | 46 |
51 PerRegionTable* _next_free; | 47 // next pointer for free/allocated list |
52 | 48 PerRegionTable* _next; |
53 PerRegionTable* next_free() { return _next_free; } | |
54 void set_next_free(PerRegionTable* prt) { _next_free = prt; } | |
55 | |
56 | 49 |
57 static PerRegionTable* _free_list; | 50 static PerRegionTable* _free_list; |
58 | 51 |
59 #ifdef _MSC_VER | 52 #ifdef _MSC_VER |
60 // For some reason even though the classes are marked as friend they are unable | 53 // For some reason even though the classes are marked as friend they are unable |
67 | 60 |
68 protected: | 61 protected: |
69 // We need access in order to union things into the base table. | 62 // We need access in order to union things into the base table. |
70 BitMap* bm() { return &_bm; } | 63 BitMap* bm() { return &_bm; } |
71 | 64 |
72 #if PRT_COUNT_OCCUPIED | |
73 void recount_occupied() { | 65 void recount_occupied() { |
74 _occupied = (jint) bm()->count_one_bits(); | 66 _occupied = (jint) bm()->count_one_bits(); |
75 } | 67 } |
76 #endif | |
77 | 68 |
78 PerRegionTable(HeapRegion* hr) : | 69 PerRegionTable(HeapRegion* hr) : |
79 _hr(hr), | 70 _hr(hr), |
80 #if PRT_COUNT_OCCUPIED | |
81 _occupied(0), | 71 _occupied(0), |
82 #endif | |
83 _bm(HeapRegion::CardsPerRegion, false /* in-resource-area */) | 72 _bm(HeapRegion::CardsPerRegion, false /* in-resource-area */) |
84 {} | 73 {} |
85 | |
86 static void free(PerRegionTable* prt) { | |
87 while (true) { | |
88 PerRegionTable* fl = _free_list; | |
89 prt->set_next_free(fl); | |
90 PerRegionTable* res = | |
91 (PerRegionTable*) | |
92 Atomic::cmpxchg_ptr(prt, &_free_list, fl); | |
93 if (res == fl) return; | |
94 } | |
95 ShouldNotReachHere(); | |
96 } | |
97 | |
98 static PerRegionTable* alloc(HeapRegion* hr) { | |
99 PerRegionTable* fl = _free_list; | |
100 while (fl != NULL) { | |
101 PerRegionTable* nxt = fl->next_free(); | |
102 PerRegionTable* res = | |
103 (PerRegionTable*) | |
104 Atomic::cmpxchg_ptr(nxt, &_free_list, fl); | |
105 if (res == fl) { | |
106 fl->init(hr); | |
107 return fl; | |
108 } else { | |
109 fl = _free_list; | |
110 } | |
111 } | |
112 assert(fl == NULL, "Loop condition."); | |
113 return new PerRegionTable(hr); | |
114 } | |
115 | 74 |
116 void add_card_work(CardIdx_t from_card, bool par) { | 75 void add_card_work(CardIdx_t from_card, bool par) { |
117 if (!_bm.at(from_card)) { | 76 if (!_bm.at(from_card)) { |
118 if (par) { | 77 if (par) { |
119 if (_bm.par_at_put(from_card, 1)) { | 78 if (_bm.par_at_put(from_card, 1)) { |
120 #if PRT_COUNT_OCCUPIED | |
121 Atomic::inc(&_occupied); | 79 Atomic::inc(&_occupied); |
122 #endif | |
123 } | 80 } |
124 } else { | 81 } else { |
125 _bm.at_put(from_card, 1); | 82 _bm.at_put(from_card, 1); |
126 #if PRT_COUNT_OCCUPIED | |
127 _occupied++; | 83 _occupied++; |
128 #endif | |
129 } | 84 } |
130 } | 85 } |
131 } | 86 } |
132 | 87 |
133 void add_reference_work(OopOrNarrowOopStar from, bool par) { | 88 void add_reference_work(OopOrNarrowOopStar from, bool par) { |
134 // Must make this robust in case "from" is not in "_hr", because of | 89 // Must make this robust in case "from" is not in "_hr", because of |
135 // concurrency. | 90 // concurrency. |
136 | 91 |
137 #if HRRS_VERBOSE | 92 if (G1TraceHeapRegionRememberedSet) { |
138 gclog_or_tty->print_cr(" PRT::Add_reference_work(" PTR_FORMAT "->" PTR_FORMAT").", | 93 gclog_or_tty->print_cr(" PRT::Add_reference_work(" PTR_FORMAT "->" PTR_FORMAT").", |
139 from, *from); | 94 from, |
140 #endif | 95 UseCompressedOops |
96 ? oopDesc::load_decode_heap_oop((narrowOop*)from) | |
97 : oopDesc::load_decode_heap_oop((oop*)from)); | |
98 } | |
141 | 99 |
142 HeapRegion* loc_hr = hr(); | 100 HeapRegion* loc_hr = hr(); |
143 // If the test below fails, then this table was reused concurrently | 101 // If the test below fails, then this table was reused concurrently |
144 // with this operation. This is OK, since the old table was coarsened, | 102 // with this operation. This is OK, since the old table was coarsened, |
145 // and adding a bit to the new table is never incorrect. | 103 // and adding a bit to the new table is never incorrect. |
160 | 118 |
161 public: | 119 public: |
162 | 120 |
163 HeapRegion* hr() const { return _hr; } | 121 HeapRegion* hr() const { return _hr; } |
164 | 122 |
165 #if PRT_COUNT_OCCUPIED | |
166 jint occupied() const { | 123 jint occupied() const { |
167 // Overkill, but if we ever need it... | 124 // Overkill, but if we ever need it... |
168 // guarantee(_occupied == _bm.count_one_bits(), "Check"); | 125 // guarantee(_occupied == _bm.count_one_bits(), "Check"); |
169 return _occupied; | 126 return _occupied; |
170 } | 127 } |
171 #else | |
172 jint occupied() const { | |
173 return _bm.count_one_bits(); | |
174 } | |
175 #endif | |
176 | 128 |
177 void init(HeapRegion* hr) { | 129 void init(HeapRegion* hr) { |
178 _hr = hr; | 130 _hr = hr; |
179 #if PRT_COUNT_OCCUPIED | 131 _next = NULL; |
180 _occupied = 0; | 132 _occupied = 0; |
181 #endif | |
182 _bm.clear(); | 133 _bm.clear(); |
183 } | 134 } |
184 | 135 |
185 void add_reference(OopOrNarrowOopStar from) { | 136 void add_reference(OopOrNarrowOopStar from) { |
186 add_reference_work(from, /*parallel*/ true); | 137 add_reference_work(from, /*parallel*/ true); |
192 | 143 |
193 void scrub(CardTableModRefBS* ctbs, BitMap* card_bm) { | 144 void scrub(CardTableModRefBS* ctbs, BitMap* card_bm) { |
194 HeapWord* hr_bot = hr()->bottom(); | 145 HeapWord* hr_bot = hr()->bottom(); |
195 size_t hr_first_card_index = ctbs->index_for(hr_bot); | 146 size_t hr_first_card_index = ctbs->index_for(hr_bot); |
196 bm()->set_intersection_at_offset(*card_bm, hr_first_card_index); | 147 bm()->set_intersection_at_offset(*card_bm, hr_first_card_index); |
197 #if PRT_COUNT_OCCUPIED | |
198 recount_occupied(); | 148 recount_occupied(); |
199 #endif | |
200 } | 149 } |
201 | 150 |
202 void add_card(CardIdx_t from_card_index) { | 151 void add_card(CardIdx_t from_card_index) { |
203 add_card_work(from_card_index, /*parallel*/ true); | 152 add_card_work(from_card_index, /*parallel*/ true); |
204 } | 153 } |
214 } | 163 } |
215 | 164 |
216 // Mem size in bytes. | 165 // Mem size in bytes. |
217 size_t mem_size() const { | 166 size_t mem_size() const { |
218 return sizeof(this) + _bm.size_in_words() * HeapWordSize; | 167 return sizeof(this) + _bm.size_in_words() * HeapWordSize; |
219 } | |
220 | |
221 static size_t fl_mem_size() { | |
222 PerRegionTable* cur = _free_list; | |
223 size_t res = 0; | |
224 while (cur != NULL) { | |
225 res += sizeof(PerRegionTable); | |
226 cur = cur->next_free(); | |
227 } | |
228 return res; | |
229 } | 168 } |
230 | 169 |
231 // Requires "from" to be in "hr()". | 170 // Requires "from" to be in "hr()". |
232 bool contains_reference(OopOrNarrowOopStar from) const { | 171 bool contains_reference(OopOrNarrowOopStar from) const { |
233 assert(hr()->is_in_reserved(from), "Precondition."); | 172 assert(hr()->is_in_reserved(from), "Precondition."); |
234 size_t card_ind = pointer_delta(from, hr()->bottom(), | 173 size_t card_ind = pointer_delta(from, hr()->bottom(), |
235 CardTableModRefBS::card_size); | 174 CardTableModRefBS::card_size); |
236 return _bm.at(card_ind); | 175 return _bm.at(card_ind); |
237 } | 176 } |
238 }; | 177 |
239 | 178 PerRegionTable* next() const { return _next; } |
240 PerRegionTable* PerRegionTable::_free_list = NULL; | 179 void set_next(PerRegionTable* nxt) { _next = nxt; } |
241 | 180 PerRegionTable** next_addr() { return &_next; } |
242 | 181 |
243 #define COUNT_PAR_EXPANDS 0 | 182 static void free(PerRegionTable* prt) { |
244 | |
245 #if COUNT_PAR_EXPANDS | |
246 static jint n_par_expands = 0; | |
247 static jint n_par_contracts = 0; | |
248 static jint par_expand_list_len = 0; | |
249 static jint max_par_expand_list_len = 0; | |
250 | |
251 static void print_par_expand() { | |
252 Atomic::inc(&n_par_expands); | |
253 Atomic::inc(&par_expand_list_len); | |
254 if (par_expand_list_len > max_par_expand_list_len) { | |
255 max_par_expand_list_len = par_expand_list_len; | |
256 } | |
257 if ((n_par_expands % 10) == 0) { | |
258 gclog_or_tty->print_cr("\n\n%d par expands: %d contracts, " | |
259 "len = %d, max_len = %d\n.", | |
260 n_par_expands, n_par_contracts, par_expand_list_len, | |
261 max_par_expand_list_len); | |
262 } | |
263 } | |
264 #endif | |
265 | |
266 class PosParPRT: public PerRegionTable { | |
267 PerRegionTable** _par_tables; | |
268 | |
269 enum SomePrivateConstants { | |
270 ReserveParTableExpansion = 1 | |
271 }; | |
272 | |
273 void par_contract() { | |
274 assert(_par_tables != NULL, "Precondition."); | |
275 int n = HeapRegionRemSet::num_par_rem_sets()-1; | |
276 for (int i = 0; i < n; i++) { | |
277 _par_tables[i]->union_bitmap_into(bm()); | |
278 PerRegionTable::free(_par_tables[i]); | |
279 _par_tables[i] = NULL; | |
280 } | |
281 #if PRT_COUNT_OCCUPIED | |
282 // We must recount the "occupied." | |
283 recount_occupied(); | |
284 #endif | |
285 FREE_C_HEAP_ARRAY(PerRegionTable*, _par_tables); | |
286 _par_tables = NULL; | |
287 #if COUNT_PAR_EXPANDS | |
288 Atomic::inc(&n_par_contracts); | |
289 Atomic::dec(&par_expand_list_len); | |
290 #endif | |
291 } | |
292 | |
293 static PerRegionTable** _par_table_fl; | |
294 | |
295 PosParPRT* _next; | |
296 | |
297 static PosParPRT* _free_list; | |
298 | |
299 PerRegionTable** par_tables() const { | |
300 assert(uintptr_t(NULL) == 0, "Assumption."); | |
301 if (uintptr_t(_par_tables) <= ReserveParTableExpansion) | |
302 return NULL; | |
303 else | |
304 return _par_tables; | |
305 } | |
306 | |
307 PosParPRT* _next_par_expanded; | |
308 PosParPRT* next_par_expanded() { return _next_par_expanded; } | |
309 void set_next_par_expanded(PosParPRT* ppprt) { _next_par_expanded = ppprt; } | |
310 static PosParPRT* _par_expanded_list; | |
311 | |
312 public: | |
313 | |
314 PosParPRT(HeapRegion* hr) : PerRegionTable(hr), _par_tables(NULL) {} | |
315 | |
316 jint occupied() const { | |
317 jint res = PerRegionTable::occupied(); | |
318 if (par_tables() != NULL) { | |
319 for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets()-1; i++) { | |
320 res += par_tables()[i]->occupied(); | |
321 } | |
322 } | |
323 return res; | |
324 } | |
325 | |
326 void init(HeapRegion* hr) { | |
327 PerRegionTable::init(hr); | |
328 _next = NULL; | |
329 if (par_tables() != NULL) { | |
330 for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets()-1; i++) { | |
331 par_tables()[i]->init(hr); | |
332 } | |
333 } | |
334 } | |
335 | |
336 static void free(PosParPRT* prt) { | |
337 while (true) { | 183 while (true) { |
338 PosParPRT* fl = _free_list; | 184 PerRegionTable* fl = _free_list; |
339 prt->set_next(fl); | 185 prt->set_next(fl); |
340 PosParPRT* res = | 186 PerRegionTable* res = |
341 (PosParPRT*) | 187 (PerRegionTable*) |
342 Atomic::cmpxchg_ptr(prt, &_free_list, fl); | 188 Atomic::cmpxchg_ptr(prt, &_free_list, fl); |
343 if (res == fl) return; | 189 if (res == fl) return; |
344 } | 190 } |
345 ShouldNotReachHere(); | 191 ShouldNotReachHere(); |
346 } | 192 } |
347 | 193 |
348 static PosParPRT* alloc(HeapRegion* hr) { | 194 static PerRegionTable* alloc(HeapRegion* hr) { |
349 PosParPRT* fl = _free_list; | 195 PerRegionTable* fl = _free_list; |
350 while (fl != NULL) { | 196 while (fl != NULL) { |
351 PosParPRT* nxt = fl->next(); | 197 PerRegionTable* nxt = fl->next(); |
352 PosParPRT* res = | 198 PerRegionTable* res = |
353 (PosParPRT*) | 199 (PerRegionTable*) |
354 Atomic::cmpxchg_ptr(nxt, &_free_list, fl); | 200 Atomic::cmpxchg_ptr(nxt, &_free_list, fl); |
355 if (res == fl) { | 201 if (res == fl) { |
356 fl->init(hr); | 202 fl->init(hr); |
357 return fl; | 203 return fl; |
358 } else { | 204 } else { |
359 fl = _free_list; | 205 fl = _free_list; |
360 } | 206 } |
361 } | 207 } |
362 assert(fl == NULL, "Loop condition."); | 208 assert(fl == NULL, "Loop condition."); |
363 return new PosParPRT(hr); | 209 return new PerRegionTable(hr); |
364 } | |
365 | |
366 PosParPRT* next() const { return _next; } | |
367 void set_next(PosParPRT* nxt) { _next = nxt; } | |
368 PosParPRT** next_addr() { return &_next; } | |
369 | |
370 bool should_expand(int tid) { | |
371 // Given that we now defer RSet updates for after a GC we don't | |
372 // really need to expand the tables any more. This code should be | |
373 // cleaned up in the future (see CR 6921087). | |
374 return false; | |
375 } | |
376 | |
377 void par_expand() { | |
378 int n = HeapRegionRemSet::num_par_rem_sets()-1; | |
379 if (n <= 0) return; | |
380 if (_par_tables == NULL) { | |
381 PerRegionTable* res = | |
382 (PerRegionTable*) | |
383 Atomic::cmpxchg_ptr((PerRegionTable*)ReserveParTableExpansion, | |
384 &_par_tables, NULL); | |
385 if (res != NULL) return; | |
386 // Otherwise, we reserved the right to do the expansion. | |
387 | |
388 PerRegionTable** ptables = NEW_C_HEAP_ARRAY(PerRegionTable*, n); | |
389 for (int i = 0; i < n; i++) { | |
390 PerRegionTable* ptable = PerRegionTable::alloc(hr()); | |
391 ptables[i] = ptable; | |
392 } | |
393 // Here we do not need an atomic. | |
394 _par_tables = ptables; | |
395 #if COUNT_PAR_EXPANDS | |
396 print_par_expand(); | |
397 #endif | |
398 // We must put this table on the expanded list. | |
399 PosParPRT* exp_head = _par_expanded_list; | |
400 while (true) { | |
401 set_next_par_expanded(exp_head); | |
402 PosParPRT* res = | |
403 (PosParPRT*) | |
404 Atomic::cmpxchg_ptr(this, &_par_expanded_list, exp_head); | |
405 if (res == exp_head) return; | |
406 // Otherwise. | |
407 exp_head = res; | |
408 } | |
409 ShouldNotReachHere(); | |
410 } | |
411 } | |
412 | |
413 void add_reference(OopOrNarrowOopStar from, int tid) { | |
414 // Expand if necessary. | |
415 PerRegionTable** pt = par_tables(); | |
416 if (pt != NULL) { | |
417 // We always have to assume that mods to table 0 are in parallel, | |
418 // because of the claiming scheme in parallel expansion. A thread | |
419 // with tid != 0 that finds the table to be NULL, but doesn't succeed | |
420 // in claiming the right of expanding it, will end up in the else | |
421 // clause of the above if test. That thread could be delayed, and a | |
422 // thread 0 add reference could see the table expanded, and come | |
423 // here. Both threads would be adding in parallel. But we get to | |
424 // not use atomics for tids > 0. | |
425 if (tid == 0) { | |
426 PerRegionTable::add_reference(from); | |
427 } else { | |
428 pt[tid-1]->seq_add_reference(from); | |
429 } | |
430 } else { | |
431 // Not expanded -- add to the base table. | |
432 PerRegionTable::add_reference(from); | |
433 } | |
434 } | |
435 | |
436 void scrub(CardTableModRefBS* ctbs, BitMap* card_bm) { | |
437 assert(_par_tables == NULL, "Precondition"); | |
438 PerRegionTable::scrub(ctbs, card_bm); | |
439 } | |
440 | |
441 size_t mem_size() const { | |
442 size_t res = | |
443 PerRegionTable::mem_size() + sizeof(this) - sizeof(PerRegionTable); | |
444 if (_par_tables != NULL) { | |
445 for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets()-1; i++) { | |
446 res += _par_tables[i]->mem_size(); | |
447 } | |
448 } | |
449 return res; | |
450 } | 210 } |
451 | 211 |
452 static size_t fl_mem_size() { | 212 static size_t fl_mem_size() { |
453 PosParPRT* cur = _free_list; | 213 PerRegionTable* cur = _free_list; |
454 size_t res = 0; | 214 size_t res = 0; |
455 while (cur != NULL) { | 215 while (cur != NULL) { |
456 res += sizeof(PosParPRT); | 216 res += sizeof(PerRegionTable); |
457 cur = cur->next(); | 217 cur = cur->next(); |
458 } | 218 } |
459 return res; | 219 return res; |
460 } | 220 } |
461 | |
462 bool contains_reference(OopOrNarrowOopStar from) const { | |
463 if (PerRegionTable::contains_reference(from)) return true; | |
464 if (_par_tables != NULL) { | |
465 for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets()-1; i++) { | |
466 if (_par_tables[i]->contains_reference(from)) return true; | |
467 } | |
468 } | |
469 return false; | |
470 } | |
471 | |
472 static void par_contract_all(); | |
473 }; | 221 }; |
474 | 222 |
475 void PosParPRT::par_contract_all() { | 223 PerRegionTable* PerRegionTable::_free_list = NULL; |
476 PosParPRT* hd = _par_expanded_list; | |
477 while (hd != NULL) { | |
478 PosParPRT* nxt = hd->next_par_expanded(); | |
479 PosParPRT* res = | |
480 (PosParPRT*) | |
481 Atomic::cmpxchg_ptr(nxt, &_par_expanded_list, hd); | |
482 if (res == hd) { | |
483 // We claimed the right to contract this table. | |
484 hd->set_next_par_expanded(NULL); | |
485 hd->par_contract(); | |
486 hd = _par_expanded_list; | |
487 } else { | |
488 hd = res; | |
489 } | |
490 } | |
491 } | |
492 | |
493 PosParPRT* PosParPRT::_free_list = NULL; | |
494 PosParPRT* PosParPRT::_par_expanded_list = NULL; | |
495 | |
496 jint OtherRegionsTable::_cache_probes = 0; | |
497 jint OtherRegionsTable::_cache_hits = 0; | |
498 | 224 |
499 size_t OtherRegionsTable::_max_fine_entries = 0; | 225 size_t OtherRegionsTable::_max_fine_entries = 0; |
500 size_t OtherRegionsTable::_mod_max_fine_entries_mask = 0; | 226 size_t OtherRegionsTable::_mod_max_fine_entries_mask = 0; |
501 #if SAMPLE_FOR_EVICTION | |
502 size_t OtherRegionsTable::_fine_eviction_stride = 0; | 227 size_t OtherRegionsTable::_fine_eviction_stride = 0; |
503 size_t OtherRegionsTable::_fine_eviction_sample_size = 0; | 228 size_t OtherRegionsTable::_fine_eviction_sample_size = 0; |
504 #endif | |
505 | 229 |
506 OtherRegionsTable::OtherRegionsTable(HeapRegion* hr) : | 230 OtherRegionsTable::OtherRegionsTable(HeapRegion* hr) : |
507 _g1h(G1CollectedHeap::heap()), | 231 _g1h(G1CollectedHeap::heap()), |
508 _m(Mutex::leaf, "An OtherRegionsTable lock", true), | 232 _m(Mutex::leaf, "An OtherRegionsTable lock", true), |
509 _hr(hr), | 233 _hr(hr), |
510 _coarse_map(G1CollectedHeap::heap()->max_regions(), | 234 _coarse_map(G1CollectedHeap::heap()->max_regions(), |
511 false /* in-resource-area */), | 235 false /* in-resource-area */), |
512 _fine_grain_regions(NULL), | 236 _fine_grain_regions(NULL), |
513 _n_fine_entries(0), _n_coarse_entries(0), | 237 _n_fine_entries(0), _n_coarse_entries(0), |
514 #if SAMPLE_FOR_EVICTION | |
515 _fine_eviction_start(0), | 238 _fine_eviction_start(0), |
516 #endif | |
517 _sparse_table(hr) | 239 _sparse_table(hr) |
518 { | 240 { |
519 typedef PosParPRT* PosParPRTPtr; | 241 typedef PerRegionTable* PerRegionTablePtr; |
242 | |
520 if (_max_fine_entries == 0) { | 243 if (_max_fine_entries == 0) { |
521 assert(_mod_max_fine_entries_mask == 0, "Both or none."); | 244 assert(_mod_max_fine_entries_mask == 0, "Both or none."); |
522 size_t max_entries_log = (size_t)log2_long((jlong)G1RSetRegionEntries); | 245 size_t max_entries_log = (size_t)log2_long((jlong)G1RSetRegionEntries); |
523 _max_fine_entries = (size_t)(1 << max_entries_log); | 246 _max_fine_entries = (size_t)(1 << max_entries_log); |
524 _mod_max_fine_entries_mask = _max_fine_entries - 1; | 247 _mod_max_fine_entries_mask = _max_fine_entries - 1; |
525 #if SAMPLE_FOR_EVICTION | 248 |
526 assert(_fine_eviction_sample_size == 0 | 249 assert(_fine_eviction_sample_size == 0 |
527 && _fine_eviction_stride == 0, "All init at same time."); | 250 && _fine_eviction_stride == 0, "All init at same time."); |
528 _fine_eviction_sample_size = MAX2((size_t)4, max_entries_log); | 251 _fine_eviction_sample_size = MAX2((size_t)4, max_entries_log); |
529 _fine_eviction_stride = _max_fine_entries / _fine_eviction_sample_size; | 252 _fine_eviction_stride = _max_fine_entries / _fine_eviction_sample_size; |
530 #endif | 253 } |
531 } | 254 |
532 _fine_grain_regions = new PosParPRTPtr[_max_fine_entries]; | 255 _fine_grain_regions = new PerRegionTablePtr[_max_fine_entries]; |
533 if (_fine_grain_regions == NULL) | 256 |
257 if (_fine_grain_regions == NULL) { | |
534 vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries, | 258 vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries, |
535 "Failed to allocate _fine_grain_entries."); | 259 "Failed to allocate _fine_grain_entries."); |
260 } | |
261 | |
536 for (size_t i = 0; i < _max_fine_entries; i++) { | 262 for (size_t i = 0; i < _max_fine_entries; i++) { |
537 _fine_grain_regions[i] = NULL; | 263 _fine_grain_regions[i] = NULL; |
538 } | 264 } |
539 } | 265 } |
540 | 266 |
541 int** OtherRegionsTable::_from_card_cache = NULL; | 267 int** OtherRegionsTable::_from_card_cache = NULL; |
542 size_t OtherRegionsTable::_from_card_cache_max_regions = 0; | 268 size_t OtherRegionsTable::_from_card_cache_max_regions = 0; |
543 size_t OtherRegionsTable::_from_card_cache_mem_size = 0; | 269 size_t OtherRegionsTable::_from_card_cache_mem_size = 0; |
544 | 270 |
545 void OtherRegionsTable::init_from_card_cache(size_t max_regions) { | 271 void OtherRegionsTable::init_from_card_cache(size_t max_regions) { |
546 _from_card_cache_max_regions = max_regions; | 272 _from_card_cache_max_regions = max_regions; |
577 #endif | 303 #endif |
578 | 304 |
579 void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) { | 305 void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) { |
580 size_t cur_hrs_ind = (size_t) hr()->hrs_index(); | 306 size_t cur_hrs_ind = (size_t) hr()->hrs_index(); |
581 | 307 |
582 #if HRRS_VERBOSE | 308 if (G1TraceHeapRegionRememberedSet) { |
583 gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").", | 309 gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").", |
584 from, | 310 from, |
585 UseCompressedOops | 311 UseCompressedOops |
586 ? oopDesc::load_decode_heap_oop((narrowOop*)from) | 312 ? oopDesc::load_decode_heap_oop((narrowOop*)from) |
587 : oopDesc::load_decode_heap_oop((oop*)from)); | 313 : oopDesc::load_decode_heap_oop((oop*)from)); |
588 #endif | 314 } |
589 | 315 |
590 int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift); | 316 int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift); |
591 | 317 |
592 #if HRRS_VERBOSE | 318 if (G1TraceHeapRegionRememberedSet) { |
593 gclog_or_tty->print_cr("Table for [" PTR_FORMAT "...): card %d (cache = %d)", | 319 gclog_or_tty->print_cr("Table for [" PTR_FORMAT "...): card %d (cache = %d)", |
594 hr()->bottom(), from_card, | 320 hr()->bottom(), from_card, |
595 _from_card_cache[tid][cur_hrs_ind]); | 321 _from_card_cache[tid][cur_hrs_ind]); |
596 #endif | 322 } |
597 | 323 |
598 #define COUNT_CACHE 0 | |
599 #if COUNT_CACHE | |
600 jint p = Atomic::add(1, &_cache_probes); | |
601 if ((p % 10000) == 0) { | |
602 jint hits = _cache_hits; | |
603 gclog_or_tty->print_cr("%d/%d = %5.2f%% RS cache hits.", | |
604 _cache_hits, p, 100.0* (float)hits/(float)p); | |
605 } | |
606 #endif | |
607 if (from_card == _from_card_cache[tid][cur_hrs_ind]) { | 324 if (from_card == _from_card_cache[tid][cur_hrs_ind]) { |
608 #if HRRS_VERBOSE | 325 if (G1TraceHeapRegionRememberedSet) { |
609 gclog_or_tty->print_cr(" from-card cache hit."); | 326 gclog_or_tty->print_cr(" from-card cache hit."); |
610 #endif | 327 } |
611 #if COUNT_CACHE | |
612 Atomic::inc(&_cache_hits); | |
613 #endif | |
614 assert(contains_reference(from), "We just added it!"); | 328 assert(contains_reference(from), "We just added it!"); |
615 return; | 329 return; |
616 } else { | 330 } else { |
617 _from_card_cache[tid][cur_hrs_ind] = from_card; | 331 _from_card_cache[tid][cur_hrs_ind] = from_card; |
618 } | 332 } |
621 HeapRegion* from_hr = _g1h->heap_region_containing_raw(from); | 335 HeapRegion* from_hr = _g1h->heap_region_containing_raw(from); |
622 RegionIdx_t from_hrs_ind = (RegionIdx_t) from_hr->hrs_index(); | 336 RegionIdx_t from_hrs_ind = (RegionIdx_t) from_hr->hrs_index(); |
623 | 337 |
624 // If the region is already coarsened, return. | 338 // If the region is already coarsened, return. |
625 if (_coarse_map.at(from_hrs_ind)) { | 339 if (_coarse_map.at(from_hrs_ind)) { |
626 #if HRRS_VERBOSE | 340 if (G1TraceHeapRegionRememberedSet) { |
627 gclog_or_tty->print_cr(" coarse map hit."); | 341 gclog_or_tty->print_cr(" coarse map hit."); |
628 #endif | 342 } |
629 assert(contains_reference(from), "We just added it!"); | 343 assert(contains_reference(from), "We just added it!"); |
630 return; | 344 return; |
631 } | 345 } |
632 | 346 |
633 // Otherwise find a per-region table to add it to. | 347 // Otherwise find a per-region table to add it to. |
634 size_t ind = from_hrs_ind & _mod_max_fine_entries_mask; | 348 size_t ind = from_hrs_ind & _mod_max_fine_entries_mask; |
635 PosParPRT* prt = find_region_table(ind, from_hr); | 349 PerRegionTable* prt = find_region_table(ind, from_hr); |
636 if (prt == NULL) { | 350 if (prt == NULL) { |
637 MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag); | 351 MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag); |
638 // Confirm that it's really not there... | 352 // Confirm that it's really not there... |
639 prt = find_region_table(ind, from_hr); | 353 prt = find_region_table(ind, from_hr); |
640 if (prt == NULL) { | 354 if (prt == NULL) { |
647 "Must be in range."); | 361 "Must be in range."); |
648 if (G1HRRSUseSparseTable && | 362 if (G1HRRSUseSparseTable && |
649 _sparse_table.add_card(from_hrs_ind, card_index)) { | 363 _sparse_table.add_card(from_hrs_ind, card_index)) { |
650 if (G1RecordHRRSOops) { | 364 if (G1RecordHRRSOops) { |
651 HeapRegionRemSet::record(hr(), from); | 365 HeapRegionRemSet::record(hr(), from); |
652 #if HRRS_VERBOSE | 366 if (G1TraceHeapRegionRememberedSet) { |
653 gclog_or_tty->print(" Added card " PTR_FORMAT " to region " | 367 gclog_or_tty->print(" Added card " PTR_FORMAT " to region " |
654 "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n", | 368 "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n", |
655 align_size_down(uintptr_t(from), | 369 align_size_down(uintptr_t(from), |
656 CardTableModRefBS::card_size), | 370 CardTableModRefBS::card_size), |
657 hr()->bottom(), from); | 371 hr()->bottom(), from); |
658 #endif | 372 } |
659 } | 373 } |
660 #if HRRS_VERBOSE | 374 if (G1TraceHeapRegionRememberedSet) { |
661 gclog_or_tty->print_cr(" added card to sparse table."); | 375 gclog_or_tty->print_cr(" added card to sparse table."); |
662 #endif | 376 } |
663 assert(contains_reference_locked(from), "We just added it!"); | 377 assert(contains_reference_locked(from), "We just added it!"); |
664 return; | 378 return; |
665 } else { | 379 } else { |
666 #if HRRS_VERBOSE | 380 if (G1TraceHeapRegionRememberedSet) { |
667 gclog_or_tty->print_cr(" [tid %d] sparse table entry " | 381 gclog_or_tty->print_cr(" [tid %d] sparse table entry " |
668 "overflow(f: %d, t: %d)", | 382 "overflow(f: %d, t: %d)", |
669 tid, from_hrs_ind, cur_hrs_ind); | 383 tid, from_hrs_ind, cur_hrs_ind); |
670 #endif | 384 } |
671 } | 385 } |
672 | 386 |
673 if (_n_fine_entries == _max_fine_entries) { | 387 if (_n_fine_entries == _max_fine_entries) { |
674 prt = delete_region_table(); | 388 prt = delete_region_table(); |
675 } else { | 389 } else { |
676 prt = PosParPRT::alloc(from_hr); | 390 prt = PerRegionTable::alloc(from_hr); |
677 } | 391 } |
678 prt->init(from_hr); | 392 prt->init(from_hr); |
679 | 393 |
680 PosParPRT* first_prt = _fine_grain_regions[ind]; | 394 PerRegionTable* first_prt = _fine_grain_regions[ind]; |
681 prt->set_next(first_prt); // XXX Maybe move to init? | 395 prt->set_next(first_prt); // XXX Maybe move to init? |
682 _fine_grain_regions[ind] = prt; | 396 _fine_grain_regions[ind] = prt; |
683 _n_fine_entries++; | 397 _n_fine_entries++; |
684 | 398 |
685 if (G1HRRSUseSparseTable) { | 399 if (G1HRRSUseSparseTable) { |
702 // Note that we can't assert "prt->hr() == from_hr", because of the | 416 // Note that we can't assert "prt->hr() == from_hr", because of the |
703 // possibility of concurrent reuse. But see head comment of | 417 // possibility of concurrent reuse. But see head comment of |
704 // OtherRegionsTable for why this is OK. | 418 // OtherRegionsTable for why this is OK. |
705 assert(prt != NULL, "Inv"); | 419 assert(prt != NULL, "Inv"); |
706 | 420 |
707 if (prt->should_expand(tid)) { | 421 prt->add_reference(from); |
708 MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag); | 422 |
709 HeapRegion* prt_hr = prt->hr(); | |
710 if (prt_hr == from_hr) { | |
711 // Make sure the table still corresponds to the same region | |
712 prt->par_expand(); | |
713 prt->add_reference(from, tid); | |
714 } | |
715 // else: The table has been concurrently coarsened, evicted, and | |
716 // the table data structure re-used for another table. So, we | |
717 // don't need to add the reference any more given that the table | |
718 // has been coarsened and the whole region will be scanned anyway. | |
719 } else { | |
720 prt->add_reference(from, tid); | |
721 } | |
722 if (G1RecordHRRSOops) { | 423 if (G1RecordHRRSOops) { |
723 HeapRegionRemSet::record(hr(), from); | 424 HeapRegionRemSet::record(hr(), from); |
724 #if HRRS_VERBOSE | 425 if (G1TraceHeapRegionRememberedSet) { |
725 gclog_or_tty->print("Added card " PTR_FORMAT " to region " | 426 gclog_or_tty->print("Added card " PTR_FORMAT " to region " |
726 "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n", | 427 "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n", |
727 align_size_down(uintptr_t(from), | 428 align_size_down(uintptr_t(from), |
728 CardTableModRefBS::card_size), | 429 CardTableModRefBS::card_size), |
729 hr()->bottom(), from); | 430 hr()->bottom(), from); |
730 #endif | 431 } |
731 } | 432 } |
732 assert(contains_reference(from), "We just added it!"); | 433 assert(contains_reference(from), "We just added it!"); |
733 } | 434 } |
734 | 435 |
735 PosParPRT* | 436 PerRegionTable* |
736 OtherRegionsTable::find_region_table(size_t ind, HeapRegion* hr) const { | 437 OtherRegionsTable::find_region_table(size_t ind, HeapRegion* hr) const { |
737 assert(0 <= ind && ind < _max_fine_entries, "Preconditions."); | 438 assert(0 <= ind && ind < _max_fine_entries, "Preconditions."); |
738 PosParPRT* prt = _fine_grain_regions[ind]; | 439 PerRegionTable* prt = _fine_grain_regions[ind]; |
739 while (prt != NULL && prt->hr() != hr) { | 440 while (prt != NULL && prt->hr() != hr) { |
740 prt = prt->next(); | 441 prt = prt->next(); |
741 } | 442 } |
742 // Loop postcondition is the method postcondition. | 443 // Loop postcondition is the method postcondition. |
743 return prt; | 444 return prt; |
744 } | 445 } |
745 | 446 |
746 | |
747 #define DRT_CENSUS 0 | |
748 | |
749 #if DRT_CENSUS | |
750 static const int HistoSize = 6; | |
751 static int global_histo[HistoSize] = { 0, 0, 0, 0, 0, 0 }; | |
752 static int coarsenings = 0; | |
753 static int occ_sum = 0; | |
754 #endif | |
755 | |
756 jint OtherRegionsTable::_n_coarsenings = 0; | 447 jint OtherRegionsTable::_n_coarsenings = 0; |
757 | 448 |
758 PosParPRT* OtherRegionsTable::delete_region_table() { | 449 PerRegionTable* OtherRegionsTable::delete_region_table() { |
759 #if DRT_CENSUS | |
760 int histo[HistoSize] = { 0, 0, 0, 0, 0, 0 }; | |
761 const int histo_limits[] = { 1, 4, 16, 64, 256, 2048 }; | |
762 #endif | |
763 | |
764 assert(_m.owned_by_self(), "Precondition"); | 450 assert(_m.owned_by_self(), "Precondition"); |
765 assert(_n_fine_entries == _max_fine_entries, "Precondition"); | 451 assert(_n_fine_entries == _max_fine_entries, "Precondition"); |
766 PosParPRT* max = NULL; | 452 PerRegionTable* max = NULL; |
767 jint max_occ = 0; | 453 jint max_occ = 0; |
768 PosParPRT** max_prev; | 454 PerRegionTable** max_prev; |
769 size_t max_ind; | 455 size_t max_ind; |
770 | 456 |
771 #if SAMPLE_FOR_EVICTION | |
772 size_t i = _fine_eviction_start; | 457 size_t i = _fine_eviction_start; |
773 for (size_t k = 0; k < _fine_eviction_sample_size; k++) { | 458 for (size_t k = 0; k < _fine_eviction_sample_size; k++) { |
774 size_t ii = i; | 459 size_t ii = i; |
775 // Make sure we get a non-NULL sample. | 460 // Make sure we get a non-NULL sample. |
776 while (_fine_grain_regions[ii] == NULL) { | 461 while (_fine_grain_regions[ii] == NULL) { |
777 ii++; | 462 ii++; |
778 if (ii == _max_fine_entries) ii = 0; | 463 if (ii == _max_fine_entries) ii = 0; |
779 guarantee(ii != i, "We must find one."); | 464 guarantee(ii != i, "We must find one."); |
780 } | 465 } |
781 PosParPRT** prev = &_fine_grain_regions[ii]; | 466 PerRegionTable** prev = &_fine_grain_regions[ii]; |
782 PosParPRT* cur = *prev; | 467 PerRegionTable* cur = *prev; |
783 while (cur != NULL) { | 468 while (cur != NULL) { |
784 jint cur_occ = cur->occupied(); | 469 jint cur_occ = cur->occupied(); |
785 if (max == NULL || cur_occ > max_occ) { | 470 if (max == NULL || cur_occ > max_occ) { |
786 max = cur; | 471 max = cur; |
787 max_prev = prev; | 472 max_prev = prev; |
792 cur = cur->next(); | 477 cur = cur->next(); |
793 } | 478 } |
794 i = i + _fine_eviction_stride; | 479 i = i + _fine_eviction_stride; |
795 if (i >= _n_fine_entries) i = i - _n_fine_entries; | 480 if (i >= _n_fine_entries) i = i - _n_fine_entries; |
796 } | 481 } |
482 | |
797 _fine_eviction_start++; | 483 _fine_eviction_start++; |
798 if (_fine_eviction_start >= _n_fine_entries) | 484 |
485 if (_fine_eviction_start >= _n_fine_entries) { | |
799 _fine_eviction_start -= _n_fine_entries; | 486 _fine_eviction_start -= _n_fine_entries; |
800 #else | 487 } |
801 for (int i = 0; i < _max_fine_entries; i++) { | 488 |
802 PosParPRT** prev = &_fine_grain_regions[i]; | |
803 PosParPRT* cur = *prev; | |
804 while (cur != NULL) { | |
805 jint cur_occ = cur->occupied(); | |
806 #if DRT_CENSUS | |
807 for (int k = 0; k < HistoSize; k++) { | |
808 if (cur_occ <= histo_limits[k]) { | |
809 histo[k]++; global_histo[k]++; break; | |
810 } | |
811 } | |
812 #endif | |
813 if (max == NULL || cur_occ > max_occ) { | |
814 max = cur; | |
815 max_prev = prev; | |
816 max_ind = i; | |
817 max_occ = cur_occ; | |
818 } | |
819 prev = cur->next_addr(); | |
820 cur = cur->next(); | |
821 } | |
822 } | |
823 #endif | |
824 // XXX | |
825 guarantee(max != NULL, "Since _n_fine_entries > 0"); | 489 guarantee(max != NULL, "Since _n_fine_entries > 0"); |
826 #if DRT_CENSUS | |
827 gclog_or_tty->print_cr("In a coarsening: histo of occs:"); | |
828 for (int k = 0; k < HistoSize; k++) { | |
829 gclog_or_tty->print_cr(" <= %4d: %5d.", histo_limits[k], histo[k]); | |
830 } | |
831 coarsenings++; | |
832 occ_sum += max_occ; | |
833 if ((coarsenings % 100) == 0) { | |
834 gclog_or_tty->print_cr("\ncoarsenings = %d; global summary:", coarsenings); | |
835 for (int k = 0; k < HistoSize; k++) { | |
836 gclog_or_tty->print_cr(" <= %4d: %5d.", histo_limits[k], global_histo[k]); | |
837 } | |
838 gclog_or_tty->print_cr("Avg occ of deleted region = %6.2f.", | |
839 (float)occ_sum/(float)coarsenings); | |
840 } | |
841 #endif | |
842 | 490 |
843 // Set the corresponding coarse bit. | 491 // Set the corresponding coarse bit. |
844 size_t max_hrs_index = (size_t) max->hr()->hrs_index(); | 492 size_t max_hrs_index = (size_t) max->hr()->hrs_index(); |
845 if (!_coarse_map.at(max_hrs_index)) { | 493 if (!_coarse_map.at(max_hrs_index)) { |
846 _coarse_map.at_put(max_hrs_index, true); | 494 _coarse_map.at_put(max_hrs_index, true); |
847 _n_coarse_entries++; | 495 _n_coarse_entries++; |
848 #if 0 | 496 if (G1TraceHeapRegionRememberedSet) { |
849 gclog_or_tty->print("Coarsened entry in region [" PTR_FORMAT "...] " | 497 gclog_or_tty->print("Coarsened entry in region [" PTR_FORMAT "...] " |
850 "for region [" PTR_FORMAT "...] (%d coarse entries).\n", | 498 "for region [" PTR_FORMAT "...] (%d coarse entries).\n", |
851 hr()->bottom(), | 499 hr()->bottom(), |
852 max->hr()->bottom(), | 500 max->hr()->bottom(), |
853 _n_coarse_entries); | 501 _n_coarse_entries); |
854 #endif | 502 } |
855 } | 503 } |
856 | 504 |
857 // Unsplice. | 505 // Unsplice. |
858 *max_prev = max->next(); | 506 *max_prev = max->next(); |
859 Atomic::inc(&_n_coarsenings); | 507 Atomic::inc(&_n_coarsenings); |
881 gclog_or_tty->print_cr(" after = "SIZE_FORMAT".", _n_coarse_entries); | 529 gclog_or_tty->print_cr(" after = "SIZE_FORMAT".", _n_coarse_entries); |
882 } | 530 } |
883 | 531 |
884 // Now do the fine-grained maps. | 532 // Now do the fine-grained maps. |
885 for (size_t i = 0; i < _max_fine_entries; i++) { | 533 for (size_t i = 0; i < _max_fine_entries; i++) { |
886 PosParPRT* cur = _fine_grain_regions[i]; | 534 PerRegionTable* cur = _fine_grain_regions[i]; |
887 PosParPRT** prev = &_fine_grain_regions[i]; | 535 PerRegionTable** prev = &_fine_grain_regions[i]; |
888 while (cur != NULL) { | 536 while (cur != NULL) { |
889 PosParPRT* nxt = cur->next(); | 537 PerRegionTable* nxt = cur->next(); |
890 // If the entire region is dead, eliminate. | 538 // If the entire region is dead, eliminate. |
891 if (G1RSScrubVerbose) { | 539 if (G1RSScrubVerbose) { |
892 gclog_or_tty->print_cr(" For other region %u:", | 540 gclog_or_tty->print_cr(" For other region %u:", |
893 cur->hr()->hrs_index()); | 541 cur->hr()->hrs_index()); |
894 } | 542 } |
897 cur->set_next(NULL); | 545 cur->set_next(NULL); |
898 _n_fine_entries--; | 546 _n_fine_entries--; |
899 if (G1RSScrubVerbose) { | 547 if (G1RSScrubVerbose) { |
900 gclog_or_tty->print_cr(" deleted via region map."); | 548 gclog_or_tty->print_cr(" deleted via region map."); |
901 } | 549 } |
902 PosParPRT::free(cur); | 550 PerRegionTable::free(cur); |
903 } else { | 551 } else { |
904 // Do fine-grain elimination. | 552 // Do fine-grain elimination. |
905 if (G1RSScrubVerbose) { | 553 if (G1RSScrubVerbose) { |
906 gclog_or_tty->print(" occ: before = %4d.", cur->occupied()); | 554 gclog_or_tty->print(" occ: before = %4d.", cur->occupied()); |
907 } | 555 } |
912 // Did that empty the table completely? | 560 // Did that empty the table completely? |
913 if (cur->occupied() == 0) { | 561 if (cur->occupied() == 0) { |
914 *prev = nxt; | 562 *prev = nxt; |
915 cur->set_next(NULL); | 563 cur->set_next(NULL); |
916 _n_fine_entries--; | 564 _n_fine_entries--; |
917 PosParPRT::free(cur); | 565 PerRegionTable::free(cur); |
918 } else { | 566 } else { |
919 prev = cur->next_addr(); | 567 prev = cur->next_addr(); |
920 } | 568 } |
921 } | 569 } |
922 cur = nxt; | 570 cur = nxt; |
938 } | 586 } |
939 | 587 |
940 size_t OtherRegionsTable::occ_fine() const { | 588 size_t OtherRegionsTable::occ_fine() const { |
941 size_t sum = 0; | 589 size_t sum = 0; |
942 for (size_t i = 0; i < _max_fine_entries; i++) { | 590 for (size_t i = 0; i < _max_fine_entries; i++) { |
943 PosParPRT* cur = _fine_grain_regions[i]; | 591 PerRegionTable* cur = _fine_grain_regions[i]; |
944 while (cur != NULL) { | 592 while (cur != NULL) { |
945 sum += cur->occupied(); | 593 sum += cur->occupied(); |
946 cur = cur->next(); | 594 cur = cur->next(); |
947 } | 595 } |
948 } | 596 } |
960 size_t OtherRegionsTable::mem_size() const { | 608 size_t OtherRegionsTable::mem_size() const { |
961 // Cast away const in this case. | 609 // Cast away const in this case. |
962 MutexLockerEx x((Mutex*)&_m, Mutex::_no_safepoint_check_flag); | 610 MutexLockerEx x((Mutex*)&_m, Mutex::_no_safepoint_check_flag); |
963 size_t sum = 0; | 611 size_t sum = 0; |
964 for (size_t i = 0; i < _max_fine_entries; i++) { | 612 for (size_t i = 0; i < _max_fine_entries; i++) { |
965 PosParPRT* cur = _fine_grain_regions[i]; | 613 PerRegionTable* cur = _fine_grain_regions[i]; |
966 while (cur != NULL) { | 614 while (cur != NULL) { |
967 sum += cur->mem_size(); | 615 sum += cur->mem_size(); |
968 cur = cur->next(); | 616 cur = cur->next(); |
969 } | 617 } |
970 } | 618 } |
971 sum += (sizeof(PosParPRT*) * _max_fine_entries); | 619 sum += (sizeof(PerRegionTable*) * _max_fine_entries); |
972 sum += (_coarse_map.size_in_words() * HeapWordSize); | 620 sum += (_coarse_map.size_in_words() * HeapWordSize); |
973 sum += (_sparse_table.mem_size()); | 621 sum += (_sparse_table.mem_size()); |
974 sum += sizeof(*this) - sizeof(_sparse_table); // Avoid double counting above. | 622 sum += sizeof(*this) - sizeof(_sparse_table); // Avoid double counting above. |
975 return sum; | 623 return sum; |
976 } | 624 } |
978 size_t OtherRegionsTable::static_mem_size() { | 626 size_t OtherRegionsTable::static_mem_size() { |
979 return _from_card_cache_mem_size; | 627 return _from_card_cache_mem_size; |
980 } | 628 } |
981 | 629 |
982 size_t OtherRegionsTable::fl_mem_size() { | 630 size_t OtherRegionsTable::fl_mem_size() { |
983 return PerRegionTable::fl_mem_size() + PosParPRT::fl_mem_size(); | 631 return PerRegionTable::fl_mem_size(); |
984 } | 632 } |
985 | 633 |
986 void OtherRegionsTable::clear_fcc() { | 634 void OtherRegionsTable::clear_fcc() { |
987 for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) { | 635 for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) { |
988 _from_card_cache[i][hr()->hrs_index()] = -1; | 636 _from_card_cache[i][hr()->hrs_index()] = -1; |
990 } | 638 } |
991 | 639 |
992 void OtherRegionsTable::clear() { | 640 void OtherRegionsTable::clear() { |
993 MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag); | 641 MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag); |
994 for (size_t i = 0; i < _max_fine_entries; i++) { | 642 for (size_t i = 0; i < _max_fine_entries; i++) { |
995 PosParPRT* cur = _fine_grain_regions[i]; | 643 PerRegionTable* cur = _fine_grain_regions[i]; |
996 while (cur != NULL) { | 644 while (cur != NULL) { |
997 PosParPRT* nxt = cur->next(); | 645 PerRegionTable* nxt = cur->next(); |
998 PosParPRT::free(cur); | 646 PerRegionTable::free(cur); |
999 cur = nxt; | 647 cur = nxt; |
1000 } | 648 } |
1001 _fine_grain_regions[i] = NULL; | 649 _fine_grain_regions[i] = NULL; |
1002 } | 650 } |
1003 _sparse_table.clear(); | 651 _sparse_table.clear(); |
1033 } | 681 } |
1034 | 682 |
1035 bool OtherRegionsTable::del_single_region_table(size_t ind, | 683 bool OtherRegionsTable::del_single_region_table(size_t ind, |
1036 HeapRegion* hr) { | 684 HeapRegion* hr) { |
1037 assert(0 <= ind && ind < _max_fine_entries, "Preconditions."); | 685 assert(0 <= ind && ind < _max_fine_entries, "Preconditions."); |
1038 PosParPRT** prev_addr = &_fine_grain_regions[ind]; | 686 PerRegionTable** prev_addr = &_fine_grain_regions[ind]; |
1039 PosParPRT* prt = *prev_addr; | 687 PerRegionTable* prt = *prev_addr; |
1040 while (prt != NULL && prt->hr() != hr) { | 688 while (prt != NULL && prt->hr() != hr) { |
1041 prev_addr = prt->next_addr(); | 689 prev_addr = prt->next_addr(); |
1042 prt = prt->next(); | 690 prt = prt->next(); |
1043 } | 691 } |
1044 if (prt != NULL) { | 692 if (prt != NULL) { |
1045 assert(prt->hr() == hr, "Loop postcondition."); | 693 assert(prt->hr() == hr, "Loop postcondition."); |
1046 *prev_addr = prt->next(); | 694 *prev_addr = prt->next(); |
1047 PosParPRT::free(prt); | 695 PerRegionTable::free(prt); |
1048 _n_fine_entries--; | 696 _n_fine_entries--; |
1049 return true; | 697 return true; |
1050 } else { | 698 } else { |
1051 return false; | 699 return false; |
1052 } | 700 } |
1063 if (hr == NULL) return false; | 711 if (hr == NULL) return false; |
1064 RegionIdx_t hr_ind = (RegionIdx_t) hr->hrs_index(); | 712 RegionIdx_t hr_ind = (RegionIdx_t) hr->hrs_index(); |
1065 // Is this region in the coarse map? | 713 // Is this region in the coarse map? |
1066 if (_coarse_map.at(hr_ind)) return true; | 714 if (_coarse_map.at(hr_ind)) return true; |
1067 | 715 |
1068 PosParPRT* prt = find_region_table(hr_ind & _mod_max_fine_entries_mask, | 716 PerRegionTable* prt = find_region_table(hr_ind & _mod_max_fine_entries_mask, |
1069 hr); | 717 hr); |
1070 if (prt != NULL) { | 718 if (prt != NULL) { |
1071 return prt->contains_reference(from); | 719 return prt->contains_reference(from); |
1072 | 720 |
1073 } else { | 721 } else { |
1143 while (iter.has_next(card_index)) { | 791 while (iter.has_next(card_index)) { |
1144 HeapWord* card_start = | 792 HeapWord* card_start = |
1145 G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index); | 793 G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index); |
1146 gclog_or_tty->print_cr(" Card " PTR_FORMAT, card_start); | 794 gclog_or_tty->print_cr(" Card " PTR_FORMAT, card_start); |
1147 } | 795 } |
1148 // XXX | 796 |
1149 if (iter.n_yielded() != occupied()) { | 797 if (iter.n_yielded() != occupied()) { |
1150 gclog_or_tty->print_cr("Yielded disagrees with occupied:"); | 798 gclog_or_tty->print_cr("Yielded disagrees with occupied:"); |
1151 gclog_or_tty->print_cr(" %6d yielded (%6d coarse, %6d fine).", | 799 gclog_or_tty->print_cr(" %6d yielded (%6d coarse, %6d fine).", |
1152 iter.n_yielded(), | 800 iter.n_yielded(), |
1153 iter.n_yielded_coarse(), iter.n_yielded_fine()); | 801 iter.n_yielded_coarse(), iter.n_yielded_fine()); |
1159 } | 807 } |
1160 #endif | 808 #endif |
1161 | 809 |
1162 void HeapRegionRemSet::cleanup() { | 810 void HeapRegionRemSet::cleanup() { |
1163 SparsePRT::cleanup_all(); | 811 SparsePRT::cleanup_all(); |
1164 } | |
1165 | |
1166 void HeapRegionRemSet::par_cleanup() { | |
1167 PosParPRT::par_contract_all(); | |
1168 } | 812 } |
1169 | 813 |
1170 void HeapRegionRemSet::clear() { | 814 void HeapRegionRemSet::clear() { |
1171 _other_regions.clear(); | 815 _other_regions.clear(); |
1172 assert(occupied() == 0, "Should be clear."); | 816 assert(occupied() == 0, "Should be clear."); |