comparison src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp @ 20804:7848fc12602b

Merge with jdk8u40-b25
author Gilles Duboscq <gilles.m.duboscq@oracle.com>
date Tue, 07 Apr 2015 14:58:49 +0200
parents 52b4284cb496 7baf47cb97cb
children
comparison: 20184:84105dcdb05b (left column) vs 20804:7848fc12602b (right column)
82 _cache[worker_id][region_idx] = val; 82 _cache[worker_id][region_idx] = val;
83 } 83 }
84 84
85 static void initialize(uint n_par_rs, uint max_num_regions); 85 static void initialize(uint n_par_rs, uint max_num_regions);
86 86
87 static void shrink(uint new_num_regions); 87 static void invalidate(uint start_idx, size_t num_regions);
88 88
89 static void print(outputStream* out = gclog_or_tty) PRODUCT_RETURN; 89 static void print(outputStream* out = gclog_or_tty) PRODUCT_RETURN;
90 90
91 static size_t static_mem_size() { 91 static size_t static_mem_size() {
92 return _static_mem_size; 92 return _static_mem_size;
183 183
184 // Removes any entries shown by the given bitmaps to contain only dead 184 // Removes any entries shown by the given bitmaps to contain only dead
185 // objects. 185 // objects.
186 void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm); 186 void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm);
187 187
188 // Returns whether this remembered set (and all sub-sets) contain no entries.
189 bool is_empty() const;
190
188 size_t occupied() const; 191 size_t occupied() const;
189 size_t occ_fine() const; 192 size_t occ_fine() const;
190 size_t occ_coarse() const; 193 size_t occ_coarse() const;
191 size_t occ_sparse() const; 194 size_t occ_sparse() const;
192 195
204 void clear(); 207 void clear();
205 208
206 // Specifically clear the from_card_cache. 209 // Specifically clear the from_card_cache.
207 void clear_fcc(); 210 void clear_fcc();
208 211
209 // "from_hr" is being cleared; remove any entries from it.
210 void clear_incoming_entry(HeapRegion* from_hr);
211
212 void do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task); 212 void do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task);
213 213
214 // Declare the heap size (in # of regions) to the OtherRegionsTable. 214 // Declare the heap size (in # of regions) to the OtherRegionsTable.
215 // (Uses it to initialize from_card_cache). 215 // (Uses it to initialize from_card_cache).
216 static void init_from_card_cache(uint max_regions); 216 static void initialize(uint max_regions);
217 217
218 // Declares that only regions i s.t. 0 <= i < new_n_regs are in use. 218 // Declares that regions between start_idx <= i < start_idx + num_regions are
219 // Make sure any entries for higher regions are invalid. 219 // not in use. Make sure that any entries for these regions are invalid.
220 static void shrink_from_card_cache(uint new_num_regions); 220 static void invalidate(uint start_idx, size_t num_regions);
221 221
222 static void print_from_card_cache(); 222 static void print_from_card_cache();
223 }; 223 };
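The shrink_from_card_cache -> invalidate rename in the hunk above changes the contract from "drop all entries at or above a new region count" to "invalidate an explicit [start_idx, start_idx + num_regions) range", which no longer assumes the unused regions form a trailing suffix of the region space. The following standalone sketch shows that range-invalidation idea; it is not HotSpot code, and the class layout and sentinel encoding are illustrative assumptions only.

#include <cstddef>
#include <vector>

// Illustrative stand-in for a per-worker from-card cache: one row per
// worker thread, one slot per heap region.
class FromCardCacheSketch {
  std::vector<std::vector<size_t> > _cache;  // [worker][region] -> cached card
public:
  // Sentinel meaning "no card cached for this region" (assumed encoding).
  static size_t invalid_card() { return (size_t)-1; }

  FromCardCacheSketch(size_t n_workers, size_t max_regions)
    : _cache(n_workers, std::vector<size_t>(max_regions, invalid_card())) {}

  // Invalidate the slots for regions [start_idx, start_idx + num_regions)
  // in every worker's row. Unlike a shrink(new_num_regions), the range does
  // not have to be a suffix of the region space.
  void invalidate(size_t start_idx, size_t num_regions) {
    for (size_t w = 0; w < _cache.size(); w++) {
      for (size_t i = start_idx; i < start_idx + num_regions; i++) {
        _cache[w][i] = invalid_card();
      }
    }
  }
};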
224 224
225 class HeapRegionRemSet : public CHeapObj<mtGC> { 225 class HeapRegionRemSet : public CHeapObj<mtGC> {
268 static uint num_par_rem_sets(); 268 static uint num_par_rem_sets();
269 static void setup_remset_size(); 269 static void setup_remset_size();
270 270
271 HeapRegion* hr() const { 271 HeapRegion* hr() const {
272 return _other_regions.hr(); 272 return _other_regions.hr();
273 }
274
275 bool is_empty() const {
276 return (strong_code_roots_list_length() == 0) && _other_regions.is_empty();
273 } 277 }
274 278
275 size_t occupied() { 279 size_t occupied() {
276 MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag); 280 MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
277 return occupied_locked(); 281 return occupied_locked();
340 size_t mem_size() { 344 size_t mem_size() {
341 MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag); 345 MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
342 return _other_regions.mem_size() 346 return _other_regions.mem_size()
343 // This correction is necessary because the above includes the second 347 // This correction is necessary because the above includes the second
344 // part. 348 // part.
345 + (sizeof(this) - sizeof(OtherRegionsTable)) 349 + (sizeof(HeapRegionRemSet) - sizeof(OtherRegionsTable))
346 + strong_code_roots_mem_size(); 350 + strong_code_roots_mem_size();
347 } 351 }
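One detail in the mem_size() hunk above is worth spelling out: inside a member function, sizeof(this) is the size of a pointer, not of the enclosing object, so the old correction term under-counted; the new code uses sizeof(HeapRegionRemSet). A standalone illustration in plain C++ follows; it is unrelated to HotSpot and the struct names and sizes are arbitrary.

#include <cstdio>

struct Part  { char part_data[64]; };
struct Whole : Part {
  char more_data[192];
  void print_sizes() {
    // sizeof(this) is the size of the pointer (typically 8 on 64-bit),
    // while sizeof(Whole) is the size of the complete object (256 here).
    std::printf("sizeof(this)  = %zu\n", sizeof(this));
    std::printf("sizeof(Whole) = %zu\n", sizeof(Whole));
  }
};

int main() {
  Whole w;
  w.print_sizes();
  return 0;
}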
348 352
349 // Returns the memory occupancy of all static data structures associated 353 // Returns the memory occupancy of all static data structures associated
350 // with remembered sets. 354 // with remembered sets.
353 } 357 }
354 358
355 // Returns the memory occupancy of all free_list data structures associated 359 // Returns the memory occupancy of all free_list data structures associated
356 // with remembered sets. 360 // with remembered sets.
357 static size_t fl_mem_size() { 361 static size_t fl_mem_size() {
358 return OtherRegionsTable::fl_mem_size() + G1CodeRootSet::fl_mem_size(); 362 return OtherRegionsTable::fl_mem_size();
359 } 363 }
360 364
361 bool contains_reference(OopOrNarrowOopStar from) const { 365 bool contains_reference(OopOrNarrowOopStar from) const {
362 return _other_regions.contains_reference(from); 366 return _other_regions.contains_reference(from);
363 } 367 }
364 368
365 // Routines for managing the list of code roots that point into 369 // Routines for managing the list of code roots that point into
366 // the heap region that owns this RSet. 370 // the heap region that owns this RSet.
367 void add_strong_code_root(nmethod* nm); 371 void add_strong_code_root(nmethod* nm);
372 void add_strong_code_root_locked(nmethod* nm);
368 void remove_strong_code_root(nmethod* nm); 373 void remove_strong_code_root(nmethod* nm);
369
370 // During a collection, migrate the successfully evacuated strong
371 // code roots that referenced into the region that owns this RSet
372 // to the RSets of the new regions that they now point into.
373 // Unsuccessfully evacuated code roots are not migrated.
374 void migrate_strong_code_roots();
375 374
376 // Applies blk->do_code_blob() to each of the entries in 375 // Applies blk->do_code_blob() to each of the entries in
377 // the strong code roots list 376 // the strong code roots list
378 void strong_code_roots_do(CodeBlobClosure* blk) const; 377 void strong_code_roots_do(CodeBlobClosure* blk) const;
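As the comment above says, strong_code_roots_do() applies blk->do_code_blob() to every entry in the strong code roots list. The self-contained sketch below shows the same closure pattern with stand-in types; none of these names (CodeBlobStub, CodeRootListSketch, PrintingClosure) are real HotSpot classes.

#include <cstddef>
#include <cstdio>
#include <vector>

struct CodeBlobStub { const char* name; };

class CodeBlobClosureSketch {
public:
  virtual ~CodeBlobClosureSketch() {}
  // Called once per entry, mirroring a do_code_blob() style callback.
  virtual void do_code_blob(CodeBlobStub* cb) = 0;
};

class CodeRootListSketch {
  std::vector<CodeBlobStub*> _entries;
public:
  void add(CodeBlobStub* cb) { _entries.push_back(cb); }
  // Mirrors strong_code_roots_do(): hand every entry to the closure.
  void strong_code_roots_do(CodeBlobClosureSketch* blk) const {
    for (size_t i = 0; i < _entries.size(); i++) {
      blk->do_code_blob(_entries[i]);
    }
  }
};

class PrintingClosure : public CodeBlobClosureSketch {
public:
  virtual void do_code_blob(CodeBlobStub* cb) { std::printf("%s\n", cb->name); }
};

int main() {
  CodeBlobStub a = { "nmethod_a" };
  CodeBlobStub b = { "nmethod_b" };
  CodeRootListSketch roots;
  roots.add(&a);
  roots.add(&b);
  PrintingClosure cl;
  roots.strong_code_roots_do(&cl);  // prints nmethod_a, nmethod_b
  return 0;
}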
379 378
379 void clean_strong_code_roots(HeapRegion* hr);
380
380 // Returns the number of elements in the strong code roots list 381 // Returns the number of elements in the strong code roots list
381 size_t strong_code_roots_list_length() { 382 size_t strong_code_roots_list_length() const {
382 return _code_roots.length(); 383 return _code_roots.length();
383 } 384 }
384 385
385 // Returns true if the strong code roots contains the given 386 // Returns true if the strong code roots contains the given
386 // nmethod. 387 // nmethod.
398 static void cleanup(); 399 static void cleanup();
399 400
400 // Declare the heap size (in # of regions) to the HeapRegionRemSet(s). 401 // Declare the heap size (in # of regions) to the HeapRegionRemSet(s).
401 // (Uses it to initialize from_card_cache). 402 // (Uses it to initialize from_card_cache).
402 static void init_heap(uint max_regions) { 403 static void init_heap(uint max_regions) {
403 G1CodeRootSet::initialize(); 404 OtherRegionsTable::initialize(max_regions);
404 OtherRegionsTable::init_from_card_cache(max_regions); 405 }
405 } 406
406 407 static void invalidate(uint start_idx, uint num_regions) {
407 // Declares that only regions i s.t. 0 <= i < new_n_regs are in use. 408 OtherRegionsTable::invalidate(start_idx, num_regions);
408 static void shrink_heap(uint new_n_regs) {
409 OtherRegionsTable::shrink_from_card_cache(new_n_regs);
410 } 409 }
411 410
412 #ifndef PRODUCT 411 #ifndef PRODUCT
413 static void print_from_card_cache() { 412 static void print_from_card_cache() {
414 OtherRegionsTable::print_from_card_cache(); 413 OtherRegionsTable::print_from_card_cache();
431 static void test(); 430 static void test();
432 #endif 431 #endif
433 }; 432 };
434 433
435 class HeapRegionRemSetIterator : public StackObj { 434 class HeapRegionRemSetIterator : public StackObj {
436 435 private:
437 // The region RSet over which we're iterating. 436 // The region RSet over which we are iterating.
438 HeapRegionRemSet* _hrrs; 437 HeapRegionRemSet* _hrrs;
439 438
440 // Local caching of HRRS fields. 439 // Local caching of HRRS fields.
441 const BitMap* _coarse_map; 440 const BitMap* _coarse_map;
442 PerRegionTable** _fine_grain_regions;
443 441
444 G1BlockOffsetSharedArray* _bosa; 442 G1BlockOffsetSharedArray* _bosa;
445 G1CollectedHeap* _g1h; 443 G1CollectedHeap* _g1h;
446 444
447 // The number yielded since initialization. 445 // The number of cards yielded since initialization.
448 size_t _n_yielded_fine; 446 size_t _n_yielded_fine;
449 size_t _n_yielded_coarse; 447 size_t _n_yielded_coarse;
450 size_t _n_yielded_sparse; 448 size_t _n_yielded_sparse;
451 449
452 // Indicates what granularity of table that we're currently iterating over. 450 // Indicates what granularity of table that we are currently iterating over.
453 // We start iterating over the sparse table, progress to the fine grain 451 // We start iterating over the sparse table, progress to the fine grain
454 // table, and then finish with the coarse table. 452 // table, and then finish with the coarse table.
455 // See HeapRegionRemSetIterator::has_next().
456 enum IterState { 453 enum IterState {
457 Sparse, 454 Sparse,
458 Fine, 455 Fine,
459 Coarse 456 Coarse
460 }; 457 };
461 IterState _is; 458 IterState _is;
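The IterState comment above fixes the traversal order: sparse entries are yielded first, then fine-grained ones, then coarse ones. The self-contained sketch below mimics that contract with plain vectors standing in for the three tables; it is illustrative only (the Done state is added for the sketch) and uses none of the real HotSpot types.

#include <cstddef>
#include <cstdio>
#include <vector>

class ThreePhaseIterSketch {
  enum IterState { Sparse, Fine, Coarse, Done };
  IterState _is;
  const std::vector<size_t>& _sparse;
  const std::vector<size_t>& _fine;
  const std::vector<size_t>& _coarse;
  size_t _pos;

  const std::vector<size_t>& current() const {
    return _is == Sparse ? _sparse : (_is == Fine ? _fine : _coarse);
  }
public:
  ThreePhaseIterSketch(const std::vector<size_t>& sparse,
                       const std::vector<size_t>& fine,
                       const std::vector<size_t>& coarse)
    : _is(Sparse), _sparse(sparse), _fine(fine), _coarse(coarse), _pos(0) {}

  // Mirrors the has_next(card_index) contract: yield the next card and
  // advance, switching to the next granularity once the current one is
  // exhausted.
  bool has_next(size_t& card_index) {
    while (_is != Done) {
      if (_pos < current().size()) {
        card_index = current()[_pos++];
        return true;
      }
      _is = (_is == Sparse) ? Fine : ((_is == Fine) ? Coarse : Done);
      _pos = 0;
    }
    return false;
  }
};

int main() {
  std::vector<size_t> sparse, fine, coarse;
  sparse.push_back(3);
  fine.push_back(17);
  fine.push_back(18);
  coarse.push_back(64);
  ThreePhaseIterSketch iter(sparse, fine, coarse);
  size_t card;
  while (iter.has_next(card)) {
    std::printf("card %zu\n", card);  // 3, 17, 18, 64 in that order
  }
  return 0;
}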
462 459
463 // In both kinds of iteration, heap offset of first card of current 460 // For both Coarse and Fine remembered set iteration this contains the
464 // region. 461 // first card number of the heap region we currently iterate over.
465 size_t _cur_region_card_offset; 462 size_t _cur_region_card_offset;
466 // Card offset within cur region. 463
467 size_t _cur_region_cur_card; 464 // Current region index for the Coarse remembered set iteration.
468
469 // Coarse table iteration fields:
470
471 // Current region index;
472 int _coarse_cur_region_index; 465 int _coarse_cur_region_index;
473 size_t _coarse_cur_region_cur_card; 466 size_t _coarse_cur_region_cur_card;
474 467
475 bool coarse_has_next(size_t& card_index); 468 bool coarse_has_next(size_t& card_index);
476 469
477 // Fine table iteration fields: 470 // The PRT we are currently iterating over.
478
479 // Index of bucket-list we're working on.
480 int _fine_array_index;
481
482 // Per Region Table we're doing within current bucket list.
483 PerRegionTable* _fine_cur_prt; 471 PerRegionTable* _fine_cur_prt;
484 472 // Card offset within the current PRT.
485 /* SparsePRT::*/ SparsePRTIter _sparse_iter; 473 size_t _cur_card_in_prt;
486 474
487 void fine_find_next_non_null_prt(); 475 // Update internal variables when switching to the given PRT.
488 476 void switch_to_prt(PerRegionTable* prt);
489 bool fine_has_next(); 477 bool fine_has_next();
490 bool fine_has_next(size_t& card_index); 478 bool fine_has_next(size_t& card_index);
491 479
492 public: 480 // The Sparse remembered set iterator.
493 // We require an iterator to be initialized before use, so the 481 SparsePRTIter _sparse_iter;
494 // constructor does little. 482
483 public:
495 HeapRegionRemSetIterator(HeapRegionRemSet* hrrs); 484 HeapRegionRemSetIterator(HeapRegionRemSet* hrrs);
496 485
497 // If there remains one or more cards to be yielded, returns true and 486 // If there remains one or more cards to be yielded, returns true and
498 // sets "card_index" to one of those cards (which is then considered 487 // sets "card_index" to one of those cards (which is then considered
499 // yielded.) Otherwise, returns false (and leaves "card_index" 488 // yielded.) Otherwise, returns false (and leaves "card_index"