comparison src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp @ 2431:02f49b66361a

7026932: G1: No need to abort VM when card count cache expansion fails
Summary: Manage allocation/freeing of the card cache counts and epochs arrays directly so that an allocation failure while attempting to expand these arrays does not abort the JVM. Failure to expand these arrays is not fatal.
Reviewed-by: iveresov, tonyp
author johnc
date Mon, 28 Mar 2011 10:58:54 -0700
parents 04d1138b4cce
children c84ee870e0b9
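The heart of the change is the new allocate_card_count_cache() helper visible in the diff below: the counts and epochs arrays are allocated as a pair with os::malloc(), the first allocation is rolled back if the second fails, and the caller gets a boolean result instead of an aborted JVM (NEW_C_HEAP_ARRAY would call vm_exit_out_of_memory on failure). The following is only a minimal standalone sketch of that allocate-both-or-roll-back pattern; it uses plain malloc/free and hypothetical stand-in names (CountEntry, EpochEntry, allocate_cache), not the HotSpot types or APIs.

#include <cstdlib>
#include <cstddef>

// Simplified stand-ins for CardCountCacheEntry / CardEpochCacheEntry.
struct CountEntry { unsigned count; };
struct EpochEntry { unsigned epoch; };

// Allocate the two parallel arrays together, or not at all.
// Returns true only if both allocations succeed; on a partial
// failure the first array is released again so the caller never
// sees a half-built cache.
static bool allocate_cache(std::size_t n, CountEntry** counts, EpochEntry** epochs) {
  *counts = static_cast<CountEntry*>(std::malloc(n * sizeof(CountEntry)));
  if (*counts == NULL) {
    return false;                     // nothing to roll back
  }
  *epochs = static_cast<EpochEntry*>(std::malloc(n * sizeof(EpochEntry)));
  if (*epochs == NULL) {
    std::free(*counts);               // roll back the first allocation
    *counts = NULL;
    return false;
  }
  return true;                        // caller may now free the old arrays
}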
diff -r 5c0b591e1074 -r 02f49b66361a src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp
--- a/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp
+++ b/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp
@@ -29,23 +29,27 @@
 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
 #include "gc_implementation/g1/g1RemSet.hpp"
 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
 #include "memory/space.inline.hpp"
 #include "runtime/atomic.hpp"
+#include "runtime/java.hpp"
 #include "utilities/copy.hpp"
 
 // Possible sizes for the card counts cache: odd primes that roughly double in size.
 // (See jvmtiTagMap.cpp).
-int ConcurrentG1Refine::_cc_cache_sizes[] = {
-  16381, 32771, 76831, 150001, 307261,
-  614563, 1228891, 2457733, 4915219, 9830479,
-  19660831, 39321619, 78643219, 157286461, -1
+
+#define MAX_SIZE ((size_t) -1)
+
+size_t ConcurrentG1Refine::_cc_cache_sizes[] = {
+  16381, 32771, 76831, 150001, 307261,
+  614563, 1228891, 2457733, 4915219, 9830479,
+  19660831, 39321619, 78643219, 157286461, MAX_SIZE
 };
 
 ConcurrentG1Refine::ConcurrentG1Refine() :
   _card_counts(NULL), _card_epochs(NULL),
-  _n_card_counts(0), _max_n_card_counts(0),
+  _n_card_counts(0), _max_cards(0), _max_n_card_counts(0),
   _cache_size_index(0), _expand_card_counts(false),
   _hot_cache(NULL),
   _def_use_cache(false), _use_cache(false),
   _n_periods(0),
   _threads(NULL), _n_threads(0)
@@ -96,31 +100,48 @@
 }
 
 void ConcurrentG1Refine::init() {
   if (G1ConcRSLogCacheSize > 0) {
     _g1h = G1CollectedHeap::heap();
-    _max_n_card_counts =
-      (unsigned) (_g1h->max_capacity() >> CardTableModRefBS::card_shift);
+
+    _max_cards = _g1h->max_capacity() >> CardTableModRefBS::card_shift;
+    _max_n_card_counts = _max_cards * G1MaxHotCardCountSizePercent / 100;
 
     size_t max_card_num = ((size_t)1 << (sizeof(unsigned)*BitsPerByte-1)) - 1;
-    guarantee(_max_n_card_counts < max_card_num, "card_num representation");
+    guarantee(_max_cards < max_card_num, "card_num representation");
 
-    int desired = _max_n_card_counts / InitialCacheFraction;
-    for (_cache_size_index = 0;
-         _cc_cache_sizes[_cache_size_index] >= 0; _cache_size_index++) {
-      if (_cc_cache_sizes[_cache_size_index] >= desired) break;
-    }
-    _cache_size_index = MAX2(0, (_cache_size_index - 1));
-
-    int initial_size = _cc_cache_sizes[_cache_size_index];
-    if (initial_size < 0) initial_size = _max_n_card_counts;
-
-    // Make sure we don't go bigger than we will ever need
-    _n_card_counts = MIN2((unsigned) initial_size, _max_n_card_counts);
-
-    _card_counts = NEW_C_HEAP_ARRAY(CardCountCacheEntry, _n_card_counts);
-    _card_epochs = NEW_C_HEAP_ARRAY(CardEpochCacheEntry, _n_card_counts);
+    // We need _n_card_counts to be less than _max_n_card_counts here
+    // so that the expansion call (below) actually allocates the
+    // _counts and _epochs arrays.
+    assert(_n_card_counts == 0, "pre-condition");
+    assert(_max_n_card_counts > 0, "pre-condition");
+
+    // Find the index into cache size array that is of a size that's
+    // large enough to hold desired_sz.
+    size_t desired_sz = _max_cards / InitialCacheFraction;
+    int desired_sz_index = 0;
+    while (_cc_cache_sizes[desired_sz_index] < desired_sz) {
+      desired_sz_index += 1;
+      assert(desired_sz_index < MAX_CC_CACHE_INDEX, "invariant");
+    }
+    assert(desired_sz_index < MAX_CC_CACHE_INDEX, "invariant");
+
+    // If the desired_sz value is between two sizes then
+    // _cc_cache_sizes[desired_sz_index-1] < desired_sz <= _cc_cache_sizes[desired_sz_index]
+    // we will start with the lower size in the optimistic expectation that
+    // we will not need to expand up. Note desired_sz_index could also be 0.
+    if (desired_sz_index > 0 &&
+        _cc_cache_sizes[desired_sz_index] > desired_sz) {
+      desired_sz_index -= 1;
+    }
+
+    if (!expand_card_count_cache(desired_sz_index)) {
+      // Allocation was unsuccessful - exit
+      vm_exit_during_initialization("Could not reserve enough space for card count cache");
+    }
+    assert(_n_card_counts > 0, "post-condition");
+    assert(_cache_size_index == desired_sz_index, "post-condition");
 
     Copy::fill_to_bytes(&_card_counts[0],
                         _n_card_counts * sizeof(CardCountCacheEntry));
     Copy::fill_to_bytes(&_card_epochs[0], _n_card_counts * sizeof(CardEpochCacheEntry));
 
@@ -161,14 +182,17 @@
   }
 }
 
 ConcurrentG1Refine::~ConcurrentG1Refine() {
   if (G1ConcRSLogCacheSize > 0) {
+    // Please see the comment in allocate_card_count_cache
+    // for why we call os::malloc() and os::free() directly.
     assert(_card_counts != NULL, "Logic");
-    FREE_C_HEAP_ARRAY(CardCountCacheEntry, _card_counts);
+    os::free(_card_counts);
     assert(_card_epochs != NULL, "Logic");
-    FREE_C_HEAP_ARRAY(CardEpochCacheEntry, _card_epochs);
+    os::free(_card_epochs);
+
     assert(_hot_cache != NULL, "Logic");
     FREE_C_HEAP_ARRAY(jbyte*, _hot_cache);
   }
   if (_threads != NULL) {
     for (int i = 0; i < _n_threads; i++) {
@@ -380,47 +404,117 @@
       }
     }
   }
 }
 
-void ConcurrentG1Refine::expand_card_count_cache() {
+// The arrays used to hold the card counts and the epochs must have
+// a 1:1 correspondence. Hence they are allocated and freed together
+// Returns true if the allocations of both the counts and epochs
+// were successful; false otherwise.
+bool ConcurrentG1Refine::allocate_card_count_cache(size_t n,
+                                                   CardCountCacheEntry** counts,
+                                                   CardEpochCacheEntry** epochs) {
+  // We call the allocation/free routines directly for the counts
+  // and epochs arrays. The NEW_C_HEAP_ARRAY/FREE_C_HEAP_ARRAY
+  // macros call AllocateHeap and FreeHeap respectively.
+  // AllocateHeap will call vm_exit_out_of_memory in the event
+  // of an allocation failure and abort the JVM. With the
+  // _counts/epochs arrays we only need to abort the JVM if the
+  // initial allocation of these arrays fails.
+  //
+  // Additionally AllocateHeap/FreeHeap do some tracing of
+  // allocate/free calls so calling one without calling the
+  // other can cause inconsistencies in the tracing. So we
+  // call neither.
+
+  assert(*counts == NULL, "out param");
+  assert(*epochs == NULL, "out param");
+
+  size_t counts_size = n * sizeof(CardCountCacheEntry);
+  size_t epochs_size = n * sizeof(CardEpochCacheEntry);
+
+  *counts = (CardCountCacheEntry*) os::malloc(counts_size);
+  if (*counts == NULL) {
+    // allocation was unsuccessful
+    return false;
+  }
+
+  *epochs = (CardEpochCacheEntry*) os::malloc(epochs_size);
+  if (*epochs == NULL) {
+    // allocation was unsuccessful - free counts array
+    assert(*counts != NULL, "must be");
+    os::free(*counts);
+    *counts = NULL;
+    return false;
+  }
+
+  // We successfully allocated both counts and epochs
+  return true;
+}
+
+// Returns true if the card counts/epochs cache was
+// successfully expanded; false otherwise.
+bool ConcurrentG1Refine::expand_card_count_cache(int cache_size_idx) {
+  // Can we expand the card count and epoch tables?
   if (_n_card_counts < _max_n_card_counts) {
-    int new_idx = _cache_size_index+1;
-    int new_size = _cc_cache_sizes[new_idx];
-    if (new_size < 0) new_size = _max_n_card_counts;
-
+    assert(cache_size_idx >= 0 && cache_size_idx < MAX_CC_CACHE_INDEX, "oob");
+
+    size_t cache_size = _cc_cache_sizes[cache_size_idx];
     // Make sure we don't go bigger than we will ever need
-    new_size = MIN2((unsigned) new_size, _max_n_card_counts);
+    cache_size = MIN2(cache_size, _max_n_card_counts);
 
-    // Expand the card count and card epoch tables
-    if (new_size > (int)_n_card_counts) {
-      // We can just free and allocate a new array as we're
-      // not interested in preserving the contents
-      assert(_card_counts != NULL, "Logic!");
-      assert(_card_epochs != NULL, "Logic!");
-      FREE_C_HEAP_ARRAY(CardCountCacheEntry, _card_counts);
-      FREE_C_HEAP_ARRAY(CardEpochCacheEntry, _card_epochs);
-      _n_card_counts = new_size;
-      _card_counts = NEW_C_HEAP_ARRAY(CardCountCacheEntry, _n_card_counts);
-      _card_epochs = NEW_C_HEAP_ARRAY(CardEpochCacheEntry, _n_card_counts);
-      _cache_size_index = new_idx;
-    }
-  }
+    // Should we expand the card count and card epoch tables?
+    if (cache_size > _n_card_counts) {
+      // We have been asked to allocate new, larger, arrays for
+      // the card counts and the epochs. Attempt the allocation
+      // of both before we free the existing arrays in case
+      // the allocation is unsuccessful...
+      CardCountCacheEntry* counts = NULL;
+      CardEpochCacheEntry* epochs = NULL;
+
+      if (allocate_card_count_cache(cache_size, &counts, &epochs)) {
+        // Allocation was successful.
+        // We can just free the old arrays; we're
+        // not interested in preserving the contents
+        if (_card_counts != NULL) os::free(_card_counts);
+        if (_card_epochs != NULL) os::free(_card_epochs);
+
+        // Cache the size of the arrays and the index that got us there.
+        _n_card_counts = cache_size;
+        _cache_size_index = cache_size_idx;
+
+        _card_counts = counts;
+        _card_epochs = epochs;
+
+        // We successfully allocated/expanded the caches.
+        return true;
+      }
+    }
+  }
+
+  // We did not successfully expand the caches.
+  return false;
 }
 
 void ConcurrentG1Refine::clear_and_record_card_counts() {
   if (G1ConcRSLogCacheSize == 0) return;
 
 #ifndef PRODUCT
   double start = os::elapsedTime();
 #endif
 
   if (_expand_card_counts) {
-    expand_card_count_cache();
+    int new_idx = _cache_size_index + 1;
+
+    if (expand_card_count_cache(new_idx)) {
+      // Allocation was successful and _n_card_counts has
+      // been updated to the new size. We only need to clear
+      // the epochs so we don't read a bogus epoch value
+      // when inserting a card into the hot card cache.
+      Copy::fill_to_bytes(&_card_epochs[0], _n_card_counts * sizeof(CardEpochCacheEntry));
+    }
     _expand_card_counts = false;
-    // Only need to clear the epochs.
-    Copy::fill_to_bytes(&_card_epochs[0], _n_card_counts * sizeof(CardEpochCacheEntry));
   }
 
   int this_epoch = (int) _n_periods;
   assert((this_epoch+1) <= max_jint, "to many periods");
   // Update epoch
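For context, here is a hedged sketch of the caller-side behaviour that makes expansion failure non-fatal, in the spirit of the new clear_and_record_card_counts() path above: if either allocation fails, the existing arrays stay valid and the cache simply keeps its current size. The names g_counts, g_epochs, g_n_entries and try_expand_cache() are hypothetical stand-ins for the _card_counts/_card_epochs/_n_card_counts fields and expand_card_count_cache(); this is not the HotSpot code.

#include <cstdlib>
#include <cstring>
#include <cstddef>

// Simplified stand-ins for the cache entry types.
struct CountEntry { unsigned count; };
struct EpochEntry { unsigned epoch; };

static CountEntry* g_counts    = NULL;   // stand-in for _card_counts
static EpochEntry* g_epochs    = NULL;   // stand-in for _card_epochs
static std::size_t g_n_entries = 0;      // stand-in for _n_card_counts

// Try to grow the cache to new_n entries. On failure the existing
// arrays are left untouched and false is returned, so refinement keeps
// running with the smaller cache instead of aborting the VM.
static bool try_expand_cache(std::size_t new_n) {
  CountEntry* counts = static_cast<CountEntry*>(std::malloc(new_n * sizeof(CountEntry)));
  EpochEntry* epochs = static_cast<EpochEntry*>(std::malloc(new_n * sizeof(EpochEntry)));
  if (counts == NULL || epochs == NULL) {
    std::free(counts);                   // free(NULL) is a no-op
    std::free(epochs);
    return false;                        // keep using the old, smaller arrays
  }
  std::free(g_counts);                   // old contents need not be preserved
  std::free(g_epochs);
  g_counts    = counts;
  g_epochs    = epochs;
  g_n_entries = new_n;
  // Zero the new arrays; in the changeset only the epochs need clearing
  // so that stale epoch values are never read on insertion.
  std::memset(g_counts, 0, new_n * sizeof(CountEntry));
  std::memset(g_epochs, 0, new_n * sizeof(EpochEntry));
  return true;
}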