comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 545:58054a18d735

6484959: G1: introduce survivor spaces
6797754: G1: combined bugfix
Summary: Implemented a policy to control G1 survivor space parameters.
Reviewed-by: tonyp, iveresov
author apetrusenko
date Fri, 06 Feb 2009 01:38:50 +0300
parents 818efdefcc99
children 05c6d52fa7a9
comparing 544:82a980778b92 with 545:58054a18d735
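Note: the sizing policy named in the summary (how many survivor regions to allow, and the tenuring threshold) lands in g1CollectorPolicy.cpp/hpp, which is not part of this file's hunks. As orientation only, a minimal sketch of what such a computation could look like; every name below is an illustrative stand-in modeled on classic HotSpot survivor sizing, not taken from this changeset (only G1UseSurvivorSpace actually appears in the hunks further down).

#include <cstddef>

// Hedged sketch only -- not code from this changeset.
static const size_t kRegionBytes = 1024 * 1024;  // assumed 1M G1 regions

size_t max_survivor_regions(size_t young_target_regions,
                            size_t survivor_ratio,         // cf. -XX:SurvivorRatio
                            size_t fixed_survivor_bytes) { // 0 means "derive it"
  if (fixed_survivor_bytes == 0) {
    // Size survivor space as a fraction of the young list target.
    return young_target_regions / survivor_ratio;
  }
  // Otherwise pin survivor space to a fixed byte size, in whole regions.
  return fixed_survivor_bytes / kRegionBytes;
}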
@@ -139,11 +139,11 @@
 YoungList::YoungList(G1CollectedHeap* g1h)
   : _g1h(g1h), _head(NULL),
     _scan_only_head(NULL), _scan_only_tail(NULL), _curr_scan_only(NULL),
     _length(0), _scan_only_length(0),
     _last_sampled_rs_lengths(0),
-    _survivor_head(NULL), _survivors_tail(NULL), _survivor_length(0)
+    _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0)
 {
   guarantee( check_list_empty(false), "just making sure..." );
 }

 void YoungList::push_region(HeapRegion *hr) {
@@ -157,20 +157,19 @@
   double yg_surv_rate = _g1h->g1_policy()->predict_yg_surv_rate((int)_length);
   ++_length;
 }

 void YoungList::add_survivor_region(HeapRegion* hr) {
-  assert(!hr->is_survivor(), "should not already be for survived");
+  assert(hr->is_survivor(), "should be flagged as survivor region");
   assert(hr->get_next_young_region() == NULL, "cause it should!");

   hr->set_next_young_region(_survivor_head);
   if (_survivor_head == NULL) {
-    _survivors_tail = hr;
+    _survivor_tail = hr;
   }
   _survivor_head = hr;

-  hr->set_survivor();
   ++_survivor_length;
 }

 HeapRegion* YoungList::pop_region() {
   while (_head != NULL) {
@@ -237,11 +236,11 @@
   _scan_only_length = 0;
   _curr_scan_only = NULL;

   empty_list(_survivor_head);
   _survivor_head = NULL;
-  _survivors_tail = NULL;
+  _survivor_tail = NULL;
   _survivor_length = 0;

   _last_sampled_rs_lengths = 0;

   assert(check_list_empty(false), "just making sure...");
@@ -389,21 +388,22 @@
   guarantee( is_empty(), "young list should be empty" );
   assert(check_list_well_formed(), "young list should be well formed");

   // Add survivor regions to SurvRateGroup.
   _g1h->g1_policy()->note_start_adding_survivor_regions();
+  _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);
   for (HeapRegion* curr = _survivor_head;
        curr != NULL;
        curr = curr->get_next_young_region()) {
     _g1h->g1_policy()->set_region_survivors(curr);
   }
   _g1h->g1_policy()->note_stop_adding_survivor_regions();

   if (_survivor_head != NULL) {
     _head = _survivor_head;
     _length = _survivor_length + _scan_only_length;
-    _survivors_tail->set_next_young_region(_scan_only_head);
+    _survivor_tail->set_next_young_region(_scan_only_head);
   } else {
     _head = _scan_only_head;
     _length = _scan_only_length;
   }

@@ -416,13 +416,13 @@
   _scan_only_tail = NULL;
   _scan_only_length = 0;
   _curr_scan_only = NULL;

   _survivor_head = NULL;
-  _survivors_tail = NULL;
+  _survivor_tail = NULL;
   _survivor_length = 0;
-  _g1h->g1_policy()->finished_recalculating_age_indexes();
+  _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);

   assert(check_list_well_formed(), "young list should be well formed");
 }

 void YoungList::print() {
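In the hunks above, reset_auxilary_lists() splices the survivor list onto the front of the young list, so the previous pause's survivors are collected again in the next pause. A toy model of just that pointer surgery (Node stands in for HeapRegion and its next_young_region link; illustrative only, not HotSpot code):

#include <cstddef>

struct Node { Node* next; };  // stand-in for HeapRegion's young-list link

// Mirrors the shape of the update above: if there are survivors, the
// survivor tail is linked to the old head and the survivor head leads.
Node* splice_survivors(Node* surv_head, Node* surv_tail, Node* scan_head) {
  if (surv_head == NULL) {
    return scan_head;          // no survivors this pause
  }
  surv_tail->next = scan_head; // survivor tail -> former list head
  return surv_head;            // survivors now front the young list
}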
@@ -551,11 +551,11 @@
                                                bool zero_filled) {
   HeapRegion* alloc_region = NULL;
   if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) {
     alloc_region = newAllocRegion_work(word_size, true, zero_filled);
     if (purpose == GCAllocForSurvived && alloc_region != NULL) {
-      _young_list->add_survivor_region(alloc_region);
+      alloc_region->set_survivor();
     }
     ++_gc_alloc_region_counts[purpose];
   } else {
     g1_policy()->note_alloc_region_limit_reached(purpose);
   }
@@ -2591,10 +2591,13 @@

 #if SCAN_ONLY_VERBOSE
       _young_list->print();
 #endif // SCAN_ONLY_VERBOSE

+      g1_policy()->record_survivor_regions(_young_list->survivor_length(),
+                                           _young_list->first_survivor_region(),
+                                           _young_list->last_survivor_region());
       _young_list->reset_auxilary_lists();
     }
   } else {
     COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
   }
@@ -2617,11 +2620,13 @@
 #if SCAN_ONLY_VERBOSE
     _young_list->print();
 #endif // SCAN_ONLY_VERBOSE

     double end_time_sec = os::elapsedTime();
-    g1_policy()->record_pause_time((end_time_sec - start_time_sec)*1000.0);
+    if (!evacuation_failed()) {
+      g1_policy()->record_pause_time((end_time_sec - start_time_sec)*1000.0);
+    }
     GCOverheadReporter::recordSTWEnd(end_time_sec);
     g1_policy()->record_collection_pause_end(popular_region != NULL,
                                              abandoned);

     assert(regions_accounted_for(), "Region leakage.");
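The new record_pause_time() guard keeps evacuation-failure pauses out of the policy's timing samples; those samples feed the model that sizes future collection sets, and an evacuation failure is a pathological outlier. A stand-in estimator to make that concrete (the real G1 policy bookkeeping is more involved; all names here are hypothetical):

// Hypothetical stand-in for the policy's pause-time model: a decaying
// average that drops samples from failed evacuations so one pathological
// pause cannot skew future predictions.
struct PauseTimePredictor {
  double avg_ms = 0.0;  // running estimate
  double alpha  = 0.3;  // decay factor

  void record(double pause_ms, bool evacuation_failed) {
    if (evacuation_failed) return;  // mirrors the guard in the hunk above
    avg_ms = alpha * pause_ms + (1.0 - alpha) * avg_ms;
  }
  double predict() const { return avg_ms; }
};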
@@ -2752,10 +2757,17 @@
     HeapRegion* r = _gc_alloc_region_list;
     assert(r->is_gc_alloc_region(), "Invariant.");
     _gc_alloc_region_list = r->next_gc_alloc_region();
     r->set_next_gc_alloc_region(NULL);
     r->set_is_gc_alloc_region(false);
+    if (r->is_survivor()) {
+      if (r->is_empty()) {
+        r->set_not_young();
+      } else {
+        _young_list->add_survivor_region(r);
+      }
+    }
     if (r->is_empty()) {
       ++_free_regions;
     }
   }
 #ifdef G1_DEBUG
@@ -3148,10 +3160,24 @@
     block = allocate_during_gc_slow(purpose, alloc_region, true, word_size);
   }
   return block;
 }

+void G1CollectedHeap::retire_alloc_region(HeapRegion* alloc_region,
+                                          bool par) {
+  // Another thread might have obtained alloc_region for the given
+  // purpose, and might be attempting to allocate in it, and might
+  // succeed. Therefore, we can't do the "finalization" stuff on the
+  // region below until we're sure the last allocation has happened.
+  // We ensure this by allocating the remaining space with a garbage
+  // object.
+  if (par) par_allocate_remaining_space(alloc_region);
+  // Now we can do the post-GC stuff on the region.
+  alloc_region->note_end_of_copying();
+  g1_policy()->record_after_bytes(alloc_region->used());
+}
+
 HeapWord*
 G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose,
                                          HeapRegion* alloc_region,
                                          bool par,
                                          size_t word_size) {
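The comment carried into retire_alloc_region() describes a small protocol: a parallel worker may still be finishing one last allocation in the region, so the retiring thread first claims all remaining space itself and formats it as a garbage object, making the region's top final before the bookkeeping below it. The body of par_allocate_remaining_space() is not in these hunks; a minimal sketch of the idea, under stated assumptions:

// Sketch under assumptions: a HeapRegion-like type with a CAS-based
// par_allocate(), and a hypothetical fill_with_garbage() helper that
// formats the claimed words as a dead filler object (e.g. an int array).
void plug_remaining_space(HeapRegion* r) {
  for (;;) {
    size_t free_words = r->free() / HeapWordSize;
    if (free_words == 0) return;         // region already completely full
    HeapWord* p = r->par_allocate(free_words);
    if (p != NULL) {
      fill_with_garbage(p, free_words);  // no later allocation can race
      return;
    }
    // Lost the race: another worker allocated; recompute what is left.
  }
}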
@@ -3165,40 +3191,32 @@
     block = alloc_region->par_allocate(word_size);
     if (block != NULL) return block;
     // Otherwise, continue; this new region is empty, too.
   }
   assert(alloc_region != NULL, "We better have an allocation region");
-  // Another thread might have obtained alloc_region for the given
-  // purpose, and might be attempting to allocate in it, and might
-  // succeed. Therefore, we can't do the "finalization" stuff on the
-  // region below until we're sure the last allocation has happened.
-  // We ensure this by allocating the remaining space with a garbage
-  // object.
-  if (par) par_allocate_remaining_space(alloc_region);
-  // Now we can do the post-GC stuff on the region.
-  alloc_region->note_end_of_copying();
-  g1_policy()->record_after_bytes(alloc_region->used());
+  retire_alloc_region(alloc_region, par);

   if (_gc_alloc_region_counts[purpose] >= g1_policy()->max_regions(purpose)) {
     // Cannot allocate more regions for the given purpose.
     GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(purpose);
     // Is there an alternative?
     if (purpose != alt_purpose) {
       HeapRegion* alt_region = _gc_alloc_regions[alt_purpose];
       // Has not the alternative region been aliased?
-      if (alloc_region != alt_region) {
+      if (alloc_region != alt_region && alt_region != NULL) {
         // Try to allocate in the alternative region.
         if (par) {
           block = alt_region->par_allocate(word_size);
         } else {
           block = alt_region->allocate(word_size);
         }
         // Make an alias.
         _gc_alloc_regions[purpose] = _gc_alloc_regions[alt_purpose];
-      }
-      if (block != NULL) {
-        return block;
+        if (block != NULL) {
+          return block;
+        }
+        retire_alloc_region(alt_region, par);
       }
       // Both the allocation region and the alternative one are full
       // and aliased, replace them with a new allocation region.
       purpose = alt_purpose;
     } else {
@@ -3495,10 +3513,11 @@

   typedef GrowableArray<oop*> OverflowQueue;
   OverflowQueue* _overflowed_refs;

   G1ParGCAllocBuffer _alloc_buffers[GCAllocPurposeCount];
+  ageTable _age_table;

   size_t _alloc_buffer_waste;
   size_t _undo_waste;

   OopsInHeapRegionClosure* _evac_failure_cl;
@@ -3536,10 +3555,11 @@
   G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num)
     : _g1h(g1h),
       _refs(g1h->task_queue(queue_num)),
       _hash_seed(17), _queue_num(queue_num),
       _term_attempts(0),
+      _age_table(false),
 #if G1_DETAILED_STATS
       _pushes(0), _pops(0), _steals(0),
       _steal_attempts(0), _overflow_pushes(0),
 #endif
       _strong_roots_time(0), _term_time(0),
@@ -3570,12 +3590,13 @@
     FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
   }

   RefToScanQueue* refs() { return _refs; }
   OverflowQueue* overflowed_refs() { return _overflowed_refs; }
+  ageTable* age_table() { return &_age_table; }

-  inline G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
+  G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
     return &_alloc_buffers[purpose];
   }

   size_t alloc_buffer_waste() { return _alloc_buffer_waste; }
   size_t undo_waste() { return _undo_waste; }
@@ -3832,11 +3853,13 @@
     int young_index = from_region->young_index_in_cset()+1;
     assert( (from_region->is_young() && young_index > 0) ||
             (!from_region->is_young() && young_index == 0), "invariant" );
     G1CollectorPolicy* g1p = _g1->g1_policy();
     markOop m = old->mark();
-    GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, m->age(),
+    int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
+                                             : m->age();
+    GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
                                                                word_sz);
     HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz);
     oop obj = oop(obj_ptr);

     if (obj_ptr == NULL) {
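The bugfix half of this hunk: m->age() is only valid while the mark word actually sits in the object header. For a locked object (stack-locked or with an inflated monitor) the header is displaced into the lock record, and the bits found in place are a pointer, not age bits. Both helpers used above are real markOop methods; isolated, the pattern is:

// Read the age through the displaced header when the object is locked;
// for an unlocked object the in-place mark word carries the age bits.
int object_age(markOop m) {
  return m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
                                        : m->age();
}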
@@ -3870,13 +3893,16 @@
         // which contains the forward pointer, was copied)
         obj->set_mark(m);
         obj->incr_age();
       } else {
         m = m->incr_age();
+        obj->set_mark(m);
       }
-    }
-    obj->set_mark(m);
+      _par_scan_state->age_table()->add(obj, word_sz);
+    } else {
+      obj->set_mark(m);
+    }

     // preserve "next" mark bit
     if (_g1->mark_in_progress() && !_g1->is_obj_ill(old)) {
       if (!use_local_bitmaps ||
           !_par_scan_state->alloc_buffer(alloc_purpose)->mark(obj_ptr)) {
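The new _par_scan_state->age_table()->add(obj, word_sz) call feeds a per-worker table of copied bytes bucketed by object age; a later hunk merges these into the policy via record_thread_age_table(). A simplified, self-contained model of that bookkeeping, assuming the usual HotSpot ageTable scheme (4 age bits, hence 16 buckets):

#include <cstddef>

// Simplified model of an age table: words copied per age, merged across
// worker threads, then scanned for the tenuring threshold.
struct SimpleAgeTable {
  static const int table_size = 16;  // assumed 4 age bits in the mark word
  size_t sizes[table_size];

  SimpleAgeTable() { for (int i = 0; i < table_size; ++i) sizes[i] = 0; }

  void add(int age, size_t word_sz) { sizes[age] += word_sz; }

  void merge(const SimpleAgeTable& other) {  // one table per GC worker
    for (int i = 0; i < table_size; ++i) sizes[i] += other.sizes[i];
  }

  // Smallest age at which the running total no longer fits the survivor
  // capacity; objects reaching that age get tenured to the old gen.
  int compute_tenuring_threshold(size_t survivor_capacity_words) const {
    size_t total = 0;
    int age = 1;
    while (age < table_size) {
      total += sizes[age];
      if (total > survivor_capacity_words) break;
      ++age;
    }
    return age;
  }
};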
@@ -4126,10 +4152,13 @@
       evac.do_void();
       double elapsed_ms = (os::elapsedTime()-start)*1000.0;
       double term_ms = pss.term_time()*1000.0;
       _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms);
       _g1h->g1_policy()->record_termination_time(i, term_ms);
+    }
+    if (G1UseSurvivorSpace) {
+      _g1h->g1_policy()->record_thread_age_table(pss.age_table());
     }
     _g1h->update_surviving_young_words(pss.surviving_young_words()+1);

     // Clean up any par-expanded rem sets.
     HeapRegionRemSet::par_cleanup();
@@ -4366,11 +4395,11 @@
   g1_policy()->record_par_time(par_time);
   set_par_threads(0);
   // Is this the right thing to do here? We don't save marks
   // on individual heap regions when we allocate from
   // them in parallel, so this seems like the correct place for this.
-  all_alloc_regions_note_end_of_copying();
+  retire_all_alloc_regions();
   {
     G1IsAliveClosure is_alive(this);
     G1KeepAliveClosure keep_alive(this);
     JNIHandles::weak_oops_do(&is_alive, &keep_alive);
   }
@@ -5006,11 +5035,11 @@
     no_allocs = r == NULL || r->saved_mark_at_top();
   }
   return no_allocs;
 }

-void G1CollectedHeap::all_alloc_regions_note_end_of_copying() {
+void G1CollectedHeap::retire_all_alloc_regions() {
   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
     HeapRegion* r = _gc_alloc_regions[ap];
     if (r != NULL) {
       // Check for aliases.
       bool has_processed_alias = false;
@@ -5019,12 +5048,11 @@
           has_processed_alias = true;
           break;
         }
       }
       if (!has_processed_alias) {
-        r->note_end_of_copying();
-        g1_policy()->record_after_bytes(r->used());
+        retire_alloc_region(r, false /* par */);
       }
     }
   }
 }
