comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 22901:c132be0fb74d

8060025: Object copy time regressions after JDK-8031323 and JDK-8057536
Summary: Evaluate and improve object copy time by micro-optimizations and splitting out slow and fast paths aggressively.
Reviewed-by: kbarrett, mgerdin, jmasa
Contributed-by: Tony Printezis <tprintezis@twitter.com>, Thomas Schatzl <thomas.schatzl@oracle.com>
author tschatzl
date Fri, 19 Dec 2014 09:21:06 +0100
parents f97f21d8d58c
children 8e9ede9dd2cd
comparison of 22900:dfa21a177d66 with 22901:c132be0fb74d
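
The hunks below replace the old G1CollectedHeap::in_cset_state_t enum and the GCAllocPurpose tag with a single InCSetState value that answers both questions the copy path needs: whether an object is in the collection set (or is a live humongous object), and which generation a surviving copy should go to. As a reading aid, here is a minimal, hypothetical sketch of such a type, reconstructed only from the accessors that appear in this diff (is_in_cset(), is_humongous(), is_in_cset_or_humongous(), is_young(), is_old(), value()); the class name InCSetStateSketch and the concrete constants are assumptions, not the actual definition introduced by this change.

    // Hypothetical sketch only: a compact in-collection-set state that also
    // names the copy destination. Assumed encoding: negative = humongous,
    // zero = not in the collection set, positive = destination generation.
    // The real InCSetState added by this changeset may be encoded differently.
    #include <stdint.h>

    class InCSetStateSketch {
     public:
      typedef int8_t in_cset_state_t;

      enum {
        Humongous = -1,  // not in the collection set, but a live humongous object
        NotInCSet =  0,  // neither in the collection set nor humongous
        Young     =  1,  // in the collection set; copy into a survivor region
        Old       =  2   // in the collection set; copy into an old region
      };

      explicit InCSetStateSketch(in_cset_state_t value = NotInCSet) : _value(value) {}

      in_cset_state_t value() const { return _value; }

      // Predicates mirroring the calls visible in the hunks below.
      bool is_humongous() const            { return _value == Humongous; }
      bool is_in_cset() const              { return _value > NotInCSet; }
      bool is_in_cset_or_humongous() const { return _value != NotInCSet; }
      bool is_young() const                { return _value == Young; }
      bool is_old() const                  { return _value == Old; }

     private:
      in_cset_state_t _value;
    };

    // Usage mirroring the copy path in the hunks below (names from the diff):
    //   const InCSetStateSketch state = in_cset_state(obj);
    //   if (state.is_in_cset()) { copy_to_survivor_space(state, obj, m); }

Because the per-region table (_in_cset_fast_test in the new check_cset_fast_test() hunk) stores these state values, a single lookup now yields both the fast in-cset answer and the copy destination, which is why copy_to_survivor_space() gains a state argument and new_gc_alloc_region()/retire_gc_alloc_region() branch on dest.is_young() instead of a GCAllocPurpose.
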
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -4023,10 +4023,12 @@
 
         g1_policy()->finalize_cset(target_pause_time_ms, evacuation_info);
 
         register_humongous_regions_with_in_cset_fast_test();
 
+        assert(check_cset_fast_test(), "Inconsistency in the InCSetState table.");
+
         _cm->note_start_of_gc();
         // We should not verify the per-thread SATB buffers given that
         // we have not filtered them yet (we'll do so during the
         // GC). We also call this after finalize_cset() to
         // ensure that the CSet has been finalized.
@@ -4252,33 +4254,10 @@
   }
 
   return true;
 }
 
-size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose)
-{
-  size_t gclab_word_size;
-  switch (purpose) {
-    case GCAllocForSurvived:
-      gclab_word_size = _survivor_plab_stats.desired_plab_sz();
-      break;
-    case GCAllocForTenured:
-      gclab_word_size = _old_plab_stats.desired_plab_sz();
-      break;
-    default:
-      assert(false, "unknown GCAllocPurpose");
-      gclab_word_size = _old_plab_stats.desired_plab_sz();
-      break;
-  }
-
-  // Prevent humongous PLAB sizes for two reasons:
-  // * PLABs are allocated using a similar paths as oops, but should
-  //   never be in a humongous region
-  // * Allowing humongous PLABs needlessly churns the region free lists
-  return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
-}
-
 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
   _drain_in_progress = false;
   set_evac_failure_closure(cl);
   _evac_failure_scan_stack = new (ResourceObj::C_HEAP, mtGC) GrowableArray<oop>(40, true);
 }
@@ -4414,39 +4393,10 @@
     _objs_with_preserved_marks.push(obj);
     _preserved_marks_of_objs.push(m);
   }
 }
 
-HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
-                                                  size_t word_size,
-                                                  AllocationContext_t context) {
-  if (purpose == GCAllocForSurvived) {
-    HeapWord* result = survivor_attempt_allocation(word_size, context);
-    if (result != NULL) {
-      return result;
-    } else {
-      // Let's try to allocate in the old gen in case we can fit the
-      // object there.
-      return old_attempt_allocation(word_size, context);
-    }
-  } else {
-    assert(purpose == GCAllocForTenured, "sanity");
-    HeapWord* result = old_attempt_allocation(word_size, context);
-    if (result != NULL) {
-      return result;
-    } else {
-      // Let's try to allocate in the survivors in case we can fit the
-      // object there.
-      return survivor_attempt_allocation(word_size, context);
-    }
-  }
-
-  ShouldNotReachHere();
-  // Trying to keep some compilers happy.
-  return NULL;
-}
-
 void G1ParCopyHelper::mark_object(oop obj) {
   assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");
 
   // We know that the object is not moving so it's safe to read its size.
   _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
@@ -4485,19 +4435,18 @@
 
   oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
 
   assert(_worker_id == _par_scan_state->queue_num(), "sanity");
 
-  G1CollectedHeap::in_cset_state_t state = _g1->in_cset_state(obj);
-
-  if (state == G1CollectedHeap::InCSet) {
+  const InCSetState state = _g1->in_cset_state(obj);
+  if (state.is_in_cset()) {
     oop forwardee;
     markOop m = obj->mark();
     if (m->is_marked()) {
       forwardee = (oop) m->decode_pointer();
     } else {
-      forwardee = _par_scan_state->copy_to_survivor_space(obj, m);
+      forwardee = _par_scan_state->copy_to_survivor_space(state, obj, m);
     }
     assert(forwardee != NULL, "forwardee should not be NULL");
     oopDesc::encode_store_heap_oop(p, forwardee);
     if (do_mark_object != G1MarkNone && forwardee != obj) {
       // If the object is self-forwarded we don't need to explicitly
@@ -4507,11 +4456,11 @@
 
     if (barrier == G1BarrierKlass) {
       do_klass_barrier(p, forwardee);
     }
   } else {
-    if (state == G1CollectedHeap::IsHumongous) {
+    if (state.is_humongous()) {
       _g1->set_humongous_is_live(obj);
     }
     // The object is not in collection set. If we're a root scanning
     // closure during an initial mark pause then attempt to mark the object.
     if (do_mark_object == G1MarkFromRoot) {
@@ -5392,21 +5341,21 @@
   void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
   void do_oop(oop* p) {
     oop obj = *p;
     assert(obj != NULL, "the caller should have filtered out NULL values");
 
-    G1CollectedHeap::in_cset_state_t cset_state = _g1->in_cset_state(obj);
-    if (cset_state == G1CollectedHeap::InNeither) {
+    const InCSetState cset_state = _g1->in_cset_state(obj);
+    if (!cset_state.is_in_cset_or_humongous()) {
       return;
     }
-    if (cset_state == G1CollectedHeap::InCSet) {
+    if (cset_state.is_in_cset()) {
       assert( obj->is_forwarded(), "invariant" );
       *p = obj->forwardee();
     } else {
       assert(!obj->is_forwarded(), "invariant" );
-      assert(cset_state == G1CollectedHeap::IsHumongous,
-             err_msg("Only allowed InCSet state is IsHumongous, but is %d", cset_state));
+      assert(cset_state.is_humongous(),
+             err_msg("Only allowed InCSet state is IsHumongous, but is %d", cset_state.value()));
       _g1->set_humongous_is_live(obj);
     }
   }
 };
 
@@ -6223,10 +6172,62 @@
   if (!G1VerifyBitmaps) return;
 
   G1VerifyBitmapClosure cl(caller, this);
   heap_region_iterate(&cl);
   guarantee(!cl.failures(), "bitmap verification");
+}
+
+bool G1CollectedHeap::check_cset_fast_test() {
+  bool failures = false;
+  for (uint i = 0; i < _hrm.length(); i += 1) {
+    HeapRegion* hr = _hrm.at(i);
+    InCSetState cset_state = (InCSetState) _in_cset_fast_test.get_by_index((uint) i);
+    if (hr->isHumongous()) {
+      if (hr->in_collection_set()) {
+        gclog_or_tty->print_cr("\n## humongous region %u in CSet", i);
+        failures = true;
+        break;
+      }
+      if (cset_state.is_in_cset()) {
+        gclog_or_tty->print_cr("\n## inconsistent cset state %d for humongous region %u", cset_state.value(), i);
+        failures = true;
+        break;
+      }
+      if (hr->continuesHumongous() && cset_state.is_humongous()) {
+        gclog_or_tty->print_cr("\n## inconsistent cset state %d for continues humongous region %u", cset_state.value(), i);
+        failures = true;
+        break;
+      }
+    } else {
+      if (cset_state.is_humongous()) {
+        gclog_or_tty->print_cr("\n## inconsistent cset state %d for non-humongous region %u", cset_state.value(), i);
+        failures = true;
+        break;
+      }
+      if (hr->in_collection_set() != cset_state.is_in_cset()) {
+        gclog_or_tty->print_cr("\n## in CSet %d / cset state %d inconsistency for region %u",
+                               hr->in_collection_set(), cset_state.value(), i);
+        failures = true;
+        break;
+      }
+      if (cset_state.is_in_cset()) {
+        if (hr->is_young() != (cset_state.is_young())) {
+          gclog_or_tty->print_cr("\n## is_young %d / cset state %d inconsistency for region %u",
+                                 hr->is_young(), cset_state.value(), i);
+          failures = true;
+          break;
+        }
+        if (hr->is_old() != (cset_state.is_old())) {
+          gclog_or_tty->print_cr("\n## is_old %d / cset state %d inconsistency for region %u",
+                                 hr->is_old(), cset_state.value(), i);
+          failures = true;
+          break;
+        }
+      }
+    }
+  }
+  return !failures;
 }
 #endif // PRODUCT
 
 void G1CollectedHeap::cleanUpCardTable() {
   G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
@@ -6806,24 +6807,24 @@
 
 // Methods for the GC alloc regions
 
 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
                                                  uint count,
-                                                 GCAllocPurpose ap) {
+                                                 InCSetState dest) {
   assert(FreeList_lock->owned_by_self(), "pre-condition");
 
-  if (count < g1_policy()->max_regions(ap)) {
-    bool survivor = (ap == GCAllocForSurvived);
+  if (count < g1_policy()->max_regions(dest)) {
+    const bool is_survivor = (dest.is_young());
     HeapRegion* new_alloc_region = new_region(word_size,
-                                              !survivor,
+                                              !is_survivor,
                                               true /* do_expand */);
     if (new_alloc_region != NULL) {
       // We really only need to do this for old regions given that we
       // should never scan survivors. But it doesn't hurt to do it
       // for survivors too.
       new_alloc_region->record_timestamp();
-      if (survivor) {
+      if (is_survivor) {
        new_alloc_region->set_survivor();
        _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
        check_bitmaps("Survivor Region Allocation", new_alloc_region);
      } else {
        new_alloc_region->set_old();
@@ -6831,24 +6832,22 @@
        check_bitmaps("Old Region Allocation", new_alloc_region);
      }
      bool during_im = g1_policy()->during_initial_mark_pause();
      new_alloc_region->note_start_of_copying(during_im);
      return new_alloc_region;
-    } else {
-      g1_policy()->note_alloc_region_limit_reached(ap);
    }
  }
  return NULL;
}
 
 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
                                              size_t allocated_bytes,
-                                             GCAllocPurpose ap) {
+                                             InCSetState dest) {
   bool during_im = g1_policy()->during_initial_mark_pause();
   alloc_region->note_end_of_copying(during_im);
   g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
-  if (ap == GCAllocForSurvived) {
+  if (dest.is_young()) {
     young_list()->add_survivor_region(alloc_region);
   } else {
     _old_set.add(alloc_region);
   }
   _hr_printer.retire(alloc_region);