comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 526:818efdefcc99

6484956: G1: improve evacuation pause efficiency
Summary: A bunch of performance optimizations to decrease GC pause times in G1.
Reviewed-by: apetrusenko, jmasa, iveresov
author tonyp
date Fri, 16 Jan 2009 13:02:20 -0500
parents 569b3b226089
children 58054a18d735
1283 _full_collection(false), 1283 _full_collection(false),
1284 _unclean_region_list(), 1284 _unclean_region_list(),
1285 _unclean_regions_coming(false), 1285 _unclean_regions_coming(false),
1286 _young_list(new YoungList(this)), 1286 _young_list(new YoungList(this)),
1287 _gc_time_stamp(0), 1287 _gc_time_stamp(0),
1288 _surviving_young_words(NULL) 1288 _surviving_young_words(NULL),
1289 _in_cset_fast_test(NULL),
1290 _in_cset_fast_test_base(NULL)
1289 { 1291 {
1290 _g1h = this; // To catch bugs. 1292 _g1h = this; // To catch bugs.
1291 if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) { 1293 if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
1292 vm_exit_during_initialization("Failed necessary allocation."); 1294 vm_exit_during_initialization("Failed necessary allocation.");
1293 } 1295 }
2483 } 2485 }
2484 2486
2485 g1_policy()->record_collection_pause_start(start_time_sec, 2487 g1_policy()->record_collection_pause_start(start_time_sec,
2486 start_used_bytes); 2488 start_used_bytes);
2487 2489
2490 guarantee(_in_cset_fast_test == NULL, "invariant");
2491 guarantee(_in_cset_fast_test_base == NULL, "invariant");
2492 _in_cset_fast_test_length = n_regions();
2493 _in_cset_fast_test_base =
2494 NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length);
2495 memset(_in_cset_fast_test_base, false,
2496 _in_cset_fast_test_length * sizeof(bool));
2497 // We're biasing _in_cset_fast_test to avoid subtracting the
2498 // beginning of the heap every time we want to index; basically
2499 // it's the same as what we do with the card table.
2500 _in_cset_fast_test = _in_cset_fast_test_base -
2501 ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
2502
2488 #if SCAN_ONLY_VERBOSE 2503 #if SCAN_ONLY_VERBOSE
2489 _young_list->print(); 2504 _young_list->print();
2490 #endif // SCAN_ONLY_VERBOSE 2505 #endif // SCAN_ONLY_VERBOSE
2491 2506
2492 if (g1_policy()->should_initiate_conc_mark()) { 2507 if (g1_policy()->should_initiate_conc_mark()) {
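
The in_cset_fast_test() lookup that later hunks call is declared in g1CollectedHeap.hpp and does not appear in this file's diff. As a minimal sketch of the biased-array idea (paraphrasing the accessor rather than quoting it), the bias set up above lets an object's address be shifted and used as an index directly, with no per-lookup subtraction of the heap bottom:

    // Sketch only; the real accessor lives in g1CollectedHeap.hpp.
    bool in_cset_fast_test(oop obj) {
      assert(_in_cset_fast_test != NULL, "sanity");
      if (_g1_committed.contains((HeapWord*) obj)) {
        // No need to subtract the bottom of the heap from obj:
        // _in_cset_fast_test is already biased by it.
        size_t index = ((size_t) obj) >> HeapRegion::LogOfHRGrainBytes;
        bool ret = _in_cset_fast_test[index];
        // The fast test must agree with the slower region-based test.
        assert(obj_in_cs(obj) == ret, "sanity");
        return ret;
      } else {
        return false;
      }
    }
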
2550 2565
2551 // Actually do the work... 2566 // Actually do the work...
2552 evacuate_collection_set(); 2567 evacuate_collection_set();
2553 free_collection_set(g1_policy()->collection_set()); 2568 free_collection_set(g1_policy()->collection_set());
2554 g1_policy()->clear_collection_set(); 2569 g1_policy()->clear_collection_set();
2570
2571 FREE_C_HEAP_ARRAY(bool, _in_cset_fast_test_base);
2572 // this is more for peace of mind; we're nulling them here and
2573 // we're expecting them to be null at the beginning of the next GC
2574 _in_cset_fast_test = NULL;
2575 _in_cset_fast_test_base = NULL;
2555 2576
2556 if (popular_region != NULL) { 2577 if (popular_region != NULL) {
2557 // We have to wait until now, because we don't want the region to 2578 // We have to wait until now, because we don't want the region to
2558 // be rescheduled for pop-evac during RS update. 2579 // be rescheduled for pop-evac during RS update.
2559 popular_region->set_popular_pending(false); 2580 popular_region->set_popular_pending(false);
3558 3579
3559 size_t alloc_buffer_waste() { return _alloc_buffer_waste; } 3580 size_t alloc_buffer_waste() { return _alloc_buffer_waste; }
3560 size_t undo_waste() { return _undo_waste; } 3581 size_t undo_waste() { return _undo_waste; }
3561 3582
3562 void push_on_queue(oop* ref) { 3583 void push_on_queue(oop* ref) {
3584 assert(ref != NULL, "invariant");
3585 assert(has_partial_array_mask(ref) || _g1h->obj_in_cs(*ref), "invariant");
3586
3563 if (!refs()->push(ref)) { 3587 if (!refs()->push(ref)) {
3564 overflowed_refs()->push(ref); 3588 overflowed_refs()->push(ref);
3565 IF_G1_DETAILED_STATS(note_overflow_push()); 3589 IF_G1_DETAILED_STATS(note_overflow_push());
3566 } else { 3590 } else {
3567 IF_G1_DETAILED_STATS(note_push()); 3591 IF_G1_DETAILED_STATS(note_push());
3570 3594
3571 void pop_from_queue(oop*& ref) { 3595 void pop_from_queue(oop*& ref) {
3572 if (!refs()->pop_local(ref)) { 3596 if (!refs()->pop_local(ref)) {
3573 ref = NULL; 3597 ref = NULL;
3574 } else { 3598 } else {
3599 assert(ref != NULL, "invariant");
3600 assert(has_partial_array_mask(ref) || _g1h->obj_in_cs(*ref),
3601 "invariant");
3602
3575 IF_G1_DETAILED_STATS(note_pop()); 3603 IF_G1_DETAILED_STATS(note_pop());
3576 } 3604 }
3577 } 3605 }
3578 3606
3579 void pop_from_overflow_queue(oop*& ref) { 3607 void pop_from_overflow_queue(oop*& ref) {
3599 // Otherwise. 3627 // Otherwise.
3600 alloc_buf->set_buf(buf); 3628 alloc_buf->set_buf(buf);
3601 3629
3602 obj = alloc_buf->allocate(word_sz); 3630 obj = alloc_buf->allocate(word_sz);
3603 assert(obj != NULL, "buffer was definitely big enough..."); 3631 assert(obj != NULL, "buffer was definitely big enough...");
3604 } 3632 } else {
3605 else {
3606 obj = _g1h->par_allocate_during_gc(purpose, word_sz); 3633 obj = _g1h->par_allocate_during_gc(purpose, word_sz);
3607 } 3634 }
3608 return obj; 3635 return obj;
3609 } 3636 }
3610 3637
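
The has_partial_array_mask() checks asserted above (and set_partial_array_mask()/clear_partial_array_mask() used further down) wrap the open-coded G1_PARTIAL_ARRAY_MASK arithmetic from the old code; they are defined in the header rather than here. A sketch of the pointer-tagging scheme they imply, assuming a one-bit tag in the low bit:

    // Sketch only; the real helpers are declared in g1CollectedHeap.hpp.
    // A "partial array" task-queue entry is not a real oop* but the old
    // (forwarded-from) objArray pointer with its low bit set, so both kinds
    // of entries can share the same queue of oop*.
    #define G1_PARTIAL_ARRAY_MASK 1   // assumed value; the constant predates this patch

    inline bool has_partial_array_mask(oop* ref) {
      return ((intptr_t) ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
    }

    inline oop* set_partial_array_mask(oop obj) {
      return (oop*) ((intptr_t) obj | G1_PARTIAL_ARRAY_MASK);
    }

    inline oop clear_partial_array_mask(oop* ref) {
      return oop((intptr_t) ref & ~G1_PARTIAL_ARRAY_MASK);
    }
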
3693 add_to_alloc_buffer_waste(waste); 3720 add_to_alloc_buffer_waste(waste);
3694 _alloc_buffers[ap].retire(true, false); 3721 _alloc_buffers[ap].retire(true, false);
3695 } 3722 }
3696 } 3723 }
3697 3724
3725 private:
3726 void deal_with_reference(oop* ref_to_scan) {
3727 if (has_partial_array_mask(ref_to_scan)) {
3728 _partial_scan_cl->do_oop_nv(ref_to_scan);
3729 } else {
3730 // Note: we can use "raw" versions of "region_containing" because
3731 // "obj_to_scan" is definitely in the heap, and is not in a
3732 // humongous region.
3733 HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
3734 _evac_cl->set_region(r);
3735 _evac_cl->do_oop_nv(ref_to_scan);
3736 }
3737 }
3738
3739 public:
3698 void trim_queue() { 3740 void trim_queue() {
3741 // I've replicated the loop twice, first to drain the overflow
3742 // queue, second to drain the task queue. This is better than
3743 // having a single loop, which checks both conditions and, inside
3744 // it, either pops the overflow queue or the task queue, as each
3745 // loop is tighter. Also, the decision to drain the overflow queue
3746 // first is not arbitrary, as the overflow queue is not visible
3747 // to the other workers, whereas the task queue is. So, we want to
3748 // drain the "invisible" entries first, while allowing the other
3749 // workers to potentially steal the "visible" entries.
3750
3699 while (refs_to_scan() > 0 || overflowed_refs_to_scan() > 0) { 3751 while (refs_to_scan() > 0 || overflowed_refs_to_scan() > 0) {
3700 oop *ref_to_scan = NULL; 3752 while (overflowed_refs_to_scan() > 0) {
3701 if (overflowed_refs_to_scan() == 0) { 3753 oop *ref_to_scan = NULL;
3754 pop_from_overflow_queue(ref_to_scan);
3755 assert(ref_to_scan != NULL, "invariant");
3756 // We shouldn't have pushed it on the queue if it was not
3757 // pointing into the CSet.
3758 assert(ref_to_scan != NULL, "sanity");
3759 assert(has_partial_array_mask(ref_to_scan) ||
3760 _g1h->obj_in_cs(*ref_to_scan), "sanity");
3761
3762 deal_with_reference(ref_to_scan);
3763 }
3764
3765 while (refs_to_scan() > 0) {
3766 oop *ref_to_scan = NULL;
3702 pop_from_queue(ref_to_scan); 3767 pop_from_queue(ref_to_scan);
3703 } else { 3768
3704 pop_from_overflow_queue(ref_to_scan); 3769 if (ref_to_scan != NULL) {
3705 } 3770 // We shouldn't have pushed it on the queue if it was not
3706 if (ref_to_scan != NULL) { 3771 // pointing into the CSet.
3707 if ((intptr_t)ref_to_scan & G1_PARTIAL_ARRAY_MASK) { 3772 assert(has_partial_array_mask(ref_to_scan) ||
3708 _partial_scan_cl->do_oop_nv(ref_to_scan); 3773 _g1h->obj_in_cs(*ref_to_scan), "sanity");
3709 } else { 3774
3710 // Note: we can use "raw" versions of "region_containing" because 3775 deal_with_reference(ref_to_scan);
3711 // "obj_to_scan" is definitely in the heap, and is not in a
3712 // humongous region.
3713 HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
3714 _evac_cl->set_region(r);
3715 _evac_cl->do_oop_nv(ref_to_scan);
3716 } 3776 }
3717 } 3777 }
3718 } 3778 }
3719 } 3779 }
3720 }; 3780 };
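
For context, the two queues drained above belong to G1ParScanThreadState and are not declared in this hunk; their roles, inferred from usage, explain the drain order:

    // Sketch only, inferred from usage (real declarations are elsewhere in
    // G1ParScanThreadState):
    //
    //   refs()                    - a fixed-size GenericTaskQueue of oop*;
    //                               other workers may steal() from it.
    //   overflowed_refs()         - a per-thread GrowableArray<oop*> used only
    //                               when refs()->push() fails; never visible
    //                               to other workers.
    //   refs_to_scan()            - number of entries left in refs().
    //   overflowed_refs_to_scan() - number of entries in the overflow list.
    //
    // Hence the drain order above: empty the private overflow list first and
    // leave the stealable task-queue entries available to idle workers.
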
3726 3786
3727 // This closure is applied to the fields of the objects that have just been copied. 3787 // This closure is applied to the fields of the objects that have just been copied.
3728 // Should probably be made inline and moved in g1OopClosures.inline.hpp. 3788 // Should probably be made inline and moved in g1OopClosures.inline.hpp.
3729 void G1ParScanClosure::do_oop_nv(oop* p) { 3789 void G1ParScanClosure::do_oop_nv(oop* p) {
3730 oop obj = *p; 3790 oop obj = *p;
3791
3731 if (obj != NULL) { 3792 if (obj != NULL) {
3732 if (_g1->obj_in_cs(obj)) { 3793 if (_g1->in_cset_fast_test(obj)) {
3733 if (obj->is_forwarded()) { 3794 // We're not going to even bother checking whether the object is
3734 *p = obj->forwardee(); 3795 // already forwarded or not, as this usually causes an immediate
3735 } else { 3796 // stall. We'll try to prefetch the object (for write, given that
3736 _par_scan_state->push_on_queue(p); 3797 // we might need to install the forwarding reference) and we'll
3737 return; 3798 // get back to it when we pop it from the queue
3738 } 3799 Prefetch::write(obj->mark_addr(), 0);
3739 } 3800 Prefetch::read(obj->mark_addr(), (HeapWordSize*2));
3740 _g1_rem->par_write_ref(_from, p, _par_scan_state->queue_num()); 3801
3802 // slightly paranoid test; I'm trying to catch potential
3803 // problems before we go into push_on_queue to know where the
3804 // problem is coming from
3805 assert(obj == *p, "the value of *p should not have changed");
3806 _par_scan_state->push_on_queue(p);
3807 } else {
3808 _g1_rem->par_write_ref(_from, p, _par_scan_state->queue_num());
3809 }
3741 } 3810 }
3742 } 3811 }
3743 3812
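
The benefit of the two Prefetch calls above materializes later, when the deferred reference is popped and copy_to_survivor_space() touches the mark word; a rough timeline of the producer/consumer split, using the names from the surrounding diff:

    // Rough timeline (names as in this diff), not additional code:
    //
    //   G1ParScanClosure::do_oop_nv(p)             // while scanning a field
    //     in_cset_fast_test(obj)                   // cheap, no mark-word read
    //     Prefetch::write(obj->mark_addr(), 0)     // start the cache miss now
    //     push_on_queue(p)                         // defer the forwarding check
    //
    //   ... later, trim_queue() -> deal_with_reference() ->
    //   G1ParCopyClosure::do_oop_work() -> copy_to_survivor_space(obj)
    //     obj->is_forwarded()                      // mark word now (likely) cached
    //     obj->forward_to_atomic(new_obj)          // CAS on the prefetched line
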
3744 void G1ParCopyHelper::mark_forwardee(oop* p) { 3813 void G1ParCopyHelper::mark_forwardee(oop* p) {
3745 // This is called _after_ do_oop_work has been called, hence after 3814 // This is called _after_ do_oop_work has been called, hence after
3775 // installed a forwarding pointer. 3844 // installed a forwarding pointer.
3776 OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure(); 3845 OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure();
3777 return _g1->handle_evacuation_failure_par(cl, old); 3846 return _g1->handle_evacuation_failure_par(cl, old);
3778 } 3847 }
3779 3848
3849 // We're going to allocate linearly, so might as well prefetch ahead.
3850 Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
3851
3780 oop forward_ptr = old->forward_to_atomic(obj); 3852 oop forward_ptr = old->forward_to_atomic(obj);
3781 if (forward_ptr == NULL) { 3853 if (forward_ptr == NULL) {
3782 Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz); 3854 Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
3855 if (g1p->track_object_age(alloc_purpose)) {
3856 // We could simply do obj->incr_age(). However, this causes a
3857 // performance issue. obj->incr_age() will first check whether
3858 // the object has a displaced mark by checking its mark word;
3859 // getting the mark word from the new location of the object
3860 // stalls. So, given that we already have the mark word and we
3861 // are about to install it anyway, it's better to increase the
3862 // age on the mark word, when the object does not have a
3863 // displaced mark word. We're not expecting many objects to have
3864 // a displaced mark word, so that case is not optimized
3865 // further (it could be...) and we simply call obj->incr_age().
3866
3867 if (m->has_displaced_mark_helper()) {
3868 // in this case, we have to install the mark word first,
3869 // otherwise obj looks to be forwarded (the old mark word,
3870 // which contains the forward pointer, was copied)
3871 obj->set_mark(m);
3872 obj->incr_age();
3873 } else {
3874 m = m->incr_age();
3875 }
3876 }
3783 obj->set_mark(m); 3877 obj->set_mark(m);
3784 if (g1p->track_object_age(alloc_purpose)) { 3878
3785 obj->incr_age();
3786 }
3787 // preserve "next" mark bit 3879 // preserve "next" mark bit
3788 if (_g1->mark_in_progress() && !_g1->is_obj_ill(old)) { 3880 if (_g1->mark_in_progress() && !_g1->is_obj_ill(old)) {
3789 if (!use_local_bitmaps || 3881 if (!use_local_bitmaps ||
3790 !_par_scan_state->alloc_buffer(alloc_purpose)->mark(obj_ptr)) { 3882 !_par_scan_state->alloc_buffer(alloc_purpose)->mark(obj_ptr)) {
3791 // if we couldn't mark it on the local bitmap (this happens when 3883 // if we couldn't mark it on the local bitmap (this happens when
3803 size_t* surv_young_words = _par_scan_state->surviving_young_words(); 3895 size_t* surv_young_words = _par_scan_state->surviving_young_words();
3804 surv_young_words[young_index] += word_sz; 3896 surv_young_words[young_index] += word_sz;
3805 3897
3806 if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) { 3898 if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
3807 arrayOop(old)->set_length(0); 3899 arrayOop(old)->set_length(0);
3808 _par_scan_state->push_on_queue((oop*) ((intptr_t)old | G1_PARTIAL_ARRAY_MASK)); 3900 _par_scan_state->push_on_queue(set_partial_array_mask(old));
3809 } else { 3901 } else {
3810 _scanner->set_region(_g1->heap_region_containing(obj)); 3902 // No point in using the slower heap_region_containing() method,
3903 // given that we know obj is in the heap.
3904 _scanner->set_region(_g1->heap_region_containing_raw(obj));
3811 obj->oop_iterate_backwards(_scanner); 3905 obj->oop_iterate_backwards(_scanner);
3812 } 3906 }
3813 } else { 3907 } else {
3814 _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz); 3908 _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
3815 obj = forward_ptr; 3909 obj = forward_ptr;
3816 } 3910 }
3817 return obj; 3911 return obj;
3818 } 3912 }
3819 3913
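
For reference, the stall the comment above describes comes from the shape of the generic oopDesc::incr_age(), which begins by re-loading the mark word of the object it is called on; paraphrased (not quoted):

    // Paraphrase of oopDesc::incr_age(); calling it on the freshly copied obj
    // would re-read the new copy's mark word, which is the load that stalls.
    void oopDesc::incr_age() {
      assert(!is_forwarded(), "Attempt to increment age of forwarded mark");
      if (has_displaced_mark()) {
        set_displaced_mark(displaced_mark()->incr_age());
      } else {
        set_mark(mark()->incr_age());   // mark() here is the stalling load
      }
    }
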
3820 template<bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee> 3914 template<bool do_gen_barrier, G1Barrier barrier,
3821 void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_forwardee>::do_oop_work(oop* p) { 3915 bool do_mark_forwardee, bool skip_cset_test>
3916 void G1ParCopyClosure<do_gen_barrier, barrier,
3917 do_mark_forwardee, skip_cset_test>::do_oop_work(oop* p) {
3822 oop obj = *p; 3918 oop obj = *p;
3823 assert(barrier != G1BarrierRS || obj != NULL, 3919 assert(barrier != G1BarrierRS || obj != NULL,
3824 "Precondition: G1BarrierRS implies obj is nonNull"); 3920 "Precondition: G1BarrierRS implies obj is nonNull");
3825 3921
3826 if (obj != NULL) { 3922 // The only time we skip the cset test is when we're scanning
3827 if (_g1->obj_in_cs(obj)) { 3923 // references popped from the queue. And we only push on the queue
3924 // references that we know point into the cset, so no point in
3925 // checking again. But we'll leave an assert here for peace of mind.
3926 assert(!skip_cset_test || _g1->obj_in_cs(obj), "invariant");
3927
3928 // here the null check is implicit in the cset_fast_test() test
3929 if (skip_cset_test || _g1->in_cset_fast_test(obj)) {
3828 #if G1_REM_SET_LOGGING 3930 #if G1_REM_SET_LOGGING
3829 gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" into CS.", 3931 gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" "
3830 p, (void*) obj); 3932 "into CS.", p, (void*) obj);
3831 #endif 3933 #endif
3832 if (obj->is_forwarded()) { 3934 if (obj->is_forwarded()) {
3833 *p = obj->forwardee(); 3935 *p = obj->forwardee();
3834 } else { 3936 } else {
3835 *p = copy_to_survivor_space(obj); 3937 *p = copy_to_survivor_space(obj);
3836 } 3938 }
3837 // When scanning the RS, we only care about objs in CS. 3939 // When scanning the RS, we only care about objs in CS.
3838 if (barrier == G1BarrierRS) { 3940 if (barrier == G1BarrierRS) {
3839 _g1_rem->par_write_ref(_from, p, _par_scan_state->queue_num());
3840 }
3841 }
3842 // When scanning moved objs, must look at all oops.
3843 if (barrier == G1BarrierEvac) {
3844 _g1_rem->par_write_ref(_from, p, _par_scan_state->queue_num()); 3941 _g1_rem->par_write_ref(_from, p, _par_scan_state->queue_num());
3845 } 3942 }
3846 3943 }
3847 if (do_gen_barrier) { 3944
3848 par_do_barrier(p); 3945 // When scanning moved objs, must look at all oops.
3849 } 3946 if (barrier == G1BarrierEvac && obj != NULL) {
3850 } 3947 _g1_rem->par_write_ref(_from, p, _par_scan_state->queue_num());
3851 } 3948 }
3852 3949
3853 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(oop* p); 3950 if (do_gen_barrier && obj != NULL) {
3854 3951 par_do_barrier(p);
3855 template <class T> void G1ParScanPartialArrayClosure::process_array_chunk( 3952 }
3953 }
3954
3955 template void G1ParCopyClosure<false, G1BarrierEvac, false, true>::do_oop_work(oop* p);
3956
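
The explicit instantiation above is the skip_cset_test = true variant that deal_with_reference() applies to queue entries; root- and RS-scanning closures keep the test. The wiring is a handful of typedefs in g1OopClosures.hpp, along these lines (illustrative, not quoted from that header):

    // Illustrative only; the actual typedef names and full list live in
    // g1OopClosures.hpp. Roots and remembered-set entries may point anywhere,
    // so they keep the cset test; queue entries were only pushed if they point
    // into the cset, so the draining closure can skip it at compile time.
    typedef G1ParCopyClosure<false, G1BarrierNone, false, false> G1ParScanExtRootClosure;
    typedef G1ParCopyClosure<false, G1BarrierRS,   false, false> G1ParScanHeapRSClosure;
    typedef G1ParCopyClosure<false, G1BarrierEvac, false, true > G1ParScanHeapEvacClosure;
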
3957 template<class T> void G1ParScanPartialArrayClosure::process_array_chunk(
3856 oop obj, int start, int end) { 3958 oop obj, int start, int end) {
3857 // process our set of indices (include header in first chunk) 3959 // process our set of indices (include header in first chunk)
3858 assert(start < end, "invariant"); 3960 assert(start < end, "invariant");
3859 T* const base = (T*)objArrayOop(obj)->base(); 3961 T* const base = (T*)objArrayOop(obj)->base();
3860 T* const start_addr = base + start; 3962 T* const start_addr = (start == 0) ? (T*) obj : base + start;
3861 T* const end_addr = base + end; 3963 T* const end_addr = base + end;
3862 MemRegion mr((HeapWord*)start_addr, (HeapWord*)end_addr); 3964 MemRegion mr((HeapWord*)start_addr, (HeapWord*)end_addr);
3863 _scanner.set_region(_g1->heap_region_containing(obj)); 3965 _scanner.set_region(_g1->heap_region_containing(obj));
3864 obj->oop_iterate(&_scanner, mr); 3966 obj->oop_iterate(&_scanner, mr);
3865 } 3967 }
3866 3968
3867 void G1ParScanPartialArrayClosure::do_oop_nv(oop* p) { 3969 void G1ParScanPartialArrayClosure::do_oop_nv(oop* p) {
3868 assert(!UseCompressedOops, "Needs to be fixed to work with compressed oops"); 3970 assert(!UseCompressedOops, "Needs to be fixed to work with compressed oops");
3869 oop old = oop((intptr_t)p & ~G1_PARTIAL_ARRAY_MASK); 3971 assert(has_partial_array_mask(p), "invariant");
3972 oop old = clear_partial_array_mask(p);
3870 assert(old->is_objArray(), "must be obj array"); 3973 assert(old->is_objArray(), "must be obj array");
3871 assert(old->is_forwarded(), "must be forwarded"); 3974 assert(old->is_forwarded(), "must be forwarded");
3872 assert(Universe::heap()->is_in_reserved(old), "must be in heap."); 3975 assert(Universe::heap()->is_in_reserved(old), "must be in heap.");
3873 3976
3874 objArrayOop obj = objArrayOop(old->forwardee()); 3977 objArrayOop obj = objArrayOop(old->forwardee());
3882 if (remainder > 2 * ParGCArrayScanChunk) { 3985 if (remainder > 2 * ParGCArrayScanChunk) {
3883 // Test above combines last partial chunk with a full chunk 3986 // Test above combines last partial chunk with a full chunk
3884 end = start + ParGCArrayScanChunk; 3987 end = start + ParGCArrayScanChunk;
3885 arrayOop(old)->set_length(end); 3988 arrayOop(old)->set_length(end);
3886 // Push remainder. 3989 // Push remainder.
3887 _par_scan_state->push_on_queue((oop*) ((intptr_t) old | G1_PARTIAL_ARRAY_MASK)); 3990 _par_scan_state->push_on_queue(set_partial_array_mask(old));
3888 } else { 3991 } else {
3889 // Restore length so that the heap remains parsable in 3992 // Restore length so that the heap remains parsable in
3890 // case of evacuation failure. 3993 // case of evacuation failure.
3891 arrayOop(old)->set_length(end); 3994 arrayOop(old)->set_length(end);
3892 } 3995 }
3893 3996
3894 // process our set of indices (include header in first chunk) 3997 // process our set of indices (include header in first chunk)
3895 process_array_chunk<oop>(obj, start, end); 3998 process_array_chunk<oop>(obj, start, end);
3896 oop* start_addr = start == 0 ? (oop*)obj : obj->obj_at_addr<oop>(start);
3897 oop* end_addr = (oop*)(obj->base()) + end; // obj_at_addr(end) asserts end < length
3898 MemRegion mr((HeapWord*)start_addr, (HeapWord*)end_addr);
3899 _scanner.set_region(_g1->heap_region_containing(obj));
3900 obj->oop_iterate(&_scanner, mr);
3901 } 3999 }
3902 4000
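
Putting the two ends together, copy_to_survivor_space() seeds the chunked scan of a large objArray and the closure above carries it forward, using the old copy's length field as the progress cursor. A small worked trace, assuming ParGCArrayScanChunk = 50 and an array of length 180:

    // Assumed chunk size for illustration: ParGCArrayScanChunk = 50.
    //
    //   copy_to_survivor_space: old->set_length(0); push(set_partial_array_mask(old))
    //   pop 1: start =   0, remainder = 180 > 100  -> end =  50, set_length(50),
    //          re-push, scan chunk [0, 50) including the header
    //   pop 2: start =  50, remainder = 130 > 100  -> end = 100, set_length(100),
    //          re-push, scan chunk [50, 100)
    //   pop 3: start = 100, remainder =  80 <= 100 -> end = 180, restore length,
    //          scan chunk [100, 180)   // last partial chunk merged with final chunk
    //
    // Truncating and then restoring the length keeps the heap parsable if an
    // evacuation failure interrupts the scan part-way through.
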
3903 int G1ScanAndBalanceClosure::_nq = 0; 4001 int G1ScanAndBalanceClosure::_nq = 0;
3904 4002
3905 class G1ParEvacuateFollowersClosure : public VoidClosure { 4003 class G1ParEvacuateFollowersClosure : public VoidClosure {
3929 IF_G1_DETAILED_STATS(pss->note_steal_attempt()); 4027 IF_G1_DETAILED_STATS(pss->note_steal_attempt());
3930 if (queues()->steal(pss->queue_num(), 4028 if (queues()->steal(pss->queue_num(),
3931 pss->hash_seed(), 4029 pss->hash_seed(),
3932 ref_to_scan)) { 4030 ref_to_scan)) {
3933 IF_G1_DETAILED_STATS(pss->note_steal()); 4031 IF_G1_DETAILED_STATS(pss->note_steal());
4032
4033 // slightly paranoid tests; I'm trying to catch potential
4034 // problems before we go into push_on_queue to know where the
4035 // problem is coming from
4036 assert(ref_to_scan != NULL, "invariant");
4037 assert(has_partial_array_mask(ref_to_scan) ||
4038 _g1h->obj_in_cs(*ref_to_scan), "invariant");
3934 pss->push_on_queue(ref_to_scan); 4039 pss->push_on_queue(ref_to_scan);
3935 continue; 4040 continue;
3936 } 4041 }
3937 pss->start_term_time(); 4042 pss->start_term_time();
3938 if (terminator()->offer_termination()) break; 4043 if (terminator()->offer_termination()) break;
3974 4079
3975 void work(int i) { 4080 void work(int i) {
3976 ResourceMark rm; 4081 ResourceMark rm;
3977 HandleMark hm; 4082 HandleMark hm;
3978 4083
3979 G1ParScanThreadState pss(_g1h, i); 4084 G1ParScanThreadState pss(_g1h, i);
3980 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss); 4085 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss);
3981 G1ParScanHeapEvacClosure evac_failure_cl(_g1h, &pss); 4086 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss);
3982 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss); 4087 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss);
3983 4088
3984 pss.set_evac_closure(&scan_evac_cl); 4089 pss.set_evac_closure(&scan_evac_cl);
3985 pss.set_evac_failure_closure(&evac_failure_cl); 4090 pss.set_evac_failure_closure(&evac_failure_cl);
3986 pss.set_partial_scan_closure(&partial_scan_cl); 4091 pss.set_partial_scan_closure(&partial_scan_cl);
3987 4092