comparison src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @ 1973:631f79e71e90

6974966: G1: unnecessary direct-to-old allocations
Summary: This change revamps the slow allocation path of G1. Improvements include the following:
a) Allocations directly to old regions are now totally banned. G1 now only allows allocations out of young regions (with the only exception being humongous regions).
b) The thread that allocates a new region (which is now guaranteed to be young) does not dirty all its cards. Each thread that successfully allocates out of a young region is now responsible for dirtying the cards corresponding to the "block" that just got allocated.
c) allocate_new_tlab() and mem_allocate() are now implemented differently and TLAB allocations are only done by allocate_new_tlab().
d) If a thread schedules an evacuation pause in order to satisfy an allocation request, it will perform the allocation at the end of the safepoint so that the thread that initiated the GC also gets "first pick" of any space made available by the GC.
e) If a thread is unable to allocate a humongous object it will schedule an evacuation pause in case it reclaims enough regions so that the humongous allocation can be satisfied afterwards.
f) The G1 policy is more careful to set the young list target length to be the survivor number + 1.
g) Lots of code tidy up, removal, and refactoring to make future changes easier.
Reviewed-by: johnc, ysr
author tonyp
date Tue, 24 Aug 2010 17:24:33 -0400
parents f95d63e2154a
children fd1d227ef1b9
1972:f95d63e2154a 1973:631f79e71e90
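Point (b) above is the card-table change: instead of the allocating thread dirtying every card of a freshly allocated young region, each thread dirties just the cards that cover the block it allocated (dirty_young_block() in the new code). A minimal standalone sketch of that idea, assuming a byte-per-card table and a 512-byte card size (both values are assumptions of this model, not taken from the patch):

#include <cstddef>
#include <cstdint>

// Minimal model of a card table: one byte per 512-byte card (assumed size).
static const size_t  kCardShift = 9;             // 2^9 = 512 bytes per card
static uint8_t       card_table[1 << 20];        // covers a small model heap
static const uint8_t kDirtyCard = 0;             // 0 means "dirty" in this model

// Dirty only the cards spanned by [start, start + byte_size), i.e. the block
// this thread just allocated, instead of dirtying a whole region up front.
void dirty_young_block(const char* heap_base, const char* start, size_t byte_size) {
  size_t first_card = (size_t)(start - heap_base) >> kCardShift;
  size_t last_card  = (size_t)(start + byte_size - 1 - heap_base) >> kCardShift;
  for (size_t c = first_card; c <= last_card; ++c) {
    card_table[c] = kDirtyCard;
  }
}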
56 // This file is under construction. Search for "FIXME". 56 // This file is under construction. Search for "FIXME".
57 57
58 // INVARIANTS/NOTES 58 // INVARIANTS/NOTES
59 // 59 //
60 // All allocation activity covered by the G1CollectedHeap interface is 60 // All allocation activity covered by the G1CollectedHeap interface is
61 // serialized by acquiring the HeapLock. This happens in 61 // serialized by acquiring the HeapLock. This happens in mem_allocate
62 // mem_allocate_work, which all such allocation functions call. 62 // and allocate_new_tlab, which are the "entry" points to the
63 // (Note that this does not apply to TLAB allocation, which is not part 63 // allocation code from the rest of the JVM. (Note that this does not
64 // of this interface: it is done by clients of this interface.) 64 // apply to TLAB allocation, which is not part of this interface: it
65 // is done by clients of this interface.)
65 66
66 // Local to this file. 67 // Local to this file.
67 68
68 class RefineCardTableEntryClosure: public CardTableEntryClosure { 69 class RefineCardTableEntryClosure: public CardTableEntryClosure {
69 SuspendibleThreadSet* _sts; 70 SuspendibleThreadSet* _sts;
534 } 535 }
535 536
536 // If could fit into free regions w/o expansion, try. 537 // If could fit into free regions w/o expansion, try.
537 // Otherwise, if can expand, do so. 538 // Otherwise, if can expand, do so.
538 // Otherwise, if using ex regions might help, try with ex given back. 539 // Otherwise, if using ex regions might help, try with ex given back.
539 HeapWord* G1CollectedHeap::humongousObjAllocate(size_t word_size) { 540 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
541 assert_heap_locked_or_at_safepoint();
540 assert(regions_accounted_for(), "Region leakage!"); 542 assert(regions_accounted_for(), "Region leakage!");
541 543
542 // We can't allocate H regions while cleanupComplete is running, since 544 // We can't allocate humongous regions while cleanupComplete is
543 // some of the regions we find to be empty might not yet be added to the 545 // running, since some of the regions we find to be empty might not
544 // unclean list. (If we're already at a safepoint, this call is 546 // yet be added to the unclean list. If we're already at a
545 // unnecessary, not to mention wrong.) 547 // safepoint, this call is unnecessary, not to mention wrong.
546 if (!SafepointSynchronize::is_at_safepoint()) 548 if (!SafepointSynchronize::is_at_safepoint()) {
547 wait_for_cleanup_complete(); 549 wait_for_cleanup_complete();
550 }
548 551
549 size_t num_regions = 552 size_t num_regions =
550 round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords; 553 round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
551 554
552 // Special case if < one region??? 555 // Special case if < one region???
553 556
554 // Remember the ft size. 557 // Remember the ft size.
555 size_t x_size = expansion_regions(); 558 size_t x_size = expansion_regions();
596 } 599 }
597 assert(regions_accounted_for(), "Region Leakage"); 600 assert(regions_accounted_for(), "Region Leakage");
598 return res; 601 return res;
599 } 602 }
600 603
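For reference, humongous_obj_allocate() above sizes the request in whole regions: the word size is rounded up to a multiple of HeapRegion::GrainWords and divided by it. A tiny worked example of that computation, assuming 1M regions and a 64-bit HeapWordSize of 8 (both are assumptions; the real GrainWords depends on the configured region size):

#include <cstddef>
#include <cstdio>

int main() {
  const size_t HeapWordSize = 8;                  // assumed 64-bit HotSpot
  const size_t GrainBytes   = 1024 * 1024;        // assumed 1M regions
  const size_t GrainWords   = GrainBytes / HeapWordSize;

  size_t word_size   = 300000;                    // a humongous request
  // Equivalent of round_to(word_size, GrainWords) / GrainWords
  size_t num_regions = (word_size + GrainWords - 1) / GrainWords;

  printf("%zu words -> %zu region(s) of %zu words each\n",
         word_size, num_regions, GrainWords);     // 300000 words -> 3 region(s)
  return 0;
}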
604 void
605 G1CollectedHeap::retire_cur_alloc_region(HeapRegion* cur_alloc_region) {
606 // The cleanup operation might update _summary_bytes_used
607 // concurrently with this method. So, right now, if we don't wait
608 // for it to complete, updates to _summary_bytes_used might get
609 // lost. This will be resolved in the near future when the operation
610 // of the free region list is revamped as part of CR 6977804.
611 wait_for_cleanup_complete();
612
613 retire_cur_alloc_region_common(cur_alloc_region);
614 assert(_cur_alloc_region == NULL, "post-condition");
615 }
616
617 // See the comment in the .hpp file about the locking protocol and
618 // assumptions of this method (and other related ones).
601 HeapWord* 619 HeapWord*
602 G1CollectedHeap::attempt_allocation_slow(size_t word_size, 620 G1CollectedHeap::replace_cur_alloc_region_and_allocate(size_t word_size,
603 bool permit_collection_pause) { 621 bool at_safepoint,
604 HeapWord* res = NULL; 622 bool do_dirtying) {
605 HeapRegion* allocated_young_region = NULL; 623 assert_heap_locked_or_at_safepoint();
606 624 assert(_cur_alloc_region == NULL,
607 assert( SafepointSynchronize::is_at_safepoint() || 625 "replace_cur_alloc_region_and_allocate() should only be called "
608 Heap_lock->owned_by_self(), "pre condition of the call" ); 626 "after retiring the previous current alloc region");
609 627 assert(SafepointSynchronize::is_at_safepoint() == at_safepoint,
610 if (isHumongous(word_size)) { 628 "at_safepoint and is_at_safepoint() should be a tautology");
611 // Allocation of a humongous object can, in a sense, complete a 629
612 // partial region, if the previous alloc was also humongous, and 630 if (!g1_policy()->is_young_list_full()) {
613 // caused the test below to succeed. 631 if (!at_safepoint) {
614 if (permit_collection_pause) 632 // The cleanup operation might update _summary_bytes_used
615 do_collection_pause_if_appropriate(word_size); 633 // concurrently with this method. So, right now, if we don't
616 res = humongousObjAllocate(word_size); 634 // wait for it to complete, updates to _summary_bytes_used might
617 assert(_cur_alloc_region == NULL 635 // get lost. This will be resolved in the near future when the
618 || !_cur_alloc_region->isHumongous(), 636 // operation of the free region list is revamped as part of
619 "Prevent a regression of this bug."); 637 // CR 6977804. If we're already at a safepoint, this call is
620 638 // unnecessary, not to mention wrong.
639 wait_for_cleanup_complete();
640 }
641
642 HeapRegion* new_cur_alloc_region = newAllocRegion(word_size,
643 false /* zero_filled */);
644 if (new_cur_alloc_region != NULL) {
645 assert(new_cur_alloc_region->is_empty(),
646 "the newly-allocated region should be empty, "
647 "as right now we only allocate new regions out of the free list");
648 g1_policy()->update_region_num(true /* next_is_young */);
649 _summary_bytes_used -= new_cur_alloc_region->used();
650 set_region_short_lived_locked(new_cur_alloc_region);
651
652 assert(!new_cur_alloc_region->isHumongous(),
653 "Catch a regression of this bug.");
654
655 // We need to ensure that the stores to _cur_alloc_region and,
656 // subsequently, to top do not float above the setting of the
657 // young type.
658 OrderAccess::storestore();
659
660 // Now allocate out of the new current alloc region. We could
661 // have re-used allocate_from_cur_alloc_region() but its
662 // operation is slightly different to what we need here. First,
663 // allocate_from_cur_alloc_region() is only called outside a
664 // safepoint and will always unlock the Heap_lock if it returns
665 // a non-NULL result. Second, it assumes that the current alloc
666 // region is what's already assigned in _cur_alloc_region. What
667 // we want here is to actually do the allocation first before we
668 // assign the new region to _cur_alloc_region. This ordering is
669 // not currently important, but it will be essential when we
670 // change the code to support CAS allocation in the future (see
671 // CR 6994297).
672 //
673 // This allocate method does BOT updates and we don't need them in
674 // the young generation. This will be fixed in the near future by
675 // CR 6994297.
676 HeapWord* result = new_cur_alloc_region->allocate(word_size);
677 assert(result != NULL, "we just allocate out of an empty region "
678 "so allocation should have been successful");
679 assert(is_in(result), "result should be in the heap");
680
681 _cur_alloc_region = new_cur_alloc_region;
682
683 if (!at_safepoint) {
684 Heap_lock->unlock();
685 }
686
687 // do the dirtying, if necessary, after we release the Heap_lock
688 if (do_dirtying) {
689 dirty_young_block(result, word_size);
690 }
691 return result;
692 }
693 }
694
695 assert(_cur_alloc_region == NULL, "we failed to allocate a new current "
696 "alloc region, it should still be NULL");
697 assert_heap_locked_or_at_safepoint();
698 return NULL;
699 }
700
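The comment about OrderAccess::storestore() above captures the ordering requirement: the region must be marked young before the stores to _cur_alloc_region and top can become visible, so a concurrent observer never sees an allocation in a region it still thinks is old. A rough standalone analogue using C++11 atomics (the names and the release/acquire pairing are illustrative, not HotSpot's memory-ordering primitives):

#include <atomic>

struct Region {
  std::atomic<bool> is_young{false};
  // ... allocation state would live here ...
};

static std::atomic<Region*> cur_alloc_region{nullptr};

void install_new_region(Region* r) {
  r->is_young.store(true, std::memory_order_relaxed);
  // Publish the region only after the young flag is set; this release store
  // plays the role of OrderAccess::storestore() in the HotSpot code above.
  cur_alloc_region.store(r, std::memory_order_release);
}

Region* observe_region() {
  // The matching acquire guarantees that a reader that sees the new region
  // also sees is_young == true.
  return cur_alloc_region.load(std::memory_order_acquire);
}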
701 // See the comment in the .hpp file about the locking protocol and
702 // assumptions of this method (and other related ones).
703 HeapWord*
704 G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
705 assert_heap_locked_and_not_at_safepoint();
706 assert(!isHumongous(word_size), "attempt_allocation_slow() should not be "
707 "used for humongous allocations");
708
709 // We will loop while succeeded is false, which means that we tried
710 // to do a collection, but the VM op did not succeed. So, when we
711 // exit the loop, either one of the allocation attempts was
712 // successful, or we succeeded in doing the VM op but it was
713 // unable to allocate after the collection.
714 for (int try_count = 1; /* we'll return or break */; try_count += 1) {
715 bool succeeded = true;
716
717 {
718 // We may have concurrent cleanup working at the time. Wait for
719 // it to complete. In the future we would probably want to make
720 // the concurrent cleanup truly concurrent by decoupling it from
721 // the allocation. This will happen in the near future as part
722 // of CR 6977804 which will revamp the operation of the free
723 // region list. The fact that wait_for_cleanup_complete() will
724 // do a wait() means that we'll give up the Heap_lock. So, it's
725 // possible that when we exit wait_for_cleanup_complete() we
726 // might be able to allocate successfully (since somebody else
727 // might have done a collection meanwhile). So, we'll attempt to
728 // allocate again, just in case. When we make cleanup truly
729 // concurrent with allocation, we should remove this allocation
730 // attempt as it's redundant (we only reach here after an
731 // allocation attempt has been unsuccessful).
732 wait_for_cleanup_complete();
733 HeapWord* result = attempt_allocation(word_size);
734 if (result != NULL) {
735 assert_heap_not_locked();
736 return result;
737 }
738 }
739
740 if (GC_locker::is_active_and_needs_gc()) {
741 // We are locked out of GC because of the GC locker. Right now,
742 // we'll just stall until the GC locker-induced GC
743 // completes. This will be fixed in the near future by extending
744 // the eden while waiting for the GC locker to schedule the GC
745 // (see CR 6994056).
746
747 // If this thread is not in a jni critical section, we stall
748 // the requestor until the critical section has cleared and
749 // GC allowed. When the critical section clears, a GC is
750 // initiated by the last thread exiting the critical section; so
751 // we retry the allocation sequence from the beginning of the loop,
752 // rather than causing more, now probably unnecessary, GC attempts.
753 JavaThread* jthr = JavaThread::current();
754 assert(jthr != NULL, "sanity");
755 if (!jthr->in_critical()) {
756 MutexUnlocker mul(Heap_lock);
757 GC_locker::stall_until_clear();
758
759 // We'll then fall off the end of the ("if GC locker active")
760 // if-statement and retry the allocation further down in the
761 // loop.
762 } else {
763 if (CheckJNICalls) {
764 fatal("Possible deadlock due to allocating while"
765 " in jni critical section");
766 }
767 return NULL;
768 }
769 } else {
770 // We are not locked out. So, let's try to do a GC. The VM op
771 // will retry the allocation before it completes.
772
773 // Read the GC count while holding the Heap_lock
774 unsigned int gc_count_before = SharedHeap::heap()->total_collections();
775
776 Heap_lock->unlock();
777
778 HeapWord* result =
779 do_collection_pause(word_size, gc_count_before, &succeeded);
780 assert_heap_not_locked();
781 if (result != NULL) {
782 assert(succeeded, "the VM op should have succeeded");
783
784 // Allocations that take place on VM operations do not do any
785 // card dirtying and we have to do it here.
786 dirty_young_block(result, word_size);
787 return result;
788 }
789
790 Heap_lock->lock();
791 }
792
793 assert_heap_locked();
794
795 // We can reach here when we were unsuccessful in doing a GC,
796 // because another thread beat us to it, or because we were locked
797 // out of GC due to the GC locker. In either case a new alloc
798 // region might be available so we will retry the allocation.
799 HeapWord* result = attempt_allocation(word_size);
800 if (result != NULL) {
801 assert_heap_not_locked();
802 return result;
803 }
804
805 // So far our attempts to allocate failed. The only time we'll go
806 // around the loop and try again is if we tried to do a GC and the
807 // VM op that we tried to schedule was not successful because
808 // another thread beat us to it. If that happened it's possible
809 // that by the time we grabbed the Heap_lock again and tried to
810 // allocate other threads filled up the young generation, which
811 // means that the allocation attempt after the GC also failed. So,
812 // it's worth trying to schedule another GC pause.
813 if (succeeded) {
814 break;
815 }
816
817 // Give a warning if we seem to be looping forever.
818 if ((QueuedAllocationWarningCount > 0) &&
819 (try_count % QueuedAllocationWarningCount == 0)) {
820 warning("G1CollectedHeap::attempt_allocation_slow() "
821 "retries %d times", try_count);
822 }
823 }
824
825 assert_heap_locked();
826 return NULL;
827 }
828
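One subtle branch in attempt_allocation_slow() above: when the GC locker is active, a thread that is itself inside a JNI critical section must not stall waiting for the locker-induced GC (it could deadlock on itself), so the allocation simply returns NULL; any other thread drops the Heap_lock, stalls until the critical section clears, and retries. A minimal illustration of that decision, with a made-up thread-state struct standing in for the real JavaThread query:

#include <cstddef>

// Stand-ins for the real GC-locker and thread-state queries.
struct ThreadState { bool in_jni_critical; };

enum class SlowPathAction {
  kStallUntilGcLockerClears,   // release Heap_lock, wait, then retry the loop
  kFailAllocation              // returning NULL avoids a self-deadlock
};

SlowPathAction decide_when_gc_locker_active(const ThreadState& self) {
  if (!self.in_jni_critical) {
    // Safe to wait: the last thread leaving its critical section will
    // trigger the GC, after which the allocation loop retries.
    return SlowPathAction::kStallUntilGcLockerClears;
  }
  // We hold a JNI critical section ourselves, so waiting for it to clear
  // would wait on ourselves; give up on the allocation instead.
  return SlowPathAction::kFailAllocation;
}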
829 // See the comment in the .hpp file about the locking protocol and
830 // assumptions of this method (and other related ones).
831 HeapWord*
832 G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
833 bool at_safepoint) {
834 // This is the method that will allocate a humongous object. All
835 // allocation paths that attempt to allocate a humongous object
836 // should eventually reach here. Currently, the only paths are from
837 // mem_allocate() and attempt_allocation_at_safepoint().
838 assert_heap_locked_or_at_safepoint();
839 assert(isHumongous(word_size), "attempt_allocation_humongous() "
840 "should only be used for humongous allocations");
841 assert(SafepointSynchronize::is_at_safepoint() == at_safepoint,
842 "at_safepoint and is_at_safepoint() should be a tautology");
843
844 HeapWord* result = NULL;
845
846 // We will loop while succeeded is false, which means that we tried
847 // to do a collection, but the VM op did not succeed. So, when we
848 // exit the loop, either one of the allocation attempts was
849 // successful, or we succeeded in doing the VM op but it was
850 // unable to allocate after the collection.
851 for (int try_count = 1; /* we'll return or break */; try_count += 1) {
852 bool succeeded = true;
853
854 // Given that humongous objects are not allocated in young
855 // regions, we'll first try to do the allocation without doing a
856 // collection hoping that there's enough space in the heap.
857 result = humongous_obj_allocate(word_size);
858 assert(_cur_alloc_region == NULL || !_cur_alloc_region->isHumongous(),
859 "catch a regression of this bug.");
860 if (result != NULL) {
861 if (!at_safepoint) {
862 // If we're not at a safepoint, unlock the Heap_lock.
863 Heap_lock->unlock();
864 }
865 return result;
866 }
867
868 // If we failed to allocate the humongous object, we should try to
869 // do a collection pause (if we're allowed) in case it reclaims
870 // enough space for the allocation to succeed after the pause.
871 if (!at_safepoint) {
872 // Read the GC count while holding the Heap_lock
873 unsigned int gc_count_before = SharedHeap::heap()->total_collections();
874
875 // If we're allowed to do a collection, we're not at a
876 // safepoint, so it is safe to unlock the Heap_lock.
877 Heap_lock->unlock();
878
879 result = do_collection_pause(word_size, gc_count_before, &succeeded);
880 assert_heap_not_locked();
881 if (result != NULL) {
882 assert(succeeded, "the VM op should have succeeded");
883 return result;
884 }
885
886 // If we get here, the VM operation either did not succeed
887 // (i.e., another thread beat us to it) or it succeeded but
888 // failed to allocate the object.
889
890 // If we're allowed to do a collection, we're not at a
891 // safepoint, so it is safe to lock the Heap_lock.
892 Heap_lock->lock();
893 }
894
895 assert(result == NULL, "otherwise we should have exited the loop earlier");
896
897 // So far our attempts to allocate failed. The only time we'll go
898 // around the loop and try again is if we tried to do a GC and the
899 // VM op that we tried to schedule was not successful because
900 // another thread beat us to it. That way it's possible that some
901 // space was freed up by the thread that successfully scheduled a
902 // GC. So it's worth trying to allocate again.
903 if (succeeded) {
904 break;
905 }
906
907 // Give a warning if we seem to be looping forever.
908 if ((QueuedAllocationWarningCount > 0) &&
909 (try_count % QueuedAllocationWarningCount == 0)) {
910 warning("G1CollectedHeap::attempt_allocation_humongous "
911 "retries %d times", try_count);
912 }
913 }
914
915 assert_heap_locked_or_at_safepoint();
916 return NULL;
917 }
918
919 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
920 bool expect_null_cur_alloc_region) {
921 assert_at_safepoint();
922 assert(_cur_alloc_region == NULL || !expect_null_cur_alloc_region,
923 "The current alloc region should only be non-NULL if we're "
924 "expecting it not to be NULL");
925
926 if (!isHumongous(word_size)) {
927 if (!expect_null_cur_alloc_region) {
928 HeapRegion* cur_alloc_region = _cur_alloc_region;
929 if (cur_alloc_region != NULL) {
930 // This allocate method does BOT updates and we don't need them in
931 // the young generation. This will be fixed in the near future by
932 // CR 6994297.
933 HeapWord* result = cur_alloc_region->allocate(word_size);
934 if (result != NULL) {
935 assert(is_in(result), "result should be in the heap");
936
937 // We will not do any dirtying here. This is guaranteed to be
938 // called during a safepoint and the thread that scheduled the
939 // pause will do the dirtying if we return a non-NULL result.
940 return result;
941 }
942
943 retire_cur_alloc_region_common(cur_alloc_region);
944 }
945 }
946
947 assert(_cur_alloc_region == NULL,
948 "at this point we should have no cur alloc region");
949 return replace_cur_alloc_region_and_allocate(word_size,
950 true, /* at_safepoint */
951 false /* do_dirtying */);
621 } else { 952 } else {
622 // We may have concurrent cleanup working at the time. Wait for it 953 return attempt_allocation_humongous(word_size,
623 // to complete. In the future we would probably want to make the 954 true /* at_safepoint */);
624 // concurrent cleanup truly concurrent by decoupling it from the 955 }
625 // allocation. 956
626 if (!SafepointSynchronize::is_at_safepoint()) 957 ShouldNotReachHere();
627 wait_for_cleanup_complete(); 958 }
628 // If we do a collection pause, this will be reset to a non-NULL 959
629 // value. If we don't, nulling here ensures that we allocate a new 960 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
630 // region below. 961 assert_heap_not_locked_and_not_at_safepoint();
631 if (_cur_alloc_region != NULL) { 962 assert(!isHumongous(word_size), "we do not allow TLABs of humongous size");
632 // We're finished with the _cur_alloc_region. 963
633 // As we're builing (at least the young portion) of the collection 964 Heap_lock->lock();
634 // set incrementally we'll add the current allocation region to 965
635 // the collection set here. 966 // First attempt: try allocating out of the current alloc region or
636 if (_cur_alloc_region->is_young()) { 967 // after replacing the current alloc region.
637 g1_policy()->add_region_to_incremental_cset_lhs(_cur_alloc_region); 968 HeapWord* result = attempt_allocation(word_size);
638 } 969 if (result != NULL) {
639 _summary_bytes_used += _cur_alloc_region->used(); 970 assert_heap_not_locked();
640 _cur_alloc_region = NULL; 971 return result;
641 } 972 }
642 assert(_cur_alloc_region == NULL, "Invariant."); 973
643 // Completion of a heap region is perhaps a good point at which to do 974 assert_heap_locked();
644 // a collection pause. 975
645 if (permit_collection_pause) 976 // Second attempt: go into the even slower path where we might
646 do_collection_pause_if_appropriate(word_size); 977 // try to schedule a collection.
647 // Make sure we have an allocation region available. 978 result = attempt_allocation_slow(word_size);
648 if (_cur_alloc_region == NULL) { 979 if (result != NULL) {
649 if (!SafepointSynchronize::is_at_safepoint()) 980 assert_heap_not_locked();
650 wait_for_cleanup_complete(); 981 return result;
651 bool next_is_young = should_set_young_locked(); 982 }
652 // If the next region is not young, make sure it's zero-filled. 983
653 _cur_alloc_region = newAllocRegion(word_size, !next_is_young); 984 assert_heap_locked();
654 if (_cur_alloc_region != NULL) { 985 Heap_lock->unlock();
655 _summary_bytes_used -= _cur_alloc_region->used(); 986 return NULL;
656 if (next_is_young) {
657 set_region_short_lived_locked(_cur_alloc_region);
658 allocated_young_region = _cur_alloc_region;
659 }
660 }
661 }
662 assert(_cur_alloc_region == NULL || !_cur_alloc_region->isHumongous(),
663 "Prevent a regression of this bug.");
664
665 // Now retry the allocation.
666 if (_cur_alloc_region != NULL) {
667 if (allocated_young_region != NULL) {
668 // We need to ensure that the store to top does not
669 // float above the setting of the young type.
670 OrderAccess::storestore();
671 }
672 res = _cur_alloc_region->allocate(word_size);
673 }
674 }
675
676 // NOTE: fails frequently in PRT
677 assert(regions_accounted_for(), "Region leakage!");
678
679 if (res != NULL) {
680 if (!SafepointSynchronize::is_at_safepoint()) {
681 assert( permit_collection_pause, "invariant" );
682 assert( Heap_lock->owned_by_self(), "invariant" );
683 Heap_lock->unlock();
684 }
685
686 if (allocated_young_region != NULL) {
687 HeapRegion* hr = allocated_young_region;
688 HeapWord* bottom = hr->bottom();
689 HeapWord* end = hr->end();
690 MemRegion mr(bottom, end);
691 ((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr);
692 }
693 }
694
695 assert( SafepointSynchronize::is_at_safepoint() ||
696 (res == NULL && Heap_lock->owned_by_self()) ||
697 (res != NULL && !Heap_lock->owned_by_self()),
698 "post condition of the call" );
699
700 return res;
701 } 987 }
702 988
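allocate_new_tlab() above also illustrates the locking contract these entry points rely on: the caller takes the Heap_lock, and every allocation attempt that returns a non-NULL result releases the lock itself, so the lock is only still held on the failure paths (which is why the method ends with an explicit unlock before returning NULL). A small standalone model of that "unlock on success" convention, with std::mutex standing in for Heap_lock:

#include <cstddef>
#include <mutex>
#include <new>

static std::mutex heap_lock;   // stand-in for Heap_lock

// Models attempt_allocation(): must be entered with heap_lock held. On
// success it releases the lock before returning; on failure the caller
// still owns the lock and decides what to do next.
static void* attempt_allocation_model(size_t byte_size, bool simulate_success) {
  if (simulate_success) {
    void* result = ::operator new(byte_size);  // stand-in for a real heap allocation
    heap_lock.unlock();                        // success: return with the lock released
    return result;
  }
  return NULL;                                 // failure: lock is still held
}

void* allocate_new_tlab_model(size_t byte_size) {
  heap_lock.lock();
  // First attempt: pretend the fast path fails so we fall through.
  if (void* r = attempt_allocation_model(byte_size, false)) {
    return r;                                  // lock already released by the callee
  }
  // Second, slower attempt: pretend it succeeds.
  if (void* r = attempt_allocation_model(byte_size, true)) {
    return r;                                  // again returned with the lock released
  }
  heap_lock.unlock();                          // all attempts failed: release and give up
  return NULL;
}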
703 HeapWord* 989 HeapWord*
704 G1CollectedHeap::mem_allocate(size_t word_size, 990 G1CollectedHeap::mem_allocate(size_t word_size,
705 bool is_noref, 991 bool is_noref,
706 bool is_tlab, 992 bool is_tlab,
707 bool* gc_overhead_limit_was_exceeded) { 993 bool* gc_overhead_limit_was_exceeded) {
708 debug_only(check_for_valid_allocation_state()); 994 assert_heap_not_locked_and_not_at_safepoint();
709 assert(no_gc_in_progress(), "Allocation during gc not allowed"); 995 assert(!is_tlab, "mem_allocate() should not be called directly "
710 HeapWord* result = NULL; 996 "to allocate TLABs");
711 997
712 // Loop until the allocation is satisfied, 998 // Loop until the allocation is satisfied,
713 // or unsatisfied after GC. 999 // or unsatisfied after GC.
714 for (int try_count = 1; /* return or throw */; try_count += 1) { 1000 for (int try_count = 1; /* we'll return */; try_count += 1) {
715 int gc_count_before; 1001 unsigned int gc_count_before;
716 { 1002 {
717 Heap_lock->lock(); 1003 Heap_lock->lock();
718 result = attempt_allocation(word_size); 1004
719 if (result != NULL) { 1005 if (!isHumongous(word_size)) {
720 // attempt_allocation should have unlocked the heap lock 1006 // First attempt: try allocating out of the current alloc
721 assert(is_in(result), "result not in heap"); 1007 // region or after replacing the current alloc region.
722 return result; 1008 HeapWord* result = attempt_allocation(word_size);
723 } 1009 if (result != NULL) {
1010 assert_heap_not_locked();
1011 return result;
1012 }
1013
1014 assert_heap_locked();
1015
1016 // Second attempt: go into the even slower path where we might
1017 // try to schedule a collection.
1018 result = attempt_allocation_slow(word_size);
1019 if (result != NULL) {
1020 assert_heap_not_locked();
1021 return result;
1022 }
1023 } else {
1024 HeapWord* result = attempt_allocation_humongous(word_size,
1025 false /* at_safepoint */);
1026 if (result != NULL) {
1027 assert_heap_not_locked();
1028 return result;
1029 }
1030 }
1031
1032 assert_heap_locked();
724 // Read the gc count while the heap lock is held. 1033 // Read the gc count while the heap lock is held.
725 gc_count_before = SharedHeap::heap()->total_collections(); 1034 gc_count_before = SharedHeap::heap()->total_collections();
1035 // We cannot be at a safepoint, so it is safe to unlock the Heap_lock
726 Heap_lock->unlock(); 1036 Heap_lock->unlock();
727 } 1037 }
728 1038
729 // Create the garbage collection operation... 1039 // Create the garbage collection operation...
730 VM_G1CollectForAllocation op(word_size, 1040 VM_G1CollectForAllocation op(gc_count_before, word_size);
731 gc_count_before);
732
733 // ...and get the VM thread to execute it. 1041 // ...and get the VM thread to execute it.
734 VMThread::execute(&op); 1042 VMThread::execute(&op);
735 if (op.prologue_succeeded()) { 1043
736 result = op.result(); 1044 assert_heap_not_locked();
737 assert(result == NULL || is_in(result), "result not in heap"); 1045 if (op.prologue_succeeded() && op.pause_succeeded()) {
1046 // If the operation was successful we'll return the result even
1047 // if it is NULL. If the allocation attempt failed immediately
1048 // after a Full GC, it's unlikely we'll be able to allocate now.
1049 HeapWord* result = op.result();
1050 if (result != NULL && !isHumongous(word_size)) {
1051 // Allocations that take place on VM operations do not do any
1052 // card dirtying and we have to do it here. We only have to do
1053 // this for non-humongous allocations, though.
1054 dirty_young_block(result, word_size);
1055 }
738 return result; 1056 return result;
1057 } else {
1058 assert(op.result() == NULL,
1059 "the result should be NULL if the VM op did not succeed");
739 } 1060 }
740 1061
741 // Give a warning if we seem to be looping forever. 1062 // Give a warning if we seem to be looping forever.
742 if ((QueuedAllocationWarningCount > 0) && 1063 if ((QueuedAllocationWarningCount > 0) &&
743 (try_count % QueuedAllocationWarningCount == 0)) { 1064 (try_count % QueuedAllocationWarningCount == 0)) {
744 warning("G1CollectedHeap::mem_allocate_work retries %d times", 1065 warning("G1CollectedHeap::mem_allocate retries %d times", try_count);
745 try_count); 1066 }
746 } 1067 }
747 } 1068
1069 ShouldNotReachHere();
748 } 1070 }
749 1071
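The new mem_allocate() above follows a fixed handshake with the VM thread: read total_collections() while still holding the Heap_lock (so the failed allocation and the GC count are consistent), drop the lock, submit a VM_G1CollectForAllocation op, and retry the whole loop if the op was skipped because another GC ran in between. A stripped-down standalone model of that handshake, with std::mutex and a plain counter standing in for Heap_lock and the real VM-op machinery:

#include <cstddef>
#include <mutex>

static std::mutex heap_lock;            // stand-in for Heap_lock
static unsigned int collections = 0;    // stand-in for total_collections()

static unsigned int total_collections_model() { return collections; }

// Models VM_G1CollectForAllocation: the op only runs if no other GC has
// happened since gc_count_before was read; otherwise it reports failure.
static void* collect_for_allocation_model(size_t /*word_size*/,
                                          unsigned int gc_count_before,
                                          bool* succeeded) {
  std::lock_guard<std::mutex> lg(heap_lock);
  if (total_collections_model() != gc_count_before) {
    *succeeded = false;                 // another thread's GC beat us to it
    return NULL;
  }
  ++collections;                        // "do the GC"
  *succeeded = true;
  return NULL;                          // post-GC allocation result (may be NULL)
}

void* mem_allocate_model(size_t word_size) {
  for (;;) {
    unsigned int gc_count_before;
    {
      std::lock_guard<std::mutex> lg(heap_lock);
      // Fast-path allocation attempts would go here, under the lock.
      gc_count_before = total_collections_model();   // read while holding the lock
    }                                                // drop the lock before the VM op
    bool succeeded = false;
    void* result = collect_for_allocation_model(word_size, gc_count_before, &succeeded);
    if (succeeded) {
      return result;                    // NULL here means "failed even after the GC"
    }
    // Stale gc_count_before: some other GC ran meanwhile, so just retry.
  }
}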
750 void G1CollectedHeap::abandon_cur_alloc_region() { 1072 void G1CollectedHeap::abandon_cur_alloc_region() {
751 if (_cur_alloc_region != NULL) { 1073 if (_cur_alloc_region != NULL) {
752 // We're finished with the _cur_alloc_region. 1074 // We're finished with the _cur_alloc_region.
839 _g1->heap_region_par_iterate_chunked(&rebuild_rs, i, 1161 _g1->heap_region_par_iterate_chunked(&rebuild_rs, i,
840 HeapRegion::RebuildRSClaimValue); 1162 HeapRegion::RebuildRSClaimValue);
841 } 1163 }
842 }; 1164 };
843 1165
844 void G1CollectedHeap::do_collection(bool explicit_gc, 1166 bool G1CollectedHeap::do_collection(bool explicit_gc,
845 bool clear_all_soft_refs, 1167 bool clear_all_soft_refs,
846 size_t word_size) { 1168 size_t word_size) {
847 if (GC_locker::check_active_before_gc()) { 1169 if (GC_locker::check_active_before_gc()) {
848 return; // GC is disabled (e.g. JNI GetXXXCritical operation) 1170 return false;
849 } 1171 }
850 1172
851 ResourceMark rm; 1173 ResourceMark rm;
852 1174
853 if (PrintHeapAtGC) { 1175 if (PrintHeapAtGC) {
1045 increment_full_collections_completed(false /* outer */); 1367 increment_full_collections_completed(false /* outer */);
1046 1368
1047 if (PrintHeapAtGC) { 1369 if (PrintHeapAtGC) {
1048 Universe::print_heap_after_gc(); 1370 Universe::print_heap_after_gc();
1049 } 1371 }
1372
1373 return true;
1050 } 1374 }
1051 1375
1052 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) { 1376 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1053 do_collection(true, /* explicit_gc */ 1377 // do_collection() will return whether it succeeded in performing
1054 clear_all_soft_refs, 1378 // the GC. Currently, there is no facility on the
1055 0 /* word_size */); 1379 // do_full_collection() API to notify the caller than the collection
1380 // did not succeed (e.g., because it was locked out by the GC
1381 // locker). So, right now, we'll ignore the return value.
1382 bool dummy = do_collection(true, /* explicit_gc */
1383 clear_all_soft_refs,
1384 0 /* word_size */);
1056 } 1385 }
1057 1386
1058 // This code is mostly copied from TenuredGeneration. 1387 // This code is mostly copied from TenuredGeneration.
1059 void 1388 void
1060 G1CollectedHeap:: 1389 G1CollectedHeap::
1173 } 1502 }
1174 } 1503 }
1175 1504
1176 1505
1177 HeapWord* 1506 HeapWord*
1178 G1CollectedHeap::satisfy_failed_allocation(size_t word_size) { 1507 G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
1179 HeapWord* result = NULL; 1508 bool* succeeded) {
1509 assert(SafepointSynchronize::is_at_safepoint(),
1510 "satisfy_failed_allocation() should only be called at a safepoint");
1511 assert(Thread::current()->is_VM_thread(),
1512 "satisfy_failed_allocation() should only be called by the VM thread");
1513
1514 *succeeded = true;
1515 // Let's attempt the allocation first.
1516 HeapWord* result = attempt_allocation_at_safepoint(word_size,
1517 false /* expect_null_cur_alloc_region */);
1518 if (result != NULL) {
1519 assert(*succeeded, "sanity");
1520 return result;
1521 }
1180 1522
1181 // In a G1 heap, we're supposed to keep allocation from failing by 1523 // In a G1 heap, we're supposed to keep allocation from failing by
1182 // incremental pauses. Therefore, at least for now, we'll favor 1524 // incremental pauses. Therefore, at least for now, we'll favor
1183 // expansion over collection. (This might change in the future if we can 1525 // expansion over collection. (This might change in the future if we can
1184 // do something smarter than full collection to satisfy a failed alloc.) 1526 // do something smarter than full collection to satisfy a failed alloc.)
1185
1186 result = expand_and_allocate(word_size); 1527 result = expand_and_allocate(word_size);
1187 if (result != NULL) { 1528 if (result != NULL) {
1188 assert(is_in(result), "result not in heap"); 1529 assert(*succeeded, "sanity");
1189 return result; 1530 return result;
1190 } 1531 }
1191 1532
1192 // OK, I guess we have to try collection. 1533 // Expansion didn't work, we'll try to do a Full GC.
1193 1534 bool gc_succeeded = do_collection(false, /* explicit_gc */
1194 do_collection(false, false, word_size); 1535 false, /* clear_all_soft_refs */
1195 1536 word_size);
1196 result = attempt_allocation(word_size, /*permit_collection_pause*/false); 1537 if (!gc_succeeded) {
1197 1538 *succeeded = false;
1539 return NULL;
1540 }
1541
1542 // Retry the allocation
1543 result = attempt_allocation_at_safepoint(word_size,
1544 true /* expect_null_cur_alloc_region */);
1198 if (result != NULL) { 1545 if (result != NULL) {
1199 assert(is_in(result), "result not in heap"); 1546 assert(*succeeded, "sanity");
1200 return result; 1547 return result;
1201 } 1548 }
1202 1549
1203 // Try collecting soft references. 1550 // Then, try a Full GC that will collect all soft references.
1204 do_collection(false, true, word_size); 1551 gc_succeeded = do_collection(false, /* explicit_gc */
1205 result = attempt_allocation(word_size, /*permit_collection_pause*/false); 1552 true, /* clear_all_soft_refs */
1553 word_size);
1554 if (!gc_succeeded) {
1555 *succeeded = false;
1556 return NULL;
1557 }
1558
1559 // Retry the allocation once more
1560 result = attempt_allocation_at_safepoint(word_size,
1561 true /* expect_null_cur_alloc_region */);
1206 if (result != NULL) { 1562 if (result != NULL) {
1207 assert(is_in(result), "result not in heap"); 1563 assert(*succeeded, "sanity");
1208 return result; 1564 return result;
1209 } 1565 }
1210 1566
1211 assert(!collector_policy()->should_clear_all_soft_refs(), 1567 assert(!collector_policy()->should_clear_all_soft_refs(),
1212 "Flag should have been handled and cleared prior to this point"); 1568 "Flag should have been handled and cleared prior to this point");
1213 1569
1214 // What else? We might try synchronous finalization later. If the total 1570 // What else? We might try synchronous finalization later. If the total
1215 // space available is large enough for the allocation, then a more 1571 // space available is large enough for the allocation, then a more
1216 // complete compaction phase than we've tried so far might be 1572 // complete compaction phase than we've tried so far might be
1217 // appropriate. 1573 // appropriate.
1574 assert(*succeeded, "sanity");
1218 return NULL; 1575 return NULL;
1219 } 1576 }
1220 1577
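satisfy_failed_allocation() above escalates in a fixed order: retry the allocation, expand the heap and retry, do a Full GC and retry, do a Full GC that also clears soft references and retry, and only then give up; *succeeded only turns false when a Full GC we needed was locked out. The shape of that escalation, compressed into a standalone sketch whose step functions are placeholders for the real G1 methods:

#include <cstddef>

// Placeholder steps; each returns NULL / true to model the worst case.
static void* try_allocate(size_t)                { return NULL; }
static void* expand_and_try_allocate(size_t)     { return NULL; }
static bool  full_gc(bool /*clear_soft_refs*/)   { return true; }  // false == locked out

// The escalation order used by satisfy_failed_allocation(): allocate, expand,
// Full GC, allocate, Full GC clearing soft refs, allocate, then give up.
void* satisfy_failed_allocation_model(size_t word_size, bool* succeeded) {
  *succeeded = true;

  if (void* r = try_allocate(word_size))            return r;
  if (void* r = expand_and_try_allocate(word_size)) return r;

  if (!full_gc(false /* clear_all_soft_refs */)) { *succeeded = false; return NULL; }
  if (void* r = try_allocate(word_size))            return r;

  if (!full_gc(true /* clear_all_soft_refs */))  { *succeeded = false; return NULL; }
  if (void* r = try_allocate(word_size))            return r;

  return NULL;   // *succeeded stays true: the GCs ran but nothing fit
}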
1221 // Attempting to expand the heap sufficiently 1578 // Attempting to expand the heap sufficiently
1222 // to support an allocation of the given "word_size". If 1579 // to support an allocation of the given "word_size". If
1223 // successful, perform the allocation and return the address of the 1580 // successful, perform the allocation and return the address of the
1224 // allocated block, or else "NULL". 1581 // allocated block, or else "NULL".
1225 1582
1226 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) { 1583 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
1584 assert(SafepointSynchronize::is_at_safepoint(),
1585 "expand_and_allocate() should only be called at a safepoint");
1586 assert(Thread::current()->is_VM_thread(),
1587 "expand_and_allocate() should only be called by the VM thread");
1588
1227 size_t expand_bytes = word_size * HeapWordSize; 1589 size_t expand_bytes = word_size * HeapWordSize;
1228 if (expand_bytes < MinHeapDeltaBytes) { 1590 if (expand_bytes < MinHeapDeltaBytes) {
1229 expand_bytes = MinHeapDeltaBytes; 1591 expand_bytes = MinHeapDeltaBytes;
1230 } 1592 }
1231 expand(expand_bytes); 1593 expand(expand_bytes);
1232 assert(regions_accounted_for(), "Region leakage!"); 1594 assert(regions_accounted_for(), "Region leakage!");
1233 HeapWord* result = attempt_allocation(word_size, false /* permit_collection_pause */); 1595
1234 return result; 1596 return attempt_allocation_at_safepoint(word_size,
1597 true /* expect_null_cur_alloc_region */);
1235 } 1598 }
1236 1599
1237 size_t G1CollectedHeap::free_region_if_totally_empty(HeapRegion* hr) { 1600 size_t G1CollectedHeap::free_region_if_totally_empty(HeapRegion* hr) {
1238 size_t pre_used = 0; 1601 size_t pre_used = 0;
1239 size_t cleared_h_regions = 0; 1602 size_t cleared_h_regions = 0;
1840 2203
1841 unsigned int gc_count_before; 2204 unsigned int gc_count_before;
1842 unsigned int full_gc_count_before; 2205 unsigned int full_gc_count_before;
1843 { 2206 {
1844 MutexLocker ml(Heap_lock); 2207 MutexLocker ml(Heap_lock);
2208
2209 // Don't want to do a GC until cleanup is completed. This
2210 // limitation will be removed in the near future when the
2211 // operation of the free region list is revamped as part of
2212 // CR 6977804.
2213 wait_for_cleanup_complete();
2214
1845 // Read the GC count while holding the Heap_lock 2215 // Read the GC count while holding the Heap_lock
1846 gc_count_before = SharedHeap::heap()->total_collections(); 2216 gc_count_before = SharedHeap::heap()->total_collections();
1847 full_gc_count_before = SharedHeap::heap()->total_full_collections(); 2217 full_gc_count_before = SharedHeap::heap()->total_full_collections();
1848
1849 // Don't want to do a GC until cleanup is completed.
1850 wait_for_cleanup_complete();
1851
1852 // We give up heap lock; VMThread::execute gets it back below
1853 } 2218 }
1854 2219
1855 if (should_do_concurrent_full_gc(cause)) { 2220 if (should_do_concurrent_full_gc(cause)) {
1856 // Schedule an initial-mark evacuation pause that will start a 2221 // Schedule an initial-mark evacuation pause that will start a
1857 // concurrent cycle. 2222 // concurrent cycle. We're setting word_size to 0 which means that
2223 // we are not requesting a post-GC allocation.
1858 VM_G1IncCollectionPause op(gc_count_before, 2224 VM_G1IncCollectionPause op(gc_count_before,
1859 true, /* should_initiate_conc_mark */ 2225 0, /* word_size */
2226 true, /* should_initiate_conc_mark */
1860 g1_policy()->max_pause_time_ms(), 2227 g1_policy()->max_pause_time_ms(),
1861 cause); 2228 cause);
1862 VMThread::execute(&op); 2229 VMThread::execute(&op);
1863 } else { 2230 } else {
1864 if (cause == GCCause::_gc_locker 2231 if (cause == GCCause::_gc_locker
1865 DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) { 2232 DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
1866 2233
1867 // Schedule a standard evacuation pause. 2234 // Schedule a standard evacuation pause. We're setting word_size
2235 // to 0 which means that we are not requesting a post-GC allocation.
1868 VM_G1IncCollectionPause op(gc_count_before, 2236 VM_G1IncCollectionPause op(gc_count_before,
2237 0, /* word_size */
1869 false, /* should_initiate_conc_mark */ 2238 false, /* should_initiate_conc_mark */
1870 g1_policy()->max_pause_time_ms(), 2239 g1_policy()->max_pause_time_ms(),
1871 cause); 2240 cause);
1872 VMThread::execute(&op); 2241 VMThread::execute(&op);
1873 } else { 2242 } else {
2217 return max_tlab_size; 2586 return max_tlab_size;
2218 } else { 2587 } else {
2219 return MIN2(MAX2(cur_alloc_space->free(), (size_t)MinTLABSize), 2588 return MIN2(MAX2(cur_alloc_space->free(), (size_t)MinTLABSize),
2220 max_tlab_size); 2589 max_tlab_size);
2221 } 2590 }
2222 }
2223
2224 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
2225 assert(!isHumongous(word_size),
2226 err_msg("a TLAB should not be of humongous size, "
2227 "word_size = "SIZE_FORMAT, word_size));
2228 bool dummy;
2229 return G1CollectedHeap::mem_allocate(word_size, false, true, &dummy);
2230 } 2591 }
2231 2592
2232 bool G1CollectedHeap::allocs_are_zero_filled() { 2593 bool G1CollectedHeap::allocs_are_zero_filled() {
2233 return false; 2594 return false;
2234 } 2595 }
2631 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(), 2992 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
2632 "derived pointer present")); 2993 "derived pointer present"));
2633 // always_do_update_barrier = true; 2994 // always_do_update_barrier = true;
2634 } 2995 }
2635 2996
2636 void G1CollectedHeap::do_collection_pause() { 2997 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
2637 assert(Heap_lock->owned_by_self(), "we assume we'reholding the Heap_lock"); 2998 unsigned int gc_count_before,
2638 2999 bool* succeeded) {
2639 // Read the GC count while holding the Heap_lock 3000 assert_heap_not_locked_and_not_at_safepoint();
2640 // we need to do this _before_ wait_for_cleanup_complete(), to
2641 // ensure that we do not give up the heap lock and potentially
2642 // pick up the wrong count
2643 unsigned int gc_count_before = SharedHeap::heap()->total_collections();
2644
2645 // Don't want to do a GC pause while cleanup is being completed!
2646 wait_for_cleanup_complete();
2647
2648 g1_policy()->record_stop_world_start(); 3001 g1_policy()->record_stop_world_start();
2649 { 3002 VM_G1IncCollectionPause op(gc_count_before,
2650 MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back 3003 word_size,
2651 VM_G1IncCollectionPause op(gc_count_before, 3004 false, /* should_initiate_conc_mark */
2652 false, /* should_initiate_conc_mark */ 3005 g1_policy()->max_pause_time_ms(),
2653 g1_policy()->max_pause_time_ms(), 3006 GCCause::_g1_inc_collection_pause);
2654 GCCause::_g1_inc_collection_pause); 3007 VMThread::execute(&op);
2655 VMThread::execute(&op); 3008
2656 } 3009 HeapWord* result = op.result();
3010 bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
3011 assert(result == NULL || ret_succeeded,
3012 "the result should be NULL if the VM did not succeed");
3013 *succeeded = ret_succeeded;
3014
3015 assert_heap_not_locked();
3016 return result;
2657 } 3017 }
2658 3018
2659 void 3019 void
2660 G1CollectedHeap::doConcurrentMark() { 3020 G1CollectedHeap::doConcurrentMark() {
2661 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); 3021 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2795 task_queue(i)->stats.reset(); 3155 task_queue(i)->stats.reset();
2796 } 3156 }
2797 } 3157 }
2798 #endif // TASKQUEUE_STATS 3158 #endif // TASKQUEUE_STATS
2799 3159
2800 void 3160 bool
2801 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) { 3161 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
2802 if (GC_locker::check_active_before_gc()) { 3162 if (GC_locker::check_active_before_gc()) {
2803 return; // GC is disabled (e.g. JNI GetXXXCritical operation) 3163 return false;
2804 } 3164 }
2805 3165
2806 if (PrintHeapAtGC) { 3166 if (PrintHeapAtGC) {
2807 Universe::print_heap_before_gc(); 3167 Universe::print_heap_before_gc();
2808 } 3168 }
3066 if (G1SummarizeRSetStats && 3426 if (G1SummarizeRSetStats &&
3067 (G1SummarizeRSetStatsPeriod > 0) && 3427 (G1SummarizeRSetStatsPeriod > 0) &&
3068 (total_collections() % G1SummarizeRSetStatsPeriod == 0)) { 3428 (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
3069 g1_rem_set()->print_summary_info(); 3429 g1_rem_set()->print_summary_info();
3070 } 3430 }
3431
3432 return true;
3071 } 3433 }
3072 3434
3073 size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose) 3435 size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose)
3074 { 3436 {
3075 size_t gclab_word_size; 3437 size_t gclab_word_size;
3358 } 3720 }
3359 3721
3360 3722
3361 3723
3362 // *** Sequential G1 Evacuation 3724 // *** Sequential G1 Evacuation
3363
3364 HeapWord* G1CollectedHeap::allocate_during_gc(GCAllocPurpose purpose, size_t word_size) {
3365 HeapRegion* alloc_region = _gc_alloc_regions[purpose];
3366 // let the caller handle alloc failure
3367 if (alloc_region == NULL) return NULL;
3368 assert(isHumongous(word_size) || !alloc_region->isHumongous(),
3369 "Either the object is humongous or the region isn't");
3370 HeapWord* block = alloc_region->allocate(word_size);
3371 if (block == NULL) {
3372 block = allocate_during_gc_slow(purpose, alloc_region, false, word_size);
3373 }
3374 return block;
3375 }
3376 3725
3377 class G1IsAliveClosure: public BoolObjectClosure { 3726 class G1IsAliveClosure: public BoolObjectClosure {
3378 G1CollectedHeap* _g1; 3727 G1CollectedHeap* _g1;
3379 public: 3728 public:
3380 G1IsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {} 3729 G1IsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
4623 heap_region_iterate(&cleanup_verifier); 4972 heap_region_iterate(&cleanup_verifier);
4624 } 4973 }
4625 #endif 4974 #endif
4626 } 4975 }
4627 4976
4628 void G1CollectedHeap::do_collection_pause_if_appropriate(size_t word_size) {
4629 if (g1_policy()->should_do_collection_pause(word_size)) {
4630 do_collection_pause();
4631 }
4632 }
4633
4634 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) { 4977 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
4635 double young_time_ms = 0.0; 4978 double young_time_ms = 0.0;
4636 double non_young_time_ms = 0.0; 4979 double non_young_time_ms = 0.0;
4637 4980
4638 // Since the collection set is a superset of the the young list, 4981 // Since the collection set is a superset of the the young list,
4787 // finish. 5130 // finish.
4788 if (!b) Cleanup_mon->notify_all(); 5131 if (!b) Cleanup_mon->notify_all();
4789 } 5132 }
4790 5133
4791 void G1CollectedHeap::wait_for_cleanup_complete() { 5134 void G1CollectedHeap::wait_for_cleanup_complete() {
5135 assert_not_at_safepoint();
4792 MutexLockerEx x(Cleanup_mon); 5136 MutexLockerEx x(Cleanup_mon);
4793 wait_for_cleanup_complete_locked(); 5137 wait_for_cleanup_complete_locked();
4794 } 5138 }
4795 5139
4796 void G1CollectedHeap::wait_for_cleanup_complete_locked() { 5140 void G1CollectedHeap::wait_for_cleanup_complete_locked() {
5089 n++; 5433 n++;
5090 } 5434 }
5091 size_t m = unclean_region_list_length(); 5435 size_t m = unclean_region_list_length();
5092 ZF_mon->unlock(); 5436 ZF_mon->unlock();
5093 return n + m; 5437 return n + m;
5094 }
5095
5096 bool G1CollectedHeap::should_set_young_locked() {
5097 assert(heap_lock_held_for_gc(),
5098 "the heap lock should already be held by or for this thread");
5099 return (g1_policy()->in_young_gc_mode() &&
5100 g1_policy()->should_add_next_region_to_young_list());
5101 } 5438 }
5102 5439
5103 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) { 5440 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
5104 assert(heap_lock_held_for_gc(), 5441 assert(heap_lock_held_for_gc(),
5105 "the heap lock should already be held by or for this thread"); 5442 "the heap lock should already be held by or for this thread");