graal-jvmci-8: comparison of src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp @ 20804:7848fc12602b
Merge with jdk8u40-b25
author    Gilles Duboscq <gilles.m.duboscq@oracle.com>
date      Tue, 07 Apr 2015 14:58:49 +0200
parents   52b4284cb496 9fa3bf3043a2
children  (none)
comparing 20184:84105dcdb05b (left) with 20804:7848fc12602b (right)
31 #include "gc_implementation/shared/spaceDecorator.hpp" | 31 #include "gc_implementation/shared/spaceDecorator.hpp" |
32 #include "gc_interface/collectedHeap.inline.hpp" | 32 #include "gc_interface/collectedHeap.inline.hpp" |
33 #include "memory/allocation.inline.hpp" | 33 #include "memory/allocation.inline.hpp" |
34 #include "memory/blockOffsetTable.inline.hpp" | 34 #include "memory/blockOffsetTable.inline.hpp" |
35 #include "memory/resourceArea.hpp" | 35 #include "memory/resourceArea.hpp" |
36 #include "memory/space.inline.hpp" | |
36 #include "memory/universe.inline.hpp" | 37 #include "memory/universe.inline.hpp" |
37 #include "oops/oop.inline.hpp" | 38 #include "oops/oop.inline.hpp" |
38 #include "runtime/globals.hpp" | 39 #include "runtime/globals.hpp" |
39 #include "runtime/handles.inline.hpp" | 40 #include "runtime/handles.inline.hpp" |
40 #include "runtime/init.hpp" | 41 #include "runtime/init.hpp" |
41 #include "runtime/java.hpp" | 42 #include "runtime/java.hpp" |
43 #include "runtime/orderAccess.inline.hpp" | |
42 #include "runtime/vmThread.hpp" | 44 #include "runtime/vmThread.hpp" |
43 #include "utilities/copy.hpp" | 45 #include "utilities/copy.hpp" |
44 | 46 |
45 ///////////////////////////////////////////////////////////////////////// | 47 ///////////////////////////////////////////////////////////////////////// |
46 //// CompactibleFreeListSpace | 48 //// CompactibleFreeListSpace |
791 oop(cur)->oop_iterate(cl); | 793 oop(cur)->oop_iterate(cl); |
792 } | 794 } |
793 } | 795 } |
794 } | 796 } |
795 | 797 |
796 // Apply the given closure to each oop in the space \intersect memory region. | |
797 void CompactibleFreeListSpace::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) { | |
798 assert_lock_strong(freelistLock()); | |
799 if (is_empty()) { | |
800 return; | |
801 } | |
802 MemRegion cur = MemRegion(bottom(), end()); | |
803 mr = mr.intersection(cur); | |
804 if (mr.is_empty()) { | |
805 return; | |
806 } | |
807 if (mr.equals(cur)) { | |
808 oop_iterate(cl); | |
809 return; | |
810 } | |
811 assert(mr.end() <= end(), "just took an intersection above"); | |
812 HeapWord* obj_addr = block_start(mr.start()); | |
813 HeapWord* t = mr.end(); | |
814 | |
815 SpaceMemRegionOopsIterClosure smr_blk(cl, mr); | |
816 if (block_is_obj(obj_addr)) { | |
817 // Handle first object specially. | |
818 oop obj = oop(obj_addr); | |
819 obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk)); | |
820 } else { | |
821 FreeChunk* fc = (FreeChunk*)obj_addr; | |
822 obj_addr += fc->size(); | |
823 } | |
824 while (obj_addr < t) { | |
825 HeapWord* obj = obj_addr; | |
826 obj_addr += block_size(obj_addr); | |
827 // If "obj_addr" is not greater than top, then the | |
828 // entire object "obj" is within the region. | |
829 if (obj_addr <= t) { | |
830 if (block_is_obj(obj)) { | |
831 oop(obj)->oop_iterate(cl); | |
832 } | |
833 } else { | |
834 // "obj" extends beyond end of region | |
835 if (block_is_obj(obj)) { | |
836 oop(obj)->oop_iterate(&smr_blk); | |
837 } | |
838 break; | |
839 } | |
840 } | |
841 } | |
842 | |
843 // NOTE: In the following methods, in order to safely be able to | 798 // NOTE: In the following methods, in order to safely be able to |
844 // apply the closure to an object, we need to be sure that the | 799 // apply the closure to an object, we need to be sure that the |
845 // object has been initialized. We are guaranteed that an object | 800 // object has been initialized. We are guaranteed that an object |
846 // is initialized if we are holding the Heap_lock with the | 801 // is initialized if we are holding the Heap_lock with the |
847 // world stopped. | 802 // world stopped. |
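
The NOTE above is the precondition every iterator in this file leans on: a closure may only be applied to a fully initialized object. Purely for illustration (this helper is not part of the changeset), the guarantee can be made explicit as a guard using HotSpot's existing names:

    // Hypothetical sketch: the initialization guarantee the NOTE
    // describes. An object is known to be initialized when we hold
    // the Heap_lock with the world stopped.
    void apply_closure_safely(oop obj, ExtendedOopClosure* cl) {
      assert(SafepointSynchronize::is_at_safepoint() &&
             Heap_lock->owned_by_self(),
             "object may not be fully initialized yet");
      obj->oop_iterate(cl);
    }
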
896 | 851 |
897 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr, | 852 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr, |
898 UpwardsObjectClosure* cl) { | 853 UpwardsObjectClosure* cl) { |
899 assert_locked(freelistLock()); | 854 assert_locked(freelistLock()); |
900 NOT_PRODUCT(verify_objects_initialized()); | 855 NOT_PRODUCT(verify_objects_initialized()); |
901 Space::object_iterate_mem(mr, cl); | 856 assert(!mr.is_empty(), "Should be non-empty"); |
902 } | 857 // We use MemRegion(bottom(), end()) rather than used_region() below |
903 | 858 // because the two are not necessarily equal for some kinds of |
904 // Callers of this iterator beware: The closure application should | 859 // spaces, in particular, certain kinds of free list spaces. |
905 // be robust in the face of uninitialized objects and should (always) | 860 // We could use the more complicated but more precise: |
906 // return a correct size so that the next addr + size below gives us a | 861 // MemRegion(used_region().start(), round_to(used_region().end(), CardSize)) |
907 // valid block boundary. [See for instance, | 862 // but the slight imprecision seems acceptable in the assertion check. |
908 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful() | 863 assert(MemRegion(bottom(), end()).contains(mr), |
909 // in ConcurrentMarkSweepGeneration.cpp.] | 864 "Should be within used space"); |
910 HeapWord* | 865 HeapWord* prev = cl->previous(); // max address from last time |
911 CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) { | 866 if (prev >= mr.end()) { // nothing to do |
912 assert_lock_strong(freelistLock()); | 867 return; |
913 HeapWord *addr, *last; | 868 } |
914 size_t size; | 869 // This assert will not work when we go from cms space to perm |
915 for (addr = bottom(), last = end(); | 870 // space, and use same closure. Easy fix deferred for later. XXX YSR |
916 addr < last; addr += size) { | 871 // assert(prev == NULL || contains(prev), "Should be within space"); |
917 FreeChunk* fc = (FreeChunk*)addr; | 872 |
918 if (fc->is_free()) { | 873 bool last_was_obj_array = false; |
919 // Since we hold the free list lock, which protects direct | 874 HeapWord *blk_start_addr, *region_start_addr; |
920 // allocation in this generation by mutators, a free object | 875 if (prev > mr.start()) { |
921 // will remain free throughout this iteration code. | 876 region_start_addr = prev; |
922 size = fc->size(); | 877 blk_start_addr = prev; |
878 // The previous invocation may have pushed "prev" beyond the | |
879 // last allocated block yet there may still be blocks |
880 // in this region due to a particular coalescing policy. | |
881 // Relax the assertion so that the case where the unallocated | |
882 // block is maintained and "prev" is beyond the unallocated | |
883 // block does not cause the assertion to fire. | |
884 assert((BlockOffsetArrayUseUnallocatedBlock && | |
885 (!is_in(prev))) || | |
886 (blk_start_addr == block_start(region_start_addr)), "invariant"); | |
887 } else { | |
888 region_start_addr = mr.start(); | |
889 blk_start_addr = block_start(region_start_addr); | |
890 } | |
891 HeapWord* region_end_addr = mr.end(); | |
892 MemRegion derived_mr(region_start_addr, region_end_addr); | |
893 while (blk_start_addr < region_end_addr) { | |
894 const size_t size = block_size(blk_start_addr); | |
895 if (block_is_obj(blk_start_addr)) { | |
896 last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr); | |
923 } else { | 897 } else { |
924 // Note that the object need not necessarily be initialized, | 898 last_was_obj_array = false; |
925 // because (for instance) the free list lock does NOT protect | 899 } |
926 // object initialization. The closure application below must | 900 blk_start_addr += size; |
927 // therefore be correct in the face of uninitialized objects. | 901 } |
928 size = cl->do_object_careful(oop(addr)); | 902 if (!last_was_obj_array) { |
929 if (size == 0) { | 903 assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()), |
930 // An unparsable object found. Signal early termination. | 904 "Should be within (closed) used space"); |
931 return addr; | 905 assert(blk_start_addr > prev, "Invariant"); |
932 } | 906 cl->set_previous(blk_start_addr); // min address for next time |
933 } | 907 } |
934 } | 908 } |
935 return NULL; | 909 |
936 } | |
937 | 910 |
938 // Callers of this iterator beware: The closure application should | 911 // Callers of this iterator beware: The closure application should |
939 // be robust in the face of uninitialized objects and should (always) | 912 // be robust in the face of uninitialized objects and should (always) |
940 // return a correct size so that the next addr + size below gives us a | 913 // return a correct size so that the next addr + size below gives us a |
941 // valid block boundary. [See for instance, | 914 // valid block boundary. [See for instance, |
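
The contract in the two comments above is easiest to see from the closure's side: return the exact (adjusted) block size on success, or 0 to make object_iterate_careful stop at the unparsable address. The closure below is a hypothetical example of that contract; only ObjectClosureCareful, do_object_careful, and adjustObjectSize are HotSpot names, the counting logic is invented:

    // Hypothetical "careful" closure: count initialized objects and
    // abort the iteration at the first unparsable one.
    class CountObjectsCarefully : public ObjectClosureCareful {
      size_t _count;
     public:
      CountObjectsCarefully() : _count(0) {}
      size_t count() const { return _count; }
      void do_object(oop obj) { ShouldNotReachHere(); } // unused here
      size_t do_object_careful(oop obj) {
        if (obj->klass_or_null() == NULL) {
          return 0;  // uninitialized: signal early termination
        }
        _count++;
        // Must be the adjusted size so addr + size is the next block
        // boundary in this free list space.
        return CompactibleFreeListSpace::adjustObjectSize(obj->size());
      }
      size_t do_object_careful_m(oop obj, MemRegion mr) {
        return do_object_careful(obj);  // bounded variant, simplified
      }
    };
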
2666 // book-keeping stats | 2639 // book-keeping stats |
2667 void CFLS_LAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) { | 2640 void CFLS_LAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) { |
2668 // Get the #blocks we want to claim | 2641 // Get the #blocks we want to claim |
2669 size_t n_blks = (size_t)_blocks_to_claim[word_sz].average(); | 2642 size_t n_blks = (size_t)_blocks_to_claim[word_sz].average(); |
2670 assert(n_blks > 0, "Error"); | 2643 assert(n_blks > 0, "Error"); |
2671 assert(ResizePLAB || n_blks == OldPLABSize, "Error"); | 2644 assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error"); |
2672 // In some cases, when the application has a phase change, | 2645 // In some cases, when the application has a phase change, |
2673 // there may be a sudden and sharp shift in the object survival | 2646 // there may be a sudden and sharp shift in the object survival |
2674 // profile, and updating the counts at the end of a scavenge | 2647 // profile, and updating the counts at the end of a scavenge |
2675 // may not be quick enough, giving rise to large scavenge pauses | 2648 // may not be quick enough, giving rise to large scavenge pauses |
2676 // during these phase changes. It is beneficial to detect such | 2649 // during these phase changes. It is beneficial to detect such |
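
The _blocks_to_claim[word_sz].average() used above is an exponentially decaying estimate, which is exactly why the comment worries about phase changes: stale history dominates until enough new samples arrive. A minimal sketch of such an estimator (the weight is invented; HotSpot's real AdaptiveWeightedAverage carries more state):

    // Minimal decaying-average sketch; 0.25f is an illustrative weight.
    struct DecayingAverage {
      float _estimate;
      explicit DecayingAverage(float initial) : _estimate(initial) {}
      void sample(float v) {
        // History decays geometrically, so a phase change is absorbed
        // over roughly 1/weight samples rather than instantly.
        const float weight = 0.25f;
        _estimate += weight * (v - _estimate);
      }
      float average() const { return _estimate; }
    };
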
2758 _num_blocks[i] = 0; | 2731 _num_blocks[i] = 0; |
2759 } | 2732 } |
2760 } | 2733 } |
2761 } | 2734 } |
2762 | 2735 |
2763 void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) { | 2736 // Used by par_get_chunk_of_blocks() for the chunks from the |
2764 assert(fl->count() == 0, "Precondition."); | 2737 // indexed_free_lists. Looks for a chunk with size that is a multiple |
2765 assert(word_sz < CompactibleFreeListSpace::IndexSetSize, | 2738 // of "word_sz" and, if found, splits it into "word_sz" chunks, adding |
2766 "Precondition"); | 2739 // them to the free list "fl". "n" is the maximum number of chunks to |
2740 // be added to "fl". | |
2741 bool CompactibleFreeListSpace:: par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) { | |
2767 | 2742 |
2768 // We'll try all multiples of word_sz in the indexed set, starting with | 2743 // We'll try all multiples of word_sz in the indexed set, starting with |
2769 // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples, | 2744 // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples, |
2770 // then try getting a big chunk and splitting it. | 2745 // then try getting a big chunk and splitting it. |
2771 { | 2746 { |
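
The search order described above, paraphrased as a loop skeleton (illustrative only; the real code also takes the per-list lock, carves each hit into word_sz pieces, and updates the census):

    // Try word_sz first, then 2*word_sz, 3*word_sz, ... while still in
    // the indexed-list range; multiples beyond k == 1 are tried only
    // when CMSSplitIndexedFreeListBlocks is set.
    bool found = false;
    for (size_t k = 1, cur_sz = word_sz;
         cur_sz < CompactibleFreeListSpace::IndexSetSize &&
             (k == 1 || CMSSplitIndexedFreeListBlocks) && !found;
         k++, cur_sz = k * word_sz) {
      // Inspect _indexedFreeList[cur_sz]; a chunk found here splits
      // into exactly k blocks of word_sz each, which go onto "fl".
    }
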
2842 size_t num = fl->count(); | 2817 size_t num = fl->count(); |
2843 MutexLockerEx x(_indexedFreeListParLocks[word_sz], | 2818 MutexLockerEx x(_indexedFreeListParLocks[word_sz], |
2844 Mutex::_no_safepoint_check_flag); | 2819 Mutex::_no_safepoint_check_flag); |
2845 ssize_t births = _indexedFreeList[word_sz].split_births() + num; | 2820 ssize_t births = _indexedFreeList[word_sz].split_births() + num; |
2846 _indexedFreeList[word_sz].set_split_births(births); | 2821 _indexedFreeList[word_sz].set_split_births(births); |
2847 return; | 2822 return true; |
2848 } | 2823 } |
2849 } | 2824 } |
2850 } | 2825 return found; |
2851 // Otherwise, we'll split a block from the dictionary. | 2826 } |
2827 } | |
2828 | |
2829 FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) { | |
2830 | |
2852 FreeChunk* fc = NULL; | 2831 FreeChunk* fc = NULL; |
2853 FreeChunk* rem_fc = NULL; | 2832 FreeChunk* rem_fc = NULL; |
2854 size_t rem; | 2833 size_t rem; |
2855 { | 2834 { |
2856 MutexLockerEx x(parDictionaryAllocLock(), | 2835 MutexLockerEx x(parDictionaryAllocLock(), |
2857 Mutex::_no_safepoint_check_flag); | 2836 Mutex::_no_safepoint_check_flag); |
2858 while (n > 0) { | 2837 while (n > 0) { |
2859 fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()), | 2838 fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()), |
2860 FreeBlockDictionary<FreeChunk>::atLeast); | 2839 FreeBlockDictionary<FreeChunk>::atLeast); |
2861 if (fc != NULL) { | 2840 if (fc != NULL) { |
2862 _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */); // update _unallocated_blk | |
2863 dictionary()->dict_census_update(fc->size(), | |
2864 true /*split*/, | |
2865 false /*birth*/); | |
2866 break; | 2841 break; |
2867 } else { | 2842 } else { |
2868 n--; | 2843 n--; |
2869 } | 2844 } |
2870 } | 2845 } |
2871 if (fc == NULL) return; | 2846 if (fc == NULL) return NULL; |
2872 // Otherwise, split up that block. | 2847 // Otherwise, split up that block. |
2873 assert((ssize_t)n >= 1, "Control point invariant"); | 2848 assert((ssize_t)n >= 1, "Control point invariant"); |
2874 assert(fc->is_free(), "Error: should be a free block"); | 2849 assert(fc->is_free(), "Error: should be a free block"); |
2875 _bt.verify_single_block((HeapWord*)fc, fc->size()); | 2850 _bt.verify_single_block((HeapWord*)fc, fc->size()); |
2876 const size_t nn = fc->size() / word_sz; | 2851 const size_t nn = fc->size() / word_sz; |
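
The retry loop in get_n_way_chunk_to_split degrades the request instead of failing outright: if no chunk of at least n * word_sz words exists, it lowers n and asks again. Restated as a stand-alone helper (the helper itself is hypothetical; the dictionary calls are the ones used above):

    // Shrinking-request retry: a fragmented dictionary can still
    // satisfy a smaller multi-block request.
    FreeChunk* try_shrinking_request(FreeBlockDictionary<FreeChunk>* dict,
                                     size_t word_sz, size_t n,
                                     size_t min_size) {
      for (; n > 0; n--) {
        FreeChunk* fc = dict->get_chunk(MAX2(n * word_sz, min_size),
                                        FreeBlockDictionary<FreeChunk>::atLeast);
        if (fc != NULL) return fc;  // room for n blocks (or more)
      }
      return NULL;  // nothing suitable at any multiple
    }
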
2888 // enough to leave a viable remainder. We are unable to | 2863 // enough to leave a viable remainder. We are unable to |
2889 // allocate even one block. Return fc to the | 2864 // allocate even one block. Return fc to the |
2890 // dictionary and return, leaving "fl" empty. | 2865 // dictionary and return, leaving "fl" empty. |
2891 if (n == 0) { | 2866 if (n == 0) { |
2892 returnChunkToDictionary(fc); | 2867 returnChunkToDictionary(fc); |
2893 assert(fl->count() == 0, "We never allocated any blocks"); | 2868 return NULL; |
2894 return; | 2869 } |
2895 } | 2870 |
2871 _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */); // update _unallocated_blk | |
2872 dictionary()->dict_census_update(fc->size(), | |
2873 true /*split*/, | |
2874 false /*birth*/); | |
2896 | 2875 |
2897 // First return the remainder, if any. | 2876 // First return the remainder, if any. |
2898 // Note that we hold the lock until we decide if we're going to give | 2877 // Note that we hold the lock until we decide if we're going to give |
2899 // back the remainder to the dictionary, since a concurrent allocation | 2878 // back the remainder to the dictionary, since a concurrent allocation |
2900 // may otherwise see the heap as empty. (We're willing to take that | 2879 // may otherwise see the heap as empty. (We're willing to take that |
2924 Mutex::_no_safepoint_check_flag); | 2903 Mutex::_no_safepoint_check_flag); |
2925 _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size()); | 2904 _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size()); |
2926 _indexedFreeList[rem].return_chunk_at_head(rem_fc); | 2905 _indexedFreeList[rem].return_chunk_at_head(rem_fc); |
2927 smallSplitBirth(rem); | 2906 smallSplitBirth(rem); |
2928 } | 2907 } |
2929 assert((ssize_t)n > 0 && fc != NULL, "Consistency"); | 2908 assert(n * word_sz == fc->size(), |
2909 err_msg("Chunk size " SIZE_FORMAT " is not exactly splittable by " | |
2910 SIZE_FORMAT " sized chunks of size " SIZE_FORMAT, | |
2911 fc->size(), n, word_sz)); | |
2912 return fc; | |
2913 } | |
2914 | |
2915 void CompactibleFreeListSpace:: par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targetted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) { | |
2916 | |
2917 FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targetted_number_of_chunks); | |
2918 | |
2919 if (fc == NULL) { | |
2920 return; | |
2921 } | |
2922 | |
2923 size_t n = fc->size() / word_sz; | |
2924 | |
2925 assert((ssize_t)n > 0, "Consistency"); | |
2930 // Now do the splitting up. | 2926 // Now do the splitting up. |
2931 // Must do this in reverse order, so that anybody attempting to | 2927 // Must do this in reverse order, so that anybody attempting to |
2932 // access the main chunk sees it as a single free block until we | 2928 // access the main chunk sees it as a single free block until we |
2933 // change it. | 2929 // change it. |
2934 size_t fc_size = n * word_sz; | 2930 size_t fc_size = n * word_sz; |
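
The reverse order demanded by the comment above is a lock-free visibility trick: pieces are carved from the tail toward the head, so the head chunk's size, the word a concurrent parser reads first, is rewritten last. A simplified sketch (the real loop also NULLs the link fields, issues OrderAccess::storestore(), and updates the block offset table):

    // Tail-first carve: until the final fc->set_size(word_sz), any
    // thread parsing the space still sees one free block of
    // n * word_sz words starting at fc.
    for (ssize_t i = (ssize_t)n - 1; i > 0; i--) {
      FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
      ffc->set_size(word_sz);         // carve piece i off the tail
      fl->return_chunk_at_head(ffc);  // publish it on the target list
    }
    fc->set_size(word_sz);            // the head shrinks only now
    fl->return_chunk_at_head(fc);
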
2970 // _indexedFreeList[word_sz].set_surplus(new_surplus); | 2966 // _indexedFreeList[word_sz].set_surplus(new_surplus); |
2971 } | 2967 } |
2972 | 2968 |
2973 // TRAP | 2969 // TRAP |
2974 assert(fl->tail()->next() == NULL, "List invariant."); | 2970 assert(fl->tail()->next() == NULL, "List invariant."); |
2971 } | |
2972 | |
2973 void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) { | |
2974 assert(fl->count() == 0, "Precondition."); | |
2975 assert(word_sz < CompactibleFreeListSpace::IndexSetSize, | |
2976 "Precondition"); | |
2977 | |
2978 if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) { | |
2979 // Got it | |
2980 return; | |
2981 } | |
2982 | |
2983 // Otherwise, we'll split a block from the dictionary. | |
2984 par_get_chunk_of_blocks_dictionary(word_sz, n, fl); | |
2975 } | 2985 } |
2976 | 2986 |
2977 // Set up the space's par_seq_tasks structure for work claiming | 2987 // Set up the space's par_seq_tasks structure for work claiming |
2978 // for parallel rescan. See CMSParRemarkTask where this is currently used. | 2988 // for parallel rescan. See CMSParRemarkTask where this is currently used. |
2979 // XXX Need to suitably abstract and generalize this and the next | 2989 // XXX Need to suitably abstract and generalize this and the next |