Mercurial > hg > graal-jvmci-8
comparison src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp @ 12080:5888334c9c24
7145569: G1: optimize nmethods scanning
Summary: Add a list of nmethods to the RSet for a region that contain references into the region. Skip scanning the code cache during root scanning and scan the nmethod lists during RSet scanning instead.
Reviewed-by: tschatzl, brutisso, mgerdin, twisti, kvn
author:   johnc
date:     Thu, 15 Aug 2013 10:52:18 +0200
parents:  6702da6b6082
children: 190899198332
comparison
equal
deleted
inserted
replaced
12033:bd902affe102 | 12080:5888334c9c24 |
---|---|
31 #include "memory/allocation.hpp" | 31 #include "memory/allocation.hpp" |
32 #include "memory/space.inline.hpp" | 32 #include "memory/space.inline.hpp" |
33 #include "oops/oop.inline.hpp" | 33 #include "oops/oop.inline.hpp" |
34 #include "utilities/bitMap.inline.hpp" | 34 #include "utilities/bitMap.inline.hpp" |
35 #include "utilities/globalDefinitions.hpp" | 35 #include "utilities/globalDefinitions.hpp" |
36 #include "utilities/growableArray.hpp" | |
36 | 37 |
37 class PerRegionTable: public CHeapObj<mtGC> { | 38 class PerRegionTable: public CHeapObj<mtGC> { |
38 friend class OtherRegionsTable; | 39 friend class OtherRegionsTable; |
39 friend class HeapRegionRemSetIterator; | 40 friend class HeapRegionRemSetIterator; |
40 | 41 |
847 return (int)MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), ParallelGCThreads); | 848 return (int)MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), ParallelGCThreads); |
848 } | 849 } |
849 | 850 |
// Remembered set for a single heap region. The strong code roots list
// is allocated lazily (it is created in clear(), not here), so it
// starts out NULL.
HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
                                   HeapRegion* hr)
  : _bosa(bosa), _strong_code_roots_list(NULL), _other_regions(hr) {
  reset_for_par_iteration();
}
855 | 856 |
856 void HeapRegionRemSet::setup_remset_size() { | 857 void HeapRegionRemSet::setup_remset_size() { |
857 // Setup sparse and fine-grain tables sizes. | 858 // Setup sparse and fine-grain tables sizes. |
// Global cleanup hook: forwards to the sparse per-region-table
// machinery shared by all remembered sets.
void HeapRegionRemSet::cleanup() {
  SparsePRT::cleanup_all();
}
909 | 910 |
910 void HeapRegionRemSet::clear() { | 911 void HeapRegionRemSet::clear() { |
912 if (_strong_code_roots_list != NULL) { | |
913 delete _strong_code_roots_list; | |
914 } | |
915 _strong_code_roots_list = new (ResourceObj::C_HEAP, mtGC) | |
916 GrowableArray<nmethod*>(10, 0, NULL, true); | |
917 | |
911 _other_regions.clear(); | 918 _other_regions.clear(); |
912 assert(occupied() == 0, "Should be clear."); | 919 assert(occupied() == 0, "Should be clear."); |
913 reset_for_par_iteration(); | 920 reset_for_par_iteration(); |
914 } | 921 } |
915 | 922 |
921 } | 928 } |
922 | 929 |
// Scrub this remembered set against the supplied region and card
// bitmaps by delegating to the underlying OtherRegionsTable.
void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs,
                             BitMap* region_bm, BitMap* card_bm) {
  _other_regions.scrub(ctbs, region_bm, card_bm);
}
934 | |
935 | |
936 // Code roots support | |
937 | |
938 void HeapRegionRemSet::add_strong_code_root(nmethod* nm) { | |
939 assert(nm != NULL, "sanity"); | |
940 // Search for the code blob from the RHS to avoid | |
941 // duplicate entries as much as possible | |
942 if (_strong_code_roots_list->find_from_end(nm) < 0) { | |
943 // Code blob isn't already in the list | |
944 _strong_code_roots_list->push(nm); | |
945 } | |
946 } | |
947 | |
948 void HeapRegionRemSet::remove_strong_code_root(nmethod* nm) { | |
949 assert(nm != NULL, "sanity"); | |
950 int idx = _strong_code_roots_list->find(nm); | |
951 if (idx >= 0) { | |
952 _strong_code_roots_list->remove_at(idx); | |
953 } | |
954 // Check that there were no duplicates | |
955 guarantee(_strong_code_roots_list->find(nm) < 0, "duplicate entry found"); | |
956 } | |
957 | |
958 class NMethodMigrationOopClosure : public OopClosure { | |
959 G1CollectedHeap* _g1h; | |
960 HeapRegion* _from; | |
961 nmethod* _nm; | |
962 | |
963 uint _num_self_forwarded; | |
964 | |
965 template <class T> void do_oop_work(T* p) { | |
966 T heap_oop = oopDesc::load_heap_oop(p); | |
967 if (!oopDesc::is_null(heap_oop)) { | |
968 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); | |
969 if (_from->is_in(obj)) { | |
970 // Reference still points into the source region. | |
971 // Since roots are immediately evacuated this means that | |
972 // we must have self forwarded the object | |
973 assert(obj->is_forwarded(), | |
974 err_msg("code roots should be immediately evacuated. " | |
975 "Ref: "PTR_FORMAT", " | |
976 "Obj: "PTR_FORMAT", " | |
977 "Region: "HR_FORMAT, | |
978 p, (void*) obj, HR_FORMAT_PARAMS(_from))); | |
979 assert(obj->forwardee() == obj, | |
980 err_msg("not self forwarded? obj = "PTR_FORMAT, (void*)obj)); | |
981 | |
982 // The object has been self forwarded. | |
983 // Note, if we're during an initial mark pause, there is | |
984 // no need to explicitly mark object. It will be marked | |
985 // during the regular evacuation failure handling code. | |
986 _num_self_forwarded++; | |
987 } else { | |
988 // The reference points into a promotion or to-space region | |
989 HeapRegion* to = _g1h->heap_region_containing(obj); | |
990 to->rem_set()->add_strong_code_root(_nm); | |
991 } | |
992 } | |
993 } | |
994 | |
995 public: | |
996 NMethodMigrationOopClosure(G1CollectedHeap* g1h, HeapRegion* from, nmethod* nm): | |
997 _g1h(g1h), _from(from), _nm(nm), _num_self_forwarded(0) {} | |
998 | |
999 void do_oop(narrowOop* p) { do_oop_work(p); } | |
1000 void do_oop(oop* p) { do_oop_work(p); } | |
1001 | |
1002 uint retain() { return _num_self_forwarded > 0; } | |
1003 }; | |
1004 | |
// Migrate this (collection set) region's strong code roots to the
// remembered sets of the regions their oops were evacuated to. Entries
// whose oops were self-forwarded (evacuation failure) are re-added to
// this region's own list, so after this call the list holds exactly the
// nmethods that still reference this region.
void HeapRegionRemSet::migrate_strong_code_roots() {
  assert(hr()->in_collection_set(), "only collection set regions");
  assert(!hr()->isHumongous(), "not humongous regions");

  ResourceMark rm;

  // List of code blobs to retain for this region
  GrowableArray<nmethod*> to_be_retained(10);
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Drain the list: the migration closure pushes each nmethod onto the
  // rem sets of the destination regions, so popping here is safe.
  while (_strong_code_roots_list->is_nonempty()) {
    nmethod *nm = _strong_code_roots_list->pop();
    if (nm != NULL) {
      NMethodMigrationOopClosure oop_cl(g1h, hr(), nm);
      nm->oops_do(&oop_cl);
      // retain() is non-zero iff some oop was self-forwarded, i.e. the
      // nmethod still references this region.
      if (oop_cl.retain()) {
        to_be_retained.push(nm);
      }
    }
  }

  // Now push any code roots we need to retain
  assert(to_be_retained.is_empty() || hr()->evacuation_failed(),
         "Retained nmethod list must be empty or "
         "evacuation of this region failed");

  while (to_be_retained.is_nonempty()) {
    nmethod* nm = to_be_retained.pop();
    assert(nm != NULL, "sanity");
    add_strong_code_root(nm);
  }
}
1037 | |
1038 void HeapRegionRemSet::strong_code_roots_do(CodeBlobClosure* blk) const { | |
1039 for (int i = 0; i < _strong_code_roots_list->length(); i += 1) { | |
1040 nmethod* nm = _strong_code_roots_list->at(i); | |
1041 blk->do_code_blob(nm); | |
1042 } | |
1043 } | |
1044 | |
1045 size_t HeapRegionRemSet::strong_code_roots_mem_size() { | |
1046 return sizeof(GrowableArray<nmethod*>) + | |
1047 _strong_code_roots_list->max_length() * sizeof(nmethod*); | |
926 } | 1048 } |
927 | 1049 |
928 //-------------------- Iteration -------------------- | 1050 //-------------------- Iteration -------------------- |
929 | 1051 |
930 HeapRegionRemSetIterator:: HeapRegionRemSetIterator(const HeapRegionRemSet* hrrs) : | 1052 HeapRegionRemSetIterator:: HeapRegionRemSetIterator(const HeapRegionRemSet* hrrs) : |