src/share/vm/gc_implementation/g1/heapRegionSeq.cpp @ 1886:72a161e62cc4

6991377: G1: race between concurrent refinement and humongous object allocation
Summary: There is a race between the concurrent refinement threads and humongous object allocation that can cause the refinement threads to corrupt the part of the BOT that is being initialized by the humongous object allocation operation. The solution is to perform the humongous object allocation in careful steps so that the concurrent refinement threads always have a consistent view of the BOT, the region contents, and top. The fix includes some very minor tidying up in sparsePRT.
Reviewed-by: jcoomes, johnc, ysr
author tonyp
date Sat, 16 Oct 2010 17:12:19 -0400
parents 2dfd013a7465
children f95d63e2154a
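
The fix establishes a simple publication protocol: zero the new object's klass word, initialize the BOT, issue a store-store barrier, and only then publish the allocation by updating top. A refinement thread keys everything off top, so it either sees an apparently empty region or, once top is visible, a zeroed klass word that tells it to bail out. The following standalone C++11 model is only an illustration of that protocol, not HotSpot code: Region, allocate, and scan are invented names, and release/acquire atomics stand in for OrderAccess::storestore() and the reader's ordered loads.

#include <atomic>
#include <cstdio>
#include <thread>

struct Region {
  std::atomic<long> klass_word{-1};  // -1 models stale garbage in the header
  std::atomic<long> bot_entry{-1};   // -1 models a stale BOT entry
  std::atomic<int>  top{0};          // 0 means top == bottom: region looks empty
};

void allocate(Region& r, int new_top) {
  r.klass_word.store(0, std::memory_order_relaxed);  // 1. zero the klass word
  r.bot_entry.store(1, std::memory_order_relaxed);   // 2. initialize the BOT
  r.top.store(new_top, std::memory_order_release);   // 3. storestore, then publish top
}

void scan(const Region& r) {
  // A refinement thread reads top first; while top is still at bottom, the
  // intersection of [bottom, top) with any card is empty, so it does nothing.
  if (r.top.load(std::memory_order_acquire) == 0) return;
  // Once top is visible, the ordering guarantees the stale -1 can no longer
  // be observed: the header is already zeroed, so the scanner bails out.
  if (r.klass_word.load(std::memory_order_relaxed) == 0) return;
}

int main() {
  Region r;
  std::thread scanner([&r] { for (int i = 0; i < 100000; i++) scan(r); });
  allocate(r, 42);
  scanner.join();
  std::printf("scanner never saw torn metadata\n");
  return 0;
}
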
--- heapRegionSeq.cpp@1885:a5c514e74487
+++ heapRegionSeq.cpp@1886:72a161e62cc4
@@ -89,38 +89,122 @@
     }
     cur++;
   }
   if (sumSizes >= word_size) {
     _alloc_search_start = cur;
-    // Mark the allocated regions as allocated.
+
+    // We need to initialize the region(s) we just discovered. This is
+    // a bit tricky given that it can happen concurrently with
+    // refinement threads refining cards on these regions and
+    // potentially wanting to refine the BOT as they are scanning
+    // those cards (this can happen shortly after a cleanup; see CR
+    // 6991377). So we have to set up the region(s) carefully and in
+    // a specific order.
+
+    // Currently, allocs_are_zero_filled() returns false. The zero
+    // filling infrastructure will be going away soon (see CR 6977804).
+    // So no need to do anything else here.
     bool zf = G1CollectedHeap::heap()->allocs_are_zero_filled();
+    assert(!zf, "not supported");
+
+    // This will be the "starts humongous" region.
     HeapRegion* first_hr = _regions.at(first);
-    for (int i = first; i < cur; i++) {
-      HeapRegion* hr = _regions.at(i);
-      if (zf)
-        hr->ensure_zero_filled();
+    {
+      MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
+      first_hr->set_zero_fill_allocated();
+    }
+    // The header of the new object will be placed at the bottom of
+    // the first region.
+    HeapWord* new_obj = first_hr->bottom();
+    // This will be the new end of the first region in the series that
+    // should also match the end of the last region in the series.
+    // (Note: sumSizes = "region size" x "number of regions we found").
+    HeapWord* new_end = new_obj + sumSizes;
+    // This will be the new top of the first region that will reflect
+    // this allocation.
+    HeapWord* new_top = new_obj + word_size;
+
+    // First, we need to zero the header of the space that we will be
+    // allocating. When we update top further down, some refinement
+    // threads might try to scan the region. By zeroing the header we
+    // ensure that any thread that will try to scan the region will
+    // come across the zero klass word and bail out.
+    //
+    // NOTE: It would not have been correct to have used
+    // CollectedHeap::fill_with_object() and make the space look like
+    // an int array. The thread that is doing the allocation will
+    // later update the object header to a potentially different array
+    // type and, for a very short period of time, the klass and length
+    // fields will be inconsistent. This could cause a refinement
+    // thread to calculate the object size incorrectly.
+    Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
+
+    // We will set up the first region as "starts humongous". This
+    // will also update the BOT covering all the regions to reflect
+    // that there is a single object that starts at the bottom of the
+    // first region.
+    first_hr->set_startsHumongous(new_end);
+
+    // Then, if there are any, we will set up the "continues
+    // humongous" regions.
+    HeapRegion* hr = NULL;
+    for (int i = first + 1; i < cur; ++i) {
+      hr = _regions.at(i);
       {
         MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
         hr->set_zero_fill_allocated();
       }
-      size_t sz = hr->capacity() / HeapWordSize;
-      HeapWord* tmp = hr->allocate(sz);
-      assert(tmp != NULL, "Humongous allocation failure");
-      MemRegion mr = MemRegion(tmp, sz);
-      CollectedHeap::fill_with_object(mr);
-      hr->declare_filled_region_to_BOT(mr);
-      if (i == first) {
-        first_hr->set_startsHumongous();
-      } else {
-        assert(i > first, "sanity");
-        hr->set_continuesHumongous(first_hr);
-      }
+      hr->set_continuesHumongous(first_hr);
     }
-    HeapWord* first_hr_bot = first_hr->bottom();
-    HeapWord* obj_end = first_hr_bot + word_size;
-    first_hr->set_top(obj_end);
-    return first_hr_bot;
+    // If we have "continues humongous" regions (hr != NULL), then the
+    // end of the last one should match new_end.
+    assert(hr == NULL || hr->end() == new_end, "sanity");
+
+    // Up to this point no concurrent thread would have been able to
+    // do any scanning on any region in this series. All the top
+    // fields still point to bottom, so the intersection between
+    // [bottom,top] and [card_start,card_end] will be empty. Before we
+    // update the top fields, we'll do a storestore to make sure that
+    // no thread sees the update to top before the zeroing of the
+    // object header and the BOT initialization.
+    OrderAccess::storestore();
+
+    // Now that the BOT and the object header have been initialized,
+    // we can update top of the "starts humongous" region.
+    assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
+           "new_top should be in this region");
+    first_hr->set_top(new_top);
+
+    // Now, we will update the top fields of the "continues humongous"
+    // regions. The reason we need to do this is that, otherwise,
+    // these regions would look empty and this will confuse parts of
+    // G1. For example, the code that looks for a consecutive number
+    // of empty regions will consider them empty and try to
+    // re-allocate them. We can extend is_empty() to also include
+    // !continuesHumongous(), but it is easier to just update the top
+    // fields here.
+    hr = NULL;
+    for (int i = first + 1; i < cur; ++i) {
+      hr = _regions.at(i);
+      if ((i + 1) == cur) {
+        // last continues humongous region
+        assert(hr->bottom() < new_top && new_top <= hr->end(),
+               "new_top should fall on this region");
+        hr->set_top(new_top);
+      } else {
+        // not last one
+        assert(new_top > hr->end(), "new_top should be above this region");
+        hr->set_top(hr->end());
+      }
+    }
+    // If we have continues humongous regions (hr != NULL), then the
+    // end of the last one should match new_end and its top should
+    // match new_top.
+    assert(hr == NULL ||
+           (hr->end() == new_end && hr->top() == new_top), "sanity");
+
+    return new_obj;
   } else {
     // If we started from the beginning, we want to know why we can't alloc.
     return NULL;
   }
 }
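
For concreteness, here is a worked example of the quantities involved, using assumed numbers (1M-word regions, a 2,500,000-word request) that appear nowhere in the patch. It also spells out a detail implied by the new_end comment and the asserts above: set_startsHumongous(new_end) moves the first region's end out to the end of the series, which is why new_top <= first_hr->end() holds even though new_top lies inside the last region.

#include <cstddef>
#include <cstdio>

int main() {
  const size_t region_words = (size_t)1 << 20; // assumed: 1M-word regions
  const size_t word_size    = 2500000;         // assumed request: spans 3 regions

  // The search loop above accumulates whole empty regions until they
  // cover the request.
  size_t sumSizes = 0;
  int regions = 0;
  while (sumSizes < word_size) { sumSizes += region_words; regions++; }

  size_t new_obj = 0;                   // header goes at bottom of first region
  size_t new_end = new_obj + sumSizes;  // 3145728: end of the whole series
  size_t new_top = new_obj + word_size; // 2500000: falls inside the third region

  std::printf("regions=%d new_end=%zu new_top=%zu\n", regions, new_end, new_top);
  // first region:  end -> new_end (set_startsHumongous), top -> new_top
  // middle region: top -> its own end, so it never looks empty
  // last region:   top -> new_top
  return 0;
}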