comparison src/share/vm/gc_interface/collectedHeap.cpp @ 1166:7b0e9cba0307
6896647: card marks can be deferred too long
Summary: Deferred card marks are now flushed during the gc prologue. Parallel[Scavenge,OldGC] and SerialGC no longer defer card marks generated by COMPILER2 as a result of ReduceInitialCardMarks. For these cases, introduced a diagnostic option to defer the card marks, only for the purposes of testing and diagnostics. CMS and G1 continue to defer card marks. Potential performance concern related to single-threaded flushing of deferred card marks in the gc prologue will be addressed in the future.
Reviewed-by: never, johnc
author | ysr |
---|---|
date | Wed, 13 Jan 2010 15:26:39 -0800 |
parents | 2dd52dea6d28 |
children | 34fb2662f6c2 |
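The `DeferInitialCardMark` switch that the summary calls a "diagnostic option" would, under the standard HotSpot flag machinery, be declared with the `diagnostic(...)` macro and gated behind `-XX:+UnlockDiagnosticVMOptions`. A minimal sketch of such a declaration, assuming the usual globals.hpp flag table (the description string is illustrative, not copied from this change):

```
// Sketch of a diagnostic flag entry in src/share/vm/runtime/globals.hpp;
// enable at runtime with:
//   -XX:+UnlockDiagnosticVMOptions -XX:+DeferInitialCardMark
diagnostic(bool, DeferInitialCardMark, false,                             \
          "Defer initial card marks (testing/diagnostics only) when "     \
          "ReduceInitialCardMarks would otherwise mark eagerly")          \
```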
1165:2dd52dea6d28 | 1166:7b0e9cba0307 |
---|---|
57 | 57 |
58 _perf_gc_lastcause = | 58 _perf_gc_lastcause = |
59 PerfDataManager::create_string_variable(SUN_GC, "lastCause", | 59 PerfDataManager::create_string_variable(SUN_GC, "lastCause", |
60 80, GCCause::to_string(_gc_lastcause), CHECK); | 60 80, GCCause::to_string(_gc_lastcause), CHECK); |
61 } | 61 } |
62 } | 62 _defer_initial_card_mark = false; // strengthened by subclass in pre_initialize() below. |
63 | 63 } |
64 | |
65 void CollectedHeap::pre_initialize() { | |
66 // Used for ReduceInitialCardMarks (when COMPILER2 is used); | |
67 // otherwise remains unused. | |
68 #ifdef COMPILER2 | |
69 _defer_initial_card_mark = ReduceInitialCardMarks && (DeferInitialCardMark || card_mark_must_follow_store()); | |
70 #else | |
71 assert(_defer_initial_card_mark == false, "Who would set it?"); | |
72 #endif | |
73 } | |
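For context on the predicate in `pre_initialize()` above: the summary says CMS and G1 continue to defer card marks, which implies their heaps answer true to `card_mark_must_follow_store()` while Serial and Parallel keep the default. A hedged sketch of the assumed class shapes (names and placement are illustrative, not copied from this change):

```
// Sketch only: the default lets Serial/Parallel card-mark eagerly in
// new_store_pre_barrier(); collectors whose concurrent card scanning
// requires the mark to strictly follow the initializing stores
// override this to true and therefore keep deferring.
class CollectedHeap {
 protected:
  bool _defer_initial_card_mark;   // set in pre_initialize() above
  virtual bool card_mark_must_follow_store() const { return false; }
};

class SomeConcurrentHeap : public CollectedHeap {  // stand-in for CMS/G1
 protected:
  virtual bool card_mark_must_follow_store() const { return true; }
};
```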
64 | 74 |
65 #ifndef PRODUCT | 75 #ifndef PRODUCT |
66 void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) { | 76 void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) { |
67 if (CheckMemoryInitialization && ZapUnusedHeapArea) { | 77 if (CheckMemoryInitialization && ZapUnusedHeapArea) { |
68 for (size_t slot = 0; slot < size; slot += 1) { | 78 for (size_t slot = 0; slot < size; slot += 1) { |
138 } | 148 } |
139 | 149 |
140 void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) { | 150 void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) { |
141 MemRegion deferred = thread->deferred_card_mark(); | 151 MemRegion deferred = thread->deferred_card_mark(); |
142 if (!deferred.is_empty()) { | 152 if (!deferred.is_empty()) { |
153 assert(_defer_initial_card_mark, "Otherwise should be empty"); | |
143 { | 154 { |
144 // Verify that the storage points to a parsable object in heap | 155 // Verify that the storage points to a parsable object in heap |
145 DEBUG_ONLY(oop old_obj = oop(deferred.start());) | 156 DEBUG_ONLY(oop old_obj = oop(deferred.start());) |
146 assert(is_in(old_obj), "Not in allocated heap"); | 157 assert(is_in(old_obj), "Not in allocated heap"); |
147 assert(!can_elide_initializing_store_barrier(old_obj), | 158 assert(!can_elide_initializing_store_barrier(old_obj), |
148 "Else should have been filtered in defer_store_barrier()"); | 159 "Else should have been filtered in new_store_pre_barrier()"); |
149 assert(!is_in_permanent(old_obj), "Sanity: not expected"); | 160 assert(!is_in_permanent(old_obj), "Sanity: not expected"); |
150 assert(old_obj->is_oop(true), "Not an oop"); | 161 assert(old_obj->is_oop(true), "Not an oop"); |
151 assert(old_obj->is_parsable(), "Will not be concurrently parsable"); | 162 assert(old_obj->is_parsable(), "Will not be concurrently parsable"); |
152 assert(deferred.word_size() == (size_t)(old_obj->size()), | 163 assert(deferred.word_size() == (size_t)(old_obj->size()), |
153 "Mismatch: multiple objects?"); | 164 "Mismatch: multiple objects?"); |
172 // in the old gen, and do not care if the card-mark | 183 // in the old gen, and do not care if the card-mark |
173 // succeeds or precedes the initializing stores themselves, | 184 // succeeds or precedes the initializing stores themselves, |
174 // so long as the card-mark is completed before the next | 185 // so long as the card-mark is completed before the next |
175 // scavenge. For all these cases, we can do a card mark | 186 // scavenge. For all these cases, we can do a card mark |
176 // at the point at which we do a slow path allocation | 187 // at the point at which we do a slow path allocation |
177 // in the old gen. For uniformity, however, we end | 188 // in the old gen, i.e. in this call. |
178 // up using the same scheme (see below) for all three | |
179 // cases (deferring the card-mark appropriately). | |
180 // (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires | 189 // (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires |
181 // in addition that the card-mark for an old gen allocated | 190 // in addition that the card-mark for an old gen allocated |
182 // object strictly follow any associated initializing stores. | 191 // object strictly follow any associated initializing stores. |
183 // In these cases, the memRegion remembered below is | 192 // In these cases, the memRegion remembered below is |
184 // used to card-mark the entire region either just before the next | 193 // used to card-mark the entire region either just before the next |
197 // optionally be refined by the concurrent update threads. Note | 206 // optionally be refined by the concurrent update threads. Note |
198 // that this barrier need only be applied to a non-young write, | 207 // that this barrier need only be applied to a non-young write, |
199 // but, like in CMS, because of the presence of concurrent refinement | 208 // but, like in CMS, because of the presence of concurrent refinement |
200 // (much like CMS' precleaning), must strictly follow the oop-store. | 209 // (much like CMS' precleaning), must strictly follow the oop-store. |
201 // Thus, using the same protocol for maintaining the intended | 210 // Thus, using the same protocol for maintaining the intended |
202 // invariants turns out, serendipitously, to be the same for all | 211 // invariants turns out, serendipitously, to be the same for both |
203 // three collectors/heap types above. | 212 // G1 and CMS. |
204 // | 213 // |
205 // For each future collector, this should be reexamined with | 214 // For any future collector, this code should be reexamined with |
206 // that specific collector in mind. | 215 // that specific collector in mind, and the documentation above suitably |
207 oop CollectedHeap::defer_store_barrier(JavaThread* thread, oop new_obj) { | 216 // extended and updated. |
217 oop CollectedHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) { | |
208 // If a previous card-mark was deferred, flush it now. | 218 // If a previous card-mark was deferred, flush it now. |
209 flush_deferred_store_barrier(thread); | 219 flush_deferred_store_barrier(thread); |
210 if (can_elide_initializing_store_barrier(new_obj)) { | 220 if (can_elide_initializing_store_barrier(new_obj)) { |
211 // The deferred_card_mark region should be empty | 221 // The deferred_card_mark region should be empty |
212 // following the flush above. | 222 // following the flush above. |
213 assert(thread->deferred_card_mark().is_empty(), "Error"); | 223 assert(thread->deferred_card_mark().is_empty(), "Error"); |
214 } else { | 224 } else { |
215 // Remember info for the newly deferred store barrier | 225 MemRegion mr((HeapWord*)new_obj, new_obj->size()); |
216 MemRegion deferred = MemRegion((HeapWord*)new_obj, new_obj->size()); | 226 assert(!mr.is_empty(), "Error"); |
217 assert(!deferred.is_empty(), "Error"); | 227 if (_defer_initial_card_mark) { |
218 thread->set_deferred_card_mark(deferred); | 228 // Defer the card mark |
229 thread->set_deferred_card_mark(mr); | |
230 } else { | |
231 // Do the card mark | |
232 BarrierSet* bs = barrier_set(); | |
233 assert(bs->has_write_region_opt(), "No write_region() on BarrierSet"); | |
234 bs->write_region(mr); | |
235 } | |
219 } | 236 } |
220 return new_obj; | 237 return new_obj; |
221 } | 238 } |
222 | 239 |
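A caller-side view may help here. The sketch below shows how a slow-path old-gen allocation is expected to use `new_store_pre_barrier()`; `old_gen_allocate()` is a made-up stand-in for whatever allocation path the compiled code reaches, not an API from this change:

```
// Illustrative only: slow-path old-gen allocation under
// ReduceInitialCardMarks. Any previously deferred card mark for this
// thread is flushed first; the mark for new_obj is then either done
// eagerly (Serial/Parallel) or deferred (CMS/G1, or +DeferInitialCardMark).
oop slow_path_old_alloc(JavaThread* thread, size_t word_size) {
  oop new_obj = old_gen_allocate(word_size);  // hypothetical helper
  return Universe::heap()->new_store_pre_barrier(thread, new_obj);
}
```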
223 size_t CollectedHeap::filler_array_hdr_size() { | 240 size_t CollectedHeap::filler_array_hdr_size() { |
309 } | 326 } |
310 | 327 |
311 HeapWord* CollectedHeap::allocate_new_tlab(size_t size) { | 328 HeapWord* CollectedHeap::allocate_new_tlab(size_t size) { |
312 guarantee(false, "thread-local allocation buffers not supported"); | 329 guarantee(false, "thread-local allocation buffers not supported"); |
313 return NULL; | 330 return NULL; |
314 } | |
315 | |
316 void CollectedHeap::fill_all_tlabs(bool retire) { | |
317 assert(UseTLAB, "should not reach here"); | |
318 // See note in ensure_parsability() below. | |
319 assert(SafepointSynchronize::is_at_safepoint() || | |
320 !is_init_completed(), | |
321 "should only fill tlabs at safepoint"); | |
322 // The main thread starts allocating via a TLAB even before it | |
323 // has added itself to the threads list at vm boot-up. | |
324 assert(Threads::first() != NULL, | |
325 "Attempt to fill tlabs before main thread has been added" | |
326 " to threads list is doomed to failure!"); | |
327 for(JavaThread *thread = Threads::first(); thread; thread = thread->next()) { | |
328 thread->tlab().make_parsable(retire); | |
329 } | |
330 } | 331 } |
331 | 332 |
332 void CollectedHeap::ensure_parsability(bool retire_tlabs) { | 333 void CollectedHeap::ensure_parsability(bool retire_tlabs) { |
333 // The second disjunct in the assertion below makes a concession | 334 // The second disjunct in the assertion below makes a concession |
334 // for the start-up verification done while the VM is being | 335 // for the start-up verification done while the VM is being |
341 assert(SafepointSynchronize::is_at_safepoint() || | 342 assert(SafepointSynchronize::is_at_safepoint() || |
342 !is_init_completed(), | 343 !is_init_completed(), |
343 "Should only be called at a safepoint or at start-up" | 344 "Should only be called at a safepoint or at start-up" |
344 " otherwise concurrent mutator activity may make heap " | 345 " otherwise concurrent mutator activity may make heap " |
345 " unparsable again"); | 346 " unparsable again"); |
346 if (UseTLAB) { | 347 const bool use_tlab = UseTLAB; |
347 fill_all_tlabs(retire_tlabs); | 348 const bool deferred = _defer_initial_card_mark; |
349 // The main thread starts allocating via a TLAB even before it | |
350 // has added itself to the threads list at vm boot-up. | |
351 assert(!use_tlab || Threads::first() != NULL, | |
352 "Attempt to fill tlabs before main thread has been added" | |
353 " to threads list is doomed to failure!"); | |
354 for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) { | |
355 if (use_tlab) thread->tlab().make_parsable(retire_tlabs); | |
356 #ifdef COMPILER2 | |
357 // The deferred store barriers must all have been flushed to the | |
358 // card-table (or other remembered set structure) before GC starts | |
359 // processing the card-table (or other remembered set). | |
360 if (deferred) flush_deferred_store_barrier(thread); | |
361 #else | |
362 assert(!deferred, "Should be false"); | |
363 assert(thread->deferred_card_mark().is_empty(), "Should be empty"); | |
364 #endif | |
348 } | 365 } |
349 } | 366 } |
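Per the summary, deferred card marks are now flushed during the gc prologue, and `ensure_parsability()` above is the hook through which that happens. A hedged sketch of a collector prologue (the subclass name and the `retire_tlabs` choice are illustrative):

```
// Sketch: at a safepoint, before the collection reads the card table,
// one pass over the thread list retires TLABs and flushes each
// thread's deferred card mark via ensure_parsability() above.
void SomeCollectedHeap::gc_prologue(bool full) {  // hypothetical subclass
  ensure_parsability(true /* retire TLABs */);
  // ... collector-specific prologue work ...
}
```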
350 | 367 |
351 void CollectedHeap::accumulate_statistics_all_tlabs() { | 368 void CollectedHeap::accumulate_statistics_all_tlabs() { |
352 if (UseTLAB) { | 369 if (UseTLAB) { |