comparison src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp @ 12835:69944b868a32

8014555: G1: Memory ordering problem with Conc refinement and card marking
Summary: Add a StoreLoad barrier in the G1 post-barrier to fix a race with concurrent refinement.
Also-reviewed-by: martin.doerr@sap.com
Reviewed-by: iveresov, tschatzl, brutisso, roland, kvn
author mgerdin
date Tue, 08 Oct 2013 17:35:51 +0200
parents d55c004e1d4d
children aa6f2ea19d8f
comparing 12834:04b18a42c2f3 with 12835:69944b868a32
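The race fixed here is a Dekker-style store/load ordering problem between the mutator's post-barrier and a concurrent refinement thread: the mutator stores the new reference and then loads the card, while the refinement thread cleans the card and then re-reads the field. If either load is allowed to pass the preceding store, the refiner can miss the new reference while the mutator still sees a stale dirty card and never re-enqueues it, so the update is lost. Below is a minimal standalone sketch of that ordering requirement in C++11 std::atomic terms; it is not HotSpot code, and the names (field, card, enqueued) and values are illustrative only.

// Standalone illustration only -- not HotSpot code. The seq_cst fences
// stand in for OrderAccess::storeload() on both sides of the race.
#include <atomic>
#include <thread>
#include <cstdio>

enum { CLEAN = 0, DIRTY = 1 };

std::atomic<int>  field{0};       // stands in for the reference field being updated
std::atomic<char> card{DIRTY};    // stands in for the card covering that field
std::atomic<bool> enqueued{false};

// Mutator side: post-barrier after storing the new reference.
void mutate() {
  field.store(42, std::memory_order_relaxed);           // the reference store
  std::atomic_thread_fence(std::memory_order_seq_cst);  // the added StoreLoad barrier
  if (card.load(std::memory_order_relaxed) != DIRTY) {  // re-check the card
    card.store(DIRTY, std::memory_order_relaxed);
    enqueued.store(true, std::memory_order_relaxed);    // would enqueue for refinement
  }
}

// Concurrent refinement side: clean the card, then re-scan the field.
void refine() {
  card.store(CLEAN, std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_seq_cst);  // refinement's matching barrier
  int v = field.load(std::memory_order_relaxed);        // must see 42, or the mutator must re-dirty the card
  (void)v;
}

int main() {
  std::thread a(mutate), b(refine);
  a.join(); b.join();
  // Without both fences the threads can each read a stale value and the
  // new reference escapes remembered-set maintenance.
  printf("card=%d enqueued=%d field=%d\n", card.load(), enqueued.load(), field.load());
  return 0;
}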
@@ -68,10 +68,16 @@
   jbyte val = _byte_map[card_index];
   // It's already processed
   if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
     return false;
   }
+
+  if (val == g1_young_gen) {
+    // the card is for a young gen region. We don't need to keep track of all pointers into young
+    return false;
+  }
+
   // Cached bit can be installed either on a clean card or on a claimed card.
   jbyte new_val = val;
   if (val == clean_card_val()) {
     new_val = (jbyte)deferred_card_val();
   } else {
@@ -83,10 +89,23 @@
     Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
   }
   return true;
 }
 
+void G1SATBCardTableModRefBS::g1_mark_as_young(const MemRegion& mr) {
+  jbyte *const first = byte_for(mr.start());
+  jbyte *const last = byte_after(mr.last());
+
+  memset(first, g1_young_gen, last - first);
+}
+
+#ifndef PRODUCT
+void G1SATBCardTableModRefBS::verify_g1_young_region(MemRegion mr) {
+  verify_region(mr, g1_young_gen, true);
+}
+#endif
+
 G1SATBCardTableLoggingModRefBS::
 G1SATBCardTableLoggingModRefBS(MemRegion whole_heap,
                                int max_covered_regions) :
   G1SATBCardTableModRefBS(whole_heap, max_covered_regions),
   _dcqs(JavaThread::dirty_card_queue_set())
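g1_mark_as_young() above tags every card that covers a young region with a single memset over the card-table slice [byte_for(mr.start()), byte_after(mr.last())). A minimal sketch of that arithmetic, assuming HotSpot's 512-byte cards; the flat card array, the zero heap base, and the YOUNG_CARD value are stand-ins, since the real g1_young_gen encoding lives in the class header.

// Illustration of the card-range arithmetic behind g1_mark_as_young();
// not HotSpot code.
#include <cstring>
#include <cstdint>
#include <cstdio>

const size_t  CARD_SHIFT = 9;      // 512-byte cards, as in CardTableModRefBS
const uint8_t YOUNG_CARD = 0x10;   // illustrative value only

uint8_t   card_table[1024];        // covers a 512 KB "heap" slice
uintptr_t heap_base = 0;           // pretend the heap starts at address 0

// byte_for(addr): card entry covering addr; byte_after(addr): the entry after it.
uint8_t* byte_for(uintptr_t addr)   { return &card_table[(addr - heap_base) >> CARD_SHIFT]; }
uint8_t* byte_after(uintptr_t addr) { return byte_for(addr) + 1; }

// Mark every card intersecting [start, last] as young, mirroring the memset above.
void mark_as_young(uintptr_t start, uintptr_t last) {
  uint8_t* const first_card = byte_for(start);
  uint8_t* const end_card   = byte_after(last);   // one past the card holding 'last'
  memset(first_card, YOUNG_CARD, end_card - first_card);
}

int main() {
  mark_as_young(0x1000, 0x2fff);                  // an 8 KB range spans 16 cards
  printf("cards marked: %d\n", (int)(byte_after(0x2fff) - byte_for(0x1000)));
  return 0;
}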
@@ -95,11 +114,15 @@
 }
 
 void
 G1SATBCardTableLoggingModRefBS::write_ref_field_work(void* field,
                                                      oop new_val) {
-  jbyte* byte = byte_for(field);
+  volatile jbyte* byte = byte_for(field);
+  if (*byte == g1_young_gen) {
+    return;
+  }
+  OrderAccess::storeload();
   if (*byte != dirty_card) {
     *byte = dirty_card;
     Thread* thr = Thread::current();
     if (thr->is_Java_thread()) {
       JavaThread* jt = (JavaThread*)thr;
@@ -127,37 +150,47 @@
   g1_bs->write_ref_field_work(field, new_val);
 }
 
 void
 G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr, bool whole_heap) {
-  jbyte* byte = byte_for(mr.start());
+  volatile jbyte* byte = byte_for(mr.start());
   jbyte* last_byte = byte_for(mr.last());
   Thread* thr = Thread::current();
   if (whole_heap) {
     while (byte <= last_byte) {
       *byte = dirty_card;
       byte++;
     }
   } else {
-    // Enqueue if necessary.
-    if (thr->is_Java_thread()) {
-      JavaThread* jt = (JavaThread*)thr;
-      while (byte <= last_byte) {
-        if (*byte != dirty_card) {
-          *byte = dirty_card;
-          jt->dirty_card_queue().enqueue(byte);
-        }
-        byte++;
-      }
-    } else {
-      MutexLockerEx x(Shared_DirtyCardQ_lock,
-                      Mutex::_no_safepoint_check_flag);
-      while (byte <= last_byte) {
-        if (*byte != dirty_card) {
-          *byte = dirty_card;
-          _dcqs.shared_dirty_card_queue()->enqueue(byte);
-        }
-        byte++;
-      }
-    }
-  }
-}
+    // skip all consecutive young cards
+    for (; byte <= last_byte && *byte == g1_young_gen; byte++);
+
+    if (byte <= last_byte) {
+      OrderAccess::storeload();
+      // Enqueue if necessary.
+      if (thr->is_Java_thread()) {
+        JavaThread* jt = (JavaThread*)thr;
+        for (; byte <= last_byte; byte++) {
+          if (*byte == g1_young_gen) {
+            continue;
+          }
+          if (*byte != dirty_card) {
+            *byte = dirty_card;
+            jt->dirty_card_queue().enqueue(byte);
+          }
+        }
+      } else {
+        MutexLockerEx x(Shared_DirtyCardQ_lock,
+                        Mutex::_no_safepoint_check_flag);
+        for (; byte <= last_byte; byte++) {
+          if (*byte == g1_young_gen) {
+            continue;
+          }
+          if (*byte != dirty_card) {
+            *byte = dirty_card;
+            _dcqs.shared_dirty_card_queue()->enqueue(byte);
+          }
+        }
+      }
+    }
+  }
+}
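The reworked invalidate() path first skips the leading run of young cards, then dirties and enqueues only cards that are neither young nor already dirty. A standalone sketch of that control flow; the card values are placeholders and a std::vector stands in for the dirty card queues, whereas the real code issues OrderAccess::storeload() before re-reading cards and takes Shared_DirtyCardQ_lock when not running on a Java thread.

// Sketch of the young-card skipping in invalidate(); not HotSpot code.
#include <vector>
#include <cstdio>

enum : unsigned char { DIRTY_CARD = 0, YOUNG_CARD = 0x10, CLEAN_CARD = 0xff }; // illustrative values

void invalidate_range(volatile unsigned char* byte, volatile unsigned char* last_byte,
                      std::vector<const volatile unsigned char*>& queue) {
  // Skip all consecutive young cards at the start of the range.
  for (; byte <= last_byte && *byte == YOUNG_CARD; byte++);

  if (byte > last_byte) {
    return;                        // the whole range was young; nothing to dirty
  }
  // The real barrier set would issue a StoreLoad barrier here before
  // re-reading card values.
  for (; byte <= last_byte; byte++) {
    if (*byte == YOUNG_CARD) {
      continue;                    // young cards never need refinement
    }
    if (*byte != DIRTY_CARD) {
      *byte = DIRTY_CARD;
      queue.push_back(byte);       // stands in for dirty_card_queue().enqueue(byte)
    }
  }
}

int main() {
  unsigned char cards[8] = { YOUNG_CARD, YOUNG_CARD, CLEAN_CARD, YOUNG_CARD,
                             CLEAN_CARD, DIRTY_CARD, CLEAN_CARD, YOUNG_CARD };
  std::vector<const volatile unsigned char*> queue;
  invalidate_range(cards, cards + 7, queue);
  printf("enqueued %zu cards\n", queue.size());  // the three formerly clean cards
  return 0;
}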