Mercurial > hg > graal-jvmci-8
comparison src/share/vm/memory/referenceProcessor.cpp @ 3917:eca1193ca245
4965777: GC changes to support use of discovered field for pending references
Summary: If and when the reference handler thread is able to use the discovered field to link reference objects in its pending list, so will GC. In that case, GC will scan through this field once a reference object has been placed on the pending list, but not scan that field before that stage, as the field is used by the concurrent GC thread to link discovered objects. When the ReferenceHandler thread does not use the discovered field for the purpose of linking the elements in the pending list, as would be the case in older JDKs, the JVM will fall back to the old behaviour of using the next field for that purpose.
Reviewed-by: jcoomes, mchung, stefank
author | ysr |
---|---|
date | Wed, 07 Sep 2011 13:55:42 -0700 |
parents | c2bf0120ee5d |
children | 4dfb2df418f2 |
comparison
equal
deleted
inserted
replaced
3916:05550041d664 | 3917:eca1193ca245 |
---|---|
34 #include "runtime/jniHandles.hpp" | 34 #include "runtime/jniHandles.hpp" |
35 | 35 |
36 ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL; | 36 ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL; |
37 ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy = NULL; | 37 ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy = NULL; |
38 const int subclasses_of_ref = REF_PHANTOM - REF_OTHER; | 38 const int subclasses_of_ref = REF_PHANTOM - REF_OTHER; |
39 bool ReferenceProcessor::_pending_list_uses_discovered_field = false; | |
39 | 40 |
40 // List of discovered references. | 41 // List of discovered references. |
41 class DiscoveredList { | 42 class DiscoveredList { |
42 public: | 43 public: |
43 DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { } | 44 DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { } |
85 vm_exit_during_initialization("Could not allocate reference policy object"); | 86 vm_exit_during_initialization("Could not allocate reference policy object"); |
86 } | 87 } |
87 guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery || | 88 guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery || |
88 RefDiscoveryPolicy == ReferentBasedDiscovery, | 89 RefDiscoveryPolicy == ReferentBasedDiscovery, |
89 "Unrecongnized RefDiscoveryPolicy"); | 90 "Unrecongnized RefDiscoveryPolicy"); |
91 _pending_list_uses_discovered_field = JDK_Version::current().pending_list_uses_discovered_field(); | |
90 } | 92 } |
91 | 93 |
92 ReferenceProcessor::ReferenceProcessor(MemRegion span, | 94 ReferenceProcessor::ReferenceProcessor(MemRegion span, |
93 bool mt_processing, | 95 bool mt_processing, |
94 int mt_processing_degree, | 96 int mt_processing_degree, |
120 // Initialized all entries to NULL | 122 // Initialized all entries to NULL |
121 for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) { | 123 for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) { |
122 _discoveredSoftRefs[i].set_head(NULL); | 124 _discoveredSoftRefs[i].set_head(NULL); |
123 _discoveredSoftRefs[i].set_length(0); | 125 _discoveredSoftRefs[i].set_length(0); |
124 } | 126 } |
125 // If we do barreirs, cache a copy of the barrier set. | 127 // If we do barriers, cache a copy of the barrier set. |
126 if (discovered_list_needs_barrier) { | 128 if (discovered_list_needs_barrier) { |
127 _bs = Universe::heap()->barrier_set(); | 129 _bs = Universe::heap()->barrier_set(); |
128 } | 130 } |
129 setup_policy(false /* default soft ref policy */); | 131 setup_policy(false /* default soft ref policy */); |
130 } | 132 } |
305 } | 307 } |
306 | 308 |
307 void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list, | 309 void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list, |
308 HeapWord* pending_list_addr) { | 310 HeapWord* pending_list_addr) { |
309 // Given a list of refs linked through the "discovered" field | 311 // Given a list of refs linked through the "discovered" field |
310 // (java.lang.ref.Reference.discovered) chain them through the | 312 // (java.lang.ref.Reference.discovered), self-loop their "next" field |
311 // "next" field (java.lang.ref.Reference.next) and prepend | 313 // thus distinguishing them from active References, then |
312 // to the pending list. | 314 // prepend them to the pending list. |
315 // BKWRD COMPATIBILITY NOTE: For older JDKs (prior to the fix for 4956777), | |
316 // the "next" field is used to chain the pending list, not the discovered | |
317 // field. | |
318 | |
313 if (TraceReferenceGC && PrintGCDetails) { | 319 if (TraceReferenceGC && PrintGCDetails) { |
314 gclog_or_tty->print_cr("ReferenceProcessor::enqueue_discovered_reflist list " | 320 gclog_or_tty->print_cr("ReferenceProcessor::enqueue_discovered_reflist list " |
315 INTPTR_FORMAT, (address)refs_list.head()); | 321 INTPTR_FORMAT, (address)refs_list.head()); |
316 } | 322 } |
317 | 323 |
318 oop obj = NULL; | 324 oop obj = NULL; |
319 oop next = refs_list.head(); | 325 oop next_d = refs_list.head(); |
320 // Walk down the list, copying the discovered field into | 326 if (pending_list_uses_discovered_field()) { // New behaviour |
321 // the next field and clearing it. | 327 // Walk down the list, self-looping the next field |
322 while (obj != next) { | 328 // so that the References are not considered active. |
323 obj = next; | 329 while (obj != next_d) { |
324 assert(obj->is_instanceRef(), "should be reference object"); | 330 obj = next_d; |
325 next = java_lang_ref_Reference::discovered(obj); | 331 assert(obj->is_instanceRef(), "should be reference object"); |
326 if (TraceReferenceGC && PrintGCDetails) { | 332 next_d = java_lang_ref_Reference::discovered(obj); |
327 gclog_or_tty->print_cr(" obj " INTPTR_FORMAT "/next " INTPTR_FORMAT, | 333 if (TraceReferenceGC && PrintGCDetails) { |
328 obj, next); | 334 gclog_or_tty->print_cr(" obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT, |
329 } | 335 obj, next_d); |
330 assert(java_lang_ref_Reference::next(obj) == NULL, | 336 } |
331 "The reference should not be enqueued"); | 337 assert(java_lang_ref_Reference::next(obj) == NULL, |
332 if (next == obj) { // obj is last | 338 "Reference not active; should not be discovered"); |
333 // Swap refs_list into pendling_list_addr and | 339 // Self-loop next, so as to make Ref not active. |
334 // set obj's next to what we read from pending_list_addr. | 340 java_lang_ref_Reference::set_next(obj, obj); |
335 oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr); | 341 if (next_d == obj) { // obj is last |
336 // Need oop_check on pending_list_addr above; | 342 // Swap refs_list into pendling_list_addr and |
337 // see special oop-check code at the end of | 343 // set obj's discovered to what we read from pending_list_addr. |
338 // enqueue_discovered_reflists() further below. | 344 oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr); |
339 if (old == NULL) { | 345 // Need oop_check on pending_list_addr above; |
340 // obj should be made to point to itself, since | 346 // see special oop-check code at the end of |
341 // pending list was empty. | 347 // enqueue_discovered_reflists() further below. |
342 java_lang_ref_Reference::set_next(obj, obj); | 348 java_lang_ref_Reference::set_discovered(obj, old); // old may be NULL |
349 } | |
350 } | |
351 } else { // Old behaviour | |
352 // Walk down the list, copying the discovered field into | |
353 // the next field and clearing the discovered field. | |
354 while (obj != next_d) { | |
355 obj = next_d; | |
356 assert(obj->is_instanceRef(), "should be reference object"); | |
357 next_d = java_lang_ref_Reference::discovered(obj); | |
358 if (TraceReferenceGC && PrintGCDetails) { | |
359 gclog_or_tty->print_cr(" obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT, | |
360 obj, next_d); | |
361 } | |
362 assert(java_lang_ref_Reference::next(obj) == NULL, | |
363 "The reference should not be enqueued"); | |
364 if (next_d == obj) { // obj is last | |
365 // Swap refs_list into pendling_list_addr and | |
366 // set obj's next to what we read from pending_list_addr. | |
367 oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr); | |
368 // Need oop_check on pending_list_addr above; | |
369 // see special oop-check code at the end of | |
370 // enqueue_discovered_reflists() further below. | |
371 if (old == NULL) { | |
372 // obj should be made to point to itself, since | |
373 // pending list was empty. | |
374 java_lang_ref_Reference::set_next(obj, obj); | |
375 } else { | |
376 java_lang_ref_Reference::set_next(obj, old); | |
377 } | |
343 } else { | 378 } else { |
344 java_lang_ref_Reference::set_next(obj, old); | 379 java_lang_ref_Reference::set_next(obj, next_d); |
345 } | 380 } |
346 } else { | 381 java_lang_ref_Reference::set_discovered(obj, (oop) NULL); |
347 java_lang_ref_Reference::set_next(obj, next); | 382 } |
348 } | |
349 java_lang_ref_Reference::set_discovered(obj, (oop) NULL); | |
350 } | 383 } |
351 } | 384 } |
352 | 385 |
353 // Parallel enqueue task | 386 // Parallel enqueue task |
354 class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask { | 387 class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask { |
613 // Close the reachable set | 646 // Close the reachable set |
614 complete_gc->do_void(); | 647 complete_gc->do_void(); |
615 NOT_PRODUCT( | 648 NOT_PRODUCT( |
616 if (PrintGCDetails && TraceReferenceGC) { | 649 if (PrintGCDetails && TraceReferenceGC) { |
617 gclog_or_tty->print_cr(" Dropped %d dead Refs out of %d " | 650 gclog_or_tty->print_cr(" Dropped %d dead Refs out of %d " |
618 "discovered Refs by policy list " INTPTR_FORMAT, | 651 "discovered Refs by policy, from list " INTPTR_FORMAT, |
619 iter.removed(), iter.processed(), (address)refs_list.head()); | 652 iter.removed(), iter.processed(), (address)refs_list.head()); |
620 } | 653 } |
621 ) | 654 ) |
622 } | 655 } |
623 | 656 |
1113 // Note: In the case of G1, this specific pre-barrier is strictly | 1146 // Note: In the case of G1, this specific pre-barrier is strictly |
1114 // not necessary because the only case we are interested in | 1147 // not necessary because the only case we are interested in |
1115 // here is when *discovered_addr is NULL (see the CAS further below), | 1148 // here is when *discovered_addr is NULL (see the CAS further below), |
1116 // so this will expand to nothing. As a result, we have manually | 1149 // so this will expand to nothing. As a result, we have manually |
1117 // elided this out for G1, but left in the test for some future | 1150 // elided this out for G1, but left in the test for some future |
1118 // collector that might have need for a pre-barrier here. | 1151 // collector that might have need for a pre-barrier here, e.g.:- |
1119 if (_discovered_list_needs_barrier && !UseG1GC) { | 1152 // _bs->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered); |
1120 if (UseCompressedOops) { | 1153 assert(!_discovered_list_needs_barrier || UseG1GC, |
1121 _bs->write_ref_field_pre((narrowOop*)discovered_addr, next_discovered); | 1154 "Need to check non-G1 collector: " |
1122 } else { | 1155 "may need a pre-write-barrier for CAS from NULL below"); |
1123 _bs->write_ref_field_pre((oop*)discovered_addr, next_discovered); | |
1124 } | |
1125 guarantee(false, "Need to check non-G1 collector"); | |
1126 } | |
1127 oop retest = oopDesc::atomic_compare_exchange_oop(next_discovered, discovered_addr, | 1156 oop retest = oopDesc::atomic_compare_exchange_oop(next_discovered, discovered_addr, |
1128 NULL); | 1157 NULL); |
1129 if (retest == NULL) { | 1158 if (retest == NULL) { |
1130 // This thread just won the right to enqueue the object. | 1159 // This thread just won the right to enqueue the object. |
1131 // We have separate lists for enqueueing so no synchronization | 1160 // We have separate lists for enqueueing, so no synchronization |
1132 // is necessary. | 1161 // is necessary. |
1133 refs_list.set_head(obj); | 1162 refs_list.set_head(obj); |
1134 refs_list.inc_length(1); | 1163 refs_list.inc_length(1); |
1135 if (_discovered_list_needs_barrier) { | 1164 if (_discovered_list_needs_barrier) { |
1136 _bs->write_ref_field((void*)discovered_addr, next_discovered); | 1165 _bs->write_ref_field((void*)discovered_addr, next_discovered); |
1137 } | 1166 } |
1138 | 1167 |
1139 if (TraceReferenceGC) { | 1168 if (TraceReferenceGC) { |
1140 gclog_or_tty->print_cr("Enqueued reference (mt) (" INTPTR_FORMAT ": %s)", | 1169 gclog_or_tty->print_cr("Discovered reference (mt) (" INTPTR_FORMAT ": %s)", |
1141 obj, obj->blueprint()->internal_name()); | 1170 obj, obj->blueprint()->internal_name()); |
1142 } | 1171 } |
1143 } else { | 1172 } else { |
1144 // If retest was non NULL, another thread beat us to it: | 1173 // If retest was non NULL, another thread beat us to it: |
1145 // The reference has already been discovered... | 1174 // The reference has already been discovered... |
1146 if (TraceReferenceGC) { | 1175 if (TraceReferenceGC) { |
1147 gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)", | 1176 gclog_or_tty->print_cr("Already discovered reference (" INTPTR_FORMAT ": %s)", |
1148 obj, obj->blueprint()->internal_name()); | 1177 obj, obj->blueprint()->internal_name()); |
1149 } | 1178 } |
1150 } | 1179 } |
1151 } | 1180 } |
1152 | 1181 |
1167 // We mention two of several possible choices here: | 1196 // We mention two of several possible choices here: |
1168 // #0: if the reference object is not in the "originating generation" | 1197 // #0: if the reference object is not in the "originating generation" |
1169 // (or part of the heap being collected, indicated by our "span" | 1198 // (or part of the heap being collected, indicated by our "span" |
1170 // we don't treat it specially (i.e. we scan it as we would | 1199 // we don't treat it specially (i.e. we scan it as we would |
1171 // a normal oop, treating its references as strong references). | 1200 // a normal oop, treating its references as strong references). |
1172 // This means that references can't be enqueued unless their | 1201 // This means that references can't be discovered unless their |
1173 // referent is also in the same span. This is the simplest, | 1202 // referent is also in the same span. This is the simplest, |
1174 // most "local" and most conservative approach, albeit one | 1203 // most "local" and most conservative approach, albeit one |
1175 // that may cause weak references to be enqueued least promptly. | 1204 // that may cause weak references to be enqueued least promptly. |
1176 // We call this choice the "ReferenceBasedDiscovery" policy. | 1205 // We call this choice the "ReferenceBasedDiscovery" policy. |
1177 // #1: the reference object may be in any generation (span), but if | 1206 // #1: the reference object may be in any generation (span), but if |
1189 // in certain cases, enqueue references somewhat sooner than | 1218 // in certain cases, enqueue references somewhat sooner than |
1190 // might Policy #0 above, but at marginally increased cost | 1219 // might Policy #0 above, but at marginally increased cost |
1191 // and complexity in processing these references. | 1220 // and complexity in processing these references. |
1192 // We call this choice the "RefeferentBasedDiscovery" policy. | 1221 // We call this choice the "RefeferentBasedDiscovery" policy. |
1193 bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) { | 1222 bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) { |
1194 // We enqueue references only if we are discovering refs | 1223 // Make sure we are discovering refs (rather than processing discovered refs). |
1195 // (rather than processing discovered refs). | |
1196 if (!_discovering_refs || !RegisterReferences) { | 1224 if (!_discovering_refs || !RegisterReferences) { |
1197 return false; | 1225 return false; |
1198 } | 1226 } |
1199 // We only enqueue active references. | 1227 // We only discover active references. |
1200 oop next = java_lang_ref_Reference::next(obj); | 1228 oop next = java_lang_ref_Reference::next(obj); |
1201 if (next != NULL) { | 1229 if (next != NULL) { // Ref is no longer active |
1202 return false; | 1230 return false; |
1203 } | 1231 } |
1204 | 1232 |
1205 HeapWord* obj_addr = (HeapWord*)obj; | 1233 HeapWord* obj_addr = (HeapWord*)obj; |
1206 if (RefDiscoveryPolicy == ReferenceBasedDiscovery && | 1234 if (RefDiscoveryPolicy == ReferenceBasedDiscovery && |
1209 // don't treat it specially (i.e. we want to scan it as a normal | 1237 // don't treat it specially (i.e. we want to scan it as a normal |
1210 // object with strong references). | 1238 // object with strong references). |
1211 return false; | 1239 return false; |
1212 } | 1240 } |
1213 | 1241 |
1214 // We only enqueue references whose referents are not (yet) strongly | 1242 // We only discover references whose referents are not (yet) |
1215 // reachable. | 1243 // known to be strongly reachable. |
1216 if (is_alive_non_header() != NULL) { | 1244 if (is_alive_non_header() != NULL) { |
1217 verify_referent(obj); | 1245 verify_referent(obj); |
1218 if (is_alive_non_header()->do_object_b(java_lang_ref_Reference::referent(obj))) { | 1246 if (is_alive_non_header()->do_object_b(java_lang_ref_Reference::referent(obj))) { |
1219 return false; // referent is reachable | 1247 return false; // referent is reachable |
1220 } | 1248 } |
1236 const oop discovered = java_lang_ref_Reference::discovered(obj); | 1264 const oop discovered = java_lang_ref_Reference::discovered(obj); |
1237 assert(discovered->is_oop_or_null(), "bad discovered field"); | 1265 assert(discovered->is_oop_or_null(), "bad discovered field"); |
1238 if (discovered != NULL) { | 1266 if (discovered != NULL) { |
1239 // The reference has already been discovered... | 1267 // The reference has already been discovered... |
1240 if (TraceReferenceGC) { | 1268 if (TraceReferenceGC) { |
1241 gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)", | 1269 gclog_or_tty->print_cr("Already discovered reference (" INTPTR_FORMAT ": %s)", |
1242 obj, obj->blueprint()->internal_name()); | 1270 obj, obj->blueprint()->internal_name()); |
1243 } | 1271 } |
1244 if (RefDiscoveryPolicy == ReferentBasedDiscovery) { | 1272 if (RefDiscoveryPolicy == ReferentBasedDiscovery) { |
1245 // assumes that an object is not processed twice; | 1273 // assumes that an object is not processed twice; |
1246 // if it's been already discovered it must be on another | 1274 // if it's been already discovered it must be on another |
1258 } | 1286 } |
1259 } | 1287 } |
1260 | 1288 |
1261 if (RefDiscoveryPolicy == ReferentBasedDiscovery) { | 1289 if (RefDiscoveryPolicy == ReferentBasedDiscovery) { |
1262 verify_referent(obj); | 1290 verify_referent(obj); |
1263 // enqueue if and only if either: | 1291 // Discover if and only if EITHER: |
1264 // reference is in our span or | 1292 // .. reference is in our span, OR |
1265 // we are an atomic collector and referent is in our span | 1293 // .. we are an atomic collector and referent is in our span |
1266 if (_span.contains(obj_addr) || | 1294 if (_span.contains(obj_addr) || |
1267 (discovery_is_atomic() && | 1295 (discovery_is_atomic() && |
1268 _span.contains(java_lang_ref_Reference::referent(obj)))) { | 1296 _span.contains(java_lang_ref_Reference::referent(obj)))) { |
1269 // should_enqueue = true; | 1297 // should_enqueue = true; |
1270 } else { | 1298 } else { |
1292 // The last ref must have its discovered field pointing to itself. | 1320 // The last ref must have its discovered field pointing to itself. |
1293 oop next_discovered = (current_head != NULL) ? current_head : obj; | 1321 oop next_discovered = (current_head != NULL) ? current_head : obj; |
1294 | 1322 |
1295 // As in the case further above, since we are over-writing a NULL | 1323 // As in the case further above, since we are over-writing a NULL |
1296 // pre-value, we can safely elide the pre-barrier here for the case of G1. | 1324 // pre-value, we can safely elide the pre-barrier here for the case of G1. |
1325 // e.g.:- _bs->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered); | |
1297 assert(discovered == NULL, "control point invariant"); | 1326 assert(discovered == NULL, "control point invariant"); |
1298 if (_discovered_list_needs_barrier && !UseG1GC) { // safe to elide for G1 | 1327 assert(!_discovered_list_needs_barrier || UseG1GC, |
1299 if (UseCompressedOops) { | 1328 "For non-G1 collector, may need a pre-write-barrier for CAS from NULL below"); |
1300 _bs->write_ref_field_pre((narrowOop*)discovered_addr, next_discovered); | |
1301 } else { | |
1302 _bs->write_ref_field_pre((oop*)discovered_addr, next_discovered); | |
1303 } | |
1304 guarantee(false, "Need to check non-G1 collector"); | |
1305 } | |
1306 oop_store_raw(discovered_addr, next_discovered); | 1329 oop_store_raw(discovered_addr, next_discovered); |
1307 if (_discovered_list_needs_barrier) { | 1330 if (_discovered_list_needs_barrier) { |
1308 _bs->write_ref_field((void*)discovered_addr, next_discovered); | 1331 _bs->write_ref_field((void*)discovered_addr, next_discovered); |
1309 } | 1332 } |
1310 list->set_head(obj); | 1333 list->set_head(obj); |
1311 list->inc_length(1); | 1334 list->inc_length(1); |
1312 | 1335 |
1313 if (TraceReferenceGC) { | 1336 if (TraceReferenceGC) { |
1314 gclog_or_tty->print_cr("Enqueued reference (" INTPTR_FORMAT ": %s)", | 1337 gclog_or_tty->print_cr("Discovered reference (" INTPTR_FORMAT ": %s)", |
1315 obj, obj->blueprint()->internal_name()); | 1338 obj, obj->blueprint()->internal_name()); |
1316 } | 1339 } |
1317 } | 1340 } |
1318 assert(obj->is_oop(), "Enqueued a bad reference"); | 1341 assert(obj->is_oop(), "Discovered a bad reference"); |
1319 verify_referent(obj); | 1342 verify_referent(obj); |
1320 return true; | 1343 return true; |
1321 } | 1344 } |
1322 | 1345 |
1323 // Preclean the discovered references by removing those | 1346 // Preclean the discovered references by removing those |