Mercurial > hg > graal-jvmci-8
comparison src/share/vm/memory/referenceProcessor.cpp @ 17746:a258f8cb530f
8029255: G1: Reference processing should not enqueue references on the shared SATB queue
Reviewed-by: brutisso, tschatzl
author | pliden |
---|---|
date | Fri, 10 Jan 2014 09:53:53 +0100 |
parents | 190899198332 |
children | 63a4eb8bcd23 d60ecdb2773e |
comparison
equal
deleted
inserted
replaced
17745:c96e9c8adb81 | 17746:a258f8cb530f |
---|---|
98 bool discovered_list_needs_barrier) : | 98 bool discovered_list_needs_barrier) : |
99 _discovering_refs(false), | 99 _discovering_refs(false), |
100 _enqueuing_is_done(false), | 100 _enqueuing_is_done(false), |
101 _is_alive_non_header(is_alive_non_header), | 101 _is_alive_non_header(is_alive_non_header), |
102 _discovered_list_needs_barrier(discovered_list_needs_barrier), | 102 _discovered_list_needs_barrier(discovered_list_needs_barrier), |
103 _bs(NULL), | |
104 _processing_is_mt(mt_processing), | 103 _processing_is_mt(mt_processing), |
105 _next_id(0) | 104 _next_id(0) |
106 { | 105 { |
107 _span = span; | 106 _span = span; |
108 _discovery_is_atomic = atomic_discovery; | 107 _discovery_is_atomic = atomic_discovery; |
124 for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) { | 123 for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) { |
125 _discovered_refs[i].set_head(NULL); | 124 _discovered_refs[i].set_head(NULL); |
126 _discovered_refs[i].set_length(0); | 125 _discovered_refs[i].set_length(0); |
127 } | 126 } |
128 | 127 |
129 // If we do barriers, cache a copy of the barrier set. | |
130 if (discovered_list_needs_barrier) { | |
131 _bs = Universe::heap()->barrier_set(); | |
132 } | |
133 setup_policy(false /* default soft ref policy */); | 128 setup_policy(false /* default soft ref policy */); |
134 } | 129 } |
135 | 130 |
136 #ifndef PRODUCT | 131 #ifndef PRODUCT |
137 void ReferenceProcessor::verify_no_references_recorded() { | 132 void ReferenceProcessor::verify_no_references_recorded() { |
315 T old_pending_list_value = *pending_list_addr; | 310 T old_pending_list_value = *pending_list_addr; |
316 | 311 |
317 // Enqueue references that are not made active again, and | 312 // Enqueue references that are not made active again, and |
318 // clear the decks for the next collection (cycle). | 313 // clear the decks for the next collection (cycle). |
319 ref->enqueue_discovered_reflists((HeapWord*)pending_list_addr, task_executor); | 314 ref->enqueue_discovered_reflists((HeapWord*)pending_list_addr, task_executor); |
320 // Do the oop-check on pending_list_addr missed in | 315 // Do the post-barrier on pending_list_addr missed in |
321 // enqueue_discovered_reflist. We should probably | 316 // enqueue_discovered_reflist. |
322 // do a raw oop_check so that future such idempotent | 317 oopDesc::bs()->write_ref_field(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr)); |
323 // oop_stores relying on the oop-check side-effect | |
324 // may be elided automatically and safely without | |
325 // affecting correctness. | |
326 oop_store(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr)); | |
327 | 318 |
328 // Stop treating discovered references specially. | 319 // Stop treating discovered references specially. |
329 ref->disable_discovery(); | 320 ref->disable_discovery(); |
330 | 321 |
331 // Return true if new pending references were added | 322 // Return true if new pending references were added |
370 (void *)obj, (void *)next_d); | 361 (void *)obj, (void *)next_d); |
371 } | 362 } |
372 assert(java_lang_ref_Reference::next(obj) == NULL, | 363 assert(java_lang_ref_Reference::next(obj) == NULL, |
373 "Reference not active; should not be discovered"); | 364 "Reference not active; should not be discovered"); |
374 // Self-loop next, so as to make Ref not active. | 365 // Self-loop next, so as to make Ref not active. |
375 java_lang_ref_Reference::set_next(obj, obj); | 366 // Post-barrier not needed when looping to self. |
367 java_lang_ref_Reference::set_next_raw(obj, obj); | |
376 if (next_d == obj) { // obj is last | 368 if (next_d == obj) { // obj is last |
377 // Swap refs_list into pending_list_addr and | 369 // Swap refs_list into pending_list_addr and |
378 // set obj's discovered to what we read from pending_list_addr. | 370 // set obj's discovered to what we read from pending_list_addr. |
379 oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr); | 371 oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr); |
380 // Need oop_check on pending_list_addr above; | 372 // Need post-barrier on pending_list_addr above; |
381 // see special oop-check code at the end of | 373 // see special post-barrier code at the end of |
382 // enqueue_discovered_reflists() further below. | 374 // enqueue_discovered_reflists() further below. |
383 java_lang_ref_Reference::set_discovered(obj, old); // old may be NULL | 375 java_lang_ref_Reference::set_discovered_raw(obj, old); // old may be NULL |
376 oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), old); | |
384 } | 377 } |
385 } | 378 } |
386 } else { // Old behaviour | 379 } else { // Old behaviour |
387 // Walk down the list, copying the discovered field into | 380 // Walk down the list, copying the discovered field into |
388 // the next field and clearing the discovered field. | 381 // the next field and clearing the discovered field. |
514 // For G1 we don't want to use set_next - it | 507 // For G1 we don't want to use set_next - it |
515 // will dirty the card for the next field of | 508 // will dirty the card for the next field of |
516 // the reference object and will fail | 509 // the reference object and will fail |
517 // CT verification. | 510 // CT verification. |
518 if (UseG1GC) { | 511 if (UseG1GC) { |
519 BarrierSet* bs = oopDesc::bs(); | |
520 HeapWord* next_addr = java_lang_ref_Reference::next_addr(_ref); | 512 HeapWord* next_addr = java_lang_ref_Reference::next_addr(_ref); |
521 | |
522 if (UseCompressedOops) { | 513 if (UseCompressedOops) { |
523 bs->write_ref_field_pre((narrowOop*)next_addr, NULL); | 514 oopDesc::bs()->write_ref_field_pre((narrowOop*)next_addr, NULL); |
524 } else { | 515 } else { |
525 bs->write_ref_field_pre((oop*)next_addr, NULL); | 516 oopDesc::bs()->write_ref_field_pre((oop*)next_addr, NULL); |
526 } | 517 } |
527 java_lang_ref_Reference::set_next_raw(_ref, NULL); | 518 java_lang_ref_Reference::set_next_raw(_ref, NULL); |
528 } else { | 519 } else { |
529 java_lang_ref_Reference::set_next(_ref, NULL); | 520 java_lang_ref_Reference::set_next(_ref, NULL); |
530 } | 521 } |
788 private: | 779 private: |
789 bool _clear_referent; | 780 bool _clear_referent; |
790 }; | 781 }; |
791 | 782 |
792 void ReferenceProcessor::set_discovered(oop ref, oop value) { | 783 void ReferenceProcessor::set_discovered(oop ref, oop value) { |
784 java_lang_ref_Reference::set_discovered_raw(ref, value); | |
793 if (_discovered_list_needs_barrier) { | 785 if (_discovered_list_needs_barrier) { |
794 java_lang_ref_Reference::set_discovered(ref, value); | 786 oopDesc::bs()->write_ref_field(ref, value); |
795 } else { | |
796 java_lang_ref_Reference::set_discovered_raw(ref, value); | |
797 } | 787 } |
798 } | 788 } |
799 | 789 |
800 // Balances reference queues. | 790 // Balances reference queues. |
801 // Move entries from all queues[0, 1, ..., _max_num_q-1] to | 791 // Move entries from all queues[0, 1, ..., _max_num_q-1] to |
1083 // not necessary because the only case we are interested in | 1073 // not necessary because the only case we are interested in |
1084 // here is when *discovered_addr is NULL (see the CAS further below), | 1074 // here is when *discovered_addr is NULL (see the CAS further below), |
1085 // so this will expand to nothing. As a result, we have manually | 1075 // so this will expand to nothing. As a result, we have manually |
1086 // elided this out for G1, but left in the test for some future | 1076 // elided this out for G1, but left in the test for some future |
1087 // collector that might have need for a pre-barrier here, e.g.:- | 1077 // collector that might have need for a pre-barrier here, e.g.:- |
1088 // _bs->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered); | 1078 // oopDesc::bs()->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered); |
1089 assert(!_discovered_list_needs_barrier || UseG1GC, | 1079 assert(!_discovered_list_needs_barrier || UseG1GC, |
1090 "Need to check non-G1 collector: " | 1080 "Need to check non-G1 collector: " |
1091 "may need a pre-write-barrier for CAS from NULL below"); | 1081 "may need a pre-write-barrier for CAS from NULL below"); |
1092 oop retest = oopDesc::atomic_compare_exchange_oop(next_discovered, discovered_addr, | 1082 oop retest = oopDesc::atomic_compare_exchange_oop(next_discovered, discovered_addr, |
1093 NULL); | 1083 NULL); |
1096 // We have separate lists for enqueueing, so no synchronization | 1086 // We have separate lists for enqueueing, so no synchronization |
1097 // is necessary. | 1087 // is necessary. |
1098 refs_list.set_head(obj); | 1088 refs_list.set_head(obj); |
1099 refs_list.inc_length(1); | 1089 refs_list.inc_length(1); |
1100 if (_discovered_list_needs_barrier) { | 1090 if (_discovered_list_needs_barrier) { |
1101 _bs->write_ref_field((void*)discovered_addr, next_discovered); | 1091 oopDesc::bs()->write_ref_field((void*)discovered_addr, next_discovered); |
1102 } | 1092 } |
1103 | 1093 |
1104 if (TraceReferenceGC) { | 1094 if (TraceReferenceGC) { |
1105 gclog_or_tty->print_cr("Discovered reference (mt) (" INTPTR_FORMAT ": %s)", | 1095 gclog_or_tty->print_cr("Discovered reference (mt) (" INTPTR_FORMAT ": %s)", |
1106 (void *)obj, obj->klass()->internal_name()); | 1096 (void *)obj, obj->klass()->internal_name()); |
1258 // The last ref must have its discovered field pointing to itself. | 1248 // The last ref must have its discovered field pointing to itself. |
1259 oop next_discovered = (current_head != NULL) ? current_head : obj; | 1249 oop next_discovered = (current_head != NULL) ? current_head : obj; |
1260 | 1250 |
1261 // As in the case further above, since we are over-writing a NULL | 1251 // As in the case further above, since we are over-writing a NULL |
1262 // pre-value, we can safely elide the pre-barrier here for the case of G1. | 1252 // pre-value, we can safely elide the pre-barrier here for the case of G1. |
1263 // e.g.:- _bs->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered); | 1253 // e.g.:- oopDesc::bs()->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered); |
1264 assert(discovered == NULL, "control point invariant"); | 1254 assert(discovered == NULL, "control point invariant"); |
1265 assert(!_discovered_list_needs_barrier || UseG1GC, | 1255 assert(!_discovered_list_needs_barrier || UseG1GC, |
1266 "For non-G1 collector, may need a pre-write-barrier for CAS from NULL below"); | 1256 "For non-G1 collector, may need a pre-write-barrier for CAS from NULL below"); |
1267 oop_store_raw(discovered_addr, next_discovered); | 1257 oop_store_raw(discovered_addr, next_discovered); |
1268 if (_discovered_list_needs_barrier) { | 1258 if (_discovered_list_needs_barrier) { |
1269 _bs->write_ref_field((void*)discovered_addr, next_discovered); | 1259 oopDesc::bs()->write_ref_field((void*)discovered_addr, next_discovered); |
1270 } | 1260 } |
1271 list->set_head(obj); | 1261 list->set_head(obj); |
1272 list->inc_length(1); | 1262 list->inc_length(1); |
1273 | 1263 |
1274 if (TraceReferenceGC) { | 1264 if (TraceReferenceGC) { |