graal-compiler: comparison of src/share/vm/memory/referenceProcessor.cpp @ 14909:4ca6dc0799b6
Backout jdk9 merge
author | Gilles Duboscq <duboscq@ssw.jku.at> |
---|---|
date | Tue, 01 Apr 2014 13:57:07 +0200 |
parents | d8041d695d19 |
children | 52b4284cb496 |
14908:8db6e76cb658 | 14909:4ca6dc0799b6 |
---|---|
43 void referenceProcessor_init() { | 43 void referenceProcessor_init() { |
44 ReferenceProcessor::init_statics(); | 44 ReferenceProcessor::init_statics(); |
45 } | 45 } |
46 | 46 |
47 void ReferenceProcessor::init_statics() { | 47 void ReferenceProcessor::init_statics() { |
48 // We need a monotonically non-decreasing time in ms but | 48 // We need a monotonically non-deccreasing time in ms but |
49 // os::javaTimeMillis() does not guarantee monotonicity. | 49 // os::javaTimeMillis() does not guarantee monotonicity. |
50 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; | 50 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; |
51 | 51 |
52 // Initialize the soft ref timestamp clock. | 52 // Initialize the soft ref timestamp clock. |
53 _soft_ref_timestamp_clock = now; | 53 _soft_ref_timestamp_clock = now; |
60 if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) { | 60 if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) { |
61 vm_exit_during_initialization("Could not allocate reference policy object"); | 61 vm_exit_during_initialization("Could not allocate reference policy object"); |
62 } | 62 } |
63 guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery || | 63 guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery || |
64 RefDiscoveryPolicy == ReferentBasedDiscovery, | 64 RefDiscoveryPolicy == ReferentBasedDiscovery, |
65 "Unrecognized RefDiscoveryPolicy"); | 65 "Unrecongnized RefDiscoveryPolicy"); |
66 _pending_list_uses_discovered_field = JDK_Version::current().pending_list_uses_discovered_field(); | 66 _pending_list_uses_discovered_field = JDK_Version::current().pending_list_uses_discovered_field(); |
67 } | 67 } |
68 | 68 |
69 void ReferenceProcessor::enable_discovery(bool verify_disabled, bool check_no_refs) { | 69 void ReferenceProcessor::enable_discovery(bool verify_disabled, bool check_no_refs) { |
70 #ifdef ASSERT | 70 #ifdef ASSERT |
93 uint mt_processing_degree, | 93 uint mt_processing_degree, |
94 bool mt_discovery, | 94 bool mt_discovery, |
95 uint mt_discovery_degree, | 95 uint mt_discovery_degree, |
96 bool atomic_discovery, | 96 bool atomic_discovery, |
97 BoolObjectClosure* is_alive_non_header, | 97 BoolObjectClosure* is_alive_non_header, |
98 bool discovered_list_needs_post_barrier) : | 98 bool discovered_list_needs_barrier) : |
99 _discovering_refs(false), | 99 _discovering_refs(false), |
100 _enqueuing_is_done(false), | 100 _enqueuing_is_done(false), |
101 _is_alive_non_header(is_alive_non_header), | 101 _is_alive_non_header(is_alive_non_header), |
102 _discovered_list_needs_post_barrier(discovered_list_needs_post_barrier), | 102 _discovered_list_needs_barrier(discovered_list_needs_barrier), |
| 103 _bs(NULL), |
103 _processing_is_mt(mt_processing), | 104 _processing_is_mt(mt_processing), |
104 _next_id(0) | 105 _next_id(0) |
105 { | 106 { |
106 _span = span; | 107 _span = span; |
107 _discovery_is_atomic = atomic_discovery; | 108 _discovery_is_atomic = atomic_discovery; |
123 for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) { | 124 for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) { |
124 _discovered_refs[i].set_head(NULL); | 125 _discovered_refs[i].set_head(NULL); |
125 _discovered_refs[i].set_length(0); | 126 _discovered_refs[i].set_length(0); |
126 } | 127 } |
127 | 128 |
| 129 // If we do barriers, cache a copy of the barrier set. |
| 130 if (discovered_list_needs_barrier) { |
| 131 _bs = Universe::heap()->barrier_set(); |
| 132 } |
128 setup_policy(false /* default soft ref policy */); | 133 setup_policy(false /* default soft ref policy */); |
129 } | 134 } |
130 | 135 |
131 #ifndef PRODUCT | 136 #ifndef PRODUCT |
132 void ReferenceProcessor::verify_no_references_recorded() { | 137 void ReferenceProcessor::verify_no_references_recorded() { |
150 | 155 |
151 void ReferenceProcessor::update_soft_ref_master_clock() { | 156 void ReferenceProcessor::update_soft_ref_master_clock() { |
152 // Update (advance) the soft ref master clock field. This must be done | 157 // Update (advance) the soft ref master clock field. This must be done |
153 // after processing the soft ref list. | 158 // after processing the soft ref list. |
154 | 159 |
155 // We need a monotonically non-decreasing time in ms but | 160 // We need a monotonically non-deccreasing time in ms but |
156 // os::javaTimeMillis() does not guarantee monotonicity. | 161 // os::javaTimeMillis() does not guarantee monotonicity. |
157 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; | 162 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; |
158 jlong soft_ref_clock = java_lang_ref_SoftReference::clock(); | 163 jlong soft_ref_clock = java_lang_ref_SoftReference::clock(); |
159 assert(soft_ref_clock == _soft_ref_timestamp_clock, "soft ref clocks out of sync"); | 164 assert(soft_ref_clock == _soft_ref_timestamp_clock, "soft ref clocks out of sync"); |
160 | 165 |
166 ) | 171 ) |
167 // The values of now and _soft_ref_timestamp_clock are set using | 172 // The values of now and _soft_ref_timestamp_clock are set using |
168 // javaTimeNanos(), which is guaranteed to be monotonically | 173 // javaTimeNanos(), which is guaranteed to be monotonically |
169 // non-decreasing provided the underlying platform provides such | 174 // non-decreasing provided the underlying platform provides such |
170 // a time source (and it is bug free). | 175 // a time source (and it is bug free). |
171 // In product mode, however, protect ourselves from non-monotonicity. | 176 // In product mode, however, protect ourselves from non-monotonicty. |
172 if (now > _soft_ref_timestamp_clock) { | 177 if (now > _soft_ref_timestamp_clock) { |
173 _soft_ref_timestamp_clock = now; | 178 _soft_ref_timestamp_clock = now; |
174 java_lang_ref_SoftReference::set_clock(now); | 179 java_lang_ref_SoftReference::set_clock(now); |
175 } | 180 } |
176 // Else leave clock stalled at its old value until time progresses | 181 // Else leave clock stalled at its old value until time progresses |
310 T old_pending_list_value = *pending_list_addr; | 315 T old_pending_list_value = *pending_list_addr; |
311 | 316 |
312 // Enqueue references that are not made active again, and | 317 // Enqueue references that are not made active again, and |
313 // clear the decks for the next collection (cycle). | 318 // clear the decks for the next collection (cycle). |
314 ref->enqueue_discovered_reflists((HeapWord*)pending_list_addr, task_executor); | 319 ref->enqueue_discovered_reflists((HeapWord*)pending_list_addr, task_executor); |
315 // Do the post-barrier on pending_list_addr missed in | 320 // Do the oop-check on pending_list_addr missed in |
316 // enqueue_discovered_reflist. | 321 // enqueue_discovered_reflist. We should probably |
317 oopDesc::bs()->write_ref_field(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr)); | 322 // do a raw oop_check so that future such idempotent |
| 323 // oop_stores relying on the oop-check side-effect |
| 324 // may be elided automatically and safely without |
| 325 // affecting correctness. |
| 326 oop_store(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr)); |
318 | 327 |
319 // Stop treating discovered references specially. | 328 // Stop treating discovered references specially. |
320 ref->disable_discovery(); | 329 ref->disable_discovery(); |
321 | 330 |
322 // Return true if new pending references were added | 331 // Return true if new pending references were added |
347 INTPTR_FORMAT, (address)refs_list.head()); | 356 INTPTR_FORMAT, (address)refs_list.head()); |
348 } | 357 } |
349 | 358 |
350 oop obj = NULL; | 359 oop obj = NULL; |
351 oop next_d = refs_list.head(); | 360 oop next_d = refs_list.head(); |
352 if (pending_list_uses_discovered_field()) { // New behavior | 361 if (pending_list_uses_discovered_field()) { // New behaviour |
353 // Walk down the list, self-looping the next field | 362 // Walk down the list, self-looping the next field |
354 // so that the References are not considered active. | 363 // so that the References are not considered active. |
355 while (obj != next_d) { | 364 while (obj != next_d) { |
356 obj = next_d; | 365 obj = next_d; |
357 assert(obj->is_instanceRef(), "should be reference object"); | 366 assert(obj->is_instanceRef(), "should be reference object"); |
361 (void *)obj, (void *)next_d); | 370 (void *)obj, (void *)next_d); |
362 } | 371 } |
363 assert(java_lang_ref_Reference::next(obj) == NULL, | 372 assert(java_lang_ref_Reference::next(obj) == NULL, |
364 "Reference not active; should not be discovered"); | 373 "Reference not active; should not be discovered"); |
365 // Self-loop next, so as to make Ref not active. | 374 // Self-loop next, so as to make Ref not active. |
366 // Post-barrier not needed when looping to self. | 375 java_lang_ref_Reference::set_next(obj, obj); |
367 java_lang_ref_Reference::set_next_raw(obj, obj); | |
368 if (next_d == obj) { // obj is last | 376 if (next_d == obj) { // obj is last |
369 // Swap refs_list into pending_list_addr and | 377 // Swap refs_list into pendling_list_addr and |
370 // set obj's discovered to what we read from pending_list_addr. | 378 // set obj's discovered to what we read from pending_list_addr. |
371 oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr); | 379 oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr); |
372 // Need post-barrier on pending_list_addr above; | 380 // Need oop_check on pending_list_addr above; |
373 // see special post-barrier code at the end of | 381 // see special oop-check code at the end of |
374 // enqueue_discovered_reflists() further below. | 382 // enqueue_discovered_reflists() further below. |
375 java_lang_ref_Reference::set_discovered_raw(obj, old); // old may be NULL | 383 java_lang_ref_Reference::set_discovered(obj, old); // old may be NULL |
376 oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), old); | |
377 } | 384 } |
378 } | 385 } |
379 } else { // Old behavior | 386 } else { // Old behaviour |
380 // Walk down the list, copying the discovered field into | 387 // Walk down the list, copying the discovered field into |
381 // the next field and clearing the discovered field. | 388 // the next field and clearing the discovered field. |
382 while (obj != next_d) { | 389 while (obj != next_d) { |
383 obj = next_d; | 390 obj = next_d; |
384 assert(obj->is_instanceRef(), "should be reference object"); | 391 assert(obj->is_instanceRef(), "should be reference object"); |
388 (void *)obj, (void *)next_d); | 395 (void *)obj, (void *)next_d); |
389 } | 396 } |
390 assert(java_lang_ref_Reference::next(obj) == NULL, | 397 assert(java_lang_ref_Reference::next(obj) == NULL, |
391 "The reference should not be enqueued"); | 398 "The reference should not be enqueued"); |
392 if (next_d == obj) { // obj is last | 399 if (next_d == obj) { // obj is last |
393 // Swap refs_list into pending_list_addr and | 400 // Swap refs_list into pendling_list_addr and |
394 // set obj's next to what we read from pending_list_addr. | 401 // set obj's next to what we read from pending_list_addr. |
395 oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr); | 402 oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr); |
396 // Need oop_check on pending_list_addr above; | 403 // Need oop_check on pending_list_addr above; |
397 // see special oop-check code at the end of | 404 // see special oop-check code at the end of |
398 // enqueue_discovered_reflists() further below. | 405 // enqueue_discovered_reflists() further below. |
488 // and _prev will be NULL. | 495 // and _prev will be NULL. |
489 new_next = _prev; | 496 new_next = _prev; |
490 } else { | 497 } else { |
491 new_next = _next; | 498 new_next = _next; |
492 } | 499 } |
493 // Remove Reference object from discovered list. Note that G1 does not need a | 500 |
494 // pre-barrier here because we know the Reference has already been found/marked, | 501 if (UseCompressedOops) { |
495 // that's how it ended up in the discovered list in the first place. | 502 // Remove Reference object from list. |
496 oop_store_raw(_prev_next, new_next); | 503 oopDesc::encode_store_heap_oop((narrowOop*)_prev_next, new_next); |
497 if (_discovered_list_needs_post_barrier && _prev_next != _refs_list.adr_head()) { | 504 } else { |
498 // Needs post-barrier and this is not the list head (which is not on the heap) | 505 // Remove Reference object from list. |
499 oopDesc::bs()->write_ref_field(_prev_next, new_next); | 506 oopDesc::store_heap_oop((oop*)_prev_next, new_next); |
500 } | 507 } |
501 NOT_PRODUCT(_removed++); | 508 NOT_PRODUCT(_removed++); |
502 _refs_list.dec_length(1); | 509 _refs_list.dec_length(1); |
503 } | 510 } |
504 | 511 |
507 // For G1 we don't want to use set_next - it | 514 // For G1 we don't want to use set_next - it |
508 // will dirty the card for the next field of | 515 // will dirty the card for the next field of |
509 // the reference object and will fail | 516 // the reference object and will fail |
510 // CT verification. | 517 // CT verification. |
511 if (UseG1GC) { | 518 if (UseG1GC) { |
| 519 BarrierSet* bs = oopDesc::bs(); |
512 HeapWord* next_addr = java_lang_ref_Reference::next_addr(_ref); | 520 HeapWord* next_addr = java_lang_ref_Reference::next_addr(_ref); |
| 521 |
513 if (UseCompressedOops) { | 522 if (UseCompressedOops) { |
514 oopDesc::bs()->write_ref_field_pre((narrowOop*)next_addr, NULL); | 523 bs->write_ref_field_pre((narrowOop*)next_addr, NULL); |
515 } else { | 524 } else { |
516 oopDesc::bs()->write_ref_field_pre((oop*)next_addr, NULL); | 525 bs->write_ref_field_pre((oop*)next_addr, NULL); |
517 } | 526 } |
518 java_lang_ref_Reference::set_next_raw(_ref, NULL); | 527 java_lang_ref_Reference::set_next_raw(_ref, NULL); |
519 } else { | 528 } else { |
520 java_lang_ref_Reference::set_next(_ref, NULL); | 529 java_lang_ref_Reference::set_next(_ref, NULL); |
521 } | 530 } |
542 ReferencePolicy* policy, | 551 ReferencePolicy* policy, |
543 BoolObjectClosure* is_alive, | 552 BoolObjectClosure* is_alive, |
544 OopClosure* keep_alive, | 553 OopClosure* keep_alive, |
545 VoidClosure* complete_gc) { | 554 VoidClosure* complete_gc) { |
546 assert(policy != NULL, "Must have a non-NULL policy"); | 555 assert(policy != NULL, "Must have a non-NULL policy"); |
547 DiscoveredListIterator iter(refs_list, keep_alive, is_alive, _discovered_list_needs_post_barrier); | 556 DiscoveredListIterator iter(refs_list, keep_alive, is_alive); |
548 // Decide which softly reachable refs should be kept alive. | 557 // Decide which softly reachable refs should be kept alive. |
549 while (iter.has_next()) { | 558 while (iter.has_next()) { |
550 iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */)); | 559 iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */)); |
551 bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive(); | 560 bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive(); |
552 if (referent_is_dead && | 561 if (referent_is_dead && |
582 void | 591 void |
583 ReferenceProcessor::pp2_work(DiscoveredList& refs_list, | 592 ReferenceProcessor::pp2_work(DiscoveredList& refs_list, |
584 BoolObjectClosure* is_alive, | 593 BoolObjectClosure* is_alive, |
585 OopClosure* keep_alive) { | 594 OopClosure* keep_alive) { |
586 assert(discovery_is_atomic(), "Error"); | 595 assert(discovery_is_atomic(), "Error"); |
587 DiscoveredListIterator iter(refs_list, keep_alive, is_alive, _discovered_list_needs_post_barrier); | 596 DiscoveredListIterator iter(refs_list, keep_alive, is_alive); |
588 while (iter.has_next()) { | 597 while (iter.has_next()) { |
589 iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */)); | 598 iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */)); |
590 DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());) | 599 DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());) |
591 assert(next == NULL, "Should not discover inactive Reference"); | 600 assert(next == NULL, "Should not discover inactive Reference"); |
592 if (iter.is_referent_alive()) { | 601 if (iter.is_referent_alive()) { |
619 ReferenceProcessor::pp2_work_concurrent_discovery(DiscoveredList& refs_list, | 628 ReferenceProcessor::pp2_work_concurrent_discovery(DiscoveredList& refs_list, |
620 BoolObjectClosure* is_alive, | 629 BoolObjectClosure* is_alive, |
621 OopClosure* keep_alive, | 630 OopClosure* keep_alive, |
622 VoidClosure* complete_gc) { | 631 VoidClosure* complete_gc) { |
623 assert(!discovery_is_atomic(), "Error"); | 632 assert(!discovery_is_atomic(), "Error"); |
624 DiscoveredListIterator iter(refs_list, keep_alive, is_alive, _discovered_list_needs_post_barrier); | 633 DiscoveredListIterator iter(refs_list, keep_alive, is_alive); |
625 while (iter.has_next()) { | 634 while (iter.has_next()) { |
626 iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */)); | 635 iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */)); |
627 HeapWord* next_addr = java_lang_ref_Reference::next_addr(iter.obj()); | 636 HeapWord* next_addr = java_lang_ref_Reference::next_addr(iter.obj()); |
628 oop next = java_lang_ref_Reference::next(iter.obj()); | 637 oop next = java_lang_ref_Reference::next(iter.obj()); |
629 if ((iter.referent() == NULL || iter.is_referent_alive() || | 638 if ((iter.referent() == NULL || iter.is_referent_alive() || |
662 bool clear_referent, | 671 bool clear_referent, |
663 BoolObjectClosure* is_alive, | 672 BoolObjectClosure* is_alive, |
664 OopClosure* keep_alive, | 673 OopClosure* keep_alive, |
665 VoidClosure* complete_gc) { | 674 VoidClosure* complete_gc) { |
666 ResourceMark rm; | 675 ResourceMark rm; |
667 DiscoveredListIterator iter(refs_list, keep_alive, is_alive, _discovered_list_needs_post_barrier); | 676 DiscoveredListIterator iter(refs_list, keep_alive, is_alive); |
668 while (iter.has_next()) { | 677 while (iter.has_next()) { |
669 iter.update_discovered(); | 678 iter.update_discovered(); |
670 iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */)); | 679 iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */)); |
671 if (clear_referent) { | 680 if (clear_referent) { |
672 // NULL out referent pointer | 681 // NULL out referent pointer |
779 private: | 788 private: |
780 bool _clear_referent; | 789 bool _clear_referent; |
781 }; | 790 }; |
782 | 791 |
783 void ReferenceProcessor::set_discovered(oop ref, oop value) { | 792 void ReferenceProcessor::set_discovered(oop ref, oop value) { |
784 java_lang_ref_Reference::set_discovered_raw(ref, value); | 793 if (_discovered_list_needs_barrier) { |
785 if (_discovered_list_needs_post_barrier) { | 794 java_lang_ref_Reference::set_discovered(ref, value); |
786 oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(ref), value); | 795 } else { |
| 796 java_lang_ref_Reference::set_discovered_raw(ref, value); |
787 } | 797 } |
788 } | 798 } |
789 | 799 |
790 // Balances reference queues. | 800 // Balances reference queues. |
791 // Move entries from all queues[0, 1, ..., _max_num_q-1] to | 801 // Move entries from all queues[0, 1, ..., _max_num_q-1] to |
978 } | 988 } |
979 } | 989 } |
980 | 990 |
981 void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list) { | 991 void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list) { |
982 assert(!discovery_is_atomic(), "Else why call this method?"); | 992 assert(!discovery_is_atomic(), "Else why call this method?"); |
983 DiscoveredListIterator iter(refs_list, NULL, NULL, _discovered_list_needs_post_barrier); | 993 DiscoveredListIterator iter(refs_list, NULL, NULL); |
984 while (iter.has_next()) { | 994 while (iter.has_next()) { |
985 iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */)); | 995 iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */)); |
986 oop next = java_lang_ref_Reference::next(iter.obj()); | 996 oop next = java_lang_ref_Reference::next(iter.obj()); |
987 assert(next->is_oop_or_null(), "bad next field"); | 997 assert(next->is_oop_or_null(), "bad next field"); |
988 // If referent has been cleared or Reference is not active, | 998 // If referent has been cleared or Reference is not active, |
1073 // not necessary because the only case we are interested in | 1083 // not necessary because the only case we are interested in |
1074 // here is when *discovered_addr is NULL (see the CAS further below), | 1084 // here is when *discovered_addr is NULL (see the CAS further below), |
1075 // so this will expand to nothing. As a result, we have manually | 1085 // so this will expand to nothing. As a result, we have manually |
1076 // elided this out for G1, but left in the test for some future | 1086 // elided this out for G1, but left in the test for some future |
1077 // collector that might have need for a pre-barrier here, e.g.:- | 1087 // collector that might have need for a pre-barrier here, e.g.:- |
1078 // oopDesc::bs()->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered); | 1088 // _bs->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered); |
1079 assert(!_discovered_list_needs_post_barrier || UseG1GC, | 1089 assert(!_discovered_list_needs_barrier || UseG1GC, |
1080 "Need to check non-G1 collector: " | 1090 "Need to check non-G1 collector: " |
1081 "may need a pre-write-barrier for CAS from NULL below"); | 1091 "may need a pre-write-barrier for CAS from NULL below"); |
1082 oop retest = oopDesc::atomic_compare_exchange_oop(next_discovered, discovered_addr, | 1092 oop retest = oopDesc::atomic_compare_exchange_oop(next_discovered, discovered_addr, |
1083 NULL); | 1093 NULL); |
1084 if (retest == NULL) { | 1094 if (retest == NULL) { |
1085 // This thread just won the right to enqueue the object. | 1095 // This thread just won the right to enqueue the object. |
1086 // We have separate lists for enqueueing, so no synchronization | 1096 // We have separate lists for enqueueing, so no synchronization |
1087 // is necessary. | 1097 // is necessary. |
1088 refs_list.set_head(obj); | 1098 refs_list.set_head(obj); |
1089 refs_list.inc_length(1); | 1099 refs_list.inc_length(1); |
1090 if (_discovered_list_needs_post_barrier) { | 1100 if (_discovered_list_needs_barrier) { |
1091 oopDesc::bs()->write_ref_field((void*)discovered_addr, next_discovered); | 1101 _bs->write_ref_field((void*)discovered_addr, next_discovered); |
1092 } | 1102 } |
1093 | 1103 |
1094 if (TraceReferenceGC) { | 1104 if (TraceReferenceGC) { |
1095 gclog_or_tty->print_cr("Discovered reference (mt) (" INTPTR_FORMAT ": %s)", | 1105 gclog_or_tty->print_cr("Discovered reference (mt) (" INTPTR_FORMAT ": %s)", |
1096 (void *)obj, obj->klass()->internal_name()); | 1106 (void *)obj, obj->klass()->internal_name()); |
1238 } | 1248 } |
1239 | 1249 |
1240 if (_discovery_is_mt) { | 1250 if (_discovery_is_mt) { |
1241 add_to_discovered_list_mt(*list, obj, discovered_addr); | 1251 add_to_discovered_list_mt(*list, obj, discovered_addr); |
1242 } else { | 1252 } else { |
1243 // If "_discovered_list_needs_post_barrier", we do write barriers when | 1253 // If "_discovered_list_needs_barrier", we do write barriers when |
1244 // updating the discovered reference list. Otherwise, we do a raw store | 1254 // updating the discovered reference list. Otherwise, we do a raw store |
1245 // here: the field will be visited later when processing the discovered | 1255 // here: the field will be visited later when processing the discovered |
1246 // references. | 1256 // references. |
1247 oop current_head = list->head(); | 1257 oop current_head = list->head(); |
1248 // The last ref must have its discovered field pointing to itself. | 1258 // The last ref must have its discovered field pointing to itself. |
1249 oop next_discovered = (current_head != NULL) ? current_head : obj; | 1259 oop next_discovered = (current_head != NULL) ? current_head : obj; |
1250 | 1260 |
1251 // As in the case further above, since we are over-writing a NULL | 1261 // As in the case further above, since we are over-writing a NULL |
1252 // pre-value, we can safely elide the pre-barrier here for the case of G1. | 1262 // pre-value, we can safely elide the pre-barrier here for the case of G1. |
1253 // e.g.:- oopDesc::bs()->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered); | 1263 // e.g.:- _bs->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered); |
1254 assert(discovered == NULL, "control point invariant"); | 1264 assert(discovered == NULL, "control point invariant"); |
1255 assert(!_discovered_list_needs_post_barrier || UseG1GC, | 1265 assert(!_discovered_list_needs_barrier || UseG1GC, |
1256 "For non-G1 collector, may need a pre-write-barrier for CAS from NULL below"); | 1266 "For non-G1 collector, may need a pre-write-barrier for CAS from NULL below"); |
1257 oop_store_raw(discovered_addr, next_discovered); | 1267 oop_store_raw(discovered_addr, next_discovered); |
1258 if (_discovered_list_needs_post_barrier) { | 1268 if (_discovered_list_needs_barrier) { |
1259 oopDesc::bs()->write_ref_field((void*)discovered_addr, next_discovered); | 1269 _bs->write_ref_field((void*)discovered_addr, next_discovered); |
1260 } | 1270 } |
1261 list->set_head(obj); | 1271 list->set_head(obj); |
1262 list->inc_length(1); | 1272 list->inc_length(1); |
1263 | 1273 |
1264 if (TraceReferenceGC) { | 1274 if (TraceReferenceGC) { |
1339 | 1349 |
1340 // Walk the given discovered ref list, and remove all reference objects | 1350 // Walk the given discovered ref list, and remove all reference objects |
1341 // whose referents are still alive, whose referents are NULL or which | 1351 // whose referents are still alive, whose referents are NULL or which |
1342 // are not active (have a non-NULL next field). NOTE: When we are | 1352 // are not active (have a non-NULL next field). NOTE: When we are |
1343 // thus precleaning the ref lists (which happens single-threaded today), | 1353 // thus precleaning the ref lists (which happens single-threaded today), |
1344 // we do not disable refs discovery to honor the correct semantics of | 1354 // we do not disable refs discovery to honour the correct semantics of |
1345 // java.lang.Reference. As a result, we need to be careful below | 1355 // java.lang.Reference. As a result, we need to be careful below |
1346 // that ref removal steps interleave safely with ref discovery steps | 1356 // that ref removal steps interleave safely with ref discovery steps |
1347 // (in this thread). | 1357 // (in this thread). |
1348 void | 1358 void |
1349 ReferenceProcessor::preclean_discovered_reflist(DiscoveredList& refs_list, | 1359 ReferenceProcessor::preclean_discovered_reflist(DiscoveredList& refs_list, |
1350 BoolObjectClosure* is_alive, | 1360 BoolObjectClosure* is_alive, |
1351 OopClosure* keep_alive, | 1361 OopClosure* keep_alive, |
1352 VoidClosure* complete_gc, | 1362 VoidClosure* complete_gc, |
1353 YieldClosure* yield) { | 1363 YieldClosure* yield) { |
1354 DiscoveredListIterator iter(refs_list, keep_alive, is_alive, _discovered_list_needs_post_barrier); | 1364 DiscoveredListIterator iter(refs_list, keep_alive, is_alive); |
1355 while (iter.has_next()) { | 1365 while (iter.has_next()) { |
1356 iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */)); | 1366 iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */)); |
1357 oop obj = iter.obj(); | 1367 oop obj = iter.obj(); |
1358 oop next = java_lang_ref_Reference::next(obj); | 1368 oop next = java_lang_ref_Reference::next(obj); |
1359 if (iter.referent() == NULL || iter.is_referent_alive() || | 1369 if (iter.referent() == NULL || iter.is_referent_alive() || |
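
The init_statics() and update_soft_ref_master_clock() hunks above both derive a millisecond timestamp from os::javaTimeNanos() and refuse to let the published soft-ref clock move backwards. A minimal standalone sketch of that pattern, with std::chrono::steady_clock standing in for the VM's monotonic nanosecond source and a plain static standing in for _soft_ref_timestamp_clock (illustrative names only, not HotSpot code):

```cpp
#include <chrono>
#include <cstdint>

// Models _soft_ref_timestamp_clock; the VM also mirrors the value into
// java.lang.ref.SoftReference.clock via set_clock().
static int64_t soft_ref_timestamp_clock = 0;

// Monotonic "now" in ms, analogous to javaTimeNanos() / NANOSECS_PER_MILLISEC.
static int64_t monotonic_millis() {
  using namespace std::chrono;
  return duration_cast<milliseconds>(steady_clock::now().time_since_epoch()).count();
}

// Same update rule as update_soft_ref_master_clock(): only ever advance, so a
// buggy or non-monotonic time source cannot drag the clock backwards.
static void update_soft_ref_clock() {
  int64_t now = monotonic_millis();
  if (now > soft_ref_timestamp_clock) {
    soft_ref_timestamp_clock = now;
  }
  // Else leave the clock stalled at its old value until time progresses.
}
```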
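The enqueue_discovered_reflist() hunk walks each discovered list, self-loops the next field so the Reference is no longer considered active, and, at the last element, swaps the local list onto the global pending list with one atomic exchange, hanging the old pending list off the tail. A toy model of that splice under the same invariant stated above (the last ref's discovered field points to itself); the Ref struct and atomic head are illustrative stand-ins, not the VM's oop machinery:

```cpp
#include <atomic>

struct Ref {
  Ref* next = nullptr;        // self-loop means "no longer active"
  Ref* discovered = nullptr;  // links the discovered / pending list
};

// Models *pending_list_addr, the head of the global pending-Reference list.
std::atomic<Ref*> pending_list_head{nullptr};

void enqueue_discovered(Ref* local_head) {
  Ref* obj = nullptr;
  Ref* next_d = local_head;
  while (obj != next_d) {
    obj = next_d;
    next_d = obj->discovered;   // read the link before we may overwrite it
    obj->next = obj;            // self-loop next: Reference is not active
    if (next_d == obj) {        // obj is last on the local list
      // Swap the local list in as the new pending head and hang the old
      // pending list off this tail element.
      Ref* old = pending_list_head.exchange(local_head);
      obj->discovered = old;    // old may be nullptr
    }
  }
}
```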
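Most of the remaining differences in this comparison are the switch between _discovered_list_needs_post_barrier with oopDesc::bs() on the left and _discovered_list_needs_barrier with a cached _bs on the right. Either way, the single-threaded discovery path stores the discovered link raw and applies a post-barrier only when the collector requires it, since the field is revisited during reference processing anyway. A minimal sketch of that idea; the CardTable type is a hypothetical stand-in for the barrier set, not HotSpot's BarrierSet API:

```cpp
// Hypothetical stand-in for a post-write barrier (e.g. a card-table mark).
struct CardTable {
  void write_ref_field(void* field) { (void)field; /* dirty the covering card */ }
};

struct DiscoveredRef { DiscoveredRef* discovered = nullptr; };

// Single-threaded discovery path: raw store of the link, then a post-barrier
// only if the collector needs discovered-list updates tracked (the
// "_discovered_list_needs_barrier" case in the hunk above).
void push_discovered(DiscoveredRef*& list_head, DiscoveredRef* obj,
                     bool needs_post_barrier, CardTable* bs) {
  DiscoveredRef* current_head = list_head;
  // The last ref must have its discovered field pointing to itself.
  DiscoveredRef* next_discovered = (current_head != nullptr) ? current_head : obj;

  obj->discovered = next_discovered;        // raw store (oop_store_raw)
  if (needs_post_barrier) {
    bs->write_ref_field(&obj->discovered);  // conditional post-barrier
  }
  list_head = obj;
}
```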