changeset 17752:b828d0d08417

Merge
author jwilhelm
date Mon, 24 Mar 2014 06:55:00 -0700
parents f53edbc2b728 (diff) 3b4e1b5c13a0 (current diff)
children ac767c227ea2 fd54db8eb0df
diffstat 6 files changed, 102 insertions(+), 123 deletions(-)
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu Mar 20 16:31:47 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Mon Mar 24 06:55:00 2014 -0700
@@ -2529,6 +2529,11 @@
     assert(!rp->discovery_enabled(), "Post condition");
   }
 
+  if (has_overflown()) {
+    // We can not trust g1_is_alive if the marking stack overflowed
+    return;
+  }
+
   g1h->unlink_string_and_symbol_table(&g1_is_alive,
                                       /* process_strings */ false, // currently strings are always roots
                                       /* process_symbols */ true);
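
Note on the concurrentMark.cpp hunk above: if the global marking stack overflowed, marking will be restarted, so the g1_is_alive closure does not yet reflect a completed marking and must not drive unlinking of the string and symbol tables. A minimal standalone sketch of this guard pattern follows; the types and names are illustrative stand-ins, not HotSpot code.

    // Standalone sketch (stub types): skip unlinking when marking overflowed.
    #include <cstdio>

    struct MarkingState {
      bool overflown;                         // stand-in for ConcurrentMark::has_overflown()
      bool has_overflown() const { return overflown; }
    };

    // Stand-in for unlink_string_and_symbol_table(): must only run when the
    // liveness closure reflects a completed marking.
    static void unlink_dead_entries() { std::printf("unlinking dead entries\n"); }

    static void weak_refs_work(const MarkingState& cm) {
      if (cm.has_overflown()) {
        // Marking will be restarted, so liveness information is incomplete;
        // unlinking now could drop entries that are actually reachable.
        return;
      }
      unlink_dead_entries();
    }

    int main() {
      weak_refs_work(MarkingState{true});   // overflow: unlinking skipped
      weak_refs_work(MarkingState{false});  // normal: unlinking performed
    }
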
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Mar 20 16:31:47 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Mon Mar 24 06:55:00 2014 -0700
@@ -2265,7 +2265,7 @@
                                 // (for efficiency/performance)
                            false);
                                 // Setting next fields of discovered
-                                // lists requires a barrier.
+                                // lists does not require a barrier.
 }
 
 size_t G1CollectedHeap::capacity() const {
@@ -4743,6 +4743,12 @@
   oop forward_ptr = old->forward_to_atomic(obj);
   if (forward_ptr == NULL) {
     Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
+
+    // alloc_purpose is just a hint to allocate() above, recheck the type of region
+    // we actually allocated from and update alloc_purpose accordingly
+    HeapRegion* to_region = _g1h->heap_region_containing_raw(obj_ptr);
+    alloc_purpose = to_region->is_young() ? GCAllocForSurvived : GCAllocForTenured;
+
     if (g1p->track_object_age(alloc_purpose)) {
       // We could simply do obj->incr_age(). However, this causes a
       // performance issue. obj->incr_age() will first check whether
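
Note on the g1CollectedHeap.cpp hunk above: allocate() treats alloc_purpose only as a hint and may fall back to a region of the other type, so the decision to track object age has to be made from the region the copy actually landed in. A minimal standalone sketch of that recomputation follows; the names are illustrative stand-ins, not HotSpot code.

    // Standalone sketch (stub types): recompute the allocation purpose from the
    // destination region before deciding whether to track object age.
    #include <cstdio>

    enum GCAllocPurpose { GCAllocForSurvived, GCAllocForTenured };

    struct Region {
      bool young;
      bool is_young() const { return young; }
    };

    static bool track_object_age(GCAllocPurpose purpose) {
      // Only survivor-space allocations keep incrementing the object's age.
      return purpose == GCAllocForSurvived;
    }

    static void copy_object(GCAllocPurpose alloc_purpose, const Region& to_region) {
      // The allocator may have fallen back to a region of a different type than
      // the hint, so derive the purpose from where the object actually went.
      alloc_purpose = to_region.is_young() ? GCAllocForSurvived : GCAllocForTenured;
      std::printf("track age: %s\n", track_object_age(alloc_purpose) ? "yes" : "no");
    }

    int main() {
      copy_object(GCAllocForSurvived, Region{false});  // fell back to an old region
      copy_object(GCAllocForSurvived, Region{true});   // landed in a survivor region
    }
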
--- a/src/share/vm/gc_implementation/g1/satbQueue.cpp	Thu Mar 20 16:31:47 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/satbQueue.cpp	Mon Mar 24 06:55:00 2014 -0700
@@ -219,58 +219,52 @@
 }
 
 #ifdef ASSERT
-void SATBMarkQueueSet::dump_active_values(JavaThread* first,
-                                          bool expected_active) {
-  gclog_or_tty->print_cr("SATB queue active values for Java Threads");
-  gclog_or_tty->print_cr(" SATB queue set: active is %s",
-                         (is_active()) ? "TRUE" : "FALSE");
-  gclog_or_tty->print_cr(" expected_active is %s",
-                         (expected_active) ? "TRUE" : "FALSE");
-  for (JavaThread* t = first; t; t = t->next()) {
-    bool active = t->satb_mark_queue().is_active();
-    gclog_or_tty->print_cr("  thread %s, active is %s",
-                           t->name(), (active) ? "TRUE" : "FALSE");
+void SATBMarkQueueSet::dump_active_states(bool expected_active) {
+  gclog_or_tty->print_cr("Expected SATB active state: %s",
+                         expected_active ? "ACTIVE" : "INACTIVE");
+  gclog_or_tty->print_cr("Actual SATB active states:");
+  gclog_or_tty->print_cr("  Queue set: %s", is_active() ? "ACTIVE" : "INACTIVE");
+  for (JavaThread* t = Threads::first(); t; t = t->next()) {
+    gclog_or_tty->print_cr("  Thread \"%s\" queue: %s", t->name(),
+                           t->satb_mark_queue().is_active() ? "ACTIVE" : "INACTIVE");
+  }
+  gclog_or_tty->print_cr("  Shared queue: %s",
+                         shared_satb_queue()->is_active() ? "ACTIVE" : "INACTIVE");
+}
+
+void SATBMarkQueueSet::verify_active_states(bool expected_active) {
+  // Verify queue set state
+  if (is_active() != expected_active) {
+    dump_active_states(expected_active);
+    guarantee(false, "SATB queue set has an unexpected active state");
+  }
+
+  // Verify thread queue states
+  for (JavaThread* t = Threads::first(); t; t = t->next()) {
+    if (t->satb_mark_queue().is_active() != expected_active) {
+      dump_active_states(expected_active);
+      guarantee(false, "Thread SATB queue has an unexpected active state");
+    }
+  }
+
+  // Verify shared queue state
+  if (shared_satb_queue()->is_active() != expected_active) {
+    dump_active_states(expected_active);
+    guarantee(false, "Shared SATB queue has an unexpected active state");
   }
 }
 #endif // ASSERT
 
-void SATBMarkQueueSet::set_active_all_threads(bool b,
-                                              bool expected_active) {
+void SATBMarkQueueSet::set_active_all_threads(bool active, bool expected_active) {
   assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
-  JavaThread* first = Threads::first();
-
 #ifdef ASSERT
-  if (_all_active != expected_active) {
-    dump_active_values(first, expected_active);
-
-    // I leave this here as a guarantee, instead of an assert, so
-    // that it will still be compiled in if we choose to uncomment
-    // the #ifdef ASSERT in a product build. The whole block is
-    // within an #ifdef ASSERT so the guarantee will not be compiled
-    // in a product build anyway.
-    guarantee(false,
-              "SATB queue set has an unexpected active value");
-  }
+  verify_active_states(expected_active);
 #endif // ASSERT
-  _all_active = b;
-
-  for (JavaThread* t = first; t; t = t->next()) {
-#ifdef ASSERT
-    bool active = t->satb_mark_queue().is_active();
-    if (active != expected_active) {
-      dump_active_values(first, expected_active);
-
-      // I leave this here as a guarantee, instead of an assert, so
-      // that it will still be compiled in if we choose to uncomment
-      // the #ifdef ASSERT in a product build. The whole block is
-      // within an #ifdef ASSERT so the guarantee will not be compiled
-      // in a product build anyway.
-      guarantee(false,
-                "thread has an unexpected active value in its SATB queue");
-    }
-#endif // ASSERT
-    t->satb_mark_queue().set_active(b);
+  _all_active = active;
+  for (JavaThread* t = Threads::first(); t; t = t->next()) {
+    t->satb_mark_queue().set_active(active);
   }
+  shared_satb_queue()->set_active(active);
 }
 
 void SATBMarkQueueSet::filter_thread_buffers() {
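
Note on the satbQueue.cpp hunks above: the per-call dump/guarantee pairs are folded into dump_active_states() and verify_active_states(), and set_active_all_threads() now also covers the shared SATB queue, which the previous loop never touched. A minimal standalone sketch of the verify-then-set shape follows; the simplified types are stand-ins, not HotSpot code.

    // Standalone sketch (stub types): verify every queue's active flag against
    // the expected state, then flip them all, including the shared queue.
    #include <cassert>
    #include <vector>

    struct Queue { bool active = false; };

    struct QueueSet {
      bool all_active = false;
      std::vector<Queue> thread_queues;   // stand-in for iterating JavaThreads
      Queue shared_queue;                 // stand-in for shared_satb_queue()

      void verify_active_states(bool expected) const {
        assert(all_active == expected && "queue set has an unexpected active state");
        for (const Queue& q : thread_queues)
          assert(q.active == expected && "thread queue has an unexpected active state");
        assert(shared_queue.active == expected && "shared queue has an unexpected active state");
      }

      void set_active_all_threads(bool active, bool expected) {
        verify_active_states(expected);   // debug-only (ASSERT) in the real code
        all_active = active;
        for (Queue& q : thread_queues) q.active = active;
        shared_queue.active = active;     // the shared queue is now covered too
      }
    };

    int main() {
      QueueSet s;
      s.thread_queues.resize(3);
      s.set_active_all_threads(true,  /* expected_active */ false);
      s.set_active_all_threads(false, /* expected_active */ true);
    }
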
--- a/src/share/vm/gc_implementation/g1/satbQueue.hpp	Thu Mar 20 16:31:47 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/satbQueue.hpp	Mon Mar 24 06:55:00 2014 -0700
@@ -87,7 +87,8 @@
   bool apply_closure_to_completed_buffer_work(bool par, int worker);
 
 #ifdef ASSERT
-  void dump_active_values(JavaThread* first, bool expected_active);
+  void dump_active_states(bool expected_active);
+  void verify_active_states(bool expected_active);
 #endif // ASSERT
 
 public:
@@ -99,11 +100,11 @@
 
   static void handle_zero_index_for_thread(JavaThread* t);
 
-  // Apply "set_active(b)" to all Java threads' SATB queues. It should be
+  // Apply "set_active(active)" to all SATB queues in the set. It should be
   // called only with the world stopped. The method will assert that the
   // SATB queues of all threads it visits, as well as the SATB queue
   // set itself, has an active value same as expected_active.
-  void set_active_all_threads(bool b, bool expected_active);
+  void set_active_all_threads(bool active, bool expected_active);
 
   // Filter all the currently-active SATB buffers.
   void filter_thread_buffers();
--- a/src/share/vm/memory/referenceProcessor.cpp	Thu Mar 20 16:31:47 2014 +0100
+++ b/src/share/vm/memory/referenceProcessor.cpp	Mon Mar 24 06:55:00 2014 -0700
@@ -95,12 +95,11 @@
                                        uint      mt_discovery_degree,
                                        bool      atomic_discovery,
                                        BoolObjectClosure* is_alive_non_header,
-                                       bool      discovered_list_needs_barrier)  :
+                                       bool      discovered_list_needs_post_barrier)  :
   _discovering_refs(false),
   _enqueuing_is_done(false),
   _is_alive_non_header(is_alive_non_header),
-  _discovered_list_needs_barrier(discovered_list_needs_barrier),
-  _bs(NULL),
+  _discovered_list_needs_post_barrier(discovered_list_needs_post_barrier),
   _processing_is_mt(mt_processing),
   _next_id(0)
 {
@@ -126,10 +125,6 @@
     _discovered_refs[i].set_length(0);
   }
 
-  // If we do barriers, cache a copy of the barrier set.
-  if (discovered_list_needs_barrier) {
-    _bs = Universe::heap()->barrier_set();
-  }
   setup_policy(false /* default soft ref policy */);
 }
 
@@ -317,13 +312,9 @@
   // Enqueue references that are not made active again, and
   // clear the decks for the next collection (cycle).
   ref->enqueue_discovered_reflists((HeapWord*)pending_list_addr, task_executor);
-  // Do the oop-check on pending_list_addr missed in
-  // enqueue_discovered_reflist. We should probably
-  // do a raw oop_check so that future such idempotent
-  // oop_stores relying on the oop-check side-effect
-  // may be elided automatically and safely without
-  // affecting correctness.
-  oop_store(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr));
+  // Do the post-barrier on pending_list_addr missed in
+  // enqueue_discovered_reflist.
+  oopDesc::bs()->write_ref_field(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr));
 
   // Stop treating discovered references specially.
   ref->disable_discovery();
@@ -372,15 +363,17 @@
       assert(java_lang_ref_Reference::next(obj) == NULL,
              "Reference not active; should not be discovered");
       // Self-loop next, so as to make Ref not active.
-      java_lang_ref_Reference::set_next(obj, obj);
+      // Post-barrier not needed when looping to self.
+      java_lang_ref_Reference::set_next_raw(obj, obj);
       if (next_d == obj) {  // obj is last
        // Swap refs_list into pending_list_addr and
         // set obj's discovered to what we read from pending_list_addr.
         oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
-        // Need oop_check on pending_list_addr above;
-        // see special oop-check code at the end of
+        // Need post-barrier on pending_list_addr above;
+        // see special post-barrier code at the end of
         // enqueue_discovered_reflists() further below.
-        java_lang_ref_Reference::set_discovered(obj, old); // old may be NULL
+        java_lang_ref_Reference::set_discovered_raw(obj, old); // old may be NULL
+        oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), old);
       }
     }
   } else { // Old behaviour
@@ -497,13 +490,13 @@
   } else {
     new_next = _next;
   }
-
-  if (UseCompressedOops) {
-    // Remove Reference object from list.
-    oopDesc::encode_store_heap_oop((narrowOop*)_prev_next, new_next);
-  } else {
-    // Remove Reference object from list.
-    oopDesc::store_heap_oop((oop*)_prev_next, new_next);
+  // Remove Reference object from discovered list. Note that G1 does not need a
+  // pre-barrier here because we know the Reference has already been found/marked,
+  // that's how it ended up in the discovered list in the first place.
+  oop_store_raw(_prev_next, new_next);
+  if (_discovered_list_needs_post_barrier && _prev_next != _refs_list.adr_head()) {
+    // Needs post-barrier and this is not the list head (which is not on the heap)
+    oopDesc::bs()->write_ref_field(_prev_next, new_next);
   }
   NOT_PRODUCT(_removed++);
   _refs_list.dec_length(1);
@@ -516,13 +509,11 @@
   // the reference object and will fail
   // CT verification.
   if (UseG1GC) {
-    BarrierSet* bs = oopDesc::bs();
     HeapWord* next_addr = java_lang_ref_Reference::next_addr(_ref);
-
     if (UseCompressedOops) {
-      bs->write_ref_field_pre((narrowOop*)next_addr, NULL);
+      oopDesc::bs()->write_ref_field_pre((narrowOop*)next_addr, NULL);
     } else {
-      bs->write_ref_field_pre((oop*)next_addr, NULL);
+      oopDesc::bs()->write_ref_field_pre((oop*)next_addr, NULL);
     }
     java_lang_ref_Reference::set_next_raw(_ref, NULL);
   } else {
@@ -553,7 +544,7 @@
                                    OopClosure*        keep_alive,
                                    VoidClosure*       complete_gc) {
   assert(policy != NULL, "Must have a non-NULL policy");
-  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
+  DiscoveredListIterator iter(refs_list, keep_alive, is_alive, _discovered_list_needs_post_barrier);
   // Decide which softly reachable refs should be kept alive.
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
@@ -593,7 +584,7 @@
                              BoolObjectClosure* is_alive,
                              OopClosure*        keep_alive) {
   assert(discovery_is_atomic(), "Error");
-  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
+  DiscoveredListIterator iter(refs_list, keep_alive, is_alive, _discovered_list_needs_post_barrier);
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
     DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());)
@@ -630,7 +621,7 @@
                                                   OopClosure*        keep_alive,
                                                   VoidClosure*       complete_gc) {
   assert(!discovery_is_atomic(), "Error");
-  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
+  DiscoveredListIterator iter(refs_list, keep_alive, is_alive, _discovered_list_needs_post_barrier);
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
     HeapWord* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
@@ -673,7 +664,7 @@
                                    OopClosure*        keep_alive,
                                    VoidClosure*       complete_gc) {
   ResourceMark rm;
-  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
+  DiscoveredListIterator iter(refs_list, keep_alive, is_alive, _discovered_list_needs_post_barrier);
   while (iter.has_next()) {
     iter.update_discovered();
     iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
@@ -790,10 +781,9 @@
 };
 
 void ReferenceProcessor::set_discovered(oop ref, oop value) {
-  if (_discovered_list_needs_barrier) {
-    java_lang_ref_Reference::set_discovered(ref, value);
-  } else {
-    java_lang_ref_Reference::set_discovered_raw(ref, value);
+  java_lang_ref_Reference::set_discovered_raw(ref, value);
+  if (_discovered_list_needs_post_barrier) {
+    oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(ref), value);
   }
 }
 
@@ -990,7 +980,7 @@
 
 void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list) {
   assert(!discovery_is_atomic(), "Else why call this method?");
-  DiscoveredListIterator iter(refs_list, NULL, NULL);
+  DiscoveredListIterator iter(refs_list, NULL, NULL, _discovered_list_needs_post_barrier);
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
     oop next = java_lang_ref_Reference::next(iter.obj());
@@ -1085,8 +1075,8 @@
   // so this will expand to nothing. As a result, we have manually
   // elided this out for G1, but left in the test for some future
   // collector that might have need for a pre-barrier here, e.g.:-
-  // _bs->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered);
-  assert(!_discovered_list_needs_barrier || UseG1GC,
+  // oopDesc::bs()->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered);
+  assert(!_discovered_list_needs_post_barrier || UseG1GC,
          "Need to check non-G1 collector: "
          "may need a pre-write-barrier for CAS from NULL below");
   oop retest = oopDesc::atomic_compare_exchange_oop(next_discovered, discovered_addr,
@@ -1097,8 +1087,8 @@
     // is necessary.
     refs_list.set_head(obj);
     refs_list.inc_length(1);
-    if (_discovered_list_needs_barrier) {
-      _bs->write_ref_field((void*)discovered_addr, next_discovered);
+    if (_discovered_list_needs_post_barrier) {
+      oopDesc::bs()->write_ref_field((void*)discovered_addr, next_discovered);
     }
 
     if (TraceReferenceGC) {
@@ -1250,7 +1240,7 @@
   if (_discovery_is_mt) {
     add_to_discovered_list_mt(*list, obj, discovered_addr);
   } else {
-    // If "_discovered_list_needs_barrier", we do write barriers when
+    // If "_discovered_list_needs_post_barrier", we do write barriers when
     // updating the discovered reference list.  Otherwise, we do a raw store
     // here: the field will be visited later when processing the discovered
     // references.
@@ -1260,13 +1250,13 @@
 
     // As in the case further above, since we are over-writing a NULL
     // pre-value, we can safely elide the pre-barrier here for the case of G1.
-    // e.g.:- _bs->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered);
+    // e.g.:- oopDesc::bs()->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered);
     assert(discovered == NULL, "control point invariant");
-    assert(!_discovered_list_needs_barrier || UseG1GC,
+    assert(!_discovered_list_needs_post_barrier || UseG1GC,
            "For non-G1 collector, may need a pre-write-barrier for CAS from NULL below");
     oop_store_raw(discovered_addr, next_discovered);
-    if (_discovered_list_needs_barrier) {
-      _bs->write_ref_field((void*)discovered_addr, next_discovered);
+    if (_discovered_list_needs_post_barrier) {
+      oopDesc::bs()->write_ref_field((void*)discovered_addr, next_discovered);
     }
     list->set_head(obj);
     list->inc_length(1);
@@ -1361,7 +1351,7 @@
                                                 OopClosure*        keep_alive,
                                                 VoidClosure*       complete_gc,
                                                 YieldClosure*      yield) {
-  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
+  DiscoveredListIterator iter(refs_list, keep_alive, is_alive, _discovered_list_needs_post_barrier);
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
     oop obj = iter.obj();
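
Note on the referenceProcessor.cpp hunks above: the cached _bs field is removed and the flag is renamed to say explicitly that only a post-barrier (card mark / remembered-set update) is needed on discovered-list stores. Pre-barriers are elided because the overwritten value is either NULL or already known to be marked, and the list head itself is not a heap slot, so it never needs a barrier. A minimal standalone sketch of the raw-store-plus-conditional-post-barrier pattern follows; the types are stand-ins, not HotSpot code.

    // Standalone sketch (stub types): raw reference store followed by an
    // explicit post-barrier, applied only when the updated slot is in the heap.
    #include <cstdio>

    struct Obj;

    struct BarrierSet {
      // Stand-in for a card-marking / remembered-set post-barrier.
      void write_ref_field(Obj** field, Obj* new_val) {
        std::printf("post-barrier on slot %p -> %p\n", (void*)field, (void*)new_val);
      }
    };

    static BarrierSet g_bs;

    static bool is_heap_slot(Obj** slot, Obj** list_head_addr) {
      // The discovered-list head lives in the reference processor, not the heap.
      return slot != list_head_addr;
    }

    static void store_with_post_barrier(Obj** slot, Obj* value,
                                        bool needs_post_barrier, Obj** list_head_addr) {
      *slot = value;                                   // raw store (oop_store_raw analogue)
      if (needs_post_barrier && is_heap_slot(slot, list_head_addr)) {
        g_bs.write_ref_field(slot, value);             // post-barrier for heap slots only
      }
    }

    int main() {
      Obj* list_head  = nullptr;
      Obj* heap_field = nullptr;
      store_with_post_barrier(&list_head,  nullptr, true, &list_head);   // head: no barrier
      store_with_post_barrier(&heap_field, nullptr, true, &list_head);   // heap slot: barrier
    }
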
--- a/src/share/vm/memory/referenceProcessor.hpp	Thu Mar 20 16:31:47 2014 +0100
+++ b/src/share/vm/memory/referenceProcessor.hpp	Mon Mar 24 06:55:00 2014 -0700
@@ -99,6 +99,7 @@
   oop                _referent;
   OopClosure*        _keep_alive;
   BoolObjectClosure* _is_alive;
+  bool               _discovered_list_needs_post_barrier;
 
   DEBUG_ONLY(
   oop                _first_seen; // cyclic linked list check
@@ -112,7 +113,8 @@
 public:
   inline DiscoveredListIterator(DiscoveredList&    refs_list,
                                 OopClosure*        keep_alive,
-                                BoolObjectClosure* is_alive):
+                                BoolObjectClosure* is_alive,
+                                bool               discovered_list_needs_post_barrier = false):
     _refs_list(refs_list),
     _prev_next(refs_list.adr_head()),
     _prev(NULL),
@@ -126,7 +128,8 @@
 #endif
     _next(NULL),
     _keep_alive(keep_alive),
-    _is_alive(is_alive)
+    _is_alive(is_alive),
+    _discovered_list_needs_post_barrier(discovered_list_needs_post_barrier)
 { }
 
   // End Of List.
@@ -228,14 +231,13 @@
   bool        _discovery_is_mt;         // true if reference discovery is MT.
 
   // If true, setting "next" field of a discovered refs list requires
-  // write barrier(s).  (Must be true if used in a collector in which
+  // write post barrier.  (Must be true if used in a collector in which
   // elements of a discovered list may be moved during discovery: for
   // example, a collector like Garbage-First that moves objects during a
   // long-term concurrent marking phase that does weak reference
   // discovery.)
-  bool        _discovered_list_needs_barrier;
+  bool        _discovered_list_needs_post_barrier;
 
-  BarrierSet* _bs;                      // Cached copy of BarrierSet.
   bool        _enqueuing_is_done;       // true if all weak references enqueued
   bool        _processing_is_mt;        // true during phases when
                                         // reference processing is MT.
@@ -381,8 +383,8 @@
 
  protected:
   // Set the 'discovered' field of the given reference to
-  // the given value - emitting barriers depending upon
-  // the value of _discovered_list_needs_barrier.
+  // the given value - emitting post barriers depending upon
+  // the value of _discovered_list_needs_post_barrier.
   void set_discovered(oop ref, oop value);
 
   // "Preclean" the given discovered reference list
@@ -420,32 +422,13 @@
   void update_soft_ref_master_clock();
 
  public:
-  // constructor
-  ReferenceProcessor():
-    _span((HeapWord*)NULL, (HeapWord*)NULL),
-    _discovered_refs(NULL),
-    _discoveredSoftRefs(NULL),  _discoveredWeakRefs(NULL),
-    _discoveredFinalRefs(NULL), _discoveredPhantomRefs(NULL),
-    _discovering_refs(false),
-    _discovery_is_atomic(true),
-    _enqueuing_is_done(false),
-    _discovery_is_mt(false),
-    _discovered_list_needs_barrier(false),
-    _bs(NULL),
-    _is_alive_non_header(NULL),
-    _num_q(0),
-    _max_num_q(0),
-    _processing_is_mt(false),
-    _next_id(0)
-  { }
-
   // Default parameters give you a vanilla reference processor.
   ReferenceProcessor(MemRegion span,
                      bool mt_processing = false, uint mt_processing_degree = 1,
                      bool mt_discovery  = false, uint mt_discovery_degree  = 1,
                      bool atomic_discovery = true,
                      BoolObjectClosure* is_alive_non_header = NULL,
-                     bool discovered_list_needs_barrier = false);
+                     bool discovered_list_needs_post_barrier = false);
 
   // RefDiscoveryPolicy values
   enum DiscoveryPolicy {
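
Note on the referenceProcessor.hpp hunks above: the separate no-argument constructor is dropped because the main constructor's defaulted parameters already produce the vanilla configuration, and the renamed discovered_list_needs_post_barrier flag is threaded through DiscoveredListIterator as an extra (defaulted) constructor argument. A minimal standalone sketch of collapsing the two constructors follows; the class is illustrative, not HotSpot code.

    // Standalone sketch (stub class): one constructor with defaulted parameters
    // replaces a dedicated default constructor; the flag names a post-barrier.
    #include <cstdio>

    class RefProcessorLike {
     public:
      explicit RefProcessorLike(bool mt_processing = false,
                                bool atomic_discovery = true,
                                bool discovered_list_needs_post_barrier = false)
          : _processing_is_mt(mt_processing),
            _discovery_is_atomic(atomic_discovery),
            _discovered_list_needs_post_barrier(discovered_list_needs_post_barrier) {}

      bool needs_post_barrier() const { return _discovered_list_needs_post_barrier; }

     private:
      bool _processing_is_mt;
      bool _discovery_is_atomic;
      bool _discovered_list_needs_post_barrier;
    };

    int main() {
      RefProcessorLike vanilla;                        // defaults cover the old no-arg case
      RefProcessorLike g1_style(false, false, true);   // concurrent discovery, post-barrier
      std::printf("g1 needs post-barrier: %d\n", g1_style.needs_post_barrier());
    }
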