diff src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp @ 20804:7848fc12602b

Merge with jdk8u40-b25
author Gilles Duboscq <gilles.m.duboscq@oracle.com>
date Tue, 07 Apr 2015 14:58:49 +0200
parents 52b4284cb496 b840813adfcc
children
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Tue Apr 07 11:20:51 2015 +0200
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Tue Apr 07 14:58:49 2015 +0200
@@ -455,7 +455,7 @@
   } else {
     _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
   }
-  _free_regions_at_end_of_collection = _g1->free_regions();
+  _free_regions_at_end_of_collection = _g1->num_free_regions();
   update_young_list_target_length();
 
   // We may immediately start allocating regions and placing them on the
@@ -828,7 +828,7 @@
 
   record_survivor_regions(0, NULL, NULL);
 
-  _free_regions_at_end_of_collection = _g1->free_regions();
+  _free_regions_at_end_of_collection = _g1->num_free_regions();
   // Reset survivors SurvRateGroup.
   _survivor_surv_rate_group->reset();
   update_young_list_target_length();
@@ -1046,7 +1046,7 @@
 
   bool new_in_marking_window = _in_marking_window;
   bool new_in_marking_window_im = false;
-  if (during_initial_mark_pause()) {
+  if (last_pause_included_initial_mark) {
     new_in_marking_window = true;
     new_in_marking_window_im = true;
   }
@@ -1180,7 +1180,7 @@
 
   _in_marking_window = new_in_marking_window;
   _in_marking_window_im = new_in_marking_window_im;
-  _free_regions_at_end_of_collection = _g1->free_regions();
+  _free_regions_at_end_of_collection = _g1->num_free_regions();
   update_young_list_target_length();
 
   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
@@ -1202,7 +1202,7 @@
   _survivor_used_bytes_before_gc = young_list->survivor_used_bytes();
   _heap_capacity_bytes_before_gc = _g1->capacity();
   _heap_used_bytes_before_gc = _g1->used();
-  _cur_collection_pause_used_regions_at_start = _g1->used_regions();
+  _cur_collection_pause_used_regions_at_start = _g1->num_used_regions();
 
   _eden_capacity_bytes_before_gc =
          (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc;
@@ -1425,6 +1425,18 @@
 #endif // PRODUCT
 }
 
+bool G1CollectorPolicy::is_young_list_full() {
+  uint young_list_length = _g1->young_list()->length();
+  uint young_list_target_length = _young_list_target_length;
+  return young_list_length >= young_list_target_length;
+}
+
+bool G1CollectorPolicy::can_expand_young_list() {
+  uint young_list_length = _g1->young_list()->length();
+  uint young_list_max_length = _young_list_max_length;
+  return young_list_length < young_list_max_length;
+}
+
 uint G1CollectorPolicy::max_regions(int purpose) {
   switch (purpose) {
     case GCAllocForSurvived:
@@ -1617,7 +1629,7 @@
 G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
   _collectionSetChooser->clear();
 
-  uint region_num = _g1->n_regions();
+  uint region_num = _g1->num_regions();
   if (G1CollectedHeap::use_parallel_gc_threads()) {
     const uint OverpartitionFactor = 4;
     uint WorkUnit;
@@ -1638,7 +1650,7 @@
         MAX2(region_num / (uint) (ParallelGCThreads * OverpartitionFactor),
              MinWorkUnit);
     }
-    _collectionSetChooser->prepare_for_par_region_addition(_g1->n_regions(),
+    _collectionSetChooser->prepare_for_par_region_addition(_g1->num_regions(),
                                                            WorkUnit);
     ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
                                             (int) WorkUnit);
@@ -1664,7 +1676,7 @@
 // Add the heap region at the head of the non-incremental collection set
 void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
   assert(_inc_cset_build_state == Active, "Precondition");
-  assert(!hr->is_young(), "non-incremental add of young region");
+  assert(hr->is_old(), "the region should be old");
 
   assert(!hr->in_collection_set(), "should not already be in the CSet");
   hr->set_in_collection_set(true);
@@ -1810,7 +1822,7 @@
 // Add the region at the RHS of the incremental cset
 void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
   // We should only ever be appending survivors at the end of a pause
-  assert( hr->is_survivor(), "Logic");
+  assert(hr->is_survivor(), "Logic");
 
   // Do the 'common' stuff
   add_region_to_incremental_cset_common(hr);
@@ -1828,7 +1840,7 @@
 // Add the region to the LHS of the incremental cset
 void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
   // Survivors should be added to the RHS at the end of a pause
-  assert(!hr->is_survivor(), "Logic");
+  assert(hr->is_eden(), "Logic");
 
   // Do the 'common' stuff
   add_region_to_incremental_cset_common(hr);
@@ -1935,7 +1947,7 @@
   // of them are available.
 
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  const size_t region_num = g1h->n_regions();
+  const size_t region_num = g1h->num_regions();
   const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
   size_t result = region_num * perc / 100;
   // emulate ceiling
@@ -1988,7 +2000,11 @@
   HeapRegion* hr = young_list->first_survivor_region();
   while (hr != NULL) {
     assert(hr->is_survivor(), "badly formed young list");
-    hr->set_young();
+    // There is a convention that all the young regions in the CSet
+    // are tagged as "eden", so we do this for the survivors here. We
+    // use the special set_eden_pre_gc() as it doesn't check that the
+    // region is free (which is not the case here).
+    hr->set_eden_pre_gc();
     hr = hr->get_next_young_region();
   }