changeset 20359:4d3a43351904

Merge
author tschatzl
date Wed, 27 Aug 2014 09:36:55 +0200
parents b95d569d10c1 (current diff) 439f0d76cff3 (diff)
children 833b0f92429a a8ea2f110d87
files src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp src/share/vm/memory/genCollectedHeap.cpp
diffstat 7 files changed, 0 insertions(+), 55 deletions(-)
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Aug 26 14:15:42 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Aug 27 09:36:55 2014 +0200
@@ -2336,25 +2336,6 @@
   return blk.result();
 }
 
-size_t G1CollectedHeap::unsafe_max_alloc() {
-  if (num_free_regions() > 0) return HeapRegion::GrainBytes;
-  // otherwise, is there space in the current allocation region?
-
-  // We need to store the current allocation region in a local variable
-  // here. The problem is that this method doesn't take any locks and
-  // there may be other threads which overwrite the current allocation
-  // region field. attempt_allocation(), for example, sets it to NULL
-  // and this can happen *after* the NULL check here but before the call
-  // to free(), resulting in a SIGSEGV. Note that this doesn't appear
-  // to be a problem in the optimized build, since the two loads of the
-  // current allocation region field are optimized away.
-  HeapRegion* hr = _mutator_alloc_region.get();
-  if (hr == NULL) {
-    return 0;
-  }
-  return hr->free();
-}
-
 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
   switch (cause) {
     case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;
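
The comment deleted above describes a classic lock-free check-then-use race: attempt_allocation() may set the current allocation region to NULL after the NULL check but before hr->free() runs. A minimal standalone sketch of the load-once pattern the removed code relied on, assuming a pointer field that another thread may concurrently clear (Heap, Region and _current are illustrative names, not HotSpot's):

#include <atomic>
#include <cstddef>

struct Region {
  size_t free() const { return 64 * 1024; }  // placeholder value
};

struct Heap {
  std::atomic<Region*> _current{nullptr};    // may be set to NULL by another thread

  // Racy version: the field is loaded twice, so it can be non-NULL at the
  // check and NULL at the use, crashing on a NULL dereference.
  size_t racy_free() {
    if (_current.load() == nullptr) return 0;
    return _current.load()->free();
  }

  // Safe version (what the removed code did): load the pointer once into a
  // local, then check and use that single snapshot.
  size_t safe_free() {
    Region* hr = _current.load();
    if (hr == nullptr) return 0;
    return hr->free();
  }
};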
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Tue Aug 26 14:15:42 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Aug 27 09:36:55 2014 +0200
@@ -1170,15 +1170,6 @@
   // end fields defining the extent of the contiguous allocation region.)
   // But G1CollectedHeap doesn't yet support this.
 
-  // Return an estimate of the maximum allocation that could be performed
-  // without triggering any collection or expansion activity.  In a
-  // generational collector, for example, this is probably the largest
-  // allocation that could be supported (without expansion) in the youngest
-  // generation.  It is "unsafe" because no locks are taken; the result
-  // should be treated as an approximation, not a guarantee, for use in
-  // heuristic resizing decisions.
-  virtual size_t unsafe_max_alloc();
-
   virtual bool is_maximal_no_gc() const {
     return _hrs.available() == 0;
   }
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Tue Aug 26 14:15:42 2014 +0200
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Wed Aug 27 09:36:55 2014 +0200
@@ -485,10 +485,6 @@
   young_gen()->eden_space()->ensure_parsability();
 }
 
-size_t ParallelScavengeHeap::unsafe_max_alloc() {
-  return young_gen()->eden_space()->free_in_bytes();
-}
-
 size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
   return young_gen()->eden_space()->tlab_capacity(thr);
 }
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Tue Aug 26 14:15:42 2014 +0200
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Wed Aug 27 09:36:55 2014 +0200
@@ -184,8 +184,6 @@
   void accumulate_statistics_all_tlabs();
   void resize_all_tlabs();
 
-  size_t unsafe_max_alloc();
-
   bool supports_tlab_allocation() const { return true; }
 
   size_t tlab_capacity(Thread* thr) const;
--- a/src/share/vm/gc_interface/collectedHeap.hpp	Tue Aug 26 14:15:42 2014 +0200
+++ b/src/share/vm/gc_interface/collectedHeap.hpp	Wed Aug 27 09:36:55 2014 +0200
@@ -395,15 +395,6 @@
   // allocation from them and necessitating allocation of new TLABs.
   virtual void ensure_parsability(bool retire_tlabs);
 
-  // Return an estimate of the maximum allocation that could be performed
-  // without triggering any collection or expansion activity.  In a
-  // generational collector, for example, this is probably the largest
-  // allocation that could be supported (without expansion) in the youngest
-  // generation.  It is "unsafe" because no locks are taken; the result
-  // should be treated as an approximation, not a guarantee, for use in
-  // heuristic resizing decisions.
-  virtual size_t unsafe_max_alloc() = 0;
-
   // Section on thread-local allocation buffers (TLABs)
   // If the heap supports thread-local allocation buffers, it should override
   // the following methods:
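
The copies of the removed doc comment all make the same point: the value was a lock-free approximation meant only for heuristic sizing decisions, never a guarantee that an allocation of that size would succeed. A hedged sketch of how such an estimate could be consumed (HeapEstimate and probably_fits are hypothetical names, not part of the HotSpot API):

#include <cstddef>

class HeapEstimate {
 public:
  virtual ~HeapEstimate() {}
  // Stand-in for the removed unsafe_max_alloc(): an approximate upper bound
  // on what could be allocated without triggering a collection or expansion.
  virtual size_t unsafe_max_alloc() = 0;
};

// Heuristic consumer: attempt an optional bulk allocation only if it probably
// fits. Because the estimate is taken without locks, a stale value can only
// make the heuristic too eager or too cautious; it must not be treated as a
// correctness guarantee.
bool probably_fits(HeapEstimate* heap, size_t bytes_needed) {
  return bytes_needed <= heap->unsafe_max_alloc();
}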
--- a/src/share/vm/memory/genCollectedHeap.cpp	Tue Aug 26 14:15:42 2014 +0200
+++ b/src/share/vm/memory/genCollectedHeap.cpp	Wed Aug 27 09:36:55 2014 +0200
@@ -704,10 +704,6 @@
   return _gens[0]->end_addr();
 }
 
-size_t GenCollectedHeap::unsafe_max_alloc() {
-  return _gens[0]->unsafe_max_alloc_nogc();
-}
-
 // public collection interfaces
 
 void GenCollectedHeap::collect(GCCause::Cause cause) {
--- a/src/share/vm/memory/genCollectedHeap.hpp	Tue Aug 26 14:15:42 2014 +0200
+++ b/src/share/vm/memory/genCollectedHeap.hpp	Wed Aug 27 09:36:55 2014 +0200
@@ -166,14 +166,6 @@
   HeapWord** top_addr() const;
   HeapWord** end_addr() const;
 
-  // Return an estimate of the maximum allocation that could be performed
-  // without triggering any collection activity.  In a generational
-  // collector, for example, this is probably the largest allocation that
-  // could be supported in the youngest generation.  It is "unsafe" because
-  // no locks are taken; the result should be treated as an approximation,
-  // not a guarantee.
-  size_t unsafe_max_alloc();
-
   // Does this heap support heap inspection? (+PrintClassHistogram)
   virtual bool supports_heap_inspection() const { return true; }