# HG changeset patch
# User brutisso
# Date 1409041723 -7200
# Node ID 439f0d76cff3dc65b39e6d44311f09bf23be2fbc
# Parent  47e8e40b94d3d5c47df9351320ac70ebd9ced05f
8029524: Remove unsused method CollectedHeap::unsafe_max_alloc()
Reviewed-by: pliden, jmasa

diff -r 47e8e40b94d3 -r 439f0d76cff3 src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Mon Aug 25 17:05:18 2014 -0400
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Aug 26 10:28:43 2014 +0200
@@ -2336,25 +2336,6 @@
   return blk.result();
 }
 
-size_t G1CollectedHeap::unsafe_max_alloc() {
-  if (num_free_regions() > 0) return HeapRegion::GrainBytes;
-  // otherwise, is there space in the current allocation region?
-
-  // We need to store the current allocation region in a local variable
-  // here. The problem is that this method doesn't take any locks and
-  // there may be other threads which overwrite the current allocation
-  // region field. attempt_allocation(), for example, sets it to NULL
-  // and this can happen *after* the NULL check here but before the call
-  // to free(), resulting in a SIGSEGV. Note that this doesn't appear
-  // to be a problem in the optimized build, since the two loads of the
-  // current allocation region field are optimized away.
-  HeapRegion* hr = _mutator_alloc_region.get();
-  if (hr == NULL) {
-    return 0;
-  }
-  return hr->free();
-}
-
 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
   switch (cause) {
     case GCCause::_gc_locker: return GCLockerInvokesConcurrent;
diff -r 47e8e40b94d3 -r 439f0d76cff3 src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Mon Aug 25 17:05:18 2014 -0400
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Tue Aug 26 10:28:43 2014 +0200
@@ -1170,15 +1170,6 @@
   // end fields defining the extent of the contiguous allocation region.)
   // But G1CollectedHeap doesn't yet support this.
 
-  // Return an estimate of the maximum allocation that could be performed
-  // without triggering any collection or expansion activity. In a
-  // generational collector, for example, this is probably the largest
-  // allocation that could be supported (without expansion) in the youngest
-  // generation. It is "unsafe" because no locks are taken; the result
-  // should be treated as an approximation, not a guarantee, for use in
-  // heuristic resizing decisions.
-  virtual size_t unsafe_max_alloc();
-
   virtual bool is_maximal_no_gc() const {
     return _hrs.available() == 0;
   }
diff -r 47e8e40b94d3 -r 439f0d76cff3 src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Mon Aug 25 17:05:18 2014 -0400
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Tue Aug 26 10:28:43 2014 +0200
@@ -485,10 +485,6 @@
   young_gen()->eden_space()->ensure_parsability();
 }
 
-size_t ParallelScavengeHeap::unsafe_max_alloc() {
-  return young_gen()->eden_space()->free_in_bytes();
-}
-
 size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
   return young_gen()->eden_space()->tlab_capacity(thr);
 }
diff -r 47e8e40b94d3 -r 439f0d76cff3 src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Mon Aug 25 17:05:18 2014 -0400
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Tue Aug 26 10:28:43 2014 +0200
@@ -184,8 +184,6 @@
   void accumulate_statistics_all_tlabs();
   void resize_all_tlabs();
 
-  size_t unsafe_max_alloc();
-
   bool supports_tlab_allocation() const { return true; }
 
   size_t tlab_capacity(Thread* thr) const;
diff -r 47e8e40b94d3 -r 439f0d76cff3 src/share/vm/gc_interface/collectedHeap.hpp
--- a/src/share/vm/gc_interface/collectedHeap.hpp	Mon Aug 25 17:05:18 2014 -0400
+++ b/src/share/vm/gc_interface/collectedHeap.hpp	Tue Aug 26 10:28:43 2014 +0200
@@ -395,15 +395,6 @@
   // allocation from them and necessitating allocation of new TLABs.
   virtual void ensure_parsability(bool retire_tlabs);
 
-  // Return an estimate of the maximum allocation that could be performed
-  // without triggering any collection or expansion activity. In a
-  // generational collector, for example, this is probably the largest
-  // allocation that could be supported (without expansion) in the youngest
-  // generation. It is "unsafe" because no locks are taken; the result
-  // should be treated as an approximation, not a guarantee, for use in
-  // heuristic resizing decisions.
-  virtual size_t unsafe_max_alloc() = 0;
-
   // Section on thread-local allocation buffers (TLABs)
   // If the heap supports thread-local allocation buffers, it should override
   // the following methods:
diff -r 47e8e40b94d3 -r 439f0d76cff3 src/share/vm/memory/genCollectedHeap.cpp
--- a/src/share/vm/memory/genCollectedHeap.cpp	Mon Aug 25 17:05:18 2014 -0400
+++ b/src/share/vm/memory/genCollectedHeap.cpp	Tue Aug 26 10:28:43 2014 +0200
@@ -704,10 +704,6 @@
   return _gens[0]->end_addr();
 }
 
-size_t GenCollectedHeap::unsafe_max_alloc() {
-  return _gens[0]->unsafe_max_alloc_nogc();
-}
-
 // public collection interfaces
 
 void GenCollectedHeap::collect(GCCause::Cause cause) {
diff -r 47e8e40b94d3 -r 439f0d76cff3 src/share/vm/memory/genCollectedHeap.hpp
--- a/src/share/vm/memory/genCollectedHeap.hpp	Mon Aug 25 17:05:18 2014 -0400
+++ b/src/share/vm/memory/genCollectedHeap.hpp	Tue Aug 26 10:28:43 2014 +0200
@@ -166,14 +166,6 @@
   HeapWord** top_addr() const;
   HeapWord** end_addr() const;
 
-  // Return an estimate of the maximum allocation that could be performed
-  // without triggering any collection activity. In a generational
-  // collector, for example, this is probably the largest allocation that
-  // could be supported in the youngest generation. It is "unsafe" because
-  // no locks are taken; the result should be treated as an approximation,
-  // not a guarantee.
-  size_t unsafe_max_alloc();
-
   // Does this heap support heap inspection? (+PrintClassHistogram)
   virtual bool supports_heap_inspection() const { return true; }