Mercurial > hg > truffle
changeset 14192:ff355e26c78d
8029524: Remove unused method CollectedHeap::unsafe_max_alloc()
Reviewed-by: pliden, jmasa
| author | brutisso |
|---|---|
| date | Mon, 16 Dec 2013 08:54:14 +0100 |
| parents | 050a626a8895 |
| children | dd1b266713ea |
| files | src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp src/share/vm/gc_interface/collectedHeap.hpp src/share/vm/memory/genCollectedHeap.cpp src/share/vm/memory/genCollectedHeap.hpp |
| diffstat | 7 files changed, 0 insertions(+), 55 deletions(-) [+] |
line wrap: on
line diff
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Fri Dec 13 09:35:12 2013 -0800 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Mon Dec 16 08:54:14 2013 +0100 @@ -2376,25 +2376,6 @@ return blk.result(); } -size_t G1CollectedHeap::unsafe_max_alloc() { - if (free_regions() > 0) return HeapRegion::GrainBytes; - // otherwise, is there space in the current allocation region? - - // We need to store the current allocation region in a local variable - // here. The problem is that this method doesn't take any locks and - // there may be other threads which overwrite the current allocation - // region field. attempt_allocation(), for example, sets it to NULL - // and this can happen *after* the NULL check here but before the call - // to free(), resulting in a SIGSEGV. Note that this doesn't appear - // to be a problem in the optimized build, since the two loads of the - // current allocation region field are optimized away. - HeapRegion* hr = _mutator_alloc_region.get(); - if (hr == NULL) { - return 0; - } - return hr->free(); -} - bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) { switch (cause) { case GCCause::_gc_locker: return GCLockerInvokesConcurrent;
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Fri Dec 13 09:35:12 2013 -0800 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Mon Dec 16 08:54:14 2013 +0100 @@ -1183,15 +1183,6 @@ // end fields defining the extent of the contiguous allocation region.) // But G1CollectedHeap doesn't yet support this. - // Return an estimate of the maximum allocation that could be performed - // without triggering any collection or expansion activity. In a - // generational collector, for example, this is probably the largest - // allocation that could be supported (without expansion) in the youngest - // generation. It is "unsafe" because no locks are taken; the result - // should be treated as an approximation, not a guarantee, for use in - // heuristic resizing decisions. - virtual size_t unsafe_max_alloc(); - virtual bool is_maximal_no_gc() const { return _g1_storage.uncommitted_size() == 0; }
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Fri Dec 13 09:35:12 2013 -0800 +++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Mon Dec 16 08:54:14 2013 +0100 @@ -484,10 +484,6 @@ young_gen()->eden_space()->ensure_parsability(); } -size_t ParallelScavengeHeap::unsafe_max_alloc() { - return young_gen()->eden_space()->free_in_bytes(); -} - size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const { return young_gen()->eden_space()->tlab_capacity(thr); }
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp Fri Dec 13 09:35:12 2013 -0800 +++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp Mon Dec 16 08:54:14 2013 +0100 @@ -184,8 +184,6 @@ void accumulate_statistics_all_tlabs(); void resize_all_tlabs(); - size_t unsafe_max_alloc(); - bool supports_tlab_allocation() const { return true; } size_t tlab_capacity(Thread* thr) const;
--- a/src/share/vm/gc_interface/collectedHeap.hpp Fri Dec 13 09:35:12 2013 -0800 +++ b/src/share/vm/gc_interface/collectedHeap.hpp Mon Dec 16 08:54:14 2013 +0100 @@ -389,15 +389,6 @@ // allocation from them and necessitating allocation of new TLABs. virtual void ensure_parsability(bool retire_tlabs); - // Return an estimate of the maximum allocation that could be performed - // without triggering any collection or expansion activity. In a - // generational collector, for example, this is probably the largest - // allocation that could be supported (without expansion) in the youngest - // generation. It is "unsafe" because no locks are taken; the result - // should be treated as an approximation, not a guarantee, for use in - // heuristic resizing decisions. - virtual size_t unsafe_max_alloc() = 0; - // Section on thread-local allocation buffers (TLABs) // If the heap supports thread-local allocation buffers, it should override // the following methods:
--- a/src/share/vm/memory/genCollectedHeap.cpp Fri Dec 13 09:35:12 2013 -0800 +++ b/src/share/vm/memory/genCollectedHeap.cpp Mon Dec 16 08:54:14 2013 +0100 @@ -673,10 +673,6 @@ return _gens[0]->end_addr(); } -size_t GenCollectedHeap::unsafe_max_alloc() { - return _gens[0]->unsafe_max_alloc_nogc(); -} - // public collection interfaces void GenCollectedHeap::collect(GCCause::Cause cause) {
--- a/src/share/vm/memory/genCollectedHeap.hpp Fri Dec 13 09:35:12 2013 -0800 +++ b/src/share/vm/memory/genCollectedHeap.hpp Mon Dec 16 08:54:14 2013 +0100 @@ -166,14 +166,6 @@ HeapWord** top_addr() const; HeapWord** end_addr() const; - // Return an estimate of the maximum allocation that could be performed - // without triggering any collection activity. In a generational - // collector, for example, this is probably the largest allocation that - // could be supported in the youngest generation. It is "unsafe" because - // no locks are taken; the result should be treated as an approximation, - // not a guarantee. - size_t unsafe_max_alloc(); - // Does this heap support heap inspection? (+PrintClassHistogram) virtual bool supports_heap_inspection() const { return true; }