# HG changeset patch
# User goetz
# Date 1399556237 -7200
# Node ID c49dcaf78a6554cbc65a81cffdeb7a74c598d8b8
# Parent ce8f6bb717c975c5578d3b844ebd94527d596cfd
8042737: Introduce umbrella header prefetch.inline.hpp
Reviewed-by: twisti, stefank

diff -r ce8f6bb717c9 -r c49dcaf78a65 src/os/aix/vm/thread_aix.inline.hpp
--- a/src/os/aix/vm/thread_aix.inline.hpp	Tue Apr 29 15:17:27 2014 +0200
+++ b/src/os/aix/vm/thread_aix.inline.hpp	Thu May 08 15:37:17 2014 +0200
@@ -26,12 +26,9 @@
 #ifndef OS_AIX_VM_THREAD_AIX_INLINE_HPP
 #define OS_AIX_VM_THREAD_AIX_INLINE_HPP
 
-#include "runtime/prefetch.hpp"
 #include "runtime/thread.hpp"
 #include "runtime/threadLocalStorage.hpp"
 
-#include "prefetch_aix_ppc.inline.hpp"
-
 // Contains inlined functions for class Thread and ThreadLocalStorage
 
 inline void ThreadLocalStorage::pd_invalidate_all() {} // nothing to do
diff -r ce8f6bb717c9 -r c49dcaf78a65 src/os/bsd/vm/thread_bsd.inline.hpp
--- a/src/os/bsd/vm/thread_bsd.inline.hpp	Tue Apr 29 15:17:27 2014 +0200
+++ b/src/os/bsd/vm/thread_bsd.inline.hpp	Thu May 08 15:37:17 2014 +0200
@@ -31,12 +31,6 @@
 #include "runtime/thread.hpp"
 #include "runtime/threadLocalStorage.hpp"
 
-#ifdef TARGET_OS_ARCH_bsd_x86
-# include "prefetch_bsd_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_bsd_zero
-# include "prefetch_bsd_zero.inline.hpp"
-#endif
 
 // Contains inlined functions for class Thread and ThreadLocalStorage
 
diff -r ce8f6bb717c9 -r c49dcaf78a65 src/os/linux/vm/thread_linux.inline.hpp
--- a/src/os/linux/vm/thread_linux.inline.hpp	Tue Apr 29 15:17:27 2014 +0200
+++ b/src/os/linux/vm/thread_linux.inline.hpp	Thu May 08 15:37:17 2014 +0200
@@ -29,24 +29,8 @@
 #error "This file should only be included from thread.inline.hpp"
 #endif
 
-#include "runtime/prefetch.hpp"
 #include "runtime/thread.hpp"
 #include "runtime/threadLocalStorage.hpp"
-#ifdef TARGET_OS_ARCH_linux_x86
-# include "prefetch_linux_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_sparc
-# include "prefetch_linux_sparc.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_zero
-# include "prefetch_linux_zero.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_arm
-# include "prefetch_linux_arm.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_ppc
-# include "prefetch_linux_ppc.inline.hpp"
-#endif
 
 // Contains inlined functions for class Thread and ThreadLocalStorage
 
diff -r ce8f6bb717c9 -r c49dcaf78a65 src/os/solaris/vm/thread_solaris.inline.hpp
--- a/src/os/solaris/vm/thread_solaris.inline.hpp	Tue Apr 29 15:17:27 2014 +0200
+++ b/src/os/solaris/vm/thread_solaris.inline.hpp	Thu May 08 15:37:17 2014 +0200
@@ -30,15 +30,8 @@
 #endif
 
 #include "runtime/atomic.inline.hpp"
-#include "runtime/prefetch.hpp"
 #include "runtime/thread.hpp"
 #include "runtime/threadLocalStorage.hpp"
-#ifdef TARGET_OS_ARCH_solaris_x86
-# include "prefetch_solaris_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_solaris_sparc
-# include "prefetch_solaris_sparc.inline.hpp"
-#endif
 
 // Thread::current is "hot" it's called > 128K times in the 1st 500 msecs of
 // startup.
diff -r ce8f6bb717c9 -r c49dcaf78a65 src/os/windows/vm/thread_windows.inline.hpp
--- a/src/os/windows/vm/thread_windows.inline.hpp	Tue Apr 29 15:17:27 2014 +0200
+++ b/src/os/windows/vm/thread_windows.inline.hpp	Thu May 08 15:37:17 2014 +0200
@@ -29,12 +29,8 @@
 #error "This file should only be included from thread.inline.hpp"
 #endif
 
-#include "runtime/prefetch.hpp"
 #include "runtime/thread.hpp"
 #include "runtime/threadLocalStorage.hpp"
-#ifdef TARGET_OS_ARCH_windows_x86
-# include "prefetch_windows_x86.inline.hpp"
-#endif
 
 // Contains inlined functions for class Thread and ThreadLocalStorage
 
diff -r ce8f6bb717c9 -r c49dcaf78a65 src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Tue Apr 29 15:17:27 2014 +0200
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Thu May 08 15:37:17 2014 +0200
@@ -33,6 +33,7 @@
 #include "memory/allocation.inline.hpp"
 #include "memory/blockOffsetTable.inline.hpp"
 #include "memory/resourceArea.hpp"
+#include "memory/space.inline.hpp"
 #include "memory/universe.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/globals.hpp"
diff -r ce8f6bb717c9 -r c49dcaf78a65 src/share/vm/gc_implementation/g1/concurrentMark.cpp
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Tue Apr 29 15:17:27 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu May 08 15:37:17 2014 +0200
@@ -45,6 +45,7 @@
 #include "oops/oop.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/java.hpp"
+#include "runtime/prefetch.inline.hpp"
 #include "services/memTracker.hpp"
 
 // Concurrent marking bit map wrapper
diff -r ce8f6bb717c9 -r c49dcaf78a65 src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Apr 29 15:17:27 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu May 08 15:37:17 2014 +0200
@@ -60,6 +60,7 @@
 #include "memory/referenceProcessor.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/oop.pcgc.inline.hpp"
+#include "runtime/prefetch.inline.hpp"
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/vmThread.hpp"
 #include "utilities/ticks.hpp"
diff -r ce8f6bb717c9 -r c49dcaf78a65 src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp
--- a/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp	Tue Apr 29 15:17:27 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp	Thu May 08 15:37:17 2014 +0200
@@ -31,6 +31,7 @@
 #include "gc_implementation/g1/g1RemSet.hpp"
 #include "gc_implementation/g1/g1RemSet.inline.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
+#include "runtime/prefetch.inline.hpp"
 
 /*
  * This really ought to be an inline function, but apparently the C++
diff -r ce8f6bb717c9 -r c49dcaf78a65 src/share/vm/gc_implementation/g1/heapRegion.cpp
--- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Tue Apr 29 15:17:27 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Thu May 08 15:37:17 2014 +0200
@@ -32,6 +32,7 @@
 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
 #include "memory/genOopClosures.inline.hpp"
 #include "memory/iterator.hpp"
+#include "memory/space.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/orderAccess.inline.hpp"
 
diff -r ce8f6bb717c9 -r c49dcaf78a65 src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp
--- a/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp	Tue Apr 29 15:17:27 2014 +0200
+++ b/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp	Thu May 08 15:37:17 2014 +0200
@@ -30,6 +30,7 @@
 #include "gc_implementation/parallelScavenge/psYoungGen.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/oop.psgc.inline.hpp"
+#include "runtime/prefetch.inline.hpp"
 
 // Checks an individual oop for missing precise marks. Mark
 // may be either dirty or newgen.
diff -r ce8f6bb717c9 -r c49dcaf78a65 src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp
--- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp	Tue Apr 29 15:17:27 2014 +0200
+++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp	Thu May 08 15:37:17 2014 +0200
@@ -32,6 +32,7 @@
 #include "gc_implementation/shared/markSweep.inline.hpp"
 #include "gc_implementation/shared/spaceDecorator.hpp"
 #include "oops/oop.inline.hpp"
+#include "runtime/prefetch.inline.hpp"
 
 PSMarkSweepDecorator* PSMarkSweepDecorator::_destination_decorator = NULL;
 
diff -r ce8f6bb717c9 -r c49dcaf78a65 src/share/vm/memory/defNewGeneration.cpp
--- a/src/share/vm/memory/defNewGeneration.cpp	Tue Apr 29 15:17:27 2014 +0200
+++ b/src/share/vm/memory/defNewGeneration.cpp	Thu May 08 15:37:17 2014 +0200
@@ -42,6 +42,7 @@
 #include "oops/instanceRefKlass.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/java.hpp"
+#include "runtime/prefetch.inline.hpp"
 #include "runtime/thread.inline.hpp"
 #include "utilities/copy.hpp"
 #include "utilities/stack.inline.hpp"
diff -r ce8f6bb717c9 -r c49dcaf78a65 src/share/vm/memory/space.cpp
--- a/src/share/vm/memory/space.cpp	Tue Apr 29 15:17:27 2014 +0200
+++ b/src/share/vm/memory/space.cpp	Thu May 08 15:37:17 2014 +0200
@@ -37,6 +37,7 @@
 #include "oops/oop.inline.hpp"
 #include "oops/oop.inline2.hpp"
 #include "runtime/java.hpp"
+#include "runtime/prefetch.inline.hpp"
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/safepoint.hpp"
 #include "utilities/copy.hpp"
diff -r ce8f6bb717c9 -r c49dcaf78a65 src/share/vm/memory/space.hpp
--- a/src/share/vm/memory/space.hpp	Tue Apr 29 15:17:27 2014 +0200
+++ b/src/share/vm/memory/space.hpp	Thu May 08 15:37:17 2014 +0200
@@ -33,24 +33,8 @@
 #include "memory/watermark.hpp"
 #include "oops/markOop.hpp"
 #include "runtime/mutexLocker.hpp"
-#include "runtime/prefetch.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/workgroup.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "os_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "os_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "os_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_aix
-# include "os_aix.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "os_bsd.inline.hpp"
-#endif
 
 // A space is an abstraction for the "storage units" backing
 // up the generation abstraction. It includes specific
@@ -512,272 +496,6 @@
                      size_t word_len);
 };
 
-#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) { \
-  /* Compute the new addresses for the live objects and store it in the mark \
-   * Used by universe::mark_sweep_phase2() \
-   */ \
-  HeapWord* compact_top; /* This is where we are currently compacting to. */ \
- \
-  /* We're sure to be here before any objects are compacted into this \
-   * space, so this is a good time to initialize this: \
-   */ \
-  set_compaction_top(bottom()); \
- \
-  if (cp->space == NULL) { \
-    assert(cp->gen != NULL, "need a generation"); \
-    assert(cp->threshold == NULL, "just checking"); \
-    assert(cp->gen->first_compaction_space() == this, "just checking"); \
-    cp->space = cp->gen->first_compaction_space(); \
-    compact_top = cp->space->bottom(); \
-    cp->space->set_compaction_top(compact_top); \
-    cp->threshold = cp->space->initialize_threshold(); \
-  } else { \
-    compact_top = cp->space->compaction_top(); \
-  } \
- \
-  /* We allow some amount of garbage towards the bottom of the space, so \
-   * we don't start compacting before there is a significant gain to be made.\
-   * Occasionally, we want to ensure a full compaction, which is determined \
-   * by the MarkSweepAlwaysCompactCount parameter. \
-   */ \
-  uint invocations = MarkSweep::total_invocations(); \
-  bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0); \
- \
-  size_t allowed_deadspace = 0; \
-  if (skip_dead) { \
-    const size_t ratio = allowed_dead_ratio(); \
-    allowed_deadspace = (capacity() * ratio / 100) / HeapWordSize; \
-  } \
- \
-  HeapWord* q = bottom(); \
-  HeapWord* t = scan_limit(); \
- \
-  HeapWord* end_of_live= q; /* One byte beyond the last byte of the last \
-                               live object. */ \
-  HeapWord* first_dead = end();/* The first dead object. */ \
-  LiveRange* liveRange = NULL; /* The current live range, recorded in the \
-                                  first header of preceding free area. */ \
-  _first_dead = first_dead; \
- \
-  const intx interval = PrefetchScanIntervalInBytes; \
- \
-  while (q < t) { \
-    assert(!block_is_obj(q) || \
-           oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() || \
-           oop(q)->mark()->has_bias_pattern(), \
-           "these are the only valid states during a mark sweep"); \
-    if (block_is_obj(q) && oop(q)->is_gc_marked()) { \
-      /* prefetch beyond q */ \
-      Prefetch::write(q, interval); \
-      size_t size = block_size(q); \
-      compact_top = cp->space->forward(oop(q), size, cp, compact_top); \
-      q += size; \
-      end_of_live = q; \
-    } else { \
-      /* run over all the contiguous dead objects */ \
-      HeapWord* end = q; \
-      do { \
-        /* prefetch beyond end */ \
-        Prefetch::write(end, interval); \
-        end += block_size(end); \
-      } while (end < t && (!block_is_obj(end) || !oop(end)->is_gc_marked()));\
- \
-      /* see if we might want to pretend this object is alive so that \
-       * we don't have to compact quite as often. \
-       */ \
-      if (allowed_deadspace > 0 && q == compact_top) { \
-        size_t sz = pointer_delta(end, q); \
-        if (insert_deadspace(allowed_deadspace, q, sz)) { \
-          compact_top = cp->space->forward(oop(q), sz, cp, compact_top); \
-          q = end; \
-          end_of_live = end; \
-          continue; \
-        } \
-      } \
- \
-      /* otherwise, it really is a free region. */ \
- \
-      /* for the previous LiveRange, record the end of the live objects. */ \
-      if (liveRange) { \
-        liveRange->set_end(q); \
-      } \
- \
-      /* record the current LiveRange object. \
-       * liveRange->start() is overlaid on the mark word. \
-       */ \
-      liveRange = (LiveRange*)q; \
-      liveRange->set_start(end); \
-      liveRange->set_end(end); \
- \
-      /* see if this is the first dead region. */ \
-      if (q < first_dead) { \
-        first_dead = q; \
-      } \
- \
-      /* move on to the next object */ \
-      q = end; \
-    } \
-  } \
- \
-  assert(q == t, "just checking"); \
-  if (liveRange != NULL) { \
-    liveRange->set_end(q); \
-  } \
-  _end_of_live = end_of_live; \
-  if (end_of_live < first_dead) { \
-    first_dead = end_of_live; \
-  } \
-  _first_dead = first_dead; \
- \
-  /* save the compaction_top of the compaction space. */ \
-  cp->space->set_compaction_top(compact_top); \
-}
-
-#define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) { \
-  /* adjust all the interior pointers to point at the new locations of objects \
-   * Used by MarkSweep::mark_sweep_phase3() */ \
- \
-  HeapWord* q = bottom(); \
-  HeapWord* t = _end_of_live; /* Established by "prepare_for_compaction". */ \
- \
-  assert(_first_dead <= _end_of_live, "Stands to reason, no?"); \
- \
-  if (q < t && _first_dead > q && \
-      !oop(q)->is_gc_marked()) { \
-    /* we have a chunk of the space which hasn't moved and we've \
-     * reinitialized the mark word during the previous pass, so we can't \
-     * use is_gc_marked for the traversal. */ \
-    HeapWord* end = _first_dead; \
- \
-    while (q < end) { \
-      /* I originally tried to conjoin "block_start(q) == q" to the \
-       * assertion below, but that doesn't work, because you can't \
-       * accurately traverse previous objects to get to the current one \
-       * after their pointers have been \
-       * updated, until the actual compaction is done. dld, 4/00 */ \
-      assert(block_is_obj(q), \
-             "should be at block boundaries, and should be looking at objs"); \
- \
-      /* point all the oops to the new location */ \
-      size_t size = oop(q)->adjust_pointers(); \
-      size = adjust_obj_size(size); \
- \
-      q += size; \
-    } \
- \
-    if (_first_dead == t) { \
-      q = t; \
-    } else { \
-      /* $$$ This is funky. Using this to read the previously written \
-       * LiveRange. See also use below. */ \
-      q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer(); \
-    } \
-  } \
- \
-  const intx interval = PrefetchScanIntervalInBytes; \
- \
-  debug_only(HeapWord* prev_q = NULL); \
-  while (q < t) { \
-    /* prefetch beyond q */ \
-    Prefetch::write(q, interval); \
-    if (oop(q)->is_gc_marked()) { \
-      /* q is alive */ \
-      /* point all the oops to the new location */ \
-      size_t size = oop(q)->adjust_pointers(); \
-      size = adjust_obj_size(size); \
-      debug_only(prev_q = q); \
-      q += size; \
-    } else { \
-      /* q is not a live object, so its mark should point at the next \
-       * live object */ \
-      debug_only(prev_q = q); \
-      q = (HeapWord*) oop(q)->mark()->decode_pointer(); \
-      assert(q > prev_q, "we should be moving forward through memory"); \
-    } \
-  } \
- \
-  assert(q == t, "just checking"); \
-}
-
-#define SCAN_AND_COMPACT(obj_size) { \
-  /* Copy all live objects to their new location \
-   * Used by MarkSweep::mark_sweep_phase4() */ \
- \
-  HeapWord* q = bottom(); \
-  HeapWord* const t = _end_of_live; \
-  debug_only(HeapWord* prev_q = NULL); \
- \
-  if (q < t && _first_dead > q && \
-      !oop(q)->is_gc_marked()) { \
-    debug_only( \
-    /* we have a chunk of the space which hasn't moved and we've reinitialized \
-     * the mark word during the previous pass, so we can't use is_gc_marked for \
-     * the traversal. */ \
-    HeapWord* const end = _first_dead; \
- \
-    while (q < end) { \
-      size_t size = obj_size(q); \
-      assert(!oop(q)->is_gc_marked(), \
-             "should be unmarked (special dense prefix handling)"); \
-      debug_only(prev_q = q); \
-      q += size; \
-    } \
-    ) /* debug_only */ \
- \
-    if (_first_dead == t) { \
-      q = t; \
-    } else { \
-      /* $$$ Funky */ \
-      q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer(); \
-    } \
-  } \
- \
-  const intx scan_interval = PrefetchScanIntervalInBytes; \
-  const intx copy_interval = PrefetchCopyIntervalInBytes; \
-  while (q < t) { \
-    if (!oop(q)->is_gc_marked()) { \
-      /* mark is pointer to next marked oop */ \
-      debug_only(prev_q = q); \
-      q = (HeapWord*) oop(q)->mark()->decode_pointer(); \
-      assert(q > prev_q, "we should be moving forward through memory"); \
-    } else { \
-      /* prefetch beyond q */ \
-      Prefetch::read(q, scan_interval); \
- \
-      /* size and destination */ \
-      size_t size = obj_size(q); \
-      HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee(); \
- \
-      /* prefetch beyond compaction_top */ \
-      Prefetch::write(compaction_top, copy_interval); \
- \
-      /* copy object and reinit its mark */ \
-      assert(q != compaction_top, "everything in this pass should be moving"); \
-      Copy::aligned_conjoint_words(q, compaction_top, size); \
-      oop(compaction_top)->init_mark(); \
-      assert(oop(compaction_top)->klass() != NULL, "should have a class"); \
- \
-      debug_only(prev_q = q); \
-      q += size; \
-    } \
-  } \
- \
-  /* Let's remember if we were empty before we did the compaction. */ \
-  bool was_empty = used_region().is_empty(); \
-  /* Reset space after compaction is complete */ \
-  reset_after_compaction(); \
-  /* We do this clear, below, since it has overloaded meanings for some */ \
-  /* space subtypes. For example, OffsetTableContigSpace's that were */ \
-  /* compacted into will have had their offset table thresholds updated */ \
-  /* continuously, but those that weren't need to have their thresholds */ \
-  /* re-initialized. Also mangles unused area for debugging. */ \
-  if (used_region().is_empty()) { \
-    if (!was_empty) clear(SpaceDecorator::Mangle); \
-  } else { \
-    if (ZapUnusedHeapArea) mangle_unused_area(); \
-  } \
-}
-
 class GenSpaceMangler;
 
 // A space in which the free area is contiguous. It therefore supports
diff -r ce8f6bb717c9 -r c49dcaf78a65 src/share/vm/memory/space.inline.hpp
--- a/src/share/vm/memory/space.inline.hpp	Tue Apr 29 15:17:27 2014 +0200
+++ b/src/share/vm/memory/space.inline.hpp	Thu May 08 15:37:17 2014 +0200
@@ -28,12 +28,279 @@
 #include "gc_interface/collectedHeap.hpp"
 #include "memory/space.hpp"
 #include "memory/universe.hpp"
+#include "runtime/prefetch.inline.hpp"
 #include "runtime/safepoint.hpp"
 
 inline HeapWord* Space::block_start(const void* p) {
   return block_start_const(p);
 }
 
+#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) { \
+  /* Compute the new addresses for the live objects and store it in the mark \
+   * Used by universe::mark_sweep_phase2() \
+   */ \
+  HeapWord* compact_top; /* This is where we are currently compacting to. */ \
+ \
+  /* We're sure to be here before any objects are compacted into this \
+   * space, so this is a good time to initialize this: \
+   */ \
+  set_compaction_top(bottom()); \
+ \
+  if (cp->space == NULL) { \
+    assert(cp->gen != NULL, "need a generation"); \
+    assert(cp->threshold == NULL, "just checking"); \
+    assert(cp->gen->first_compaction_space() == this, "just checking"); \
+    cp->space = cp->gen->first_compaction_space(); \
+    compact_top = cp->space->bottom(); \
+    cp->space->set_compaction_top(compact_top); \
+    cp->threshold = cp->space->initialize_threshold(); \
+  } else { \
+    compact_top = cp->space->compaction_top(); \
+  } \
+ \
+  /* We allow some amount of garbage towards the bottom of the space, so \
+   * we don't start compacting before there is a significant gain to be made.\
+   * Occasionally, we want to ensure a full compaction, which is determined \
+   * by the MarkSweepAlwaysCompactCount parameter. \
+   */ \
+  uint invocations = MarkSweep::total_invocations(); \
+  bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0); \
+ \
+  size_t allowed_deadspace = 0; \
+  if (skip_dead) { \
+    const size_t ratio = allowed_dead_ratio(); \
+    allowed_deadspace = (capacity() * ratio / 100) / HeapWordSize; \
+  } \
+ \
+  HeapWord* q = bottom(); \
+  HeapWord* t = scan_limit(); \
+ \
+  HeapWord* end_of_live= q; /* One byte beyond the last byte of the last \
+                               live object. */ \
+  HeapWord* first_dead = end();/* The first dead object. */ \
+  LiveRange* liveRange = NULL; /* The current live range, recorded in the \
+                                  first header of preceding free area. */ \
+  _first_dead = first_dead; \
+ \
+  const intx interval = PrefetchScanIntervalInBytes; \
+ \
+  while (q < t) { \
+    assert(!block_is_obj(q) || \
+           oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() || \
+           oop(q)->mark()->has_bias_pattern(), \
+           "these are the only valid states during a mark sweep"); \
+    if (block_is_obj(q) && oop(q)->is_gc_marked()) { \
+      /* prefetch beyond q */ \
+      Prefetch::write(q, interval); \
+      size_t size = block_size(q); \
+      compact_top = cp->space->forward(oop(q), size, cp, compact_top); \
+      q += size; \
+      end_of_live = q; \
+    } else { \
+      /* run over all the contiguous dead objects */ \
+      HeapWord* end = q; \
+      do { \
+        /* prefetch beyond end */ \
+        Prefetch::write(end, interval); \
+        end += block_size(end); \
+      } while (end < t && (!block_is_obj(end) || !oop(end)->is_gc_marked()));\
+ \
+      /* see if we might want to pretend this object is alive so that \
+       * we don't have to compact quite as often. \
+       */ \
+      if (allowed_deadspace > 0 && q == compact_top) { \
+        size_t sz = pointer_delta(end, q); \
+        if (insert_deadspace(allowed_deadspace, q, sz)) { \
+          compact_top = cp->space->forward(oop(q), sz, cp, compact_top); \
+          q = end; \
+          end_of_live = end; \
+          continue; \
+        } \
+      } \
+ \
+      /* otherwise, it really is a free region. */ \
+ \
+      /* for the previous LiveRange, record the end of the live objects. */ \
+      if (liveRange) { \
+        liveRange->set_end(q); \
+      } \
+ \
+      /* record the current LiveRange object. \
+       * liveRange->start() is overlaid on the mark word. \
+       */ \
+      liveRange = (LiveRange*)q; \
+      liveRange->set_start(end); \
+      liveRange->set_end(end); \
+ \
+      /* see if this is the first dead region. */ \
+      if (q < first_dead) { \
+        first_dead = q; \
+      } \
+ \
+      /* move on to the next object */ \
+      q = end; \
+    } \
+  } \
+ \
+  assert(q == t, "just checking"); \
+  if (liveRange != NULL) { \
+    liveRange->set_end(q); \
+  } \
+  _end_of_live = end_of_live; \
+  if (end_of_live < first_dead) { \
+    first_dead = end_of_live; \
+  } \
+  _first_dead = first_dead; \
+ \
+  /* save the compaction_top of the compaction space. */ \
+  cp->space->set_compaction_top(compact_top); \
+}
+
+#define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) { \
+  /* adjust all the interior pointers to point at the new locations of objects \
+   * Used by MarkSweep::mark_sweep_phase3() */ \
+ \
+  HeapWord* q = bottom(); \
+  HeapWord* t = _end_of_live; /* Established by "prepare_for_compaction". */ \
+ \
+  assert(_first_dead <= _end_of_live, "Stands to reason, no?"); \
+ \
+  if (q < t && _first_dead > q && \
+      !oop(q)->is_gc_marked()) { \
+    /* we have a chunk of the space which hasn't moved and we've \
+     * reinitialized the mark word during the previous pass, so we can't \
+     * use is_gc_marked for the traversal. */ \
+    HeapWord* end = _first_dead; \
+ \
+    while (q < end) { \
+      /* I originally tried to conjoin "block_start(q) == q" to the \
+       * assertion below, but that doesn't work, because you can't \
+       * accurately traverse previous objects to get to the current one \
+       * after their pointers have been \
+       * updated, until the actual compaction is done. dld, 4/00 */ \
+      assert(block_is_obj(q), \
+             "should be at block boundaries, and should be looking at objs"); \
+ \
+      /* point all the oops to the new location */ \
+      size_t size = oop(q)->adjust_pointers(); \
+      size = adjust_obj_size(size); \
+ \
+      q += size; \
+    } \
+ \
+    if (_first_dead == t) { \
+      q = t; \
+    } else { \
+      /* $$$ This is funky. Using this to read the previously written \
+       * LiveRange. See also use below. */ \
+      q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer(); \
+    } \
+  } \
+ \
+  const intx interval = PrefetchScanIntervalInBytes; \
+ \
+  debug_only(HeapWord* prev_q = NULL); \
+  while (q < t) { \
+    /* prefetch beyond q */ \
+    Prefetch::write(q, interval); \
+    if (oop(q)->is_gc_marked()) { \
+      /* q is alive */ \
+      /* point all the oops to the new location */ \
+      size_t size = oop(q)->adjust_pointers(); \
+      size = adjust_obj_size(size); \
+      debug_only(prev_q = q); \
+      q += size; \
+    } else { \
+      /* q is not a live object, so its mark should point at the next \
+       * live object */ \
+      debug_only(prev_q = q); \
+      q = (HeapWord*) oop(q)->mark()->decode_pointer(); \
+      assert(q > prev_q, "we should be moving forward through memory"); \
+    } \
+  } \
+ \
+  assert(q == t, "just checking"); \
+}
+
+#define SCAN_AND_COMPACT(obj_size) { \
+  /* Copy all live objects to their new location \
+   * Used by MarkSweep::mark_sweep_phase4() */ \
+ \
+  HeapWord* q = bottom(); \
+  HeapWord* const t = _end_of_live; \
+  debug_only(HeapWord* prev_q = NULL); \
+ \
+  if (q < t && _first_dead > q && \
+      !oop(q)->is_gc_marked()) { \
+    debug_only( \
+    /* we have a chunk of the space which hasn't moved and we've reinitialized \
+     * the mark word during the previous pass, so we can't use is_gc_marked for \
+     * the traversal. */ \
+    HeapWord* const end = _first_dead; \
+ \
+    while (q < end) { \
+      size_t size = obj_size(q); \
+      assert(!oop(q)->is_gc_marked(), \
+             "should be unmarked (special dense prefix handling)"); \
+      debug_only(prev_q = q); \
+      q += size; \
+    } \
+    ) /* debug_only */ \
+ \
+    if (_first_dead == t) { \
+      q = t; \
+    } else { \
+      /* $$$ Funky */ \
+      q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer(); \
+    } \
+  } \
+ \
+  const intx scan_interval = PrefetchScanIntervalInBytes; \
+  const intx copy_interval = PrefetchCopyIntervalInBytes; \
+  while (q < t) { \
+    if (!oop(q)->is_gc_marked()) { \
+      /* mark is pointer to next marked oop */ \
+      debug_only(prev_q = q); \
+      q = (HeapWord*) oop(q)->mark()->decode_pointer(); \
+      assert(q > prev_q, "we should be moving forward through memory"); \
+    } else { \
+      /* prefetch beyond q */ \
+      Prefetch::read(q, scan_interval); \
+ \
+      /* size and destination */ \
+      size_t size = obj_size(q); \
+      HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee(); \
+ \
+      /* prefetch beyond compaction_top */ \
+      Prefetch::write(compaction_top, copy_interval); \
+ \
+      /* copy object and reinit its mark */ \
+      assert(q != compaction_top, "everything in this pass should be moving"); \
+      Copy::aligned_conjoint_words(q, compaction_top, size); \
+      oop(compaction_top)->init_mark(); \
+      assert(oop(compaction_top)->klass() != NULL, "should have a class"); \
+ \
+      debug_only(prev_q = q); \
+      q += size; \
+    } \
+  } \
+ \
+  /* Let's remember if we were empty before we did the compaction. */ \
+  bool was_empty = used_region().is_empty(); \
+  /* Reset space after compaction is complete */ \
+  reset_after_compaction(); \
+  /* We do this clear, below, since it has overloaded meanings for some */ \
+  /* space subtypes. For example, OffsetTableContigSpace's that were */ \
+  /* compacted into will have had their offset table thresholds updated */ \
+  /* continuously, but those that weren't need to have their thresholds */ \
+  /* re-initialized. Also mangles unused area for debugging. */ \
+  if (used_region().is_empty()) { \
+    if (!was_empty) clear(SpaceDecorator::Mangle); \
+  } else { \
+    if (ZapUnusedHeapArea) mangle_unused_area(); \
+  } \
+}
+
 inline HeapWord* OffsetTableContigSpace::allocate(size_t size) {
   HeapWord* res = ContiguousSpace::allocate(size);
   if (res != NULL) {
diff -r ce8f6bb717c9 -r c49dcaf78a65 src/share/vm/oops/klass.hpp
--- a/src/share/vm/oops/klass.hpp	Tue Apr 29 15:17:27 2014 +0200
+++ b/src/share/vm/oops/klass.hpp	Thu May 08 15:37:17 2014 +0200
@@ -584,36 +584,6 @@
 
   static void clean_weak_klass_links(BoolObjectClosure* is_alive);
 
-  // Prefetch within oop iterators. This is a macro because we
-  // can't guarantee that the compiler will inline it. In 64-bit
-  // it generally doesn't. Signature is
-  //
-  // static void prefetch_beyond(oop* const start,
-  //                             oop* const end,
-  //                             const intx foffset,
-  //                             const Prefetch::style pstyle);
-#define prefetch_beyond(start, end, foffset, pstyle) { \
-    const intx foffset_ = (foffset); \
-    const Prefetch::style pstyle_ = (pstyle); \
-    assert(foffset_ > 0, "prefetch beyond, not behind"); \
-    if (pstyle_ != Prefetch::do_none) { \
-      oop* ref = (start); \
-      if (ref < (end)) { \
-        switch (pstyle_) { \
-        case Prefetch::do_read: \
-          Prefetch::read(*ref, foffset_); \
-          break; \
-        case Prefetch::do_write: \
-          Prefetch::write(*ref, foffset_); \
-          break; \
-        default: \
-          ShouldNotReachHere(); \
-          break; \
-        } \
-      } \
-    } \
-  }
-
   // iterators
   virtual int oop_oop_iterate(oop obj, ExtendedOopClosure* blk) = 0;
   virtual int oop_oop_iterate_v(oop obj, ExtendedOopClosure* blk) {
diff -r ce8f6bb717c9 -r c49dcaf78a65 src/share/vm/precompiled/precompiled.hpp
--- a/src/share/vm/precompiled/precompiled.hpp	Tue Apr 29 15:17:27 2014 +0200
+++ b/src/share/vm/precompiled/precompiled.hpp	Thu May 08 15:37:17 2014 +0200
@@ -199,6 +199,7 @@
 # include "runtime/perfData.hpp"
 # include "runtime/perfMemory.hpp"
 # include "runtime/prefetch.hpp"
+# include "runtime/prefetch.inline.hpp"
 # include "runtime/reflection.hpp"
 # include "runtime/reflectionUtils.hpp"
 # include "runtime/registerMap.hpp"
diff -r ce8f6bb717c9 -r c49dcaf78a65 src/share/vm/prims/unsafe.cpp
--- a/src/share/vm/prims/unsafe.cpp	Tue Apr 29 15:17:27 2014 +0200
+++ b/src/share/vm/prims/unsafe.cpp	Thu May 08 15:37:17 2014 +0200
@@ -33,6 +33,7 @@
 #include "prims/jvm.h"
 #include "runtime/globals.hpp"
 #include "runtime/interfaceSupport.hpp"
+#include "runtime/prefetch.inline.hpp"
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/reflection.hpp"
 #include "runtime/synchronizer.hpp"
diff -r ce8f6bb717c9 -r c49dcaf78a65 src/share/vm/runtime/prefetch.inline.hpp
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/runtime/prefetch.inline.hpp	Thu May 08 15:37:17 2014 +0200
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_RUNTIME_PREFETCH_INLINE_HPP
+#define SHARE_VM_RUNTIME_PREFETCH_INLINE_HPP
+
+#include "runtime/prefetch.hpp"
+
+// Linux
+#ifdef TARGET_OS_ARCH_linux_x86
+# include "prefetch_linux_x86.inline.hpp"
+#endif
+#ifdef TARGET_OS_ARCH_linux_sparc
+# include "prefetch_linux_sparc.inline.hpp"
+#endif
+#ifdef TARGET_OS_ARCH_linux_zero
+# include "prefetch_linux_zero.inline.hpp"
+#endif
+#ifdef TARGET_OS_ARCH_linux_arm
+# include "prefetch_linux_arm.inline.hpp"
+#endif
+#ifdef TARGET_OS_ARCH_linux_ppc
+# include "prefetch_linux_ppc.inline.hpp"
+#endif
+
+// Solaris
+#ifdef TARGET_OS_ARCH_solaris_x86
+# include "prefetch_solaris_x86.inline.hpp"
+#endif
+#ifdef TARGET_OS_ARCH_solaris_sparc
+# include "prefetch_solaris_sparc.inline.hpp"
+#endif
+
+// Windows
+#ifdef TARGET_OS_ARCH_windows_x86
+# include "prefetch_windows_x86.inline.hpp"
+#endif
+
+// AIX
+#ifdef TARGET_OS_ARCH_aix_ppc
+# include "prefetch_aix_ppc.inline.hpp"
+#endif
+
+// BSD
+#ifdef TARGET_OS_ARCH_bsd_x86
+# include "prefetch_bsd_x86.inline.hpp"
+#endif
+#ifdef TARGET_OS_ARCH_bsd_zero
+# include "prefetch_bsd_zero.inline.hpp"
+#endif
+
+#endif // SHARE_VM_RUNTIME_PREFETCH_INLINE_HPP
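
[Illustrative usage note; not part of the changeset.] After this change, a file that calls Prefetch::read or Prefetch::write includes the single umbrella header runtime/prefetch.inline.hpp instead of selecting a platform-specific prefetch_<os>_<arch>.inline.hpp under its own #ifdefs; the platform dispatch lives in one place. A minimal sketch of the caller-side pattern, assuming HotSpot's include paths, the Prefetch API declared in runtime/prefetch.hpp (Prefetch::read(void*, intx) / Prefetch::write(void*, intx)), and the PrefetchScanIntervalInBytes VM flag; the helper scan_live_words is hypothetical, not code from this patch:

  #include "runtime/prefetch.inline.hpp"  // umbrella header picks the platform implementation

  // Hypothetical helper: walk a word-aligned range, prefetching a fixed
  // distance ahead of the scan pointer, mirroring the Prefetch::write(q, interval)
  // pattern used by SCAN_AND_FORWARD above.
  static void scan_live_words(HeapWord* bottom, HeapWord* top) {
    const intx interval = PrefetchScanIntervalInBytes;  // prefetch distance in bytes
    for (HeapWord* q = bottom; q < top; q++) {
      Prefetch::write(q, interval);  // hint that memory near q + interval will be written
      // ... process the word at q ...
    }
  }

The same split motivates moving the SCAN_AND_* macros from space.hpp to space.inline.hpp: only .cpp and .inline.hpp files that actually expand them need the platform prefetch code, so plain .hpp files stay free of .inline.hpp dependencies.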