comparison src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp @ 20265:5255b195f828

8038404: Move object_iterate_mem from Space to CMS since it is only ever used by CMS
Reviewed-by: brutisso, tschatzl, stefank
author mgerdin
date Wed, 12 Mar 2014 15:22:45 +0100
parents 30c99d8e0f02
children 6c523f5d5440
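The code moved in the hunk below drives an UpwardsObjectClosure, whose previous()/set_previous() pair lets repeated calls over successive MemRegions resume where the last call stopped: previous() is the "max address from last time", and set_previous() records the "min address for next time". A minimal, self-contained sketch of that resume protocol follows; Addr, Region, UpwardsSketch and SpaceSketch are made-up stand-ins for illustration only (not HotSpot types), and the obj-array special case that do_object_bm handles in the real code is omitted.

// Minimal sketch of the resume protocol relied on by object_iterate_mem().
// All names and types below are simplified stand-ins, not HotSpot code.
#include <cstddef>
#include <cstdio>
#include <vector>

typedef size_t Addr;                      // an "address" is just an index here

struct Region { Addr start; Addr end; };  // half-open interval [start, end)

// Stand-in closure: previous() is the max address finished last time,
// set_previous() records the min address to resume from next time.
struct UpwardsSketch {
  Addr prev = 0;
  Addr previous() const { return prev; }
  void set_previous(Addr a) { prev = a; }
  void do_object(Addr blk, size_t size) {
    std::printf("  visit block at %zu, size %zu\n", blk, size);
  }
};

// Stand-in space: a run of contiguous blocks starting at address 0,
// every block treated as a live object for simplicity.
struct SpaceSketch {
  std::vector<size_t> blocks;             // block sizes, laid out back to back

  void object_iterate_mem(Region mr, UpwardsSketch* cl) {
    Addr prev = cl->previous();           // max address from last time
    if (prev >= mr.end) return;           // nothing to do
    // Resume either past prev or at the block containing mr.start.
    Addr resume = prev > mr.start ? prev : mr.start;
    Addr blk = 0;
    size_t i = 0;
    while (i < blocks.size() && blk + blocks[i] <= resume) {
      blk += blocks[i++];                 // skip blocks finished earlier
    }
    // Apply the closure to every remaining block that starts inside mr.
    while (i < blocks.size() && blk < mr.end) {
      cl->do_object(blk, blocks[i]);
      blk += blocks[i++];
    }
    cl->set_previous(blk);                // min address for next time
  }
};

int main() {
  SpaceSketch space{{4, 8, 2, 16, 4}};    // block starts: 0, 4, 12, 14, 30
  UpwardsSketch cl;
  std::printf("first call over [0,14):\n");
  space.object_iterate_mem({0, 14}, &cl);
  std::printf("second call over [8,34) resumes past previous() = %zu:\n",
              cl.previous());
  space.object_iterate_mem({8, 34}, &cl); // blocks below 14 are not revisited
  return 0;
}

With these stand-ins, the first call visits the blocks starting at 0, 4 and 12, the second call skips everything below previous() == 14 and visits only the blocks at 14 and 30, so no block is visited twice across the two calls.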
@@ -851,11 +851,62 @@
 
 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
                                                   UpwardsObjectClosure* cl) {
   assert_locked(freelistLock());
   NOT_PRODUCT(verify_objects_initialized());
-  Space::object_iterate_mem(mr, cl);
+  assert(!mr.is_empty(), "Should be non-empty");
+  // We use MemRegion(bottom(), end()) rather than used_region() below
+  // because the two are not necessarily equal for some kinds of
+  // spaces, in particular, certain kinds of free list spaces.
+  // We could use the more complicated but more precise:
+  // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
+  // but the slight imprecision seems acceptable in the assertion check.
+  assert(MemRegion(bottom(), end()).contains(mr),
+         "Should be within used space");
+  HeapWord* prev = cl->previous();  // max address from last time
+  if (prev >= mr.end()) { // nothing to do
+    return;
+  }
+  // This assert will not work when we go from cms space to perm
+  // space, and use same closure. Easy fix deferred for later. XXX YSR
+  // assert(prev == NULL || contains(prev), "Should be within space");
+
+  bool last_was_obj_array = false;
+  HeapWord *blk_start_addr, *region_start_addr;
+  if (prev > mr.start()) {
+    region_start_addr = prev;
+    blk_start_addr = prev;
+    // The previous invocation may have pushed "prev" beyond the
+    // last allocated block, yet there may still be blocks
+    // in this region due to a particular coalescing policy.
+    // Relax the assertion so that the case where the unallocated
+    // block is maintained and "prev" is beyond the unallocated
+    // block does not cause the assertion to fire.
+    assert((BlockOffsetArrayUseUnallocatedBlock &&
+            (!is_in(prev))) ||
+           (blk_start_addr == block_start(region_start_addr)), "invariant");
+  } else {
+    region_start_addr = mr.start();
+    blk_start_addr = block_start(region_start_addr);
+  }
+  HeapWord* region_end_addr = mr.end();
+  MemRegion derived_mr(region_start_addr, region_end_addr);
+  while (blk_start_addr < region_end_addr) {
+    const size_t size = block_size(blk_start_addr);
+    if (block_is_obj(blk_start_addr)) {
+      last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
+    } else {
+      last_was_obj_array = false;
+    }
+    blk_start_addr += size;
+  }
+  if (!last_was_obj_array) {
+    assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
+           "Should be within (closed) used space");
+    assert(blk_start_addr > prev, "Invariant");
+    cl->set_previous(blk_start_addr); // min address for next time
+  }
 }
 
 // Callers of this iterator beware: The closure application should
 // be robust in the face of uninitialized objects and should (always)
 // return a correct size so that the next addr + size below gives us a