comparison src/share/vm/memory/space.cpp @ 20804:7848fc12602b

Merge with jdk8u40-b25
author Gilles Duboscq <gilles.m.duboscq@oracle.com>
date Tue, 07 Apr 2015 14:58:49 +0200
parents 52b4284cb496 ee019285a52c
children

--- a/src/share/vm/memory/space.cpp (20184:84105dcdb05b)
+++ b/src/share/vm/memory/space.cpp (20804:7848fc12602b)
26 #include "classfile/systemDictionary.hpp" 26 #include "classfile/systemDictionary.hpp"
27 #include "classfile/vmSymbols.hpp" 27 #include "classfile/vmSymbols.hpp"
28 #include "gc_implementation/shared/liveRange.hpp" 28 #include "gc_implementation/shared/liveRange.hpp"
29 #include "gc_implementation/shared/markSweep.hpp" 29 #include "gc_implementation/shared/markSweep.hpp"
30 #include "gc_implementation/shared/spaceDecorator.hpp" 30 #include "gc_implementation/shared/spaceDecorator.hpp"
31 #include "gc_interface/collectedHeap.inline.hpp"
31 #include "memory/blockOffsetTable.inline.hpp" 32 #include "memory/blockOffsetTable.inline.hpp"
32 #include "memory/defNewGeneration.hpp" 33 #include "memory/defNewGeneration.hpp"
33 #include "memory/genCollectedHeap.hpp" 34 #include "memory/genCollectedHeap.hpp"
34 #include "memory/space.hpp" 35 #include "memory/space.hpp"
35 #include "memory/space.inline.hpp" 36 #include "memory/space.inline.hpp"
36 #include "memory/universe.inline.hpp" 37 #include "memory/universe.inline.hpp"
37 #include "oops/oop.inline.hpp" 38 #include "oops/oop.inline.hpp"
38 #include "oops/oop.inline2.hpp" 39 #include "oops/oop.inline2.hpp"
39 #include "runtime/java.hpp" 40 #include "runtime/java.hpp"
41 #include "runtime/prefetch.inline.hpp"
42 #include "runtime/orderAccess.inline.hpp"
40 #include "runtime/safepoint.hpp" 43 #include "runtime/safepoint.hpp"
41 #include "utilities/copy.hpp" 44 #include "utilities/copy.hpp"
42 #include "utilities/globalDefinitions.hpp" 45 #include "utilities/globalDefinitions.hpp"
43 #include "utilities/macros.hpp" 46 #include "utilities/macros.hpp"
44
45 void SpaceMemRegionOopsIterClosure::do_oop(oop* p) { SpaceMemRegionOopsIterClosure::do_oop_work(p); }
46 void SpaceMemRegionOopsIterClosure::do_oop(narrowOop* p) { SpaceMemRegionOopsIterClosure::do_oop_work(p); }
47 47
48 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC 48 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
49 49
50 HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top, 50 HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top,
51 HeapWord* top_obj) { 51 HeapWord* top_obj) {
@@ -305,14 +305,10 @@
   set_top(bottom());
   set_saved_mark();
   CompactibleSpace::clear(mangle_space);
 }

-bool ContiguousSpace::is_in(const void* p) const {
-  return _bottom <= p && p < _top;
-}
-
 bool ContiguousSpace::is_free_block(const HeapWord* p) const {
   return p >= _top;
 }

 void OffsetTableContigSpace::clear(bool mangle_space) {
@@ -550,117 +546,13 @@
 void Space::oop_iterate(ExtendedOopClosure* blk) {
   ObjectToOopClosure blk2(blk);
   object_iterate(&blk2);
 }

-HeapWord* Space::object_iterate_careful(ObjectClosureCareful* cl) {
-  guarantee(false, "NYI");
-  return bottom();
-}
-
-HeapWord* Space::object_iterate_careful_m(MemRegion mr,
-                                          ObjectClosureCareful* cl) {
-  guarantee(false, "NYI");
-  return bottom();
-}
-
-
-void Space::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
-  assert(!mr.is_empty(), "Should be non-empty");
-  // We use MemRegion(bottom(), end()) rather than used_region() below
-  // because the two are not necessarily equal for some kinds of
-  // spaces, in particular, certain kinds of free list spaces.
-  // We could use the more complicated but more precise:
-  // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
-  // but the slight imprecision seems acceptable in the assertion check.
-  assert(MemRegion(bottom(), end()).contains(mr),
-         "Should be within used space");
-  HeapWord* prev = cl->previous();   // max address from last time
-  if (prev >= mr.end()) { // nothing to do
-    return;
-  }
-  // This assert will not work when we go from cms space to perm
-  // space, and use same closure. Easy fix deferred for later. XXX YSR
-  // assert(prev == NULL || contains(prev), "Should be within space");
-
-  bool last_was_obj_array = false;
-  HeapWord *blk_start_addr, *region_start_addr;
-  if (prev > mr.start()) {
-    region_start_addr = prev;
-    blk_start_addr = prev;
-    // The previous invocation may have pushed "prev" beyond the
-    // last allocated block yet there may be still be blocks
-    // in this region due to a particular coalescing policy.
-    // Relax the assertion so that the case where the unallocated
-    // block is maintained and "prev" is beyond the unallocated
-    // block does not cause the assertion to fire.
-    assert((BlockOffsetArrayUseUnallocatedBlock &&
-            (!is_in(prev))) ||
-           (blk_start_addr == block_start(region_start_addr)), "invariant");
-  } else {
-    region_start_addr = mr.start();
-    blk_start_addr = block_start(region_start_addr);
-  }
-  HeapWord* region_end_addr = mr.end();
-  MemRegion derived_mr(region_start_addr, region_end_addr);
-  while (blk_start_addr < region_end_addr) {
-    const size_t size = block_size(blk_start_addr);
-    if (block_is_obj(blk_start_addr)) {
-      last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
-    } else {
-      last_was_obj_array = false;
-    }
-    blk_start_addr += size;
-  }
-  if (!last_was_obj_array) {
-    assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
-           "Should be within (closed) used space");
-    assert(blk_start_addr > prev, "Invariant");
-    cl->set_previous(blk_start_addr); // min address for next time
-  }
-}
-
 bool Space::obj_is_alive(const HeapWord* p) const {
   assert (block_is_obj(p), "The address should point to an object");
   return true;
-}
-
-void ContiguousSpace::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
-  assert(!mr.is_empty(), "Should be non-empty");
-  assert(used_region().contains(mr), "Should be within used space");
-  HeapWord* prev = cl->previous();   // max address from last time
-  if (prev >= mr.end()) { // nothing to do
-    return;
-  }
-  // See comment above (in more general method above) in case you
-  // happen to use this method.
-  assert(prev == NULL || is_in_reserved(prev), "Should be within space");
-
-  bool last_was_obj_array = false;
-  HeapWord *obj_start_addr, *region_start_addr;
-  if (prev > mr.start()) {
-    region_start_addr = prev;
-    obj_start_addr = prev;
-    assert(obj_start_addr == block_start(region_start_addr), "invariant");
-  } else {
-    region_start_addr = mr.start();
-    obj_start_addr = block_start(region_start_addr);
-  }
-  HeapWord* region_end_addr = mr.end();
-  MemRegion derived_mr(region_start_addr, region_end_addr);
-  while (obj_start_addr < region_end_addr) {
-    oop obj = oop(obj_start_addr);
-    const size_t size = obj->size();
-    last_was_obj_array = cl->do_object_bm(obj, derived_mr);
-    obj_start_addr += size;
-  }
-  if (!last_was_obj_array) {
-    assert((bottom() <= obj_start_addr) && (obj_start_addr <= end()),
-           "Should be within (closed) used space");
-    assert(obj_start_addr > prev, "Invariant");
-    cl->set_previous(obj_start_addr); // min address for next time
-  }
 }

 #if INCLUDE_ALL_GCS
 #define ContigSpace_PAR_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)          \
                                                                               \
@@ -684,47 +576,10 @@
   HeapWord* t = top();
   // Could call objects iterate, but this is easier.
   while (obj_addr < t) {
     obj_addr += oop(obj_addr)->oop_iterate(blk);
   }
-}
-
-void ContiguousSpace::oop_iterate(MemRegion mr, ExtendedOopClosure* blk) {
-  if (is_empty()) {
-    return;
-  }
-  MemRegion cur = MemRegion(bottom(), top());
-  mr = mr.intersection(cur);
-  if (mr.is_empty()) {
-    return;
-  }
-  if (mr.equals(cur)) {
-    oop_iterate(blk);
-    return;
-  }
-  assert(mr.end() <= top(), "just took an intersection above");
-  HeapWord* obj_addr = block_start(mr.start());
-  HeapWord* t = mr.end();
-
-  // Handle first object specially.
-  oop obj = oop(obj_addr);
-  SpaceMemRegionOopsIterClosure smr_blk(blk, mr);
-  obj_addr += obj->oop_iterate(&smr_blk);
-  while (obj_addr < t) {
-    oop obj = oop(obj_addr);
-    assert(obj->is_oop(), "expected an oop");
-    obj_addr += obj->size();
-    // If "obj_addr" is not greater than top, then the
-    // entire object "obj" is within the region.
-    if (obj_addr <= t) {
-      obj->oop_iterate(blk);
-    } else {
-      // "obj" extends beyond end of region
-      obj->oop_iterate(&smr_blk);
-      break;
-    }
-  };
 }

 void ContiguousSpace::object_iterate(ObjectClosure* blk) {
   if (is_empty()) return;
   WaterMark bm = bottom_mark();
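
Note: this hunk drops the bounded oop_iterate(MemRegion, ...) variant together with its SpaceMemRegionOopsIterClosure helper, while the unbounded loop in the surviving context simply walks objects laid out back to back and lets each one report its own size. A minimal sketch of that walk pattern, using hypothetical names (Obj, ObjVisitor, walk_objects) rather than HotSpot code:

#include <cstddef>

// Hypothetical stand-in for an object header that knows its own size.
struct Obj {
  size_t size_in_words;
};

typedef void (*ObjVisitor)(Obj*);

// Walk a contiguous run of objects in [bottom, top): visit each object,
// then advance by its size -- the same shape as the surviving loop above,
// where oop(obj_addr)->oop_iterate(blk) both visits and returns the size.
static void walk_objects(char* bottom, char* top, size_t bytes_per_word,
                         ObjVisitor visit) {
  char* addr = bottom;
  while (addr < top) {
    Obj* obj = reinterpret_cast<Obj*>(addr);
    visit(obj);
    addr += obj->size_in_words * bytes_per_word;  // next object starts right after
  }
}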
@@ -828,18 +683,12 @@
 }

 // This version requires locking.
 inline HeapWord* ContiguousSpace::allocate_impl(size_t size,
                                                 HeapWord* const end_value) {
-  // In G1 there are places where a GC worker can allocates into a
-  // region using this serial allocation code without being prone to a
-  // race with other GC workers (we ensure that no other GC worker can
-  // access the same region at the same time). So the assert below is
-  // too strong in the case of G1.
   assert(Heap_lock->owned_by_self() ||
-         (SafepointSynchronize::is_at_safepoint() &&
-          (Thread::current()->is_VM_thread() || UseG1GC)),
+         (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()),
          "not locked");
   HeapWord* obj = top();
   if (pointer_delta(end_value, obj) >= size) {
     HeapWord* new_top = obj + size;
     set_top(new_top);
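
The dropped comment and the removed "|| UseG1GC" term tighten the precondition: after this change allocate_impl may only run while holding the Heap_lock, or on the VM thread at a safepoint. The allocation itself is a plain bump of top(), as the surviving context shows. A minimal sketch of that serial path, with hypothetical names (Region, bump_alloc) and byte arithmetic instead of HeapWord units; not the HotSpot implementation:

#include <cstddef>

// Hypothetical region with a bump pointer; ContiguousSpace keeps the same
// two fields as _top and _end (in HeapWord units rather than bytes).
struct Region {
  char* top;
  char* end;
};

// If size bytes fit between top and end, return the old top and advance it;
// otherwise fail with NULL so the caller can take a slower path.
static char* bump_alloc(Region& r, size_t size) {
  if (static_cast<size_t>(r.end - r.top) >= size) {
    char* obj = r.top;
    r.top = obj + size;
    return obj;
  }
  return NULL;
}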
@@ -867,10 +716,31 @@
       }
     } else {
       return NULL;
     }
   } while (true);
+}
+
+HeapWord* ContiguousSpace::allocate_aligned(size_t size) {
+  assert(Heap_lock->owned_by_self() || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()), "not locked");
+  HeapWord* end_value = end();
+
+  HeapWord* obj = CollectedHeap::align_allocation_or_fail(top(), end_value, SurvivorAlignmentInBytes);
+  if (obj == NULL) {
+    return NULL;
+  }
+
+  if (pointer_delta(end_value, obj) >= size) {
+    HeapWord* new_top = obj + size;
+    set_top(new_top);
+    assert(is_ptr_aligned(obj, SurvivorAlignmentInBytes) && is_aligned(new_top),
+      "checking alignment");
+    return obj;
+  } else {
+    set_top(obj);
+    return NULL;
+  }
 }

 // Requires locking.
 HeapWord* ContiguousSpace::allocate(size_t size) {
   return allocate_impl(size, end());
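
The newly added allocate_aligned first rounds top() up to SurvivorAlignmentInBytes via CollectedHeap::align_allocation_or_fail, which (as the hunk shows) yields NULL when the alignment cannot be satisfied within the space, and only then performs the usual bump allocation. A minimal sketch of the align-or-fail arithmetic under those assumptions; align_up_or_fail is a hypothetical name, and the real CollectedHeap helper may treat the skipped gap differently:

#include <cstddef>
#include <cstdint>

// Round 'top' up to a power-of-two 'alignment'; fail with NULL if the
// padding alone would cross 'end'. Only the arithmetic is shown here.
static char* align_up_or_fail(char* top, char* end, size_t alignment) {
  uintptr_t t = reinterpret_cast<uintptr_t>(top);
  uintptr_t aligned = (t + alignment - 1) & ~(uintptr_t)(alignment - 1);
  size_t pad = static_cast<size_t>(aligned - t);
  if (pad > static_cast<size_t>(end - top)) {
    return NULL;  // not enough room even for the padding
  }
  return top + pad;
}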