comparison src/share/vm/memory/space.hpp @ 356:1ee8caae33af

Merge
author tonyp
date Thu, 21 Aug 2008 23:36:31 -0400
parents 60fb9c4db4e6 850fdf70db2b
children 122d10c82f3f
355:0edda524b58c 356:1ee8caae33af
1 /* 1 /*
2 * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. 2 * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 * 4 *
5 * This code is free software; you can redistribute it and/or modify it 5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as 6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
134 // Initialization. 134 // Initialization.
135 // "initialize" should be called once on a space, before it is used for 135 // "initialize" should be called once on a space, before it is used for
136 // any purpose. The "mr" argument gives the bounds of the space, and 136 // any purpose. The "mr" argument gives the bounds of the space, and
137 // the "clear_space" argument should be true unless the memory in "mr" is 137 // the "clear_space" argument should be true unless the memory in "mr" is
138 // known to be zeroed. 138 // known to be zeroed.
139 virtual void initialize(MemRegion mr, bool clear_space); 139 virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
140
141 // Sets the bounds (bottom and end) of the current space to those of "mr."
142 void set_bounds(MemRegion mr);
143 140
144 // The "clear" method must be called on a region that may have 141 // The "clear" method must be called on a region that may have
145 // had allocation performed in it, but is now to be considered empty. 142 // had allocation performed in it, but is now to be considered empty.
146 virtual void clear(); 143 virtual void clear(bool mangle_space);
147 144
148 // For detecting GC bugs. Should only be called at GC boundaries, since 145 // For detecting GC bugs. Should only be called at GC boundaries, since
149 // some unused space may be used as scratch space during GCs. 146 // some unused space may be used as scratch space during GCs.
150 // Default implementation does nothing. We also call this when expanding 147 // Default implementation does nothing. We also call this when expanding
151 // a space to satisfy an allocation request. See bug #4668531 148 // a space to satisfy an allocation request. See bug #4668531
152 virtual void mangle_unused_area() {} 149 virtual void mangle_unused_area() {}
150 virtual void mangle_unused_area_complete() {}
153 virtual void mangle_region(MemRegion mr) {} 151 virtual void mangle_region(MemRegion mr) {}
154 152
155 // Testers 153 // Testers
156 bool is_empty() const { return used() == 0; } 154 bool is_empty() const { return used() == 0; }
157 bool not_empty() const { return used() > 0; } 155 bool not_empty() const { return used() > 0; }
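
The hunk above threads an explicit mangling flag through initialize() and clear(). Below is a minimal standalone sketch of the resulting calling pattern, assuming SpaceDecorator provides named boolean constants (it appears later in this diff, in the clear(SpaceDecorator::Mangle) call inside SCAN_AND_COMPACT, but its definition is not in this file); ToySpace, the flat bottom/end arguments, and the 0xAB fill byte are illustrative stand-ins, not HotSpot code.

#include <cstdio>
#include <cstring>

typedef unsigned long HeapWord;  // stand-in for HotSpot's HeapWord

// Named boolean constants in the style the new signatures suggest.
struct SpaceDecorator {
  static const bool Clear      = true;
  static const bool DontClear  = false;
  static const bool Mangle     = true;
  static const bool DontMangle = false;
};

class ToySpace {
  HeapWord* _bottom;
  HeapWord* _end;
public:
  void initialize(HeapWord* bottom, HeapWord* end,
                  bool clear_space, bool mangle_space) {
    _bottom = bottom;
    _end    = end;
    if (clear_space) clear(mangle_space);
  }
  // clear() now takes the decision as an explicit flag rather than
  // mangling unconditionally in debug builds.
  void clear(bool mangle_space) {
    if (mangle_space) mangle_unused_area();
  }
  // Fill the unused area with a recognizable pattern so use of freed
  // memory shows up quickly in debug runs.
  void mangle_unused_area() {
    memset(_bottom, 0xAB, (_end - _bottom) * sizeof(HeapWord));
  }
};

int main() {
  HeapWord buf[16];
  ToySpace s;
  s.initialize(buf, buf + 16, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  printf("first word after mangle: 0x%lx\n", buf[0]);
  return 0;
}

Passing SpaceDecorator::Mangle instead of a bare true keeps every call site self-documenting, which is presumably why the flag was threaded through all of these signatures.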
374 372
375 public: 373 public:
376 CompactibleSpace() : 374 CompactibleSpace() :
377 _compaction_top(NULL), _next_compaction_space(NULL) {} 375 _compaction_top(NULL), _next_compaction_space(NULL) {}
378 376
379 virtual void initialize(MemRegion mr, bool clear_space); 377 virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
380 virtual void clear(); 378 virtual void clear(bool mangle_space);
381 379
382 // Used temporarily during a compaction phase to hold the value 380 // Used temporarily during a compaction phase to hold the value
383 // top should have when compaction is complete. 381 // top should have when compaction is complete.
384 HeapWord* compaction_top() const { return _compaction_top; } 382 HeapWord* compaction_top() const { return _compaction_top; }
385 383
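
compaction_top() above holds, during a compaction phase, the address at which the next live object will land. A toy sketch of that invariant, with integer word indices standing in for HeapWord* and an invented live bitmap:

#include <cstdio>

int main() {
  const int N = 8;
  int  size[N] = {2, 3, 1, 4, 2, 2, 1, 3};  // object sizes in words
  bool live[N] = {true, false, true, true, false, true, false, true};

  int addr = 0;            // scan cursor through the old layout
  int compaction_top = 0;  // where the next live object will be copied
  for (int i = 0; i < N; i++) {
    if (live[i]) {
      printf("obj@%2d (size %d) forwards to %d\n", addr, size[i], compaction_top);
      compaction_top += size[i];  // survivors pack densely from the bottom
    }
    addr += size[i];
  }
  printf("new top after compaction = %d\n", compaction_top);
  return 0;
}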
659 size = adjust_obj_size(size); \ 657 size = adjust_obj_size(size); \
660 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers()); \ 658 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers()); \
661 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size)); \ 659 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size)); \
662 debug_only(prev_q = q); \ 660 debug_only(prev_q = q); \
663 q += size; \ 661 q += size; \
664 } else { \ 662 } else { \
665 /* q is not a live object, so its mark should point at the next \ 663 /* q is not a live object, so its mark should point at the next \
666 * live object */ \ 664 * live object */ \
667 debug_only(prev_q = q); \ 665 debug_only(prev_q = q); \
668 q = (HeapWord*) oop(q)->mark()->decode_pointer(); \ 666 q = (HeapWord*) oop(q)->mark()->decode_pointer(); \
669 assert(q > prev_q, "we should be moving forward through memory"); \ 667 assert(q > prev_q, "we should be moving forward through memory"); \
670 } \ 668 } \
671 } \ 669 } \
672 \ 670 \
673 assert(q == t, "just checking"); \ 671 assert(q == t, "just checking"); \
674 } 672 }
675 673
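
The else-branch in the loop above relies on mark-sweep's dead-run trick: the mark word of the first dead object in a run is overwritten with a pointer to the next live object, so later passes leap over the whole run in one step instead of walking it. A self-contained sketch of the idea, using array indices and an invented Header struct in place of oops and mark words:

#include <cassert>
#include <cstdio>

struct Header { bool live; int next_live; int size; };

int main() {
  // Layout: live obj of size 2 at 0, dead run [2,5), live obj of size 1
  // at 5, dead run [6,8). The first header of each dead run stores the
  // index of the next live object (or the end of the space).
  Header h[8] = {};
  h[0] = {true,  -1, 2};
  h[2] = {false,  5, 0};
  h[5] = {true,  -1, 1};
  h[6] = {false,  8, 0};

  int q = 0;
  const int t = 8;
  int prev_q = -1;
  while (q < t) {
    if (h[q].live) {
      printf("live object at %d, size %d\n", q, h[q].size);
      prev_q = q;
      q += h[q].size;
    } else {
      // Dead: the stored pointer skips the entire dead run in one step.
      prev_q = q;
      q = h[q].next_live;
      assert(q > prev_q && "we should be moving forward through memory");
    }
  }
  assert(q == t && "just checking");
  (void)prev_q;
  return 0;
}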
676 #define SCAN_AND_COMPACT(obj_size) { \ 674 #define SCAN_AND_COMPACT(obj_size) { \
677 /* Copy all live objects to their new location \ 675 /* Copy all live objects to their new location \
678 * Used by MarkSweep::mark_sweep_phase4() */ \ 676 * Used by MarkSweep::mark_sweep_phase4() */ \
679 \ 677 \
680 HeapWord* q = bottom(); \ 678 HeapWord* q = bottom(); \
681 HeapWord* const t = _end_of_live; \ 679 HeapWord* const t = _end_of_live; \
682 debug_only(HeapWord* prev_q = NULL); \ 680 debug_only(HeapWord* prev_q = NULL); \
683 \ 681 \
684 if (q < t && _first_dead > q && \ 682 if (q < t && _first_dead > q && \
685 !oop(q)->is_gc_marked()) { \ 683 !oop(q)->is_gc_marked()) { \
686 debug_only( \ 684 debug_only( \
687 /* we have a chunk of the space which hasn't moved and we've reinitialized \ 685 /* we have a chunk of the space which hasn't moved and we've reinitialized \
688 * the mark word during the previous pass, so we can't use is_gc_marked for \ 686 * the mark word during the previous pass, so we can't use is_gc_marked for \
689 * the traversal. */ \ 687 * the traversal. */ \
690 HeapWord* const end = _first_dead; \ 688 HeapWord* const end = _first_dead; \
691 \ 689 \
692 while (q < end) { \ 690 while (q < end) { \
693 size_t size = obj_size(q); \ 691 size_t size = obj_size(q); \
694 assert(!oop(q)->is_gc_marked(), \ 692 assert(!oop(q)->is_gc_marked(), \
695 "should be unmarked (special dense prefix handling)"); \ 693 "should be unmarked (special dense prefix handling)"); \
696 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, q)); \ 694 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, q)); \
697 debug_only(prev_q = q); \ 695 debug_only(prev_q = q); \
698 q += size; \ 696 q += size; \
699 } \ 697 } \
700 ) /* debug_only */ \ 698 ) /* debug_only */ \
701 \ 699 \
702 if (_first_dead == t) { \ 700 if (_first_dead == t) { \
703 q = t; \ 701 q = t; \
704 } else { \ 702 } else { \
705 /* $$$ Funky */ \ 703 /* $$$ Funky */ \
706 q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer(); \ 704 q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer(); \
707 } \ 705 } \
708 } \ 706 } \
709 \ 707 \
710 const intx scan_interval = PrefetchScanIntervalInBytes; \ 708 const intx scan_interval = PrefetchScanIntervalInBytes; \
711 const intx copy_interval = PrefetchCopyIntervalInBytes; \ 709 const intx copy_interval = PrefetchCopyIntervalInBytes; \
712 while (q < t) { \ 710 while (q < t) { \
713 if (!oop(q)->is_gc_marked()) { \ 711 if (!oop(q)->is_gc_marked()) { \
714 /* mark is pointer to next marked oop */ \ 712 /* mark is pointer to next marked oop */ \
715 debug_only(prev_q = q); \ 713 debug_only(prev_q = q); \
716 q = (HeapWord*) oop(q)->mark()->decode_pointer(); \ 714 q = (HeapWord*) oop(q)->mark()->decode_pointer(); \
717 assert(q > prev_q, "we should be moving forward through memory"); \ 715 assert(q > prev_q, "we should be moving forward through memory"); \
718 } else { \ 716 } else { \
719 /* prefetch beyond q */ \ 717 /* prefetch beyond q */ \
720 Prefetch::read(q, scan_interval); \ 718 Prefetch::read(q, scan_interval); \
721 \ 719 \
722 /* size and destination */ \ 720 /* size and destination */ \
723 size_t size = obj_size(q); \ 721 size_t size = obj_size(q); \
724 HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee(); \ 722 HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee(); \
725 \ 723 \
726 /* prefetch beyond compaction_top */ \ 724 /* prefetch beyond compaction_top */ \
727 Prefetch::write(compaction_top, copy_interval); \ 725 Prefetch::write(compaction_top, copy_interval); \
728 \ 726 \
729 /* copy object and reinit its mark */ \ 727 /* copy object and reinit its mark */ \
730 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, \ 728 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, \
731 compaction_top)); \ 729 compaction_top)); \
732 assert(q != compaction_top, "everything in this pass should be moving"); \ 730 assert(q != compaction_top, "everything in this pass should be moving"); \
733 Copy::aligned_conjoint_words(q, compaction_top, size); \ 731 Copy::aligned_conjoint_words(q, compaction_top, size); \
734 oop(compaction_top)->init_mark(); \ 732 oop(compaction_top)->init_mark(); \
735 assert(oop(compaction_top)->klass() != NULL, "should have a class"); \ 733 assert(oop(compaction_top)->klass() != NULL, "should have a class"); \
736 \ 734 \
737 debug_only(prev_q = q); \ 735 debug_only(prev_q = q); \
738 q += size; \ 736 q += size; \
739 } \ 737 } \
740 } \ 738 } \
741 \ 739 \
742 /* Let's remember if we were empty before we did the compaction. */ \ 740 /* Let's remember if we were empty before we did the compaction. */ \
743 bool was_empty = used_region().is_empty(); \ 741 bool was_empty = used_region().is_empty(); \
744 /* Reset space after compaction is complete */ \ 742 /* Reset space after compaction is complete */ \
745 reset_after_compaction(); \ 743 reset_after_compaction(); \
746 /* We do this clear, below, since it has overloaded meanings for some */ \ 744 /* We do this clear, below, since it has overloaded meanings for some */ \
747 /* space subtypes. For example, OffsetTableContigSpaces that were */ \ 745 /* space subtypes. For example, OffsetTableContigSpaces that were */ \
748 /* compacted into will have had their offset table thresholds updated */ \ 746 /* compacted into will have had their offset table thresholds updated */ \
749 /* continuously, but those that weren't need to have their thresholds */ \ 747 /* continuously, but those that weren't need to have their thresholds */ \
750 /* re-initialized. Also mangles unused area for debugging. */ \ 748 /* re-initialized. Also mangles unused area for debugging. */ \
751 if (used_region().is_empty()) { \ 749 if (used_region().is_empty()) { \
752 if (!was_empty) clear(); \ 750 if (!was_empty) clear(SpaceDecorator::Mangle); \
753 } else { \ 751 } else { \
754 if (ZapUnusedHeapArea) mangle_unused_area(); \ 752 if (ZapUnusedHeapArea) mangle_unused_area(); \
755 } \ 753 } \
756 } 754 }
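
SCAN_AND_COMPACT takes the object-size routine as a macro parameter, so one compaction loop can serve spaces with different size calculations. A standalone sketch of that parameterization technique follows; the space types and size functions are invented, and the real expansions live in the corresponding .cpp files, which this hunk does not show.

#include <cstdio>

// One scan loop, written once, parameterized by the size routine --
// the same trick SCAN_AND_COMPACT uses with its obj_size parameter.
#define SCAN_ALL(obj_size)                              \
  {                                                     \
    int q = 0;                                          \
    while (q < n) {                                     \
      int size = obj_size(q);                           \
      printf("object at %d has size %d\n", q, size);    \
      q += size;                                        \
    }                                                   \
  }

struct PlainSpace {
  int n = 10;
  int obj_size(int) const { return 2; }          // fixed-size objects
  void scan() const { SCAN_ALL(obj_size); }
};

struct PaddedSpace {
  int n = 12;
  int adjusted_obj_size(int) const { return 3; } // sizes include padding
  void scan() const { SCAN_ALL(adjusted_obj_size); }
};

int main() {
  PlainSpace().scan();
  PaddedSpace().scan();
  return 0;
}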
755
756 class GenSpaceMangler;
757 757
758 // A space in which the free area is contiguous. It therefore supports 758 // A space in which the free area is contiguous. It therefore supports
759 // faster allocation, and compaction. 759 // faster allocation, and compaction.
760 class ContiguousSpace: public CompactibleSpace { 760 class ContiguousSpace: public CompactibleSpace {
761 friend class OneContigSpaceCardGeneration; 761 friend class OneContigSpaceCardGeneration;
762 friend class VMStructs; 762 friend class VMStructs;
763 protected: 763 protected:
764 HeapWord* _top; 764 HeapWord* _top;
765 HeapWord* _concurrent_iteration_safe_limit; 765 HeapWord* _concurrent_iteration_safe_limit;
766 // A helper for mangling the unused area of the space in debug builds.
767 GenSpaceMangler* _mangler;
768
769 GenSpaceMangler* mangler() { return _mangler; }
766 770
767 // Allocation helpers (return NULL if full). 771 // Allocation helpers (return NULL if full).
768 inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value); 772 inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
769 inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value); 773 inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value);
770 774
771 public: 775 public:
772 ContiguousSpace() : 776 ContiguousSpace();
773 _top(NULL), 777 ~ContiguousSpace();
774 _concurrent_iteration_safe_limit(NULL) {} 778
775 779 virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
776 virtual void initialize(MemRegion mr, bool clear_space); 780 virtual void clear(bool mangle_space);
777 781
778 // Accessors 782 // Accessors
779 HeapWord* top() const { return _top; } 783 HeapWord* top() const { return _top; }
780 void set_top(HeapWord* value) { _top = value; } 784 void set_top(HeapWord* value) { _top = value; }
781 785
782 virtual void set_saved_mark() { _saved_mark_word = top(); } 786 virtual void set_saved_mark() { _saved_mark_word = top(); }
783 void reset_saved_mark() { _saved_mark_word = bottom(); } 787 void reset_saved_mark() { _saved_mark_word = bottom(); }
784
785 virtual void clear();
786 788
787 WaterMark bottom_mark() { return WaterMark(this, bottom()); } 789 WaterMark bottom_mark() { return WaterMark(this, bottom()); }
788 WaterMark top_mark() { return WaterMark(this, top()); } 790 WaterMark top_mark() { return WaterMark(this, top()); }
789 WaterMark saved_mark() { return WaterMark(this, saved_mark_word()); } 791 WaterMark saved_mark() { return WaterMark(this, saved_mark_word()); }
790 bool saved_mark_at_top() const { return saved_mark_word() == top(); } 792 bool saved_mark_at_top() const { return saved_mark_word() == top(); }
791 793
792 void mangle_unused_area(); 794 // In debug mode, mangle (overwrite with a particular bit
793 void mangle_region(MemRegion mr); 795 // pattern) the unused part of a space.
796
797 // Used to save an address in a space for later use during mangling.
798 void set_top_for_allocations(HeapWord* v) PRODUCT_RETURN;
799 // Used to save the space's current top for later use during mangling.
800 void set_top_for_allocations() PRODUCT_RETURN;
801
802 // Mangle regions in the space from the current top up to the
803 // previously mangled part of the space.
804 void mangle_unused_area() PRODUCT_RETURN;
805 // Mangle [top, end)
806 void mangle_unused_area_complete() PRODUCT_RETURN;
807 // Mangle the given MemRegion.
808 void mangle_region(MemRegion mr) PRODUCT_RETURN;
809
810 // Do some sparse checking on the area that should have been mangled.
811 void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN;
812 // Check the complete area that should have been mangled.
813 // This check may be a no-op depending on the macro DEBUG_MANGLING.
814 void check_mangled_unused_area_complete() PRODUCT_RETURN;
794 815
795 // Size computations: sizes in bytes. 816 // Size computations: sizes in bytes.
796 size_t capacity() const { return byte_size(bottom(), end()); } 817 size_t capacity() const { return byte_size(bottom(), end()); }
797 size_t used() const { return byte_size(bottom(), top()); } 818 size_t used() const { return byte_size(bottom(), top()); }
798 size_t free() const { return byte_size(top(), end()); } 819 size_t free() const { return byte_size(top(), end()); }
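
The new mangler entry points above split mangling into an incremental part (mangle_unused_area, up to a remembered high-water mark), a complete part (mangle_unused_area_complete), and GC-boundary checks; PRODUCT_RETURN makes all of them compile away in product builds. A toy sketch of the protocol they imply, assuming a 0xAB fill byte and a byte-indexed space:

#include <cassert>
#include <cstdio>
#include <cstring>

class ToyMangledSpace {
  unsigned char _mem[64];
  int _top;                  // current allocation top (byte index)
  int _top_for_allocations;  // remembered high-water mark for mangling
public:
  ToyMangledSpace() : _top(0), _top_for_allocations(0) {}

  void set_top_for_allocations() { _top_for_allocations = _top; }

  // Incremental form: only the span between the current top and the
  // previously mangled high-water mark needs the pattern again, so a
  // full re-mangle is not paid on every GC.
  void mangle_unused_area() {
    if (_top < _top_for_allocations)
      memset(_mem + _top, 0xAB, _top_for_allocations - _top);
  }
  // Complete form: mangle everything from top to the end of the space.
  void mangle_unused_area_complete() {
    memset(_mem + _top, 0xAB, sizeof(_mem) - _top);
  }
  // GC-boundary check: anything above top must still hold the pattern;
  // a mismatch means something wrote past the allocated area.
  void check_mangled_unused_area(int limit) {
    for (int i = _top; i < limit; i++)
      assert(_mem[i] == 0xAB && "unused area was written to");
  }

  void allocate(int bytes) { _top += bytes; }
};

int main() {
  ToyMangledSpace s;
  s.mangle_unused_area_complete();  // first-time full mangle
  s.allocate(8);                    // mutator consumes the first 8 bytes
  s.set_top_for_allocations();      // GC boundary: remember how far we got
  s.check_mangled_unused_area(64);  // everything above top still 0xAB
  puts("mangle checks passed");
  return 0;
}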
984 HeapWord* soft_end() { return _soft_end; } 1005 HeapWord* soft_end() { return _soft_end; }
985 HeapWord** soft_end_addr() { return &_soft_end; } 1006 HeapWord** soft_end_addr() { return &_soft_end; }
986 void set_soft_end(HeapWord* value) { _soft_end = value; } 1007 void set_soft_end(HeapWord* value) { _soft_end = value; }
987 1008
988 // Override. 1009 // Override.
989 void clear(); 1010 void clear(bool mangle_space);
990 1011
991 // Set both the 'hard' and 'soft' limits (_end and _soft_end). 1012 // Set both the 'hard' and 'soft' limits (_end and _soft_end).
992 void set_end(HeapWord* value) { 1013 void set_end(HeapWord* value) {
993 set_soft_end(value); 1014 set_soft_end(value);
994 ContiguousSpace::set_end(value); 1015 ContiguousSpace::set_end(value);
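
The space in this hunk keeps a 'soft' end below the hard _end, and its set_end() updates both so the two limits stay consistent. A standalone sketch of the presumed mechanism, in which allocation fails at the soft limit first so the runtime gets a hook for bookkeeping before the hard limit is reached (the class name, step size, and retry loop are invented):

#include <cstdio>

class ToyEden {
  int _top;
  int _soft_end;   // allocation fails here first ("soft" limit)
  int _hard_end;   // true capacity of the space
public:
  ToyEden(int soft, int hard) : _top(0), _soft_end(soft), _hard_end(hard) {}

  // Allocation is checked against the soft end, not the hard end.
  int allocate(int words) {
    if (_top + words > _soft_end) return -1;  // "full" w.r.t. soft end
    int result = _top;
    _top += words;
    return result;
  }

  // On a soft-end failure the runtime can slide the soft end toward the
  // hard end and retry, getting a hook at each step.
  bool extend_soft_end(int words) {
    if (_soft_end + words > _hard_end) return false;
    _soft_end += words;
    return true;
  }
};

int main() {
  ToyEden eden(4, 16);
  for (;;) {
    int p = eden.allocate(3);
    if (p >= 0) { printf("allocated at %d\n", p); continue; }
    printf("hit soft end, extending\n");
    if (!eden.extend_soft_end(4)) { printf("hit hard end\n"); break; }
  }
  return 0;
}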
1028 MemRegion mr); 1049 MemRegion mr);
1029 1050
1030 void set_bottom(HeapWord* value); 1051 void set_bottom(HeapWord* value);
1031 void set_end(HeapWord* value); 1052 void set_end(HeapWord* value);
1032 1053
1033 virtual void initialize(MemRegion mr, bool clear_space); 1054 void clear(bool mangle_space);
1034 void clear();
1035 1055
1036 inline HeapWord* block_start_const(const void* p) const; 1056 inline HeapWord* block_start_const(const void* p) const;
1037 1057
1038 // Add offset table update. 1058 // Add offset table update.
1039 virtual inline HeapWord* allocate(size_t word_size); 1059 virtual inline HeapWord* allocate(size_t word_size);
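
allocate() here is overridden to also maintain a block-offset table ("Add offset table update"), which is what lets block_start_const() find the object covering an arbitrary address without scanning from bottom(). A toy sketch of that data structure, with an invented card size and word-indexed addresses in place of real heap pointers:

#include <cstdio>

const int CARD = 8;  // words per card (invented toy value)

class ToyOffsetSpace {
  int _top;
  int _size[128];       // toy: size of the object starting at each word
  int _card_start[16];  // per card: start of the object covering its first word
public:
  ToyOffsetSpace() : _top(0) {
    for (int i = 0; i < 16; i++)  _card_start[i] = 0;
    for (int i = 0; i < 128; i++) _size[i] = 0;
  }

  // The allocate() override: bump top, then record coverage in the
  // offset table for every card whose first word lies inside the object.
  int allocate(int words) {
    int start = _top;
    _size[start] = words;
    _top += words;
    for (int c = start / CARD + 1; c * CARD < _top; c++)
      _card_start[c] = start;
    return start;
  }

  // block_start_const analogue: jump near p via the table, then walk
  // forward object by object -- no scan from bottom() needed.
  int block_start(int p) const {
    int q = _card_start[p / CARD];
    while (q + _size[q] <= p) q += _size[q];
    return q;
  }
};

int main() {
  ToyOffsetSpace sp;
  sp.allocate(5);    // object A occupies words 0..4
  sp.allocate(20);   // object B occupies words 5..24
  printf("object covering word 12 starts at %d\n", sp.block_start(12));  // 5
  return 0;
}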