comparison src/share/vm/memory/space.hpp @ 6725:da91efe96a93

6964458: Reimplement class meta-data storage to use native memory Summary: Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland Contributed-by: jmasa <jon.masamitsu@oracle.com>, stefank <stefan.karlsson@oracle.com>, mgerdin <mikael.gerdin@oracle.com>, never <tom.rodriguez@oracle.com>
author coleenp
date Sat, 01 Sep 2012 13:25:18 -0400
parents d2a62e0f25eb
children b735136e0d82
comparison
equal deleted inserted replaced
6724:36d1d483d5d6 6725:da91efe96a93
63 // - EdenSpace -- contiguous space used as nursery 63 // - EdenSpace -- contiguous space used as nursery
64 // - ConcEdenSpace -- contiguous space with a 'soft end safe' allocation 64 // - ConcEdenSpace -- contiguous space with a 'soft end safe' allocation
65 // - OffsetTableContigSpace -- contiguous space with a block offset array 65 // - OffsetTableContigSpace -- contiguous space with a block offset array
66 // that allows "fast" block_start calls 66 // that allows "fast" block_start calls
67 // - TenuredSpace -- (used for TenuredGeneration) 67 // - TenuredSpace -- (used for TenuredGeneration)
68 // - ContigPermSpace -- an offset table contiguous space for perm gen
69 68
70 // Forward decls. 69 // Forward decls.
71 class Space; 70 class Space;
72 class BlockOffsetArray; 71 class BlockOffsetArray;
73 class BlockOffsetArrayContigSpace; 72 class BlockOffsetArrayContigSpace;
77 class GenRemSet; 76 class GenRemSet;
78 class CardTableRS; 77 class CardTableRS;
79 class DirtyCardToOopClosure; 78 class DirtyCardToOopClosure;
80 79
81 // An oop closure that is circumscribed by a filtering memory region. 80 // An oop closure that is circumscribed by a filtering memory region.
82 class SpaceMemRegionOopsIterClosure: public OopClosure { 81 class SpaceMemRegionOopsIterClosure: public ExtendedOopClosure {
83 private: 82 private:
84 OopClosure* _cl; 83 ExtendedOopClosure* _cl;
85 MemRegion _mr; 84 MemRegion _mr;
86 protected: 85 protected:
87 template <class T> void do_oop_work(T* p) { 86 template <class T> void do_oop_work(T* p) {
88 if (_mr.contains(p)) { 87 if (_mr.contains(p)) {
89 _cl->do_oop(p); 88 _cl->do_oop(p);
90 } 89 }
91 } 90 }
92 public: 91 public:
93 SpaceMemRegionOopsIterClosure(OopClosure* cl, MemRegion mr): 92 SpaceMemRegionOopsIterClosure(ExtendedOopClosure* cl, MemRegion mr):
94 _cl(cl), _mr(mr) {} 93 _cl(cl), _mr(mr) {}
95 virtual void do_oop(oop* p); 94 virtual void do_oop(oop* p);
96 virtual void do_oop(narrowOop* p); 95 virtual void do_oop(narrowOop* p);
96 virtual bool do_metadata() {
97 // _cl is of type ExtendedOopClosure instead of OopClosure, so that we can check this.
98 assert(!_cl->do_metadata(), "I've checked all call paths, this shouldn't happen.");
99 return false;
100 }
101 virtual void do_klass(Klass* k) { ShouldNotReachHere(); }
102 virtual void do_class_loader_data(ClassLoaderData* cld) { ShouldNotReachHere(); }
97 }; 103 };
98 104
99 // A Space describes a heap area. Class Space is an abstract 105 // A Space describes a heap area. Class Space is an abstract
100 // base class. 106 // base class.
101 // 107 //
207 virtual size_t free() const = 0; 213 virtual size_t free() const = 0;
208 214
209 // Iterate over all the ref-containing fields of all objects in the 215 // Iterate over all the ref-containing fields of all objects in the
210 // space, calling "cl.do_oop" on each. Fields in objects allocated by 216 // space, calling "cl.do_oop" on each. Fields in objects allocated by
211 // applications of the closure are not included in the iteration. 217 // applications of the closure are not included in the iteration.
212 virtual void oop_iterate(OopClosure* cl); 218 virtual void oop_iterate(ExtendedOopClosure* cl);
213 219
214 // Same as above, restricted to the intersection of a memory region and 220 // Same as above, restricted to the intersection of a memory region and
215 // the space. Fields in objects allocated by applications of the closure 221 // the space. Fields in objects allocated by applications of the closure
216 // are not included in the iteration. 222 // are not included in the iteration.
217 virtual void oop_iterate(MemRegion mr, OopClosure* cl) = 0; 223 virtual void oop_iterate(MemRegion mr, ExtendedOopClosure* cl) = 0;
218 224
219 // Iterate over all objects in the space, calling "cl.do_object" on 225 // Iterate over all objects in the space, calling "cl.do_object" on
220 // each. Objects allocated by applications of the closure are not 226 // each. Objects allocated by applications of the closure are not
221 // included in the iteration. 227 // included in the iteration.
222 virtual void object_iterate(ObjectClosure* blk) = 0; 228 virtual void object_iterate(ObjectClosure* blk) = 0;
244 250
245 // Create and return a new dirty card to oop closure. Can be 251 // Create and return a new dirty card to oop closure. Can be
246 // overriden to return the appropriate type of closure 252 // overriden to return the appropriate type of closure
247 // depending on the type of space in which the closure will 253 // depending on the type of space in which the closure will
248 // operate. ResourceArea allocated. 254 // operate. ResourceArea allocated.
249 virtual DirtyCardToOopClosure* new_dcto_cl(OopClosure* cl, 255 virtual DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
250 CardTableModRefBS::PrecisionStyle precision, 256 CardTableModRefBS::PrecisionStyle precision,
251 HeapWord* boundary = NULL); 257 HeapWord* boundary = NULL);
252 258
253 // If "p" is in the space, returns the address of the start of the 259 // If "p" is in the space, returns the address of the start of the
254 // "block" that contains "p". We say "block" instead of "object" since 260 // "block" that contains "p". We say "block" instead of "object" since
319 // to support other space types. See ContiguousDCTOC for a sub-class 325 // to support other space types. See ContiguousDCTOC for a sub-class
320 // that works with ContiguousSpaces. 326 // that works with ContiguousSpaces.
321 327
322 class DirtyCardToOopClosure: public MemRegionClosureRO { 328 class DirtyCardToOopClosure: public MemRegionClosureRO {
323 protected: 329 protected:
324 OopClosure* _cl; 330 ExtendedOopClosure* _cl;
325 Space* _sp; 331 Space* _sp;
326 CardTableModRefBS::PrecisionStyle _precision; 332 CardTableModRefBS::PrecisionStyle _precision;
327 HeapWord* _boundary; // If non-NULL, process only non-NULL oops 333 HeapWord* _boundary; // If non-NULL, process only non-NULL oops
328 // pointing below boundary. 334 // pointing below boundary.
329 HeapWord* _min_done; // ObjHeadPreciseArray precision requires 335 HeapWord* _min_done; // ObjHeadPreciseArray precision requires
349 // classes should override this to provide more accurate 355 // classes should override this to provide more accurate
350 // or possibly more efficient walking. 356 // or possibly more efficient walking.
351 virtual void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top); 357 virtual void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top);
352 358
353 public: 359 public:
354 DirtyCardToOopClosure(Space* sp, OopClosure* cl, 360 DirtyCardToOopClosure(Space* sp, ExtendedOopClosure* cl,
355 CardTableModRefBS::PrecisionStyle precision, 361 CardTableModRefBS::PrecisionStyle precision,
356 HeapWord* boundary) : 362 HeapWord* boundary) :
357 _sp(sp), _cl(cl), _precision(precision), _boundary(boundary), 363 _sp(sp), _cl(cl), _precision(precision), _boundary(boundary),
358 _min_done(NULL) { 364 _min_done(NULL) {
359 NOT_PRODUCT(_last_bottom = NULL); 365 NOT_PRODUCT(_last_bottom = NULL);
392 // compaction could still support compaction in full GC's. 398 // compaction could still support compaction in full GC's.
393 399
394 class CompactibleSpace: public Space { 400 class CompactibleSpace: public Space {
395 friend class VMStructs; 401 friend class VMStructs;
396 friend class CompactibleFreeListSpace; 402 friend class CompactibleFreeListSpace;
397 friend class CompactingPermGenGen;
398 friend class CMSPermGenGen;
399 private: 403 private:
400 HeapWord* _compaction_top; 404 HeapWord* _compaction_top;
401 CompactibleSpace* _next_compaction_space; 405 CompactibleSpace* _next_compaction_space;
402 406
403 public: 407 public:
530 /* We allow some amount of garbage towards the bottom of the space, so \ 534 /* We allow some amount of garbage towards the bottom of the space, so \
531 * we don't start compacting before there is a significant gain to be made.\ 535 * we don't start compacting before there is a significant gain to be made.\
532 * Occasionally, we want to ensure a full compaction, which is determined \ 536 * Occasionally, we want to ensure a full compaction, which is determined \
533 * by the MarkSweepAlwaysCompactCount parameter. \ 537 * by the MarkSweepAlwaysCompactCount parameter. \
534 */ \ 538 */ \
535 int invocations = SharedHeap::heap()->perm_gen()->stat_record()->invocations;\ 539 int invocations = MarkSweep::total_invocations(); \
536 bool skip_dead = (MarkSweepAlwaysCompactCount < 1) \ 540 bool skip_dead = (MarkSweepAlwaysCompactCount < 1) \
537 ||((invocations % MarkSweepAlwaysCompactCount) != 0); \ 541 ||((invocations % MarkSweepAlwaysCompactCount) != 0); \
538 \ 542 \
539 size_t allowed_deadspace = 0; \ 543 size_t allowed_deadspace = 0; \
540 if (skip_dead) { \ 544 if (skip_dead) { \
560 oop(q)->mark()->has_bias_pattern(), \ 564 oop(q)->mark()->has_bias_pattern(), \
561 "these are the only valid states during a mark sweep"); \ 565 "these are the only valid states during a mark sweep"); \
562 if (block_is_obj(q) && oop(q)->is_gc_marked()) { \ 566 if (block_is_obj(q) && oop(q)->is_gc_marked()) { \
563 /* prefetch beyond q */ \ 567 /* prefetch beyond q */ \
564 Prefetch::write(q, interval); \ 568 Prefetch::write(q, interval); \
565 /* size_t size = oop(q)->size(); changing this for cms for perm gen */\
566 size_t size = block_size(q); \ 569 size_t size = block_size(q); \
567 compact_top = cp->space->forward(oop(q), size, cp, compact_top); \ 570 compact_top = cp->space->forward(oop(q), size, cp, compact_top); \
568 q += size; \ 571 q += size; \
569 end_of_live = q; \ 572 end_of_live = q; \
570 } else { \ 573 } else { \
645 \ 648 \
646 while (q < end) { \ 649 while (q < end) { \
647 /* I originally tried to conjoin "block_start(q) == q" to the \ 650 /* I originally tried to conjoin "block_start(q) == q" to the \
648 * assertion below, but that doesn't work, because you can't \ 651 * assertion below, but that doesn't work, because you can't \
649 * accurately traverse previous objects to get to the current one \ 652 * accurately traverse previous objects to get to the current one \
650 * after their pointers (including pointers into permGen) have been \ 653 * after their pointers have been \
651 * updated, until the actual compaction is done. dld, 4/00 */ \ 654 * updated, until the actual compaction is done. dld, 4/00 */ \
652 assert(block_is_obj(q), \ 655 assert(block_is_obj(q), \
653 "should be at block boundaries, and should be looking at objs"); \ 656 "should be at block boundaries, and should be looking at objs"); \
654 \ 657 \
655 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q))); \ 658 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q))); \
869 virtual bool obj_allocated_since_save_marks(const oop obj) const { 872 virtual bool obj_allocated_since_save_marks(const oop obj) const {
870 return (HeapWord*)obj >= saved_mark_word(); 873 return (HeapWord*)obj >= saved_mark_word();
871 } 874 }
872 875
873 // Iteration 876 // Iteration
874 void oop_iterate(OopClosure* cl); 877 void oop_iterate(ExtendedOopClosure* cl);
875 void oop_iterate(MemRegion mr, OopClosure* cl); 878 void oop_iterate(MemRegion mr, ExtendedOopClosure* cl);
876 void object_iterate(ObjectClosure* blk); 879 void object_iterate(ObjectClosure* blk);
877 // For contiguous spaces this method will iterate safely over objects 880 // For contiguous spaces this method will iterate safely over objects
878 // in the space (i.e., between bottom and top) when at a safepoint. 881 // in the space (i.e., between bottom and top) when at a safepoint.
879 void safe_object_iterate(ObjectClosure* blk); 882 void safe_object_iterate(ObjectClosure* blk);
880 void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl); 883 void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
890 void set_concurrent_iteration_safe_limit(HeapWord* new_limit) { 893 void set_concurrent_iteration_safe_limit(HeapWord* new_limit) {
891 assert(new_limit <= top(), "uninitialized objects in the safe range"); 894 assert(new_limit <= top(), "uninitialized objects in the safe range");
892 _concurrent_iteration_safe_limit = new_limit; 895 _concurrent_iteration_safe_limit = new_limit;
893 } 896 }
894 897
898
895 #ifndef SERIALGC 899 #ifndef SERIALGC
896 // In support of parallel oop_iterate. 900 // In support of parallel oop_iterate.
897 #define ContigSpace_PAR_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \ 901 #define ContigSpace_PAR_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \
898 void par_oop_iterate(MemRegion mr, OopClosureType* blk); 902 void par_oop_iterate(MemRegion mr, OopClosureType* blk);
899 903
909 set_concurrent_iteration_safe_limit(compaction_top()); 913 set_concurrent_iteration_safe_limit(compaction_top());
910 } 914 }
911 virtual size_t minimum_free_block_size() const { return 0; } 915 virtual size_t minimum_free_block_size() const { return 0; }
912 916
913 // Override. 917 // Override.
914 DirtyCardToOopClosure* new_dcto_cl(OopClosure* cl, 918 DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
915 CardTableModRefBS::PrecisionStyle precision, 919 CardTableModRefBS::PrecisionStyle precision,
916 HeapWord* boundary = NULL); 920 HeapWord* boundary = NULL);
917 921
918 // Apply "blk->do_oop" to the addresses of all reference fields in objects 922 // Apply "blk->do_oop" to the addresses of all reference fields in objects
919 // starting with the _saved_mark_word, which was noted during a generation's 923 // starting with the _saved_mark_word, which was noted during a generation's
979 // be a filtering closure which makes use of the _boundary. 983 // be a filtering closure which makes use of the _boundary.
980 // We offer two signatures, so the FilteringClosure static type is 984 // We offer two signatures, so the FilteringClosure static type is
981 // apparent. 985 // apparent.
982 virtual void walk_mem_region_with_cl(MemRegion mr, 986 virtual void walk_mem_region_with_cl(MemRegion mr,
983 HeapWord* bottom, HeapWord* top, 987 HeapWord* bottom, HeapWord* top,
984 OopClosure* cl) = 0; 988 ExtendedOopClosure* cl) = 0;
985 virtual void walk_mem_region_with_cl(MemRegion mr, 989 virtual void walk_mem_region_with_cl(MemRegion mr,
986 HeapWord* bottom, HeapWord* top, 990 HeapWord* bottom, HeapWord* top,
987 FilteringClosure* cl) = 0; 991 FilteringClosure* cl) = 0;
988 992
989 public: 993 public:
990 Filtering_DCTOC(Space* sp, OopClosure* cl, 994 Filtering_DCTOC(Space* sp, ExtendedOopClosure* cl,
991 CardTableModRefBS::PrecisionStyle precision, 995 CardTableModRefBS::PrecisionStyle precision,
992 HeapWord* boundary) : 996 HeapWord* boundary) :
993 DirtyCardToOopClosure(sp, cl, precision, boundary) {} 997 DirtyCardToOopClosure(sp, cl, precision, boundary) {}
994 }; 998 };
995 999
1008 // Overrides. 1012 // Overrides.
1009 HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj); 1013 HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);
1010 1014
1011 virtual void walk_mem_region_with_cl(MemRegion mr, 1015 virtual void walk_mem_region_with_cl(MemRegion mr,
1012 HeapWord* bottom, HeapWord* top, 1016 HeapWord* bottom, HeapWord* top,
1013 OopClosure* cl); 1017 ExtendedOopClosure* cl);
1014 virtual void walk_mem_region_with_cl(MemRegion mr, 1018 virtual void walk_mem_region_with_cl(MemRegion mr,
1015 HeapWord* bottom, HeapWord* top, 1019 HeapWord* bottom, HeapWord* top,
1016 FilteringClosure* cl); 1020 FilteringClosure* cl);
1017 1021
1018 public: 1022 public:
1019 ContiguousSpaceDCTOC(ContiguousSpace* sp, OopClosure* cl, 1023 ContiguousSpaceDCTOC(ContiguousSpace* sp, ExtendedOopClosure* cl,
1020 CardTableModRefBS::PrecisionStyle precision, 1024 CardTableModRefBS::PrecisionStyle precision,
1021 HeapWord* boundary) : 1025 HeapWord* boundary) :
1022 Filtering_DCTOC(sp, cl, precision, boundary) 1026 Filtering_DCTOC(sp, cl, precision, boundary)
1023 {} 1027 {}
1024 }; 1028 };
1074 1078
1075 1079
1076 // A ContigSpace that supports an efficient "block_start" operation via 1080 // A ContigSpace that supports an efficient "block_start" operation via
1077 // a BlockOffsetArray (whose BlockOffsetSharedArray may be shared with 1081 // a BlockOffsetArray (whose BlockOffsetSharedArray may be shared with
1078 // other spaces.) This is the abstract base class for old generation 1082 // other spaces.) This is the abstract base class for old generation
1079 // (tenured, perm) spaces. 1083 // (tenured) spaces.
1080 1084
1081 class OffsetTableContigSpace: public ContiguousSpace { 1085 class OffsetTableContigSpace: public ContiguousSpace {
1082 friend class VMStructs; 1086 friend class VMStructs;
1083 protected: 1087 protected:
1084 BlockOffsetArrayContigSpace _offsets; 1088 BlockOffsetArrayContigSpace _offsets;
1106 1110
1107 virtual void print_on(outputStream* st) const; 1111 virtual void print_on(outputStream* st) const;
1108 1112
1109 // Debugging 1113 // Debugging
1110 void verify() const; 1114 void verify() const;
1111
1112 // Shared space support
1113 void serialize_block_offset_array_offsets(SerializeOopClosure* soc);
1114 }; 1115 };
1115 1116
1116 1117
1117 // Class TenuredSpace is used by TenuredGeneration 1118 // Class TenuredSpace is used by TenuredGeneration
1118 1119
1125 // Constructor 1126 // Constructor
1126 TenuredSpace(BlockOffsetSharedArray* sharedOffsetArray, 1127 TenuredSpace(BlockOffsetSharedArray* sharedOffsetArray,
1127 MemRegion mr) : 1128 MemRegion mr) :
1128 OffsetTableContigSpace(sharedOffsetArray, mr) {} 1129 OffsetTableContigSpace(sharedOffsetArray, mr) {}
1129 }; 1130 };
1130
1131
1132 // Class ContigPermSpace is used by CompactingPermGen
1133
1134 class ContigPermSpace: public OffsetTableContigSpace {
1135 friend class VMStructs;
1136 protected:
1137 // Mark sweep support
1138 size_t allowed_dead_ratio() const;
1139 public:
1140 // Constructor
1141 ContigPermSpace(BlockOffsetSharedArray* sharedOffsetArray, MemRegion mr) :
1142 OffsetTableContigSpace(sharedOffsetArray, mr) {}
1143 };
1144
1145 #endif // SHARE_VM_MEMORY_SPACE_HPP 1131 #endif // SHARE_VM_MEMORY_SPACE_HPP