annotate src/share/vm/memory/space.hpp @ 12888:4a2acfb16e97
8025657: compiler/intrinsics/mathexact/ConstantTest.java fails on assert in lcm.cpp on solaris x64
Reviewed-by: kvn, twisti
author    rbackman
date      Fri, 11 Oct 2013 12:06:14 +0200
parents   12f651e29f6b
children  de6a9e811145 bdd155477289
/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_SPACE_HPP
#define SHARE_VM_MEMORY_SPACE_HPP

#include "memory/allocation.hpp"
#include "memory/blockOffsetTable.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/iterator.hpp"
#include "memory/memRegion.hpp"
#include "memory/watermark.hpp"
#include "oops/markOop.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/prefetch.hpp"
#include "utilities/macros.hpp"
#include "utilities/workgroup.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif

// A space is an abstraction for the "storage units" backing
// up the generation abstraction. It includes specific
// implementations for keeping track of free and used space,
// for iterating over objects and free blocks, etc.

// Here's the Space hierarchy:
//
// - Space               -- an abstract base class describing a heap area
//   - CompactibleSpace  -- a space supporting compaction
//     - CompactibleFreeListSpace -- (used for CMS generation)
//     - ContiguousSpace -- a compactible space in which all free space
//                          is contiguous
//       - EdenSpace     -- contiguous space used as nursery
//         - ConcEdenSpace -- contiguous space with a 'soft end safe' allocation
//       - OffsetTableContigSpace -- contiguous space with a block offset array
//                          that allows "fast" block_start calls
//         - TenuredSpace -- (used for TenuredGeneration)

// Forward decls.
class Space;
class BlockOffsetArray;
class BlockOffsetArrayContigSpace;
class Generation;
class CompactibleSpace;
class BlockOffsetTable;
class GenRemSet;
class CardTableRS;
class DirtyCardToOopClosure;

// An oop closure that is circumscribed by a filtering memory region.
class SpaceMemRegionOopsIterClosure: public ExtendedOopClosure {
 private:
  ExtendedOopClosure* _cl;
  MemRegion _mr;
 protected:
  template <class T> void do_oop_work(T* p) {
    if (_mr.contains(p)) {
      _cl->do_oop(p);
    }
  }
 public:
  SpaceMemRegionOopsIterClosure(ExtendedOopClosure* cl, MemRegion mr):
    _cl(cl), _mr(mr) {}
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  virtual bool do_metadata() {
    // _cl is of type ExtendedOopClosure instead of OopClosure, so that we can check this.
    assert(!_cl->do_metadata(), "I've checked all call paths, this shouldn't happen.");
    return false;
  }
  virtual void do_klass(Klass* k)                         { ShouldNotReachHere(); }
  virtual void do_class_loader_data(ClassLoaderData* cld) { ShouldNotReachHere(); }
};
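
// Illustrative sketch (not part of this header): restricting an existing
// ExtendedOopClosure to fields that lie inside a MemRegion. The helper name
// is hypothetical; the closure type above and Space::oop_iterate() declared
// below are the only APIs assumed.
//
//   void iterate_oops_in_region(Space* sp, MemRegion mr, ExtendedOopClosure* cl) {
//     SpaceMemRegionOopsIterClosure filtered(cl, mr);
//     sp->oop_iterate(&filtered);   // only oops whose addresses fall in mr reach cl
//   }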

// A Space describes a heap area. Class Space is an abstract
// base class.
//
// A Space supports allocation and size computation, and provides GC support.
//
// Invariant: bottom() and end() are on page_size boundaries and
// bottom() <= top() <= end()
// top() is inclusive and end() is exclusive.

class Space: public CHeapObj<mtGC> {
  friend class VMStructs;
 protected:
  HeapWord* _bottom;
  HeapWord* _end;

  // Used in support of save_marks()
  HeapWord* _saved_mark_word;

  MemRegionClosure* _preconsumptionDirtyCardClosure;

  // A sequential tasks done structure. This supports
  // parallel GC, where we have threads dynamically
  // claiming sub-tasks from a larger parallel task.
  SequentialSubTasksDone _par_seq_tasks;

  Space():
    _bottom(NULL), _end(NULL), _preconsumptionDirtyCardClosure(NULL) { }

 public:
  // Accessors
  HeapWord* bottom() const         { return _bottom; }
  HeapWord* end() const            { return _end;    }
  virtual void set_bottom(HeapWord* value) { _bottom = value; }
  virtual void set_end(HeapWord* value)    { _end = value; }

  virtual HeapWord* saved_mark_word() const  { return _saved_mark_word; }

  void set_saved_mark_word(HeapWord* p) { _saved_mark_word = p; }

  MemRegionClosure* preconsumptionDirtyCardClosure() const {
    return _preconsumptionDirtyCardClosure;
  }
  void setPreconsumptionDirtyCardClosure(MemRegionClosure* cl) {
    _preconsumptionDirtyCardClosure = cl;
  }

  // Returns a subregion of the space containing all the objects in
  // the space.
  virtual MemRegion used_region() const { return MemRegion(bottom(), end()); }

  // Returns a region that is guaranteed to contain (at least) all objects
  // allocated at the time of the last call to "save_marks".  If the space
  // initializes its DirtyCardToOopClosure's specifying the "contig" option
  // (that is, if the space is contiguous), then this region must contain only
  // such objects: the memregion will be from the bottom of the region to the
  // saved mark.  Otherwise, the "obj_allocated_since_save_marks" method of
  // the space must distinguish between objects in the region allocated before
  // and after the call to save marks.
  virtual MemRegion used_region_at_save_marks() const {
    return MemRegion(bottom(), saved_mark_word());
  }

  // Initialization.
  // "initialize" should be called once on a space, before it is used for
  // any purpose.  The "mr" argument gives the bounds of the space, and
  // the "clear_space" argument should be true unless the memory in "mr" is
  // known to be zeroed.
  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);

  // The "clear" method must be called on a region that may have
  // had allocation performed in it, but is now to be considered empty.
  virtual void clear(bool mangle_space);

  // For detecting GC bugs.  Should only be called at GC boundaries, since
  // some unused space may be used as scratch space during GC's.
  // Default implementation does nothing. We also call this when expanding
  // a space to satisfy an allocation request. See bug #4668531
  virtual void mangle_unused_area() {}
  virtual void mangle_unused_area_complete() {}
  virtual void mangle_region(MemRegion mr) {}

  // Testers
  bool is_empty() const              { return used() == 0; }
  bool not_empty() const             { return used() > 0; }

  // Returns true iff the given space contains the
  // given address as part of an allocated object. For
  // certain kinds of spaces, this might be a potentially
  // expensive operation. To prevent performance problems
  // on account of its inadvertent use in product jvm's,
  // we restrict its use to assertion checks only.
  virtual bool is_in(const void* p) const = 0;

  // Returns true iff the given reserved memory of the space contains the
  // given address.
  bool is_in_reserved(const void* p) const { return _bottom <= p && p < _end; }

  // Returns true iff the given block is not allocated.
  virtual bool is_free_block(const HeapWord* p) const = 0;

  // Test whether p is double-aligned
  static bool is_aligned(void* p) {
    return ((intptr_t)p & (sizeof(double)-1)) == 0;
  }

  // Size computations.  Sizes are in bytes.
  size_t capacity()     const { return byte_size(bottom(), end()); }
  virtual size_t used() const = 0;
  virtual size_t free() const = 0;

  // Iterate over all the ref-containing fields of all objects in the
  // space, calling "cl.do_oop" on each.  Fields in objects allocated by
  // applications of the closure are not included in the iteration.
  virtual void oop_iterate(ExtendedOopClosure* cl);

  // Same as above, restricted to the intersection of a memory region and
  // the space.  Fields in objects allocated by applications of the closure
  // are not included in the iteration.
  virtual void oop_iterate(MemRegion mr, ExtendedOopClosure* cl) = 0;

  // Iterate over all objects in the space, calling "cl.do_object" on
  // each.  Objects allocated by applications of the closure are not
  // included in the iteration.
  virtual void object_iterate(ObjectClosure* blk) = 0;
  // Similar to object_iterate() except only iterates over
  // objects whose internal references point to objects in the space.
  virtual void safe_object_iterate(ObjectClosure* blk) = 0;

  // Iterate over all objects that intersect with mr, calling "cl->do_object"
  // on each.  There is an exception to this: if this closure has already
  // been invoked on an object, it may skip such objects in some cases.  This is
  // most likely to happen in an "upwards" (ascending address) iteration of
  // MemRegions.
  virtual void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);

  // Iterate over as many initialized objects in the space as possible,
  // calling "cl.do_object_careful" on each. Return NULL if all objects
  // in the space (at the start of the iteration) were iterated over.
  // Return an address indicating the extent of the iteration in the
  // event that the iteration had to return because of finding an
  // uninitialized object in the space, or if the closure "cl"
  // signalled early termination.
  virtual HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
  virtual HeapWord* object_iterate_careful_m(MemRegion mr,
                                             ObjectClosureCareful* cl);

  // Create and return a new dirty card to oop closure. Can be
  // overridden to return the appropriate type of closure
  // depending on the type of space in which the closure will
  // operate. ResourceArea allocated.
  virtual DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
                                             CardTableModRefBS::PrecisionStyle precision,
                                             HeapWord* boundary = NULL);

  // If "p" is in the space, returns the address of the start of the
  // "block" that contains "p".  We say "block" instead of "object" since
  // some heaps may not pack objects densely; a chunk may either be an
  // object or a non-object.  If "p" is not in the space, return NULL.
  virtual HeapWord* block_start_const(const void* p) const = 0;

  // The non-const version may have benevolent side effects on the data
  // structure supporting these calls, possibly speeding up future calls.
  // The default implementation, however, is simply to call the const
  // version.
  inline virtual HeapWord* block_start(const void* p);

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object and the object is alive.
  virtual bool obj_is_alive(const HeapWord* addr) const;

  // Allocation (return NULL if full).  Assumes the caller has established
  // mutually exclusive access to the space.
  virtual HeapWord* allocate(size_t word_size) = 0;

  // Allocation (return NULL if full).  Enforces mutual exclusion internally.
  virtual HeapWord* par_allocate(size_t word_size) = 0;

  // Returns true if this object has been allocated since a
  // generation's "save_marks" call.
  virtual bool obj_allocated_since_save_marks(const oop obj) const = 0;

  // Mark-sweep-compact support: all spaces can update pointers to objects
  // moving as a part of compaction.
  virtual void adjust_pointers();

  // PrintHeapAtGC support
  virtual void print() const;
  virtual void print_on(outputStream* st) const;
  virtual void print_short() const;
  virtual void print_short_on(outputStream* st) const;


  // Accessor for parallel sequential tasks.
  SequentialSubTasksDone* par_seq_tasks() { return &_par_seq_tasks; }

  // If "this" is a ContiguousSpace, return it, else return NULL.
  virtual ContiguousSpace* toContiguousSpace() {
    return NULL;
  }

  // Debugging
  virtual void verify() const = 0;
};
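
// Illustrative sketch (not part of this header): the size accounting every
// Space exposes. The helper name is hypothetical; capacity(), used() and
// free() are the members declared above, and for a contiguous space
// capacity() == used() + free() holds.
//
//   void log_space_usage(const Space* sp, outputStream* st) {
//     st->print_cr("capacity=" SIZE_FORMAT " used=" SIZE_FORMAT " free=" SIZE_FORMAT,
//                  sp->capacity(), sp->used(), sp->free());
//   }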

// A MemRegionClosure (ResourceObj) whose "do_MemRegion" function applies an
// OopClosure to (the addresses of) all the ref-containing fields that could
// be modified by virtue of the given MemRegion being dirty. (Note that
// because of the imprecise nature of the write barrier, this may iterate
// over oops beyond the region.)
// This base type for dirty card to oop closures handles memory regions
// in non-contiguous spaces with no boundaries, and should be sub-classed
// to support other space types. See ContiguousDCTOC for a sub-class
// that works with ContiguousSpaces.

class DirtyCardToOopClosure: public MemRegionClosureRO {
 protected:
  ExtendedOopClosure* _cl;
  Space* _sp;
  CardTableModRefBS::PrecisionStyle _precision;
  HeapWord* _boundary;          // If non-NULL, process only non-NULL oops
                                // pointing below boundary.
  HeapWord* _min_done;          // ObjHeadPreciseArray precision requires
                                // a downwards traversal; this is the
                                // lowest location already done (or,
                                // alternatively, the lowest address that
                                // shouldn't be done again.  NULL means infinity.)
  NOT_PRODUCT(HeapWord* _last_bottom;)
  NOT_PRODUCT(HeapWord* _last_explicit_min_done;)

  // Get the actual top of the area on which the closure will
  // operate, given where the top is assumed to be (the end of the
  // memory region passed to do_MemRegion) and where the object
  // at the top is assumed to start. For example, an object may
  // start at the top but actually extend past the assumed top,
  // in which case the top becomes the end of the object.
  virtual HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);

  // Walk the given memory region from bottom to (actual) top
  // looking for objects and applying the oop closure (_cl) to
  // them. The base implementation of this treats the area as
  // blocks, where a block may or may not be an object. Sub-
  // classes should override this to provide more accurate
  // or possibly more efficient walking.
  virtual void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top);

 public:
  DirtyCardToOopClosure(Space* sp, ExtendedOopClosure* cl,
                        CardTableModRefBS::PrecisionStyle precision,
                        HeapWord* boundary) :
    _sp(sp), _cl(cl), _precision(precision), _boundary(boundary),
    _min_done(NULL) {
    NOT_PRODUCT(_last_bottom = NULL);
    NOT_PRODUCT(_last_explicit_min_done = NULL);
  }

  void do_MemRegion(MemRegion mr);

  void set_min_done(HeapWord* min_done) {
    _min_done = min_done;
    NOT_PRODUCT(_last_explicit_min_done = _min_done);
  }
#ifndef PRODUCT
  void set_last_bottom(HeapWord* last_bottom) {
    _last_bottom = last_bottom;
  }
#endif
};
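
// Illustrative sketch (not part of this header): how a remembered-set scan
// typically drives a DirtyCardToOopClosure. The helper and variable names
// are hypothetical; new_dcto_cl() and do_MemRegion() are the members
// declared in this file.
//
//   void scan_dirty_region(Space* sp, MemRegion dirty, ExtendedOopClosure* cl,
//                          CardTableModRefBS::PrecisionStyle precision) {
//     DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision);
//     dcto_cl->do_MemRegion(dirty);   // applies cl to refs a dirty card may cover
//   }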

// A structure to represent a point at which objects are being copied
// during compaction.
class CompactPoint : public StackObj {
public:
  Generation* gen;
  CompactibleSpace* space;
  HeapWord* threshold;
  CompactPoint(Generation* _gen, CompactibleSpace* _space,
               HeapWord* _threshold) :
    gen(_gen), space(_space), threshold(_threshold) {}
};


// A space that supports compaction operations.  This is usually, but not
// necessarily, a space that is normally contiguous.  But, for example, a
// free-list-based space whose normal collection is a mark-sweep without
// compaction could still support compaction in full GC's.

class CompactibleSpace: public Space {
  friend class VMStructs;
  friend class CompactibleFreeListSpace;
private:
  HeapWord* _compaction_top;
  CompactibleSpace* _next_compaction_space;

public:
  CompactibleSpace() :
   _compaction_top(NULL), _next_compaction_space(NULL) {}

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  // Used temporarily during a compaction phase to hold the value
  // top should have when compaction is complete.
  HeapWord* compaction_top() const { return _compaction_top; }

  void set_compaction_top(HeapWord* value) {
    assert(value == NULL || (value >= bottom() && value <= end()),
      "should point inside space");
    _compaction_top = value;
  }

  // Perform operations on the space needed after a compaction
  // has been performed.
  virtual void reset_after_compaction() {}

  // Returns the next space (in the current generation) to be compacted in
  // the global compaction order.  Also is used to select the next
  // space into which to compact.

  virtual CompactibleSpace* next_compaction_space() const {
    return _next_compaction_space;
  }

  void set_next_compaction_space(CompactibleSpace* csp) {
    _next_compaction_space = csp;
  }

  // MarkSweep support phase2

  // Start the process of compaction of the current space: compute
  // post-compaction addresses, and insert forwarding pointers.  The fields
  // "cp->gen" and "cp->compaction_space" are the generation and space into
  // which we are currently compacting.  This call updates "cp" as necessary,
  // and leaves the "compaction_top" of the final value of
  // "cp->compaction_space" up-to-date.  Offset tables may be updated in
  // this phase as if the final copy had occurred; if so, "cp->threshold"
  // indicates when the next such action should be taken.
  virtual void prepare_for_compaction(CompactPoint* cp);
  // MarkSweep support phase3
  virtual void adjust_pointers();
  // MarkSweep support phase4
  virtual void compact();

  // The maximum percentage of objects that can be dead in the compacted
  // live part of a compacted space ("deadwood" support.)
  virtual size_t allowed_dead_ratio() const { return 0; };

  // Some contiguous spaces may maintain some data structures that should
  // be updated whenever an allocation crosses a boundary.  This function
  // returns the first such boundary.
  // (The default implementation returns the end of the space, so the
  // boundary is never crossed.)
  virtual HeapWord* initialize_threshold() { return end(); }

  // "q" is an object of the given "size" that should be forwarded;
  // "cp" names the generation ("gen") and the space containing "this" (which
  // must also equal "cp->space").  "compact_top" is where in "this" the
  // next object should be forwarded to.  If there is room in "this" for
  // the object, insert an appropriate forwarding pointer in "q".
  // If not, go to the next compaction space (there must
  // be one, since compaction must succeed -- we go to the first space of
  // the previous generation if necessary, updating "cp"), reset compact_top
  // and then forward.  In either case, returns the new value of "compact_top".
  // If the forwarding crosses "cp->threshold", invokes the "cross_threshold"
  // function of the then-current compaction space, and updates "cp->threshold"
  // accordingly.
  virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
                    HeapWord* compact_top);

  // Return a size with adjustments as required of the space.
  virtual size_t adjust_object_size_v(size_t size) const { return size; }

protected:
  // Used during compaction.
  HeapWord* _first_dead;
  HeapWord* _end_of_live;

  // Minimum size of a free block.
  virtual size_t minimum_free_block_size() const = 0;

  // This function is invoked when an allocation of an object covering
  // "start" to "end" crosses the threshold; returns the next
  // threshold.  (The default implementation does nothing.)
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* the_end) {
    return end();
  }

  // Requires "allowed_deadspace_words > 0", that "q" is the start of a
  // free block of the given "word_len", and that "q", were it an object,
  // would not move if forwarded.  If the size allows, fill the free
  // block with an object, to prevent excessive compaction.  Returns "true"
  // iff the free region was made deadspace, and modifies
  // "allowed_deadspace_words" to reflect the number of available deadspace
  // words remaining after this operation.
  bool insert_deadspace(size_t& allowed_deadspace_words, HeapWord* q,
                        size_t word_len);
};
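
// Illustrative sketch (not part of this header, and not the VM's actual
// driver): the order in which a full mark-sweep-compact exercises a
// CompactibleSpace. The function name is hypothetical; the real phases are
// driven from MarkSweep and the Generation code. first_compaction_space()
// and next_compaction_space() are the accessors referenced by the macro
// below.
//
//   void compact_all_spaces(Generation* gen) {
//     CompactPoint cp(gen, NULL, NULL);    // SCAN_AND_FORWARD fills in space/threshold
//     for (CompactibleSpace* s = gen->first_compaction_space(); s != NULL;
//          s = s->next_compaction_space()) {
//       s->prepare_for_compaction(&cp);    // phase 2: compute and install forwarding
//     }
//     for (CompactibleSpace* s = gen->first_compaction_space(); s != NULL;
//          s = s->next_compaction_space()) {
//       s->adjust_pointers();              // phase 3: update interior oops
//     }
//     for (CompactibleSpace* s = gen->first_compaction_space(); s != NULL;
//          s = s->next_compaction_space()) {
//       s->compact();                      // phase 4: slide live objects into place
//     }
//   }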

#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) { \
  /* Compute the new addresses for the live objects and store it in the mark \
   * Used by universe::mark_sweep_phase2() \
   */ \
  HeapWord* compact_top; /* This is where we are currently compacting to. */ \
 \
  /* We're sure to be here before any objects are compacted into this \
   * space, so this is a good time to initialize this: \
   */ \
  set_compaction_top(bottom()); \
 \
  if (cp->space == NULL) { \
    assert(cp->gen != NULL, "need a generation"); \
    assert(cp->threshold == NULL, "just checking"); \
    assert(cp->gen->first_compaction_space() == this, "just checking"); \
    cp->space = cp->gen->first_compaction_space(); \
    compact_top = cp->space->bottom(); \
    cp->space->set_compaction_top(compact_top); \
    cp->threshold = cp->space->initialize_threshold(); \
  } else { \
    compact_top = cp->space->compaction_top(); \
  } \
 \
  /* We allow some amount of garbage towards the bottom of the space, so \
   * we don't start compacting before there is a significant gain to be made.\
   * Occasionally, we want to ensure a full compaction, which is determined \
   * by the MarkSweepAlwaysCompactCount parameter. \
   */ \
  uint invocations = MarkSweep::total_invocations(); \
  bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0); \
 \
  size_t allowed_deadspace = 0; \
  if (skip_dead) { \
    const size_t ratio = allowed_dead_ratio(); \
    allowed_deadspace = (capacity() * ratio / 100) / HeapWordSize; \
  } \
 \
  HeapWord* q = bottom(); \
  HeapWord* t = scan_limit(); \
 \
  HeapWord* end_of_live= q;    /* One byte beyond the last byte of the last \
                                  live object. */ \
  HeapWord* first_dead = end();/* The first dead object. */ \
  LiveRange* liveRange = NULL; /* The current live range, recorded in the \
                                  first header of preceding free area. */ \
  _first_dead = first_dead; \
 \
  const intx interval = PrefetchScanIntervalInBytes; \
 \
  while (q < t) { \
    assert(!block_is_obj(q) || \
           oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() || \
           oop(q)->mark()->has_bias_pattern(), \
           "these are the only valid states during a mark sweep"); \
    if (block_is_obj(q) && oop(q)->is_gc_marked()) { \
      /* prefetch beyond q */ \
      Prefetch::write(q, interval); \
      size_t size = block_size(q); \
      compact_top = cp->space->forward(oop(q), size, cp, compact_top); \
      q += size; \
      end_of_live = q; \
    } else { \
      /* run over all the contiguous dead objects */ \
      HeapWord* end = q; \
      do { \
        /* prefetch beyond end */ \
        Prefetch::write(end, interval); \
        end += block_size(end); \
      } while (end < t && (!block_is_obj(end) || !oop(end)->is_gc_marked()));\
 \
      /* see if we might want to pretend this object is alive so that \
       * we don't have to compact quite as often. \
       */ \
      if (allowed_deadspace > 0 && q == compact_top) { \
        size_t sz = pointer_delta(end, q); \
        if (insert_deadspace(allowed_deadspace, q, sz)) { \
          compact_top = cp->space->forward(oop(q), sz, cp, compact_top); \
          q = end; \
          end_of_live = end; \
          continue; \
        } \
      } \
 \
      /* otherwise, it really is a free region. */ \
 \
      /* for the previous LiveRange, record the end of the live objects. */ \
      if (liveRange) { \
        liveRange->set_end(q); \
      } \
 \
      /* record the current LiveRange object. \
       * liveRange->start() is overlaid on the mark word. \
       */ \
      liveRange = (LiveRange*)q; \
      liveRange->set_start(end); \
      liveRange->set_end(end); \
 \
      /* see if this is the first dead region. */ \
      if (q < first_dead) { \
        first_dead = q; \
      } \
 \
      /* move on to the next object */ \
      q = end; \
    } \
  } \
 \
  assert(q == t, "just checking"); \
  if (liveRange != NULL) { \
    liveRange->set_end(q); \
  } \
  _end_of_live = end_of_live; \
  if (end_of_live < first_dead) { \
    first_dead = end_of_live; \
  } \
  _first_dead = first_dead; \
 \
  /* save the compaction_top of the compaction space. */ \
  cp->space->set_compaction_top(compact_top); \
}
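
// Illustrative sketch (not part of this header): a space expands the macro
// above in its prepare_for_compaction(), passing the scan limit and block
// predicates that suit it. The names below are illustrative of a contiguous
// space, where every block is a (sized) object.
//
//   void SomeContigSpace::prepare_for_compaction(CompactPoint* cp) {
//     SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size);
//   }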

#define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) { \
  /* adjust all the interior pointers to point at the new locations of objects \
   * Used by MarkSweep::mark_sweep_phase3() */ \
 \
  HeapWord* q = bottom(); \
  HeapWord* t = _end_of_live;  /* Established by "prepare_for_compaction". */ \
 \
  assert(_first_dead <= _end_of_live, "Stands to reason, no?"); \
 \
  if (q < t && _first_dead > q && \
      !oop(q)->is_gc_marked()) { \
    /* we have a chunk of the space which hasn't moved and we've \
     * reinitialized the mark word during the previous pass, so we can't \
     * use is_gc_marked for the traversal. */ \
    HeapWord* end = _first_dead; \
 \
    while (q < end) { \
      /* I originally tried to conjoin "block_start(q) == q" to the \
       * assertion below, but that doesn't work, because you can't \
       * accurately traverse previous objects to get to the current one \
       * after their pointers have been \
       * updated, until the actual compaction is done.  dld, 4/00 */ \
      assert(block_is_obj(q), \
             "should be at block boundaries, and should be looking at objs"); \
 \
      /* point all the oops to the new location */ \
      size_t size = oop(q)->adjust_pointers(); \
      size = adjust_obj_size(size); \
 \
      q += size; \
    } \
 \
    if (_first_dead == t) { \
      q = t; \
    } else { \
      /* $$$ This is funky.  Using this to read the previously written \
       * LiveRange.  See also use below. */ \
      q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer(); \
    } \
  } \
 \
  const intx interval = PrefetchScanIntervalInBytes; \
 \
  debug_only(HeapWord* prev_q = NULL); \
  while (q < t) { \
    /* prefetch beyond q */ \
    Prefetch::write(q, interval); \
    if (oop(q)->is_gc_marked()) { \
      /* q is alive */ \
      /* point all the oops to the new location */ \
      size_t size = oop(q)->adjust_pointers(); \
      size = adjust_obj_size(size); \
      debug_only(prev_q = q); \
      q += size; \
    } else { \
      /* q is not a live object, so its mark should point at the next \
       * live object */ \
      debug_only(prev_q = q); \
      q = (HeapWord*) oop(q)->mark()->decode_pointer(); \
      assert(q > prev_q, "we should be moving forward through memory"); \
    } \
  } \
 \
  assert(q == t, "just checking"); \
}
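
// Illustrative sketch (not part of this header): the adjust phase is
// expanded the same way; spaces that need no per-object size correction pass
// an identity function. Names are illustrative.
//
//   void SomeContigSpace::adjust_pointers() {
//     if (!is_empty()) {
//       SCAN_AND_ADJUST_POINTERS(identity_obj_size);   // identity_obj_size(sz) == sz
//     }
//   }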

#define SCAN_AND_COMPACT(obj_size) { \
  /* Copy all live objects to their new location \
   * Used by MarkSweep::mark_sweep_phase4() */ \
 \
  HeapWord*       q = bottom(); \
  HeapWord* const t = _end_of_live; \
  debug_only(HeapWord* prev_q = NULL); \
 \
  if (q < t && _first_dead > q && \
      !oop(q)->is_gc_marked()) { \
    debug_only( \
    /* we have a chunk of the space which hasn't moved and we've reinitialized \
     * the mark word during the previous pass, so we can't use is_gc_marked for \
     * the traversal. */ \
    HeapWord* const end = _first_dead; \
 \
    while (q < end) { \
      size_t size = obj_size(q); \
      assert(!oop(q)->is_gc_marked(), \
             "should be unmarked (special dense prefix handling)"); \
      debug_only(prev_q = q); \
      q += size; \
    } \
    )  /* debug_only */ \
 \
    if (_first_dead == t) { \
      q = t; \
    } else { \
      /* $$$ Funky */ \
      q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer(); \
    } \
  } \
 \
  const intx scan_interval = PrefetchScanIntervalInBytes; \
  const intx copy_interval = PrefetchCopyIntervalInBytes; \
  while (q < t) { \
    if (!oop(q)->is_gc_marked()) { \
      /* mark is pointer to next marked oop */ \
      debug_only(prev_q = q); \
      q = (HeapWord*) oop(q)->mark()->decode_pointer(); \
      assert(q > prev_q, "we should be moving forward through memory"); \
    } else { \
      /* prefetch beyond q */ \
      Prefetch::read(q, scan_interval); \
 \
      /* size and destination */ \
      size_t size = obj_size(q); \
      HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee(); \
 \
      /* prefetch beyond compaction_top */ \
      Prefetch::write(compaction_top, copy_interval); \
 \
      /* copy object and reinit its mark */ \
      assert(q != compaction_top, "everything in this pass should be moving"); \
      Copy::aligned_conjoint_words(q, compaction_top, size); \
      oop(compaction_top)->init_mark(); \
      assert(oop(compaction_top)->klass() != NULL, "should have a class"); \
 \
      debug_only(prev_q = q); \
      q += size; \
    } \
  } \
 \
  /* Let's remember if we were empty before we did the compaction. */ \
  bool was_empty = used_region().is_empty(); \
  /* Reset space after compaction is complete */ \
  reset_after_compaction(); \
  /* We do this clear, below, since it has overloaded meanings for some */ \
  /* space subtypes.  For example, OffsetTableContigSpace's that were   */ \
  /* compacted into will have had their offset table thresholds updated */ \
  /* continuously, but those that weren't need to have their thresholds */ \
  /* re-initialized.  Also mangles unused area for debugging.           */ \
  if (used_region().is_empty()) { \
    if (!was_empty) clear(SpaceDecorator::Mangle); \
  } else { \
    if (ZapUnusedHeapArea) mangle_unused_area(); \
  } \
}
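
// Illustrative sketch (not part of this header): the copy phase is expanded
// with the function used to size each live object being moved. Names are
// illustrative.
//
//   void SomeContigSpace::compact() {
//     SCAN_AND_COMPACT(obj_size);
//   }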

class GenSpaceMangler;

// A space in which the free area is contiguous.  It therefore supports
// faster allocation, and compaction.
class ContiguousSpace: public CompactibleSpace {
  friend class OneContigSpaceCardGeneration;
  friend class VMStructs;
 protected:
  HeapWord* _top;
  HeapWord* _concurrent_iteration_safe_limit;
  // A helper for mangling the unused area of the space in debug builds.
  GenSpaceMangler* _mangler;

  GenSpaceMangler* mangler() { return _mangler; }

  // Allocation helpers (return NULL if full).
  inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
  inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value);

 public:
  ContiguousSpace();
  ~ContiguousSpace();

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  // Accessors
  HeapWord* top() const            { return _top;    }
  void set_top(HeapWord* value)    { _top = value; }

  virtual void set_saved_mark()    { _saved_mark_word = top();    }
  void reset_saved_mark()          { _saved_mark_word = bottom(); }

  WaterMark bottom_mark()     { return WaterMark(this, bottom()); }
  WaterMark top_mark()        { return WaterMark(this, top()); }
  WaterMark saved_mark()      { return WaterMark(this, saved_mark_word()); }
  bool saved_mark_at_top() const { return saved_mark_word() == top(); }

  // In debug mode mangle (write it with a particular bit
  // pattern) the unused part of a space.

  // Used to save an address in a space for later use during mangling.
  void set_top_for_allocations(HeapWord* v) PRODUCT_RETURN;
  // Used to save the space's current top for later use during mangling.
  void set_top_for_allocations() PRODUCT_RETURN;

  // Mangle regions in the space from the current top up to the
  // previously mangled part of the space.
  void mangle_unused_area() PRODUCT_RETURN;
  // Mangle [top, end)
  void mangle_unused_area_complete() PRODUCT_RETURN;
  // Mangle the given MemRegion.
  void mangle_region(MemRegion mr) PRODUCT_RETURN;

  // Do some sparse checking on the area that should have been mangled.
  void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN;
  // Check the complete area that should have been mangled.
  // This code may be NULL depending on the macro DEBUG_MANGLING.
  void check_mangled_unused_area_complete() PRODUCT_RETURN;

  // Size computations: sizes in bytes.
  size_t capacity() const        { return byte_size(bottom(), end()); }
  size_t used() const            { return byte_size(bottom(), top()); }
  size_t free() const            { return byte_size(top(),    end()); }

  // Override from space.
  bool is_in(const void* p) const;

  virtual bool is_free_block(const HeapWord* p) const;

  // In a contiguous space we have a more obvious bound on what parts
  // contain objects.
  MemRegion used_region() const { return MemRegion(bottom(), top()); }

  MemRegion used_region_at_save_marks() const {
    return MemRegion(bottom(), saved_mark_word());
  }

  // Allocation (return NULL if full)
  virtual HeapWord* allocate(size_t word_size);
  virtual HeapWord* par_allocate(size_t word_size);
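
  // Illustrative sketch (not part of this header): the essence of contiguous
  // allocation is a pointer bump between top() and end(); the real
  // allocate_impl()/par_allocate_impl() bodies live elsewhere, and the
  // parallel variant retries a compare-and-swap on _top. The helper below is
  // a hypothetical, single-threaded rendering of the idea.
  //
  //   HeapWord* bump_allocate(ContiguousSpace* cs, size_t word_size) {
  //     HeapWord* obj = cs->top();
  //     if (pointer_delta(cs->end(), obj) >= word_size) {
  //       cs->set_top(obj + word_size);   // no race on _top in this variant
  //       return obj;                     // old top is the new object's start
  //     }
  //     return NULL;                      // space is full
  //   }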
859 | |
860 virtual bool obj_allocated_since_save_marks(const oop obj) const { | |
861 return (HeapWord*)obj >= saved_mark_word(); | |
862 } | |
863 | |
864 // Iteration | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6197
diff
changeset
|
865 void oop_iterate(ExtendedOopClosure* cl); |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6197
diff
changeset
|
866 void oop_iterate(MemRegion mr, ExtendedOopClosure* cl); |
0 | 867 void object_iterate(ObjectClosure* blk); |
517
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
438
diff
changeset
|
868 // For contiguous spaces this method will iterate safely over objects |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
438
diff
changeset
|
869 // in the space (i.e., between bottom and top) when at a safepoint. |
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
438
diff
changeset
|
870 void safe_object_iterate(ObjectClosure* blk); |
0 | 871 void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl); |
872 // iterates on objects up to the safe limit | |
873 HeapWord* object_iterate_careful(ObjectClosureCareful* cl); | |
6048 | 874 HeapWord* concurrent_iteration_safe_limit() { |
875 assert(_concurrent_iteration_safe_limit <= top(), |
876 "_concurrent_iteration_safe_limit update missed"); |
877 return _concurrent_iteration_safe_limit; |
878 } |
0 | 879 // changes the safe limit, all objects from bottom() to the new |
880 // limit should be properly initialized | |
6048 | 881 void set_concurrent_iteration_safe_limit(HeapWord* new_limit) { |
882 assert(new_limit <= top(), "uninitialized objects in the safe range"); |
883 _concurrent_iteration_safe_limit = new_limit; |
884 } |
0 | 885 |
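The getter and setter above form a simple publication protocol: a concurrent iteration only walks [bottom(), concurrent_iteration_safe_limit()), and the limit is only advanced once everything below it is a properly initialized object. A compact model of that idea (illustrative; the atomics and names below are assumptions, not the VM's mechanism):

#include <atomic>

struct ToySafeLimitSpace {
  char* _bottom;
  std::atomic<char*> _safe_limit;   // never ahead of the initialized prefix

  // Writer side: only call once every object in [_bottom, new_limit) is
  // fully initialized, matching the comment on the setter above.
  void set_safe_limit(char* new_limit) {
    _safe_limit.store(new_limit, std::memory_order_release);
  }

  // Reader side: a concurrent iteration never looks past this bound.
  char* iteration_bound() const {
    return _safe_limit.load(std::memory_order_acquire);
  }
};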
6725 | 886 |
8001 | 887 #if INCLUDE_ALL_GCS |
0 | 888 // In support of parallel oop_iterate. |
889 #define ContigSpace_PAR_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \ | |
890 void par_oop_iterate(MemRegion mr, OopClosureType* blk); | |
891 | |
892 ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DECL) | |
893 #undef ContigSpace_PAR_OOP_ITERATE_DECL | |
8001 | 894 #endif // INCLUDE_ALL_GCS |
0 | 895 |
896 // Compaction support | |
897 virtual void reset_after_compaction() { | |
898 assert(compaction_top() >= bottom() && compaction_top() <= end(), "should point inside space"); | |
899 set_top(compaction_top()); | |
900 // set new iteration safe limit | |
901 set_concurrent_iteration_safe_limit(compaction_top()); | |
902 } | |
903 virtual size_t minimum_free_block_size() const { return 0; } | |
904 | |
905 // Override. | |
6725 | 906 DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl, |
0 | 907 CardTableModRefBS::PrecisionStyle precision, |
908 HeapWord* boundary = NULL); | |
909 | |
910 // Apply "blk->do_oop" to the addresses of all reference fields in objects | |
911 // starting with the _saved_mark_word, which was noted during a generation's | |
912 // save_marks and is required to denote the head of an object. | |
913 // Fields in objects allocated by applications of the closure | |
914 // *are* included in the iteration. | |
915 // Updates _saved_mark_word to point to just after the last object | |
916 // iterated over. | |
917 #define ContigSpace_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \ | |
918 void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk); | |
919 | |
920 ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DECL) | |
921 #undef ContigSpace_OOP_SINCE_SAVE_MARKS_DECL | |
922 | |
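The oop_since_save_marks_iterate family implements a common generational pattern: record top as the saved mark, keep allocating, then scan only what arrived after the mark, including objects allocated by the closure itself, and finally move the mark up to the new end. A small model of that control flow (illustrative; allocation order stands in for heap addresses):

#include <cstddef>
#include <vector>

struct ToySaveMarksSpace {
  std::vector<void*> objs;   // allocation order stands in for address order
  size_t saved_mark = 0;     // index of the first object after the last save_marks

  void save_marks() { saved_mark = objs.size(); }

  template <typename Closure>
  void since_save_marks_iterate(Closure apply) {
    // objs.size() is re-read every iteration, so objects appended by the
    // closure are visited too, as the comment above requires.
    for (size_t i = saved_mark; i < objs.size(); ++i) {
      apply(objs[i]);
    }
    saved_mark = objs.size();  // "just after the last object iterated over"
  }
};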
923 // Same as object_iterate, but starting from "mark", which is required | |
924 // to denote the start of an object. Objects allocated by | |
925 // applications of the closure *are* included in the iteration. | |
926 virtual void object_iterate_from(WaterMark mark, ObjectClosure* blk); | |
927 | |
928 // Very inefficient implementation. | |
342 | 929 virtual HeapWord* block_start_const(const void* p) const; |
0 | 930 size_t block_size(const HeapWord* p) const; |
931 // If a block is in the allocated area, it is an object. | |
932 bool block_is_obj(const HeapWord* p) const { return p < top(); } | |
933 | |
934 // Addresses for inlined allocation | |
935 HeapWord** top_addr() { return &_top; } | |
936 HeapWord** end_addr() { return &_end; } | |
937 | |
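Exposing &_top and &_end lets a caller that knows the space's layout run the bump-pointer fast path itself instead of going through a virtual call; JIT-emitted allocation works against the same two fields. A sketch of such a fast path (illustrative; the helper below is a made-up free function, not a HotSpot API):

#include <cstddef>

// Single-threaded fast path; a parallel caller would CAS *top_addr instead.
static inline char* inline_fast_alloc(char** top_addr, char** end_addr,
                                      size_t byte_size) {
  char* obj = *top_addr;
  if ((size_t)(*end_addr - obj) < byte_size) {
    return nullptr;            // caller falls back to the slow allocation path
  }
  *top_addr = obj + byte_size; // bump the space's top in place
  return obj;
}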
938 // Overrides for more efficient compaction support. | |
939 void prepare_for_compaction(CompactPoint* cp); | |
940 | |
941 // PrintHeapAtGC support. | |
942 virtual void print_on(outputStream* st) const; | |
943 | |
944 // Checked dynamic downcasts. | |
945 virtual ContiguousSpace* toContiguousSpace() { | |
946 return this; | |
947 } | |
948 | |
949 // Debugging | |
6008 | 950 virtual void verify() const; |
0 | 951 |
952 // Used to increase collection frequency. "factor" of 0 means entire | |
953 // space. | |
954 void allocate_temporary_filler(int factor); | |
955 | |
956 }; | |
957 | |
958 | |
959 // A dirty card to oop closure that does filtering. | |
960 // It knows how to filter out objects that are outside of the _boundary. | |
961 class Filtering_DCTOC : public DirtyCardToOopClosure { | |
962 protected: | |
963 // Override. | |
964 void walk_mem_region(MemRegion mr, | |
965 HeapWord* bottom, HeapWord* top); | |
966 | |
967 // Walk the given memory region, from bottom to top, applying | |
968 // the given oop closure to (possibly) all objects found. The | |
969 // given oop closure may or may not be the same as the oop | |
970 // closure with which this closure was created, as it may | |
971 // be a filtering closure which makes use of the _boundary. | |
972 // We offer two signatures, so the FilteringClosure static type is | |
973 // apparent. | |
974 virtual void walk_mem_region_with_cl(MemRegion mr, | |
975 HeapWord* bottom, HeapWord* top, | |
6725 | 976 ExtendedOopClosure* cl) = 0; |
0 | 977 virtual void walk_mem_region_with_cl(MemRegion mr, |
978 HeapWord* bottom, HeapWord* top, | |
979 FilteringClosure* cl) = 0; | |
980 | |
981 public: | |
6725 | 982 Filtering_DCTOC(Space* sp, ExtendedOopClosure* cl, |
0 | 983 CardTableModRefBS::PrecisionStyle precision, |
984 HeapWord* boundary) : | |
985 DirtyCardToOopClosure(sp, cl, precision, boundary) {} | |
986 }; | |
987 | |
988 // A dirty card to oop closure for contiguous spaces | |
989 // (ContiguousSpace and sub-classes). | |
990 // It is a FilteringClosure, as defined above, and it knows: | |
991 // | |
992 // 1. That the actual top of any area in a memory region | |
993 // contained by the space is bounded by the end of the contiguous | |
994 // region of the space. | |
995 // 2. That the space is really made up of objects and not just | |
996 // blocks. | |
997 | |
998 class ContiguousSpaceDCTOC : public Filtering_DCTOC { | |
999 protected: | |
1000 // Overrides. | |
1001 HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj); | |
1002 | |
1003 virtual void walk_mem_region_with_cl(MemRegion mr, | |
1004 HeapWord* bottom, HeapWord* top, | |
6725 | 1005 ExtendedOopClosure* cl); |
0 | 1006 virtual void walk_mem_region_with_cl(MemRegion mr, |
1007 HeapWord* bottom, HeapWord* top, | |
1008 FilteringClosure* cl); | |
1009 | |
1010 public: | |
6725 | 1011 ContiguousSpaceDCTOC(ContiguousSpace* sp, ExtendedOopClosure* cl, |
0 | 1012 CardTableModRefBS::PrecisionStyle precision, |
1013 HeapWord* boundary) : | |
1014 Filtering_DCTOC(sp, cl, precision, boundary) | |
1015 {} | |
1016 }; | |
1017 | |
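Both closures above exist to turn a dirty card into oop visits: find the first object that reaches into the card, then apply the oop closure to every object overlapping the card, never scanning past the space's actual top. A stripped-down version of that walk (illustrative; ToyObj and the sorted-vector heap are assumptions):

#include <cstddef>
#include <vector>

struct ToyObj { size_t start; size_t size; };

// Apply `visit` to each object (sorted by start address) overlapping the
// half-open dirty-card range [card_lo, card_hi), clamped at `top`.
template <typename Visit>
void walk_dirty_card(const std::vector<ToyObj>& objs,
                     size_t card_lo, size_t card_hi, size_t top, Visit visit) {
  const size_t hi = card_hi < top ? card_hi : top;  // actual top bounds the walk
  for (const ToyObj& o : objs) {
    if (o.start >= hi) break;                       // past the card: done
    if (o.start + o.size > card_lo) visit(o);       // object reaches into the card
  }
}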
1018 | |
1019 // Class EdenSpace describes eden-space in new generation. | |
1020 | |
1021 class DefNewGeneration; | |
1022 | |
1023 class EdenSpace : public ContiguousSpace { | |
1024 friend class VMStructs; | |
1025 private: | |
1026 DefNewGeneration* _gen; | |
1027 | |
1028 // _soft_end is used as a soft limit on allocation. As soft limits are | |
1029 // reached, the slow-path allocation code can invoke other actions and then | |
1030 // adjust _soft_end up to a new soft limit or to end(). | |
1031 HeapWord* _soft_end; | |
1032 | |
1033 public: | |
347 | 1034 EdenSpace(DefNewGeneration* gen) : |
1035 _gen(gen), _soft_end(NULL) {} |
0 | 1036 |
1037 // Get/set just the 'soft' limit. | |
1038 HeapWord* soft_end() { return _soft_end; } | |
1039 HeapWord** soft_end_addr() { return &_soft_end; } | |
1040 void set_soft_end(HeapWord* value) { _soft_end = value; } | |
1041 | |
1042 // Override. | |
263 | 1043 void clear(bool mangle_space); |
0 | 1044 |
1045 // Set both the 'hard' and 'soft' limits (_end and _soft_end). | |
1046 void set_end(HeapWord* value) { | |
1047 set_soft_end(value); | |
1048 ContiguousSpace::set_end(value); | |
1049 } | |
1050 | |
1051 // Allocation (return NULL if full) | |
1052 HeapWord* allocate(size_t word_size); | |
1053 HeapWord* par_allocate(size_t word_size); | |
1054 }; | |
1055 | |
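The soft limit described above lets the allocation fast path test against _soft_end rather than the hard end(): when it trips, the slow path gets a chance to perform other actions and then either raises the soft limit or gives up once it reaches end(). A simplified model of that loop (illustrative; the 64 KB step and the do_periodic_work hook are made up):

#include <algorithm>
#include <cstddef>

struct ToyEden {
  char* _top;
  char* _soft_end;   // soft allocation limit, always <= _end
  char* _end;        // hard limit

  void do_periodic_work() { /* e.g., sampling or policy checks; placeholder */ }

  char* allocate(size_t byte_size) {
    while ((size_t)(_soft_end - _top) < byte_size) {
      if (_soft_end == _end) return nullptr;  // hard limit reached: really full
      do_periodic_work();                     // slow-path action on hitting the soft limit
      _soft_end = std::min(_end, _soft_end + 64 * 1024);
    }
    char* obj = _top;
    _top = obj + byte_size;
    return obj;
  }
};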
1056 // Class ConcEdenSpace extends EdenSpace for the sake of safe | |
1057 // allocation while soft-end is being modified concurrently | |
1058 | |
1059 class ConcEdenSpace : public EdenSpace { | |
1060 public: | |
1061 ConcEdenSpace(DefNewGeneration* gen) : EdenSpace(gen) { } | |
1062 | |
1063 // Allocation (return NULL if full) | |
1064 HeapWord* par_allocate(size_t word_size); | |
1065 }; | |
1066 | |
1067 | |
1068 // A ContigSpace that supports an efficient "block_start" operation via | |
1069 // a BlockOffsetArray (whose BlockOffsetSharedArray may be shared with | |
1070 // other spaces.) This is the abstract base class for old generation | |
6725 | 1071 // (tenured) spaces. |
0 | 1072 |
1073 class OffsetTableContigSpace: public ContiguousSpace { | |
1074 friend class VMStructs; | |
1075 protected: | |
1076 BlockOffsetArrayContigSpace _offsets; | |
1077 Mutex _par_alloc_lock; | |
1078 | |
1079 public: | |
1080 // Constructor | |
1081 OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray, | |
1082 MemRegion mr); | |
1083 | |
1084 void set_bottom(HeapWord* value); | |
1085 void set_end(HeapWord* value); | |
1086 | |
263 | 1087 void clear(bool mangle_space); |
0 | 1088 |
342 | 1089 inline HeapWord* block_start_const(const void* p) const; |
0 | 1090 |
1091 // Add offset table update. | |
1092 virtual inline HeapWord* allocate(size_t word_size); | |
1093 inline HeapWord* par_allocate(size_t word_size); | |
1094 | |
1095 // MarkSweep support phase3 | |
1096 virtual HeapWord* initialize_threshold(); | |
1097 virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end); | |
1098 | |
1099 virtual void print_on(outputStream* st) const; | |
1100 | |
1101 // Debugging | |
6008 | 1102 void verify() const; |
0 | 1103 }; |
1104 | |
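An OffsetTableContigSpace gets its efficient block_start by keeping, for every card-sized chunk of the space, how far back the object spanning the chunk's first word begins; block_start_const can then jump close to the right object and walk forward instead of scanning from bottom(). A heavily simplified version of that bookkeeping (illustrative; the real BlockOffsetArray encodes large back-skips logarithmically and shares its table between spaces):

#include <cstddef>
#include <vector>

struct ToyOffsetTable {
  static const size_t kCardWords = 64;   // words covered by one table entry
  std::vector<size_t> back_offsets;      // per card: words back to the covering block's start

  explicit ToyOffsetTable(size_t space_words)
      : back_offsets(space_words / kCardWords + 1, 0) {}

  // Record a newly allocated block [start_word, end_word): every card whose
  // first word falls inside the block remembers how far back the block starts.
  void note_block(size_t start_word, size_t end_word) {
    for (size_t card = start_word / kCardWords + 1;
         card <= (end_word - 1) / kCardWords; ++card) {
      back_offsets[card] = card * kCardWords - start_word;
    }
  }

  // Start of the block spanning the first word of addr_word's card; the real
  // block_start_const then walks forward block by block to the block that
  // actually covers the address.
  size_t card_block_start(size_t addr_word) const {
    size_t card = addr_word / kCardWords;
    return card * kCardWords - back_offsets[card];
  }
};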
1105 | |
1106 // Class TenuredSpace is used by TenuredGeneration | |
1107 | |
1108 class TenuredSpace: public OffsetTableContigSpace { | |
1109 friend class VMStructs; | |
1110 protected: | |
1111 // Mark sweep support | |
438 | 1112 size_t allowed_dead_ratio() const; |
0 | 1113 public: |
1114 // Constructor | |
1115 TenuredSpace(BlockOffsetSharedArray* sharedOffsetArray, | |
1116 MemRegion mr) : | |
1117 OffsetTableContigSpace(sharedOffsetArray, mr) {} | |
1118 }; | |
1972 | 1119 #endif // SHARE_VM_MEMORY_SPACE_HPP |