annotate src/share/vm/memory/generation.hpp @ 1839:e41cd7fd68a6

6794422: Perm gen expansion policy for concurrent collectors
Summary: Concurrent collectors should expand the perm gen without a full STW GC, but possibly by triggering a concurrent collection. Temporary band-aid for G1, where no concurrent collection is kicked off since the perm gen is not collected concurrently.
Reviewed-by: johnc

author:   ysr
date:     Fri, 01 Oct 2010 16:12:54 -0700
parents:  126ea7725993
children: a7214d79fcf1
/*
 * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// A Generation models a heap area for similarly-aged objects.
// It will contain one or more spaces holding the actual objects.
//
// The Generation class hierarchy:
//
// Generation                - abstract base class
// - DefNewGeneration        - allocation area (copy collected)
//   - ParNewGeneration      - a DefNewGeneration that is collected by
//                             several threads
// - CardGeneration          - abstract class adding offset array behavior
//   - OneContigSpaceCardGeneration - abstract class holding a single
//                                    contiguous space with card marking
//     - TenuredGeneration      - tenured (old object) space (markSweepCompact)
//     - CompactingPermGenGen   - reflective object area (klasses, methods, symbols, ...)
//   - ConcurrentMarkSweepGeneration - Mostly Concurrent Mark Sweep Generation
//                                     (Detlefs-Printezis refinement of
//                                      Boehm-Demers-Schenker)
//
// The system configurations currently allowed are:
//
//   DefNewGeneration + TenuredGeneration + PermGeneration
//   DefNewGeneration + ConcurrentMarkSweepGeneration + ConcurrentMarkSweepPermGen
//
//   ParNewGeneration + TenuredGeneration + PermGeneration
//   ParNewGeneration + ConcurrentMarkSweepGeneration + ConcurrentMarkSweepPermGen
//

class DefNewGeneration;
class GenerationSpec;
class CompactibleSpace;
class ContiguousSpace;
class CompactPoint;
class OopsInGenClosure;
class OopClosure;
class ScanClosure;
class FastScanClosure;
class GenCollectedHeap;
class GenRemSet;
class GCStats;

65 // A "ScratchBlock" represents a block of memory in one generation usable by | |
66 // another. It represents "num_words" free words, starting at and including | |
67 // the address of "this". | |
68 struct ScratchBlock { | |
69 ScratchBlock* next; | |
70 size_t num_words; | |
71 HeapWord scratch_space[1]; // Actually, of size "num_words-2" (assuming | |
72 // first two fields are word-sized.) | |
73 }; | |
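
// Illustrative sketch (not part of the original header): the usable payload
// of a ScratchBlock, per the layout described above -- "num_words" counts the
// whole block, so the two header words are subtracted. The helper name is
// ours; only ScratchBlock itself comes from this file.
inline size_t scratch_usable_words(const ScratchBlock* b) {
  assert(b->num_words >= 2, "block too small to cover its own header");
  return b->num_words - 2;  // free words available in scratch_space
}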


class Generation: public CHeapObj {
  friend class VMStructs;
 private:
  jlong _time_of_last_gc;       // time when last gc on this generation happened (ms)
  MemRegion _prev_used_region;  // for collectors that want to "remember" a value for
                                // used region at some specific point during collection.

 protected:
  // Minimum and maximum addresses for memory reserved (not necessarily
  // committed) for generation.
  // Used by card marking code. Must not overlap with address ranges of
  // other generations.
  MemRegion _reserved;

  // Memory area reserved for generation
  VirtualSpace _virtual_space;

  // Level in the generation hierarchy.
  int _level;

  // ("Weak") Reference processing support
  ReferenceProcessor* _ref_processor;

  // Performance Counters
  CollectorCounters* _gc_counters;

  // Statistics for garbage collection
  GCStats* _gc_stats;

  // Returns the next generation in the configuration, or else NULL if this
  // is the highest generation.
  Generation* next_gen() const;

  // Initialize the generation.
  Generation(ReservedSpace rs, size_t initial_byte_size, int level);

  // Apply "cl->do_oop" to (the address of) (exactly) all the ref fields in
  // "sp" that point into younger generations.
  // The iteration is only over objects allocated at the start of the
  // iteration; objects allocated as a result of applying the closure are
  // not included.
  void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl);

 public:
  // The set of possible generation kinds.
  enum Name {
    ASParNew,
    ASConcurrentMarkSweep,
    DefNew,
    ParNew,
    MarkSweepCompact,
    ConcurrentMarkSweep,
    Other
  };

  enum SomePublicConstants {
    // Generations are GenGrain-aligned and have sizes that are multiples of
    // GenGrain.
    // Note: on ARM we add 1 bit for card_table_base to be properly aligned
    // (we expect its low byte to be zero - see implementation of post_barrier)
    LogOfGenGrain = 16 ARM_ONLY(+1),
    GenGrain = 1 << LogOfGenGrain
  };
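
  // Illustrative sketch (not part of the original header): rounding a byte
  // size up to the GenGrain boundary the constants above define. GenGrain is
  // a power of two, so mask arithmetic suffices; the helper name is ours
  // (HotSpot's align_size_up() does the same job).
  static size_t example_align_up_to_gen_grain(size_t bytes) {
    return (bytes + GenGrain - 1) & ~((size_t)GenGrain - 1);
  }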

  // Allocate and initialize ("weak") refs processing support.
  virtual void ref_processor_init();
  void set_ref_processor(ReferenceProcessor* rp) {
    assert(_ref_processor == NULL, "clobbering existing _ref_processor");
    _ref_processor = rp;
  }

  virtual Generation::Name kind() { return Generation::Other; }
  GenerationSpec* spec();

  // This properly belongs in the collector, but for now this
  // will do.
  virtual bool refs_discovery_is_atomic() const { return true; }
  virtual bool refs_discovery_is_mt() const { return false; }

  // Space enquiries (results in bytes)
  virtual size_t capacity() const = 0;  // The maximum number of object bytes the
                                        // generation can currently hold.
  virtual size_t used() const = 0;      // The number of used bytes in the gen.
  virtual size_t free() const = 0;      // The number of free bytes in the gen.

  // Support for java.lang.Runtime.maxMemory(); see CollectedHeap.
  // Returns the total number of bytes available in a generation
  // for the allocation of objects.
  virtual size_t max_capacity() const;

  // If this is a young generation, the maximum number of bytes that can be
  // allocated in this generation before a GC is triggered.
  virtual size_t capacity_before_gc() const { return 0; }

  // The largest number of contiguous free bytes in the generation,
  // including expansion (assumes called at a safepoint).
  virtual size_t contiguous_available() const = 0;
  // The largest number of contiguous free bytes in this or any higher generation.
  virtual size_t max_contiguous_available() const;

  // Returns true if promotions of the specified amount can
  // be attempted safely (without a vm failure).
  // Promotion of the full amount is not guaranteed but
  // can be attempted.
  // "younger_handles_promotion_failure" is true if the younger
  // generation handles a promotion failure.
  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes,
                                         bool younger_handles_promotion_failure) const;

  // For a non-young generation, this interface can be used to inform a
  // generation that a promotion attempt into that generation failed.
  // Typically used to enable diagnostic output for post-mortem analysis,
  // but other uses of the interface are not ruled out.
  virtual void promotion_failure_occurred() { /* does nothing */ }

  // Return an estimate of the maximum allocation that could be performed
  // in the generation without triggering any collection or expansion
  // activity.  It is "unsafe" because no locks are taken; the result
  // should be treated as an approximation, not a guarantee, for use in
  // heuristic resizing decisions.
  virtual size_t unsafe_max_alloc_nogc() const = 0;

  // Returns true if this generation cannot be expanded further
  // without a GC. Override as appropriate.
  virtual bool is_maximal_no_gc() const {
    return _virtual_space.uncommitted_size() == 0;
  }

  MemRegion reserved() const { return _reserved; }

  // Returns a region guaranteed to contain all the objects in the
  // generation.
  virtual MemRegion used_region() const { return _reserved; }

  MemRegion prev_used_region() const { return _prev_used_region; }
  virtual void save_used_region()    { _prev_used_region = used_region(); }

  // Returns "TRUE" iff "p" points into an allocated object in the generation.
  // For some kinds of generations, this may be an expensive operation.
  // To avoid performance problems stemming from its inadvertent use in
  // product jvm's, we restrict its use to assertion checking or
  // verification only.
  virtual bool is_in(const void* p) const;

  /* Returns "TRUE" iff "p" points into the reserved area of the generation. */
  bool is_in_reserved(const void* p) const {
    return _reserved.contains(p);
  }

  // Check that the generation kind is DefNewGeneration or a sub
  // class of DefNewGeneration and return a DefNewGeneration*
  DefNewGeneration* as_DefNewGeneration();

  // If some space in the generation contains the given "addr", return a
  // pointer to that space, else return "NULL".
  virtual Space* space_containing(const void* addr) const;

  // Iteration - do not use for time critical operations
  virtual void space_iterate(SpaceClosure* blk, bool usedOnly = false) = 0;

  // Returns the first space, if any, in the generation that can participate
  // in compaction, or else "NULL".
  virtual CompactibleSpace* first_compaction_space() const = 0;

  // Returns "true" iff this generation should be used to allocate an
  // object of the given size.  Young generations might
  // wish to exclude very large objects, for example, since, if allocated
  // often, they would greatly increase the frequency of young-gen
  // collection.
  virtual bool should_allocate(size_t word_size, bool is_tlab) {
    bool result = false;
    size_t overflow_limit = (size_t)1 << (BitsPerSize_t - LogHeapWordSize);
    if (!is_tlab || supports_tlab_allocation()) {
      result = (word_size > 0) && (word_size < overflow_limit);
    }
    return result;
  }

  // Allocate and return a block of the requested size, or return "NULL".
  // Assumes the caller has done any necessary locking.
  virtual HeapWord* allocate(size_t word_size, bool is_tlab) = 0;

  // Like "allocate", but performs any necessary locking internally.
  virtual HeapWord* par_allocate(size_t word_size, bool is_tlab) = 0;

  // A 'younger' gen has reached an allocation limit, and uses this to notify
  // the next older gen.  The return value is a new limit, or NULL if none.  The
  // caller must do the necessary locking.
  virtual HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
                                             size_t word_size) {
    return NULL;
  }

  // Some generations may offer a region for shared, contiguous allocation,
  // via inlined code (by exporting the address of the top and end fields
  // defining the extent of the contiguous allocation region.)

  // This function returns "true" iff the heap supports this kind of
  // allocation.  (More precisely, this means the style of allocation that
  // increments *top_addr() with a CAS.)  (Default is "no".)
  // A generation that supports this allocation style must use lock-free
  // allocation for *all* allocation, since there are times when lock free
  // allocation will be concurrent with plain "allocate" calls.
  virtual bool supports_inline_contig_alloc() const { return false; }

  // These functions return the addresses of the fields that define the
  // boundaries of the contiguous allocation area.  (These fields should be
  // physically near to one another.)
  virtual HeapWord** top_addr() const { return NULL; }
  virtual HeapWord** end_addr() const { return NULL; }
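
  // Illustrative sketch (not part of the original header) of the lock-free
  // allocation style described above: a caller that has cached
  // top_addr()/end_addr() bumps "top" with a CAS loop. Atomic::cmpxchg_ptr is
  // assumed to be the era-appropriate HotSpot primitive; the helper name and
  // casts are ours.
  static HeapWord* example_inline_contig_allocate(HeapWord** top,
                                                  HeapWord** end,
                                                  size_t word_size) {
    while (true) {
      HeapWord* old_top = *top;
      HeapWord* new_top = old_top + word_size;
      if (new_top > *end) {
        return NULL;  // region exhausted: fall back to the slow path
      }
      // Install new_top only if no other thread moved "top" in the meantime.
      if ((HeapWord*)Atomic::cmpxchg_ptr(new_top, top, old_top) == old_top) {
        return old_top;  // we own [old_top, new_top)
      }
      // Lost the race; reread "top" and retry.
    }
  }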

  // Thread-local allocation buffers.
  virtual bool supports_tlab_allocation() const { return false; }
  virtual size_t tlab_capacity() const {
    guarantee(false, "Generation doesn't support thread local allocation buffers");
    return 0;
  }
  virtual size_t unsafe_max_tlab_alloc() const {
    guarantee(false, "Generation doesn't support thread local allocation buffers");
    return 0;
  }

299 // "obj" is the address of an object in a younger generation. Allocate space | |
300 // for "obj" in the current (or some higher) generation, and copy "obj" into | |
301 // the newly allocated space, if possible, returning the result (or NULL if | |
302 // the allocation failed). | |
303 // | |
304 // The "obj_size" argument is just obj->size(), passed along so the caller can | |
305 // avoid repeating the virtual call to retrieve it. | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
306 virtual oop promote(oop obj, size_t obj_size); |
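
  // Illustrative caller sketch (not part of the original header): a younger
  // generation promoting a survivor, passing obj->size() along as the
  // "obj_size" comment above suggests. The helper name is ours.
  static oop example_promote(Generation* next_gen, oop obj) {
    oop copy = next_gen->promote(obj, obj->size());
    return copy;  // NULL means the target generation could not make room
  }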

  // Thread "thread_num" (0 <= thread_num < ParallelGCThreads) wants to promote
  // object "obj", whose original mark word was "m", and whose size is
  // "word_sz".  If possible, allocate space for "obj", copy obj into it
  // (taking care to copy "m" into the mark word when done, since the mark
  // word of "obj" may have been overwritten with a forwarding pointer, and
  // also taking care to copy the klass pointer *last*).  Returns the new
  // object if successful, or else NULL.
  virtual oop par_promote(int thread_num,
                          oop obj, markOop m, size_t word_sz);

  // Undo, if possible, the most recent par_promote_alloc allocation by
  // "thread_num" ("obj", of "word_sz").
  virtual void par_promote_alloc_undo(int thread_num,
                                      HeapWord* obj, size_t word_sz);

  // Informs the current generation that all par_promote_alloc's in the
  // collection have been completed; any supporting data structures can be
  // reset.  Default is to do nothing.
  virtual void par_promote_alloc_done(int thread_num) {}

  // Informs the current generation that all oop_since_save_marks_iterates
  // performed by "thread_num" in the current collection, if any, have been
  // completed; any supporting data structures can be reset.  Default is to
  // do nothing.
  virtual void par_oop_since_save_marks_iterate_done(int thread_num) {}

  // This generation will collect all younger generations
  // during a full collection.
  virtual bool full_collects_younger_generations() const { return false; }

  // This generation does in-place marking, meaning that mark words
  // are mutated during the marking phase and presumably reinitialized
  // to a canonical value after the GC. This is currently used by the
  // biased locking implementation to determine whether additional
  // work is required during the GC prologue and epilogue.
  virtual bool performs_in_place_marking() const { return true; }

  // Returns "true" iff collect() should subsequently be called on this
  // generation. See comment below.
  // This is a generic implementation which can be overridden.
  //
  // Note: in the current (1.4) implementation, when genCollectedHeap's
  // incremental_collection_will_fail flag is set, all allocations are
  // slow path (the only fast-path place to allocate is DefNew, which
  // will be full if the flag is set).
  // Thus, older generations which collect younger generations should
  // test this flag and collect if it is set.
  virtual bool should_collect(bool   full,
                              size_t word_size,
                              bool   is_tlab) {
    return (full || should_allocate(word_size, is_tlab));
  }

  // Perform a garbage collection.
  // If full is true attempt a full garbage collection of this generation.
  // Otherwise, attempt to (at least) free enough space to support an
  // allocation of the given "word_size".
  virtual void collect(bool   full,
                       bool   clear_all_soft_refs,
                       size_t word_size,
                       bool   is_tlab) = 0;

  // Perform a heap collection, attempting to create (at least) enough
  // space to support an allocation of the given "word_size".  If
  // successful, perform the allocation and return the resulting
  // "oop" (initializing the allocated block).  If the allocation is
  // still unsuccessful, return "NULL".
  virtual HeapWord* expand_and_allocate(size_t word_size,
                                        bool is_tlab,
                                        bool parallel = false) = 0;
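
  // Illustrative sketch (not part of the original header) of how the virtuals
  // above cooperate on an allocation failure. The real driver lives in
  // GenCollectedHeap; this simplified rendering and its name are ours.
  static HeapWord* example_satisfy_failed_allocation(Generation* gen,
                                                     size_t word_size,
                                                     bool is_tlab) {
    if (gen->should_collect(false /* full */, word_size, is_tlab)) {
      gen->collect(false /* full */, false /* clear_all_soft_refs */,
                   word_size, is_tlab);
      HeapWord* result = gen->allocate(word_size, is_tlab);
      if (result != NULL) return result;
    }
    // Collecting was not enough (or not warranted): try to grow the
    // generation and allocate in the newly committed space.
    return gen->expand_and_allocate(word_size, is_tlab);
  }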

  // Some generations may require some cleanup or preparation actions before
  // allowing a collection.  The default is to do nothing.
  virtual void gc_prologue(bool full) {};

  // Some generations may require some cleanup actions after a collection.
  // The default is to do nothing.
  virtual void gc_epilogue(bool full) {};

  // Save the high water marks for the used space in a generation.
  virtual void record_spaces_top() {};

  // Some generations may need to be "fixed-up" after some allocation
  // activity to make them parsable again. The default is to do nothing.
  virtual void ensure_parsability() {};

  // Time (in ms) when we were last collected or now if a collection is
  // in progress.
  virtual jlong time_of_last_gc(jlong now) {
    // XXX See note in genCollectedHeap::millis_since_last_gc()
    NOT_PRODUCT(
      if (now < _time_of_last_gc) {
        // _time_of_last_gc and now are jlongs, so use the 64-bit format.
        warning("time warp: " INT64_FORMAT " to " INT64_FORMAT,
                _time_of_last_gc, now);
      }
    )
    return _time_of_last_gc;
  }

  virtual void update_time_of_last_gc(jlong now) {
    _time_of_last_gc = now;
  }

  // Generations may keep statistics about collection.  This
  // method updates those statistics.  current_level is
  // the level of the collection that has most recently
  // occurred.  This allows the generation to decide what
  // statistics are valid to collect.  For example, the
  // generation can decide to gather the amount of promoted data
  // if the collection of the younger generations has completed.
  GCStats* gc_stats() const { return _gc_stats; }
  virtual void update_gc_stats(int current_level, bool full) {}

  // Mark sweep support phase2
  virtual void prepare_for_compaction(CompactPoint* cp);
  // Mark sweep support phase3
  virtual void pre_adjust_pointers() { ShouldNotReachHere(); }
  virtual void adjust_pointers();
  // Mark sweep support phase4
  virtual void compact();
  virtual void post_compact() { ShouldNotReachHere(); }

  // Support for CMS's rescan. In this general form we return a pointer
  // to an abstract object that can be used, based on specific previously
  // decided protocols, to exchange information between generations,
  // information that may be useful for speeding up certain types of
  // garbage collectors. A NULL value indicates to the client that
  // no data recording is expected by the provider. The data-recorder is
  // expected to be GC worker thread-local, with the worker index
  // indicated by "thr_num".
  virtual void* get_data_recorder(int thr_num) { return NULL; }

  // Some generations may require some cleanup actions before allowing
  // a verification.
  virtual void prepare_for_verify() {};

  // Accessing "marks".

  // This function gives a generation a chance to note a point between
  // collections.  For example, a contiguous generation might note the
  // beginning allocation point post-collection, which might allow some later
  // operations to be optimized.
  virtual void save_marks() {}

  // This function allows generations to initialize any "saved marks".  That
  // is, it should only be called when the generation is empty.
  virtual void reset_saved_marks() {}

  // This function is "true" iff no allocations have occurred in the
  // generation since the last call to "save_marks".
  virtual bool no_allocs_since_save_marks() = 0;

  // Apply "cl->apply" to (the addresses of) all reference fields in objects
  // allocated in the current generation since the last call to "save_marks".
  // If more objects are allocated in this generation as a result of applying
  // the closure, iterates over reference fields in those objects as well.
  // Calls "save_marks" at the end of the iteration.
  // General signature...
  virtual void oop_since_save_marks_iterate_v(OopsInGenClosure* cl) = 0;
  // ...and specializations for de-virtualization.  (The general
  // implementation of the _nv versions calls the virtual version.
  // Note that the _nv suffix is not really semantically necessary,
  // but it avoids some not-so-useful warnings on Solaris.)
#define Generation_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)         \
  virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
    oop_since_save_marks_iterate_v((OopsInGenClosure*)cl);                   \
  }
  SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(Generation_SINCE_SAVE_MARKS_DECL)

#undef Generation_SINCE_SAVE_MARKS_DECL

478 // The "requestor" generation is performing some garbage collection | |
479 // action for which it would be useful to have scratch space. If | |
480 // the target is not the requestor, no gc actions will be required | |
481 // of the target. The requestor promises to allocate no more than | |
482 // "max_alloc_words" in the target generation (via promotion say, | |
483 // if the requestor is a young generation and the target is older). | |
484 // If the target generation can provide any scratch space, it adds | |
485 // it to "list", leaving "list" pointing to the head of the | |
486 // augmented list. The default is to offer no space. | |
487 virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor, | |
488 size_t max_alloc_words) {} | |
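
  // Illustrative sketch (not part of the original header) of the list
  // protocol described above: an implementation with "free_words" of spare
  // space at "free_start" prepends one ScratchBlock and leaves "list" at the
  // new head. The minimum-size check and the helper name are our assumptions.
  static void example_contribute(ScratchBlock*& list, HeapWord* free_start,
                                 size_t free_words) {
    if (free_words < 3) return;  // too small for the two-word header plus payload
    ScratchBlock* b = (ScratchBlock*)free_start;
    b->num_words = free_words;   // counts the header words, per ScratchBlock
    b->next = list;
    list = b;                    // "list" now points at the augmented head
  }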

  // Give each generation an opportunity to do clean up for any
  // contributed scratch.
  virtual void reset_scratch() {};

  // When an older generation has been collected, and perhaps resized,
  // this method will be invoked on all younger generations (from older to
  // younger), allowing them to resize themselves as appropriate.
  virtual void compute_new_size() = 0;

  // Printing
  virtual const char* name() const = 0;
  virtual const char* short_name() const = 0;

  int level() const { return _level; }

  // Attributes

  // True iff the given generation may only be the youngest generation.
  virtual bool must_be_youngest() const = 0;
  // True iff the given generation may only be the oldest generation.
  virtual bool must_be_oldest() const = 0;

  // Reference Processing accessor
  ReferenceProcessor* const ref_processor() { return _ref_processor; }

  // Iteration.

  // Iterate over all the ref-containing fields of all objects in the
  // generation, calling "cl.do_oop" on each.
  virtual void oop_iterate(OopClosure* cl);

  // Same as above, restricted to the intersection of a memory region and
  // the generation.
  virtual void oop_iterate(MemRegion mr, OopClosure* cl);

  // Iterate over all objects in the generation, calling "cl.do_object" on
  // each.
  virtual void object_iterate(ObjectClosure* cl);

  // Iterate over all safe objects in the generation, calling "cl.do_object" on
  // each.  An object is safe if its references point to other objects in
  // the heap.  This defaults to object_iterate() unless overridden.
  virtual void safe_object_iterate(ObjectClosure* cl);

  // Iterate over all objects allocated in the generation since the last
  // collection, calling "cl.do_object" on each.  The generation must have
  // been initialized properly to support this function, or else this call
  // will fail.
  virtual void object_iterate_since_last_GC(ObjectClosure* cl) = 0;

  // Apply "cl->do_oop" to (the address of) all and only all the ref fields
  // in the current generation that contain pointers to objects in younger
  // generations.  Objects allocated since the last "save_marks" call are
  // excluded.
  virtual void younger_refs_iterate(OopsInGenClosure* cl) = 0;

  // Inform a generation that it no longer contains references to objects
  // in any younger generation.  [e.g. Because younger gens are empty,
  // clear the card table.]
  virtual void clear_remembered_set() { }

  // Inform a generation that some of its objects have moved.  [e.g. The
  // generation's spaces were compacted, invalidating the card table.]
  virtual void invalidate_remembered_set() { }

  // Block abstraction.

  // Returns the address of the start of the "block" that contains the
  // address "addr".  We say "blocks" instead of "object" since some heaps
  // may not pack objects densely; a chunk may either be an object or a
  // non-object.
  virtual HeapWord* block_start(const void* addr) const;

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const;
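
  // Illustrative sketch (not part of the original header): walking a
  // generation's used region with the block abstraction above, visiting only
  // the blocks that are objects. It assumes blocks tile used_region(), as
  // block_size()'s contract implies; the helper name is ours.
  static void example_walk_blocks(Generation* gen, ObjectClosure* cl) {
    MemRegion mr = gen->used_region();
    for (HeapWord* p = mr.start(); p < mr.end(); p += gen->block_size(p)) {
      if (gen->block_is_obj(p)) {
        cl->do_object(oop(p));  // non-object blocks (e.g. free chunks) are skipped
      }
    }
  }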


  // PrintGC, PrintGCDetails support
  void print_heap_change(size_t prev_used) const;

  // PrintHeapAtGC support
  virtual void print() const;
  virtual void print_on(outputStream* st) const;

  virtual void verify(bool allow_dirty) = 0;

  struct StatRecord {
    int invocations;
    elapsedTimer accumulated_time;
    StatRecord() :
      invocations(0),
      accumulated_time(elapsedTimer()) {}
  };
 private:
  StatRecord _stat_record;
 public:
  StatRecord* stat_record() { return &_stat_record; }

  virtual void print_summary_info();
  virtual void print_summary_info_on(outputStream* st);

  // Performance Counter support
  virtual void update_counters() = 0;
  virtual CollectorCounters* counters() { return _gc_counters; }
};

// Class CardGeneration is a generation that is covered by a card table,
// and uses a card-size block-offset array to implement block_start.

// class BlockOffsetArray;
// class BlockOffsetArrayContigSpace;
class BlockOffsetSharedArray;

class CardGeneration: public Generation {
  friend class VMStructs;
 protected:
  // This is shared with other generations.
  GenRemSet* _rs;
  // This is local to this generation.
  BlockOffsetSharedArray* _bts;

  CardGeneration(ReservedSpace rs, size_t initial_byte_size, int level,
                 GenRemSet* remset);

 public:

  // Attempt to expand the generation by "bytes".  Expand by at a
  // minimum "expand_bytes".  Return true if some amount (not
  // necessarily the full "bytes") was done.
  virtual bool expand(size_t bytes, size_t expand_bytes);

  virtual void clear_remembered_set();

  virtual void invalidate_remembered_set();

  virtual void prepare_for_verify();

  // Grow generation with specified size (returns false if unable to grow)
  virtual bool grow_by(size_t bytes) = 0;
  // Grow generation to reserved size.
  virtual bool grow_to_reserved() = 0;
};

// OneContigSpaceCardGeneration models a heap of old objects contained in a single
// contiguous space.
//
// Garbage collection is performed using mark-compact.

class OneContigSpaceCardGeneration: public CardGeneration {
  friend class VMStructs;
  // Abstractly, this is a subtype that gets access to protected fields.
  friend class CompactingPermGen;
  friend class VM_PopulateDumpSharedSpace;

 protected:
  size_t _min_heap_delta_bytes;   // Minimum amount to expand.
  ContiguousSpace* _the_space;    // Actual space holding objects.
  WaterMark _last_gc;             // Watermark between objects allocated before
                                  // and after last GC.

  // Grow generation with specified size (returns false if unable to grow)
  virtual bool grow_by(size_t bytes);
  // Grow generation to reserved size.
  virtual bool grow_to_reserved();
  // Shrink generation with specified size (returns false if unable to shrink)
  void shrink_by(size_t bytes);

  // Allocation failure
  virtual bool expand(size_t bytes, size_t expand_bytes);
  void shrink(size_t bytes);

  // Accessing spaces
  ContiguousSpace* the_space() const { return _the_space; }

 public:
  OneContigSpaceCardGeneration(ReservedSpace rs, size_t initial_byte_size,
                               size_t min_heap_delta_bytes,
                               int level, GenRemSet* remset,
                               ContiguousSpace* space) :
    CardGeneration(rs, initial_byte_size, level, remset),
    _the_space(space), _min_heap_delta_bytes(min_heap_delta_bytes)
  {}

  inline bool is_in(const void* p) const;

  // Space enquiries
  size_t capacity() const;
  size_t used() const;
  size_t free() const;

  MemRegion used_region() const;

  size_t unsafe_max_alloc_nogc() const;
  size_t contiguous_available() const;

  // Iteration
  void object_iterate(ObjectClosure* blk);
  void space_iterate(SpaceClosure* blk, bool usedOnly = false);
  void object_iterate_since_last_GC(ObjectClosure* cl);

  void younger_refs_iterate(OopsInGenClosure* blk);

  inline CompactibleSpace* first_compaction_space() const;

  virtual inline HeapWord* allocate(size_t word_size, bool is_tlab);
  virtual inline HeapWord* par_allocate(size_t word_size, bool is_tlab);

  // Accessing marks
  inline WaterMark top_mark();
  inline WaterMark bottom_mark();

#define OneContig_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
  OneContig_SINCE_SAVE_MARKS_DECL(OopsInGenClosure,_v)
  SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(OneContig_SINCE_SAVE_MARKS_DECL)

  void save_marks();
  void reset_saved_marks();
  bool no_allocs_since_save_marks();

  inline size_t block_size(const HeapWord* addr) const;

  inline bool block_is_obj(const HeapWord* addr) const;

  virtual void collect(bool full,
                       bool clear_all_soft_refs,
                       size_t size,
                       bool is_tlab);
  HeapWord* expand_and_allocate(size_t size,
                                bool is_tlab,
                                bool parallel = false);

  virtual void prepare_for_verify();

  virtual void gc_epilogue(bool full);

  virtual void record_spaces_top();

  virtual void verify(bool allow_dirty);
  virtual void print_on(outputStream* st) const;
};