src/share/vm/memory/generation.hpp @ 0:a61af66fc99e (jdk7-b24)

Initial load
author: duke
date:   Sat, 01 Dec 2007 00:00:00 +0000
children: ba764ed4b6f2
/*
 * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// A Generation models a heap area for similarly-aged objects.
// It will contain one or more spaces holding the actual objects.
//
// The Generation class hierarchy:
//
// Generation                      - abstract base class
// - DefNewGeneration              - allocation area (copy collected)
//   - ParNewGeneration            - a DefNewGeneration that is collected by
//                                   several threads
// - CardGeneration                - abstract class adding offset array behavior
//   - OneContigSpaceCardGeneration - abstract class holding a single
//                                    contiguous space with card marking
//     - TenuredGeneration         - tenured (old object) space (markSweepCompact)
//     - CompactingPermGenGen      - reflective object area (klasses, methods, symbols, ...)
//   - ConcurrentMarkSweepGeneration - Mostly Concurrent Mark Sweep Generation
//                                     (Detlefs-Printezis refinement of
//                                      Boehm-Demers-Shenker)
//
// The system configurations currently allowed are:
//
//   DefNewGeneration + TenuredGeneration + PermGeneration
//   DefNewGeneration + ConcurrentMarkSweepGeneration + ConcurrentMarkSweepPermGen
//
//   ParNewGeneration + TenuredGeneration + PermGeneration
//   ParNewGeneration + ConcurrentMarkSweepGeneration + ConcurrentMarkSweepPermGen
//
class DefNewGeneration;
class GenerationSpec;
class CompactibleSpace;
class ContiguousSpace;
class CompactPoint;
class OopsInGenClosure;
class OopClosure;
class ScanClosure;
class FastScanClosure;
class GenCollectedHeap;
class GenRemSet;
class GCStats;

// A "ScratchBlock" represents a block of memory in one generation usable by
// another.  It represents "num_words" free words, starting at and including
// the address of "this".
struct ScratchBlock {
  ScratchBlock* next;
  size_t num_words;
  HeapWord scratch_space[1];  // Actually, of size "num_words-2" (assuming
                              // first two fields are word-sized.)
};
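
// An illustrative sketch (not part of this header) of walking a scratch list
// as produced by Generation::contribute_scratch() below.  "visit" is a
// hypothetical callback; the two header words (next, num_words) account for
// the "num_words-2" payload size noted above.
//
//   for (ScratchBlock* b = list; b != NULL; b = b->next) {
//     HeapWord* payload       = b->scratch_space;   // first usable word
//     size_t    payload_words = b->num_words - 2;   // minus the header words
//     visit(payload, payload_words);
//   }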


class Generation: public CHeapObj {
  friend class VMStructs;
 private:
  jlong _time_of_last_gc;       // time when last gc on this generation happened (ms)
  MemRegion _prev_used_region;  // for collectors that want to "remember" a value for
                                // used region at some specific point during collection.

 protected:
  // Minimum and maximum addresses for memory reserved (not necessarily
  // committed) for generation.
  // Used by card marking code.  Must not overlap with address ranges of
  // other generations.
  MemRegion _reserved;

  // Memory area reserved for generation
  VirtualSpace _virtual_space;

  // Level in the generation hierarchy.
  int _level;

  // ("Weak") Reference processing support
  ReferenceProcessor* _ref_processor;

  // Performance Counters
  CollectorCounters* _gc_counters;

  // Statistics for garbage collection
  GCStats* _gc_stats;

  // Returns the next generation in the configuration, or else NULL if this
  // is the highest generation.
  Generation* next_gen() const;

  // Initialize the generation.
  Generation(ReservedSpace rs, size_t initial_byte_size, int level);

  // Apply "cl->do_oop" to (the address of) (exactly) all the ref fields in
  // "sp" that point into younger generations.
  // The iteration is only over objects allocated at the start of the
  // iteration; objects allocated as a result of applying the closure are
  // not included.
  void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl);

 public:
  // The set of possible generation kinds.
  enum Name {
    ASParNew,
    ASConcurrentMarkSweep,
    DefNew,
    ParNew,
    MarkSweepCompact,
    ConcurrentMarkSweep,
    Other
  };

  enum SomePublicConstants {
    // Generations are GenGrain-aligned and have sizes that are multiples of
    // GenGrain.
    LogOfGenGrain = 16,
    GenGrain = 1 << LogOfGenGrain
  };
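
  // A worked example (illustrative only): with LogOfGenGrain = 16, GenGrain
  // is 1 << 16 = 64K, so a generation's start address and size are multiples
  // of 64K.  Rounding a byte size up to the grain uses the usual
  // power-of-two arithmetic:
  //
  //   size_t aligned = (byte_size + GenGrain - 1) & ~(size_t)(GenGrain - 1);
  //
  // e.g. a request of 100000 bytes rounds up to 131072 (2 * 64K).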

  // allocate and initialize ("weak") refs processing support
  virtual void ref_processor_init();
  void set_ref_processor(ReferenceProcessor* rp) {
    assert(_ref_processor == NULL, "clobbering existing _ref_processor");
    _ref_processor = rp;
  }

  virtual Generation::Name kind() { return Generation::Other; }
  GenerationSpec* spec();

  // This properly belongs in the collector, but for now this
  // will do.
  virtual bool refs_discovery_is_atomic() const { return true;  }
  virtual bool refs_discovery_is_mt()     const { return false; }

  // Space enquiries (results in bytes)
  virtual size_t capacity() const = 0;  // The maximum number of object bytes the
                                        // generation can currently hold.
  virtual size_t used() const = 0;      // The number of used bytes in the gen.
  virtual size_t free() const = 0;      // The number of free bytes in the gen.

  // Support for java.lang.Runtime.maxMemory(); see CollectedHeap.
  // Returns the total number of bytes available in a generation
  // for the allocation of objects.
  virtual size_t max_capacity() const;

  // If this is a young generation, the maximum number of bytes that can be
  // allocated in this generation before a GC is triggered.
  virtual size_t capacity_before_gc() const { return 0; }

  // The largest number of contiguous free bytes in the generation,
  // including expansion.  (Assumes it is called at a safepoint.)
  virtual size_t contiguous_available() const = 0;
  // The largest number of contiguous free bytes in this or any higher generation.
  virtual size_t max_contiguous_available() const;

  // Returns true if promotions of the specified amount can
  // be attempted safely (without a vm failure).
  // Promotion of the full amount is not guaranteed but
  // can be attempted.
  // "younger_handles_promotion_failure" is true if the younger
  // generation handles a promotion failure.
  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes,
                                         bool younger_handles_promotion_failure) const;

  // Return an estimate of the maximum allocation that could be performed
  // in the generation without triggering any collection or expansion
  // activity.  It is "unsafe" because no locks are taken; the result
  // should be treated as an approximation, not a guarantee, for use in
  // heuristic resizing decisions.
  virtual size_t unsafe_max_alloc_nogc() const = 0;

  // Returns true if this generation cannot be expanded further
  // without a GC.  Override as appropriate.
  virtual bool is_maximal_no_gc() const {
    return _virtual_space.uncommitted_size() == 0;
  }

  MemRegion reserved() const { return _reserved; }

  // Returns a region guaranteed to contain all the objects in the
  // generation.
  virtual MemRegion used_region() const { return _reserved; }

  MemRegion prev_used_region() const { return _prev_used_region; }
  virtual void save_used_region()    { _prev_used_region = used_region(); }

  // Returns "TRUE" iff "p" points into an allocated object in the generation.
  // For some kinds of generations, this may be an expensive operation.
  // To avoid performance problems stemming from its inadvertent use in
  // product jvm's, we restrict its use to assertion checking or
  // verification only.
  virtual bool is_in(const void* p) const;

  // Returns "TRUE" iff "p" points into the reserved area of the generation.
  bool is_in_reserved(const void* p) const {
    return _reserved.contains(p);
  }

  // Check that the generation kind is DefNewGeneration or a subclass of
  // DefNewGeneration and return a DefNewGeneration*.
  DefNewGeneration* as_DefNewGeneration();

  // If some space in the generation contains the given "addr", return a
  // pointer to that space, else return "NULL".
  virtual Space* space_containing(const void* addr) const;

  // Iteration - do not use for time critical operations
  virtual void space_iterate(SpaceClosure* blk, bool usedOnly = false) = 0;

  // Returns the first space, if any, in the generation that can participate
  // in compaction, or else "NULL".
  virtual CompactibleSpace* first_compaction_space() const = 0;

  // Returns "true" iff this generation should be used to allocate an
  // object of the given size.  Young generations might
  // wish to exclude very large objects, for example, since, if allocated
  // often, they would greatly increase the frequency of young-gen
  // collection.
  virtual bool should_allocate(size_t word_size, bool is_tlab) {
    bool result = false;
    size_t overflow_limit = (size_t)1 << (BitsPerSize_t - LogHeapWordSize);
    if (!is_tlab || supports_tlab_allocation()) {
      result = (word_size > 0) && (word_size < overflow_limit);
    }
    return result;
  }
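
  // A worked example (illustrative only) of the overflow check above: on a
  // 64-bit VM, BitsPerSize_t is 64 and LogHeapWordSize is 3, so
  // overflow_limit is 2^61 words.  Any word_size below that limit still
  // fits in a size_t after conversion to bytes (word_size << LogHeapWordSize),
  // so later size arithmetic cannot overflow.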

  // Allocate and return a block of the requested size, or return "NULL".
  // Assumes the caller has done any necessary locking.
  virtual HeapWord* allocate(size_t word_size, bool is_tlab) = 0;

  // Like "allocate", but performs any necessary locking internally.
  virtual HeapWord* par_allocate(size_t word_size, bool is_tlab) = 0;

  // A 'younger' gen has reached an allocation limit, and uses this to notify
  // the next older gen.  The return value is a new limit, or NULL if none.  The
  // caller must do the necessary locking.
  virtual HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
                                             size_t word_size) {
    return NULL;
  }

  // Some generations may offer a region for shared, contiguous allocation,
  // via inlined code (by exporting the address of the top and end fields
  // defining the extent of the contiguous allocation region.)

  // This function returns "true" iff the heap supports this kind of
  // allocation.  (More precisely, this means the style of allocation that
  // increments "*top_addr()" with a CAS.)  (Default is "no".)
  // A generation that supports this allocation style must use lock-free
  // allocation for *all* allocation, since there are times when lock free
  // allocation will be concurrent with plain "allocate" calls.
  virtual bool supports_inline_contig_alloc() const { return false; }

  // These functions return the addresses of the fields that define the
  // boundaries of the contiguous allocation area.  (These fields should be
  // physically near to one another.)
  virtual HeapWord** top_addr() const { return NULL; }
  virtual HeapWord** end_addr() const { return NULL; }

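  // A minimal sketch (illustrative only) of the lock-free fast path that
  // supports_inline_contig_alloc() enables, assuming HotSpot's
  // Atomic::cmpxchg_ptr(exchange_value, dest, compare_value) primitive:
  //
  //   HeapWord* top     = *top_addr();
  //   HeapWord* new_top = top + word_size;
  //   if (new_top <= *end_addr() &&
  //       Atomic::cmpxchg_ptr(new_top, top_addr(), top) == (void*)top) {
  //     return top;   // won the race; [top, new_top) is ours
  //   }
  //   // otherwise retry, or take the locked slow path
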
  // Thread-local allocation buffers
  virtual bool supports_tlab_allocation() const { return false; }
  virtual size_t tlab_capacity() const {
    guarantee(false, "Generation doesn't support thread local allocation buffers");
    return 0;
  }
  virtual size_t unsafe_max_tlab_alloc() const {
    guarantee(false, "Generation doesn't support thread local allocation buffers");
    return 0;
  }

  // "obj" is the address of an object in a younger generation.  Allocate space
  // for "obj" in the current (or some higher) generation, and copy "obj" into
  // the newly allocated space, if possible, returning the result (or NULL if
  // the allocation failed).
  //
  // The "obj_size" argument is just obj->size(), passed along so the caller can
  // avoid repeating the virtual call to retrieve it.
  //
  // The "ref" argument, if non-NULL, is the address of some reference to "obj"
  // (that is "*ref == obj"); some generations may use this information to, for
  // example, influence placement decisions.
  //
  // The default implementation ignores "ref" and calls allocate().
  virtual oop promote(oop obj, size_t obj_size, oop* ref);
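
  // A sketch (illustrative only, ignoring promotion-failure handling) of the
  // shape of the default implementation described above:
  //
  //   HeapWord* mem = allocate(obj_size, false /* is_tlab */);
  //   if (mem == NULL) return NULL;
  //   Copy::aligned_disjoint_words((HeapWord*)obj, mem, obj_size);
  //   return oop(mem);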

  // Thread "thread_num" (0 <= thread_num < ParallelGCThreads) wants to promote
  // object "obj", whose original mark word was "m", and whose size is
  // "word_sz".  If possible, allocate space for "obj", copy obj into it
  // (taking care to copy "m" into the mark word when done, since the mark
  // word of "obj" may have been overwritten with a forwarding pointer, and
  // also taking care to copy the klass pointer *last*).  Returns the new
  // object if successful, or else NULL.
  virtual oop par_promote(int thread_num,
                          oop obj, markOop m, size_t word_sz);

  // Undo, if possible, the most recent par_promote_alloc allocation by
  // "thread_num" ("obj", of "word_sz").
  virtual void par_promote_alloc_undo(int thread_num,
                                      HeapWord* obj, size_t word_sz);

  // Informs the current generation that all par_promote_alloc's in the
  // collection have been completed; any supporting data structures can be
  // reset.  Default is to do nothing.
  virtual void par_promote_alloc_done(int thread_num) {}

  // Informs the current generation that all oop_since_save_marks_iterates
  // performed by "thread_num" in the current collection, if any, have been
  // completed; any supporting data structures can be reset.  Default is to
  // do nothing.
  virtual void par_oop_since_save_marks_iterate_done(int thread_num) {}

  // This generation will collect all younger generations
  // during a full collection.
  virtual bool full_collects_younger_generations() const { return false; }

  // This generation does in-place marking, meaning that mark words
  // are mutated during the marking phase and presumably reinitialized
  // to a canonical value after the GC.  This is currently used by the
  // biased locking implementation to determine whether additional
  // work is required during the GC prologue and epilogue.
  virtual bool performs_in_place_marking() const { return true; }

  // Returns "true" iff collect() should subsequently be called on this
  // generation.  See comment below.
  // This is a generic implementation which can be overridden.
  //
  // Note: in the current (1.4) implementation, when genCollectedHeap's
  // incremental_collection_will_fail flag is set, all allocations are
  // slow path (the only fast-path place to allocate is DefNew, which
  // will be full if the flag is set).
  // Thus, older generations which collect younger generations should
  // test this flag and collect if it is set.
  virtual bool should_collect(bool   full,
                              size_t word_size,
                              bool   is_tlab) {
    return (full || should_allocate(word_size, is_tlab));
  }

  // Perform a garbage collection.
  // If full is true, attempt a full garbage collection of this generation.
  // Otherwise, attempt to (at least) free enough space to support an
  // allocation of the given "word_size".
  virtual void collect(bool   full,
                       bool   clear_all_soft_refs,
                       size_t word_size,
                       bool   is_tlab) = 0;

  // Perform a heap collection, attempting to create (at least) enough
  // space to support an allocation of the given "word_size".  If
  // successful, perform the allocation and return the resulting
  // "oop" (initializing the allocated block).  If the allocation is
  // still unsuccessful, return "NULL".
  virtual HeapWord* expand_and_allocate(size_t word_size,
                                        bool is_tlab,
                                        bool parallel = false) = 0;

  // Some generations may require some cleanup or preparation actions before
  // allowing a collection.  The default is to do nothing.
  virtual void gc_prologue(bool full) {};

  // Some generations may require some cleanup actions after a collection.
  // The default is to do nothing.
  virtual void gc_epilogue(bool full) {};

  // Some generations may need to be "fixed-up" after some allocation
  // activity to make them parsable again.  The default is to do nothing.
  virtual void ensure_parsability() {};

  // Time (in ms) when we were last collected, or now if a collection is
  // in progress.
  virtual jlong time_of_last_gc(jlong now) {
    // XXX See note in genCollectedHeap::millis_since_last_gc()
    NOT_PRODUCT(
      if (now < _time_of_last_gc) {
        warning("time warp: " INT64_FORMAT " to " INT64_FORMAT,
                _time_of_last_gc, now);
      }
    )
    return _time_of_last_gc;
  }

  virtual void update_time_of_last_gc(jlong now) {
    _time_of_last_gc = now;
  }

  // Generations may keep statistics about collection.  This
  // method updates those statistics.  current_level is
  // the level of the collection that has most recently
  // occurred.  This allows the generation to decide what
  // statistics are valid to collect.  For example, the
  // generation can decide to gather the amount of promoted data
  // if the collection of the younger generations has completed.
  GCStats* gc_stats() const { return _gc_stats; }
  virtual void update_gc_stats(int current_level, bool full) {}

  // Mark sweep support phase2
  virtual void prepare_for_compaction(CompactPoint* cp);
  // Mark sweep support phase3
  virtual void pre_adjust_pointers() { ShouldNotReachHere(); }
  virtual void adjust_pointers();
  // Mark sweep support phase4
  virtual void compact();
  virtual void post_compact() { ShouldNotReachHere(); }

  // Support for CMS's rescan.  In this general form we return a pointer
  // to an abstract object that can be used, based on specific previously
  // decided protocols, to exchange information between generations,
  // information that may be useful for speeding up certain types of
  // garbage collectors.  A NULL value indicates to the client that
  // no data recording is expected by the provider.  The data-recorder is
  // expected to be GC worker thread-local, with the worker index
  // indicated by "thr_num".
  virtual void* get_data_recorder(int thr_num) { return NULL; }

  // Some generations may require some cleanup actions before allowing
  // a verification.
  virtual void prepare_for_verify() {};

  // Accessing "marks".

  // This function gives a generation a chance to note a point between
  // collections.  For example, a contiguous generation might note the
  // beginning allocation point post-collection, which might allow some later
  // operations to be optimized.
  virtual void save_marks() {}

  // This function allows generations to initialize any "saved marks".  That
  // is, it should only be called when the generation is empty.
  virtual void reset_saved_marks() {}

  // This function is "true" iff no allocations have occurred in the
  // generation since the last call to "save_marks".
  virtual bool no_allocs_since_save_marks() = 0;

  // Apply "cl->do_oop" to (the addresses of) all reference fields in objects
  // allocated in the current generation since the last call to "save_marks".
  // If more objects are allocated in this generation as a result of applying
  // the closure, iterates over reference fields in those objects as well.
  // Calls "save_marks" at the end of the iteration.
  // General signature...
  virtual void oop_since_save_marks_iterate_v(OopsInGenClosure* cl) = 0;
  // ...and specializations for de-virtualization.  (The general
  // implementation of the _nv versions calls the virtual version.
  // Note that the _nv suffix is not really semantically necessary,
  // but it avoids some not-so-useful warnings on Solaris.)
#define Generation_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)          \
  virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
    oop_since_save_marks_iterate_v((OopsInGenClosure*)cl);                   \
  }
  SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(Generation_SINCE_SAVE_MARKS_DECL)

#undef Generation_SINCE_SAVE_MARKS_DECL
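
  // For illustration (assuming the specialized closure list includes the
  // pair (ScanClosure, _nv)), one expansion of the macro above is:
  //
  //   virtual void oop_since_save_marks_iterate_nv(ScanClosure* cl) {
  //     oop_since_save_marks_iterate_v((OopsInGenClosure*)cl);
  //   }
  //
  // Subclasses that override the _nv versions let callers holding a
  // statically-typed closure avoid a virtual dispatch per oop.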

  // The "requestor" generation is performing some garbage collection
  // action for which it would be useful to have scratch space.  If
  // the target is not the requestor, no gc actions will be required
  // of the target.  The requestor promises to allocate no more than
  // "max_alloc_words" in the target generation (via promotion say,
  // if the requestor is a young generation and the target is older).
  // If the target generation can provide any scratch space, it adds
  // it to "list", leaving "list" pointing to the head of the
  // augmented list.  The default is to offer no space.
  virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                  size_t max_alloc_words) {}

  // When an older generation has been collected, and perhaps resized,
  // this method will be invoked on all younger generations (from older to
  // younger), allowing them to resize themselves as appropriate.
  virtual void compute_new_size() = 0;

  // Printing
  virtual const char* name() const = 0;
  virtual const char* short_name() const = 0;

  int level() const { return _level; }

  // Attributes

  // True iff the given generation may only be the youngest generation.
  virtual bool must_be_youngest() const = 0;
  // True iff the given generation may only be the oldest generation.
  virtual bool must_be_oldest() const = 0;

  // Reference Processing accessor
  ReferenceProcessor* const ref_processor() { return _ref_processor; }

  // Iteration.

  // Iterate over all the ref-containing fields of all objects in the
  // generation, calling "cl.do_oop" on each.
  virtual void oop_iterate(OopClosure* cl);

  // Same as above, restricted to the intersection of a memory region and
  // the generation.
  virtual void oop_iterate(MemRegion mr, OopClosure* cl);

  // Iterate over all objects in the generation, calling "cl.do_object" on
  // each.
  virtual void object_iterate(ObjectClosure* cl);

  // Iterate over all objects allocated in the generation since the last
  // collection, calling "cl.do_object" on each.  The generation must have
  // been initialized properly to support this function, or else this call
  // will fail.
  virtual void object_iterate_since_last_GC(ObjectClosure* cl) = 0;

  // Apply "cl->do_oop" to (the address of) all and only all the ref fields
  // in the current generation that contain pointers to objects in younger
  // generations.  Objects allocated since the last "save_marks" call are
  // excluded.
  virtual void younger_refs_iterate(OopsInGenClosure* cl) = 0;

  // Inform a generation that it no longer contains references to objects
  // in any younger generation.  [e.g. Because younger gens are empty,
  // clear the card table.]
  virtual void clear_remembered_set() { }

  // Inform a generation that some of its objects have moved.  [e.g. The
  // generation's spaces were compacted, invalidating the card table.]
  virtual void invalidate_remembered_set() { }

  // Block abstraction.

  // Returns the address of the start of the "block" that contains the
  // address "addr".  We say "blocks" instead of "objects" since some heaps
  // may not pack objects densely; a chunk may either be an object or a
  // non-object.
  virtual HeapWord* block_start(const void* addr) const;

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const;
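
  // An illustrative walk (not part of this header) over a range using the
  // block abstraction above; "do_object" is a hypothetical callback:
  //
  //   HeapWord* p = bottom;
  //   while (p < end) {
  //     if (block_is_obj(p)) do_object(oop(p));
  //     p += block_size(p);   // chunks tile the range, so p always advances
  //   }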


  // PrintGC, PrintGCDetails support
  void print_heap_change(size_t prev_used) const;

  // PrintHeapAtGC support
  virtual void print() const;
  virtual void print_on(outputStream* st) const;

  virtual void verify(bool allow_dirty) = 0;

  struct StatRecord {
    int invocations;
    elapsedTimer accumulated_time;
    StatRecord() :
      invocations(0),
      accumulated_time(elapsedTimer()) {}
  };
 private:
  StatRecord _stat_record;
 public:
  StatRecord* stat_record() { return &_stat_record; }

  virtual void print_summary_info();
  virtual void print_summary_info_on(outputStream* st);

  // Performance Counter support
  virtual void update_counters() = 0;
  virtual CollectorCounters* counters() { return _gc_counters; }
};

// Class CardGeneration is a generation that is covered by a card table,
// and uses a card-size block-offset array to implement block_start.

// class BlockOffsetArray;
// class BlockOffsetArrayContigSpace;
class BlockOffsetSharedArray;

class CardGeneration: public Generation {
  friend class VMStructs;
 protected:
  // This is shared with other generations.
  GenRemSet* _rs;
  // This is local to this generation.
  BlockOffsetSharedArray* _bts;

  CardGeneration(ReservedSpace rs, size_t initial_byte_size, int level,
                 GenRemSet* remset);

 public:

  virtual void clear_remembered_set();

  virtual void invalidate_remembered_set();

  virtual void prepare_for_verify();
};

// OneContigSpaceCardGeneration models a heap of old objects contained in a single
// contiguous space.
//
// Garbage collection is performed using mark-compact.

class OneContigSpaceCardGeneration: public CardGeneration {
  friend class VMStructs;
  // Abstractly, this is a subtype that gets access to protected fields.
  friend class CompactingPermGen;
  friend class VM_PopulateDumpSharedSpace;

 protected:
  size_t _min_heap_delta_bytes;   // Minimum amount to expand.
  ContiguousSpace* _the_space;    // actual space holding objects
  WaterMark _last_gc;             // watermark between objects allocated before
                                  // and after last GC.

  // Grow generation by the specified number of bytes (returns false if unable to grow)
  bool grow_by(size_t bytes);
  // Grow generation to reserved size.
  bool grow_to_reserved();
  // Shrink generation by the specified number of bytes.
  void shrink_by(size_t bytes);

  // Allocation failure
  void expand(size_t bytes, size_t expand_bytes);
  void shrink(size_t bytes);

  // Accessing spaces
  ContiguousSpace* the_space() const { return _the_space; }

 public:
  OneContigSpaceCardGeneration(ReservedSpace rs, size_t initial_byte_size,
                               size_t min_heap_delta_bytes,
                               int level, GenRemSet* remset,
                               ContiguousSpace* space) :
    CardGeneration(rs, initial_byte_size, level, remset),
    _min_heap_delta_bytes(min_heap_delta_bytes), _the_space(space)
  {}

  inline bool is_in(const void* p) const;

  // Space enquiries
  size_t capacity() const;
  size_t used() const;
  size_t free() const;

  MemRegion used_region() const;

  size_t unsafe_max_alloc_nogc() const;
  size_t contiguous_available() const;

  // Iteration
  void object_iterate(ObjectClosure* blk);
  void space_iterate(SpaceClosure* blk, bool usedOnly = false);
  void object_iterate_since_last_GC(ObjectClosure* cl);

  void younger_refs_iterate(OopsInGenClosure* blk);

  inline CompactibleSpace* first_compaction_space() const;

  virtual inline HeapWord* allocate(size_t word_size, bool is_tlab);
  virtual inline HeapWord* par_allocate(size_t word_size, bool is_tlab);

  // Accessing marks
  inline WaterMark top_mark();
  inline WaterMark bottom_mark();

#define OneContig_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
  OneContig_SINCE_SAVE_MARKS_DECL(OopsInGenClosure,_v)
  SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(OneContig_SINCE_SAVE_MARKS_DECL)

  void save_marks();
  void reset_saved_marks();
  bool no_allocs_since_save_marks();

  inline size_t block_size(const HeapWord* addr) const;

  inline bool block_is_obj(const HeapWord* addr) const;

  virtual void collect(bool full,
                       bool clear_all_soft_refs,
                       size_t size,
                       bool is_tlab);
  HeapWord* expand_and_allocate(size_t size,
                                bool is_tlab,
                                bool parallel = false);

  virtual void prepare_for_verify();

  virtual void gc_epilogue(bool full);

  virtual void verify(bool allow_dirty);
  virtual void print_on(outputStream* st) const;
};