comparison src/share/vm/memory/genCollectedHeap.hpp @ 0:a61af66fc99e jdk7-b24

Initial load
author duke
date Sat, 01 Dec 2007 00:00:00 +0000
parents
children ba764ed4b6f2 c0492d52d55b
/*
 * Copyright 2000-2007 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

class SubTasksDone;

// A "GenCollectedHeap" is a SharedHeap that uses generational
// collection. It is represented with a sequence of Generation's.
class GenCollectedHeap : public SharedHeap {
  friend class GenCollectorPolicy;
  friend class Generation;
  friend class DefNewGeneration;
  friend class TenuredGeneration;
  friend class ConcurrentMarkSweepGeneration;
  friend class CMSCollector;
  friend class GenMarkSweep;
  friend class VM_GenCollectForAllocation;
  friend class VM_GenCollectFull;
  friend class VM_GenCollectFullConcurrent;
  friend class VM_GC_HeapInspection;
  friend class VM_HeapDumper;
  friend class HeapInspection;
  friend class GCCauseSetter;
  friend class VMStructs;
public:
  enum SomeConstants {
    max_gens = 10
  };

  friend class VM_PopulateDumpSharedSpace;

protected:
  // Fields:
  static GenCollectedHeap* _gch;

private:
  int _n_gens;
  Generation* _gens[max_gens];
  GenerationSpec** _gen_specs;

  // The generational collector policy.
  GenCollectorPolicy* _gen_policy;

  // If a generation would bail out of an incremental collection,
  // it sets this flag. If the flag is set, satisfy_failed_allocation
  // will attempt allocating in all generations before doing a full GC.
  bool _incremental_collection_will_fail;
  bool _last_incremental_collection_failed;

  // In support of ExplicitGCInvokesConcurrent functionality
  unsigned int _full_collections_completed;

  // Data structure for claiming the (potentially) parallel tasks in
  // (gen-specific) strong roots processing.
  SubTasksDone* _gen_process_strong_tasks;

  // In block contents verification, the number of header words to skip
  NOT_PRODUCT(static size_t _skip_header_HeapWords;)

  // GC is not allowed during the dump of the shared classes. Keep track
  // of this in order to provide a reasonable error message when terminating.
  bool _preloading_shared_classes;

protected:
  // Directs each generation up to and including "collectedGen" to recompute
  // its desired size.
  void compute_new_generation_sizes(int collectedGen);

  // Helper functions for allocation
  HeapWord* attempt_allocation(size_t size,
                               bool is_tlab,
                               bool first_only);

  // Helper function for two callbacks below.
  // Considers collection of the first max_level+1 generations.
  void do_collection(bool full,
                     bool clear_all_soft_refs,
                     size_t size,
                     bool is_tlab,
                     int max_level);

  // Callback from VM_GenCollectForAllocation operation.
  // This function does everything necessary/possible to satisfy an
  // allocation request that failed in the youngest generation that should
  // have handled it (including collection, expansion, etc.)
  HeapWord* satisfy_failed_allocation(size_t size, bool is_tlab);

  // Callback from VM_GenCollectFull operation.
  // Perform a full collection of the first max_level+1 generations.
  void do_full_collection(bool clear_all_soft_refs, int max_level);

  // Does the "cause" of GC indicate that
  // we absolutely __must__ clear soft refs?
  bool must_clear_all_soft_refs();

public:
  GenCollectedHeap(GenCollectorPolicy *policy);

  GCStats* gc_stats(int level) const;

  // Returns JNI_OK on success
  virtual jint initialize();
  char* allocate(size_t alignment, PermanentGenerationSpec* perm_gen_spec,
                 size_t* _total_reserved, int* _n_covered_regions,
                 ReservedSpace* heap_rs);

  // Performs operations required after initialization has been done.
  void post_initialize();

  // Initialize ("weak") refs processing support
  virtual void ref_processing_init();

  virtual CollectedHeap::Name kind() const {
    return CollectedHeap::GenCollectedHeap;
  }

  // The generational collector policy.
  GenCollectorPolicy* gen_policy() const { return _gen_policy; }

  // Adaptive size policy
  virtual AdaptiveSizePolicy* size_policy() {
    return gen_policy()->size_policy();
  }

  size_t capacity() const;
  size_t used() const;

  // Save the "used_region" for generations level and lower,
  // and, if perm is true, for perm gen.
  void save_used_regions(int level, bool perm);

  size_t max_capacity() const;

  HeapWord* mem_allocate(size_t size,
                         bool is_large_noref,
                         bool is_tlab,
                         bool* gc_overhead_limit_was_exceeded);

  // We may support a shared contiguous allocation area, if the youngest
  // generation does.
  bool supports_inline_contig_alloc() const;
  HeapWord** top_addr() const;
  HeapWord** end_addr() const;
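  // As a purely hypothetical illustration (not part of this interface), a
  // compiler-generated bump-pointer fast path might use the two addresses
  // above roughly as sketched below; "gch", "word_size" and the use of
  // Atomic::cmpxchg_ptr are assumptions of the sketch, not prescribed here:
  //
  //   HeapWord** top = gch->top_addr();
  //   HeapWord** end = gch->end_addr();
  //   HeapWord* old_top = *top;
  //   HeapWord* new_top = old_top + word_size;
  //   if (new_top <= *end &&
  //       (HeapWord*)Atomic::cmpxchg_ptr(new_top, top, old_top) == old_top) {
  //     // the word_size-word chunk starting at old_top now belongs to the caller
  //   }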

  // Return an estimate of the maximum allocation that could be performed
  // without triggering any collection activity. In a generational
  // collector, for example, this is probably the largest allocation that
  // could be supported in the youngest generation. It is "unsafe" because
  // no locks are taken; the result should be treated as an approximation,
  // not a guarantee.
  size_t unsafe_max_alloc();

  // Does this heap support heap inspection? (+PrintClassHistogram)
  virtual bool supports_heap_inspection() const { return true; }

  // Perform a full collection of the heap; intended for use in implementing
  // "System.gc". This implies as full a collection as the CollectedHeap
  // supports. Caller does not hold the Heap_lock on entry.
  void collect(GCCause::Cause cause);
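  // For example (hypothetically, from outside this class), a full collection
  // on behalf of System.gc() could be requested as:
  //
  //   GenCollectedHeap::heap()->collect(GCCause::_java_lang_system_gc);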

  // This interface assumes that it's being called by the
  // vm thread. It collects the heap assuming that the
  // heap lock is already held and that we are executing in
  // the context of the vm thread.
  void collect_as_vm_thread(GCCause::Cause cause);

  // The same as above but assume that the caller holds the Heap_lock.
  void collect_locked(GCCause::Cause cause);

  // Perform a full collection of the first max_level+1 generations.
  // Mostly used for testing purposes. Caller does not hold the Heap_lock on entry.
  void collect(GCCause::Cause cause, int max_level);

  // Returns "TRUE" iff "p" points into the allocated area of the heap.
  // The methods is_in(), is_in_closed_subset() and is_in_youngest() may
  // be expensive to compute in general, so, to prevent
  // their inadvertent use in product JVMs, we restrict their use to
  // assertion checking or verification only.
  bool is_in(const void* p) const;

  // override
  bool is_in_closed_subset(const void* p) const {
    if (UseConcMarkSweepGC) {
      return is_in_reserved(p);
    } else {
      return is_in(p);
    }
  }

  // Returns "TRUE" iff "p" points into the youngest generation.
  bool is_in_youngest(void* p);

  // Iteration functions.
  void oop_iterate(OopClosure* cl);
  void oop_iterate(MemRegion mr, OopClosure* cl);
  void object_iterate(ObjectClosure* cl);
  void object_iterate_since_last_GC(ObjectClosure* cl);
  Space* space_containing(const void* addr) const;

  // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
  // each address in the (reserved) heap is a member of exactly
  // one block. The defining characteristic of a block is that it is
  // possible to find its size, and thus to progress forward to the next
  // block. (Blocks may be of different sizes.) Thus, blocks may
  // represent Java objects, or they might be free blocks in a
  // free-list-based heap (or subheap), as long as the two kinds are
  // distinguishable and the size of each is determinable.

  // Returns the address of the start of the "block" that contains the
  // address "addr". We say "blocks" instead of "object" since some heaps
  // may not pack objects densely; a chunk may either be an object or a
  // non-object.
  virtual HeapWord* block_start(const void* addr) const;

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap. Assumes (and verifies in non-product
  // builds) that addr is in the allocated part of the heap and is
  // the start of a chunk.
  virtual size_t block_size(const HeapWord* addr) const;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object. Assumes (and verifies in non-product
  // builds) that addr is in the allocated part of the heap and is
  // the start of a chunk.
  virtual bool block_is_obj(const HeapWord* addr) const;
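  // A hypothetical sketch of walking a parseable range [bottom, top) with the
  // block interface above ("bottom", "top" and the helper "process_chunk" are
  // assumptions of the sketch):
  //
  //   HeapWord* cur = bottom;
  //   while (cur < top) {
  //     if (block_is_obj(cur)) {
  //       process_chunk(oop(cur));   // cur is the start of a Java object
  //     }
  //     cur += block_size(cur);      // advance to the start of the next block
  //   }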

  // Section on TLAB's.
  virtual bool supports_tlab_allocation() const;
  virtual size_t tlab_capacity(Thread* thr) const;
  virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
  virtual HeapWord* allocate_new_tlab(size_t size);

  // The "requestor" generation is performing some garbage collection
  // action for which it would be useful to have scratch space. The
  // requestor promises to allocate no more than "max_alloc_words" in any
  // older generation (via promotion say.) Any blocks of space that can
  // be provided are returned as a list of ScratchBlocks, sorted by
  // decreasing size.
  ScratchBlock* gather_scratch(Generation* requestor, size_t max_alloc_words);
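  // A hypothetical sketch of consuming the returned list (assuming the
  // ScratchBlock fields "next" and "num_words" declared in generation.hpp;
  // "gen" and "max_words" are placeholder arguments):
  //
  //   for (ScratchBlock* sb = gather_scratch(gen, max_words); sb != NULL;
  //        sb = sb->next) {
  //     // use up to sb->num_words HeapWords of scratch space within sb
  //   }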

  size_t large_typearray_limit();

  // Ensure parsability: override
  virtual void ensure_parsability(bool retire_tlabs);

  // Time in ms since the last collection of the generation that was
  // collected least recently.
  virtual jlong millis_since_last_gc();

  // Total number of full collections completed.
  unsigned int total_full_collections_completed() {
    assert(_full_collections_completed <= _total_full_collections,
           "Can't complete more collections than were started");
    return _full_collections_completed;
  }

  // Update above counter, as appropriate, at the end of a stop-the-world GC cycle
  unsigned int update_full_collections_completed();
  // Update above counter, as appropriate, at the end of a concurrent GC cycle
  unsigned int update_full_collections_completed(unsigned int count);
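  // Hypothetical illustration of how these counters can be used to wait for a
  // requested full cycle to finish (any synchronization around the counters is
  // omitted and assumed):
  //
  //   unsigned int started = total_full_collections();   // cycles started so far
  //   // ... request a (possibly concurrent) full collection ...
  //   // ... a full cycle begun after the snapshot is known to have finished
  //   //     once total_full_collections_completed() > started ...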

  // Update "time of last gc" for all constituent generations
  // to "now".
  void update_time_of_last_gc(jlong now) {
    for (int i = 0; i < _n_gens; i++) {
      _gens[i]->update_time_of_last_gc(now);
    }
    perm_gen()->update_time_of_last_gc(now);
  }

  // Update the gc statistics for each generation.
  // "current_level" is the level of the latest collection.
  void update_gc_stats(int current_level, bool full) {
    for (int i = 0; i < _n_gens; i++) {
      _gens[i]->update_gc_stats(current_level, full);
    }
    perm_gen()->update_gc_stats(current_level, full);
  }

  // Override.
  bool no_gc_in_progress() { return !is_gc_active(); }

  // Override.
  void prepare_for_verify();

  // Override.
  void verify(bool allow_dirty, bool silent);

  // Override.
  void print() const;
  void print_on(outputStream* st) const;
  virtual void print_gc_threads_on(outputStream* st) const;
  virtual void gc_threads_do(ThreadClosure* tc) const;
  virtual void print_tracing_info() const;

  // PrintGC, PrintGCDetails support
  void print_heap_change(size_t prev_used) const;
  void print_perm_heap_change(size_t perm_prev_used) const;

  // The functions below are helper functions that a subclass of
  // "CollectedHeap" can use in the implementation of its virtual
  // functions.

  class GenClosure : public StackObj {
   public:
    virtual void do_generation(Generation* gen) = 0;
  };

  // Apply "cl.do_generation" to all generations in the heap (not including
  // the permanent generation). "old_to_young" determines the order.
  void generation_iterate(GenClosure* cl, bool old_to_young);
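  // For instance, a hypothetical closure (not part of this file) that prints
  // every generation, applied from oldest to youngest:
  //
  //   class PrintGenClosure : public GenCollectedHeap::GenClosure {
  //   public:
  //     void do_generation(Generation* gen) { gen->print(); }
  //   };
  //   PrintGenClosure blk;
  //   GenCollectedHeap::heap()->generation_iterate(&blk, true /* old_to_young */);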

  void space_iterate(SpaceClosure* cl);

  // Return "true" if all generations (but perm) have reached the
  // maximal committed limit that they can reach, without a garbage
  // collection.
  virtual bool is_maximal_no_gc() const;

  // Return the generation before "gen", or else NULL.
  Generation* prev_gen(Generation* gen) const {
    int l = gen->level();
    if (l == 0) return NULL;
    else return _gens[l-1];
  }

  // Return the generation after "gen", or else NULL.
  Generation* next_gen(Generation* gen) const {
    int l = gen->level() + 1;
    if (l == _n_gens) return NULL;
    else return _gens[l];
  }

  Generation* get_gen(int i) const {
    if (i >= 0 && i < _n_gens)
      return _gens[i];
    else
      return NULL;
  }

  int n_gens() const {
    assert(_n_gens == gen_policy()->number_of_generations(), "Sanity");
    return _n_gens;
  }

  // Convenience function to be used in situations where the heap type can be
  // asserted to be this type.
  static GenCollectedHeap* heap();
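  // For example (a hypothetical use from collector code), the generations can
  // be walked with the accessors declared above:
  //
  //   GenCollectedHeap* gch = GenCollectedHeap::heap();
  //   for (Generation* g = gch->get_gen(0); g != NULL; g = gch->next_gen(g)) {
  //     // visit g, from the youngest generation (level 0) to the oldest
  //   }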

  void set_par_threads(int t);

  // Invoke the "do_oop" method of one of the closures "not_older_gens"
  // or "older_gens" on root locations for the generation at
  // "level". (The "older_gens" closure is used for scanning references
  // from older generations; "not_older_gens" is used everywhere else.)
  // If "younger_gens_as_roots" is false, younger generations are
  // not scanned as roots; in this case, the caller must be arranging to
  // scan the younger generations itself. (For example, a generation might
  // explicitly mark reachable objects in younger generations, to avoid
  // excess storage retention.) If "collecting_perm_gen" is false, then
  // roots that may only contain references to permGen objects are not
  // scanned. The "so" argument determines which of the roots
  // the closure is applied to:
  // "SO_None" does none;
  // "SO_AllClasses" applies the closure to all entries in the SystemDictionary;
  // "SO_SystemClasses" to all the "system" classes and loaders;
389 // "SO_Symbols_and_Strings" applies the closure to all entries in
390 // SymbolsTable and StringTable.
  void gen_process_strong_roots(int level, bool younger_gens_as_roots,
                                bool collecting_perm_gen,
                                SharedHeap::ScanningOption so,
                                OopsInGenClosure* older_gens,
                                OopsInGenClosure* not_older_gens);

  // Apply "blk" to all the weak roots of the system. These include
  // JNI weak roots, the code cache, system dictionary, symbol table,
  // string table, and referents of reachable weak refs.
  void gen_process_weak_roots(OopClosure* root_closure,
                              OopClosure* non_root_closure);

  // Set the saved marks of generations, if that makes sense.
  // In particular, if any generation might iterate over the oops
  // in other generations, it should call this method.
  void save_marks();

  // Apply "cur->do_oop" or "older->do_oop" to all the oops in objects
  // allocated since the last call to save_marks in generations at or above
  // "level" (including the permanent generation.) The "cur" closure is
  // applied to references in the generation at "level", and the "older"
  // closure to older (and permanent) generations.
#define GCH_SINCE_SAVE_MARKS_ITERATE_DECL(OopClosureType, nv_suffix)    \
  void oop_since_save_marks_iterate(int level,                          \
                                    OopClosureType* cur,                \
                                    OopClosureType* older);

  ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DECL)

#undef GCH_SINCE_SAVE_MARKS_ITERATE_DECL
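  // For each closure type CLS that ALL_SINCE_SAVE_MARKS_CLOSURES supplies to
  // the macro above, this declares an overload of the form (illustration
  // only; "CLS" is a placeholder):
  //
  //   void oop_since_save_marks_iterate(int level, CLS* cur, CLS* older);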

  // Returns "true" iff no allocations have occurred in any generation at
  // "level" or above (including the permanent generation) since the last
  // call to "save_marks".
  bool no_allocs_since_save_marks(int level);

  // If a generation bails out of an incremental collection,
  // it sets this flag.
  bool incremental_collection_will_fail() {
    return _incremental_collection_will_fail;
  }
  void set_incremental_collection_will_fail() {
    _incremental_collection_will_fail = true;
  }
  void clear_incremental_collection_will_fail() {
    _incremental_collection_will_fail = false;
  }

  bool last_incremental_collection_failed() const {
    return _last_incremental_collection_failed;
  }
  void set_last_incremental_collection_failed() {
    _last_incremental_collection_failed = true;
  }
  void clear_last_incremental_collection_failed() {
    _last_incremental_collection_failed = false;
  }

  // Promotion of obj into gen failed. Try to promote obj to higher non-perm
  // gens in ascending order; return the new location of obj if successful.
  // Otherwise, try expand-and-allocate for obj in each generation starting at
  // gen; return the new location of obj if successful. Otherwise, return NULL.
  oop handle_failed_promotion(Generation* gen,
                              oop obj,
                              size_t obj_size,
                              oop* ref);

private:
  // Accessor for memory state verification support
  NOT_PRODUCT(
    static size_t skip_header_HeapWords() { return _skip_header_HeapWords; }
  )

  // Override
  void check_for_non_bad_heap_word_value(HeapWord* addr,
                                         size_t size) PRODUCT_RETURN;

  // For use by mark-sweep. As implemented, mark-sweep-compact is global
  // in an essential way: compaction is performed across generations, by
  // iterating over spaces.
  void prepare_for_compaction();

  // Perform a full collection of the first max_level+1 generations.
  // This is the low level interface used by the public versions of
  // collect() and collect_locked(). Caller holds the Heap_lock on entry.
  void collect_locked(GCCause::Cause cause, int max_level);

  // Returns success or failure.
  bool create_cms_collector();

  // In support of ExplicitGCInvokesConcurrent functionality
  bool should_do_concurrent_full_gc(GCCause::Cause cause);
  void collect_mostly_concurrent(GCCause::Cause cause);

protected:
  virtual void gc_prologue(bool full);
  virtual void gc_epilogue(bool full);

public:
  virtual void preload_and_dump(TRAPS) KERNEL_RETURN;
};