Mercurial > hg > graal-jvmci-8
annotate src/share/vm/memory/genCollectedHeap.hpp @ 10241:d17700c82d7d
8006088: Incompatible heap size flags accepted by VM
Summary: Make processing of minimum, initial and maximum heap size more intuitive by removing previous limitations on allowed values, and make error reporting consistent. Further, fix errors in ergonomic heap sizing.
Reviewed-by: johnc, jwilhelm, tamao
author | tschatzl |
---|---|
date | Mon, 06 May 2013 17:19:42 +0200 |
parents | a08c80e9e1e5 |
children | 71180a6e5080 |
rev | line source |
---|---|
0 | 1 /* |
6008 | 2 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1166
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1166
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1166
diff
changeset
|
21 * questions. |
0 | 22 * |
23 */ | |
24 | |
1972 | 25 #ifndef SHARE_VM_MEMORY_GENCOLLECTEDHEAP_HPP |
26 #define SHARE_VM_MEMORY_GENCOLLECTEDHEAP_HPP | |
27 | |
28 #include "gc_implementation/shared/adaptiveSizePolicy.hpp" | |
29 #include "memory/collectorPolicy.hpp" | |
30 #include "memory/generation.hpp" | |
31 #include "memory/sharedHeap.hpp" | |
32 | |
0 | 33 class SubTasksDone; |
34 | |
35 // A "GenCollectedHeap" is a SharedHeap that uses generational | |
36 // collection. It is represented with a sequence of Generation's. | |
37 class GenCollectedHeap : public SharedHeap { | |
38 friend class GenCollectorPolicy; | |
39 friend class Generation; | |
40 friend class DefNewGeneration; | |
41 friend class TenuredGeneration; | |
42 friend class ConcurrentMarkSweepGeneration; | |
43 friend class CMSCollector; | |
44 friend class GenMarkSweep; | |
45 friend class VM_GenCollectForAllocation; | |
46 friend class VM_GenCollectFull; | |
47 friend class VM_GenCollectFullConcurrent; | |
48 friend class VM_GC_HeapInspection; | |
49 friend class VM_HeapDumper; | |
50 friend class HeapInspection; | |
51 friend class GCCauseSetter; | |
52 friend class VMStructs; | |
53 public: | |
54 enum SomeConstants { | |
55 max_gens = 10 | |
56 }; | |
57 | |
58 friend class VM_PopulateDumpSharedSpace; | |
59 | |
60 protected: | |
61 // Fields: | |
62 static GenCollectedHeap* _gch; | |
63 | |
64 private: | |
65 int _n_gens; | |
66 Generation* _gens[max_gens]; | |
67 GenerationSpec** _gen_specs; | |
68 | |
69 // The generational collector policy. | |
70 GenCollectorPolicy* _gen_policy; | |
71 | |
1888
a7214d79fcf1
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
1833
diff
changeset
|
72 // Indicates that the most recent previous incremental collection failed. |
a7214d79fcf1
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
1833
diff
changeset
|
73 // The flag is cleared when an action is taken that might clear the |
a7214d79fcf1
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
1833
diff
changeset
|
74 // condition that caused that incremental collection to fail. |
a7214d79fcf1
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
1833
diff
changeset
|
75 bool _incremental_collection_failed; |
0 | 76 |
77 // In support of ExplicitGCInvokesConcurrent functionality | |
78 unsigned int _full_collections_completed; | |
79 | |
80 // Data structure for claiming the (potentially) parallel tasks in | |
81 // (gen-specific) strong roots processing. | |
82 SubTasksDone* _gen_process_strong_tasks; | |
1833
8b10f48633dc
6984287: Regularize how GC parallel workers are specified.
jmasa
parents:
1552
diff
changeset
|
83 SubTasksDone* gen_process_strong_tasks() { return _gen_process_strong_tasks; } |
0 | 84 |
85 // In block contents verification, the number of header words to skip | |
86 NOT_PRODUCT(static size_t _skip_header_HeapWords;) | |
87 | |
88 protected: | |
89 // Directs each generation up to and including "collectedGen" to recompute | |
90 // its desired size. | |
91 void compute_new_generation_sizes(int collectedGen); | |
92 | |
93 // Helper functions for allocation | |
94 HeapWord* attempt_allocation(size_t size, | |
95 bool is_tlab, | |
96 bool first_only); | |
97 | |
98 // Helper function for two callbacks below. | |
99 // Considers collection of the first max_level+1 generations. | |
100 void do_collection(bool full, | |
101 bool clear_all_soft_refs, | |
102 size_t size, | |
103 bool is_tlab, | |
104 int max_level); | |
105 | |
106 // Callback from VM_GenCollectForAllocation operation. | |
107 // This function does everything necessary/possible to satisfy an | |
108 // allocation request that failed in the youngest generation that should | |
109 // have handled it (including collection, expansion, etc.) | |
110 HeapWord* satisfy_failed_allocation(size_t size, bool is_tlab); | |
111 | |
112 // Callback from VM_GenCollectFull operation. | |
113 // Perform a full collection of the first max_level+1 generations. | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6008
diff
changeset
|
114 virtual void do_full_collection(bool clear_all_soft_refs); |
0 | 115 void do_full_collection(bool clear_all_soft_refs, int max_level); |
116 | |
117 // Does the "cause" of GC indicate that | |
118 // we absolutely __must__ clear soft refs? | |
119 bool must_clear_all_soft_refs(); | |
120 | |
121 public: | |
122 GenCollectedHeap(GenCollectorPolicy *policy); | |
123 | |
124 GCStats* gc_stats(int level) const; | |
125 | |
126 // Returns JNI_OK on success | |
127 virtual jint initialize(); | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6008
diff
changeset
|
128 char* allocate(size_t alignment, |
0 | 129 size_t* _total_reserved, int* _n_covered_regions, |
130 ReservedSpace* heap_rs); | |
131 | |
132 // Does operations required after initialization has been done. | |
133 void post_initialize(); | |
134 | |
135 // Initialize ("weak") refs processing support | |
136 virtual void ref_processing_init(); | |
137 | |
138 virtual CollectedHeap::Name kind() const { | |
139 return CollectedHeap::GenCollectedHeap; | |
140 } | |
141 | |
142 // The generational collector policy. | |
143 GenCollectorPolicy* gen_policy() const { return _gen_policy; } | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6008
diff
changeset
|
144 virtual CollectorPolicy* collector_policy() const { return (CollectorPolicy*) gen_policy(); } |
0 | 145 |
146 // Adaptive size policy | |
147 virtual AdaptiveSizePolicy* size_policy() { | |
148 return gen_policy()->size_policy(); | |
149 } | |
150 | |
151 size_t capacity() const; | |
152 size_t used() const; | |
153 | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6008
diff
changeset
|
154 // Save the "used_region" for generations level and lower. |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6008
diff
changeset
|
155 void save_used_regions(int level); |
0 | 156 |
157 size_t max_capacity() const; | |
158 | |
159 HeapWord* mem_allocate(size_t size, | |
160 bool* gc_overhead_limit_was_exceeded); | |
161 | |
162 // We may support a shared contiguous allocation area, if the youngest | |
163 // generation does. | |
164 bool supports_inline_contig_alloc() const; | |
165 HeapWord** top_addr() const; | |
166 HeapWord** end_addr() const; | |
167 | |
168 // Return an estimate of the maximum allocation that could be performed | |
169 // without triggering any collection activity. In a generational | |
170 // collector, for example, this is probably the largest allocation that | |
171 // could be supported in the youngest generation. It is "unsafe" because | |
172 // no locks are taken; the result should be treated as an approximation, | |
173 // not a guarantee. | |
174 size_t unsafe_max_alloc(); | |
175 | |
176 // Does this heap support heap inspection? (+PrintClassHistogram) | |
177 virtual bool supports_heap_inspection() const { return true; } | |
178 | |
179 // Perform a full collection of the heap; intended for use in implementing | |
180 // "System.gc". This implies as full a collection as the CollectedHeap | |
181 // supports. Caller does not hold the Heap_lock on entry. | |
182 void collect(GCCause::Cause cause); | |
183 | |
184 // The same as above but assume that the caller holds the Heap_lock. | |
185 void collect_locked(GCCause::Cause cause); | |
186 | |
187 // Perform a full collection of the first max_level+1 generations. | |
188 // Mostly used for testing purposes. Caller does not hold the Heap_lock on entry. | |
189 void collect(GCCause::Cause cause, int max_level); | |
190 | |
4708 | 191 // Returns "TRUE" iff "p" points into the committed areas of the heap. |
0 | 192 // The methods is_in(), is_in_closed_subset() and is_in_youngest() may |
193 // be expensive to compute in general, so, to prevent | |
194 // their inadvertent use in product jvm's, we restrict their use to | |
195 // assertion checking or verification only. | |
196 bool is_in(const void* p) const; | |
197 | |
198 // override | |
199 bool is_in_closed_subset(const void* p) const { | |
200 if (UseConcMarkSweepGC) { | |
201 return is_in_reserved(p); | |
202 } else { | |
203 return is_in(p); | |
204 } | |
205 } | |
206 | |
3377
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3293
diff
changeset
|
207 // Returns true if the reference is to an object in the reserved space |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3293
diff
changeset
|
208 // for the young generation. |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3293
diff
changeset
|
209 // Assumes that the young gen address range is less than that of the old gen. |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3293
diff
changeset
|
210 bool is_in_young(oop p); |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3293
diff
changeset
|
211 |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3293
diff
changeset
|
212 #ifdef ASSERT |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3293
diff
changeset
|
213 virtual bool is_in_partial_collection(const void* p); |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3293
diff
changeset
|
214 #endif |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3293
diff
changeset
|
215 |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3293
diff
changeset
|
216 virtual bool is_scavengable(const void* addr) { |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3293
diff
changeset
|
217 return is_in_young((oop)addr); |
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3293
diff
changeset
|
218 } |
0 | 219 |
220 // Iteration functions. | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6008
diff
changeset
|
221 void oop_iterate(ExtendedOopClosure* cl); |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6008
diff
changeset
|
222 void oop_iterate(MemRegion mr, ExtendedOopClosure* cl); |
0 | 223 void object_iterate(ObjectClosure* cl); |
517
e9be0e04635a
6689653: JMapPerm fails with UseConcMarkSweepIncGC and compressed oops off
jmasa
parents:
356
diff
changeset
|
224 void safe_object_iterate(ObjectClosure* cl); |
0 | 225 void object_iterate_since_last_GC(ObjectClosure* cl); |
226 Space* space_containing(const void* addr) const; | |
227 | |
228 // A CollectedHeap is divided into a dense sequence of "blocks"; that is, | |
229 // each address in the (reserved) heap is a member of exactly | |
230 // one block. The defining characteristic of a block is that it is | |
231 // possible to find its size, and thus to progress forward to the next | |
232 // block. (Blocks may be of different sizes.) Thus, blocks may | |
233 // represent Java objects, or they might be free blocks in a | |
234 // free-list-based heap (or subheap), as long as the two kinds are | |
235 // distinguishable and the size of each is determinable. | |
236 | |
237 // Returns the address of the start of the "block" that contains the | |
238 // address "addr". We say "blocks" instead of "object" since some heaps | |
239 // may not pack objects densely; a chunk may either be an object or a | |
240 // non-object. | |
241 virtual HeapWord* block_start(const void* addr) const; | |
242 | |
243 // Requires "addr" to be the start of a chunk, and returns its size. | |
244 // "addr + size" is required to be the start of a new chunk, or the end | |
245 // of the active area of the heap. Assumes (and verifies in non-product | |
246 // builds) that addr is in the allocated part of the heap and is | |
247 // the start of a chunk. | |
248 virtual size_t block_size(const HeapWord* addr) const; | |
249 | |
250 // Requires "addr" to be the start of a block, and returns "TRUE" iff | |
251 // the block is an object. Assumes (and verifies in non-product | |
252 // builds) that addr is in the allocated part of the heap and is | |
253 // the start of a chunk. | |
254 virtual bool block_is_obj(const HeapWord* addr) const; | |
255 | |
256 // Section on TLAB's. | |
257 virtual bool supports_tlab_allocation() const; | |
258 virtual size_t tlab_capacity(Thread* thr) const; | |
259 virtual size_t unsafe_max_tlab_alloc(Thread* thr) const; | |
260 virtual HeapWord* allocate_new_tlab(size_t size); | |
261 | |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
142
diff
changeset
|
262 // Can a compiler initialize a new object without store barriers? |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
142
diff
changeset
|
263 // This permission only extends from the creation of a new object |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
142
diff
changeset
|
264 // via a TLAB up to the first subsequent safepoint. |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
142
diff
changeset
|
265 virtual bool can_elide_tlab_store_barriers() const { |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
142
diff
changeset
|
266 return true; |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
142
diff
changeset
|
267 } |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
142
diff
changeset
|
268 |
1166 | 269 virtual bool card_mark_must_follow_store() const { |
270 return UseConcMarkSweepGC; | |
271 } | |
272 | |
1027
39b01ab7035a
6888898: CMS: ReduceInitialCardMarks unsafe in the presence of cms precleaning
ysr
parents:
989
diff
changeset
|
273 // We don't need barriers for stores to objects in the |
39b01ab7035a
6888898: CMS: ReduceInitialCardMarks unsafe in the presence of cms precleaning
ysr
parents:
989
diff
changeset
|
274 // young gen and, a fortiori, for initializing stores to |
39b01ab7035a
6888898: CMS: ReduceInitialCardMarks unsafe in the presence of cms precleaning
ysr
parents:
989
diff
changeset
|
275 // objects therein. This applies to {DefNew,ParNew}+{Tenured,CMS} |
39b01ab7035a
6888898: CMS: ReduceInitialCardMarks unsafe in the presence of cms precleaning
ysr
parents:
989
diff
changeset
|
276 // only and may need to be re-examined in case other |
39b01ab7035a
6888898: CMS: ReduceInitialCardMarks unsafe in the presence of cms precleaning
ysr
parents:
989
diff
changeset
|
277 // kinds of collectors are implemented in the future. |
39b01ab7035a
6888898: CMS: ReduceInitialCardMarks unsafe in the presence of cms precleaning
ysr
parents:
989
diff
changeset
|
278 virtual bool can_elide_initializing_store_barrier(oop new_obj) { |
1028
052a899eec3e
6892749: assert(UseParNewGC || UseSerialGC || UseConcMarkSweepGC, "...") fails
ysr
parents:
1027
diff
changeset
|
279 // We wanted to assert that:- |
052a899eec3e
6892749: assert(UseParNewGC || UseSerialGC || UseConcMarkSweepGC, "...") fails
ysr
parents:
1027
diff
changeset
|
280 // assert(UseParNewGC || UseSerialGC || UseConcMarkSweepGC, |
052a899eec3e
6892749: assert(UseParNewGC || UseSerialGC || UseConcMarkSweepGC, "...") fails
ysr
parents:
1027
diff
changeset
|
281 // "Check can_elide_initializing_store_barrier() for this collector"); |
052a899eec3e
6892749: assert(UseParNewGC || UseSerialGC || UseConcMarkSweepGC, "...") fails
ysr
parents:
1027
diff
changeset
|
282 // but unfortunately the flag UseSerialGC need not necessarily always |
052a899eec3e
6892749: assert(UseParNewGC || UseSerialGC || UseConcMarkSweepGC, "...") fails
ysr
parents:
1027
diff
changeset
|
283 // be set when DefNew+Tenured are being used. |
3377
2aa9ddbb9e60
7041789: 30% perf regression with c2/arm following 7017732
jmasa
parents:
3293
diff
changeset
|
284 return is_in_young(new_obj); |
1027
39b01ab7035a
6888898: CMS: ReduceInitialCardMarks unsafe in the presence of cms precleaning
ysr
parents:
989
diff
changeset
|
285 } |
39b01ab7035a
6888898: CMS: ReduceInitialCardMarks unsafe in the presence of cms precleaning
ysr
parents:
989
diff
changeset
|
286 |
0 | 287 // The "requestor" generation is performing some garbage collection |
288 // action for which it would be useful to have scratch space. The | |
289 // requestor promises to allocate no more than "max_alloc_words" in any | |
290 // older generation (via promotion say.) Any blocks of space that can | |
291 // be provided are returned as a list of ScratchBlocks, sorted by | |
292 // decreasing size. | |
293 ScratchBlock* gather_scratch(Generation* requestor, size_t max_alloc_words); | |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
142
diff
changeset
|
294 // Allow each generation to reset any scratch space that it has |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
142
diff
changeset
|
295 // contributed as it needs. |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
142
diff
changeset
|
296 void release_scratch(); |
0 | 297 |
298 // Ensure parsability: override | |
299 virtual void ensure_parsability(bool retire_tlabs); | |
300 | |
301 // Time in ms since the longest time a collector ran in | |
302 // in any generation. | |
303 virtual jlong millis_since_last_gc(); | |
304 | |
305 // Total number of full collections completed. | |
306 unsigned int total_full_collections_completed() { | |
307 assert(_full_collections_completed <= _total_full_collections, | |
308 "Can't complete more collections than were started"); | |
309 return _full_collections_completed; | |
310 } | |
311 | |
312 // Update above counter, as appropriate, at the end of a stop-world GC cycle | |
313 unsigned int update_full_collections_completed(); | |
314 // Update above counter, as appropriate, at the end of a concurrent GC cycle | |
315 unsigned int update_full_collections_completed(unsigned int count); | |
316 | |
317 // Update "time of last gc" for all constituent generations | |
318 // to "now". | |
319 void update_time_of_last_gc(jlong now) { | |
320 for (int i = 0; i < _n_gens; i++) { | |
321 _gens[i]->update_time_of_last_gc(now); | |
322 } | |
323 } | |
324 | |
325 // Update the gc statistics for each generation. | |
326 // "level" is the level of the latest collection | |
327 void update_gc_stats(int current_level, bool full) { | |
328 for (int i = 0; i < _n_gens; i++) { | |
329 _gens[i]->update_gc_stats(current_level, full); | |
330 } | |
331 } | |
332 | |
333 // Override. | |
334 bool no_gc_in_progress() { return !is_gc_active(); } | |
335 | |
336 // Override. | |
337 void prepare_for_verify(); | |
338 | |
339 // Override. | |
6008 | 340 void verify(bool silent, VerifyOption option); |
0 | 341 |
342 // Override. | |
4073
53074c2c4600
7099849: G1: include heap region information in hs_err files
tonyp
parents:
3774
diff
changeset
|
343 virtual void print_on(outputStream* st) const; |
0 | 344 virtual void print_gc_threads_on(outputStream* st) const; |
345 virtual void gc_threads_do(ThreadClosure* tc) const; | |
346 virtual void print_tracing_info() const; | |
9076
7b835924c31c
8011872: Include Bit Map addresses in the hs_err files
stefank
parents:
6725
diff
changeset
|
347 virtual void print_on_error(outputStream* st) const; |
0 | 348 |
349 // PrintGC, PrintGCDetails support | |
350 void print_heap_change(size_t prev_used) const; | |
351 | |
352 // The functions below are helper functions that a subclass of | |
353 // "CollectedHeap" can use in the implementation of its virtual | |
354 // functions. | |
355 | |
356 class GenClosure : public StackObj { | |
357 public: | |
358 virtual void do_generation(Generation* gen) = 0; | |
359 }; | |
360 | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6008
diff
changeset
|
361 // Apply "cl.do_generation" to all generations in the heap |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6008
diff
changeset
|
362 // "old_to_young" determines the order. |
0 | 363 void generation_iterate(GenClosure* cl, bool old_to_young); |
364 | |
365 void space_iterate(SpaceClosure* cl); | |
366 | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6008
diff
changeset
|
367 // Return "true" if all generations have reached the |
0 | 368 // maximal committed limit that they can reach, without a garbage |
369 // collection. | |
370 virtual bool is_maximal_no_gc() const; | |
371 | |
372 // Return the generation before "gen", or else NULL. | |
373 Generation* prev_gen(Generation* gen) const { | |
374 int l = gen->level(); | |
375 if (l == 0) return NULL; | |
376 else return _gens[l-1]; | |
377 } | |
378 | |
379 // Return the generation after "gen", or else NULL. | |
380 Generation* next_gen(Generation* gen) const { | |
381 int l = gen->level() + 1; | |
382 if (l == _n_gens) return NULL; | |
383 else return _gens[l]; | |
384 } | |
385 | |
386 Generation* get_gen(int i) const { | |
387 if (i >= 0 && i < _n_gens) | |
388 return _gens[i]; | |
389 else | |
390 return NULL; | |
391 } | |
392 | |
393 int n_gens() const { | |
394 assert(_n_gens == gen_policy()->number_of_generations(), "Sanity"); | |
395 return _n_gens; | |
396 } | |
397 | |
398 // Convenience function to be used in situations where the heap type can be | |
399 // asserted to be this type. | |
400 static GenCollectedHeap* heap(); | |
401 | |
4728
441e946dc1af
7121618: Change type of number of GC workers to unsigned int.
jmasa
parents:
4708
diff
changeset
|
402 void set_par_threads(uint t); |
0 | 403 |
404 // Invoke the "do_oop" method of one of the closures "not_older_gens" | |
405 // or "older_gens" on root locations for the generation at | |
406 // "level". (The "older_gens" closure is used for scanning references | |
407 // from older generations; "not_older_gens" is used everywhere else.) | |
408 // If "younger_gens_as_roots" is false, younger generations are | |
409 // not scanned as roots; in this case, the caller must be arranging to | |
410 // scan the younger generations itself. (For example, a generation might | |
411 // explicitly mark reachable objects in younger generations, to avoid | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6008
diff
changeset
|
412 // excess storage retention.) |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6008
diff
changeset
|
413 // The "so" argument determines which of the roots |
0 | 414 // the closure is applied to: |
415 // "SO_None" does none; | |
416 // "SO_AllClasses" applies the closure to all entries in the SystemDictionary; | |
417 // "SO_SystemClasses" to all the "system" classes and loaders; | |
3293
1f4413413144
7039089: G1: changeset for 7037276 broke heap verification, and related cleanups
ysr
parents:
1994
diff
changeset
|
418 // "SO_Strings" applies the closure to all entries in the StringTable. |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
845
diff
changeset
|
419 void gen_process_strong_roots(int level, |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
845
diff
changeset
|
420 bool younger_gens_as_roots, |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
845
diff
changeset
|
421 // The remaining arguments are in an order |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
845
diff
changeset
|
422 // consistent with SharedHeap::process_strong_roots: |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
845
diff
changeset
|
423 bool activate_scope, |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6008
diff
changeset
|
424 bool is_scavenging, |
0 | 425 SharedHeap::ScanningOption so, |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
845
diff
changeset
|
426 OopsInGenClosure* not_older_gens, |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
845
diff
changeset
|
427 bool do_code_roots, |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6008
diff
changeset
|
428 OopsInGenClosure* older_gens, |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6008
diff
changeset
|
429 KlassClosure* klass_closure); |
0 | 430 |
431 // Apply "blk" to all the weak roots of the system. These include | |
432 // JNI weak roots, the code cache, system dictionary, symbol table, | |
433 // string table, and referents of reachable weak refs. | |
434 void gen_process_weak_roots(OopClosure* root_closure, | |
10179
a08c80e9e1e5
8012687: Remove unused is_root checks and closures
stefank
parents:
9076
diff
changeset
|
435 CodeBlobClosure* code_roots); |
0 | 436 |
437 // Set the saved marks of generations, if that makes sense. | |
438 // In particular, if any generation might iterate over the oops | |
439 // in other generations, it should call this method. | |
440 void save_marks(); | |
441 | |
442 // Apply "cur->do_oop" or "older->do_oop" to all the oops in objects | |
443 // allocated since the last call to save_marks in generations at or above | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6008
diff
changeset
|
444 // "level". The "cur" closure is |
0 | 445 // applied to references in the generation at "level", and the "older" |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6008
diff
changeset
|
446 // closure to older generations. |
0 | 447 #define GCH_SINCE_SAVE_MARKS_ITERATE_DECL(OopClosureType, nv_suffix) \ |
448 void oop_since_save_marks_iterate(int level, \ | |
449 OopClosureType* cur, \ | |
450 OopClosureType* older); | |
451 | |
452 ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DECL) | |
453 | |
454 #undef GCH_SINCE_SAVE_MARKS_ITERATE_DECL | |
455 | |
456 // Returns "true" iff no allocations have occurred in any generation at | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6008
diff
changeset
|
457 // "level" or above since the last |
0 | 458 // call to "save_marks". |
459 bool no_allocs_since_save_marks(int level); | |
460 | |
1888
a7214d79fcf1
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
1833
diff
changeset
|
461 // Returns true if an incremental collection is likely to fail. |
1994
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
462 // We optionally consult the young gen, if asked to do so; |
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
463 // otherwise we base our answer on whether the previous incremental |
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
464 // collection attempt failed with no corrective action as of yet. |
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
465 bool incremental_collection_will_fail(bool consult_young) { |
1888
a7214d79fcf1
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
1833
diff
changeset
|
466 // Assumes a 2-generation system; the first disjunct remembers if an |
a7214d79fcf1
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
1833
diff
changeset
|
467 // incremental collection failed, even when we thought (second disjunct) |
a7214d79fcf1
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
1833
diff
changeset
|
468 // that it would not. |
a7214d79fcf1
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
1833
diff
changeset
|
469 assert(heap()->collector_policy()->is_two_generation_policy(), |
a7214d79fcf1
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
1833
diff
changeset
|
470 "the following definition may not be suitable for an n(>2)-generation system"); |
1994
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
471 return incremental_collection_failed() || |
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
472 (consult_young && !get_gen(0)->collection_attempt_is_safe()); |
1888
a7214d79fcf1
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
1833
diff
changeset
|
473 } |
a7214d79fcf1
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
1833
diff
changeset
|
474 |
0 | 475 // If a generation bails out of an incremental collection, |
476 // it sets this flag. | |
1888
a7214d79fcf1
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
1833
diff
changeset
|
477 bool incremental_collection_failed() const { |
a7214d79fcf1
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
1833
diff
changeset
|
478 return _incremental_collection_failed; |
0 | 479 } |
1888
a7214d79fcf1
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
1833
diff
changeset
|
480 void set_incremental_collection_failed() { |
a7214d79fcf1
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
1833
diff
changeset
|
481 _incremental_collection_failed = true; |
0 | 482 } |
1888
a7214d79fcf1
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
1833
diff
changeset
|
483 void clear_incremental_collection_failed() { |
a7214d79fcf1
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
1833
diff
changeset
|
484 _incremental_collection_failed = false; |
0 | 485 } |
486 | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6008
diff
changeset
|
487 // Promotion of obj into gen failed. Try to promote obj to higher |
0 | 488 // gens in ascending order; return the new location of obj if successful. |
489 // Otherwise, try expand-and-allocate for obj in each generation starting at | |
490 // gen; return the new location of obj if successful. Otherwise, return NULL. | |
491 oop handle_failed_promotion(Generation* gen, | |
492 oop obj, | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
493 size_t obj_size); |
0 | 494 |
495 private: | |
496 // Accessor for memory state verification support | |
497 NOT_PRODUCT( | |
498 static size_t skip_header_HeapWords() { return _skip_header_HeapWords; } | |
499 ) | |
500 | |
501 // Override | |
502 void check_for_non_bad_heap_word_value(HeapWord* addr, | |
503 size_t size) PRODUCT_RETURN; | |
504 | |
505 // For use by mark-sweep. As implemented, mark-sweep-compact is global | |
506 // in an essential way: compaction is performed across generations, by | |
507 // iterating over spaces. | |
508 void prepare_for_compaction(); | |
509 | |
510 // Perform a full collection of the first max_level+1 generations. | |
511 // This is the low level interface used by the public versions of | |
512 // collect() and collect_locked(). Caller holds the Heap_lock on entry. | |
513 void collect_locked(GCCause::Cause cause, int max_level); | |
514 | |
515 // Returns success or failure. | |
516 bool create_cms_collector(); | |
517 | |
518 // In support of ExplicitGCInvokesConcurrent functionality | |
519 bool should_do_concurrent_full_gc(GCCause::Cause cause); | |
520 void collect_mostly_concurrent(GCCause::Cause cause); | |
521 | |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
142
diff
changeset
|
522 // Save the tops of the spaces in all generations |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
142
diff
changeset
|
523 void record_gen_tops_before_GC() PRODUCT_RETURN; |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
142
diff
changeset
|
524 |
0 | 525 protected: |
526 virtual void gc_prologue(bool full); | |
527 virtual void gc_epilogue(bool full); | |
528 }; | |
1972 | 529 |
530 #endif // SHARE_VM_MEMORY_GENCOLLECTEDHEAP_HPP |