annotate src/share/vm/memory/genCollectedHeap.hpp @ 1905:ce6848d0666d

6968367: can_post_on_exceptions is still using VM_DeoptimizeFrame in some places
Reviewed-by: kvn, twisti

author:   never
date:     Tue, 19 Oct 2010 16:14:34 -0700
parents:  8b10f48633dc
children: a7214d79fcf1
/*
 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

class SubTasksDone;

// A "GenCollectedHeap" is a SharedHeap that uses generational
// collection.  It is represented with a sequence of Generation's.
class GenCollectedHeap : public SharedHeap {
  friend class GenCollectorPolicy;
  friend class Generation;
  friend class DefNewGeneration;
  friend class TenuredGeneration;
  friend class ConcurrentMarkSweepGeneration;
  friend class CMSCollector;
  friend class GenMarkSweep;
  friend class VM_GenCollectForAllocation;
  friend class VM_GenCollectForPermanentAllocation;
  friend class VM_GenCollectFull;
  friend class VM_GenCollectFullConcurrent;
  friend class VM_GC_HeapInspection;
  friend class VM_HeapDumper;
  friend class HeapInspection;
  friend class GCCauseSetter;
  friend class VMStructs;
public:
  enum SomeConstants {
    max_gens = 10
  };

  friend class VM_PopulateDumpSharedSpace;

protected:
  // Fields:
  static GenCollectedHeap* _gch;

private:
  int _n_gens;
  Generation* _gens[max_gens];
  GenerationSpec** _gen_specs;

  // The generational collector policy.
  GenCollectorPolicy* _gen_policy;

  // If a generation would bail out of an incremental collection,
  // it sets this flag.  If the flag is set, satisfy_failed_allocation
  // will attempt allocating in all generations before doing a full GC.
  bool _incremental_collection_will_fail;
  bool _last_incremental_collection_failed;

  // In support of ExplicitGCInvokesConcurrent functionality
  unsigned int _full_collections_completed;

  // Data structure for claiming the (potentially) parallel tasks in
  // (gen-specific) strong roots processing.
  SubTasksDone* _gen_process_strong_tasks;
  SubTasksDone* gen_process_strong_tasks() { return _gen_process_strong_tasks; }

  // In block contents verification, the number of header words to skip
  NOT_PRODUCT(static size_t _skip_header_HeapWords;)

  // GC is not allowed during the dump of the shared classes.  Keep track
  // of this in order to provide a reasonable error message when terminating.
  bool _preloading_shared_classes;

protected:
  // Directs each generation up to and including "collectedGen" to recompute
  // its desired size.
  void compute_new_generation_sizes(int collectedGen);

  // Helper functions for allocation
  HeapWord* attempt_allocation(size_t size,
                               bool   is_tlab,
                               bool   first_only);

  // Helper function for two callbacks below.
  // Considers collection of the first max_level+1 generations.
  void do_collection(bool   full,
                     bool   clear_all_soft_refs,
                     size_t size,
                     bool   is_tlab,
                     int    max_level);

  // Callback from VM_GenCollectForAllocation operation.
  // This function does everything necessary/possible to satisfy an
  // allocation request that failed in the youngest generation that should
  // have handled it (including collection, expansion, etc.)
  HeapWord* satisfy_failed_allocation(size_t size, bool is_tlab);

  // Callback from VM_GenCollectFull operation.
  // Perform a full collection of the first max_level+1 generations.
  void do_full_collection(bool clear_all_soft_refs, int max_level);

  // Does the "cause" of GC indicate that
  // we absolutely __must__ clear soft refs?
  bool must_clear_all_soft_refs();

public:
  GenCollectedHeap(GenCollectorPolicy *policy);

  GCStats* gc_stats(int level) const;

  // Returns JNI_OK on success
  virtual jint initialize();
  char* allocate(size_t alignment, PermanentGenerationSpec* perm_gen_spec,
                 size_t* _total_reserved, int* _n_covered_regions,
                 ReservedSpace* heap_rs);

  // Does operations required after initialization has been done.
  void post_initialize();

  // Initialize ("weak") refs processing support
  virtual void ref_processing_init();

  virtual CollectedHeap::Name kind() const {
    return CollectedHeap::GenCollectedHeap;
  }

  // The generational collector policy.
  GenCollectorPolicy* gen_policy() const { return _gen_policy; }

  // Adaptive size policy
  virtual AdaptiveSizePolicy* size_policy() {
    return gen_policy()->size_policy();
  }

  size_t capacity() const;
  size_t used() const;

  // Save the "used_region" for generations level and lower,
  // and, if perm is true, for perm gen.
  void save_used_regions(int level, bool perm);

  size_t max_capacity() const;

  HeapWord* mem_allocate(size_t size,
                         bool   is_large_noref,
                         bool   is_tlab,
                         bool*  gc_overhead_limit_was_exceeded);

  // We may support a shared contiguous allocation area, if the youngest
  // generation does.
  bool supports_inline_contig_alloc() const;
  HeapWord** top_addr() const;
  HeapWord** end_addr() const;

  // Return an estimate of the maximum allocation that could be performed
  // without triggering any collection activity.  In a generational
  // collector, for example, this is probably the largest allocation that
  // could be supported in the youngest generation.  It is "unsafe" because
  // no locks are taken; the result should be treated as an approximation,
  // not a guarantee.
  size_t unsafe_max_alloc();

  // Does this heap support heap inspection? (+PrintClassHistogram)
  virtual bool supports_heap_inspection() const { return true; }

  // Perform a full collection of the heap; intended for use in implementing
  // "System.gc".  This implies as full a collection as the CollectedHeap
  // supports.  Caller does not hold the Heap_lock on entry.
  void collect(GCCause::Cause cause);

  // This interface assumes that it's being called by the
  // vm thread. It collects the heap assuming that the
  // heap lock is already held and that we are executing in
  // the context of the vm thread.
  void collect_as_vm_thread(GCCause::Cause cause);

  // The same as above but assume that the caller holds the Heap_lock.
  void collect_locked(GCCause::Cause cause);

  // Perform a full collection of the first max_level+1 generations.
  // Mostly used for testing purposes. Caller does not hold the Heap_lock on entry.
  void collect(GCCause::Cause cause, int max_level);
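  // Illustrative usage sketch (not part of the original header): a
  // System.gc()-style request could be issued against the singleton heap as
  //   GenCollectedHeap::heap()->collect(GCCause::_java_lang_system_gc);
  // The GCCause value is shown purely for illustration.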

  // Returns "TRUE" iff "p" points into the allocated area of the heap.
  // The methods is_in(), is_in_closed_subset() and is_in_youngest() may
  // be expensive to compute in general, so, to prevent
  // their inadvertent use in product jvm's, we restrict their use to
  // assertion checking or verification only.
  bool is_in(const void* p) const;

  // override
  bool is_in_closed_subset(const void* p) const {
    if (UseConcMarkSweepGC) {
      return is_in_reserved(p);
    } else {
      return is_in(p);
    }
  }

  // Returns "TRUE" iff "p" points into the youngest generation.
  bool is_in_youngest(void* p);

  // Iteration functions.
  void oop_iterate(OopClosure* cl);
  void oop_iterate(MemRegion mr, OopClosure* cl);
  void object_iterate(ObjectClosure* cl);
  void safe_object_iterate(ObjectClosure* cl);
  void object_iterate_since_last_GC(ObjectClosure* cl);
  Space* space_containing(const void* addr) const;

  // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
  // each address in the (reserved) heap is a member of exactly
  // one block.  The defining characteristic of a block is that it is
  // possible to find its size, and thus to progress forward to the next
  // block.  (Blocks may be of different sizes.)  Thus, blocks may
  // represent Java objects, or they might be free blocks in a
  // free-list-based heap (or subheap), as long as the two kinds are
  // distinguishable and the size of each is determinable.

  // Returns the address of the start of the "block" that contains the
  // address "addr".  We say "blocks" instead of "object" since some heaps
  // may not pack objects densely; a chunk may either be an object or a
  // non-object.
  virtual HeapWord* block_start(const void* addr) const;

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap. Assumes (and verifies in non-product
  // builds) that addr is in the allocated part of the heap and is
  // the start of a chunk.
  virtual size_t block_size(const HeapWord* addr) const;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object. Assumes (and verifies in non-product
  // builds) that addr is in the allocated part of the heap and is
  // the start of a chunk.
  virtual bool block_is_obj(const HeapWord* addr) const;
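  // Illustrative sketch (not part of the original header), assuming the block
  // contract above: walking every block of a MemRegion "mr" inside the heap.
  //   HeapWord* cur = block_start(mr.start());
  //   while (cur < mr.end()) {
  //     if (block_is_obj(cur)) {
  //       // "cur" is the start of a Java object
  //     }
  //     cur += block_size(cur);   // advance to the start of the next block
  //   }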

  // Section on TLAB's.
  virtual bool supports_tlab_allocation() const;
  virtual size_t tlab_capacity(Thread* thr) const;
  virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
  virtual HeapWord* allocate_new_tlab(size_t size);

  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint.
  virtual bool can_elide_tlab_store_barriers() const {
    return true;
  }

  virtual bool card_mark_must_follow_store() const {
    return UseConcMarkSweepGC;
  }

  // We don't need barriers for stores to objects in the
  // young gen and, a fortiori, for initializing stores to
  // objects therein. This applies to {DefNew,ParNew}+{Tenured,CMS}
  // only and may need to be re-examined in case other
  // kinds of collectors are implemented in the future.
  virtual bool can_elide_initializing_store_barrier(oop new_obj) {
    // We wanted to assert that:-
    // assert(UseParNewGC || UseSerialGC || UseConcMarkSweepGC,
    //        "Check can_elide_initializing_store_barrier() for this collector");
    // but unfortunately the flag UseSerialGC need not necessarily always
    // be set when DefNew+Tenured are being used.
    return is_in_youngest((void*)new_obj);
  }

  // Can a compiler elide a store barrier when it writes
  // a permanent oop into the heap?  Applies when the compiler
  // is storing x to the heap, where x->is_perm() is true.
  virtual bool can_elide_permanent_oop_store_barriers() const {
    // CMS needs to see all, even intra-generational, ref updates.
    return !UseConcMarkSweepGC;
  }

  // The "requestor" generation is performing some garbage collection
  // action for which it would be useful to have scratch space.  The
  // requestor promises to allocate no more than "max_alloc_words" in any
  // older generation (via promotion say.) Any blocks of space that can
  // be provided are returned as a list of ScratchBlocks, sorted by
  // decreasing size.
  ScratchBlock* gather_scratch(Generation* requestor, size_t max_alloc_words);
  // Allow each generation to reset any scratch space that it has
  // contributed as it needs.
  void release_scratch();
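  // Illustrative sketch (not part of the original header), assuming ScratchBlock
  // is the singly linked list node with "next" and "num_words" fields declared
  // elsewhere in the GC code: a requestor could walk the list returned above as
  //   ScratchBlock* blk = gather_scratch(requestor_gen, max_alloc_words);
  //   while (blk != NULL) {
  //     // use up to blk->num_words words of scratch space starting at blk
  //     blk = blk->next;
  //   }
  // "requestor_gen" and "max_alloc_words" are placeholder names here.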

  size_t large_typearray_limit();

  // Ensure parsability: override
  virtual void ensure_parsability(bool retire_tlabs);

  // Time in ms since the longest time a collector ran
  // in any generation.
  virtual jlong millis_since_last_gc();

  // Total number of full collections completed.
  unsigned int total_full_collections_completed() {
    assert(_full_collections_completed <= _total_full_collections,
           "Can't complete more collections than were started");
    return _full_collections_completed;
  }

  // Update above counter, as appropriate, at the end of a stop-world GC cycle
  unsigned int update_full_collections_completed();
  // Update above counter, as appropriate, at the end of a concurrent GC cycle
  unsigned int update_full_collections_completed(unsigned int count);

  // Update "time of last gc" for all constituent generations
  // to "now".
  void update_time_of_last_gc(jlong now) {
    for (int i = 0; i < _n_gens; i++) {
      _gens[i]->update_time_of_last_gc(now);
    }
    perm_gen()->update_time_of_last_gc(now);
  }

  // Update the gc statistics for each generation.
  // "level" is the level of the latest collection
  void update_gc_stats(int current_level, bool full) {
    for (int i = 0; i < _n_gens; i++) {
      _gens[i]->update_gc_stats(current_level, full);
    }
    perm_gen()->update_gc_stats(current_level, full);
  }

  // Override.
  bool no_gc_in_progress() { return !is_gc_active(); }

  // Override.
  void prepare_for_verify();

  // Override.
  void verify(bool allow_dirty, bool silent, bool /* option */);

  // Override.
  void print() const;
  void print_on(outputStream* st) const;
  virtual void print_gc_threads_on(outputStream* st) const;
  virtual void gc_threads_do(ThreadClosure* tc) const;
  virtual void print_tracing_info() const;

  // PrintGC, PrintGCDetails support
  void print_heap_change(size_t prev_used) const;
  void print_perm_heap_change(size_t perm_prev_used) const;

  // The functions below are helper functions that a subclass of
  // "CollectedHeap" can use in the implementation of its virtual
  // functions.

  class GenClosure : public StackObj {
   public:
    virtual void do_generation(Generation* gen) = 0;
  };

  // Apply "cl.do_generation" to all generations in the heap (not including
  // the permanent generation).  "old_to_young" determines the order.
  void generation_iterate(GenClosure* cl, bool old_to_young);
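  // Illustrative sketch (not part of the original header): a GenClosure that
  // logs each generation's name, applied in old-to-young order.
  //   class PrintGenNameClosure : public GenCollectedHeap::GenClosure {
  //    public:
  //     virtual void do_generation(Generation* gen) {
  //       tty->print_cr("%s", gen->name());
  //     }
  //   };
  //   PrintGenNameClosure blk;
  //   GenCollectedHeap::heap()->generation_iterate(&blk, /* old_to_young */ true);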

  void space_iterate(SpaceClosure* cl);

  // Return "true" if all generations (but perm) have reached the
  // maximal committed limit that they can reach, without a garbage
  // collection.
  virtual bool is_maximal_no_gc() const;

  // Return the generation before "gen", or else NULL.
  Generation* prev_gen(Generation* gen) const {
    int l = gen->level();
    if (l == 0) return NULL;
    else return _gens[l-1];
  }

  // Return the generation after "gen", or else NULL.
  Generation* next_gen(Generation* gen) const {
    int l = gen->level() + 1;
    if (l == _n_gens) return NULL;
    else return _gens[l];
  }

  Generation* get_gen(int i) const {
    if (i >= 0 && i < _n_gens)
      return _gens[i];
    else
      return NULL;
  }

  int n_gens() const {
    assert(_n_gens == gen_policy()->number_of_generations(), "Sanity");
    return _n_gens;
  }
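  // Illustrative sketch (not part of the original header): visiting the
  // generations from youngest (level 0) to oldest by index.
  //   GenCollectedHeap* gch = GenCollectedHeap::heap();
  //   for (int i = 0; i < gch->n_gens(); i++) {
  //     Generation* g = gch->get_gen(i);
  //     // g is the generation at level i
  //   }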

  // Convenience function to be used in situations where the heap type can be
  // asserted to be this type.
  static GenCollectedHeap* heap();

  void set_par_threads(int t);


  // Invoke the "do_oop" method of one of the closures "not_older_gens"
  // or "older_gens" on root locations for the generation at
  // "level".  (The "older_gens" closure is used for scanning references
  // from older generations; "not_older_gens" is used everywhere else.)
  // If "younger_gens_as_roots" is false, younger generations are
  // not scanned as roots; in this case, the caller must be arranging to
  // scan the younger generations itself.  (For example, a generation might
  // explicitly mark reachable objects in younger generations, to avoid
  // excess storage retention.)  If "collecting_perm_gen" is false, then
  // roots that may only contain references to permGen objects are not
  // scanned.  The "so" argument determines which of the roots
  // the closure is applied to:
  // "SO_None" does none;
  // "SO_AllClasses" applies the closure to all entries in the SystemDictionary;
  // "SO_SystemClasses" to all the "system" classes and loaders;
  // "SO_Symbols_and_Strings" applies the closure to all entries in
  //    SymbolsTable and StringTable.
  void gen_process_strong_roots(int level,
                                bool younger_gens_as_roots,
                                // The remaining arguments are in an order
                                // consistent with SharedHeap::process_strong_roots:
                                bool activate_scope,
                                bool collecting_perm_gen,
                                SharedHeap::ScanningOption so,
                                OopsInGenClosure* not_older_gens,
                                bool do_code_roots,
                                OopsInGenClosure* older_gens);

  // Apply "blk" to all the weak roots of the system.  These include
  // JNI weak roots, the code cache, system dictionary, symbol table,
  // string table, and referents of reachable weak refs.
  void gen_process_weak_roots(OopClosure* root_closure,
                              CodeBlobClosure* code_roots,
                              OopClosure* non_root_closure);

  // Set the saved marks of generations, if that makes sense.
  // In particular, if any generation might iterate over the oops
  // in other generations, it should call this method.
  void save_marks();

  // Apply "cur->do_oop" or "older->do_oop" to all the oops in objects
  // allocated since the last call to save_marks in generations at or above
  // "level" (including the permanent generation.)  The "cur" closure is
  // applied to references in the generation at "level", and the "older"
  // closure to older (and permanent) generations.
#define GCH_SINCE_SAVE_MARKS_ITERATE_DECL(OopClosureType, nv_suffix)    \
  void oop_since_save_marks_iterate(int level,                          \
                                    OopClosureType* cur,                \
                                    OopClosureType* older);

  ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DECL)

#undef GCH_SINCE_SAVE_MARKS_ITERATE_DECL
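  // Illustrative expansion (not part of the original header): if ScanClosure is
  // one of the closure types supplied by ALL_SINCE_SAVE_MARKS_CLOSURES (an
  // assumption made here purely for illustration), the macro above declares
  //   void oop_since_save_marks_iterate(int level,
  //                                     ScanClosure* cur,
  //                                     ScanClosure* older);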

  // Returns "true" iff no allocations have occurred in any generation at
  // "level" or above (including the permanent generation) since the last
  // call to "save_marks".
  bool no_allocs_since_save_marks(int level);

  // If a generation bails out of an incremental collection,
  // it sets this flag.
  bool incremental_collection_will_fail() {
    return _incremental_collection_will_fail;
  }
  void set_incremental_collection_will_fail() {
    _incremental_collection_will_fail = true;
  }
  void clear_incremental_collection_will_fail() {
    _incremental_collection_will_fail = false;
  }

  bool last_incremental_collection_failed() const {
    return _last_incremental_collection_failed;
  }
  void set_last_incremental_collection_failed() {
    _last_incremental_collection_failed = true;
  }
  void clear_last_incremental_collection_failed() {
    _last_incremental_collection_failed = false;
  }

  // Promotion of obj into gen failed.  Try to promote obj to higher non-perm
  // gens in ascending order; return the new location of obj if successful.
  // Otherwise, try expand-and-allocate for obj in each generation starting at
  // gen; return the new location of obj if successful.  Otherwise, return NULL.
  oop handle_failed_promotion(Generation* gen,
                              oop obj,
                              size_t obj_size);

private:
  // Accessor for memory state verification support
  NOT_PRODUCT(
    static size_t skip_header_HeapWords() { return _skip_header_HeapWords; }
  )

  // Override
  void check_for_non_bad_heap_word_value(HeapWord* addr,
    size_t size) PRODUCT_RETURN;

  // For use by mark-sweep.  As implemented, mark-sweep-compact is global
  // in an essential way: compaction is performed across generations, by
  // iterating over spaces.
  void prepare_for_compaction();

  // Perform a full collection of the first max_level+1 generations.
  // This is the low level interface used by the public versions of
  // collect() and collect_locked(). Caller holds the Heap_lock on entry.
  void collect_locked(GCCause::Cause cause, int max_level);

  // Returns success or failure.
  bool create_cms_collector();

  // In support of ExplicitGCInvokesConcurrent functionality
  bool should_do_concurrent_full_gc(GCCause::Cause cause);
  void collect_mostly_concurrent(GCCause::Cause cause);

  // Save the tops of the spaces in all generations
  void record_gen_tops_before_GC() PRODUCT_RETURN;

protected:
  virtual void gc_prologue(bool full);
  virtual void gc_epilogue(bool full);

public:
  virtual void preload_and_dump(TRAPS) KERNEL_RETURN;
};