/*
 * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

25 // A "CollectedHeap" is an implementation of a java heap for HotSpot. This
|
|
26 // is an abstract class: there may be many different kinds of heaps. This
|
|
27 // class defines the functions that a heap must implement, and contains
|
|
28 // infrastructure common to all heaps.
|
|
29
|
|
30 class BarrierSet;
|
|
31 class ThreadClosure;
|
|
32 class AdaptiveSizePolicy;
|
|
33 class Thread;
|
|
34
|
|
35 //
|
|
36 // CollectedHeap
|
|
37 // SharedHeap
|
|
38 // GenCollectedHeap
|
|
39 // G1CollectedHeap
|
|
40 // ParallelScavengeHeap
|
|
41 //
|
|
42 class CollectedHeap : public CHeapObj {
|
|
43 friend class VMStructs;
|
|
44 friend class IsGCActiveMark; // Block structured external access to _is_gc_active
|
|
45
|
|
46 #ifdef ASSERT
|
|
47 static int _fire_out_of_memory_count;
|
|
48 #endif
|
|
49
|
|
50 protected:
|
|
51 MemRegion _reserved;
|
|
52 BarrierSet* _barrier_set;
|
|
53 bool _is_gc_active;
|
|
54 unsigned int _total_collections; // ... started
|
|
55 unsigned int _total_full_collections; // ... started
|
|
56 size_t _max_heap_capacity;
|
|
57 NOT_PRODUCT(volatile size_t _promotion_failure_alot_count;)
|
|
58 NOT_PRODUCT(volatile size_t _promotion_failure_alot_gc_number;)
|
|
59
|
|
60 // Reason for current garbage collection. Should be set to
|
|
61 // a value reflecting no collection between collections.
|
|
62 GCCause::Cause _gc_cause;
|
|
63 GCCause::Cause _gc_lastcause;
|
|
64 PerfStringVariable* _perf_gc_cause;
|
|
65 PerfStringVariable* _perf_gc_lastcause;
|
|
66
|
|
67 // Constructor
|
|
68 CollectedHeap();
|
|
69
|
|
70 // Create a new tlab
|
|
71 virtual HeapWord* allocate_new_tlab(size_t size);
|
|
72
|
|
73 // Fix up tlabs to make the heap well-formed again,
|
|
74 // optionally retiring the tlabs.
|
|
75 virtual void fill_all_tlabs(bool retire);
|
|
76
|
|
77 // Accumulate statistics on all tlabs.
|
|
78 virtual void accumulate_statistics_all_tlabs();
|
|
79
|
|
80 // Reinitialize tlabs before resuming mutators.
|
|
81 virtual void resize_all_tlabs();
|
|
82
|
|
83 debug_only(static void check_for_valid_allocation_state();)
|
|
84
|
|
85 protected:
|
|
86 // Allocate from the current thread's TLAB, with broken-out slow path.
|
|
87 inline static HeapWord* allocate_from_tlab(Thread* thread, size_t size);
|
|
88 static HeapWord* allocate_from_tlab_slow(Thread* thread, size_t size);
|
|
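
  // Illustrative sketch (an assumption about the intended split, not the
  // actual inline implementation): the fast path carves the request out of
  // the current thread's TLAB with a bump-pointer allocation, and only the
  // slow path refills the TLAB or falls back to a shared-heap allocation:
  //
  //   HeapWord* obj = thread->tlab().allocate(size);  // bump-pointer fast path
  //   if (obj != NULL) {
  //     return obj;
  //   }
  //   return allocate_from_tlab_slow(thread, size);   // refill TLAB or go to heap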

  // Allocates an uninitialized block of the given size, or returns NULL if
  // this is impossible.
  inline static HeapWord* common_mem_allocate_noinit(size_t size, bool is_noref, TRAPS);

  // Like common_mem_allocate_noinit, but the block returned by a successful
  // allocation is guaranteed initialized to zeros.
  inline static HeapWord* common_mem_allocate_init(size_t size, bool is_noref, TRAPS);

  // Same as common_mem version, except memory is allocated in the permanent area
  // If there is no permanent area, revert to common_mem_allocate_noinit
  inline static HeapWord* common_permanent_mem_allocate_noinit(size_t size, TRAPS);

  // Same as common_mem version, except memory is allocated in the permanent area
  // If there is no permanent area, revert to common_mem_allocate_init
  inline static HeapWord* common_permanent_mem_allocate_init(size_t size, TRAPS);

  // Helper functions for (VM) allocation.
  inline static void post_allocation_setup_common(KlassHandle klass,
                                                  HeapWord* obj, size_t size);
  inline static void post_allocation_setup_no_klass_install(KlassHandle klass,
                                                            HeapWord* objPtr,
                                                            size_t size);

  inline static void post_allocation_setup_obj(KlassHandle klass,
                                               HeapWord* obj, size_t size);

  inline static void post_allocation_setup_array(KlassHandle klass,
                                                 HeapWord* obj, size_t size,
                                                 int length);

  // Clears an allocated object.
  inline static void init_obj(HeapWord* obj, size_t size);

  // Verification functions
  virtual void check_for_bad_heap_word_value(HeapWord* addr, size_t size)
    PRODUCT_RETURN;
  virtual void check_for_non_bad_heap_word_value(HeapWord* addr, size_t size)
    PRODUCT_RETURN;

 public:
  enum Name {
    Abstract,
    SharedHeap,
    GenCollectedHeap,
    ParallelScavengeHeap,
    G1CollectedHeap
  };

  virtual CollectedHeap::Name kind() const { return CollectedHeap::Abstract; }

  /**
   * Returns JNI error code JNI_ENOMEM if memory could not be allocated,
   * and JNI_OK on success.
   */
  virtual jint initialize() = 0;

  // In many heaps, there will be a need to perform some initialization activities
  // after the Universe is fully formed, but before general heap allocation is allowed.
  // This is the correct place to put such initialization methods.
  virtual void post_initialize() = 0;

  MemRegion reserved_region() const { return _reserved; }

  // Return the number of bytes currently reserved for holding objects.
  size_t reserved_obj_bytes() const { return _reserved.byte_size(); }

  // Future cleanup here. The following functions should specify bytes or
  // heapwords as part of their signature.
  virtual size_t capacity() const = 0;
  virtual size_t used() const = 0;

  // Return "true" if the part of the heap that allocates Java
  // objects has reached the maximal committed limit that it can
  // reach, without a garbage collection.
  virtual bool is_maximal_no_gc() const = 0;

  virtual size_t permanent_capacity() const = 0;
  virtual size_t permanent_used() const = 0;

  // Support for java.lang.Runtime.maxMemory(): return the maximum amount of
  // memory that the vm could make available for storing 'normal' java objects.
  // This is based on the reserved address space, but should not include space
  // that the vm uses internally for bookkeeping or temporary storage (e.g.,
  // perm gen space or, in the case of the young gen, one of the survivor
  // spaces).
  virtual size_t max_capacity() const = 0;

  // Returns "TRUE" if "p" points into the reserved area of the heap.
  bool is_in_reserved(const void* p) const {
    return _reserved.contains(p);
  }

  bool is_in_reserved_or_null(const void* p) const {
    return p == NULL || is_in_reserved(p);
  }

  // Returns "TRUE" if "p" points to the head of an allocated object in the
  // heap. Since this method can be expensive in general, we restrict its
  // use to assertion checking only.
  virtual bool is_in(const void* p) const = 0;

  bool is_in_or_null(const void* p) const {
    return p == NULL || is_in(p);
  }

  // Let's define some terms: a "closed" subset of a heap is one that
  //
  // 1) contains all currently-allocated objects, and
  //
  // 2) is closed under reference: no object in the closed subset
  //    references one outside the closed subset.
  //
  // Membership in a heap's closed subset is useful for assertions.
  // Clearly, the entire heap is a closed subset, so the default
  // implementation is to use "is_in_reserved".  But this may be too
  // liberal to perform useful checking.  Also, the "is_in" predicate
  // defines a closed subset, but may be too expensive, since "is_in"
  // verifies that its argument points to an object head.  The
  // "closed_subset" method allows a heap to define an intermediate
  // predicate, allowing more precise checking than "is_in_reserved" at
  // lower cost than "is_in."

  // One important case is a heap composed of disjoint contiguous spaces,
  // such as the Garbage-First collector.  Such heaps have a convenient
  // closed subset consisting of the allocated portions of those
  // contiguous spaces.

  // Return "TRUE" iff the given pointer points into the heap's defined
  // closed subset (which defaults to the entire heap).
  virtual bool is_in_closed_subset(const void* p) const {
    return is_in_reserved(p);
  }

  bool is_in_closed_subset_or_null(const void* p) const {
    return p == NULL || is_in_closed_subset(p);
  }
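
  // For example (an illustrative sketch; "obj" is a hypothetical oop being
  // checked), an assertion can use the cheaper predicate in debug builds:
  //
  //   assert(Universe::heap()->is_in_closed_subset_or_null(obj),
  //          "oop must be in the heap's closed subset or NULL");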

  // Returns "TRUE" if "p" is allocated as "permanent" data.
  // If the heap does not use "permanent" data, returns the same
  // value is_in_reserved() would return.
  // NOTE: this actually returns true if "p" is in the reserved space
  // for the permanent area, not that it is actually allocated (i.e.,
  // in committed space). If you need the more conservative answer use
  // is_permanent().
  virtual bool is_in_permanent(const void *p) const = 0;

  // Returns "TRUE" if "p" is in the committed area of "permanent" data.
  // If the heap does not use "permanent" data, returns the same
  // value is_in() would return.
  virtual bool is_permanent(const void *p) const = 0;

  bool is_in_permanent_or_null(const void *p) const {
    return p == NULL || is_in_permanent(p);
  }

  // Returns "TRUE" if "p" is a method oop in the
  // current heap, with high probability. This predicate
  // is not stable, in general.
  bool is_valid_method(oop p) const;

  void set_gc_cause(GCCause::Cause v) {
    if (UsePerfData) {
      _gc_lastcause = _gc_cause;
      _perf_gc_lastcause->set_value(GCCause::to_string(_gc_lastcause));
      _perf_gc_cause->set_value(GCCause::to_string(v));
    }
    _gc_cause = v;
  }
  GCCause::Cause gc_cause() { return _gc_cause; }

  // Preload classes into the shared portion of the heap, and then dump
  // that data to a file so that it can be loaded directly by another
  // VM (then terminate).
  virtual void preload_and_dump(TRAPS) { ShouldNotReachHere(); }

  // General obj/array allocation facilities.
  inline static oop obj_allocate(KlassHandle klass, int size, TRAPS);
  inline static oop array_allocate(KlassHandle klass, int size, int length, TRAPS);
  inline static oop large_typearray_allocate(KlassHandle klass, int size, int length, TRAPS);
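
  // A typical caller pattern, sketched for illustration (TRAPS/CHECK_NULL
  // usage as elsewhere in the VM; "h_klass" and "size" are hypothetical):
  //
  //   oop obj = CollectedHeap::obj_allocate(h_klass, size, CHECK_NULL);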

  // Special obj/array allocation facilities.
  // Some heaps may want to manage "permanent" data uniquely. These default
  // to the general routines if the heap does not support such handling.
  inline static oop permanent_obj_allocate(KlassHandle klass, int size, TRAPS);
  // permanent_obj_allocate_no_klass_install() does not do the installation of
  // the klass pointer in the newly created object (as permanent_obj_allocate()
  // above does). This allows for a delay in the installation of the klass
  // pointer that is needed during the creation of klassKlasses. The
  // method post_allocation_install_obj_klass() is used to install the
  // klass pointer.
  inline static oop permanent_obj_allocate_no_klass_install(KlassHandle klass,
                                                            int size,
                                                            TRAPS);
  inline static void post_allocation_install_obj_klass(KlassHandle klass,
                                                       oop obj,
                                                       int size);
  inline static oop permanent_array_allocate(KlassHandle klass, int size, int length, TRAPS);

  // Raw memory allocation facilities
  // The obj and array allocate methods are covers for these methods.
  // The permanent allocation method should default to mem_allocate if
  // permanent memory isn't supported.
  virtual HeapWord* mem_allocate(size_t size,
                                 bool is_noref,
                                 bool is_tlab,
                                 bool* gc_overhead_limit_was_exceeded) = 0;
  virtual HeapWord* permanent_mem_allocate(size_t size) = 0;
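
  // Sketch of the "cover" relationship described above (illustrative only;
  // the real covers are the inline obj/array allocators declared earlier):
  //
  //   bool gc_overhead_limit_was_exceeded;
  //   HeapWord* mem = Universe::heap()->mem_allocate(size,
  //                                                  false /* is_noref */,
  //                                                  false /* is_tlab */,
  //                                                  &gc_overhead_limit_was_exceeded);
  //   if (mem != NULL) {
  //     post_allocation_setup_obj(klass, mem, size);  // install header/klass
  //   }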

  // The boundary between a "large" and "small" array of primitives, in words.
  virtual size_t large_typearray_limit() = 0;

  // Some heaps may offer a contiguous region for shared non-blocking
  // allocation, via inlined code (by exporting the address of the top and
  // end fields defining the extent of the contiguous allocation region.)

  // This function returns "true" iff the heap supports this kind of
  // allocation.  (Default is "no".)
  virtual bool supports_inline_contig_alloc() const {
    return false;
  }
  // These functions return the addresses of the fields that define the
  // boundaries of the contiguous allocation area.  (These fields should be
  // physically near to one another.)
  virtual HeapWord** top_addr() const {
    guarantee(false, "inline contiguous allocation not supported");
    return NULL;
  }
  virtual HeapWord** end_addr() const {
    guarantee(false, "inline contiguous allocation not supported");
    return NULL;
  }
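
  // When supported, compiled code can allocate by bumping the exported top
  // field, roughly as follows (an illustrative sketch of the generated
  // logic, not actual emitted code; the real fast path updates the top
  // field atomically, e.g. with a CAS):
  //
  //   HeapWord* top     = *top_addr();
  //   HeapWord* new_top = top + size;
  //   if (new_top <= *end_addr()) {
  //     // success: the new object starts at top
  //   } else {
  //     // take the slow allocation path
  //   }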

  // Some heaps may be in an unparseable state at certain times between
  // collections. This may be necessary for efficient implementation of
  // certain allocation-related activities. Calling this function before
  // attempting to parse a heap ensures that the heap is in a parsable
  // state (provided other concurrent activity does not introduce
  // unparsability).  It is normally expected, therefore, that this
  // method is invoked with the world stopped.
  // NOTE: if you override this method, make sure you call
  // super::ensure_parsability so that the non-generational
  // part of the work gets done. See implementation of
  // CollectedHeap::ensure_parsability and, for instance,
  // that of GenCollectedHeap::ensure_parsability().
  // The argument "retire_tlabs" controls whether existing TLABs
  // are merely filled or also retired, thus preventing further
  // allocation from them and necessitating allocation of new TLABs.
  virtual void ensure_parsability(bool retire_tlabs);

  // Return an estimate of the maximum allocation that could be performed
  // without triggering any collection or expansion activity.  In a
  // generational collector, for example, this is probably the largest
  // allocation that could be supported (without expansion) in the youngest
  // generation.  It is "unsafe" because no locks are taken; the result
  // should be treated as an approximation, not a guarantee, for use in
  // heuristic resizing decisions.
  virtual size_t unsafe_max_alloc() = 0;

  // Section on thread-local allocation buffers (TLABs)
  // If the heap supports thread-local allocation buffers, it should override
  // the following methods:
  // Returns "true" iff the heap supports thread-local allocation buffers.
  // The default is "no".
  virtual bool supports_tlab_allocation() const {
    return false;
  }
  // The amount of space available for thread-local allocation buffers.
  virtual size_t tlab_capacity(Thread *thr) const {
    guarantee(false, "thread-local allocation buffers not supported");
    return 0;
  }
  // An estimate of the maximum allocation that could be performed
  // for thread-local allocation buffers without triggering any
  // collection or expansion activity.
  virtual size_t unsafe_max_tlab_alloc(Thread *thr) const {
    guarantee(false, "thread-local allocation buffers not supported");
    return 0;
  }
  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint.
  virtual bool can_elide_tlab_store_barriers() const {
    guarantee(kind() < CollectedHeap::G1CollectedHeap, "else change or refactor this");
    return true;
  }
  // If a compiler is eliding store barriers for TLAB-allocated objects,
  // there is probably a corresponding slow path which can produce
  // an object allocated anywhere.  The compiler's runtime support
  // promises to call this function on such a slow-path-allocated
  // object before performing initializations that have elided
  // store barriers.  Returns new_obj, or maybe a safer copy thereof.
  virtual oop new_store_barrier(oop new_obj);

  // Can a compiler elide a store barrier when it writes
  // a permanent oop into the heap?  Applies when the compiler
  // is storing x to the heap, where x->is_perm() is true.
  virtual bool can_elide_permanent_oop_store_barriers() const;

  // Does this heap support heap inspection (+PrintClassHistogram)?
  virtual bool supports_heap_inspection() const {
    return false;  // Until RFE 5023697 is implemented
  }

  // Perform a collection of the heap; intended for use in implementing
  // "System.gc".  This probably implies as full a collection as the
  // "CollectedHeap" supports.
  virtual void collect(GCCause::Cause cause) = 0;

  // This interface assumes that it's being called by the
  // vm thread. It collects the heap assuming that the
  // heap lock is already held and that we are executing in
  // the context of the vm thread.
  virtual void collect_as_vm_thread(GCCause::Cause cause) = 0;

  // Returns the barrier set for this heap
  BarrierSet* barrier_set() { return _barrier_set; }

  // Returns "true" iff there is a stop-world GC in progress.  (I assume
  // that it should answer "false" for the concurrent part of a concurrent
  // collector -- dld).
  bool is_gc_active() const { return _is_gc_active; }

  // Total number of GC collections (started)
  unsigned int total_collections() const { return _total_collections; }
  unsigned int total_full_collections() const { return _total_full_collections; }

  // Increment total number of GC collections (started)
  // Should be protected but used by PSMarkSweep - cleanup for 1.4.2
  void increment_total_collections(bool full = false) {
    _total_collections++;
    if (full) {
      increment_total_full_collections();
    }
  }

  void increment_total_full_collections() { _total_full_collections++; }

  // Return the AdaptiveSizePolicy for the heap.
  virtual AdaptiveSizePolicy* size_policy() = 0;

  // Iterate over all the ref-containing fields of all objects, calling
  // "cl.do_oop" on each. This includes objects in permanent memory.
  virtual void oop_iterate(OopClosure* cl) = 0;

  // Iterate over all objects, calling "cl.do_object" on each.
  // This includes objects in permanent memory.
  virtual void object_iterate(ObjectClosure* cl) = 0;

  // Behaves the same as oop_iterate, except only traverses
  // interior pointers contained in permanent memory. If there
  // is no permanent memory, does nothing.
  virtual void permanent_oop_iterate(OopClosure* cl) = 0;

  // Behaves the same as object_iterate, except only traverses
  // objects contained in permanent memory. If there is no
  // permanent memory, does nothing.
  virtual void permanent_object_iterate(ObjectClosure* cl) = 0;

  // NOTE! There is no requirement that a collector implement these
  // functions.
  //
  // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
  // each address in the (reserved) heap is a member of exactly
  // one block.  The defining characteristic of a block is that it is
  // possible to find its size, and thus to progress forward to the next
  // block.  (Blocks may be of different sizes.)  Thus, blocks may
  // represent Java objects, or they might be free blocks in a
  // free-list-based heap (or subheap), as long as the two kinds are
  // distinguishable and the size of each is determinable.

  // Returns the address of the start of the "block" that contains the
  // address "addr".  We say "blocks" instead of "object" since some heaps
  // may not pack objects densely; a chunk may either be an object or a
  // non-object.
  virtual HeapWord* block_start(const void* addr) const = 0;

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const = 0;
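
  // These primitives permit a linear walk of a parsable region, e.g. (an
  // illustrative sketch; "bottom" and "top" are hypothetical region bounds,
  // and block_size() is in HeapWords):
  //
  //   for (HeapWord* p = bottom; p < top; p += block_size(p)) {
  //     if (block_is_obj(p)) {
  //       // p is the start of a Java object
  //     } // else p starts a free (non-object) block
  //   }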

  // Returns the longest time (in ms) that has elapsed since the last
  // time that any part of the heap was examined by a garbage collection.
  virtual jlong millis_since_last_gc() = 0;

  // Perform any cleanup actions necessary before allowing a verification.
  virtual void prepare_for_verify() = 0;

  virtual void print() const = 0;
  virtual void print_on(outputStream* st) const = 0;

  // Print all GC threads (other than the VM thread)
  // used by this heap.
  virtual void print_gc_threads_on(outputStream* st) const = 0;
  void print_gc_threads() { print_gc_threads_on(tty); }
  // Iterator for all GC threads (other than VM thread)
  virtual void gc_threads_do(ThreadClosure* tc) const = 0;

  // Print any relevant tracing info that flags imply.
  // Default implementation does nothing.
  virtual void print_tracing_info() const = 0;

  // Heap verification
  virtual void verify(bool allow_dirty, bool silent) = 0;

  // Non product verification and debugging.
#ifndef PRODUCT
  // Support for PromotionFailureALot.  Return true if it's time to cause a
  // promotion failure.  The no-argument version uses
  // this->_promotion_failure_alot_count as the counter.
  inline bool promotion_should_fail(volatile size_t* count);
  inline bool promotion_should_fail();

  // Reset the PromotionFailureALot counters.  Should be called at the end of a
  // GC in which promotion failure occurred.
  inline void reset_promotion_should_fail(volatile size_t* count);
  inline void reset_promotion_should_fail();
#endif  // #ifndef PRODUCT

#ifdef ASSERT
  static int fired_fake_oom() {
    return (CIFireOOMAt > 1 && _fire_out_of_memory_count >= CIFireOOMAt);
  }
#endif
};

// Class to set and reset the GC cause for a CollectedHeap.

class GCCauseSetter : StackObj {
  CollectedHeap* _heap;
  GCCause::Cause _previous_cause;
 public:
  GCCauseSetter(CollectedHeap* heap, GCCause::Cause cause) {
    assert(SafepointSynchronize::is_at_safepoint(),
           "This method manipulates heap state without locking");
    _heap = heap;
    _previous_cause = _heap->gc_cause();
    _heap->set_gc_cause(cause);
  }

  ~GCCauseSetter() {
    assert(SafepointSynchronize::is_at_safepoint(),
           "This method manipulates heap state without locking");
    _heap->set_gc_cause(_previous_cause);
  }
};
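
// Typical usage, sketched for illustration (assumes a safepoint VM
// operation; "heap" is a hypothetical CollectedHeap pointer):
//
//   GCCauseSetter gccs(heap, GCCause::_java_lang_system_gc);
//   heap->collect_as_vm_thread(GCCause::_java_lang_system_gc);
//   // the destructor restores the previous cause on scope exit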