annotate src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp @ 2267:02f78cfa4656

7020992: jmm_DumpThreads should not allocate system object arrays outside the perm gen
Summary: Allocate ordinary object arrays
Reviewed-by: ysr, never, mchung

author:   stefank
date:     Mon, 21 Feb 2011 11:26:45 +0100
parents:  f95d63e2154a
children: 336d17dff7cc
/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/adjoiningGenerations.hpp"
#include "gc_implementation/parallelScavenge/adjoiningVirtualSpaces.hpp"
#include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/vmPSOperations.hpp"
#include "memory/gcLocker.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/vmError.hpp"

PSYoungGen*  ParallelScavengeHeap::_young_gen = NULL;
PSOldGen*    ParallelScavengeHeap::_old_gen = NULL;
PSPermGen*   ParallelScavengeHeap::_perm_gen = NULL;
PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = NULL;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
ParallelScavengeHeap* ParallelScavengeHeap::_psh = NULL;
GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL;

static void trace_gen_sizes(const char* const str,
                            size_t pg_min, size_t pg_max,
                            size_t og_min, size_t og_max,
                            size_t yg_min, size_t yg_max)
{
  if (TracePageSizes) {
    tty->print_cr("%s: " SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT,
                  str, pg_min / K, pg_max / K,
                  og_min / K, og_max / K,
                  yg_min / K, yg_max / K,
                  (pg_max + og_max + yg_max) / K);
  }
}

jint ParallelScavengeHeap::initialize() {
  CollectedHeap::pre_initialize();

  // Cannot be initialized until after the flags are parsed
  // GenerationSizer flag_parser;
  _collector_policy = new GenerationSizer();

  size_t yg_min_size = _collector_policy->min_young_gen_size();
  size_t yg_max_size = _collector_policy->max_young_gen_size();
  size_t og_min_size = _collector_policy->min_old_gen_size();
  size_t og_max_size = _collector_policy->max_old_gen_size();
  // Why isn't there a min_perm_gen_size()?
  size_t pg_min_size = _collector_policy->perm_gen_size();
  size_t pg_max_size = _collector_policy->max_perm_gen_size();

  trace_gen_sizes("ps heap raw",
                  pg_min_size, pg_max_size,
                  og_min_size, og_max_size,
                  yg_min_size, yg_max_size);

  // The ReservedSpace ctor used below requires that the page size for the perm
  // gen is <= the page size for the rest of the heap (young + old gens).
  const size_t og_page_sz = os::page_size_for_region(yg_min_size + og_min_size,
                                                     yg_max_size + og_max_size,
                                                     8);
  const size_t pg_page_sz = MIN2(os::page_size_for_region(pg_min_size,
                                                          pg_max_size, 16),
                                 og_page_sz);

  const size_t pg_align = set_alignment(_perm_gen_alignment,  pg_page_sz);
  const size_t og_align = set_alignment(_old_gen_alignment,   og_page_sz);
  const size_t yg_align = set_alignment(_young_gen_alignment, og_page_sz);

  // Update sizes to reflect the selected page size(s).
  //
  // NEEDS_CLEANUP. The default TwoGenerationCollectorPolicy uses NewRatio; it
  // should check UseAdaptiveSizePolicy. Changes from generationSizer could
  // move to the common code.
  yg_min_size = align_size_up(yg_min_size, yg_align);
  yg_max_size = align_size_up(yg_max_size, yg_align);
  size_t yg_cur_size =
    align_size_up(_collector_policy->young_gen_size(), yg_align);
  yg_cur_size = MAX2(yg_cur_size, yg_min_size);

  og_min_size = align_size_up(og_min_size, og_align);
  og_max_size = align_size_up(og_max_size, og_align);
  size_t og_cur_size =
    align_size_up(_collector_policy->old_gen_size(), og_align);
  og_cur_size = MAX2(og_cur_size, og_min_size);

  pg_min_size = align_size_up(pg_min_size, pg_align);
  pg_max_size = align_size_up(pg_max_size, pg_align);
  size_t pg_cur_size = pg_min_size;

  trace_gen_sizes("ps heap rnd",
                  pg_min_size, pg_max_size,
                  og_min_size, og_max_size,
                  yg_min_size, yg_max_size);

  const size_t total_reserved = pg_max_size + og_max_size + yg_max_size;
  char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);

  // The main part of the heap (old gen + young gen) can often use a larger page
  // size than is needed or wanted for the perm gen. Use the "compound
  // alignment" ReservedSpace ctor to avoid having to use the same page size for
  // all gens.

  ReservedHeapSpace heap_rs(pg_max_size, pg_align, og_max_size + yg_max_size,
                            og_align, addr);

  if (UseCompressedOops) {
    if (addr != NULL && !heap_rs.is_reserved()) {
      // Failed to reserve at specified address - the requested memory
      // region is taken already, for example, by 'java' launcher.
      // Try again to reserve heap higher.
      addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
      ReservedHeapSpace heap_rs0(pg_max_size, pg_align, og_max_size + yg_max_size,
                                 og_align, addr);
      if (addr != NULL && !heap_rs0.is_reserved()) {
        // Failed to reserve at specified address again - give up.
        addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
        assert(addr == NULL, "");
        ReservedHeapSpace heap_rs1(pg_max_size, pg_align, og_max_size + yg_max_size,
                                   og_align, addr);
        heap_rs = heap_rs1;
      } else {
        heap_rs = heap_rs0;
      }
    }
  }

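  // Note on the retry ladder above: the three preferred_heap_base() modes
  // trade reservation flexibility against oop-decoding cost. With
  // UnscaledNarrowOop a compressed oop is a plain address; with
  // ZeroBasedNarrowOop decoding is just a shift from base 0; with
  // HeapBasedNarrowOop decoding also needs a heap-base add.
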
  os::trace_page_sizes("ps perm", pg_min_size, pg_max_size, pg_page_sz,
                       heap_rs.base(), pg_max_size);
  os::trace_page_sizes("ps main", og_min_size + yg_min_size,
                       og_max_size + yg_max_size, og_page_sz,
                       heap_rs.base() + pg_max_size,
                       heap_rs.size() - pg_max_size);
  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  _reserved = MemRegion((HeapWord*)heap_rs.base(),
                        (HeapWord*)(heap_rs.base() + heap_rs.size()));

  CardTableExtension* const barrier_set = new CardTableExtension(_reserved, 3);
  _barrier_set = barrier_set;
  oopDesc::set_bs(_barrier_set);
  if (_barrier_set == NULL) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for barrier set");
    return JNI_ENOMEM;
  }

  // Initial young gen size is 4 Mb
  //
  // XXX - what about flag_parser.young_gen_size()?
  const size_t init_young_size = align_size_up(4 * M, yg_align);
  yg_cur_size = MAX2(MIN2(init_young_size, yg_max_size), yg_cur_size);

  // Split the reserved space into perm gen and the main heap (everything else).
  // The main heap uses a different alignment.
  ReservedSpace perm_rs = heap_rs.first_part(pg_max_size);
  ReservedSpace main_rs = heap_rs.last_part(pg_max_size, og_align);

  // Make up the generations
  // Calculate the maximum size that a generation can grow. This
  // includes growth into the other generation. Note that the
  // parameter _max_gen_size is kept as the maximum
  // size of the generation as the boundaries currently stand.
  // _max_gen_size is still used as that value.
  double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;

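  // AdjoiningGenerations lays the old gen and young gen out back-to-back in
  // main_rs. With UseAdaptiveGCBoundary the boundary between the two can be
  // moved later (see resize_young_gen() and resize_old_gen() below), which is
  // why the assert further down checks that the two virtual spaces meet.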
  _gens = new AdjoiningGenerations(main_rs,
                                   og_cur_size,
                                   og_min_size,
                                   og_max_size,
                                   yg_cur_size,
                                   yg_min_size,
                                   yg_max_size,
                                   yg_align);

  _old_gen = _gens->old_gen();
  _young_gen = _gens->young_gen();

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             intra_heap_alignment(),
                             max_gc_pause_sec,
                             max_gc_minor_pause_sec,
                             GCTimeRatio
                             );

  _perm_gen = new PSPermGen(perm_rs,
                            pg_align,
                            pg_cur_size,
                            pg_cur_size,
                            pg_max_size,
                            "perm", 2);

  assert(!UseAdaptiveGCBoundary ||
    (old_gen()->virtual_space()->high_boundary() ==
     young_gen()->virtual_space()->low_boundary()),
    "Boundaries must meet");
  // initialize the policy counters - 2 collectors, 3 generations
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 3, _size_policy);
  _psh = this;

  // Set up the GCTaskManager
  _gc_task_manager = GCTaskManager::create(ParallelGCThreads);

  if (UseParallelOldGC && !PSParallelCompact::initialize()) {
    return JNI_ENOMEM;
  }

  return JNI_OK;
}

void ParallelScavengeHeap::post_initialize() {
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  if (UseParallelOldGC) {
    PSParallelCompact::post_initialize();
  } else {
    PSMarkSweep::initialize();
  }
  PSPromotionManager::initialize();
}

void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  perm_gen()->update_counters();
}

size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}

size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
  return value;
}

bool ParallelScavengeHeap::is_maximal_no_gc() const {
  return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc();
}


size_t ParallelScavengeHeap::permanent_capacity() const {
  return perm_gen()->capacity_in_bytes();
}

size_t ParallelScavengeHeap::permanent_used() const {
  return perm_gen()->used_in_bytes();
}

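// max_capacity() starts from the whole reserved region and subtracts the
// perm gen and one survivor space: the scavenger's to-space is never
// available to ordinary Java allocations.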
size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  estimated -= perm_gen()->reserved().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}

bool ParallelScavengeHeap::is_in(const void* p) const {
  if (young_gen()->is_in(p)) {
    return true;
  }

  if (old_gen()->is_in(p)) {
    return true;
  }

  if (perm_gen()->is_in(p)) {
    return true;
  }

  return false;
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  if (young_gen()->is_in_reserved(p)) {
    return true;
  }

  if (old_gen()->is_in_reserved(p)) {
    return true;
  }

  if (perm_gen()->is_in_reserved(p)) {
    return true;
  }

  return false;
}

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection. That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how you allocate memory without
// attempting garbage collection. It is okay to grab locks and
// expand the heap, if that can be done without coming to a safepoint.
// It is likely that the basic allocation policy will not be very
// aggressive.
//
// The failed allocation policy is invoked from the VM thread after
// the basic allocation policy is unable to satisfy a mem_allocate
// request. This policy needs to cover the entire range of collection,
// heap expansion, and out-of-memory conditions. It should make every
// attempt to allocate the requested memory.

// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(
                                     size_t size,
                                     bool is_noref,
                                     bool is_tlab,
                                     bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // In general gc_overhead_limit_was_exceeded should be false, so
  // set it to false here and reset it to true only if the gc time
  // limit is being exceeded as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = young_gen()->allocate(size, is_tlab);

  uint loop_count = 0;
  uint gc_count = 0;

  while (result == NULL) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = Universe::heap()->total_collections();

      result = young_gen()->allocate(size, is_tlab);

      // (1) If the requested object is too large to easily fit in the
      //     young_gen, or
      // (2) If GC is locked out via GCLocker, young gen is full and
      //     the need for a GC already signalled to GCLocker (done
      //     at a safepoint),
      // ... then, rather than force a safepoint and (a potentially futile)
      // collection (attempt) for each allocation, try allocation directly
      // in old_gen. For case (2) above, we may in the future allow
      // TLAB allocation directly in the old gen.
      if (result != NULL) {
        return result;
      }
      if (!is_tlab &&
          size >= (young_gen()->eden_space()->capacity_in_words(Thread::current()) / 2)) {
        result = old_gen()->allocate(size, is_tlab);
        if (result != NULL) {
          return result;
        }
      }
      if (GC_locker::is_active_and_needs_gc()) {
        // GC is locked out. If this is a TLAB allocation,
        // return NULL; the requestor will retry allocation
        // of an individual object at a time.
        if (is_tlab) {
          return NULL;
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {

      // Generate a VM operation
      VM_ParallelGCFailedAllocation op(size, is_tlab, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_or_null(op.result()),
          "result not in heap");

        // If GC was locked out during VM operation then retry allocation
        // and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }

        // Exit the loop if the gc time limit has been exceeded.
        // The allocation must have failed above ("result" guarding
        // this path is NULL) and the most recent collection has exceeded the
        // gc overhead limit (although enough may have been collected to
        // satisfy the allocation). Exit the loop so that an out-of-memory
        // will be thrown (return a NULL, ignoring the contents of
        // op.result()), but clear gc_overhead_limit_exceeded so that the
        // next collection starts with a clean slate (i.e., forgets about
        // previous overhead excesses). Fill op.result() with a filler
        // object so that the heap remains parsable.
        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
        const bool softrefs_clear = collector_policy()->all_soft_refs_clear();
        assert(!limit_exceeded || softrefs_clear, "Should have been cleared");
        if (limit_exceeded && softrefs_clear) {
          *gc_overhead_limit_was_exceeded = true;
          size_policy()->set_gc_overhead_limit_exceeded(false);
          if (PrintGCDetails && Verbose) {
            gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: "
              "return NULL because gc_overhead_limit_exceeded is set");
          }
          if (op.result() != NULL) {
            CollectedHeap::fill_with_object(op.result(), size);
          }
          return NULL;
        }

        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::mem_allocate retries %d times \n\t"
              " size=%d %s", loop_count, size, is_tlab ? "(TLAB)" : "");
    }
  }

  return result;
}

// Failed allocation policy. Must be called from the VM thread, and
// only at a safepoint! Note that this method has policy for allocation
// flow, and NOT collection policy. We do not check for gc time over the
// limit here; that is the responsibility of the heap-specific collection
// methods. This method decides where to attempt allocations, and when to
// attempt collections, but sets no collection-specific policy.
HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size, bool is_tlab) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  size_t mark_sweep_invocation_count = total_invocations();

  // We assume (and assert!) that an allocation at this point will fail
  // unless we collect.

  // First level allocation failure, scavenge and allocate in young gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  PSScavenge::invoke();
  HeapWord* result = young_gen()->allocate(size, is_tlab);

  // Second level allocation failure.
  // Mark sweep and allocate in young generation.
  if (result == NULL) {
    // There is some chance the scavenge method decided to invoke mark_sweep.
    // Don't mark sweep twice if so.
    if (mark_sweep_invocation_count == total_invocations()) {
      invoke_full_gc(false);
      result = young_gen()->allocate(size, is_tlab);
    }
  }

  // Third level allocation failure.
  // After mark sweep and young generation allocation failure,
  // allocate in old generation.
  if (result == NULL && !is_tlab) {
    result = old_gen()->allocate(size, is_tlab);
  }

  // Fourth level allocation failure. We're running out of memory.
  // More complete mark sweep and allocate in young generation.
  if (result == NULL) {
    invoke_full_gc(true);
    result = young_gen()->allocate(size, is_tlab);
  }

  // Fifth level allocation failure.
  // After more complete mark sweep, allocate in old generation.
  if (result == NULL && !is_tlab) {
    result = old_gen()->allocate(size, is_tlab);
  }

  return result;
}

//
// This is the policy loop for allocating in the permanent generation.
// If the initial allocation fails, we create a vm operation which will
// cause a collection.
HeapWord* ParallelScavengeHeap::permanent_mem_allocate(size_t size) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  HeapWord* result;

  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  do {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count      = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();

      result = perm_gen()->allocate_permanent(size);

      if (result != NULL) {
        return result;
      }

      if (GC_locker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {

      // Exit the loop if the gc time limit has been exceeded.
      // The allocation must have failed above (result must be NULL),
      // and the most recent collection must have exceeded the
      // gc time limit. Exit the loop so that an out-of-memory
      // will be thrown (returning a NULL will do that), but
      // clear gc_overhead_limit_exceeded so that the next collection
      // will succeed if the application decides to handle the
      // out-of-memory and tries to go on.
      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
      if (limit_exceeded) {
        size_policy()->set_gc_overhead_limit_exceeded(false);
        if (PrintGCDetails && Verbose) {
          gclog_or_tty->print_cr("ParallelScavengeHeap::permanent_mem_allocate:"
            " return NULL because gc_overhead_limit_exceeded is set");
        }
        assert(result == NULL, "Allocation did not fail");
        return NULL;
      }

      // Generate a VM operation
      VM_ParallelGCFailedPermanentAllocation op(size, gc_count, full_gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_permanent_or_null(op.result()),
          "result not in heap");
        // If GC was locked out during VM operation then retry allocation
        // and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }
        // If a NULL result is being returned, an out-of-memory
        // will be thrown now. Clear the gc_overhead_limit_exceeded
        // flag to avoid the following situation:
        //   gc_overhead_limit_exceeded is set during a collection;
        //   the collection fails to return enough space and an OOM is thrown;
        //   a subsequent GC prematurely throws an out-of-memory because
        //   the gc_overhead_limit_exceeded counts did not start
        //   again from 0.
        if (op.result() == NULL) {
          size_policy()->reset_gc_overhead_limit_count();
        }
        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::permanent_mem_allocate retries %d times \n\t"
              " size=%d", loop_count, size);
    }
  } while (result == NULL);

  return result;
}

//
// This is the policy code for permanent allocations which have failed
// and require a collection. Note that just as in failed_mem_allocate,
// we do not set collection policy, only where & when to allocate and
// collect.
HeapWord* ParallelScavengeHeap::failed_permanent_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  assert(size > perm_gen()->free_in_words(), "Allocation should fail");

  // We assume (and assert!) that an allocation at this point will fail
  // unless we collect.

  // First level allocation failure. Mark-sweep and allocate in perm gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  invoke_full_gc(false);
  HeapWord* result = perm_gen()->allocate_permanent(size);

  // Second level allocation failure. We're running out of memory.
  if (result == NULL) {
    invoke_full_gc(true);
    result = perm_gen()->allocate_permanent(size);
  }

  return result;
}

void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

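// TLABs are carved out of eden, so the allocation-headroom and TLAB
// queries below all delegate to the young gen's eden space.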
size_t ParallelScavengeHeap::unsafe_max_alloc() {
  return young_gen()->eden_space()->free_in_bytes();
}

size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}

HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) {
  return young_gen()->allocate(size, true);
}

void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
  CollectedHeap::accumulate_statistics_all_tlabs();
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

bool ParallelScavengeHeap::can_elide_initializing_store_barrier(oop new_obj) {
  // We don't need barriers for stores to objects in the
  // young gen and, a fortiori, for initializing stores to
  // objects therein.
  return is_in_young(new_obj);
}

// This method is used by System.gc() and JVMTI.
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
    "this thread should not own the Heap_lock");

  unsigned int gc_count      = 0;
  unsigned int full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // This value is guarded by the Heap_lock
    gc_count      = Universe::heap()->total_collections();
    full_gc_count = Universe::heap()->total_full_collections();
  }

  VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}

// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void ParallelScavengeHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump: {
      HandleMark hm;
      invoke_full_gc(false);
      break;
    }
    default: // XXX FIX ME
      ShouldNotReachHere();
  }
}


void ParallelScavengeHeap::oop_iterate(OopClosure* cl) {
  Unimplemented();
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
  perm_gen()->object_iterate(cl);
}

void ParallelScavengeHeap::permanent_oop_iterate(OopClosure* cl) {
  Unimplemented();
}

void ParallelScavengeHeap::permanent_object_iterate(ObjectClosure* cl) {
  perm_gen()->object_iterate(cl);
}

HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
      "addr should be in allocated part of young gen");
    // called from os::print_location by find or VMError
    if (Debugging || VMError::fatal_error_in_progress()) return NULL;
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
      "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  } else if (perm_gen()->is_in_reserved(addr)) {
    assert(perm_gen()->is_in(addr),
      "addr should be in allocated part of perm gen");
    return perm_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return 0;
}
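
// Note: the old and perm gens can answer block_start() through their start
// arrays, which track where objects begin in each covered region; the young
// gen keeps no such structure, hence the Unimplemented() above.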

size_t ParallelScavengeHeap::block_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}

jlong ParallelScavengeHeap::millis_since_last_gc() {
  return UseParallelOldGC ?
    PSParallelCompact::millis_since_last_gc() :
    PSMarkSweep::millis_since_last_gc();
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}

void ParallelScavengeHeap::print() const { print_on(tty); }

void ParallelScavengeHeap::print_on(outputStream* st) const {
  young_gen()->print_on(st);
  old_gen()->print_on(st);
  perm_gen()->print_on(st);
}

void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  PSScavenge::gc_task_manager()->threads_do(tc);
}

void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
  PSScavenge::gc_task_manager()->print_threads_on(st);
}

void ParallelScavengeHeap::print_tracing_info() const {
  if (TraceGen0Time) {
    double time = PSScavenge::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 0 time %3.7f secs]", time);
  }
  if (TraceGen1Time) {
    double time = PSMarkSweep::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 1 time %3.7f secs]", time);
  }
}


void ParallelScavengeHeap::verify(bool allow_dirty, bool silent, bool option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    if (!silent) {
      gclog_or_tty->print("permanent ");
    }
    perm_gen()->verify(allow_dirty);

    if (!silent) {
      gclog_or_tty->print("tenured ");
    }
    old_gen()->verify(allow_dirty);

    if (!silent) {
      gclog_or_tty->print("eden ");
    }
    young_gen()->verify(allow_dirty);
  }
  if (!silent) {
    gclog_or_tty->print("ref_proc ");
  }
  ReferenceProcessor::verify();
}

void ParallelScavengeHeap::print_heap_change(size_t prev_used) {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}

ParallelScavengeHeap* ParallelScavengeHeap::heap() {
  assert(_psh != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
  assert(_psh->kind() == CollectedHeap::ParallelScavengeHeap, "not a parallel scavenge heap");
  return _psh;
}

// Before delegating the resize to the young generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
                                            size_t survivor_size) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_young_gen_needs(eden_size, survivor_size);
  }

  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}

// Before delegating the resize to the old generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_old_gen_needs(desired_free_space);
  }

  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}

ParallelScavengeHeap::ParStrongRootsScope::ParStrongRootsScope() {
  // nothing particular
}

ParallelScavengeHeap::ParStrongRootsScope::~ParStrongRootsScope() {
  // nothing particular
}

#ifndef PRODUCT
void ParallelScavengeHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    young_gen()->record_spaces_top();
    old_gen()->record_spaces_top();
    perm_gen()->record_spaces_top();
  }
}

void ParallelScavengeHeap::gen_mangle_unused_area() {
  if (ZapUnusedHeapArea) {
    young_gen()->eden_space()->mangle_unused_area();
    young_gen()->to_space()->mangle_unused_area();
    young_gen()->from_space()->mangle_unused_area();
    old_gen()->object_space()->mangle_unused_area();
    perm_gen()->object_space()->mangle_unused_area();
  }
}
#endif