Mercurial > hg > graal-jvmci-8
annotate src/share/vm/memory/defNewGeneration.cpp @ 1994:6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
7002546: regression on SpecJbb2005 on 7b118 comparing to 7b117 on small heaps
Summary: Relaxed assertion checking related to incremental_collection_failed flag to allow for ExplicitGCInvokesConcurrent behaviour where we do not want a failing scavenge to bail to a stop-world collection. Parameterized incremental_collection_will_fail() so we can selectively use, or not use, as appropriate, the statistical prediction at specific use sites. This essentially reverts the scavenge bail-out logic to what it was prior to some recent changes that had inadvertently started using the statistical prediction which can be noisy in the presence of bursty loads. Added some associated verbose non-product debugging messages.
Reviewed-by: johnc, tonyp
author | ysr |
---|---|
date | Tue, 07 Dec 2010 21:55:53 -0800 |
parents | f95d63e2154a |
children | 74ee0db180fa |
rev | line source |
---|---|
0 | 1 /* |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1387
diff
changeset
|
2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1387
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1387
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1387
diff
changeset
|
21 * questions. |
0 | 22 * |
23 */ | |
24 | |
1972 | 25 #include "precompiled.hpp" |
26 #include "gc_implementation/shared/collectorCounters.hpp" | |
27 #include "gc_implementation/shared/gcPolicyCounters.hpp" | |
28 #include "gc_implementation/shared/spaceDecorator.hpp" | |
29 #include "memory/defNewGeneration.inline.hpp" | |
30 #include "memory/gcLocker.inline.hpp" | |
31 #include "memory/genCollectedHeap.hpp" | |
32 #include "memory/genOopClosures.inline.hpp" | |
33 #include "memory/generationSpec.hpp" | |
34 #include "memory/iterator.hpp" | |
35 #include "memory/referencePolicy.hpp" | |
36 #include "memory/space.inline.hpp" | |
37 #include "oops/instanceRefKlass.hpp" | |
38 #include "oops/oop.inline.hpp" | |
39 #include "runtime/java.hpp" | |
40 #include "utilities/copy.hpp" | |
41 #include "utilities/stack.inline.hpp" | |
42 #ifdef TARGET_OS_FAMILY_linux | |
43 # include "thread_linux.inline.hpp" | |
44 #endif | |
45 #ifdef TARGET_OS_FAMILY_solaris | |
46 # include "thread_solaris.inline.hpp" | |
47 #endif | |
48 #ifdef TARGET_OS_FAMILY_windows | |
49 # include "thread_windows.inline.hpp" | |
50 #endif | |
0 | 51 |
52 // | |
53 // DefNewGeneration functions. | |
54 | |
55 // Methods of protected closure types. | |
56 | |
57 DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) { | |
58 assert(g->level() == 0, "Optimized for youngest gen."); | |
59 } | |
60 void DefNewGeneration::IsAliveClosure::do_object(oop p) { | |
61 assert(false, "Do not call."); | |
62 } | |
63 bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) { | |
64 return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded(); | |
65 } | |
66 | |
// KeepAliveClosure: applied to referents kept alive during reference
// processing.  Wraps a ScanWeakRefClosure and caches the heap's
// remembered set for use by do_oop_work() (defined elsewhere).
DefNewGeneration::KeepAliveClosure::
KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
  // The assert documents (and the cast relies on) the fact that the only
  // remembered-set implementation here is the card table.
  GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
  assert(rs->rs_kind() == GenRemSet::CardTable, "Wrong rem set kind.");
  _rs = (CardTableRS*)rs;
}

// Both oop widths (full and compressed) delegate to the shared
// do_oop_work() template.
void DefNewGeneration::KeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
0 | 76 |
77 | |
// FastKeepAliveClosure: variant of KeepAliveClosure that additionally
// caches the end of the young generation's reserved region in _boundary
// -- presumably so do_oop_work() can do a cheaper in-young-gen test;
// confirm against the closure declaration.
DefNewGeneration::FastKeepAliveClosure::
FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {
  _boundary = g->reserved().end();
}

// Both oop widths delegate to the shared do_oop_work() template.
void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
0 | 86 |
87 DefNewGeneration::EvacuateFollowersClosure:: | |
88 EvacuateFollowersClosure(GenCollectedHeap* gch, int level, | |
89 ScanClosure* cur, ScanClosure* older) : | |
90 _gch(gch), _level(level), | |
91 _scan_cur_or_nonheap(cur), _scan_older(older) | |
92 {} | |
93 | |
94 void DefNewGeneration::EvacuateFollowersClosure::do_void() { | |
95 do { | |
96 _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap, | |
97 _scan_older); | |
98 } while (!_gch->no_allocs_since_save_marks(_level)); | |
99 } | |
100 | |
101 DefNewGeneration::FastEvacuateFollowersClosure:: | |
102 FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level, | |
103 DefNewGeneration* gen, | |
104 FastScanClosure* cur, FastScanClosure* older) : | |
105 _gch(gch), _level(level), _gen(gen), | |
106 _scan_cur_or_nonheap(cur), _scan_older(older) | |
107 {} | |
108 | |
109 void DefNewGeneration::FastEvacuateFollowersClosure::do_void() { | |
110 do { | |
111 _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap, | |
112 _scan_older); | |
113 } while (!_gch->no_allocs_since_save_marks(_level)); | |
1836
894b1d7c7e01
6423256: GC stacks should use a better data structure
jcoomes
parents:
1552
diff
changeset
|
114 guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan"); |
0 | 115 } |
116 | |
// ScanClosure: scans oops during a young-gen collection; caches the end
// of the generation's reserved region as _boundary.  _gc_barrier selects
// whether do_oop_work() (defined elsewhere) applies a write barrier.
ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
  OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

// Both oop widths delegate to the shared do_oop_work() template.
void ScanClosure::do_oop(oop* p)       { ScanClosure::do_oop_work(p); }
void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
126 |
// FastScanClosure: "fast" counterpart of ScanClosure; same construction,
// different do_oop_work() implementation (defined elsewhere).
FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
  OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

// Both oop widths delegate to the shared do_oop_work() template.
void FastScanClosure::do_oop(oop* p)       { FastScanClosure::do_oop_work(p); }
void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
136 |
// ScanWeakRefClosure: scans weak-reference referents in the young
// generation; caches the generation boundary for do_oop_work().
ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
  OopClosure(g->ref_processor()), _g(g)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

// Both oop widths delegate to the shared do_oop_work() template.
void ScanWeakRefClosure::do_oop(oop* p)       { ScanWeakRefClosure::do_oop_work(p); }
void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
146 |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
// FilteringClosure forwards both oop widths to its shared
// do_oop_work() template (defined elsewhere).
void FilteringClosure::do_oop(oop* p)       { FilteringClosure::do_oop_work(p); }
void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }
0 | 149 |
// Construct the young generation: register its address range with the
// barrier set, allocate the eden/from/to spaces, compute maximum space
// sizes, create performance counters, and lay out the initial space
// boundaries.
DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   int level,
                                   const char* policy)
  : Generation(rs, initial_size, level),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false)
{
  // Tell the barrier set which region of the heap it must cover.
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  Universe::heap()->barrier_set()->resize_covered_region(cmr);

  // A "soft-ended" eden supports concurrent collectors; otherwise a
  // plain EdenSpace suffices.
  if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) {
    _eden_space = new ConcEdenSpace(this);
  } else {
    _eden_space = new EdenSpace(this);
  }
  _from_space = new ContiguousSpace();
  _to_space = new ContiguousSpace();

  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
    vm_exit_during_initialization("Could not allocate a new gen space");

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
  uintx alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
  uintx size = _virtual_space.reserved_size();
  _max_survivor_size = compute_survivor_size(size, alignment);
  _max_eden_size = size - (2*_max_survivor_size);

  // allocate the performance counters

  // Generation counters -- generation 0, 3 subspaces
  _gen_counters = new GenerationCounters("new", 0, 3, &_virtual_space);
  _gc_counters = new CollectorCounters(policy, 0);

  _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
                                      _gen_counters);
  _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                      _gen_counters);
  _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                    _gen_counters);

  // Lay out eden/from/to inside the committed region, clearing and
  // mangling as appropriate for first-time initialization.
  compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  update_counters();
  _next_gen = NULL;
  _tenuring_threshold = MaxTenuringThreshold;
  _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
}
200 | |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
// Recompute the eden/from/to layout for the current committed size.
// "minimum_eden_size" > 0 means part of eden is live and must be kept;
// "clear_space"/"mangle_space" control whether the reshaped spaces are
// cleared and debug-mangled.  Order matters throughout: mangling checks
// must precede reshaping, and eden's explicit mangle must follow its
// (partial) initialize.
void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
                                                bool clear_space,
                                                bool mangle_space) {
  uintx alignment =
    GenCollectedHeap::heap()->collector_policy()->min_alignment();

  // If the spaces are being cleared (only done at heap initialization
  // currently), the survivor spaces need not be empty.
  // Otherwise, no care is taken for used areas in the survivor spaces
  // so check.
  assert(clear_space || (to()->is_empty() && from()->is_empty()),
    "Initialization of the survivor spaces assumes these are empty");

  // Compute sizes
  uintx size = _virtual_space.committed_size();
  uintx survivor_size = compute_survivor_size(size, alignment);
  uintx eden_size = size - (2*survivor_size);
  assert(eden_size > 0 && survivor_size <= eden_size, "just checking");

  if (eden_size < minimum_eden_size) {
    // May happen due to 64Kb rounding, if so adjust eden size back up
    minimum_eden_size = align_size_up(minimum_eden_size, alignment);
    uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
    uintx unaligned_survivor_size =
      align_size_down(maximum_survivor_size, alignment);
    survivor_size = MAX2(unaligned_survivor_size, alignment);
    eden_size = size - (2*survivor_size);
    assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
    assert(eden_size >= minimum_eden_size, "just checking");
  }

  // The three spaces are laid out contiguously: eden, then from, then to.
  char *eden_start = _virtual_space.low();
  char *from_start = eden_start + eden_size;
  char *to_start   = from_start + survivor_size;
  char *to_end     = to_start   + survivor_size;

  assert(to_end == _virtual_space.high(), "just checking");
  assert(Space::is_aligned((HeapWord*)eden_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)from_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)to_start),   "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
  MemRegion toMR  ((HeapWord*)to_start,   (HeapWord*)to_end);

  // A minimum eden size implies that there is a part of eden that
  // is being used and that affects the initialization of any
  // newly formed eden.
  bool live_in_eden = minimum_eden_size > 0;

  // If not clearing the spaces, do some checking to verify that
  // the space are already mangled.
  if (!clear_space) {
    // Must check mangling before the spaces are reshaped.  Otherwise,
    // the bottom or end of one space may have moved into another
    // a failure of the check may not correctly indicate which space
    // is not properly mangled.
    if (ZapUnusedHeapArea) {
      HeapWord* limit = (HeapWord*) _virtual_space.high();
      eden()->check_mangled_unused_area(limit);
      from()->check_mangled_unused_area(limit);
        to()->check_mangled_unused_area(limit);
    }
  }

  // Reset the spaces for their new regions.
  eden()->initialize(edenMR,
                     clear_space && !live_in_eden,
                     SpaceDecorator::Mangle);
  // If clear_space and live_in_eden, we will not have cleared any
  // portion of eden above its top.  This can cause newly
  // expanded space not to be mangled if using ZapUnusedHeapArea.
  // We explicitly do such mangling here.
  if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
    eden()->mangle_unused_area();
  }
  from()->initialize(fromMR, clear_space, mangle_space);
    to()->initialize(toMR,   clear_space, mangle_space);

  // Set next compaction spaces.
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);
}
287 | |
288 void DefNewGeneration::swap_spaces() { | |
289 ContiguousSpace* s = from(); | |
290 _from_space = to(); | |
291 _to_space = s; | |
292 eden()->set_next_compaction_space(from()); | |
293 // The to-space is normally empty before a compaction so need | |
294 // not be considered. The exception is during promotion | |
295 // failure handling when to-space can contain live objects. | |
296 from()->set_next_compaction_space(NULL); | |
297 | |
298 if (UsePerfData) { | |
299 CSpaceCounters* c = _from_counters; | |
300 _from_counters = _to_counters; | |
301 _to_counters = c; | |
302 } | |
303 } | |
304 | |
// Expand the committed portion of the young generation by "bytes",
// under ExpandHeap_lock.  Returns whether the virtual-space expansion
// succeeded.  Newly committed memory is mangled immediately when
// ZapUnusedHeapArea is on.
bool DefNewGeneration::expand(size_t bytes) {
  MutexLocker x(ExpandHeap_lock);
  HeapWord* prev_high = (HeapWord*) _virtual_space.high();
  bool success = _virtual_space.expand_by(bytes);
  if (success && ZapUnusedHeapArea) {
    // Mangle newly committed space immediately because it
    // can be done here more simply that after the new
    // spaces have been computed.
    HeapWord* new_high = (HeapWord*) _virtual_space.high();
    MemRegion mangle_region(prev_high, new_high);
    SpaceMangler::mangle_region(mangle_region);
  }

  // Do not attempt an expand-to-the reserve size.  The
  // request should properly observe the maximum size of
  // the generation so an expand-to-reserve should be
  // unnecessary.  Also a second call to expand-to-reserve
  // value potentially can cause an undue expansion.
  // For example if the first expand fail for unknown reasons,
  // but the second succeeds and expands the heap to its maximum
  // value.
  if (GC_locker::is_active()) {
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Garbage collection disabled, "
        "expanded heap instead");
    }
  }

  return success;
}
335 | |
336 | |
// Resize the young generation after a full collection, based on the
// next generation's capacity (NewRatio) plus a per-thread allowance
// (NewSizeThreadIncrease).  Expands or shrinks the virtual space and,
// if anything changed, recomputes the space boundaries and updates the
// barrier set's covered region.
void DefNewGeneration::compute_new_size() {
  // This is called after a gc that includes the following generation
  // (which is required to exist.)  So from-space will normally be empty.
  // Note that we check both spaces, since if scavenge failed they revert roles.
  // If not we bail out (otherwise we would have to relocate the objects)
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }

  int next_level = level() + 1;
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(next_level < gch->_n_gens,
         "DefNewGeneration cannot be an oldest gen");

  Generation* next_gen = gch->_gens[next_level];
  size_t old_size = next_gen->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = spec()->init_size();
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size,
         "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  // Compute desired new generation size based on NewRatio and
  // NewSizeThreadIncrease
  size_t desired_new_size = old_size/NewRatio;
  int threads_count = Threads::number_of_non_daemon_threads();
  size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
  desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment);

  // Adjust new generation size: clamp to [min_new_size, max_new_size].
  desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
  assert(desired_new_size <= max_new_size, "just checking");

  bool changed = false;
  if (desired_new_size > new_size_before) {
    size_t change = desired_new_size - new_size_before;
    assert(change % alignment == 0, "just checking");
    if (expand(change)) {
      changed = true;
    }
    // If the heap failed to expand to the desired size,
    // "changed" will be false.  If the expansion failed
    // (and at this point it was expected to succeed),
    // ignore the failure (leaving "changed" as false).
  }
  if (desired_new_size < new_size_before && eden()->is_empty()) {
    // bail out of shrinking if objects in eden
    size_t change = new_size_before - desired_new_size;
    assert(change % alignment == 0, "just checking");
    _virtual_space.shrink_by(change);
    changed = true;
  }
  if (changed) {
    // The spaces have already been mangled at this point but
    // may not have been cleared (set top = bottom) and should be.
    // Mangling was done when the heap was being expanded.
    compute_space_boundaries(eden()->used(),
                             SpaceDecorator::Clear,
                             SpaceDecorator::DontMangle);
    MemRegion cmr((HeapWord*)_virtual_space.low(),
                  (HeapWord*)_virtual_space.high());
    Universe::heap()->barrier_set()->resize_covered_region(cmr);
    if (Verbose && PrintGC) {
      size_t new_size_after = _virtual_space.committed_size();
      size_t eden_size_after = eden()->capacity();
      size_t survivor_size_after = from()->capacity();
      gclog_or_tty->print("New generation size " SIZE_FORMAT "K->"
        SIZE_FORMAT "K [eden="
        SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
        new_size_before/K, new_size_after/K,
        eden_size_after/K, survivor_size_after/K);
      if (WizardMode) {
        gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
          thread_increase_size/K, threads_count);
      }
      gclog_or_tty->cr();
    }
  }
}
419 | |
// Apply "cl" to objects allocated since the last GC.  Only eden is
// walked here.
void DefNewGeneration::object_iterate_since_last_GC(ObjectClosure* cl) {
  // $$$ This may be wrong in case of "scavenge failure"?
  eden()->object_iterate(cl);
}

// Not implemented for this generation; callers must not reach here.
void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
  assert(false, "NYI -- are you sure you want to call this?");
}
428 | |
429 | |
430 size_t DefNewGeneration::capacity() const { | |
431 return eden()->capacity() | |
432 + from()->capacity(); // to() is only used during scavenge | |
433 } | |
434 | |
435 | |
436 size_t DefNewGeneration::used() const { | |
437 return eden()->used() | |
438 + from()->used(); // to() is only used during scavenge | |
439 } | |
440 | |
441 | |
442 size_t DefNewGeneration::free() const { | |
443 return eden()->free() | |
444 + from()->free(); // to() is only used during scavenge | |
445 } | |
446 | |
447 size_t DefNewGeneration::max_capacity() const { | |
448 const size_t alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment(); | |
449 const size_t reserved_bytes = reserved().byte_size(); | |
450 return reserved_bytes - compute_survivor_size(reserved_bytes, alignment); | |
451 } | |
452 | |
453 size_t DefNewGeneration::unsafe_max_alloc_nogc() const { | |
454 return eden()->free(); | |
455 } | |
456 | |
457 size_t DefNewGeneration::capacity_before_gc() const { | |
458 return eden()->capacity(); | |
459 } | |
460 | |
461 size_t DefNewGeneration::contiguous_available() const { | |
462 return eden()->free(); | |
463 } | |
464 | |
465 | |
466 HeapWord** DefNewGeneration::top_addr() const { return eden()->top_addr(); } | |
467 HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); } | |
468 | |
469 void DefNewGeneration::object_iterate(ObjectClosure* blk) { | |
470 eden()->object_iterate(blk); | |
471 from()->object_iterate(blk); | |
472 } | |
473 | |
474 | |
475 void DefNewGeneration::space_iterate(SpaceClosure* blk, | |
476 bool usedOnly) { | |
477 blk->do_space(eden()); | |
478 blk->do_space(from()); | |
479 blk->do_space(to()); | |
480 } | |
481 | |
482 // The last collection bailed out, we are running out of heap space, | |
483 // so we try to allocate the from-space, too. | |
484 HeapWord* DefNewGeneration::allocate_from_space(size_t size) { | |
485 HeapWord* result = NULL; | |
1994
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
486 if (Verbose && PrintGCDetails) { |
0 | 487 gclog_or_tty->print("DefNewGeneration::allocate_from_space(%u):" |
1994
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
488 " will_fail: %s" |
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
489 " heap_lock: %s" |
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
490 " free: " SIZE_FORMAT, |
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
491 size, |
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
492 GenCollectedHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ? |
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
493 "true" : "false", |
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
494 Heap_lock->is_locked() ? "locked" : "unlocked", |
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
495 from()->free()); |
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
496 } |
0 | 497 if (should_allocate_from_space() || GC_locker::is_active_and_needs_gc()) { |
498 if (Heap_lock->owned_by_self() || | |
499 (SafepointSynchronize::is_at_safepoint() && | |
500 Thread::current()->is_VM_thread())) { | |
501 // If the Heap_lock is not locked by this thread, this will be called | |
502 // again later with the Heap_lock held. | |
503 result = from()->allocate(size); | |
504 } else if (PrintGC && Verbose) { | |
505 gclog_or_tty->print_cr(" Heap_lock is not owned by self"); | |
506 } | |
507 } else if (PrintGC && Verbose) { | |
508 gclog_or_tty->print_cr(" should_allocate_from_space: NOT"); | |
509 } | |
510 if (PrintGC && Verbose) { | |
511 gclog_or_tty->print_cr(" returns %s", result == NULL ? "NULL" : "object"); | |
512 } | |
513 return result; | |
514 } | |
515 | |
516 HeapWord* DefNewGeneration::expand_and_allocate(size_t size, | |
517 bool is_tlab, | |
518 bool parallel) { | |
519 // We don't attempt to expand the young generation (but perhaps we should.) | |
520 return allocate(size, is_tlab); | |
521 } | |
522 | |
523 | |
524 void DefNewGeneration::collect(bool full, | |
525 bool clear_all_soft_refs, | |
526 size_t size, | |
527 bool is_tlab) { | |
528 assert(full || size > 0, "otherwise we don't want to collect"); | |
529 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
530 _next_gen = gch->next_gen(this); | |
531 assert(_next_gen != NULL, | |
532 "This must be the youngest gen, and not the only gen"); | |
533 | |
534 // If the next generation is too full to accomodate promotion | |
535 // from this generation, pass on collection; let the next generation | |
536 // do it. | |
537 if (!collection_attempt_is_safe()) { | |
1994
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
538 if (Verbose && PrintGCDetails) { |
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
539 gclog_or_tty->print(" :: Collection attempt not safe :: "); |
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
540 } |
1888
a7214d79fcf1
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
1836
diff
changeset
|
541 gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one |
0 | 542 return; |
543 } | |
544 assert(to()->is_empty(), "Else not collection_attempt_is_safe"); | |
545 | |
546 init_assuming_no_promotion_failure(); | |
547 | |
548 TraceTime t1("GC", PrintGC && !PrintGCDetails, true, gclog_or_tty); | |
549 // Capture heap used before collection (for printing). | |
550 size_t gch_prev_used = gch->used(); | |
551 | |
552 SpecializationStats::clear(); | |
553 | |
554 // These can be shared for all code paths | |
555 IsAliveClosure is_alive(this); | |
556 ScanWeakRefClosure scan_weak_ref(this); | |
557 | |
558 age_table()->clear(); | |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
559 to()->clear(SpaceDecorator::Mangle); |
0 | 560 |
561 gch->rem_set()->prepare_for_younger_refs_iterate(false); | |
562 | |
563 assert(gch->no_allocs_since_save_marks(0), | |
564 "save marks have not been newly set."); | |
565 | |
566 // Not very pretty. | |
567 CollectorPolicy* cp = gch->collector_policy(); | |
568 | |
569 FastScanClosure fsc_with_no_gc_barrier(this, false); | |
570 FastScanClosure fsc_with_gc_barrier(this, true); | |
571 | |
572 set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier); | |
573 FastEvacuateFollowersClosure evacuate_followers(gch, _level, this, | |
574 &fsc_with_no_gc_barrier, | |
575 &fsc_with_gc_barrier); | |
576 | |
577 assert(gch->no_allocs_since_save_marks(0), | |
578 "save marks have not been newly set."); | |
579 | |
580 gch->gen_process_strong_roots(_level, | |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
457
diff
changeset
|
581 true, // Process younger gens, if any, |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
457
diff
changeset
|
582 // as strong roots. |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
457
diff
changeset
|
583 true, // activate StrongRootsScope |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
457
diff
changeset
|
584 false, // not collecting perm generation. |
0 | 585 SharedHeap::SO_AllClasses, |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
457
diff
changeset
|
586 &fsc_with_no_gc_barrier, |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
457
diff
changeset
|
587 true, // walk *all* scavengable nmethods |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
457
diff
changeset
|
588 &fsc_with_gc_barrier); |
0 | 589 |
590 // "evacuate followers". | |
591 evacuate_followers.do_void(); | |
592 | |
593 FastKeepAliveClosure keep_alive(this, &scan_weak_ref); | |
453
c96030fff130
6684579: SoftReference processing can be made more efficient
ysr
parents:
356
diff
changeset
|
594 ReferenceProcessor* rp = ref_processor(); |
457
27a80744a83b
6778647: snap(), snap_policy() should be renamed setup(), setup_policy()
ysr
parents:
453
diff
changeset
|
595 rp->setup_policy(clear_all_soft_refs); |
453
c96030fff130
6684579: SoftReference processing can be made more efficient
ysr
parents:
356
diff
changeset
|
596 rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers, |
c96030fff130
6684579: SoftReference processing can be made more efficient
ysr
parents:
356
diff
changeset
|
597 NULL); |
0 | 598 if (!promotion_failed()) { |
599 // Swap the survivor spaces. | |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
600 eden()->clear(SpaceDecorator::Mangle); |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
601 from()->clear(SpaceDecorator::Mangle); |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
602 if (ZapUnusedHeapArea) { |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
603 // This is now done here because of the piece-meal mangling which |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
604 // can check for valid mangling at intermediate points in the |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
605 // collection(s). When a minor collection fails to collect |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
606 // sufficient space resizing of the young generation can occur |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
607 // an redistribute the spaces in the young generation. Mangle |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
608 // here so that unzapped regions don't get distributed to |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
609 // other spaces. |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
610 to()->mangle_unused_area(); |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
611 } |
0 | 612 swap_spaces(); |
613 | |
614 assert(to()->is_empty(), "to space should be empty now"); | |
615 | |
616 // Set the desired survivor size to half the real survivor space | |
617 _tenuring_threshold = | |
618 age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize); | |
619 | |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1145
diff
changeset
|
620 // A successful scavenge should restart the GC time limit count which is |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1145
diff
changeset
|
621 // for full GC's. |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1145
diff
changeset
|
622 AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy(); |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1145
diff
changeset
|
623 size_policy->reset_gc_overhead_limit_count(); |
0 | 624 if (PrintGC && !PrintGCDetails) { |
625 gch->print_heap_change(gch_prev_used); | |
626 } | |
1888
a7214d79fcf1
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
1836
diff
changeset
|
627 assert(!gch->incremental_collection_failed(), "Should be clear"); |
0 | 628 } else { |
1836
894b1d7c7e01
6423256: GC stacks should use a better data structure
jcoomes
parents:
1552
diff
changeset
|
629 assert(_promo_failure_scan_stack.is_empty(), "post condition"); |
894b1d7c7e01
6423256: GC stacks should use a better data structure
jcoomes
parents:
1552
diff
changeset
|
630 _promo_failure_scan_stack.clear(true); // Clear cached segments. |
0 | 631 |
632 remove_forwarding_pointers(); | |
633 if (PrintGCDetails) { | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
989
diff
changeset
|
634 gclog_or_tty->print(" (promotion failed) "); |
0 | 635 } |
636 // Add to-space to the list of space to compact | |
637 // when a promotion failure has occurred. In that | |
638 // case there can be live objects in to-space | |
639 // as a result of a partial evacuation of eden | |
640 // and from-space. | |
1836
894b1d7c7e01
6423256: GC stacks should use a better data structure
jcoomes
parents:
1552
diff
changeset
|
641 swap_spaces(); // For uniformity wrt ParNewGeneration. |
0 | 642 from()->set_next_compaction_space(to()); |
1888
a7214d79fcf1
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
1836
diff
changeset
|
643 gch->set_incremental_collection_failed(); |
0 | 644 |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
989
diff
changeset
|
645 // Inform the next generation that a promotion failure occurred. |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
989
diff
changeset
|
646 _next_gen->promotion_failure_occurred(); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
989
diff
changeset
|
647 |
0 | 648 // Reset the PromotionFailureALot counters. |
649 NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();) | |
650 } | |
651 // set new iteration safe limit for the survivor spaces | |
652 from()->set_concurrent_iteration_safe_limit(from()->top()); | |
653 to()->set_concurrent_iteration_safe_limit(to()->top()); | |
654 SpecializationStats::print(); | |
655 update_time_of_last_gc(os::javaTimeMillis()); | |
656 } | |
657 | |
658 class RemoveForwardPointerClosure: public ObjectClosure { | |
659 public: | |
660 void do_object(oop obj) { | |
661 obj->init_mark(); | |
662 } | |
663 }; | |
664 | |
665 void DefNewGeneration::init_assuming_no_promotion_failure() { | |
666 _promotion_failed = false; | |
667 from()->set_next_compaction_space(NULL); | |
668 } | |
669 | |
670 void DefNewGeneration::remove_forwarding_pointers() { | |
671 RemoveForwardPointerClosure rspc; | |
672 eden()->object_iterate(&rspc); | |
673 from()->object_iterate(&rspc); | |
1836
894b1d7c7e01
6423256: GC stacks should use a better data structure
jcoomes
parents:
1552
diff
changeset
|
674 |
0 | 675 // Now restore saved marks, if any. |
1836
894b1d7c7e01
6423256: GC stacks should use a better data structure
jcoomes
parents:
1552
diff
changeset
|
676 assert(_objs_with_preserved_marks.size() == _preserved_marks_of_objs.size(), |
894b1d7c7e01
6423256: GC stacks should use a better data structure
jcoomes
parents:
1552
diff
changeset
|
677 "should be the same"); |
894b1d7c7e01
6423256: GC stacks should use a better data structure
jcoomes
parents:
1552
diff
changeset
|
678 while (!_objs_with_preserved_marks.is_empty()) { |
894b1d7c7e01
6423256: GC stacks should use a better data structure
jcoomes
parents:
1552
diff
changeset
|
679 oop obj = _objs_with_preserved_marks.pop(); |
894b1d7c7e01
6423256: GC stacks should use a better data structure
jcoomes
parents:
1552
diff
changeset
|
680 markOop m = _preserved_marks_of_objs.pop(); |
894b1d7c7e01
6423256: GC stacks should use a better data structure
jcoomes
parents:
1552
diff
changeset
|
681 obj->set_mark(m); |
0 | 682 } |
1836
894b1d7c7e01
6423256: GC stacks should use a better data structure
jcoomes
parents:
1552
diff
changeset
|
683 _objs_with_preserved_marks.clear(true); |
894b1d7c7e01
6423256: GC stacks should use a better data structure
jcoomes
parents:
1552
diff
changeset
|
684 _preserved_marks_of_objs.clear(true); |
0 | 685 } |
686 | |
687 void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) { | |
688 if (m->must_be_preserved_for_promotion_failure(obj)) { | |
1836
894b1d7c7e01
6423256: GC stacks should use a better data structure
jcoomes
parents:
1552
diff
changeset
|
689 _objs_with_preserved_marks.push(obj); |
894b1d7c7e01
6423256: GC stacks should use a better data structure
jcoomes
parents:
1552
diff
changeset
|
690 _preserved_marks_of_objs.push(m); |
0 | 691 } |
692 } | |
693 | |
694 void DefNewGeneration::handle_promotion_failure(oop old) { | |
695 preserve_mark_if_necessary(old, old->mark()); | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
989
diff
changeset
|
696 if (!_promotion_failed && PrintPromotionFailure) { |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
989
diff
changeset
|
697 gclog_or_tty->print(" (promotion failure size = " SIZE_FORMAT ") ", |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
989
diff
changeset
|
698 old->size()); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
989
diff
changeset
|
699 } |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
989
diff
changeset
|
700 |
0 | 701 // forward to self |
702 old->forward_to(old); | |
703 _promotion_failed = true; | |
704 | |
1836
894b1d7c7e01
6423256: GC stacks should use a better data structure
jcoomes
parents:
1552
diff
changeset
|
705 _promo_failure_scan_stack.push(old); |
0 | 706 |
707 if (!_promo_failure_drain_in_progress) { | |
708 // prevent recursion in copy_to_survivor_space() | |
709 _promo_failure_drain_in_progress = true; | |
710 drain_promo_failure_scan_stack(); | |
711 _promo_failure_drain_in_progress = false; | |
712 } | |
713 } | |
714 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
715 oop DefNewGeneration::copy_to_survivor_space(oop old) { |
0 | 716 assert(is_in_reserved(old) && !old->is_forwarded(), |
717 "shouldn't be scavenging this oop"); | |
718 size_t s = old->size(); | |
719 oop obj = NULL; | |
720 | |
721 // Try allocating obj in to-space (unless too old) | |
722 if (old->age() < tenuring_threshold()) { | |
723 obj = (oop) to()->allocate(s); | |
724 } | |
725 | |
726 // Otherwise try allocating obj tenured | |
727 if (obj == NULL) { | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
728 obj = _next_gen->promote(old, s); |
0 | 729 if (obj == NULL) { |
730 handle_promotion_failure(old); | |
731 return old; | |
732 } | |
733 } else { | |
734 // Prefetch beyond obj | |
735 const intx interval = PrefetchCopyIntervalInBytes; | |
736 Prefetch::write(obj, interval); | |
737 | |
738 // Copy obj | |
739 Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s); | |
740 | |
741 // Increment age if obj still in new generation | |
742 obj->incr_age(); | |
743 age_table()->add(obj, s); | |
744 } | |
745 | |
746 // Done, insert forward pointer to obj in this header | |
747 old->forward_to(obj); | |
748 | |
749 return obj; | |
750 } | |
751 | |
752 void DefNewGeneration::drain_promo_failure_scan_stack() { | |
1836
894b1d7c7e01
6423256: GC stacks should use a better data structure
jcoomes
parents:
1552
diff
changeset
|
753 while (!_promo_failure_scan_stack.is_empty()) { |
894b1d7c7e01
6423256: GC stacks should use a better data structure
jcoomes
parents:
1552
diff
changeset
|
754 oop obj = _promo_failure_scan_stack.pop(); |
0 | 755 obj->oop_iterate(_promo_failure_scan_stack_closure); |
756 } | |
757 } | |
758 | |
759 void DefNewGeneration::save_marks() { | |
760 eden()->set_saved_mark(); | |
761 to()->set_saved_mark(); | |
762 from()->set_saved_mark(); | |
763 } | |
764 | |
765 | |
766 void DefNewGeneration::reset_saved_marks() { | |
767 eden()->reset_saved_mark(); | |
768 to()->reset_saved_mark(); | |
769 from()->reset_saved_mark(); | |
770 } | |
771 | |
772 | |
773 bool DefNewGeneration::no_allocs_since_save_marks() { | |
774 assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden"); | |
775 assert(from()->saved_mark_at_top(), "Violated spec - alloc in from"); | |
776 return to()->saved_mark_at_top(); | |
777 } | |
778 | |
779 #define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \ | |
780 \ | |
781 void DefNewGeneration:: \ | |
782 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \ | |
783 cl->set_generation(this); \ | |
784 eden()->oop_since_save_marks_iterate##nv_suffix(cl); \ | |
785 to()->oop_since_save_marks_iterate##nv_suffix(cl); \ | |
786 from()->oop_since_save_marks_iterate##nv_suffix(cl); \ | |
787 cl->reset_generation(); \ | |
788 save_marks(); \ | |
789 } | |
790 | |
791 ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN) | |
792 | |
793 #undef DefNew_SINCE_SAVE_MARKS_DEFN | |
794 | |
795 void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor, | |
796 size_t max_alloc_words) { | |
797 if (requestor == this || _promotion_failed) return; | |
798 assert(requestor->level() > level(), "DefNewGeneration must be youngest"); | |
799 | |
800 /* $$$ Assert this? "trace" is a "MarkSweep" function so that's not appropriate. | |
801 if (to_space->top() > to_space->bottom()) { | |
802 trace("to_space not empty when contribute_scratch called"); | |
803 } | |
804 */ | |
805 | |
806 ContiguousSpace* to_space = to(); | |
807 assert(to_space->end() >= to_space->top(), "pointers out of order"); | |
808 size_t free_words = pointer_delta(to_space->end(), to_space->top()); | |
809 if (free_words >= MinFreeScratchWords) { | |
810 ScratchBlock* sb = (ScratchBlock*)to_space->top(); | |
811 sb->num_words = free_words; | |
812 sb->next = list; | |
813 list = sb; | |
814 } | |
815 } | |
816 | |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
817 void DefNewGeneration::reset_scratch() { |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
818 // If contributing scratch in to_space, mangle all of |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
819 // to_space if ZapUnusedHeapArea. This is needed because |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
820 // top is not maintained while using to-space as scratch. |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
821 if (ZapUnusedHeapArea) { |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
822 to()->mangle_unused_area_complete(); |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
823 } |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
824 } |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
825 |
0 | 826 bool DefNewGeneration::collection_attempt_is_safe() { |
827 if (!to()->is_empty()) { | |
1994
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
828 if (Verbose && PrintGCDetails) { |
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
829 gclog_or_tty->print(" :: to is not empty :: "); |
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
830 } |
0 | 831 return false; |
832 } | |
833 if (_next_gen == NULL) { | |
834 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
835 _next_gen = gch->next_gen(this); | |
836 assert(_next_gen != NULL, | |
837 "This must be the youngest gen, and not the only gen"); | |
838 } | |
1888
a7214d79fcf1
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
1836
diff
changeset
|
839 return _next_gen->promotion_attempt_is_safe(used()); |
0 | 840 } |
841 | |
842 void DefNewGeneration::gc_epilogue(bool full) { | |
1889
c766bae6c14d
6995045: assert(!gch->incremental_collection_failed()) failed: Error, defNewGeneration.cpp:827
ysr
parents:
1888
diff
changeset
|
843 DEBUG_ONLY(static bool seen_incremental_collection_failed = false;) |
c766bae6c14d
6995045: assert(!gch->incremental_collection_failed()) failed: Error, defNewGeneration.cpp:827
ysr
parents:
1888
diff
changeset
|
844 |
c766bae6c14d
6995045: assert(!gch->incremental_collection_failed()) failed: Error, defNewGeneration.cpp:827
ysr
parents:
1888
diff
changeset
|
845 assert(!GC_locker::is_active(), "We should not be executing here"); |
0 | 846 // Check if the heap is approaching full after a collection has |
847 // been done. Generally the young generation is empty at | |
848 // a minimum at the end of a collection. If it is not, then | |
849 // the heap is approaching full. | |
850 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
1888
a7214d79fcf1
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
1836
diff
changeset
|
851 if (full) { |
1889
c766bae6c14d
6995045: assert(!gch->incremental_collection_failed()) failed: Error, defNewGeneration.cpp:827
ysr
parents:
1888
diff
changeset
|
852 DEBUG_ONLY(seen_incremental_collection_failed = false;) |
1994
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
853 if (!collection_attempt_is_safe() && !_eden_space->is_empty()) { |
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
854 if (Verbose && PrintGCDetails) { |
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
855 gclog_or_tty->print("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen", |
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
856 GCCause::to_string(gch->gc_cause())); |
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
857 } |
1888
a7214d79fcf1
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
1836
diff
changeset
|
858 gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state |
a7214d79fcf1
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
1836
diff
changeset
|
859 set_should_allocate_from_space(); // we seem to be running out of space |
a7214d79fcf1
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
1836
diff
changeset
|
860 } else { |
1994
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
861 if (Verbose && PrintGCDetails) { |
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
862 gclog_or_tty->print("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen", |
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
863 GCCause::to_string(gch->gc_cause())); |
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
864 } |
1888
a7214d79fcf1
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
1836
diff
changeset
|
865 gch->clear_incremental_collection_failed(); // We just did a full collection |
a7214d79fcf1
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
1836
diff
changeset
|
866 clear_should_allocate_from_space(); // if set |
a7214d79fcf1
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
1836
diff
changeset
|
867 } |
0 | 868 } else { |
1889
c766bae6c14d
6995045: assert(!gch->incremental_collection_failed()) failed: Error, defNewGeneration.cpp:827
ysr
parents:
1888
diff
changeset
|
869 #ifdef ASSERT |
c766bae6c14d
6995045: assert(!gch->incremental_collection_failed()) failed: Error, defNewGeneration.cpp:827
ysr
parents:
1888
diff
changeset
|
870 // It is possible that incremental_collection_failed() == true |
c766bae6c14d
6995045: assert(!gch->incremental_collection_failed()) failed: Error, defNewGeneration.cpp:827
ysr
parents:
1888
diff
changeset
|
871 // here, because an attempted scavenge did not succeed. The policy |
c766bae6c14d
6995045: assert(!gch->incremental_collection_failed()) failed: Error, defNewGeneration.cpp:827
ysr
parents:
1888
diff
changeset
|
872 // is normally expected to cause a full collection which should |
c766bae6c14d
6995045: assert(!gch->incremental_collection_failed()) failed: Error, defNewGeneration.cpp:827
ysr
parents:
1888
diff
changeset
|
873 // clear that condition, so we should not be here twice in a row |
c766bae6c14d
6995045: assert(!gch->incremental_collection_failed()) failed: Error, defNewGeneration.cpp:827
ysr
parents:
1888
diff
changeset
|
874 // with incremental_collection_failed() == true without having done |
c766bae6c14d
6995045: assert(!gch->incremental_collection_failed()) failed: Error, defNewGeneration.cpp:827
ysr
parents:
1888
diff
changeset
|
875 // a full collection in between. |
c766bae6c14d
6995045: assert(!gch->incremental_collection_failed()) failed: Error, defNewGeneration.cpp:827
ysr
parents:
1888
diff
changeset
|
876 if (!seen_incremental_collection_failed && |
c766bae6c14d
6995045: assert(!gch->incremental_collection_failed()) failed: Error, defNewGeneration.cpp:827
ysr
parents:
1888
diff
changeset
|
877 gch->incremental_collection_failed()) { |
1994
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
878 if (Verbose && PrintGCDetails) { |
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
879 gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed", |
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
880 GCCause::to_string(gch->gc_cause())); |
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
881 } |
1889
c766bae6c14d
6995045: assert(!gch->incremental_collection_failed()) failed: Error, defNewGeneration.cpp:827
ysr
parents:
1888
diff
changeset
|
882 seen_incremental_collection_failed = true; |
c766bae6c14d
6995045: assert(!gch->incremental_collection_failed()) failed: Error, defNewGeneration.cpp:827
ysr
parents:
1888
diff
changeset
|
883 } else if (seen_incremental_collection_failed) { |
1994
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
884 if (Verbose && PrintGCDetails) { |
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
885 gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed", |
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
886 GCCause::to_string(gch->gc_cause())); |
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
887 } |
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
888 assert(gch->gc_cause() == GCCause::_scavenge_alot || |
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
889 (gch->gc_cause() == GCCause::_java_lang_system_gc && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) || |
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
890 !gch->incremental_collection_failed(), |
1953
8d81b4a1d3e1
6998802: ScavengeALot: assert(!gch->incremental_collection_failed()) failed: Twice in a row
ysr
parents:
1889
diff
changeset
|
891 "Twice in a row"); |
1889
c766bae6c14d
6995045: assert(!gch->incremental_collection_failed()) failed: Error, defNewGeneration.cpp:827
ysr
parents:
1888
diff
changeset
|
892 seen_incremental_collection_failed = false; |
c766bae6c14d
6995045: assert(!gch->incremental_collection_failed()) failed: Error, defNewGeneration.cpp:827
ysr
parents:
1888
diff
changeset
|
893 } |
c766bae6c14d
6995045: assert(!gch->incremental_collection_failed()) failed: Error, defNewGeneration.cpp:827
ysr
parents:
1888
diff
changeset
|
894 #endif // ASSERT |
0 | 895 } |
896 | |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
897 if (ZapUnusedHeapArea) { |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
898 eden()->check_mangled_unused_area_complete(); |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
899 from()->check_mangled_unused_area_complete(); |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
900 to()->check_mangled_unused_area_complete(); |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
901 } |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
902 |
0 | 903 // update the generation and space performance counters |
904 update_counters(); | |
905 gch->collector_policy()->counters()->update_counters(); | |
906 } | |
907 | |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
908 void DefNewGeneration::record_spaces_top() { |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
909 assert(ZapUnusedHeapArea, "Not mangling unused space"); |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
910 eden()->set_top_for_allocations(); |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
911 to()->set_top_for_allocations(); |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
912 from()->set_top_for_allocations(); |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
913 } |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
914 |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
915 |
0 | 916 void DefNewGeneration::update_counters() { |
917 if (UsePerfData) { | |
918 _eden_counters->update_all(); | |
919 _from_counters->update_all(); | |
920 _to_counters->update_all(); | |
921 _gen_counters->update_all(); | |
922 } | |
923 } | |
924 | |
925 void DefNewGeneration::verify(bool allow_dirty) { | |
926 eden()->verify(allow_dirty); | |
927 from()->verify(allow_dirty); | |
928 to()->verify(allow_dirty); | |
929 } | |
930 | |
931 void DefNewGeneration::print_on(outputStream* st) const { | |
932 Generation::print_on(st); | |
933 st->print(" eden"); | |
934 eden()->print_on(st); | |
935 st->print(" from"); | |
936 from()->print_on(st); | |
937 st->print(" to "); | |
938 to()->print_on(st); | |
939 } | |
940 | |
941 | |
942 const char* DefNewGeneration::name() const { | |
943 return "def new generation"; | |
944 } | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
945 |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
946 // Moved from inline file as they are not called inline |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
947 CompactibleSpace* DefNewGeneration::first_compaction_space() const { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
948 return eden(); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
949 } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
950 |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
951 HeapWord* DefNewGeneration::allocate(size_t word_size, |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
952 bool is_tlab) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
953 // This is the slow-path allocation for the DefNewGeneration. |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
954 // Most allocations are fast-path in compiled code. |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
955 // We try to allocate from the eden. If that works, we are happy. |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
956 // Note that since DefNewGeneration supports lock-free allocation, we |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
957 // have to use it here, as well. |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
958 HeapWord* result = eden()->par_allocate(word_size); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
959 if (result != NULL) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
960 return result; |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
961 } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
962 do { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
963 HeapWord* old_limit = eden()->soft_end(); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
964 if (old_limit < eden()->end()) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
965 // Tell the next generation we reached a limit. |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
966 HeapWord* new_limit = |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
967 next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
968 if (new_limit != NULL) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
969 Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
970 } else { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
971 assert(eden()->soft_end() == eden()->end(), |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
972 "invalid state after allocation_limit_reached returned null"); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
973 } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
974 } else { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
975 // The allocation failed and the soft limit is equal to the hard limit, |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
976 // there are no reasons to do an attempt to allocate |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
977 assert(old_limit == eden()->end(), "sanity check"); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
978 break; |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
979 } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
980 // Try to allocate until succeeded or the soft limit can't be adjusted |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
981 result = eden()->par_allocate(word_size); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
982 } while (result == NULL); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
983 |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
984 // If the eden is full and the last collection bailed out, we are running |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
985 // out of heap space, and we try to allocate the from-space, too. |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
986 // allocate_from_space can't be inlined because that would introduce a |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
987 // circular dependency at compile time. |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
988 if (result == NULL) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
989 result = allocate_from_space(word_size); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
990 } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
991 return result; |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
992 } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
993 |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
994 HeapWord* DefNewGeneration::par_allocate(size_t word_size, |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
995 bool is_tlab) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
996 return eden()->par_allocate(word_size); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
997 } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
998 |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
999 void DefNewGeneration::gc_prologue(bool full) { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1000 // Ensure that _end and _soft_end are the same in eden space. |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1001 eden()->set_soft_end(eden()->end()); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1002 } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1003 |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1004 size_t DefNewGeneration::tlab_capacity() const { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1005 return eden()->capacity(); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1006 } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1007 |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1008 size_t DefNewGeneration::unsafe_max_tlab_alloc() const { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1009 return unsafe_max_alloc_nogc(); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1010 } |