Mercurial > hg > graal-jvmci-8
annotate src/share/vm/memory/defNewGeneration.cpp @ 10241:d17700c82d7d
8006088: Incompatible heap size flags accepted by VM
Summary: Make processing of minimum, initial and maximum heap size more intiutive by removing previous limitations on allowed values, and make error reporting consistent. Further, fix errors in ergonomic heap sizing.
Reviewed-by: johnc, jwilhelm, tamao
author | tschatzl |
---|---|
date | Mon, 06 May 2013 17:19:42 +0200 |
parents | a30e7b564541 |
children | 001ec9515f84 |
rev | line source |
---|---|
0 | 1 /* |
4911 | 2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1387
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1387
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1387
diff
changeset
|
21 * questions. |
0 | 22 * |
23 */ | |
24 | |
1972 | 25 #include "precompiled.hpp" |
26 #include "gc_implementation/shared/collectorCounters.hpp" | |
27 #include "gc_implementation/shared/gcPolicyCounters.hpp" | |
28 #include "gc_implementation/shared/spaceDecorator.hpp" | |
29 #include "memory/defNewGeneration.inline.hpp" | |
30 #include "memory/gcLocker.inline.hpp" | |
31 #include "memory/genCollectedHeap.hpp" | |
32 #include "memory/genOopClosures.inline.hpp" | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
33 #include "memory/genRemSet.hpp" |
1972 | 34 #include "memory/generationSpec.hpp" |
35 #include "memory/iterator.hpp" | |
36 #include "memory/referencePolicy.hpp" | |
37 #include "memory/space.inline.hpp" | |
38 #include "oops/instanceRefKlass.hpp" | |
39 #include "oops/oop.inline.hpp" | |
40 #include "runtime/java.hpp" | |
7180
f34d701e952e
8003935: Simplify the needed includes for using Thread::current()
stefank
parents:
6725
diff
changeset
|
41 #include "runtime/thread.inline.hpp" |
1972 | 42 #include "utilities/copy.hpp" |
43 #include "utilities/stack.inline.hpp" | |
0 | 44 |
45 // | |
46 // DefNewGeneration functions. | |
47 | |
48 // Methods of protected closure types. | |
49 | |
50 DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) { | |
51 assert(g->level() == 0, "Optimized for youngest gen."); | |
52 } | |
53 void DefNewGeneration::IsAliveClosure::do_object(oop p) { | |
54 assert(false, "Do not call."); | |
55 } | |
56 bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) { | |
57 return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded(); | |
58 } | |
59 | |
60 DefNewGeneration::KeepAliveClosure:: | |
61 KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) { | |
62 GenRemSet* rs = GenCollectedHeap::heap()->rem_set(); | |
63 assert(rs->rs_kind() == GenRemSet::CardTable, "Wrong rem set kind."); | |
64 _rs = (CardTableRS*)rs; | |
65 } | |
66 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
67 void DefNewGeneration::KeepAliveClosure::do_oop(oop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
68 void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); } |
0 | 69 |
70 | |
71 DefNewGeneration::FastKeepAliveClosure:: | |
72 FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) : | |
73 DefNewGeneration::KeepAliveClosure(cl) { | |
74 _boundary = g->reserved().end(); | |
75 } | |
76 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
77 void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
78 void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); } |
0 | 79 |
80 DefNewGeneration::EvacuateFollowersClosure:: | |
81 EvacuateFollowersClosure(GenCollectedHeap* gch, int level, | |
82 ScanClosure* cur, ScanClosure* older) : | |
83 _gch(gch), _level(level), | |
84 _scan_cur_or_nonheap(cur), _scan_older(older) | |
85 {} | |
86 | |
87 void DefNewGeneration::EvacuateFollowersClosure::do_void() { | |
88 do { | |
89 _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap, | |
90 _scan_older); | |
91 } while (!_gch->no_allocs_since_save_marks(_level)); | |
92 } | |
93 | |
94 DefNewGeneration::FastEvacuateFollowersClosure:: | |
95 FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level, | |
96 DefNewGeneration* gen, | |
97 FastScanClosure* cur, FastScanClosure* older) : | |
98 _gch(gch), _level(level), _gen(gen), | |
99 _scan_cur_or_nonheap(cur), _scan_older(older) | |
100 {} | |
101 | |
102 void DefNewGeneration::FastEvacuateFollowersClosure::do_void() { | |
103 do { | |
104 _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap, | |
105 _scan_older); | |
106 } while (!_gch->no_allocs_since_save_marks(_level)); | |
1836
894b1d7c7e01
6423256: GC stacks should use a better data structure
jcoomes
parents:
1552
diff
changeset
|
107 guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan"); |
0 | 108 } |
109 | |
110 ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) : | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
111 OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier) |
0 | 112 { |
113 assert(_g->level() == 0, "Optimized for youngest generation"); | |
114 _boundary = _g->reserved().end(); | |
115 } | |
116 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
117 void ScanClosure::do_oop(oop* p) { ScanClosure::do_oop_work(p); } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
118 void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
119 |
0 | 120 FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) : |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
121 OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier) |
0 | 122 { |
123 assert(_g->level() == 0, "Optimized for youngest generation"); | |
124 _boundary = _g->reserved().end(); | |
125 } | |
126 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
127 void FastScanClosure::do_oop(oop* p) { FastScanClosure::do_oop_work(p); } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
128 void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
129 |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
130 void KlassScanClosure::do_klass(Klass* klass) { |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
131 #ifndef PRODUCT |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
132 if (TraceScavenge) { |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
133 ResourceMark rm; |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
134 gclog_or_tty->print_cr("KlassScanClosure::do_klass %p, %s, dirty: %s", |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
135 klass, |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
136 klass->external_name(), |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
137 klass->has_modified_oops() ? "true" : "false"); |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
138 } |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
139 #endif |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
140 |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
141 // If the klass has not been dirtied we know that there's |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
142 // no references into the young gen and we can skip it. |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
143 if (klass->has_modified_oops()) { |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
144 if (_accumulate_modified_oops) { |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
145 klass->accumulate_modified_oops(); |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
146 } |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
147 |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
148 // Clear this state since we're going to scavenge all the metadata. |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
149 klass->clear_modified_oops(); |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
150 |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
151 // Tell the closure which Klass is being scanned so that it can be dirtied |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
152 // if oops are left pointing into the young gen. |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
153 _scavenge_closure->set_scanned_klass(klass); |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
154 |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
155 klass->oops_do(_scavenge_closure); |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
156 |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
157 _scavenge_closure->set_scanned_klass(NULL); |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
158 } |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
159 } |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
160 |
0 | 161 ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) : |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
162 _g(g) |
0 | 163 { |
164 assert(_g->level() == 0, "Optimized for youngest generation"); | |
165 _boundary = _g->reserved().end(); | |
166 } | |
167 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
168 void ScanWeakRefClosure::do_oop(oop* p) { ScanWeakRefClosure::do_oop_work(p); } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
169 void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
170 |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
171 void FilteringClosure::do_oop(oop* p) { FilteringClosure::do_oop_work(p); } |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
172 void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); } |
0 | 173 |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
174 KlassScanClosure::KlassScanClosure(OopsInKlassOrGenClosure* scavenge_closure, |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
175 KlassRemSet* klass_rem_set) |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
176 : _scavenge_closure(scavenge_closure), |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
177 _accumulate_modified_oops(klass_rem_set->accumulate_modified_oops()) {} |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
178 |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
179 |
0 | 180 DefNewGeneration::DefNewGeneration(ReservedSpace rs, |
181 size_t initial_size, | |
182 int level, | |
183 const char* policy) | |
184 : Generation(rs, initial_size, level), | |
185 _promo_failure_drain_in_progress(false), | |
186 _should_allocate_from_space(false) | |
187 { | |
188 MemRegion cmr((HeapWord*)_virtual_space.low(), | |
189 (HeapWord*)_virtual_space.high()); | |
190 Universe::heap()->barrier_set()->resize_covered_region(cmr); | |
191 | |
192 if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) { | |
193 _eden_space = new ConcEdenSpace(this); | |
194 } else { | |
195 _eden_space = new EdenSpace(this); | |
196 } | |
197 _from_space = new ContiguousSpace(); | |
198 _to_space = new ContiguousSpace(); | |
199 | |
200 if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) | |
201 vm_exit_during_initialization("Could not allocate a new gen space"); | |
202 | |
203 // Compute the maximum eden and survivor space sizes. These sizes | |
204 // are computed assuming the entire reserved space is committed. | |
205 // These values are exported as performance counters. | |
206 uintx alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment(); | |
207 uintx size = _virtual_space.reserved_size(); | |
208 _max_survivor_size = compute_survivor_size(size, alignment); | |
209 _max_eden_size = size - (2*_max_survivor_size); | |
210 | |
211 // allocate the performance counters | |
212 | |
213 // Generation counters -- generation 0, 3 subspaces | |
214 _gen_counters = new GenerationCounters("new", 0, 3, &_virtual_space); | |
215 _gc_counters = new CollectorCounters(policy, 0); | |
216 | |
217 _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space, | |
218 _gen_counters); | |
219 _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space, | |
220 _gen_counters); | |
221 _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space, | |
222 _gen_counters); | |
223 | |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
224 compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle); |
0 | 225 update_counters(); |
226 _next_gen = NULL; | |
227 _tenuring_threshold = MaxTenuringThreshold; | |
228 _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize; | |
229 } | |
230 | |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
231 void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size, |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
232 bool clear_space, |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
233 bool mangle_space) { |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
234 uintx alignment = |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
235 GenCollectedHeap::heap()->collector_policy()->min_alignment(); |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
236 |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
237 // If the spaces are being cleared (only done at heap initialization |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
238 // currently), the survivor spaces need not be empty. |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
239 // Otherwise, no care is taken for used areas in the survivor spaces |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
240 // so check. |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
241 assert(clear_space || (to()->is_empty() && from()->is_empty()), |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
242 "Initialization of the survivor spaces assumes these are empty"); |
0 | 243 |
244 // Compute sizes | |
245 uintx size = _virtual_space.committed_size(); | |
246 uintx survivor_size = compute_survivor_size(size, alignment); | |
247 uintx eden_size = size - (2*survivor_size); | |
248 assert(eden_size > 0 && survivor_size <= eden_size, "just checking"); | |
249 | |
250 if (eden_size < minimum_eden_size) { | |
251 // May happen due to 64Kb rounding, if so adjust eden size back up | |
252 minimum_eden_size = align_size_up(minimum_eden_size, alignment); | |
253 uintx maximum_survivor_size = (size - minimum_eden_size) / 2; | |
254 uintx unaligned_survivor_size = | |
255 align_size_down(maximum_survivor_size, alignment); | |
256 survivor_size = MAX2(unaligned_survivor_size, alignment); | |
257 eden_size = size - (2*survivor_size); | |
258 assert(eden_size > 0 && survivor_size <= eden_size, "just checking"); | |
259 assert(eden_size >= minimum_eden_size, "just checking"); | |
260 } | |
261 | |
262 char *eden_start = _virtual_space.low(); | |
263 char *from_start = eden_start + eden_size; | |
264 char *to_start = from_start + survivor_size; | |
265 char *to_end = to_start + survivor_size; | |
266 | |
267 assert(to_end == _virtual_space.high(), "just checking"); | |
268 assert(Space::is_aligned((HeapWord*)eden_start), "checking alignment"); | |
269 assert(Space::is_aligned((HeapWord*)from_start), "checking alignment"); | |
270 assert(Space::is_aligned((HeapWord*)to_start), "checking alignment"); | |
271 | |
272 MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start); | |
273 MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start); | |
274 MemRegion toMR ((HeapWord*)to_start, (HeapWord*)to_end); | |
275 | |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
276 // A minimum eden size implies that there is a part of eden that |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
277 // is being used and that affects the initialization of any |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
278 // newly formed eden. |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
279 bool live_in_eden = minimum_eden_size > 0; |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
280 |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
281 // If not clearing the spaces, do some checking to verify that |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
282 // the space are already mangled. |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
283 if (!clear_space) { |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
284 // Must check mangling before the spaces are reshaped. Otherwise, |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
285 // the bottom or end of one space may have moved into another |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
286 // a failure of the check may not correctly indicate which space |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
287 // is not properly mangled. |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
288 if (ZapUnusedHeapArea) { |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
289 HeapWord* limit = (HeapWord*) _virtual_space.high(); |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
290 eden()->check_mangled_unused_area(limit); |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
291 from()->check_mangled_unused_area(limit); |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
292 to()->check_mangled_unused_area(limit); |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
293 } |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
294 } |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
295 |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
296 // Reset the spaces for their new regions. |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
297 eden()->initialize(edenMR, |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
298 clear_space && !live_in_eden, |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
299 SpaceDecorator::Mangle); |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
300 // If clear_space and live_in_eden, we will not have cleared any |
0 | 301 // portion of eden above its top. This can cause newly |
302 // expanded space not to be mangled if using ZapUnusedHeapArea. | |
303 // We explicitly do such mangling here. | |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
304 if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) { |
0 | 305 eden()->mangle_unused_area(); |
306 } | |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
307 from()->initialize(fromMR, clear_space, mangle_space); |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
308 to()->initialize(toMR, clear_space, mangle_space); |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
309 |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
310 // Set next compaction spaces. |
0 | 311 eden()->set_next_compaction_space(from()); |
312 // The to-space is normally empty before a compaction so need | |
313 // not be considered. The exception is during promotion | |
314 // failure handling when to-space can contain live objects. | |
315 from()->set_next_compaction_space(NULL); | |
316 } | |
317 | |
318 void DefNewGeneration::swap_spaces() { | |
319 ContiguousSpace* s = from(); | |
320 _from_space = to(); | |
321 _to_space = s; | |
322 eden()->set_next_compaction_space(from()); | |
323 // The to-space is normally empty before a compaction so need | |
324 // not be considered. The exception is during promotion | |
325 // failure handling when to-space can contain live objects. | |
326 from()->set_next_compaction_space(NULL); | |
327 | |
328 if (UsePerfData) { | |
329 CSpaceCounters* c = _from_counters; | |
330 _from_counters = _to_counters; | |
331 _to_counters = c; | |
332 } | |
333 } | |
334 | |
335 bool DefNewGeneration::expand(size_t bytes) { | |
336 MutexLocker x(ExpandHeap_lock); | |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
337 HeapWord* prev_high = (HeapWord*) _virtual_space.high(); |
0 | 338 bool success = _virtual_space.expand_by(bytes); |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
339 if (success && ZapUnusedHeapArea) { |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
340 // Mangle newly committed space immediately because it |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
341 // can be done here more simply that after the new |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
342 // spaces have been computed. |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
343 HeapWord* new_high = (HeapWord*) _virtual_space.high(); |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
344 MemRegion mangle_region(prev_high, new_high); |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
345 SpaceMangler::mangle_region(mangle_region); |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
346 } |
0 | 347 |
348 // Do not attempt an expand-to-the reserve size. The | |
349 // request should properly observe the maximum size of | |
350 // the generation so an expand-to-reserve should be | |
351 // unnecessary. Also a second call to expand-to-reserve | |
352 // value potentially can cause an undue expansion. | |
353 // For example if the first expand fail for unknown reasons, | |
354 // but the second succeeds and expands the heap to its maximum | |
355 // value. | |
356 if (GC_locker::is_active()) { | |
357 if (PrintGC && Verbose) { | |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
358 gclog_or_tty->print_cr("Garbage collection disabled, " |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
359 "expanded heap instead"); |
0 | 360 } |
361 } | |
362 | |
363 return success; | |
364 } | |
365 | |
366 | |
367 void DefNewGeneration::compute_new_size() { | |
368 // This is called after a gc that includes the following generation | |
369 // (which is required to exist.) So from-space will normally be empty. | |
370 // Note that we check both spaces, since if scavenge failed they revert roles. | |
371 // If not we bail out (otherwise we would have to relocate the objects) | |
372 if (!from()->is_empty() || !to()->is_empty()) { | |
373 return; | |
374 } | |
375 | |
376 int next_level = level() + 1; | |
377 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
378 assert(next_level < gch->_n_gens, | |
379 "DefNewGeneration cannot be an oldest gen"); | |
380 | |
381 Generation* next_gen = gch->_gens[next_level]; | |
382 size_t old_size = next_gen->capacity(); | |
383 size_t new_size_before = _virtual_space.committed_size(); | |
384 size_t min_new_size = spec()->init_size(); | |
385 size_t max_new_size = reserved().byte_size(); | |
386 assert(min_new_size <= new_size_before && | |
387 new_size_before <= max_new_size, | |
388 "just checking"); | |
389 // All space sizes must be multiples of Generation::GenGrain. | |
390 size_t alignment = Generation::GenGrain; | |
391 | |
392 // Compute desired new generation size based on NewRatio and | |
393 // NewSizeThreadIncrease | |
394 size_t desired_new_size = old_size/NewRatio; | |
395 int threads_count = Threads::number_of_non_daemon_threads(); | |
396 size_t thread_increase_size = threads_count * NewSizeThreadIncrease; | |
397 desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment); | |
398 | |
399 // Adjust new generation size | |
400 desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size); | |
401 assert(desired_new_size <= max_new_size, "just checking"); | |
402 | |
403 bool changed = false; | |
404 if (desired_new_size > new_size_before) { | |
405 size_t change = desired_new_size - new_size_before; | |
406 assert(change % alignment == 0, "just checking"); | |
407 if (expand(change)) { | |
408 changed = true; | |
409 } | |
410 // If the heap failed to expand to the desired size, | |
411 // "changed" will be false. If the expansion failed | |
412 // (and at this point it was expected to succeed), | |
413 // ignore the failure (leaving "changed" as false). | |
414 } | |
415 if (desired_new_size < new_size_before && eden()->is_empty()) { | |
416 // bail out of shrinking if objects in eden | |
417 size_t change = new_size_before - desired_new_size; | |
418 assert(change % alignment == 0, "just checking"); | |
419 _virtual_space.shrink_by(change); | |
420 changed = true; | |
421 } | |
422 if (changed) { | |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
423 // The spaces have already been mangled at this point but |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
424 // may not have been cleared (set top = bottom) and should be. |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
425 // Mangling was done when the heap was being expanded. |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
426 compute_space_boundaries(eden()->used(), |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
427 SpaceDecorator::Clear, |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
428 SpaceDecorator::DontMangle); |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
429 MemRegion cmr((HeapWord*)_virtual_space.low(), |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
430 (HeapWord*)_virtual_space.high()); |
0 | 431 Universe::heap()->barrier_set()->resize_covered_region(cmr); |
432 if (Verbose && PrintGC) { | |
433 size_t new_size_after = _virtual_space.committed_size(); | |
434 size_t eden_size_after = eden()->capacity(); | |
435 size_t survivor_size_after = from()->capacity(); | |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
436 gclog_or_tty->print("New generation size " SIZE_FORMAT "K->" |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
437 SIZE_FORMAT "K [eden=" |
0 | 438 SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]", |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
439 new_size_before/K, new_size_after/K, |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
440 eden_size_after/K, survivor_size_after/K); |
0 | 441 if (WizardMode) { |
442 gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]", | |
443 thread_increase_size/K, threads_count); | |
444 } | |
445 gclog_or_tty->cr(); | |
446 } | |
447 } | |
448 } | |
449 | |
450 void DefNewGeneration::object_iterate_since_last_GC(ObjectClosure* cl) { | |
451 // $$$ This may be wrong in case of "scavenge failure"? | |
452 eden()->object_iterate(cl); | |
453 } | |
454 | |
455 void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) { | |
456 assert(false, "NYI -- are you sure you want to call this?"); | |
457 } | |
458 | |
459 | |
460 size_t DefNewGeneration::capacity() const { | |
461 return eden()->capacity() | |
462 + from()->capacity(); // to() is only used during scavenge | |
463 } | |
464 | |
465 | |
466 size_t DefNewGeneration::used() const { | |
467 return eden()->used() | |
468 + from()->used(); // to() is only used during scavenge | |
469 } | |
470 | |
471 | |
472 size_t DefNewGeneration::free() const { | |
473 return eden()->free() | |
474 + from()->free(); // to() is only used during scavenge | |
475 } | |
476 | |
477 size_t DefNewGeneration::max_capacity() const { | |
478 const size_t alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment(); | |
479 const size_t reserved_bytes = reserved().byte_size(); | |
480 return reserved_bytes - compute_survivor_size(reserved_bytes, alignment); | |
481 } | |
482 | |
483 size_t DefNewGeneration::unsafe_max_alloc_nogc() const { | |
484 return eden()->free(); | |
485 } | |
486 | |
487 size_t DefNewGeneration::capacity_before_gc() const { | |
488 return eden()->capacity(); | |
489 } | |
490 | |
491 size_t DefNewGeneration::contiguous_available() const { | |
492 return eden()->free(); | |
493 } | |
494 | |
495 | |
496 HeapWord** DefNewGeneration::top_addr() const { return eden()->top_addr(); } | |
497 HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); } | |
498 | |
499 void DefNewGeneration::object_iterate(ObjectClosure* blk) { | |
500 eden()->object_iterate(blk); | |
501 from()->object_iterate(blk); | |
502 } | |
503 | |
504 | |
505 void DefNewGeneration::space_iterate(SpaceClosure* blk, | |
506 bool usedOnly) { | |
507 blk->do_space(eden()); | |
508 blk->do_space(from()); | |
509 blk->do_space(to()); | |
510 } | |
511 | |
512 // The last collection bailed out, we are running out of heap space, | |
513 // so we try to allocate the from-space, too. | |
514 HeapWord* DefNewGeneration::allocate_from_space(size_t size) { | |
515 HeapWord* result = NULL; | |
1994
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
516 if (Verbose && PrintGCDetails) { |
0 | 517 gclog_or_tty->print("DefNewGeneration::allocate_from_space(%u):" |
1994
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
518 " will_fail: %s" |
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
519 " heap_lock: %s" |
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
520 " free: " SIZE_FORMAT, |
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
521 size, |
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
522 GenCollectedHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ? |
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
523 "true" : "false", |
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
524 Heap_lock->is_locked() ? "locked" : "unlocked", |
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
525 from()->free()); |
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
526 } |
0 | 527 if (should_allocate_from_space() || GC_locker::is_active_and_needs_gc()) { |
528 if (Heap_lock->owned_by_self() || | |
529 (SafepointSynchronize::is_at_safepoint() && | |
530 Thread::current()->is_VM_thread())) { | |
531 // If the Heap_lock is not locked by this thread, this will be called | |
532 // again later with the Heap_lock held. | |
533 result = from()->allocate(size); | |
534 } else if (PrintGC && Verbose) { | |
535 gclog_or_tty->print_cr(" Heap_lock is not owned by self"); | |
536 } | |
537 } else if (PrintGC && Verbose) { | |
538 gclog_or_tty->print_cr(" should_allocate_from_space: NOT"); | |
539 } | |
540 if (PrintGC && Verbose) { | |
541 gclog_or_tty->print_cr(" returns %s", result == NULL ? "NULL" : "object"); | |
542 } | |
543 return result; | |
544 } | |
545 | |
546 HeapWord* DefNewGeneration::expand_and_allocate(size_t size, | |
547 bool is_tlab, | |
548 bool parallel) { | |
549 // We don't attempt to expand the young generation (but perhaps we should.) | |
550 return allocate(size, is_tlab); | |
551 } | |
552 | |
7609
a30e7b564541
8005972: ParNew should not update the tenuring threshold when promotion failed has occurred
brutisso
parents:
7180
diff
changeset
|
553 void DefNewGeneration::adjust_desired_tenuring_threshold() { |
a30e7b564541
8005972: ParNew should not update the tenuring threshold when promotion failed has occurred
brutisso
parents:
7180
diff
changeset
|
554 // Set the desired survivor size to half the real survivor space |
a30e7b564541
8005972: ParNew should not update the tenuring threshold when promotion failed has occurred
brutisso
parents:
7180
diff
changeset
|
555 _tenuring_threshold = |
a30e7b564541
8005972: ParNew should not update the tenuring threshold when promotion failed has occurred
brutisso
parents:
7180
diff
changeset
|
556 age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize); |
a30e7b564541
8005972: ParNew should not update the tenuring threshold when promotion failed has occurred
brutisso
parents:
7180
diff
changeset
|
557 } |
0 | 558 |
559 void DefNewGeneration::collect(bool full, | |
560 bool clear_all_soft_refs, | |
561 size_t size, | |
562 bool is_tlab) { | |
563 assert(full || size > 0, "otherwise we don't want to collect"); | |
564 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
565 _next_gen = gch->next_gen(this); | |
566 assert(_next_gen != NULL, | |
567 "This must be the youngest gen, and not the only gen"); | |
568 | |
569 // If the next generation is too full to accomodate promotion | |
570 // from this generation, pass on collection; let the next generation | |
571 // do it. | |
572 if (!collection_attempt_is_safe()) { | |
1994
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
573 if (Verbose && PrintGCDetails) { |
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
574 gclog_or_tty->print(" :: Collection attempt not safe :: "); |
6cd6d394f280
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
ysr
parents:
1972
diff
changeset
|
575 } |
1888
a7214d79fcf1
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
1836
diff
changeset
|
576 gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one |
0 | 577 return; |
578 } | |
579 assert(to()->is_empty(), "Else not collection_attempt_is_safe"); | |
580 | |
581 init_assuming_no_promotion_failure(); | |
582 | |
6064
9d679effd28c
7166894: Add gc cause to GC logging for all collectors
brutisso
parents:
6008
diff
changeset
|
583 TraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, gclog_or_tty); |
0 | 584 // Capture heap used before collection (for printing). |
585 size_t gch_prev_used = gch->used(); | |
586 | |
587 SpecializationStats::clear(); | |
588 | |
589 // These can be shared for all code paths | |
590 IsAliveClosure is_alive(this); | |
591 ScanWeakRefClosure scan_weak_ref(this); | |
592 | |
593 age_table()->clear(); | |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
594 to()->clear(SpaceDecorator::Mangle); |
0 | 595 |
596 gch->rem_set()->prepare_for_younger_refs_iterate(false); | |
597 | |
598 assert(gch->no_allocs_since_save_marks(0), | |
599 "save marks have not been newly set."); | |
600 | |
601 // Not very pretty. | |
602 CollectorPolicy* cp = gch->collector_policy(); | |
603 | |
604 FastScanClosure fsc_with_no_gc_barrier(this, false); | |
605 FastScanClosure fsc_with_gc_barrier(this, true); | |
606 | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
607 KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier, |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
608 gch->rem_set()->klass_rem_set()); |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
609 |
0 | 610 set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier); |
611 FastEvacuateFollowersClosure evacuate_followers(gch, _level, this, | |
612 &fsc_with_no_gc_barrier, | |
613 &fsc_with_gc_barrier); | |
614 | |
615 assert(gch->no_allocs_since_save_marks(0), | |
616 "save marks have not been newly set."); | |
617 | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
618 int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache; |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
619 |
0 | 620 gch->gen_process_strong_roots(_level, |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
457
diff
changeset
|
621 true, // Process younger gens, if any, |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
457
diff
changeset
|
622 // as strong roots. |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
457
diff
changeset
|
623 true, // activate StrongRootsScope |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
624 true, // is scavenging |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
625 SharedHeap::ScanningOption(so), |
989
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
457
diff
changeset
|
626 &fsc_with_no_gc_barrier, |
148e5441d916
6863023: need non-perm oops in code cache for JSR 292
jrose
parents:
457
diff
changeset
|
627 true, // walk *all* scavengable nmethods |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
628 &fsc_with_gc_barrier, |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6064
diff
changeset
|
629 &klass_scan_closure); |
0 | 630 |
631 // "evacuate followers". | |
632 evacuate_followers.do_void(); | |
633 | |
634 FastKeepAliveClosure keep_alive(this, &scan_weak_ref); | |
453
c96030fff130
6684579: SoftReference processing can be made more efficient
ysr
parents:
356
diff
changeset
|
635 ReferenceProcessor* rp = ref_processor(); |
457
27a80744a83b
6778647: snap(), snap_policy() should be renamed setup(), setup_policy()
ysr
parents:
453
diff
changeset
|
636 rp->setup_policy(clear_all_soft_refs); |
453
c96030fff130
6684579: SoftReference processing can be made more efficient
ysr
parents:
356
diff
changeset
|
637 rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers, |
c96030fff130
6684579: SoftReference processing can be made more efficient
ysr
parents:
356
diff
changeset
|
638 NULL); |
0 | 639 if (!promotion_failed()) { |
640 // Swap the survivor spaces. | |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
641 eden()->clear(SpaceDecorator::Mangle); |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
642 from()->clear(SpaceDecorator::Mangle); |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
643 if (ZapUnusedHeapArea) { |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
644 // This is now done here because of the piece-meal mangling which |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
645 // can check for valid mangling at intermediate points in the |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
646 // collection(s). When a minor collection fails to collect |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
647 // sufficient space resizing of the young generation can occur |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
648 // an redistribute the spaces in the young generation. Mangle |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
649 // here so that unzapped regions don't get distributed to |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
650 // other spaces. |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
651 to()->mangle_unused_area(); |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
652 } |
0 | 653 swap_spaces(); |
654 | |
655 assert(to()->is_empty(), "to space should be empty now"); | |
656 | |
7609
a30e7b564541
8005972: ParNew should not update the tenuring threshold when promotion failed has occurred
brutisso
parents:
7180
diff
changeset
|
657 adjust_desired_tenuring_threshold(); |
0 | 658 |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1145
diff
changeset
|
659 // A successful scavenge should restart the GC time limit count which is |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1145
diff
changeset
|
660 // for full GC's. |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1145
diff
changeset
|
661 AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy(); |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1145
diff
changeset
|
662 size_policy->reset_gc_overhead_limit_count(); |
0 | 663 if (PrintGC && !PrintGCDetails) { |
664 gch->print_heap_change(gch_prev_used); | |
665 } | |
1888
a7214d79fcf1
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
1836
diff
changeset
|
666 assert(!gch->incremental_collection_failed(), "Should be clear"); |
0 | 667 } else { |
1836
894b1d7c7e01
6423256: GC stacks should use a better data structure
jcoomes
parents:
1552
diff
changeset
|
668 assert(_promo_failure_scan_stack.is_empty(), "post condition"); |
894b1d7c7e01
6423256: GC stacks should use a better data structure
jcoomes
parents:
1552
diff
changeset
|
669 _promo_failure_scan_stack.clear(true); // Clear cached segments. |
0 | 670 |
671 remove_forwarding_pointers(); | |
672 if (PrintGCDetails) { | |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
989
diff
changeset
|
673 gclog_or_tty->print(" (promotion failed) "); |
0 | 674 } |
675 // Add to-space to the list of space to compact | |
676 // when a promotion failure has occurred. In that | |
677 // case there can be live objects in to-space | |
678 // as a result of a partial evacuation of eden | |
679 // and from-space. | |
1836
894b1d7c7e01
6423256: GC stacks should use a better data structure
jcoomes
parents:
1552
diff
changeset
|
680 swap_spaces(); // For uniformity wrt ParNewGeneration. |
0 | 681 from()->set_next_compaction_space(to()); |
1888
a7214d79fcf1
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
ysr
parents:
1836
diff
changeset
|
682 gch->set_incremental_collection_failed(); |
0 | 683 |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
989
diff
changeset
|
684 // Inform the next generation that a promotion failure occurred. |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
989
diff
changeset
|
685 _next_gen->promotion_failure_occurred(); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
989
diff
changeset
|
686 |
0 | 687 // Reset the PromotionFailureALot counters. |
688 NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();) | |
689 } | |
690 // set new iteration safe limit for the survivor spaces | |
691 from()->set_concurrent_iteration_safe_limit(from()->top()); | |
692 to()->set_concurrent_iteration_safe_limit(to()->top()); | |
693 SpecializationStats::print(); | |
4911 | 694 |
695 // We need to use a monotonically non-deccreasing time in ms | |
696 // or we will see time-warp warnings and os::javaTimeMillis() | |
697 // does not guarantee monotonicity. | |
698 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; | |
699 update_time_of_last_gc(now); | |
0 | 700 } |
701 | |
702 class RemoveForwardPointerClosure: public ObjectClosure { | |
703 public: | |
704 void do_object(oop obj) { | |
705 obj->init_mark(); | |
706 } | |
707 }; | |
708 | |
709 void DefNewGeneration::init_assuming_no_promotion_failure() { | |
710 _promotion_failed = false; | |
711 from()->set_next_compaction_space(NULL); | |
712 } | |
713 | |
714 void DefNewGeneration::remove_forwarding_pointers() { | |
715 RemoveForwardPointerClosure rspc; | |
716 eden()->object_iterate(&rspc); | |
717 from()->object_iterate(&rspc); | |
1836
894b1d7c7e01
6423256: GC stacks should use a better data structure
jcoomes
parents:
1552
diff
changeset
|
718 |
0 | 719 // Now restore saved marks, if any. |
1836
894b1d7c7e01
6423256: GC stacks should use a better data structure
jcoomes
parents:
1552
diff
changeset
|
720 assert(_objs_with_preserved_marks.size() == _preserved_marks_of_objs.size(), |
894b1d7c7e01
6423256: GC stacks should use a better data structure
jcoomes
parents:
1552
diff
changeset
|
721 "should be the same"); |
894b1d7c7e01
6423256: GC stacks should use a better data structure
jcoomes
parents:
1552
diff
changeset
|
722 while (!_objs_with_preserved_marks.is_empty()) { |
894b1d7c7e01
6423256: GC stacks should use a better data structure
jcoomes
parents:
1552
diff
changeset
|
723 oop obj = _objs_with_preserved_marks.pop(); |
894b1d7c7e01
6423256: GC stacks should use a better data structure
jcoomes
parents:
1552
diff
changeset
|
724 markOop m = _preserved_marks_of_objs.pop(); |
894b1d7c7e01
6423256: GC stacks should use a better data structure
jcoomes
parents:
1552
diff
changeset
|
725 obj->set_mark(m); |
0 | 726 } |
1836
894b1d7c7e01
6423256: GC stacks should use a better data structure
jcoomes
parents:
1552
diff
changeset
|
727 _objs_with_preserved_marks.clear(true); |
894b1d7c7e01
6423256: GC stacks should use a better data structure
jcoomes
parents:
1552
diff
changeset
|
728 _preserved_marks_of_objs.clear(true); |
0 | 729 } |
730 | |
2038
74ee0db180fa
6807801: CMS: could save/restore fewer header words during scavenge
ysr
parents:
1994
diff
changeset
|
731 void DefNewGeneration::preserve_mark(oop obj, markOop m) { |
74ee0db180fa
6807801: CMS: could save/restore fewer header words during scavenge
ysr
parents:
1994
diff
changeset
|
732 assert(promotion_failed() && m->must_be_preserved_for_promotion_failure(obj), |
74ee0db180fa
6807801: CMS: could save/restore fewer header words during scavenge
ysr
parents:
1994
diff
changeset
|
733 "Oversaving!"); |
74ee0db180fa
6807801: CMS: could save/restore fewer header words during scavenge
ysr
parents:
1994
diff
changeset
|
734 _objs_with_preserved_marks.push(obj); |
74ee0db180fa
6807801: CMS: could save/restore fewer header words during scavenge
ysr
parents:
1994
diff
changeset
|
735 _preserved_marks_of_objs.push(m); |
74ee0db180fa
6807801: CMS: could save/restore fewer header words during scavenge
ysr
parents:
1994
diff
changeset
|
736 } |
74ee0db180fa
6807801: CMS: could save/restore fewer header words during scavenge
ysr
parents:
1994
diff
changeset
|
737 |
0 | 738 void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) { |
739 if (m->must_be_preserved_for_promotion_failure(obj)) { | |
2038
74ee0db180fa
6807801: CMS: could save/restore fewer header words during scavenge
ysr
parents:
1994
diff
changeset
|
740 preserve_mark(obj, m); |
0 | 741 } |
742 } | |
743 | |
744 void DefNewGeneration::handle_promotion_failure(oop old) { | |
2038
74ee0db180fa
6807801: CMS: could save/restore fewer header words during scavenge
ysr
parents:
1994
diff
changeset
|
745 if (PrintPromotionFailure && !_promotion_failed) { |
1145
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
989
diff
changeset
|
746 gclog_or_tty->print(" (promotion failure size = " SIZE_FORMAT ") ", |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
989
diff
changeset
|
747 old->size()); |
e018e6884bd8
6631166: CMS: better heuristics when combatting fragmentation
ysr
parents:
989
diff
changeset
|
748 } |
2038
74ee0db180fa
6807801: CMS: could save/restore fewer header words during scavenge
ysr
parents:
1994
diff
changeset
|
749 _promotion_failed = true; |
74ee0db180fa
6807801: CMS: could save/restore fewer header words during scavenge
ysr
parents:
1994
diff
changeset
|
750 preserve_mark_if_necessary(old, old->mark()); |
0 | 751 // forward to self |
752 old->forward_to(old); | |
753 | |
1836
894b1d7c7e01
6423256: GC stacks should use a better data structure
jcoomes
parents:
1552
diff
changeset
|
754 _promo_failure_scan_stack.push(old); |
0 | 755 |
756 if (!_promo_failure_drain_in_progress) { | |
757 // prevent recursion in copy_to_survivor_space() | |
758 _promo_failure_drain_in_progress = true; | |
759 drain_promo_failure_scan_stack(); | |
760 _promo_failure_drain_in_progress = false; | |
761 } | |
762 } | |
763 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
764 oop DefNewGeneration::copy_to_survivor_space(oop old) { |
0 | 765 assert(is_in_reserved(old) && !old->is_forwarded(), |
766 "shouldn't be scavenging this oop"); | |
767 size_t s = old->size(); | |
768 oop obj = NULL; | |
769 | |
770 // Try allocating obj in to-space (unless too old) | |
771 if (old->age() < tenuring_threshold()) { | |
772 obj = (oop) to()->allocate(s); | |
773 } | |
774 | |
775 // Otherwise try allocating obj tenured | |
776 if (obj == NULL) { | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
777 obj = _next_gen->promote(old, s); |
0 | 778 if (obj == NULL) { |
779 handle_promotion_failure(old); | |
780 return old; | |
781 } | |
782 } else { | |
783 // Prefetch beyond obj | |
784 const intx interval = PrefetchCopyIntervalInBytes; | |
785 Prefetch::write(obj, interval); | |
786 | |
787 // Copy obj | |
788 Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s); | |
789 | |
790 // Increment age if obj still in new generation | |
791 obj->incr_age(); | |
792 age_table()->add(obj, s); | |
793 } | |
794 | |
795 // Done, insert forward pointer to obj in this header | |
796 old->forward_to(obj); | |
797 | |
798 return obj; | |
799 } | |
800 | |
801 void DefNewGeneration::drain_promo_failure_scan_stack() { | |
1836
894b1d7c7e01
6423256: GC stacks should use a better data structure
jcoomes
parents:
1552
diff
changeset
|
802 while (!_promo_failure_scan_stack.is_empty()) { |
894b1d7c7e01
6423256: GC stacks should use a better data structure
jcoomes
parents:
1552
diff
changeset
|
803 oop obj = _promo_failure_scan_stack.pop(); |
0 | 804 obj->oop_iterate(_promo_failure_scan_stack_closure); |
805 } | |
806 } | |
807 | |
808 void DefNewGeneration::save_marks() { | |
809 eden()->set_saved_mark(); | |
810 to()->set_saved_mark(); | |
811 from()->set_saved_mark(); | |
812 } | |
813 | |
814 | |
815 void DefNewGeneration::reset_saved_marks() { | |
816 eden()->reset_saved_mark(); | |
817 to()->reset_saved_mark(); | |
818 from()->reset_saved_mark(); | |
819 } | |
820 | |
821 | |
822 bool DefNewGeneration::no_allocs_since_save_marks() { | |
823 assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden"); | |
824 assert(from()->saved_mark_at_top(), "Violated spec - alloc in from"); | |
825 return to()->saved_mark_at_top(); | |
826 } | |
827 | |
828 #define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \ | |
829 \ | |
830 void DefNewGeneration:: \ | |
831 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \ | |
832 cl->set_generation(this); \ | |
833 eden()->oop_since_save_marks_iterate##nv_suffix(cl); \ | |
834 to()->oop_since_save_marks_iterate##nv_suffix(cl); \ | |
835 from()->oop_since_save_marks_iterate##nv_suffix(cl); \ | |
836 cl->reset_generation(); \ | |
837 save_marks(); \ | |
838 } | |
839 | |
840 ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN) | |
841 | |
842 #undef DefNew_SINCE_SAVE_MARKS_DEFN | |
843 | |
844 void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor, | |
845 size_t max_alloc_words) { | |
846 if (requestor == this || _promotion_failed) return; | |
847 assert(requestor->level() > level(), "DefNewGeneration must be youngest"); | |
848 | |
849 /* $$$ Assert this? "trace" is a "MarkSweep" function so that's not appropriate. | |
850 if (to_space->top() > to_space->bottom()) { | |
851 trace("to_space not empty when contribute_scratch called"); | |
852 } | |
853 */ | |
854 | |
855 ContiguousSpace* to_space = to(); | |
856 assert(to_space->end() >= to_space->top(), "pointers out of order"); | |
857 size_t free_words = pointer_delta(to_space->end(), to_space->top()); | |
858 if (free_words >= MinFreeScratchWords) { | |
859 ScratchBlock* sb = (ScratchBlock*)to_space->top(); | |
860 sb->num_words = free_words; | |
861 sb->next = list; | |
862 list = sb; | |
863 } | |
864 } | |
865 | |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
// Called when to-space is done being used as scratch.
void DefNewGeneration::reset_scratch() {
  // If contributing scratch in to_space, mangle all of
  // to_space if ZapUnusedHeapArea.  This is needed because
  // top is not maintained while using to-space as scratch.
  if (ZapUnusedHeapArea) {
    to()->mangle_unused_area_complete();
  }
}
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
874 |
// Returns true if attempting a young collection is believed safe:
// to-space must be empty, and the next older generation must report that
// a promotion of up to used() bytes can be accommodated.
bool DefNewGeneration::collection_attempt_is_safe() {
  if (!to()->is_empty()) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: to is not empty :: ");
    }
    return false;
  }
  if (_next_gen == NULL) {
    // Lazily cache a pointer to the next older generation.
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    _next_gen = gch->next_gen(this);
    assert(_next_gen != NULL,
           "This must be the youngest gen, and not the only gen");
  }
  return _next_gen->promotion_attempt_is_safe(used());
}
890 | |
// Post-collection bookkeeping.  Depending on whether a full collection
// just completed and whether another young collection attempt would be
// safe, sets or clears the heap's incremental_collection_failed flag and
// this generation's should_allocate_from_space flag; then performs
// mangling checks, optional chunk-pool cleanup, and counter updates.
void DefNewGeneration::gc_epilogue(bool full) {
  // Debug-only state used below to assert we never observe two failed
  // scavenges in a row without an intervening full collection.
  DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)

  assert(!GC_locker::is_active(), "We should not be executing here");
  // Check if the heap is approaching full after a collection has
  // been done.  Generally the young generation is empty at
  // a minimum at the end of a collection.  If it is not, then
  // the heap is approaching full.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (full) {
    DEBUG_ONLY(seen_incremental_collection_failed = false;)
    if (!collection_attempt_is_safe() && !_eden_space->is_empty()) {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen",
                            GCCause::to_string(gch->gc_cause()));
      }
      gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
      set_should_allocate_from_space(); // we seem to be running out of space
    } else {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen",
                            GCCause::to_string(gch->gc_cause()));
      }
      gch->clear_incremental_collection_failed(); // We just did a full collection
      clear_should_allocate_from_space(); // if set
    }
  } else {
#ifdef ASSERT
    // It is possible that incremental_collection_failed() == true
    // here, because an attempted scavenge did not succeed. The policy
    // is normally expected to cause a full collection which should
    // clear that condition, so we should not be here twice in a row
    // with incremental_collection_failed() == true without having done
    // a full collection in between.
    if (!seen_incremental_collection_failed &&
        gch->incremental_collection_failed()) {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed",
                            GCCause::to_string(gch->gc_cause()));
      }
      seen_incremental_collection_failed = true;
    } else if (seen_incremental_collection_failed) {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",
                            GCCause::to_string(gch->gc_cause()));
      }
      // The known-benign exceptions: ScavengeALot stress collections, and
      // System.gc() under CMS with ExplicitGCInvokesConcurrent.
      assert(gch->gc_cause() == GCCause::_scavenge_alot ||
             (gch->gc_cause() == GCCause::_java_lang_system_gc && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) ||
             !gch->incremental_collection_failed(),
             "Twice in a row");
      seen_incremental_collection_failed = false;
    }
#endif // ASSERT
  }

  if (ZapUnusedHeapArea) {
    eden()->check_mangled_unused_area_complete();
    from()->check_mangled_unused_area_complete();
    to()->check_mangled_unused_area_complete();
  }

  if (!CleanChunkPoolAsync) {
    Chunk::clean_chunk_pool();
  }

  // update the generation and space performance counters
  update_counters();
  gch->collector_policy()->counters()->update_counters();
}
960 | |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
// Records the current top of each space as the boundary used by later
// mangling checks.  Only meaningful when ZapUnusedHeapArea is set.
void DefNewGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  eden()->set_top_for_allocations();
  to()->set_top_for_allocations();
  from()->set_top_for_allocations();
}
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
967 |
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
968 |
0 | 969 void DefNewGeneration::update_counters() { |
970 if (UsePerfData) { | |
971 _eden_counters->update_all(); | |
972 _from_counters->update_all(); | |
973 _to_counters->update_all(); | |
974 _gen_counters->update_all(); | |
975 } | |
976 } | |
977 | |
6008 | 978 void DefNewGeneration::verify() { |
979 eden()->verify(); | |
980 from()->verify(); | |
981 to()->verify(); | |
0 | 982 } |
983 | |
// Prints the generation header (via the base class) followed by a
// one-line summary of each space.  Note the trailing space in " to " is
// deliberate, aligning the labels in the output.
void DefNewGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print("  eden");
  eden()->print_on(st);
  st->print("  from");
  from()->print_on(st);
  st->print("  to  ");
  to()->print_on(st);
}
993 | |
994 | |
// Human-readable name of this generation, used in printouts.
const char* DefNewGeneration::name() const {
  return "def new generation";
}
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
998 |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
999 // Moved from inline file as they are not called inline |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
// Compaction within this generation begins with eden.
CompactibleSpace* DefNewGeneration::first_compaction_space() const {
  return eden();
}
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1003 |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
// Slow-path allocation of word_size HeapWords in this generation.
// Returns NULL on failure.  First tries a lock-free eden allocation;
// on failure, repeatedly asks the next generation to raise eden's soft
// allocation limit (installed with a CAS, since other threads may race)
// and retries, and finally falls back to allocating from from-space.
// Note: is_tlab is not consulted by this implementation.
HeapWord* DefNewGeneration::allocate(size_t word_size,
                                     bool is_tlab) {
  // This is the slow-path allocation for the DefNewGeneration.
  // Most allocations are fast-path in compiled code.
  // We try to allocate from the eden.  If that works, we are happy.
  // Note that since DefNewGeneration supports lock-free allocation, we
  // have to use it here, as well.
  HeapWord* result = eden()->par_allocate(word_size);
  if (result != NULL) {
    return result;
  }
  do {
    HeapWord* old_limit = eden()->soft_end();
    if (old_limit < eden()->end()) {
      // Tell the next generation we reached a limit.
      HeapWord* new_limit =
        next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
      if (new_limit != NULL) {
        // Publish the raised limit; if another thread changed soft_end in
        // the meantime the CAS simply fails and we retry the allocation.
        Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
      } else {
        assert(eden()->soft_end() == eden()->end(),
               "invalid state after allocation_limit_reached returned null");
      }
    } else {
      // The allocation failed and the soft limit is equal to the hard limit,
      // there are no reasons to do an attempt to allocate
      assert(old_limit == eden()->end(), "sanity check");
      break;
    }
    // Try to allocate until succeeded or the soft limit can't be adjusted
    result = eden()->par_allocate(word_size);
  } while (result == NULL);

  // If the eden is full and the last collection bailed out, we are running
  // out of heap space, and we try to allocate the from-space, too.
  // allocate_from_space can't be inlined because that would introduce a
  // circular dependency at compile time.
  if (result == NULL) {
    result = allocate_from_space(word_size);
  }
  return result;
}
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1046 |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
// Lock-free (parallel) allocation attempt, served directly from eden.
// Note: is_tlab is not consulted by this implementation.
HeapWord* DefNewGeneration::par_allocate(size_t word_size,
                                         bool is_tlab) {
  return eden()->par_allocate(word_size);
}
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1051 |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
// Pre-collection bookkeeping.  The "full" argument is not consulted here.
void DefNewGeneration::gc_prologue(bool full) {
  // Ensure that _end and _soft_end are the same in eden space.
  eden()->set_soft_end(eden()->end());
}
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1056 |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
// TLAB capacity is reported as eden's capacity (TLAB requests are served
// from eden -- see par_allocate above).
size_t DefNewGeneration::tlab_capacity() const {
  return eden()->capacity();
}
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1060 |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1061 size_t DefNewGeneration::unsafe_max_tlab_alloc() const { |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1062 return unsafe_max_alloc_nogc(); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
1063 } |