src/share/vm/memory/defNewGeneration.cpp @ 0:a61af66fc99e (jdk7-b24)

Initial load

author:   duke
date:     Sat, 01 Dec 2007 00:00:00 +0000
parents:  (none)
children: ba764ed4b6f2
/*
 * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_defNewGeneration.cpp.incl"

//
// DefNewGeneration functions.

// Methods of protected closure types.

DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) {
  assert(g->level() == 0, "Optimized for youngest gen.");
}
void DefNewGeneration::IsAliveClosure::do_object(oop p) {
  assert(false, "Do not call.");
}
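// An object outside this (youngest) generation is treated as alive by
// definition here; an object inside it is alive during a scavenge exactly
// when it has already been copied, i.e. carries a forwarding pointer.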
bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
  return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded();
}

DefNewGeneration::KeepAliveClosure::
KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
  GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
  assert(rs->rs_kind() == GenRemSet::CardTable, "Wrong rem set kind.");
  _rs = (CardTableRS*)rs;
}

void DefNewGeneration::KeepAliveClosure::do_oop(oop* p) {
  // We never expect to see a null reference being processed
  // as a weak reference.
  assert (*p != NULL, "expected non-null ref");
  assert ((*p)->is_oop(), "expected an oop while scanning weak refs");

  _cl->do_oop_nv(p);

  // Card marking is trickier for weak refs.
  // This oop is a 'next' field which was filled in while we
  // were discovering weak references. While we might not need
  // to take a special action to keep this reference alive, we
  // will need to dirty a card as the field was modified.
  //
  // Alternatively, we could create a method which iterates through
  // each generation, allowing them in turn to examine the modified
  // field.
  //
  // We could check that p is also in an older generation, but
  // dirty cards in the youngest gen are never scanned, so the
  // extra check probably isn't worthwhile.
  if (Universe::heap()->is_in_reserved(p)) {
    _rs->inline_write_ref_field_gc(p, *p);
  }
}
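
// Illustrative sketch (not part of this file): a card-table barrier such as
// inline_write_ref_field_gc() conceptually just dirties the card spanning the
// updated field. Assuming a byte map and 512-byte cards, that is roughly:
//
//   jbyte* card = _byte_map_base + ((uintptr_t)p >> 9);  // 2^9 = 512
//   *card = dirty_card_value;                            // mark card dirty
//
// A later scavenge scans dirty cards in the older generations to find
// old-to-young pointers like the one written above.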

DefNewGeneration::FastKeepAliveClosure::
FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {
  _boundary = g->reserved().end();
}

void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p) {
  assert (*p != NULL, "expected non-null ref");
  assert ((*p)->is_oop(), "expected an oop while scanning weak refs");

  _cl->do_oop_nv(p);

  // Optimized for the DefNew generation when it is the youngest generation:
  // we set a younger_gen card only if we have an older->youngest
  // generation pointer.
  if (((HeapWord*)(*p) < _boundary) && Universe::heap()->is_in_reserved(p)) {
    _rs->inline_write_ref_field_gc(p, *p);
  }
}

DefNewGeneration::EvacuateFollowersClosure::
EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                         ScanClosure* cur, ScanClosure* older) :
  _gch(gch), _level(level),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

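// The do_void() loops below run scanning to a fixed point: iterating over
// the oops allocated since the last save_marks() may itself copy further
// objects into to-space (or promote them), moving the allocation point past
// the saved mark again, so we rescan until no new allocations are observed.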
void DefNewGeneration::EvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
}

DefNewGeneration::FastEvacuateFollowersClosure::
FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                             DefNewGeneration* gen,
                             FastScanClosure* cur, FastScanClosure* older) :
  _gch(gch), _level(level), _gen(gen),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
  guarantee(_gen->promo_failure_scan_stack() == NULL
            || _gen->promo_failure_scan_stack()->length() == 0,
            "Failed to finish scan");
}

ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
  OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
  OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
  OopClosure(g->ref_processor()), _g(g)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}


DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   int level,
                                   const char* policy)
  : Generation(rs, initial_size, level),
    _objs_with_preserved_marks(NULL),
    _preserved_marks_of_objs(NULL),
    _promo_failure_scan_stack(NULL),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false)
{
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  Universe::heap()->barrier_set()->resize_covered_region(cmr);

  if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) {
    _eden_space = new ConcEdenSpace(this);
  } else {
    _eden_space = new EdenSpace(this);
  }
  _from_space = new ContiguousSpace();
  _to_space   = new ContiguousSpace();

  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
    vm_exit_during_initialization("Could not allocate a new gen space");

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
  uintx alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
  uintx size = _virtual_space.reserved_size();
  _max_survivor_size = compute_survivor_size(size, alignment);
  _max_eden_size = size - (2*_max_survivor_size);

  // Allocate the performance counters.

  // Generation counters -- generation 0, 3 subspaces.
  _gen_counters = new GenerationCounters("new", 0, 3, &_virtual_space);
  _gc_counters = new CollectorCounters(policy, 0);

  _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
                                      _gen_counters);
  _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                      _gen_counters);
  _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                    _gen_counters);

  compute_space_boundaries(0);
  update_counters();
  _next_gen = NULL;
  _tenuring_threshold = MaxTenuringThreshold;
  _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
}

void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size) {
  uintx alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();

  // Compute sizes.
  uintx size = _virtual_space.committed_size();
  uintx survivor_size = compute_survivor_size(size, alignment);
  uintx eden_size = size - (2*survivor_size);
  assert(eden_size > 0 && survivor_size <= eden_size, "just checking");

  if (eden_size < minimum_eden_size) {
    // This may happen due to 64K rounding; if so, adjust eden size back up.
    minimum_eden_size = align_size_up(minimum_eden_size, alignment);
    uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
    uintx unaligned_survivor_size =
      align_size_down(maximum_survivor_size, alignment);
    survivor_size = MAX2(unaligned_survivor_size, alignment);
    eden_size = size - (2*survivor_size);
    assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
    assert(eden_size >= minimum_eden_size, "just checking");
  }
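
  // Worked example (illustrative; the actual split depends on
  // compute_survivor_size() and the platform alignment): with a committed
  // size of 4M and survivor_size = 512K, eden_size = 4M - 2*512K = 3M,
  // giving the layout [eden 3M][from 512K][to 512K] constructed below.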

  char* eden_start = _virtual_space.low();
  char* from_start = eden_start + eden_size;
  char* to_start   = from_start + survivor_size;
  char* to_end     = to_start   + survivor_size;

  assert(to_end == _virtual_space.high(), "just checking");
  assert(Space::is_aligned((HeapWord*)eden_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)from_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)to_start),   "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
  MemRegion toMR  ((HeapWord*)to_start,   (HeapWord*)to_end);

  eden()->initialize(edenMR, (minimum_eden_size == 0));
  // If minimum_eden_size != 0, we will not have cleared any
  // portion of eden above its top. This can cause newly
  // expanded space not to be mangled if using ZapUnusedHeapArea.
  // We explicitly do such mangling here.
  if (ZapUnusedHeapArea && (minimum_eden_size != 0)) {
    eden()->mangle_unused_area();
  }
  from()->initialize(fromMR, true);
  to()->initialize(toMR,     true);
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction, so it need
  // not be considered. The exception is during promotion
  // failure handling, when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);
}

void DefNewGeneration::swap_spaces() {
  ContiguousSpace* s = from();
  _from_space = to();
  _to_space   = s;
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction, so it need
  // not be considered. The exception is during promotion
  // failure handling, when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);

  if (UsePerfData) {
    CSpaceCounters* c = _from_counters;
    _from_counters = _to_counters;
    _to_counters = c;
  }
}

bool DefNewGeneration::expand(size_t bytes) {
  MutexLocker x(ExpandHeap_lock);
  bool success = _virtual_space.expand_by(bytes);

  // Do not attempt an expand-to-the-reserve-size here. The
  // request should properly observe the maximum size of
  // the generation, so an expand-to-reserve should be
  // unnecessary. Also, a second expand-to-reserve call could
  // cause an undue expansion: for example, if the first expand
  // fails for unknown reasons but the second succeeds, the heap
  // expands to its maximum value.
  if (GC_locker::is_active()) {
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
    }
  }

  return success;
}


void DefNewGeneration::compute_new_size() {
  // This is called after a GC that includes the next (older) generation,
  // which is required to exist, so from-space will normally be empty.
  // Note that we check both survivor spaces, since they swap roles if a
  // scavenge fails; if either is non-empty we bail out (otherwise we would
  // have to relocate the objects).
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }

  int next_level = level() + 1;
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(next_level < gch->_n_gens,
         "DefNewGeneration cannot be an oldest gen");

  Generation* next_gen = gch->_gens[next_level];
  size_t old_size = next_gen->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = spec()->init_size();
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size,
         "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  // Compute the desired new generation size based on NewRatio and
  // NewSizeThreadIncrease.
  size_t desired_new_size = old_size/NewRatio;
  int threads_count = Threads::number_of_non_daemon_threads();
  size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
  desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment);

  // Adjust the new generation size: clamp to [min_new_size, max_new_size].
  desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
  assert(desired_new_size <= max_new_size, "just checking");
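
  // Worked example (illustrative): with old_size = 60M, NewRatio = 3,
  // 5 non-daemon threads, and NewSizeThreadIncrease = 16K, the desired size
  // is align_size_up(60M/3 + 5*16K, alignment), i.e. roughly 20M, which is
  // then clamped to [min_new_size, max_new_size] above.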

  bool changed = false;
  if (desired_new_size > new_size_before) {
    size_t change = desired_new_size - new_size_before;
    assert(change % alignment == 0, "just checking");
    if (expand(change)) {
      changed = true;
    }
    // If the heap failed to expand to the desired size,
    // "changed" will be false. If the expansion failed
    // (and at this point it was expected to succeed),
    // ignore the failure (leaving "changed" as false).
  }
  if (desired_new_size < new_size_before && eden()->is_empty()) {
    // Bail out of shrinking if there are objects in eden.
    size_t change = new_size_before - desired_new_size;
    assert(change % alignment == 0, "just checking");
    _virtual_space.shrink_by(change);
    changed = true;
  }
  if (changed) {
    compute_space_boundaries(eden()->used());
    MemRegion cmr((HeapWord*)_virtual_space.low(), (HeapWord*)_virtual_space.high());
    Universe::heap()->barrier_set()->resize_covered_region(cmr);
    if (Verbose && PrintGC) {
      size_t new_size_after = _virtual_space.committed_size();
      size_t eden_size_after = eden()->capacity();
      size_t survivor_size_after = from()->capacity();
      gclog_or_tty->print("New generation size " SIZE_FORMAT "K->" SIZE_FORMAT "K [eden="
        SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
        new_size_before/K, new_size_after/K, eden_size_after/K, survivor_size_after/K);
      if (WizardMode) {
        gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
          thread_increase_size/K, threads_count);
      }
      gclog_or_tty->cr();
    }
  }
}

void DefNewGeneration::object_iterate_since_last_GC(ObjectClosure* cl) {
  // $$$ This may be wrong in case of "scavenge failure"?
  eden()->object_iterate(cl);
}

void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
  assert(false, "NYI -- are you sure you want to call this?");
}


size_t DefNewGeneration::capacity() const {
  return eden()->capacity()
       + from()->capacity();  // to() is only used during scavenge
}


size_t DefNewGeneration::used() const {
  return eden()->used()
       + from()->used();      // to() is only used during scavenge
}


size_t DefNewGeneration::free() const {
  return eden()->free()
       + from()->free();      // to() is only used during scavenge
}

size_t DefNewGeneration::max_capacity() const {
  const size_t alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
  const size_t reserved_bytes = reserved().byte_size();
  return reserved_bytes - compute_survivor_size(reserved_bytes, alignment);
}

size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
  return eden()->free();
}

size_t DefNewGeneration::capacity_before_gc() const {
  return eden()->capacity();
}

size_t DefNewGeneration::contiguous_available() const {
  return eden()->free();
}


HeapWord** DefNewGeneration::top_addr() const { return eden()->top_addr(); }
HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }

void DefNewGeneration::object_iterate(ObjectClosure* blk) {
  eden()->object_iterate(blk);
  from()->object_iterate(blk);
}


void DefNewGeneration::space_iterate(SpaceClosure* blk,
                                     bool usedOnly) {
  blk->do_space(eden());
  blk->do_space(from());
  blk->do_space(to());
}

// The last collection bailed out and we are running out of heap space,
// so we try to allocate from from-space as well.
HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
  HeapWord* result = NULL;
  if (PrintGC && Verbose) {
    gclog_or_tty->print("DefNewGeneration::allocate_from_space(" SIZE_FORMAT "):"
                        " will_fail: %s"
                        " heap_lock: %s"
                        " free: " SIZE_FORMAT,
                        size,
                        GenCollectedHeap::heap()->incremental_collection_will_fail() ? "true" : "false",
                        Heap_lock->is_locked() ? "locked" : "unlocked",
                        from()->free());
  }
  if (should_allocate_from_space() || GC_locker::is_active_and_needs_gc()) {
    if (Heap_lock->owned_by_self() ||
        (SafepointSynchronize::is_at_safepoint() &&
         Thread::current()->is_VM_thread())) {
      // If the Heap_lock is not locked by this thread, this will be called
      // again later with the Heap_lock held.
      result = from()->allocate(size);
    } else if (PrintGC && Verbose) {
      gclog_or_tty->print_cr(" Heap_lock is not owned by self");
    }
  } else if (PrintGC && Verbose) {
    gclog_or_tty->print_cr(" should_allocate_from_space: NOT");
  }
  if (PrintGC && Verbose) {
    gclog_or_tty->print_cr(" returns %s", result == NULL ? "NULL" : "object");
  }
  return result;
}

HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
                                                bool is_tlab,
                                                bool parallel) {
  // We don't attempt to expand the young generation (but perhaps we should.)
  return allocate(size, is_tlab);
}


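// A minor ("scavenge") collection of this generation, in outline:
//   1. Scan roots (VM/thread roots plus old-to-young pointers found via
//      dirty cards), copying live young objects to to-space or promoting
//      them to the next generation.
//   2. "Evacuate followers": transitively copy everything reachable from
//      the newly copied objects (see FastEvacuateFollowersClosure above).
//   3. Process the discovered weak references.
//   4. On success, clear eden and from-space and swap the survivor spaces;
//      on promotion failure, restore object headers via
//      remove_forwarding_pointers() and arrange for to-space to be included
//      in a subsequent compaction.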
void DefNewGeneration::collect(bool full,
                               bool clear_all_soft_refs,
                               size_t size,
                               bool is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  _next_gen = gch->next_gen(this);
  assert(_next_gen != NULL,
         "This must be the youngest gen, and not the only gen");

  // If the next generation is too full to accommodate promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    gch->set_incremental_collection_will_fail();
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  TraceTime t1("GC", PrintGC && !PrintGCDetails, true, gclog_or_tty);
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  SpecializationStats::clear();

  // These can be shared for all code paths.
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear();

  gch->rem_set()->prepare_for_younger_refs_iterate(false);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  // Weak refs.
  // FIXME: Are these storage leaks, or are they resource objects?
#ifdef COMPILER2
  ReferencePolicy* soft_ref_policy = new LRUMaxHeapPolicy();
#else
  ReferencePolicy* soft_ref_policy = new LRUCurrentHeapPolicy();
#endif // COMPILER2

  // Not very pretty.
  CollectorPolicy* cp = gch->collector_policy();

  FastScanClosure fsc_with_no_gc_barrier(this, false);
  FastScanClosure fsc_with_gc_barrier(this, true);

  set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
  FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
                                                  &fsc_with_no_gc_barrier,
                                                  &fsc_with_gc_barrier);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  gch->gen_process_strong_roots(_level,
                                true,   // Process younger gens, if any, as
                                        // strong roots.
                                false,  // Not collecting the permanent generation.
                                SharedHeap::SO_AllClasses,
                                &fsc_with_gc_barrier,
                                &fsc_with_no_gc_barrier);

  // "Evacuate followers".
  evacuate_followers.do_void();

  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ref_processor()->process_discovered_references(
    soft_ref_policy, &is_alive, &keep_alive, &evacuate_followers, NULL);
  if (!promotion_failed()) {
    // Swap the survivor spaces.
    eden()->clear();
    from()->clear();
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

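    // Note (an illustrative summary, not a specification): as this code
    // understands it, compute_tenuring_threshold() walks the age table and
    // returns the smallest age at which the cumulative volume of surviving
    // objects exceeds the desired survivor occupancy; objects at or above
    // the threshold are promoted by the next scavenge.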
    // Set the desired survivor size to half the real survivor space.
    _tenuring_threshold =
      age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);

    if (PrintGC && !PrintGCDetails) {
      gch->print_heap_change(gch_prev_used);
    }
  } else {
    assert(HandlePromotionFailure,
           "Should not be here unless promotion failure handling is on");
    assert(_promo_failure_scan_stack != NULL &&
           _promo_failure_scan_stack->length() == 0, "post condition");

    // Deallocate the stack and its elements.
    delete _promo_failure_scan_stack;
    _promo_failure_scan_stack = NULL;

    remove_forwarding_pointers();
    if (PrintGCDetails) {
      gclog_or_tty->print(" (promotion failed)");
    }
    // Add to-space to the list of spaces to compact
    // when a promotion failure has occurred. In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces();  // For the sake of uniformity wrt ParNewGeneration::collect().
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_will_fail();

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  }
  // Set a new iteration-safe limit for the survivor spaces.
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());
  SpecializationStats::print();
  update_time_of_last_gc(os::javaTimeMillis());
}

class RemoveForwardPointerClosure: public ObjectClosure {
public:
  void do_object(oop obj) {
    obj->init_mark();
  }
};

void DefNewGeneration::init_assuming_no_promotion_failure() {
  _promotion_failed = false;
  from()->set_next_compaction_space(NULL);
}

void DefNewGeneration::remove_forwarding_pointers() {
  RemoveForwardPointerClosure rspc;
  eden()->object_iterate(&rspc);
  from()->object_iterate(&rspc);
  // Now restore saved marks, if any.
  if (_objs_with_preserved_marks != NULL) {
    assert(_preserved_marks_of_objs != NULL, "Both or none.");
    assert(_objs_with_preserved_marks->length() ==
           _preserved_marks_of_objs->length(), "Both or none.");
    for (int i = 0; i < _objs_with_preserved_marks->length(); i++) {
      oop obj = _objs_with_preserved_marks->at(i);
      markOop m = _preserved_marks_of_objs->at(i);
      obj->set_mark(m);
    }
    delete _objs_with_preserved_marks;
    delete _preserved_marks_of_objs;
    _objs_with_preserved_marks = NULL;
    _preserved_marks_of_objs = NULL;
  }
}

void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
  if (m->must_be_preserved_for_promotion_failure(obj)) {
    if (_objs_with_preserved_marks == NULL) {
      assert(_preserved_marks_of_objs == NULL, "Both or none.");
      _objs_with_preserved_marks = new (ResourceObj::C_HEAP)
        GrowableArray<oop>(PreserveMarkStackSize, true);
      _preserved_marks_of_objs = new (ResourceObj::C_HEAP)
        GrowableArray<markOop>(PreserveMarkStackSize, true);
    }
    _objs_with_preserved_marks->push(obj);
    _preserved_marks_of_objs->push(m);
  }
}

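// On promotion failure the object stays where it is; forwarding it to itself
// below marks it as "already copied", so every other reference to it resolves
// back to the original location. Installing the forwarding pointer clobbers
// the mark word, which is why the mark is preserved first (and later restored
// by remove_forwarding_pointers()).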
void DefNewGeneration::handle_promotion_failure(oop old) {
  preserve_mark_if_necessary(old, old->mark());
  // Forward to self.
  old->forward_to(old);
  _promotion_failed = true;

  push_on_promo_failure_scan_stack(old);

  if (!_promo_failure_drain_in_progress) {
    // Prevent recursion in copy_to_survivor_space().
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}

oop DefNewGeneration::copy_to_survivor_space(oop old, oop* from) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;

  // Try allocating obj in to-space (unless too old).
  if (old->age() < tenuring_threshold()) {
    obj = (oop) to()->allocate(s);
  }

  // Otherwise try allocating obj tenured.
  if (obj == NULL) {
    obj = _next_gen->promote(old, s, from);
    if (obj == NULL) {
      if (!HandlePromotionFailure) {
        // A failed promotion likely means the MaxLiveObjectEvacuationRatio flag
        // is incorrectly set. In any case, it's seriously wrong to be here!
        vm_exit_out_of_memory(s*wordSize, "promotion");
      }

      handle_promotion_failure(old);
      return old;
    }
  } else {
    // Prefetch beyond obj.
    const intx interval = PrefetchCopyIntervalInBytes;
    Prefetch::write(obj, interval);

    // Copy obj.
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);

    // Increment age if obj is still in the new generation.
    obj->incr_age();
    age_table()->add(obj, s);
  }

  // Done; insert a forwarding pointer to obj in old's header.
  old->forward_to(obj);

  return obj;
}

void DefNewGeneration::push_on_promo_failure_scan_stack(oop obj) {
  if (_promo_failure_scan_stack == NULL) {
    _promo_failure_scan_stack = new (ResourceObj::C_HEAP)
      GrowableArray<oop>(40, true);
  }

  _promo_failure_scan_stack->push(obj);
}

void DefNewGeneration::drain_promo_failure_scan_stack() {
  assert(_promo_failure_scan_stack != NULL, "precondition");

  while (_promo_failure_scan_stack->length() > 0) {
    oop obj = _promo_failure_scan_stack->pop();
    obj->oop_iterate(_promo_failure_scan_stack_closure);
  }
}

void DefNewGeneration::save_marks() {
  eden()->set_saved_mark();
  to()->set_saved_mark();
  from()->set_saved_mark();
}


void DefNewGeneration::reset_saved_marks() {
  eden()->reset_saved_mark();
  to()->reset_saved_mark();
  from()->reset_saved_mark();
}


bool DefNewGeneration::no_allocs_since_save_marks() {
  assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
  assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
  return to()->saved_mark_at_top();
}

#define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
                                                                \
void DefNewGeneration::                                         \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
  cl->set_generation(this);                                     \
  eden()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  to()->oop_since_save_marks_iterate##nv_suffix(cl);            \
  from()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  cl->reset_generation();                                       \
  save_marks();                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)

#undef DefNew_SINCE_SAVE_MARKS_DEFN
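
// For illustration: assuming ALL_SINCE_SAVE_MARKS_CLOSURES includes a pair
// such as (ScanClosure, _nv), one expansion of the macro above would read
// roughly:
//
//   void DefNewGeneration::oop_since_save_marks_iterate_nv(ScanClosure* cl) {
//     cl->set_generation(this);
//     eden()->oop_since_save_marks_iterate_nv(cl);
//     to()->oop_since_save_marks_iterate_nv(cl);
//     from()->oop_since_save_marks_iterate_nv(cl);
//     cl->reset_generation();
//     save_marks();
//   }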

void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                          size_t max_alloc_words) {
  if (requestor == this || _promotion_failed) return;
  assert(requestor->level() > level(), "DefNewGeneration must be youngest");

  /* $$$ Assert this? "trace" is a "MarkSweep" function so that's not appropriate.
  if (to_space->top() > to_space->bottom()) {
    trace("to_space not empty when contribute_scratch called");
  }
  */

  ContiguousSpace* to_space = to();
  assert(to_space->end() >= to_space->top(), "pointers out of order");
  size_t free_words = pointer_delta(to_space->end(), to_space->top());
  if (free_words >= MinFreeScratchWords) {
    ScratchBlock* sb = (ScratchBlock*)to_space->top();
    sb->num_words = free_words;
    sb->next = list;
    list = sb;
  }
}

bool DefNewGeneration::collection_attempt_is_safe() {
  if (!to()->is_empty()) {
    return false;
  }
  if (_next_gen == NULL) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    _next_gen = gch->next_gen(this);
    assert(_next_gen != NULL,
           "This must be the youngest gen, and not the only gen");
  }

  // Decide if there's enough room for a full promotion.
  // When using extremely large edens, we effectively lose a
  // large amount of old space. Use the "MaxLiveObjectEvacuationRatio"
  // flag to reduce the minimum evacuation space requirements. If
  // there is not enough space to evacuate eden during a scavenge,
  // the VM will immediately exit with an out-of-memory error.
  // This flag has not been tested
  // with collectors other than simple mark & sweep.
  //
  // Note that with the addition of promotion failure handling, the
  // VM will not immediately exit but will undo the young generation
  // collection. The parameter is left here for compatibility.
  const double evacuation_ratio = MaxLiveObjectEvacuationRatio / 100.0;

  // worst_case_evacuation is based on "used()". For the case where this
  // method is called after a collection, this is still appropriate because
  // the case that needs to be detected is one in which a full collection
  // has been done and has overflowed into the young generation. In that
  // case a minor collection will fail (the overflow of the full collection
  // means there is no space in the old generation for any promotion).
  size_t worst_case_evacuation = (size_t)(used() * evacuation_ratio);
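
  // For example (illustrative numbers): with used() = 10M and
  // MaxLiveObjectEvacuationRatio = 64, worst_case_evacuation = 6.4M; the
  // next generation must be able to absorb that much promotion for the
  // collection attempt to be considered safe.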

  return _next_gen->promotion_attempt_is_safe(worst_case_evacuation,
                                              HandlePromotionFailure);
}

void DefNewGeneration::gc_epilogue(bool full) {
  // Check if the heap is approaching full after a collection has
  // been done. Generally the young generation is empty at
  // a minimum at the end of a collection. If it is not, then
  // the heap is approaching full.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  clear_should_allocate_from_space();
  if (collection_attempt_is_safe()) {
    gch->clear_incremental_collection_will_fail();
  } else {
    gch->set_incremental_collection_will_fail();
    if (full) { // We seem to be running out of space.
      set_should_allocate_from_space();
    }
  }

  // Update the generation and space performance counters.
  update_counters();
  gch->collector_policy()->counters()->update_counters();
}

void DefNewGeneration::update_counters() {
  if (UsePerfData) {
    _eden_counters->update_all();
    _from_counters->update_all();
    _to_counters->update_all();
    _gen_counters->update_all();
  }
}

void DefNewGeneration::verify(bool allow_dirty) {
  eden()->verify(allow_dirty);
  from()->verify(allow_dirty);
  to()->verify(allow_dirty);
}

void DefNewGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print("  eden");
  eden()->print_on(st);
  st->print("  from");
  from()->print_on(st);
  st->print("  to  ");
  to()->print_on(st);
}


const char* DefNewGeneration::name() const {
  return "def new generation";
}