comparison src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp @ 0:a61af66fc99e jdk7-b24

Initial load
author duke
date Sat, 01 Dec 2007 00:00:00 +0000
parents
children ba764ed4b6f2
/*
 * Copyright 2002-2007 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */


# include "incls/_precompiled.incl"
# include "incls/_psScavenge.cpp.incl"

HeapWord*                  PSScavenge::_to_space_top_before_gc = NULL;
int                        PSScavenge::_consecutive_skipped_scavenges = 0;
ReferenceProcessor*        PSScavenge::_ref_processor = NULL;
CardTableExtension*        PSScavenge::_card_table = NULL;
bool                       PSScavenge::_survivor_overflow = false;
int                        PSScavenge::_tenuring_threshold = 0;
HeapWord*                  PSScavenge::_young_generation_boundary = NULL;
elapsedTimer               PSScavenge::_accumulated_time;
GrowableArray<markOop>*    PSScavenge::_preserved_mark_stack = NULL;
GrowableArray<oop>*        PSScavenge::_preserved_oop_stack = NULL;
CollectorCounters*         PSScavenge::_counters = NULL;

// Define before use
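// During a scavenge an object is considered alive if it lives outside
// the young generation (a scavenge never moves it) or if it has already
// been forwarded, i.e. copied to its new location during this scavenge.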
class PSIsAliveClosure: public BoolObjectClosure {
public:
  void do_object(oop p) {
    assert(false, "Do not call.");
  }
  bool do_object_b(oop p) {
    return (!PSScavenge::is_obj_in_young((HeapWord*) p)) || p->is_forwarded();
  }
};

PSIsAliveClosure PSScavenge::_is_alive_closure;

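// Keep-alive closure handed to the reference processor: any referent
// still living in young gen is copied (or promoted) through the
// promotion manager via copy_and_push_safe_barrier().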
class PSKeepAliveClosure: public OopClosure {
protected:
  MutableSpace* _to_space;
  PSPromotionManager* _promotion_manager;

public:
  PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
    _to_space = heap->young_gen()->to_space();

    assert(_promotion_manager != NULL, "Sanity");
  }

  void do_oop(oop* p) {
    assert(*p != NULL, "expected non-null ref");
    assert((*p)->is_oop(), "expected an oop while scanning weak refs");

    oop obj = oop(*p);
    // Weak refs may be visited more than once.
    if (PSScavenge::should_scavenge(obj, _to_space)) {
      PSScavenge::copy_and_push_safe_barrier(_promotion_manager, p);
    }
  }
};

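// Transitively evacuates everything reachable from already-copied
// objects by draining the promotion manager's stacks until no work
// remains.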
class PSEvacuateFollowersClosure: public VoidClosure {
private:
  PSPromotionManager* _promotion_manager;
public:
  PSEvacuateFollowersClosure(PSPromotionManager* pm) : _promotion_manager(pm) {}

  void do_void() {
    assert(_promotion_manager != NULL, "Sanity");
    _promotion_manager->drain_stacks(true);
    guarantee(_promotion_manager->stacks_empty(),
              "stacks should be empty at this point");
  }
};

class PSPromotionFailedClosure : public ObjectClosure {
  virtual void do_object(oop obj) {
    if (obj->is_forwarded()) {
      obj->init_mark();
    }
  }
};

class PSRefProcTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask & _rp_task;
  uint _work_id;
public:
  PSRefProcTaskProxy(ProcessTask & rp_task, uint work_id)
    : _rp_task(rp_task),
      _work_id(work_id)
  { }

private:
  virtual char* name() { return (char *)"Process referents by policy in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which);
};

void PSRefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
{
  PSPromotionManager* promotion_manager =
    PSPromotionManager::gc_thread_promotion_manager(which);
  assert(promotion_manager != NULL, "sanity check");
  PSKeepAliveClosure keep_alive(promotion_manager);
  PSEvacuateFollowersClosure evac_followers(promotion_manager);
  PSIsAliveClosure is_alive;
  _rp_task.work(_work_id, is_alive, keep_alive, evac_followers);
}

class PSRefEnqueueTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _enq_task;
  uint _work_id;

public:
  PSRefEnqueueTaskProxy(EnqueueTask& enq_task, uint work_id)
    : _enq_task(enq_task),
      _work_id(work_id)
  { }

  virtual char* name() { return (char *)"Enqueue reference objects in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which)
  {
    _enq_task.work(_work_id);
  }
};

class PSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
};

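// One proxy task per GC thread is enqueued; when reference processing
// may mark oops alive, steal tasks are added as well so idle workers
// can take work from other workers' stacks, with the terminator used
// to detect completion.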
void PSRefProcTaskExecutor::execute(ProcessTask& task)
{
  GCTaskQueue* q = GCTaskQueue::create();
  for(uint i=0; i<ParallelGCThreads; i++) {
    q->enqueue(new PSRefProcTaskProxy(task, i));
  }
  ParallelTaskTerminator terminator(
    ParallelScavengeHeap::gc_task_manager()->workers(),
    UseDepthFirstScavengeOrder ?
      (TaskQueueSetSuper*) PSPromotionManager::stack_array_depth()
    : (TaskQueueSetSuper*) PSPromotionManager::stack_array_breadth());
  if (task.marks_oops_alive() && ParallelGCThreads > 1) {
    for (uint j=0; j<ParallelGCThreads; j++) {
      q->enqueue(new StealTask(&terminator));
    }
  }
  ParallelScavengeHeap::gc_task_manager()->execute_and_wait(q);
}


void PSRefProcTaskExecutor::execute(EnqueueTask& task)
{
  GCTaskQueue* q = GCTaskQueue::create();
  for(uint i=0; i<ParallelGCThreads; i++) {
    q->enqueue(new PSRefEnqueueTaskProxy(task, i));
  }
  ParallelScavengeHeap::gc_task_manager()->execute_and_wait(q);
}

// This method contains all heap-specific policy for invoking scavenge.
// PSScavenge::invoke_no_policy() will do nothing but attempt to
// scavenge. It will not clean up after failed promotions, bail out if
// policy time limits have been exceeded, or perform any other special
// behavior. All such policy should be placed here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
void PSScavenge::invoke()
{
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // Before each allocation/collection attempt, find out from the
  // policy object if GCs are, on the whole, taking too long. If so,
  // bail out without attempting a collection.
  if (!policy->gc_time_limit_exceeded()) {
    IsGCActiveMark mark;

    bool scavenge_was_done = PSScavenge::invoke_no_policy();

    PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
    if (UsePerfData)
      counters->update_full_follows_scavenge(0);
    if (!scavenge_was_done ||
        policy->should_full_GC(heap->old_gen()->free_in_bytes())) {
      if (UsePerfData)
        counters->update_full_follows_scavenge(full_follows_scavenge);

      GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
      if (UseParallelOldGC) {
        PSParallelCompact::invoke_no_policy(false);
      } else {
        PSMarkSweep::invoke_no_policy(false);
      }
    }
  }
}

// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSScavenge::invoke_no_policy() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  TimeStamp scavenge_entry;
  TimeStamp scavenge_midpoint;
  TimeStamp scavenge_exit;

  scavenge_entry.update();

  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Check for potential problems.
  if (!should_attempt_scavenge()) {
    return false;
  }

  bool promotion_failure_occurred = false;

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();
  heap->increment_total_collections();

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  if ((gc_cause != GCCause::_java_lang_system_gc) ||
       UseAdaptiveSizePolicyWithSystemGC) {
    // Gather the feedback data for eden occupancy.
    young_gen->eden_space()->accumulate_statistics();
  }

  if (PrintHeapAtGC) {
    Universe::print_heap_before_gc();
  }

  assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  size_t prev_used = heap->used();
  assert(promotion_failed() == false, "Sanity");

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyBeforeGC:");
    Universe::verify(true);
  }

  {
    ResourceMark rm;
    HandleMark hm;

    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t1("GC", PrintGC, !PrintGCDetails, gclog_or_tty);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(false /* not full GC */);

    if (TraceGen0Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();

    // Verify the object start arrays.
    if (VerifyObjectStartArray &&
        VerifyBeforeGC) {
      old_gen->verify_object_start_array();
      perm_gen->verify_object_start_array();
    }

    // Verify no unmarked old->young roots
    if (VerifyRememberedSets) {
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (!ScavengeWithObjectsInToSpace) {
      assert(young_gen->to_space()->is_empty(),
             "Attempt to scavenge with live objects in to_space");
      young_gen->to_space()->clear();
    } else if (ZapUnusedHeapArea) {
      young_gen->to_space()->mangle_unused_area();
    }
    save_to_space_top_before_gc();

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
    COMPILER2_PRESENT(DerivedPointerTable::clear());

    reference_processor()->enable_discovery();

    // We track how much was promoted to the next generation for
    // the AdaptiveSizePolicy.
    size_t old_gen_used_before = old_gen->used_in_bytes();

    // For PrintGCDetails
    size_t young_gen_used_before = young_gen->used_in_bytes();

    // Reset our survivor overflow.
    set_survivor_overflow(false);

    // We need to save the old/perm top values before
    // creating the promotion_manager. We pass the top
    // values to the card_table, to prevent it from
    // straying into the promotion labs.
    HeapWord* old_top = old_gen->object_space()->top();
    HeapWord* perm_top = perm_gen->object_space()->top();

    // Release all previously held resources
    gc_task_manager()->release_all_resources();

    PSPromotionManager::pre_scavenge();

    // We'll use the promotion manager again later.
    PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
    {
      // TraceTime("Roots");

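      // Scan all roots that may point into the young gen in parallel:
      // one OldToYoungRootsTask per GC thread for the old gen, a serial
      // task for the perm gen, and one ScavengeRootsTask per VM root
      // group below.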
      GCTaskQueue* q = GCTaskQueue::create();

      for(uint i=0; i<ParallelGCThreads; i++) {
        q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i));
      }

      q->enqueue(new SerialOldToYoungRootsTask(perm_gen, perm_top));

      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles));
      // We scan the thread roots in parallel
      Threads::create_thread_roots_tasks(q);
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));

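      // Work stealing: steal tasks let otherwise-idle workers take
      // objects from other workers' stacks; the terminator lets them
      // agree on when all stacks are empty.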
      ParallelTaskTerminator terminator(
        gc_task_manager()->workers(),
        promotion_manager->depth_first() ?
          (TaskQueueSetSuper*) promotion_manager->stack_array_depth()
        : (TaskQueueSetSuper*) promotion_manager->stack_array_breadth());
      if (ParallelGCThreads > 1) {
        for (uint j=0; j<ParallelGCThreads; j++) {
          q->enqueue(new StealTask(&terminator));
        }
      }

      gc_task_manager()->execute_and_wait(q);
    }

    scavenge_midpoint.update();

    // Process reference objects discovered during scavenge
    {
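      // The soft reference policy decides which softly reachable objects
      // may be cleared during this collection; both variants below are
      // LRU-based, as their names indicate.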
#ifdef COMPILER2
      ReferencePolicy *soft_ref_policy = new LRUMaxHeapPolicy();
#else
      ReferencePolicy *soft_ref_policy = new LRUCurrentHeapPolicy();
#endif // COMPILER2

      PSKeepAliveClosure keep_alive(promotion_manager);
      PSEvacuateFollowersClosure evac_followers(promotion_manager);
      assert(soft_ref_policy != NULL, "No soft reference policy");
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        reference_processor()->process_discovered_references(
          soft_ref_policy, &_is_alive_closure, &keep_alive, &evac_followers,
          &task_executor);
      } else {
        reference_processor()->process_discovered_references(
          soft_ref_policy, &_is_alive_closure, &keep_alive, &evac_followers,
          NULL);
      }
    }

    // Enqueue reference objects discovered during scavenge.
    if (reference_processor()->processing_is_mt()) {
      PSRefProcTaskExecutor task_executor;
      reference_processor()->enqueue_discovered_references(&task_executor);
    } else {
      reference_processor()->enqueue_discovered_references(NULL);
    }

    // Finally, flush the promotion_manager's labs, and deallocate its stacks.
    assert(promotion_manager->claimed_stack_empty(), "Sanity");
    PSPromotionManager::post_scavenge();

    promotion_failure_occurred = promotion_failed();
    if (promotion_failure_occurred) {
      clean_up_failed_promotion();
      if (PrintGC) {
        gclog_or_tty->print("--");
      }
    }

    // Let the size policy know we're done. Note that we count promotion
    // failure cleanup time as part of the collection (otherwise, we're
    // implicitly saying it's mutator time).
    size_policy->minor_collection_end(gc_cause);

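    // A successful scavenge leaves eden and from-space with no live
    // objects, so clear them and swap the space labels: the just-filled
    // to-space becomes the new from-space for the next scavenge.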
    if (!promotion_failure_occurred) {
      // Swap the survivor spaces.
      young_gen->eden_space()->clear();
      young_gen->from_space()->clear();
      young_gen->swap_spaces();

      size_t survived = young_gen->from_space()->used_in_bytes();
      size_t promoted = old_gen->used_in_bytes() - old_gen_used_before;
      size_policy->update_averages(_survivor_overflow, survived, promoted);

      if (UseAdaptiveSizePolicy) {
        // Calculate the new survivor size and tenuring threshold

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print("AdaptiveSizeStart: ");
          gclog_or_tty->stamp();
          gclog_or_tty->print_cr(" collection: %d ",
                                 heap->total_collections());

          if (Verbose) {
            gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d"
                                " perm_gen_capacity: %d ",
                                old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(),
                                perm_gen->capacity_in_bytes());
          }
        }


        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_old_eden_size(
            size_policy->calculated_eden_size_in_bytes());
          counters->update_old_promo_size(
            size_policy->calculated_promo_size_in_bytes());
          counters->update_old_capacity(old_gen->capacity_in_bytes());
          counters->update_young_capacity(young_gen->capacity_in_bytes());
          counters->update_survived(survived);
          counters->update_promoted(promoted);
          counters->update_survivor_overflowed(_survivor_overflow);
        }

        size_t survivor_limit =
          size_policy->max_survivor_size(young_gen->max_size());
        _tenuring_threshold =
          size_policy->compute_survivor_space_size_and_threshold(
            _survivor_overflow,
            _tenuring_threshold,
            survivor_limit);

        if (PrintTenuringDistribution) {
          gclog_or_tty->cr();
          gclog_or_tty->print_cr("Desired survivor size %ld bytes, new threshold %d (max %d)",
                                 size_policy->calculated_survivor_size_in_bytes(),
                                 _tenuring_threshold, MaxTenuringThreshold);
        }

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_tenuring_threshold(_tenuring_threshold);
          counters->update_survivor_size_counters();
        }

        // Do call at minor collections?
        // Don't check if the size_policy is ready at this
        // level. Let the size_policy check that internally.
        if (UseAdaptiveSizePolicy &&
            UseAdaptiveGenerationSizePolicyAtMinorCollection &&
            ((gc_cause != GCCause::_java_lang_system_gc) ||
              UseAdaptiveSizePolicyWithSystemGC)) {

          // Calculate optimal free space amounts
          assert(young_gen->max_size() >
                 young_gen->from_space()->capacity_in_bytes() +
                 young_gen->to_space()->capacity_in_bytes(),
                 "Sizes of space in young gen are out-of-bounds");
          size_t max_eden_size = young_gen->max_size() -
            young_gen->from_space()->capacity_in_bytes() -
            young_gen->to_space()->capacity_in_bytes();
          size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
                                                     young_gen->eden_space()->used_in_bytes(),
                                                     old_gen->used_in_bytes(),
                                                     perm_gen->used_in_bytes(),
                                                     young_gen->eden_space()->capacity_in_bytes(),
                                                     old_gen->max_gen_size(),
                                                     max_eden_size,
                                                     false /* full gc*/,
                                                     gc_cause);

        }
        // Resize the young generation at every collection
        // even if new sizes have not been calculated. This is
        // to allow resizes that may have been inhibited by the
        // relative location of the "to" and "from" spaces.

        // Resizing the old gen at minor collections can cause increases
        // that don't feed back to the generation sizing policy until
        // a major collection. Don't resize the old gen here.

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                                 heap->total_collections());
        }
      }

      // Update the structure of the eden. With a NUMA eden, CPU hotplugging
      // or offlining can change the heap layout, so make sure eden is
      // reshaped if that is the case. update() will also cause adaptive
      // NUMA chunk resizing.
      assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
      young_gen->eden_space()->update();

      heap->gc_policy_counters()->update_counters();

      heap->resize_all_tlabs();

      assert(young_gen->to_space()->is_empty(), "to space should be empty now");
    }

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());

    // Re-verify object start arrays
    if (VerifyObjectStartArray &&
        VerifyAfterGC) {
      old_gen->verify_object_start_array();
      perm_gen->verify_object_start_array();
    }

    // Verify all old -> young cards are now precise
    if (VerifyRememberedSets) {
      // Precise verification will give false positives. Until this is fixed,
      // use imprecise verification.
      // CardTableExtension::verify_all_young_refs_precise();
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (TraceGen0Time) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here. This is after the GC, so
        // it would be confusing.
        young_gen->print_used_change(young_gen_used_before);
      }
      heap->print_heap_change(prev_used);
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyAfterGC:");
    Universe::verify(false);
  }

  if (PrintHeapAtGC) {
    Universe::print_heap_after_gc();
  }

  scavenge_exit.update();

  if (PrintGCTaskTimeStamps) {
    tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " " INT64_FORMAT,
                  scavenge_entry.ticks(), scavenge_midpoint.ticks(),
                  scavenge_exit.ticks());
    gc_task_manager()->print_task_time_stamps();
  }

  return !promotion_failure_occurred;
}

// This method iterates over all objects in the young generation,
// unforwarding markOops. It then restores any preserved mark oops,
// and clears the _preserved_mark_stack.
void PSScavenge::clean_up_failed_promotion() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  assert(promotion_failed(), "Sanity");

  PSYoungGen* young_gen = heap->young_gen();

  {
    ResourceMark rm;

    // Unforward all pointers in the young gen.
    PSPromotionFailedClosure unforward_closure;
    young_gen->object_iterate(&unforward_closure);

    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Restoring %d marks",
                             _preserved_oop_stack->length());
    }

    // Restore any saved marks.
    for (int i=0; i < _preserved_oop_stack->length(); i++) {
      oop obj = _preserved_oop_stack->at(i);
      markOop mark = _preserved_mark_stack->at(i);
      obj->set_mark(mark);
    }

    // Deallocate the preserved mark and oop stacks.
    // The stacks were allocated as CHeap objects, so
    // we must call delete to prevent mem leaks.
    delete _preserved_mark_stack;
    _preserved_mark_stack = NULL;
    delete _preserved_oop_stack;
    _preserved_oop_stack = NULL;
  }

  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
}

// This method is called whenever an attempt to promote an object
// fails. Some markOops will need preserving, some will not. Note
// that the entire eden is traversed after a failed promotion, with
// all forwarded headers replaced by the default markOop. This means
// it is not necessary to preserve most markOops.
void PSScavenge::oop_promotion_failed(oop obj, markOop obj_mark) {
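  // Lazily allocate the preserved mark/oop stacks the first time a
  // promotion fails, double-checked under ThreadCritical so that only
  // one thread performs the allocation.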
  if (_preserved_mark_stack == NULL) {
    ThreadCritical tc;  // Lock and retest
    if (_preserved_mark_stack == NULL) {
      assert(_preserved_oop_stack == NULL, "Sanity");
      _preserved_mark_stack = new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true);
      _preserved_oop_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
    }
  }

  // Because we must hold the ThreadCritical lock before using
  // the stacks, we should be safe from observing partial allocations,
  // which are also guarded by the ThreadCritical lock.
  if (obj_mark->must_be_preserved_for_promotion_failure(obj)) {
    ThreadCritical tc;
    _preserved_oop_stack->push(obj);
    _preserved_mark_stack->push(obj_mark);
  }
}

bool PSScavenge::should_attempt_scavenge() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();

  if (UsePerfData) {
    counters->update_scavenge_skipped(not_skipped);
  }

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  if (!ScavengeWithObjectsInToSpace) {
    // Do not attempt to promote unless to_space is empty
    if (!young_gen->to_space()->is_empty()) {
      _consecutive_skipped_scavenges++;
      if (UsePerfData) {
        counters->update_scavenge_skipped(to_space_not_empty);
      }
      return false;
    }
  }

  // Test to see if the scavenge will likely fail.
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // A similar test is done in the policy's should_full_GC(). If this is
  // changed, decide if that test should also be changed.
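  // Estimate the promotion volume as the smaller of the padded average
  // promoted by recent scavenges and everything currently live in the
  // young gen; scavenge only if that estimate fits in the old gen's
  // free space.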
  size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
  size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
  bool result = promotion_estimate < old_gen->free_in_bytes();

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(result ? " do scavenge: " : " skip scavenge: ");
    gclog_or_tty->print_cr(" average_promoted " SIZE_FORMAT
                           " padded_average_promoted " SIZE_FORMAT
                           " free in old gen " SIZE_FORMAT,
                           (size_t) policy->average_promoted_in_bytes(),
                           (size_t) policy->padded_average_promoted_in_bytes(),
                           old_gen->free_in_bytes());
    if (young_gen->used_in_bytes() <
        (size_t) policy->padded_average_promoted_in_bytes()) {
      gclog_or_tty->print_cr(" padded_promoted_average is greater"
                             " than maximum promotion = " SIZE_FORMAT, young_gen->used_in_bytes());
    }
  }

  if (result) {
    _consecutive_skipped_scavenges = 0;
  } else {
    _consecutive_skipped_scavenges++;
    if (UsePerfData) {
      counters->update_scavenge_skipped(promoted_too_large);
    }
  }
  return result;
}

// Used to add tasks
GCTaskManager* const PSScavenge::gc_task_manager() {
  assert(ParallelScavengeHeap::gc_task_manager() != NULL,
         "shouldn't return NULL");
  return ParallelScavengeHeap::gc_task_manager();
}

void PSScavenge::initialize() {
  // Arguments must have been parsed

  if (AlwaysTenure) {
    _tenuring_threshold = 0;
  } else if (NeverTenure) {
    _tenuring_threshold = markOopDesc::max_age + 1;
  } else {
    // We want to smooth out our startup times for the AdaptiveSizePolicy
    _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold :
                                                    MaxTenuringThreshold;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // Set boundary between young_gen and old_gen
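  // The generations are laid out with perm lowest, then old, then young,
  // so everything at or above this boundary is in the young gen; this is
  // what the scavenger's is_obj_in_young() test relies on.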
  assert(perm_gen->reserved().end() <= old_gen->object_space()->bottom(),
         "perm above old");
  assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(),
         "old above young");
  _young_generation_boundary = young_gen->eden_space()->bottom();

  // Initialize ref handling object for scavenging.
  MemRegion mr = young_gen->reserved();
  _ref_processor = ReferenceProcessor::create_ref_processor(
    mr,                         // span
    true,                       // atomic_discovery
    true,                       // mt_discovery
    NULL,                       // is_alive_non_header
    ParallelGCThreads,
    ParallelRefProcEnabled);

  // Cache the cardtable
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
  _card_table = (CardTableExtension*)bs;

  _counters = new CollectorCounters("PSScavenge", 0);
}