Mercurial > hg > graal-compiler
annotate src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp @ 380:eb28cf662f56 (Merge)

author:   trims
date:     Tue, 07 Oct 2008 11:01:35 -0700
parents:  850fdf70db2b
children: c96030fff130

/*
 * Copyright 2002-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */


# include "incls/_precompiled.incl"
# include "incls/_psScavenge.cpp.incl"

HeapWord*               PSScavenge::_to_space_top_before_gc = NULL;
int                     PSScavenge::_consecutive_skipped_scavenges = 0;
ReferenceProcessor*     PSScavenge::_ref_processor = NULL;
CardTableExtension*     PSScavenge::_card_table = NULL;
bool                    PSScavenge::_survivor_overflow = false;
int                     PSScavenge::_tenuring_threshold = 0;
HeapWord*               PSScavenge::_young_generation_boundary = NULL;
elapsedTimer            PSScavenge::_accumulated_time;
GrowableArray<markOop>* PSScavenge::_preserved_mark_stack = NULL;
GrowableArray<oop>*     PSScavenge::_preserved_oop_stack = NULL;
CollectorCounters*      PSScavenge::_counters = NULL;

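// PSIsAliveClosure is the liveness predicate handed to the reference
// processor during a scavenge: any object outside the young generation is
// treated as live, while a young-generation object is considered live only
// if it has already been forwarded (i.e. copied or promoted) by this scavenge.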
// Define before use
class PSIsAliveClosure: public BoolObjectClosure {
 public:
  void do_object(oop p) {
    assert(false, "Do not call.");
  }
  bool do_object_b(oop p) {
    return (!PSScavenge::is_obj_in_young((HeapWord*) p)) || p->is_forwarded();
  }
};

PSIsAliveClosure PSScavenge::_is_alive_closure;

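// PSKeepAliveClosure is applied to the referents that the reference processor
// decides to keep alive: any referent still sitting in the space being
// collected is copied or promoted through the promotion manager.  The
// templated do_oop_work() lets one implementation serve both regular (oop*)
// and compressed (narrowOop*) references.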
class PSKeepAliveClosure: public OopClosure {
 protected:
  MutableSpace* _to_space;
  PSPromotionManager* _promotion_manager;

 public:
  PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
    _to_space = heap->young_gen()->to_space();

    assert(_promotion_manager != NULL, "Sanity");
  }

  template <class T> void do_oop_work(T* p) {
    assert (!oopDesc::is_null(*p), "expected non-null ref");
    assert ((oopDesc::load_decode_heap_oop_not_null(p))->is_oop(),
            "expected an oop while scanning weak refs");

    // Weak refs may be visited more than once.
    if (PSScavenge::should_scavenge(p, _to_space)) {
      PSScavenge::copy_and_push_safe_barrier(_promotion_manager, p);
    }
  }
  virtual void do_oop(oop* p)       { PSKeepAliveClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { PSKeepAliveClosure::do_oop_work(p); }
};

class PSEvacuateFollowersClosure: public VoidClosure {
 private:
  PSPromotionManager* _promotion_manager;
 public:
  PSEvacuateFollowersClosure(PSPromotionManager* pm) : _promotion_manager(pm) {}

  virtual void do_void() {
    assert(_promotion_manager != NULL, "Sanity");
    _promotion_manager->drain_stacks(true);
    guarantee(_promotion_manager->stacks_empty(),
              "stacks should be empty at this point");
  }
};

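// PSPromotionFailedClosure restores the default mark word (via init_mark())
// in any object whose header still holds a forwarding pointer;
// clean_up_failed_promotion() below applies it to the entire young generation
// after a promotion failure.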
class PSPromotionFailedClosure : public ObjectClosure {
  virtual void do_object(oop obj) {
    if (obj->is_forwarded()) {
      obj->init_mark();
    }
  }
};

class PSRefProcTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask & _rp_task;
  uint _work_id;
 public:
  PSRefProcTaskProxy(ProcessTask & rp_task, uint work_id)
    : _rp_task(rp_task),
      _work_id(work_id)
  { }

 private:
  virtual char* name() { return (char *)"Process referents by policy in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which);
};

void PSRefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
{
  PSPromotionManager* promotion_manager =
    PSPromotionManager::gc_thread_promotion_manager(which);
  assert(promotion_manager != NULL, "sanity check");
  PSKeepAliveClosure keep_alive(promotion_manager);
  PSEvacuateFollowersClosure evac_followers(promotion_manager);
  PSIsAliveClosure is_alive;
  _rp_task.work(_work_id, is_alive, keep_alive, evac_followers);
}

class PSRefEnqueueTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _enq_task;
  uint _work_id;

 public:
  PSRefEnqueueTaskProxy(EnqueueTask& enq_task, uint work_id)
    : _enq_task(enq_task),
      _work_id(work_id)
  { }

  virtual char* name() { return (char *)"Enqueue reference objects in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which)
  {
    _enq_task.work(_work_id);
  }
};

class PSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
};

void PSRefProcTaskExecutor::execute(ProcessTask& task)
{
  GCTaskQueue* q = GCTaskQueue::create();
  for(uint i=0; i<ParallelGCThreads; i++) {
    q->enqueue(new PSRefProcTaskProxy(task, i));
  }
  ParallelTaskTerminator terminator(
    ParallelScavengeHeap::gc_task_manager()->workers(),
    UseDepthFirstScavengeOrder ?
      (TaskQueueSetSuper*) PSPromotionManager::stack_array_depth()
    : (TaskQueueSetSuper*) PSPromotionManager::stack_array_breadth());
  if (task.marks_oops_alive() && ParallelGCThreads > 1) {
    for (uint j=0; j<ParallelGCThreads; j++) {
      q->enqueue(new StealTask(&terminator));
    }
  }
  ParallelScavengeHeap::gc_task_manager()->execute_and_wait(q);
}
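// For ParallelGCThreads == N, the queue assembled above looks like:
//   [ proxy(task,0) ... proxy(task,N-1) ] [ steal ... steal ]
// where the trailing StealTasks are added only when the task marks oops alive
// and N > 1, letting idle workers steal outstanding copying work until the
// ParallelTaskTerminator observes that all queues have drained.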


void PSRefProcTaskExecutor::execute(EnqueueTask& task)
{
  GCTaskQueue* q = GCTaskQueue::create();
  for(uint i=0; i<ParallelGCThreads; i++) {
    q->enqueue(new PSRefEnqueueTaskProxy(task, i));
  }
  ParallelScavengeHeap::gc_task_manager()->execute_and_wait(q);
}

// This method contains all heap specific policy for invoking scavenge.
// PSScavenge::invoke_no_policy() will do nothing but attempt to
// scavenge. It will not clean up after failed promotions, bail out if
// we've exceeded policy time limits, or perform any other special behavior.
// All such policy should be placed here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
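//
// In short: the scavenge is skipped entirely when the adaptive policy reports
// that GC time limits have been exceeded; otherwise, if the scavenge did not
// complete successfully or the policy predicts the old generation cannot
// absorb the next round of promotions, a full collection (parallel old
// compaction or serial mark-sweep) follows immediately.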
void PSScavenge::invoke()
{
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // Before each allocation/collection attempt, find out from the
  // policy object if GCs are, on the whole, taking too long. If so,
  // bail out without attempting a collection.
  if (!policy->gc_time_limit_exceeded()) {
    IsGCActiveMark mark;

    bool scavenge_was_done = PSScavenge::invoke_no_policy();

    PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
    if (UsePerfData)
      counters->update_full_follows_scavenge(0);
    if (!scavenge_was_done ||
        policy->should_full_GC(heap->old_gen()->free_in_bytes())) {
      if (UsePerfData)
        counters->update_full_follows_scavenge(full_follows_scavenge);

      GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
      if (UseParallelOldGC) {
        PSParallelCompact::invoke_no_policy(false);
      } else {
        PSMarkSweep::invoke_no_policy(false);
      }
    }
  }
}

// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSScavenge::invoke_no_policy() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  TimeStamp scavenge_entry;
  TimeStamp scavenge_midpoint;
  TimeStamp scavenge_exit;

  scavenge_entry.update();

  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Check for potential problems.
  if (!should_attempt_scavenge()) {
    return false;
  }

  bool promotion_failure_occurred = false;

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();
  heap->increment_total_collections();

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  if ((gc_cause != GCCause::_java_lang_system_gc) ||
       UseAdaptiveSizePolicyWithSystemGC) {
    // Gather the feedback data for eden occupancy.
    young_gen->eden_space()->accumulate_statistics();
  }

  if (ZapUnusedHeapArea) {
    // Save information needed to minimize mangling
    heap->record_gen_tops_before_GC();
  }

  if (PrintHeapAtGC) {
    Universe::print_heap_before_gc();
  }

  assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  size_t prev_used = heap->used();
  assert(promotion_failed() == false, "Sanity");

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyBeforeGC:");
    Universe::verify(true);
  }

  {
    ResourceMark rm;
    HandleMark hm;

    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t1("GC", PrintGC, !PrintGCDetails, gclog_or_tty);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(false /* not full GC */);

    if (TraceGen0Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();

    // Verify the object start arrays.
    if (VerifyObjectStartArray &&
        VerifyBeforeGC) {
      old_gen->verify_object_start_array();
      perm_gen->verify_object_start_array();
    }

    // Verify no unmarked old->young roots
    if (VerifyRememberedSets) {
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (!ScavengeWithObjectsInToSpace) {
      assert(young_gen->to_space()->is_empty(),
             "Attempt to scavenge with live objects in to_space");
      young_gen->to_space()->clear(SpaceDecorator::Mangle);
    } else if (ZapUnusedHeapArea) {
      young_gen->to_space()->mangle_unused_area();
    }
    save_to_space_top_before_gc();

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
    COMPILER2_PRESENT(DerivedPointerTable::clear());

    reference_processor()->enable_discovery();

    // We track how much was promoted to the next generation for
    // the AdaptiveSizePolicy.
    size_t old_gen_used_before = old_gen->used_in_bytes();

    // For PrintGCDetails
    size_t young_gen_used_before = young_gen->used_in_bytes();

    // Reset our survivor overflow.
    set_survivor_overflow(false);

    // We need to save the old/perm top values before
    // creating the promotion_manager. We pass the top
    // values to the card_table, to prevent it from
    // straying into the promotion labs.
    HeapWord* old_top = old_gen->object_space()->top();
    HeapWord* perm_top = perm_gen->object_space()->top();

    // Release all previously held resources
    gc_task_manager()->release_all_resources();

    PSPromotionManager::pre_scavenge();

    // We'll use the promotion manager again later.
    PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
    {
      // TraceTime("Roots");

      GCTaskQueue* q = GCTaskQueue::create();

      for(uint i=0; i<ParallelGCThreads; i++) {
        q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i));
      }

      q->enqueue(new SerialOldToYoungRootsTask(perm_gen, perm_top));

      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles));
      // We scan the thread roots in parallel
      Threads::create_thread_roots_tasks(q);
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));

      ParallelTaskTerminator terminator(
        gc_task_manager()->workers(),
        promotion_manager->depth_first() ?
            (TaskQueueSetSuper*) promotion_manager->stack_array_depth()
          : (TaskQueueSetSuper*) promotion_manager->stack_array_breadth());
      if (ParallelGCThreads>1) {
        for (uint j=0; j<ParallelGCThreads; j++) {
          q->enqueue(new StealTask(&terminator));
        }
      }

      gc_task_manager()->execute_and_wait(q);
    }
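    // At this point every strong root -- old->young references found via the
    // card table, references from the perm gen, the VM-internal root sets
    // enqueued above, and the thread stacks -- has been scanned, and the young
    // objects reachable from them have been copied to to-space or promoted.
    // What remains is processing of the discovered (weak) references.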

    scavenge_midpoint.update();

    // Process reference objects discovered during scavenge
    {
#ifdef COMPILER2
      ReferencePolicy *soft_ref_policy = new LRUMaxHeapPolicy();
#else
      ReferencePolicy *soft_ref_policy = new LRUCurrentHeapPolicy();
#endif // COMPILER2

      PSKeepAliveClosure keep_alive(promotion_manager);
      PSEvacuateFollowersClosure evac_followers(promotion_manager);
      assert(soft_ref_policy != NULL,"No soft reference policy");
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        reference_processor()->process_discovered_references(
          soft_ref_policy, &_is_alive_closure, &keep_alive, &evac_followers,
          &task_executor);
      } else {
        reference_processor()->process_discovered_references(
          soft_ref_policy, &_is_alive_closure, &keep_alive, &evac_followers,
          NULL);
      }
    }

    // Enqueue reference objects discovered during scavenge.
    if (reference_processor()->processing_is_mt()) {
      PSRefProcTaskExecutor task_executor;
      reference_processor()->enqueue_discovered_references(&task_executor);
    } else {
      reference_processor()->enqueue_discovered_references(NULL);
    }

    // Finally, flush the promotion_manager's labs, and deallocate its stacks.
    assert(promotion_manager->claimed_stack_empty(), "Sanity");
    PSPromotionManager::post_scavenge();

    promotion_failure_occurred = promotion_failed();
    if (promotion_failure_occurred) {
      clean_up_failed_promotion();
      if (PrintGC) {
        gclog_or_tty->print("--");
      }
    }

    // Let the size policy know we're done. Note that we count promotion
    // failure cleanup time as part of the collection (otherwise, we're
    // implicitly saying it's mutator time).
    size_policy->minor_collection_end(gc_cause);

    if (!promotion_failure_occurred) {
      // Swap the survivor spaces.
      young_gen->eden_space()->clear(SpaceDecorator::Mangle);
      young_gen->from_space()->clear(SpaceDecorator::Mangle);
      young_gen->swap_spaces();

      size_t survived = young_gen->from_space()->used_in_bytes();
      size_t promoted = old_gen->used_in_bytes() - old_gen_used_before;
      size_policy->update_averages(_survivor_overflow, survived, promoted);

      if (UseAdaptiveSizePolicy) {
        // Calculate the new survivor size and tenuring threshold

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print("AdaptiveSizeStart: ");
          gclog_or_tty->stamp();
          gclog_or_tty->print_cr(" collection: %d ",
                                 heap->total_collections());

          if (Verbose) {
            gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d"
                                " perm_gen_capacity: %d ",
                                old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(),
                                perm_gen->capacity_in_bytes());
          }
        }


        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_old_eden_size(
            size_policy->calculated_eden_size_in_bytes());
          counters->update_old_promo_size(
            size_policy->calculated_promo_size_in_bytes());
          counters->update_old_capacity(old_gen->capacity_in_bytes());
          counters->update_young_capacity(young_gen->capacity_in_bytes());
          counters->update_survived(survived);
          counters->update_promoted(promoted);
          counters->update_survivor_overflowed(_survivor_overflow);
        }

        size_t survivor_limit =
          size_policy->max_survivor_size(young_gen->max_size());
        _tenuring_threshold =
          size_policy->compute_survivor_space_size_and_threshold(
            _survivor_overflow,
            _tenuring_threshold,
            survivor_limit);

        if (PrintTenuringDistribution) {
          gclog_or_tty->cr();
          gclog_or_tty->print_cr("Desired survivor size %ld bytes, new threshold %d (max %d)",
                                 size_policy->calculated_survivor_size_in_bytes(),
                                 _tenuring_threshold, MaxTenuringThreshold);
        }

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_tenuring_threshold(_tenuring_threshold);
          counters->update_survivor_size_counters();
        }

        // Do call at minor collections?
        // Don't check if the size_policy is ready at this
        // level.  Let the size_policy check that internally.
        if (UseAdaptiveSizePolicy &&
            UseAdaptiveGenerationSizePolicyAtMinorCollection &&
            ((gc_cause != GCCause::_java_lang_system_gc) ||
              UseAdaptiveSizePolicyWithSystemGC)) {

          // Calculate optimal free space amounts
          assert(young_gen->max_size() >
                 young_gen->from_space()->capacity_in_bytes() +
                 young_gen->to_space()->capacity_in_bytes(),
                 "Sizes of space in young gen are out-of-bounds");
          size_t max_eden_size = young_gen->max_size() -
                                 young_gen->from_space()->capacity_in_bytes() -
                                 young_gen->to_space()->capacity_in_bytes();
          size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
                                   young_gen->eden_space()->used_in_bytes(),
                                   old_gen->used_in_bytes(),
                                   perm_gen->used_in_bytes(),
                                   young_gen->eden_space()->capacity_in_bytes(),
                                   old_gen->max_gen_size(),
                                   max_eden_size,
                                   false  /* full gc*/,
                                   gc_cause);

        }
        // Resize the young generation at every collection
        // even if new sizes have not been calculated.  This is
        // to allow resizes that may have been inhibited by the
        // relative location of the "to" and "from" spaces.

        // Resizing the old gen at minor collections can cause increases
        // that don't feed back to the generation sizing policy until
        // a major collection.  Don't resize the old gen here.

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                                 heap->total_collections());
        }
      }

      // Update the structure of the eden. With NUMA-eden, CPU hotplugging or offlining can
      // change the heap layout. Make sure eden is reshaped if that's the case.
      // update() will also cause adaptive NUMA chunk resizing.
      assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
      young_gen->eden_space()->update();

      heap->gc_policy_counters()->update_counters();

      heap->resize_all_tlabs();

      assert(young_gen->to_space()->is_empty(), "to space should be empty now");
    }

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());

    // Re-verify object start arrays
    if (VerifyObjectStartArray &&
        VerifyAfterGC) {
      old_gen->verify_object_start_array();
      perm_gen->verify_object_start_array();
    }

    // Verify all old -> young cards are now precise
    if (VerifyRememberedSets) {
      // Precise verification will give false positives. Until this is fixed,
      // use imprecise verification.
      // CardTableExtension::verify_all_young_refs_precise();
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (TraceGen0Time) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here.  This is after the GC so
        // would be confusing.
        young_gen->print_used_change(young_gen_used_before);
      }
      heap->print_heap_change(prev_used);
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyAfterGC:");
    Universe::verify(false);
  }

  if (PrintHeapAtGC) {
    Universe::print_heap_after_gc();
  }

  if (ZapUnusedHeapArea) {
    young_gen->eden_space()->check_mangled_unused_area_complete();
    young_gen->from_space()->check_mangled_unused_area_complete();
    young_gen->to_space()->check_mangled_unused_area_complete();
  }

  scavenge_exit.update();

  if (PrintGCTaskTimeStamps) {
    tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " " INT64_FORMAT,
                  scavenge_entry.ticks(), scavenge_midpoint.ticks(),
                  scavenge_exit.ticks());
    gc_task_manager()->print_task_time_stamps();
  }

  return !promotion_failure_occurred;
}

// This method iterates over all objects in the young generation,
// unforwarding markOops. It then restores any preserved mark oops,
// and clears the _preserved_mark_stack.
void PSScavenge::clean_up_failed_promotion() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  assert(promotion_failed(), "Sanity");

  PSYoungGen* young_gen = heap->young_gen();

  {
    ResourceMark rm;

    // Unforward all pointers in the young gen.
    PSPromotionFailedClosure unforward_closure;
    young_gen->object_iterate(&unforward_closure);

    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Restoring %d marks",
                             _preserved_oop_stack->length());
    }

    // Restore any saved marks.
    for (int i=0; i < _preserved_oop_stack->length(); i++) {
      oop obj = _preserved_oop_stack->at(i);
      markOop mark = _preserved_mark_stack->at(i);
      obj->set_mark(mark);
    }

    // Deallocate the preserved mark and oop stacks.
    // The stacks were allocated as CHeap objects, so
    // we must call delete to prevent mem leaks.
    delete _preserved_mark_stack;
    _preserved_mark_stack = NULL;
    delete _preserved_oop_stack;
    _preserved_oop_stack = NULL;
  }

  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
}

// This method is called whenever an attempt to promote an object
// fails. Some markOops will need preserving, some will not. Note
// that the entire eden is traversed after a failed promotion, with
// all forwarded headers replaced by the default markOop. This means
// it is not necessary to preserve most markOops.
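// The preserved-mark and preserved-oop stacks are allocated lazily, using a
// double-checked test under the ThreadCritical lock so that concurrent GC
// worker threads allocate them exactly once and never observe a partially
// constructed stack.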
void PSScavenge::oop_promotion_failed(oop obj, markOop obj_mark) {
  if (_preserved_mark_stack == NULL) {
    ThreadCritical tc;  // Lock and retest
    if (_preserved_mark_stack == NULL) {
      assert(_preserved_oop_stack == NULL, "Sanity");
      _preserved_mark_stack = new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true);
      _preserved_oop_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
    }
  }

  // Because we must hold the ThreadCritical lock before using
  // the stacks, we should be safe from observing partial allocations,
  // which are also guarded by the ThreadCritical lock.
  if (obj_mark->must_be_preserved_for_promotion_failure(obj)) {
    ThreadCritical tc;
    _preserved_oop_stack->push(obj);
    _preserved_mark_stack->push(obj_mark);
  }
}

bool PSScavenge::should_attempt_scavenge() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();

  if (UsePerfData) {
    counters->update_scavenge_skipped(not_skipped);
  }

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  if (!ScavengeWithObjectsInToSpace) {
    // Do not attempt to promote unless to_space is empty
    if (!young_gen->to_space()->is_empty()) {
      _consecutive_skipped_scavenges++;
      if (UsePerfData) {
        counters->update_scavenge_skipped(to_space_not_empty);
      }
      return false;
    }
  }

  // Test to see if the scavenge will likely fail.
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // A similar test is done in the policy's should_full_GC().  If this is
  // changed, decide if that test should also be changed.
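  // The estimate below is conservative: for example, if the padded average of
  // bytes promoted per scavenge is 8 MB but the young gen currently holds only
  // 6 MB, at most 6 MB can be promoted, so the scavenge is attempted whenever
  // the old gen has more than 6 MB free.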
  size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
  size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
  bool result = promotion_estimate < old_gen->free_in_bytes();

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(result ? "  do scavenge: " : "  skip scavenge: ");
    gclog_or_tty->print_cr(" average_promoted " SIZE_FORMAT
      " padded_average_promoted " SIZE_FORMAT
      " free in old gen " SIZE_FORMAT,
      (size_t) policy->average_promoted_in_bytes(),
      (size_t) policy->padded_average_promoted_in_bytes(),
      old_gen->free_in_bytes());
    if (young_gen->used_in_bytes() <
        (size_t) policy->padded_average_promoted_in_bytes()) {
      gclog_or_tty->print_cr(" padded_promoted_average is greater"
        " than maximum promotion = " SIZE_FORMAT, young_gen->used_in_bytes());
    }
  }

  if (result) {
    _consecutive_skipped_scavenges = 0;
  } else {
    _consecutive_skipped_scavenges++;
    if (UsePerfData) {
      counters->update_scavenge_skipped(promoted_too_large);
    }
  }
  return result;
}

// Used to add tasks
GCTaskManager* const PSScavenge::gc_task_manager() {
  assert(ParallelScavengeHeap::gc_task_manager() != NULL,
         "shouldn't return NULL");
  return ParallelScavengeHeap::gc_task_manager();
}

void PSScavenge::initialize() {
  // Arguments must have been parsed

  if (AlwaysTenure) {
    _tenuring_threshold = 0;
  } else if (NeverTenure) {
    _tenuring_threshold = markOopDesc::max_age + 1;
  } else {
    // We want to smooth out our startup times for the AdaptiveSizePolicy
    _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold :
      MaxTenuringThreshold;
  }
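  // A threshold of 0 (AlwaysTenure) promotes every surviving object on its
  // first scavenge, while max_age + 1 (NeverTenure) keeps objects cycling
  // through the survivor spaces indefinitely; otherwise the adaptive size
  // policy adjusts the threshold between these extremes after each collection.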

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // Set boundary between young_gen and old_gen
  assert(perm_gen->reserved().end() <= old_gen->object_space()->bottom(),
         "perm above old");
  assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(),
         "old above young");
  _young_generation_boundary = young_gen->eden_space()->bottom();

  // Initialize ref handling object for scavenging.
  MemRegion mr = young_gen->reserved();
  _ref_processor = ReferenceProcessor::create_ref_processor(
    mr,                         // span
    true,                       // atomic_discovery
    true,                       // mt_discovery
    NULL,                       // is_alive_non_header
    ParallelGCThreads,
    ParallelRefProcEnabled);

  // Cache the cardtable
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
  _card_table = (CardTableExtension*)bs;

  _counters = new CollectorCounters("PSScavenge", 0);
}