annotate src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp @ 453:c96030fff130
6684579: SoftReference processing can be made more efficient
Summary: For current soft-ref clearing policies, we can decide at marking time if a soft-reference will definitely not be cleared, postponing the decision of whether it will definitely be cleared to the final reference processing phase. This can be especially beneficial in the case of concurrent collectors where the marking is usually concurrent but reference processing is usually not.
Reviewed-by: jmasa
author    ysr
date      Thu, 20 Nov 2008 16:56:09 -0800
parents   eb28cf662f56
children  27a80744a83b
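In brief, the change snaps the soft-reference clearing policy once, before marking begins (the ref_processor()->snap_policy(clear_all_softrefs) calls in the source below), so that during marking the collector only needs to recognize references that will definitely not be cleared; the definite "clear it" decision is left to the final reference-processing phase. The sketch below is a minimal, hypothetical illustration of that split, not HotSpot code: the type and member names (SoftRefSnapshotPolicy, definitely_not_cleared, the clock fields) are invented for the example.

// Minimal sketch (not HotSpot code): snapshot the soft-ref clearing decision
// once, before marking starts, instead of re-evaluating it per reference.
// All names below are hypothetical.
#include <cstdint>
#include <cstdio>

struct SoftRefSnapshotPolicy {
  bool     _clear_all;   // a "clear every soft reference" request, e.g. near OOM
  uint64_t _snap_clock;  // clock value captured when the policy was snapped

  // Called once before marking begins ("snap" the policy).
  void snap(bool clear_all, uint64_t now) {
    _clear_all  = clear_all;
    _snap_clock = now;
  }

  // During marking we only need to know whether a reference will
  // *definitely not* be cleared; such referents can be treated as strongly
  // reachable immediately.  The definite "will be cleared" decision is
  // postponed to the final reference-processing phase.
  bool definitely_not_cleared(uint64_t last_access_clock) const {
    if (_clear_all) return false;             // everything stays a candidate
    return last_access_clock == _snap_clock;  // recently touched => surely kept
  }
};

int main() {
  SoftRefSnapshotPolicy policy;
  policy.snap(/*clear_all=*/false, /*now=*/42);
  std::printf("recently used ref kept at marking time: %s\n",
              policy.definitely_not_cleared(42) ? "yes" : "no");
  std::printf("stale ref deferred to reference processing: %s\n",
              policy.definitely_not_cleared(7) ? "yes" : "no");
  return 0;
}

The actual mechanism in this file is simply the pair of snap_policy calls added before marking and before process_discovered_references(); the annotated source follows.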
/*
 * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_psMarkSweep.cpp.incl"

elapsedTimer       PSMarkSweep::_accumulated_time;
unsigned int       PSMarkSweep::_total_invocations = 0;
jlong              PSMarkSweep::_time_of_last_gc = 0;
CollectorCounters* PSMarkSweep::_counters = NULL;

void PSMarkSweep::initialize() {
  MemRegion mr = Universe::heap()->reserved_region();
  _ref_processor = new ReferenceProcessor(mr,
                                          true,    // atomic_discovery
                                          false);  // mt_discovery
  _counters = new CollectorCounters("PSMarkSweep", 1);
}

// This method contains all heap specific policy for invoking mark sweep.
// PSMarkSweep::invoke_no_policy() will only attempt to mark-sweep-compact
// the heap. It will do nothing further. If we need to bail out for policy
// reasons, scavenge before full gc, or any other specialized behavior, it
// needs to be added here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
void PSMarkSweep::invoke(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // Before each allocation/collection attempt, find out from the
  // policy object if GCs are, on the whole, taking too long. If so,
  // bail out without attempting a collection. The exceptions are
  // for explicitly requested GC's.
  if (!policy->gc_time_limit_exceeded() ||
      GCCause::is_user_requested_gc(gc_cause) ||
      GCCause::is_serviceability_requested_gc(gc_cause)) {
    IsGCActiveMark mark;

    if (ScavengeBeforeFullGC) {
      PSScavenge::invoke_no_policy();
    }

    int count = (maximum_heap_compaction) ? 1 : MarkSweepAlwaysCompactCount;
    IntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
    PSMarkSweep::invoke_no_policy(maximum_heap_compaction);
  }
}

// This method contains no policy. You should probably
// be calling invoke() instead.
void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

  if (GC_locker::check_active_before_gc()) {
    return;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // Increment the invocation count
  heap->increment_total_collections(true /* full */);

  // Save information needed to minimize mangling
  heap->record_gen_tops_before_GC();

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  if (PrintHeapAtGC) {
    Universe::print_heap_before_gc();
  }

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyBeforeGC:");
    Universe::verify(true);
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    old_gen->verify_object_start_array();
    perm_gen->verify_object_start_array();
  }

  // Filled in below to track the state of the young gen after the collection.
  bool eden_empty;
  bool survivors_empty;
  bool young_gen_empty;

  {
    HandleMark hm;
    const bool is_system_gc = gc_cause == GCCause::_java_lang_system_gc;
    // This is useful for debugging but don't change the output
    // the customer sees.
    const char* gc_cause_str = "Full GC";
    if (is_system_gc && PrintGCDetails) {
      gc_cause_str = "Full GC (System)";
    }
    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t1(gc_cause_str, PrintGC, !PrintGCDetails, gclog_or_tty);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(true /* Full GC */);

    if (TraceGen1Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

    // When collecting the permanent generation methodOops may be moving,
    // so we either have to flush all bcp data or convert it into bci.
    CodeCache::gc_prologue();
    Threads::gc_prologue();
    BiasedLocking::preserve_marks();

    // Capture heap size before collection for printing.
    size_t prev_used = heap->used();

    // Capture perm gen size before collection for sizing.
    size_t perm_gen_prev_used = perm_gen->used_in_bytes();

    // For PrintGCDetails
    size_t old_gen_prev_used = old_gen->used_in_bytes();
    size_t young_gen_prev_used = young_gen->used_in_bytes();

    allocate_stacks();

    NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
    COMPILER2_PRESENT(DerivedPointerTable::clear());

    ref_processor()->enable_discovery();
    ref_processor()->snap_policy(clear_all_softrefs);

    mark_sweep_phase1(clear_all_softrefs);

    mark_sweep_phase2();

    // Don't add any more derived pointers during phase3
    COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
    COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

    mark_sweep_phase3();

    mark_sweep_phase4();

    restore_marks();

    deallocate_stacks();

    if (ZapUnusedHeapArea) {
      // Do a complete mangle (top to end) because the usage for
      // scratch does not maintain a top pointer.
      young_gen->to_space()->mangle_unused_area_complete();
    }

    eden_empty = young_gen->eden_space()->is_empty();
    if (!eden_empty) {
      eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);
    }

    // Update heap occupancy information which is used as
    // input to soft ref clearing policy at the next gc.
    Universe::update_heap_info_at_gc();

    survivors_empty = young_gen->from_space()->is_empty() &&
                      young_gen->to_space()->is_empty();
    young_gen_empty = eden_empty && survivors_empty;

    BarrierSet* bs = heap->barrier_set();
    if (bs->is_a(BarrierSet::ModRef)) {
      ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
      MemRegion old_mr = heap->old_gen()->reserved();
      MemRegion perm_mr = heap->perm_gen()->reserved();
      assert(perm_mr.end() <= old_mr.start(), "Generations out of order");

      if (young_gen_empty) {
        modBS->clear(MemRegion(perm_mr.start(), old_mr.end()));
      } else {
        modBS->invalidate(MemRegion(perm_mr.start(), old_mr.end()));
      }
    }

    BiasedLocking::restore_marks();
    Threads::gc_epilogue();
    CodeCache::gc_epilogue();

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    ref_processor()->enqueue_discovered_references(NULL);

    // Update time of last GC
    reset_millis_since_last_gc();

    // Let the size policy know we're done
    size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);

    if (UseAdaptiveSizePolicy) {

      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print("AdaptiveSizeStart: ");
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" collection: %d ",
                               heap->total_collections());
        if (Verbose) {
          gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d"
                              " perm_gen_capacity: %d ",
                              old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(),
                              perm_gen->capacity_in_bytes());
        }
      }

      // Don't check if the size_policy is ready here. Let
      // the size_policy check that internally.
      if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
          ((gc_cause != GCCause::_java_lang_system_gc) ||
            UseAdaptiveSizePolicyWithSystemGC)) {
        // Calculate optimal free space amounts
        assert(young_gen->max_size() >
               young_gen->from_space()->capacity_in_bytes() +
               young_gen->to_space()->capacity_in_bytes(),
               "Sizes of space in young gen are out-of-bounds");
        size_t max_eden_size = young_gen->max_size() -
          young_gen->from_space()->capacity_in_bytes() -
          young_gen->to_space()->capacity_in_bytes();
        size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
                                                   young_gen->eden_space()->used_in_bytes(),
                                                   old_gen->used_in_bytes(),
                                                   perm_gen->used_in_bytes(),
                                                   young_gen->eden_space()->capacity_in_bytes(),
                                                   old_gen->max_gen_size(),
                                                   max_eden_size,
                                                   true /* full gc*/,
                                                   gc_cause);

        heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());

        // Don't resize the young generation at a major collection. A
        // desired young generation size may have been calculated but
        // resizing the young generation complicates the code because the
        // resizing of the old generation may have moved the boundary
        // between the young generation and the old generation. Let the
        // young generation resizing happen at the minor collections.
      }
      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                               heap->total_collections());
      }
    }

    if (UsePerfData) {
      heap->gc_policy_counters()->update_counters();
      heap->gc_policy_counters()->update_old_capacity(
        old_gen->capacity_in_bytes());
      heap->gc_policy_counters()->update_young_capacity(
        young_gen->capacity_in_bytes());
    }

    heap->resize_all_tlabs();

    // We collected the perm gen, so we'll resize it here.
    perm_gen->compute_new_size(perm_gen_prev_used);

    if (TraceGen1Time) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here. This is after the GC so
        // would be confusing.
        young_gen->print_used_change(young_gen_prev_used);
        old_gen->print_used_change(old_gen_prev_used);
      }
      heap->print_heap_change(prev_used);
      // Do perm gen after heap because prev_used does
      // not include the perm gen (done this way in the other
      // collectors).
      if (PrintGCDetails) {
        perm_gen->print_used_change(perm_gen_prev_used);
      }
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();

    if (PrintGCDetails) {
      if (size_policy->print_gc_time_limit_would_be_exceeded()) {
        if (size_policy->gc_time_limit_exceeded()) {
          gclog_or_tty->print_cr(" GC time is exceeding GCTimeLimit "
                                 "of %d%%", GCTimeLimit);
        } else {
          gclog_or_tty->print_cr(" GC time would exceed GCTimeLimit "
                                 "of %d%%", GCTimeLimit);
        }
      }
      size_policy->set_print_gc_time_limit_would_be_exceeded(false);
    }
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyAfterGC:");
    Universe::verify(false);
  }

  // Re-verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyAfterGC) {
    old_gen->verify_object_start_array();
    perm_gen->verify_object_start_array();
  }

  if (ZapUnusedHeapArea) {
    old_gen->object_space()->check_mangled_unused_area_complete();
    perm_gen->object_space()->check_mangled_unused_area_complete();
  }

  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

  if (PrintHeapAtGC) {
    Universe::print_heap_after_gc();
  }
}

bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                             PSYoungGen* young_gen,
                                             PSOldGen* old_gen) {
  MutableSpace* const eden_space = young_gen->eden_space();
  assert(!eden_space->is_empty(), "eden must be non-empty");
  assert(young_gen->virtual_space()->alignment() ==
         old_gen->virtual_space()->alignment(), "alignments do not match");

  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
    return false;
  }

  // Both generations must be completely committed.
  if (young_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }
  if (old_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }

  // Figure out how much to take from eden. Include the average amount promoted
  // in the total; otherwise the next young gen GC will simply bail out to a
  // full GC.
  const size_t alignment = old_gen->virtual_space()->alignment();
  const size_t eden_used = eden_space->used_in_bytes();
  const size_t promoted = (size_t)(size_policy->avg_promoted()->padded_average());
  const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
  const size_t eden_capacity = eden_space->capacity_in_bytes();

  if (absorb_size >= eden_capacity) {
    return false; // Must leave some space in eden.
  }

  const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
  if (new_young_size < young_gen->min_gen_size()) {
    return false; // Respect young gen minimum size.
  }

  if (TraceAdaptiveGCBoundary && Verbose) {
    gclog_or_tty->print(" absorbing " SIZE_FORMAT "K: "
                        "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
                        "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
                        "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
                        absorb_size / K,
                        eden_capacity / K, (eden_capacity - absorb_size) / K,
                        young_gen->from_space()->used_in_bytes() / K,
                        young_gen->to_space()->used_in_bytes() / K,
                        young_gen->capacity_in_bytes() / K, new_young_size / K);
  }

  // Fill the unused part of the old gen.
  MutableSpace* const old_space = old_gen->object_space();
  MemRegion old_gen_unused(old_space->top(), old_space->end());

  // If the unused part of the old gen cannot be filled, skip
  // absorbing eden.
  if (old_gen_unused.word_size() < SharedHeap::min_fill_size()) {
    return false;
  }

  if (!old_gen_unused.is_empty()) {
    SharedHeap::fill_region_with_object(old_gen_unused);
  }

  // Take the live data from eden and set both top and end in the old gen to
  // eden top. (Need to set end because reset_after_change() mangles the region
  // from end to virtual_space->high() in debug builds).
  HeapWord* const new_top = eden_space->top();
  old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
                                        absorb_size);
  young_gen->reset_after_change();
  old_space->set_top(new_top);
  old_space->set_end(new_top);
  old_gen->reset_after_change();

  // Update the object start array for the filler object and the data from eden.
  ObjectStartArray* const start_array = old_gen->start_array();
  HeapWord* const start = old_gen_unused.start();
  for (HeapWord* addr = start; addr < new_top; addr += oop(addr)->size()) {
    start_array->allocate_block(addr);
  }

  // Could update the promoted average here, but it is not typically updated at
  // full GCs and the value to use is unclear. Something like
  //
  // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.

  size_policy->set_bytes_absorbed_from_eden(absorb_size);
  return true;
}

void PSMarkSweep::allocate_stacks() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();

  MutableSpace* to_space = young_gen->to_space();
  _preserved_marks = (PreservedMark*)to_space->top();
  _preserved_count = 0;

  // We want to calculate the size in bytes first.
  _preserved_count_max = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
  // Now divide by the size of a PreservedMark
  _preserved_count_max /= sizeof(PreservedMark);

  _preserved_mark_stack = NULL;
  _preserved_oop_stack = NULL;

  _marking_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true);

  int size = SystemDictionary::number_of_classes() * 2;
  _revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
}


void PSMarkSweep::deallocate_stacks() {
  if (_preserved_oop_stack) {
    delete _preserved_mark_stack;
    _preserved_mark_stack = NULL;
    delete _preserved_oop_stack;
    _preserved_oop_stack = NULL;
  }

  delete _marking_stack;
  delete _revisit_klass_stack;
}

void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  EventMark m("1 mark object");
  TraceTime tm("phase 1", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace(" 1");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // General strong roots.
  Universe::oops_do(mark_and_push_closure());
  ReferenceProcessor::oops_do(mark_and_push_closure());
  JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
  Threads::oops_do(mark_and_push_closure());
  ObjectSynchronizer::oops_do(mark_and_push_closure());
  FlatProfiler::oops_do(mark_and_push_closure());
  Management::oops_do(mark_and_push_closure());
  JvmtiExport::oops_do(mark_and_push_closure());
  SystemDictionary::always_strong_oops_do(mark_and_push_closure());
  vmSymbols::oops_do(mark_and_push_closure());

  // Flush marking stack.
  follow_stack();

  // Process reference objects found during marking
  {
    ref_processor()->snap_policy(clear_all_softrefs);
    ref_processor()->process_discovered_references(
      is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL);
  }

  // Follow system dictionary roots and unload classes
  bool purged_class = SystemDictionary::do_unloading(is_alive_closure());

  // Follow code cache roots
  CodeCache::do_unloading(is_alive_closure(), mark_and_push_closure(),
                          purged_class);
  follow_stack(); // Flush marking stack

  // Update subklass/sibling/implementor links of live klasses
  follow_weak_klass_links();
  assert(_marking_stack->is_empty(), "just drained");

  // Visit symbol and interned string tables and delete unmarked oops
  SymbolTable::unlink(is_alive_closure());
  StringTable::unlink(is_alive_closure());

  assert(_marking_stack->is_empty(), "stack should be empty by now");
}


void PSMarkSweep::mark_sweep_phase2() {
  EventMark m("2 compute new addresses");
  TraceTime tm("phase 2", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace("2");

  // Now all live objects are marked, compute the new object addresses.

  // It is imperative that we traverse perm_gen LAST. If dead space is
  // allowed, a range of dead objects may get overwritten by a dead int
  // array. If perm_gen is not traversed last a klassOop may get
  // overwritten. This is fine since it is dead, but if the class has dead
  // instances we have to skip them, and in order to find their size we
  // need the klassOop!
  //
  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // Begin compacting into the old gen
  PSMarkSweepDecorator::set_destination_decorator_tenured();

  // This will also compact the young gen spaces.
  old_gen->precompact();

  // Compact the perm gen into the perm gen
  PSMarkSweepDecorator::set_destination_decorator_perm_gen();

  perm_gen->precompact();
}

// This should be moved to the shared markSweep code!
class PSAlwaysTrueClosure: public BoolObjectClosure {
 public:
  void do_object(oop p) { ShouldNotReachHere(); }
  bool do_object_b(oop p) { return true; }
};
static PSAlwaysTrueClosure always_true;

void PSMarkSweep::mark_sweep_phase3() {
  // Adjust the pointers to reflect the new locations
  EventMark m("3 adjust pointers");
  TraceTime tm("phase 3", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace("3");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // General strong roots.
  Universe::oops_do(adjust_root_pointer_closure());
  ReferenceProcessor::oops_do(adjust_root_pointer_closure());
  JNIHandles::oops_do(adjust_root_pointer_closure());   // Global (strong) JNI handles
  Threads::oops_do(adjust_root_pointer_closure());
  ObjectSynchronizer::oops_do(adjust_root_pointer_closure());
  FlatProfiler::oops_do(adjust_root_pointer_closure());
  Management::oops_do(adjust_root_pointer_closure());
  JvmtiExport::oops_do(adjust_root_pointer_closure());
  // SO_AllClasses
  SystemDictionary::oops_do(adjust_root_pointer_closure());
  vmSymbols::oops_do(adjust_root_pointer_closure());

  // Now adjust pointers in remaining weak roots. (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  // Global (weak) JNI handles
  JNIHandles::weak_oops_do(&always_true, adjust_root_pointer_closure());

  CodeCache::oops_do(adjust_pointer_closure());
  SymbolTable::oops_do(adjust_root_pointer_closure());
  StringTable::oops_do(adjust_root_pointer_closure());
  ref_processor()->weak_oops_do(adjust_root_pointer_closure());
  PSScavenge::reference_processor()->weak_oops_do(adjust_root_pointer_closure());

  adjust_marks();

  young_gen->adjust_pointers();
  old_gen->adjust_pointers();
  perm_gen->adjust_pointers();
}

void PSMarkSweep::mark_sweep_phase4() {
  EventMark m("4 compact heap");
  TraceTime tm("phase 4", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace("4");

  // All pointers are now adjusted, move objects accordingly

  // It is imperative that we traverse perm_gen first in phase4. All
  // classes must be allocated earlier than their instances, and traversing
  // perm_gen first makes sure that all klassOops have moved to their new
  // location before any instance does a dispatch through its klass!
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  perm_gen->compact();
  old_gen->compact();
  young_gen->compact();
}

jlong PSMarkSweep::millis_since_last_gc() {
  jlong ret_val = os::javaTimeMillis() - _time_of_last_gc;
  // XXX See note in genCollectedHeap::millis_since_last_gc().
  if (ret_val < 0) {
    NOT_PRODUCT(warning("time warp: %d", ret_val);)
    return 0;
  }
  return ret_val;
}

void PSMarkSweep::reset_millis_since_last_gc() {
  _time_of_last_gc = os::javaTimeMillis();
}