Mercurial > hg > truffle
annotate src/share/vm/memory/genMarkSweep.cpp @ 453:c96030fff130
6684579: SoftReference processing can be made more efficient
Summary: For current soft-ref clearing policies, we can decide at marking time if a soft-reference will definitely not be cleared, postponing the decision of whether it will definitely be cleared to the final reference processing phase. This can be especially beneficial in the case of concurrent collectors where the marking is usually concurrent but reference processing is usually not.
Reviewed-by: jmasa
author | ysr |
---|---|
date | Thu, 20 Nov 2008 16:56:09 -0800 |
parents | 1ee8caae33af |
children | 27a80744a83b |
rev | line source |
---|---|
0 | 1 /* |
196 | 2 * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, | |
20 * CA 95054 USA or visit www.sun.com if you need additional information or | |
21 * have any questions. | |
22 * | |
23 */ | |
24 | |
25 #include "incls/_precompiled.incl" | |
26 #include "incls/_genMarkSweep.cpp.incl" | |
27 | |
28 void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp, | |
29 bool clear_all_softrefs) { | |
30 assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint"); | |
31 | |
32 // hook up weak ref data so it can be used during Mark-Sweep | |
33 assert(ref_processor() == NULL, "no stomping"); | |
453
c96030fff130
6684579: SoftReference processing can be made more efficient
ysr
parents:
356
diff
changeset
|
34 assert(rp != NULL, "should be non-NULL"); |
0 | 35 _ref_processor = rp; |
453
c96030fff130
6684579: SoftReference processing can be made more efficient
ysr
parents:
356
diff
changeset
|
36 rp->snap_policy(clear_all_softrefs); |
0 | 37 |
38 TraceTime t1("Full GC", PrintGC && !PrintGCDetails, true, gclog_or_tty); | |
39 | |
40 // When collecting the permanent generation methodOops may be moving, | |
41 // so we either have to flush all bcp data or convert it into bci. | |
42 CodeCache::gc_prologue(); | |
43 Threads::gc_prologue(); | |
44 | |
45 // Increment the invocation count for the permanent generation, since it is | |
46 // implicitly collected whenever we do a full mark sweep collection. | |
47 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
48 gch->perm_gen()->stat_record()->invocations++; | |
49 | |
50 // Capture heap size before collection for printing. | |
51 size_t gch_prev_used = gch->used(); | |
52 | |
53 // Some of the card table updates below assume that the perm gen is | |
54 // also being collected. | |
55 assert(level == gch->n_gens() - 1, | |
56 "All generations are being collected, ergo perm gen too."); | |
57 | |
58 // Capture used regions for each generation that will be | |
59 // subject to collection, so that card table adjustments can | |
60 // be made intelligently (see clear / invalidate further below). | |
61 gch->save_used_regions(level, true /* perm */); | |
62 | |
63 allocate_stacks(); | |
64 | |
65 mark_sweep_phase1(level, clear_all_softrefs); | |
66 | |
67 mark_sweep_phase2(); | |
68 | |
69 // Don't add any more derived pointers during phase3 | |
70 COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity")); | |
71 COMPILER2_PRESENT(DerivedPointerTable::set_active(false)); | |
72 | |
73 mark_sweep_phase3(level); | |
74 | |
75 VALIDATE_MARK_SWEEP_ONLY( | |
76 if (ValidateMarkSweep) { | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
77 guarantee(_root_refs_stack->length() == 0, "should be empty by now"); |
0 | 78 } |
79 ) | |
80 | |
81 mark_sweep_phase4(); | |
82 | |
83 VALIDATE_MARK_SWEEP_ONLY( | |
84 if (ValidateMarkSweep) { | |
85 guarantee(_live_oops->length() == _live_oops_moved_to->length(), | |
86 "should be the same size"); | |
87 } | |
88 ) | |
89 | |
90 restore_marks(); | |
91 | |
92 // Set saved marks for allocation profiler (and other things? -- dld) | |
93 // (Should this be in general part?) | |
94 gch->save_marks(); | |
95 | |
96 deallocate_stacks(); | |
97 | |
98 // If compaction completely evacuated all generations younger than this | |
99 // one, then we can clear the card table. Otherwise, we must invalidate | |
100 // it (consider all cards dirty). In the future, we might consider doing | |
101 // compaction within generations only, and doing card-table sliding. | |
102 bool all_empty = true; | |
103 for (int i = 0; all_empty && i < level; i++) { | |
104 Generation* g = gch->get_gen(i); | |
105 all_empty = all_empty && gch->get_gen(i)->used() == 0; | |
106 } | |
107 GenRemSet* rs = gch->rem_set(); | |
108 // Clear/invalidate below make use of the "prev_used_regions" saved earlier. | |
109 if (all_empty) { | |
110 // We've evacuated all generations below us. | |
111 Generation* g = gch->get_gen(level); | |
112 rs->clear_into_younger(g, true /* perm */); | |
113 } else { | |
114 // Invalidate the cards corresponding to the currently used | |
115 // region and clear those corresponding to the evacuated region | |
116 // of all generations just collected (i.e. level and younger). | |
117 rs->invalidate_or_clear(gch->get_gen(level), | |
118 true /* younger */, | |
119 true /* perm */); | |
120 } | |
121 | |
122 Threads::gc_epilogue(); | |
123 CodeCache::gc_epilogue(); | |
124 | |
125 if (PrintGC && !PrintGCDetails) { | |
126 gch->print_heap_change(gch_prev_used); | |
127 } | |
128 | |
129 // refs processing: clean slate | |
130 _ref_processor = NULL; | |
131 | |
132 // Update heap occupancy information which is used as | |
133 // input to soft ref clearing policy at the next gc. | |
134 Universe::update_heap_info_at_gc(); | |
135 | |
136 // Update time of last gc for all generations we collected | |
137 // (which curently is all the generations in the heap). | |
138 gch->update_time_of_last_gc(os::javaTimeMillis()); | |
139 } | |
140 | |
141 void GenMarkSweep::allocate_stacks() { | |
142 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
143 // Scratch request on behalf of oldest generation; will do no | |
144 // allocation. | |
145 ScratchBlock* scratch = gch->gather_scratch(gch->_gens[gch->_n_gens-1], 0); | |
146 | |
147 // $$$ To cut a corner, we'll only use the first scratch block, and then | |
148 // revert to malloc. | |
149 if (scratch != NULL) { | |
150 _preserved_count_max = | |
151 scratch->num_words * HeapWordSize / sizeof(PreservedMark); | |
152 } else { | |
153 _preserved_count_max = 0; | |
154 } | |
155 | |
156 _preserved_marks = (PreservedMark*)scratch; | |
157 _preserved_count = 0; | |
158 _preserved_mark_stack = NULL; | |
159 _preserved_oop_stack = NULL; | |
160 | |
161 _marking_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true); | |
162 | |
163 int size = SystemDictionary::number_of_classes() * 2; | |
164 _revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true); | |
165 | |
166 #ifdef VALIDATE_MARK_SWEEP | |
167 if (ValidateMarkSweep) { | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
168 _root_refs_stack = new (ResourceObj::C_HEAP) GrowableArray<void*>(100, true); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
169 _other_refs_stack = new (ResourceObj::C_HEAP) GrowableArray<void*>(100, true); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
170 _adjusted_pointers = new (ResourceObj::C_HEAP) GrowableArray<void*>(100, true); |
0 | 171 _live_oops = new (ResourceObj::C_HEAP) GrowableArray<oop>(100, true); |
172 _live_oops_moved_to = new (ResourceObj::C_HEAP) GrowableArray<oop>(100, true); | |
173 _live_oops_size = new (ResourceObj::C_HEAP) GrowableArray<size_t>(100, true); | |
174 } | |
175 if (RecordMarkSweepCompaction) { | |
176 if (_cur_gc_live_oops == NULL) { | |
177 _cur_gc_live_oops = new(ResourceObj::C_HEAP) GrowableArray<HeapWord*>(100, true); | |
178 _cur_gc_live_oops_moved_to = new(ResourceObj::C_HEAP) GrowableArray<HeapWord*>(100, true); | |
179 _cur_gc_live_oops_size = new(ResourceObj::C_HEAP) GrowableArray<size_t>(100, true); | |
180 _last_gc_live_oops = new(ResourceObj::C_HEAP) GrowableArray<HeapWord*>(100, true); | |
181 _last_gc_live_oops_moved_to = new(ResourceObj::C_HEAP) GrowableArray<HeapWord*>(100, true); | |
182 _last_gc_live_oops_size = new(ResourceObj::C_HEAP) GrowableArray<size_t>(100, true); | |
183 } else { | |
184 _cur_gc_live_oops->clear(); | |
185 _cur_gc_live_oops_moved_to->clear(); | |
186 _cur_gc_live_oops_size->clear(); | |
187 } | |
188 } | |
189 #endif | |
190 } | |
191 | |
192 | |
193 void GenMarkSweep::deallocate_stacks() { | |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
194 |
356 | 195 if (!UseG1GC) { |
196 GenCollectedHeap* gch = GenCollectedHeap::heap(); | |
197 gch->release_scratch(); | |
198 } | |
263
12eea04c8b06
6672698: mangle_unused_area() should not remangle the entire heap at each collection.
jmasa
parents:
113
diff
changeset
|
199 |
0 | 200 if (_preserved_oop_stack) { |
201 delete _preserved_mark_stack; | |
202 _preserved_mark_stack = NULL; | |
203 delete _preserved_oop_stack; | |
204 _preserved_oop_stack = NULL; | |
205 } | |
206 | |
207 delete _marking_stack; | |
208 delete _revisit_klass_stack; | |
209 | |
210 #ifdef VALIDATE_MARK_SWEEP | |
211 if (ValidateMarkSweep) { | |
212 delete _root_refs_stack; | |
213 delete _other_refs_stack; | |
214 delete _adjusted_pointers; | |
215 delete _live_oops; | |
216 delete _live_oops_size; | |
217 delete _live_oops_moved_to; | |
218 _live_oops_index = 0; | |
219 _live_oops_index_at_perm = 0; | |
220 } | |
221 #endif | |
222 } | |
223 | |
// Phase 1 of mark-sweep: recursively mark everything reachable from the
// strong roots, then process discovered references, unload dead classes,
// purge dead code-cache entries, fix weak klass links, and prune the
// symbol and interned-string tables. The marking stack must be empty on
// exit.
void GenMarkSweep::mark_sweep_phase1(int level,
                                     bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  EventMark m("1 mark object");
  TraceTime tm("phase 1", PrintGC && Verbose, true, gclog_or_tty);
  trace(" 1");

  VALIDATE_MARK_SWEEP_ONLY(reset_live_oop_tracking(false));

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  // Because follow_root_closure is created statically, cannot
  // use OopsInGenClosure constructor which takes a generation,
  // as the Universe has not been created when the static constructors
  // are run.
  follow_root_closure.set_orig_generation(gch->get_gen(level));

  gch->gen_process_strong_roots(level,
                                false, // Younger gens are not roots.
                                true,  // Collecting permanent generation.
                                SharedHeap::SO_SystemClasses,
                                &follow_root_closure, &follow_root_closure);

  // Process reference objects found during marking
  {
    // Re-snapshot the soft-ref policy here so reference processing sees
    // the decision for this collection (see 6684579 in the header above).
    ref_processor()->snap_policy(clear_all_softrefs);
    ref_processor()->process_discovered_references(
      &is_alive, &keep_alive, &follow_stack_closure, NULL);
  }

  // Follow system dictionary roots and unload classes
  bool purged_class = SystemDictionary::do_unloading(&is_alive);

  // Follow code cache roots; must see whether class unloading occurred.
  CodeCache::do_unloading(&is_alive, &keep_alive, purged_class);
  follow_stack(); // Flush marking stack

  // Update subklass/sibling/implementor links of live klasses
  follow_weak_klass_links();
  assert(_marking_stack->is_empty(), "just drained");

  // Visit symbol and interned string tables and delete unmarked oops
  SymbolTable::unlink(&is_alive);
  StringTable::unlink(&is_alive);

  assert(_marking_stack->is_empty(), "stack should be empty by now");
}
271 | |
272 | |
// Phase 2 of mark-sweep: with all live objects marked, compute each live
// object's post-compaction address (forwarding). Ordering matters: the
// perm gen must be handled last (see the invariant comment below).
void GenMarkSweep::mark_sweep_phase2() {
  // Now all live objects are marked, compute the new object addresses.

  // It is imperative that we traverse perm_gen LAST. If dead space is
  // allowed a range of dead object may get overwritten by a dead int
  // array. If perm_gen is not traversed last a klassOop may get
  // overwritten. This is fine since it is dead, but if the class has dead
  // instances we have to skip them, and in order to find their size we
  // need the klassOop!
  //
  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  Generation* pg = gch->perm_gen();

  EventMark m("2 compute new addresses");
  TraceTime tm("phase 2", PrintGC && Verbose, true, gclog_or_tty);
  trace("2");

  VALIDATE_MARK_SWEEP_ONLY(reset_live_oop_tracking(false));

  // Compute new addresses for the non-perm generations first...
  gch->prepare_for_compaction();

  // ...and only then for the perm gen, into its own compact point,
  // so the invariant above (perm gen traversed last) holds. The saved
  // index tells the validation code where perm-gen entries begin.
  VALIDATE_MARK_SWEEP_ONLY(_live_oops_index_at_perm = _live_oops_index);
  CompactPoint perm_cp(pg, NULL, NULL);
  pg->prepare_for_compaction(&perm_cp);
}
302 | |
303 class GenAdjustPointersClosure: public GenCollectedHeap::GenClosure { | |
304 public: | |
305 void do_generation(Generation* gen) { | |
306 gen->adjust_pointers(); | |
307 } | |
308 }; | |
309 | |
// Phase 3 of mark-sweep: adjust every reference — strong roots, weak
// roots, and pointers inside heap objects — to point at the new
// locations computed in phase 2.
void GenMarkSweep::mark_sweep_phase3(int level) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  Generation* pg = gch->perm_gen();

  // Adjust the pointers to reflect the new locations
  EventMark m("3 adjust pointers");
  TraceTime tm("phase 3", PrintGC && Verbose, true, gclog_or_tty);
  trace("3");

  VALIDATE_MARK_SWEEP_ONLY(reset_live_oop_tracking(false));

  // Needs to be done before the system dictionary is adjusted.
  pg->pre_adjust_pointers();

  // Because the two closures below are created statically, cannot
  // use OopsInGenClosure constructor which takes a generation,
  // as the Universe has not been created when the static constructors
  // are run.
  adjust_root_pointer_closure.set_orig_generation(gch->get_gen(level));
  adjust_pointer_closure.set_orig_generation(gch->get_gen(level));

  gch->gen_process_strong_roots(level,
                                false, // Younger gens are not roots.
                                true,  // Collecting permanent generation.
                                SharedHeap::SO_AllClasses,
                                &adjust_root_pointer_closure,
                                &adjust_root_pointer_closure);

  // Now adjust pointers in remaining weak roots. (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  gch->gen_process_weak_roots(&adjust_root_pointer_closure,
                              &adjust_pointer_closure);

  adjust_marks();
  // Finally adjust pointers held inside the heap itself: each collected
  // generation, then the perm gen.
  GenAdjustPointersClosure blk;
  gch->generation_iterate(&blk, true);
  pg->adjust_pointers();
}
348 | |
349 class GenCompactClosure: public GenCollectedHeap::GenClosure { | |
350 public: | |
351 void do_generation(Generation* gen) { | |
352 gen->compact(); | |
353 } | |
354 }; | |
355 | |
// Phase 4 of mark-sweep: with all pointers already adjusted, physically
// move (compact) the live objects. The perm gen is compacted FIRST here
// (unlike phase 2) — see the invariant comment below.
void GenMarkSweep::mark_sweep_phase4() {
  // All pointers are now adjusted, move objects accordingly

  // It is imperative that we traverse perm_gen first in phase4. All
  // classes must be allocated earlier than their instances, and traversing
  // perm_gen first makes sure that all klassOops have moved to their new
  // location before any instance does a dispatch through it's klass!

  // The ValidateMarkSweep live oops tracking expects us to traverse spaces
  // in the same order in phase2, phase3 and phase4. We don't quite do that
  // here (perm_gen first rather than last), so we tell the validate code
  // to use a higher index (saved from phase2) when verifying perm_gen.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  Generation* pg = gch->perm_gen();

  EventMark m("4 compact heap");
  TraceTime tm("phase 4", PrintGC && Verbose, true, gclog_or_tty);
  trace("4");

  // 'true' flips the validation tracking into the perm-gen index range
  // saved during phase 2 (see comment above).
  VALIDATE_MARK_SWEEP_ONLY(reset_live_oop_tracking(true));

  pg->compact();

  VALIDATE_MARK_SWEEP_ONLY(reset_live_oop_tracking(false));

  // Now compact the remaining (non-perm) generations.
  GenCompactClosure blk;
  gch->generation_iterate(&blk, true);

  VALIDATE_MARK_SWEEP_ONLY(compaction_complete());

  pg->post_compact(); // Shared spaces verification.
}