src/share/vm/gc_implementation/g1/g1MarkSweep.cpp @ 342:37f87013dfd8

6711316: Open source the Garbage-First garbage collector
Summary: First mercurial integration of the code for the Garbage-First garbage collector.
Reviewed-by: apetrusenko, iveresov, jmasa, sgoldman, tonyp, ysr

author   ysr
date     Thu, 05 Jun 2008 15:57:56 -0700
parents
children 8651a65ac4b4
/*
 * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_g1MarkSweep.cpp.incl"

class HeapRegion;

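// Performs a full, stop-the-world mark-sweep collection of the entire
// heap, including the permanent generation. Must be called at a
// safepoint; the caller supplies the reference processor to use for
// weak reference processing and indicates whether soft references
// should be unconditionally cleared.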
void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
                                      bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

  // Hook up weak ref data so it can be used during Mark-Sweep.
  assert(GenMarkSweep::ref_processor() == NULL, "no stomping");
  assert(rp != NULL, "should be non-NULL");
  GenMarkSweep::_ref_processor = rp;

  // When collecting the permanent generation methodOops may be moving,
  // so we either have to flush all bcp data or convert it into bci.
  CodeCache::gc_prologue();
  Threads::gc_prologue();

  // Increment the invocation count for the permanent generation, since it is
  // implicitly collected whenever we do a full mark sweep collection.
  SharedHeap* sh = SharedHeap::heap();
  sh->perm_gen()->stat_record()->invocations++;

  bool marked_for_unloading = false;

  allocate_stacks();

  mark_sweep_phase1(marked_for_unloading, clear_all_softrefs);

  if (G1VerifyConcMark) {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    g1h->checkConcurrentMark();
  }

  mark_sweep_phase2();

  // Don't add any more derived pointers during phase3.
  COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

  mark_sweep_phase3();

  mark_sweep_phase4();

  GenMarkSweep::restore_marks();

  GenMarkSweep::deallocate_stacks();

  // We must invalidate the perm-gen rs, so that it gets rebuilt.
  GenRemSet* rs = sh->rem_set();
  rs->invalidate(sh->perm_gen()->used_region(), true /*whole_heap*/);

  // "free at last gc" is calculated from these.
  // CHF: cheating for now!!!
  // Universe::set_heap_capacity_at_last_gc(Universe::heap()->capacity());
  // Universe::set_heap_used_at_last_gc(Universe::heap()->used());

  Threads::gc_epilogue();
  CodeCache::gc_epilogue();

  // Refs processing: clean slate.
  GenMarkSweep::_ref_processor = NULL;
}

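// Sets up the marking and revisit stacks used during the collection and
// resets the preserved-mark bookkeeping. The revisit stack is sized from
// the current number of loaded classes; the marking stack starts at a
// fixed initial capacity and grows on demand.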
void G1MarkSweep::allocate_stacks() {
  GenMarkSweep::_preserved_count_max = 0;
  GenMarkSweep::_preserved_marks = NULL;
  GenMarkSweep::_preserved_count = 0;
  GenMarkSweep::_preserved_mark_stack = NULL;
  GenMarkSweep::_preserved_oop_stack = NULL;

  GenMarkSweep::_marking_stack =
    new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true);

  size_t size = SystemDictionary::number_of_classes() * 2;
  GenMarkSweep::_revisit_klass_stack =
    new (ResourceObj::C_HEAP) GrowableArray<Klass*>((int)size, true);
}

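// Phase 1: mark all live objects, process discovered references, unload
// dead classes, and prune the symbol and interned string tables.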
void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
                                    bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them.
  EventMark m("1 mark object");
  TraceTime tm("phase 1", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace(" 1");

  SharedHeap* sh = SharedHeap::heap();

  sh->process_strong_roots(true,  // Collecting permanent generation.
                           SharedHeap::SO_SystemClasses,
                           &GenMarkSweep::follow_root_closure,
                           &GenMarkSweep::follow_root_closure);

  // Process reference objects found during marking.
  ReferencePolicy* soft_ref_policy;
  if (clear_all_softrefs) {
    soft_ref_policy = new AlwaysClearPolicy();
  } else {
#ifdef COMPILER2
    soft_ref_policy = new LRUMaxHeapPolicy();
#else
    soft_ref_policy = new LRUCurrentHeapPolicy();
#endif
  }
  assert(soft_ref_policy != NULL, "No soft reference policy");
  GenMarkSweep::ref_processor()->process_discovered_references(
    soft_ref_policy,
    &GenMarkSweep::is_alive,
    &GenMarkSweep::keep_alive,
    &GenMarkSweep::follow_stack_closure,
    NULL);

  // Follow system dictionary roots and unload classes.
  bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive);
  assert(GenMarkSweep::_marking_stack->is_empty(),
         "stack should be empty by now");

  // Follow code cache roots (has to be done after system dictionary,
  // assumes all live klasses are marked).
  CodeCache::do_unloading(&GenMarkSweep::is_alive,
                          &GenMarkSweep::keep_alive,
                          purged_class);
  GenMarkSweep::follow_stack();

  // Update subklass/sibling/implementor links of live klasses.
  GenMarkSweep::follow_weak_klass_links();
  assert(GenMarkSweep::_marking_stack->is_empty(),
         "stack should be empty by now");

  // Visit symbol and interned string tables and delete unmarked oops.
  SymbolTable::unlink(&GenMarkSweep::is_alive);
  StringTable::unlink(&GenMarkSweep::is_alive);

  assert(GenMarkSweep::_marking_stack->is_empty(),
         "stack should be empty by now");
}

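// Prepares each region for compaction by computing forwarding addresses
// for its live objects. A marked humongous object is kept in place
// (forwarded to itself); an unmarked one has its region freed
// immediately. The card table is cleared for the portion of each region
// that will be unused after compaction.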
class G1PrepareCompactClosure: public HeapRegionClosure {
  ModRefBarrierSet* _mrbs;
  CompactPoint _cp;
  bool _popular_only;

  void free_humongous_region(HeapRegion* hr) {
    assert(hr->startsHumongous(),
           "Only the start of a humongous region should be freed.");
    G1CollectedHeap::heap()->free_region(hr);
    hr->prepare_for_compaction(&_cp);
    // Also clear the part of the card table that will be unused after
    // compaction.
    _mrbs->clear(MemRegion(hr->compaction_top(), hr->end()));
  }

public:
  G1PrepareCompactClosure(CompactibleSpace* cs, bool popular_only) :
    _mrbs(G1CollectedHeap::heap()->mr_bs()),
    _cp(NULL, cs, cs->initialize_threshold()),
    _popular_only(popular_only)
  {}

  bool doHeapRegion(HeapRegion* hr) {
    if (_popular_only && !hr->popular())
      return true; // terminate early: popular regions precede all others
    else if (!_popular_only && hr->popular())
      return false; // skip this one

    if (hr->isHumongous()) {
      if (hr->startsHumongous()) {
        oop obj = oop(hr->bottom());
        if (obj->is_gc_marked()) {
          obj->forward_to(obj);
        } else {
          free_humongous_region(hr);
        }
      } else {
        assert(hr->continuesHumongous(), "Invalid humongous.");
      }
    } else {
      hr->prepare_for_compaction(&_cp);
      // Also clear the part of the card table that will be unused after
      // compaction.
      _mrbs->clear(MemRegion(hr->compaction_top(), hr->end()));
    }
    return false;
  }
};

// Stolen verbatim from g1CollectedHeap.cpp.
class FindFirstRegionClosure: public HeapRegionClosure {
  HeapRegion* _a_region;
  bool _find_popular;
public:
  FindFirstRegionClosure(bool find_popular) :
    _a_region(NULL), _find_popular(find_popular) {}

  bool doHeapRegion(HeapRegion* r) {
    if (r->popular() == _find_popular) {
      _a_region = r;
      return true;
    } else {
      return false;
    }
  }

  HeapRegion* result() { return _a_region; }
};

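// Phase 2: compute the new address of every live object. Forwarding
// addresses are computed for the popular regions first, then the
// regular regions, and the permanent generation last (see the ordering
// comment below).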
void G1MarkSweep::mark_sweep_phase2() {
  // Now all live objects are marked, compute the new object addresses.

  // It is imperative that we traverse perm_gen LAST. If dead space is
  // allowed, a range of dead objects may get overwritten by a dead int
  // array. If perm_gen is not traversed last a klassOop may get
  // overwritten. This is fine since it is dead, but if the class has dead
  // instances we have to skip them, and in order to find their size we
  // need the klassOop!
  //
  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  Generation* pg = g1h->perm_gen();

  EventMark m("2 compute new addresses");
  TraceTime tm("phase 2", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace("2");

  // First we compact the popular regions.
  if (G1NumPopularRegions > 0) {
    CompactibleSpace* sp = g1h->first_compactible_space();
    FindFirstRegionClosure cl(true /*find_popular*/);
    g1h->heap_region_iterate(&cl);
    HeapRegion* r = cl.result();
    assert(r->popular(), "should have found a popular region.");
    assert(r == sp, "first popular heap region should "
           "== first compactible space");
    G1PrepareCompactClosure blk(sp, true /*popular_only*/);
    g1h->heap_region_iterate(&blk);
  }

  // Now we do the regular regions.
  FindFirstRegionClosure cl(false /*find_popular*/);
  g1h->heap_region_iterate(&cl);
  HeapRegion* r = cl.result();
  assert(!r->popular(), "should have found a non-popular region.");
  CompactibleSpace* sp = r;
  // If the first non-popular region holds a live humongous object, start
  // compacting into the space that follows it instead.
  if (r->isHumongous() && oop(r->bottom())->is_gc_marked()) {
    sp = r->next_compaction_space();
  }

  G1PrepareCompactClosure blk(sp, false /*popular_only*/);
  g1h->heap_region_iterate(&blk);

  // Finally, the permanent generation.
  CompactPoint perm_cp(pg, NULL, NULL);
  pg->prepare_for_compaction(&perm_cp);
}

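// Updates every reference in a region to point at the forwarding address
// computed in phase 2. A humongous region carries a single object, so
// only the start region needs its interior pointers adjusted.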
class G1AdjustPointersClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    if (r->isHumongous()) {
      if (r->startsHumongous()) {
        // We must adjust the pointers on the single H object.
        oop obj = oop(r->bottom());
        debug_only(GenMarkSweep::track_interior_pointers(obj));
        // Point all the oops to their new locations.
        obj->adjust_pointers();
        debug_only(GenMarkSweep::check_interior_pointers());
      }
    } else {
      // This really ought to be "as_CompactibleSpace"...
      r->adjust_pointers();
    }
    return false;
  }
};

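// Phase 3: adjust all roots and all interior pointers to refer to the
// new object locations computed in phase 2.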
void G1MarkSweep::mark_sweep_phase3() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  Generation* pg = g1h->perm_gen();

  // Adjust the pointers to reflect the new locations.
  EventMark m("3 adjust pointers");
  TraceTime tm("phase 3", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace("3");

  SharedHeap* sh = SharedHeap::heap();

  sh->process_strong_roots(true,  // Collecting permanent generation.
                           SharedHeap::SO_AllClasses,
                           &GenMarkSweep::adjust_root_pointer_closure,
                           &GenMarkSweep::adjust_pointer_closure);

  g1h->ref_processor()->weak_oops_do(&GenMarkSweep::adjust_root_pointer_closure);

  // Now adjust pointers in remaining weak roots. (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  g1h->g1_process_weak_roots(&GenMarkSweep::adjust_root_pointer_closure,
                             &GenMarkSweep::adjust_pointer_closure);

  GenMarkSweep::adjust_marks();

  G1AdjustPointersClosure blk;
  g1h->heap_region_iterate(&blk);
  pg->adjust_pointers();
}

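// Moves each live object to its forwarding address. A live humongous
// object stays in place and only has its mark word reinitialized; the
// region's compaction bookkeeping is then reset.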
class G1SpaceCompactClosure: public HeapRegionClosure {
public:
  G1SpaceCompactClosure() {}

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->isHumongous()) {
      if (hr->startsHumongous()) {
        oop obj = oop(hr->bottom());
        if (obj->is_gc_marked()) {
          obj->init_mark();
        } else {
          assert(hr->is_empty(), "Should have been cleared in phase 2.");
        }
        hr->reset_during_compaction();
      }
    } else {
      hr->compact();
    }
    return false;
  }
};

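// Phase 4: move objects to their new locations; perm_gen is compacted
// first, for the reasons discussed below.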
void G1MarkSweep::mark_sweep_phase4() {
  // All pointers are now adjusted, move objects accordingly.

  // It is imperative that we traverse perm_gen first in phase4. All
  // classes must be allocated earlier than their instances, and traversing
  // perm_gen first makes sure that all klassOops have moved to their new
  // location before any instance does a dispatch through its klass!

  // The ValidateMarkSweep live oops tracking expects us to traverse spaces
  // in the same order in phase2, phase3 and phase4. We don't quite do that
  // here (perm_gen first rather than last), so we tell the validate code
  // to use a higher index (saved from phase2) when verifying perm_gen.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  Generation* pg = g1h->perm_gen();

  EventMark m("4 compact heap");
  TraceTime tm("phase 4", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace("4");

  pg->compact();

  G1SpaceCompactClosure blk;
  g1h->heap_region_iterate(&blk);
}

// Local Variables: ***
// c-indentation-style: gnu ***
// End: ***