Mercurial > hg > truffle
annotate src/share/vm/gc_implementation/g1/g1MarkSweep.cpp @ 2152:0fa27f37d4d4
6977804: G1: remove the zero-filling thread
Summary: This changeset removes the zero-filling thread from G1 and collapses the two free region lists we had before (the "free" and "unclean" lists) into one. The new free list uses the new heap region sets / lists abstractions that we'll ultimately use to keep track of all regions in the heap. A heap region set was also introduced for the humongous regions. Finally, this change increases the concurrency between the thread that completes freeing regions (after a cleanup pause) and the rest of the system (before, we would have to wait for said thread to complete before allocating a new region). The changeset also includes a lot of refactoring and code simplification.
Reviewed-by: jcoomes, johnc
author | tonyp |
---|---|
date | Wed, 19 Jan 2011 19:30:42 -0500 |
parents | f95d63e2154a |
children | 377371490991 |
rev | line source |
---|---|
342 | 1 /* |
2152 | 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. |
342 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1387
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1387
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1387
diff
changeset
|
21 * questions. |
342 | 22 * |
23 */ | |
24 | |
1972 | 25 #include "precompiled.hpp" |
26 #include "classfile/javaClasses.hpp" | |
27 #include "classfile/symbolTable.hpp" | |
28 #include "classfile/systemDictionary.hpp" | |
29 #include "classfile/vmSymbols.hpp" | |
30 #include "code/codeCache.hpp" | |
31 #include "code/icBuffer.hpp" | |
32 #include "gc_implementation/g1/g1MarkSweep.hpp" | |
33 #include "memory/gcLocker.hpp" | |
34 #include "memory/genCollectedHeap.hpp" | |
35 #include "memory/modRefBarrierSet.hpp" | |
36 #include "memory/referencePolicy.hpp" | |
37 #include "memory/space.hpp" | |
38 #include "oops/instanceRefKlass.hpp" | |
39 #include "oops/oop.inline.hpp" | |
40 #include "prims/jvmtiExport.hpp" | |
41 #include "runtime/aprofiler.hpp" | |
42 #include "runtime/biasedLocking.hpp" | |
43 #include "runtime/fprofiler.hpp" | |
44 #include "runtime/synchronizer.hpp" | |
45 #include "runtime/thread.hpp" | |
46 #include "runtime/vmThread.hpp" | |
47 #include "utilities/copy.hpp" | |
48 #include "utilities/events.hpp" | |
342 | 49 |
50 class HeapRegion; | |
51 | |
52 void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp, | |
53 bool clear_all_softrefs) { | |
54 assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint"); | |
55 | |
1387
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1311
diff
changeset
|
56 SharedHeap* sh = SharedHeap::heap(); |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1311
diff
changeset
|
57 #ifdef ASSERT |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1311
diff
changeset
|
58 if (sh->collector_policy()->should_clear_all_soft_refs()) { |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1311
diff
changeset
|
59 assert(clear_all_softrefs, "Policy should have been checked earler"); |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1311
diff
changeset
|
60 } |
0bfd3fb24150
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
jmasa
parents:
1311
diff
changeset
|
61 #endif |
342 | 62 // hook up weak ref data so it can be used during Mark-Sweep |
63 assert(GenMarkSweep::ref_processor() == NULL, "no stomping"); | |
453
c96030fff130
6684579: SoftReference processing can be made more efficient
ysr
parents:
358
diff
changeset
|
64 assert(rp != NULL, "should be non-NULL"); |
342 | 65 GenMarkSweep::_ref_processor = rp; |
457
27a80744a83b
6778647: snap(), snap_policy() should be renamed setup(), setup_policy()
ysr
parents:
453
diff
changeset
|
66 rp->setup_policy(clear_all_softrefs); |
342 | 67 |
68 // When collecting the permanent generation methodOops may be moving, | |
69 // so we either have to flush all bcp data or convert it into bci. | |
70 CodeCache::gc_prologue(); | |
71 Threads::gc_prologue(); | |
72 | |
73 // Increment the invocation count for the permanent generation, since it is | |
74 // implicitly collected whenever we do a full mark sweep collection. | |
75 sh->perm_gen()->stat_record()->invocations++; | |
76 | |
77 bool marked_for_unloading = false; | |
78 | |
79 allocate_stacks(); | |
80 | |
358
8651a65ac4b4
6735416: G1: runThese javasoft.sqe.tests.lang.thrd011.thrd01101.thrd01101 fails
iveresov
parents:
342
diff
changeset
|
81 // We should save the marks of the currently locked biased monitors. |
8651a65ac4b4
6735416: G1: runThese javasoft.sqe.tests.lang.thrd011.thrd01101.thrd01101 fails
iveresov
parents:
342
diff
changeset
|
82 // The marking doesn't preserve the marks of biased objects. |
8651a65ac4b4
6735416: G1: runThese javasoft.sqe.tests.lang.thrd011.thrd01101.thrd01101 fails
iveresov
parents:
342
diff
changeset
|
83 BiasedLocking::preserve_marks(); |
8651a65ac4b4
6735416: G1: runThese javasoft.sqe.tests.lang.thrd011.thrd01101.thrd01101 fails
iveresov
parents:
342
diff
changeset
|
84 |
342 | 85 mark_sweep_phase1(marked_for_unloading, clear_all_softrefs); |
86 | |
751 | 87 if (VerifyDuringGC) { |
342 | 88 G1CollectedHeap* g1h = G1CollectedHeap::heap(); |
89 g1h->checkConcurrentMark(); | |
90 } | |
91 | |
92 mark_sweep_phase2(); | |
93 | |
94 // Don't add any more derived pointers during phase3 | |
95 COMPILER2_PRESENT(DerivedPointerTable::set_active(false)); | |
96 | |
97 mark_sweep_phase3(); | |
98 | |
99 mark_sweep_phase4(); | |
100 | |
101 GenMarkSweep::restore_marks(); | |
358
8651a65ac4b4
6735416: G1: runThese javasoft.sqe.tests.lang.thrd011.thrd01101.thrd01101 fails
iveresov
parents:
342
diff
changeset
|
102 BiasedLocking::restore_marks(); |
342 | 103 GenMarkSweep::deallocate_stacks(); |
104 | |
105 // We must invalidate the perm-gen rs, so that it gets rebuilt. | |
106 GenRemSet* rs = sh->rem_set(); | |
107 rs->invalidate(sh->perm_gen()->used_region(), true /*whole_heap*/); | |
108 | |
109 // "free at last gc" is calculated from these. | |
110 // CHF: cheating for now!!! | |
111 // Universe::set_heap_capacity_at_last_gc(Universe::heap()->capacity()); | |
112 // Universe::set_heap_used_at_last_gc(Universe::heap()->used()); | |
113 | |
114 Threads::gc_epilogue(); | |
115 CodeCache::gc_epilogue(); | |
116 | |
117 // refs processing: clean slate | |
118 GenMarkSweep::_ref_processor = NULL; | |
119 } | |
120 | |
121 | |
122 void G1MarkSweep::allocate_stacks() { | |
123 GenMarkSweep::_preserved_count_max = 0; | |
124 GenMarkSweep::_preserved_marks = NULL; | |
125 GenMarkSweep::_preserved_count = 0; | |
126 } | |
127 | |
// Phase 1: recursively traverse all live objects from the strong roots and
// mark them, then process discovered references, unload dead classes and
// nmethods, and prune the weak klass links, MDOs, and symbol/string tables.
//
// marked_for_unloading - out-parameter from the caller (not written here).
// clear_all_softrefs   - passed to the reference-processing policy; if true,
//                        all SoftReferences are treated as clearable.
void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
                                    bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  EventMark m("1 mark object");
  TraceTime tm("phase 1", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace(" 1");

  SharedHeap* sh = SharedHeap::heap();

  sh->process_strong_roots(true,  // activate StrongRootsScope
                           true,  // Collecting permanent generation.
                           SharedHeap::SO_SystemClasses,
                           &GenMarkSweep::follow_root_closure,
                           &GenMarkSweep::follow_code_root_closure,
                           &GenMarkSweep::follow_root_closure);

  // Process reference objects found during marking
  ReferenceProcessor* rp = GenMarkSweep::ref_processor();
  rp->setup_policy(clear_all_softrefs);
  rp->process_discovered_references(&GenMarkSweep::is_alive,
                                    &GenMarkSweep::keep_alive,
                                    &GenMarkSweep::follow_stack_closure,
                                    NULL);

  // Follow system dictionary roots and unload classes
  bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive);
  assert(GenMarkSweep::_marking_stack.is_empty(),
         "stack should be empty by now");

  // Follow code cache roots (has to be done after system dictionary,
  // assumes all live klasses are marked)
  CodeCache::do_unloading(&GenMarkSweep::is_alive,
                          &GenMarkSweep::keep_alive,
                          purged_class);
  // Drain any marking work the code-cache unloading pushed.
  GenMarkSweep::follow_stack();

  // Update subklass/sibling/implementor links of live klasses
  GenMarkSweep::follow_weak_klass_links();
  assert(GenMarkSweep::_marking_stack.is_empty(),
         "stack should be empty by now");

  // Visit memoized MDO's and clear any unmarked weak refs
  GenMarkSweep::follow_mdo_weak_refs();
  assert(GenMarkSweep::_marking_stack.is_empty(), "just drained");


  // Visit symbol and interned string tables and delete unmarked oops
  SymbolTable::unlink(&GenMarkSweep::is_alive);
  StringTable::unlink(&GenMarkSweep::is_alive);

  assert(GenMarkSweep::_marking_stack.is_empty(),
         "stack should be empty by now");
}
181 | |
// Phase-2 region closure: computes a forwarding address for every live
// object (via prepare_for_compaction), frees dead humongous regions into a
// local free list, and clears the soon-to-be-unused tail of the card table
// for each region.
class G1PrepareCompactClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ModRefBarrierSet* _mrbs;     // card-table barrier set, for clearing cards
  CompactPoint _cp;            // running compaction cursor across spaces
  size_t _pre_used;            // bytes freed so far (accumulated locally)
  FreeRegionList _free_list;   // regions freed by this closure (local list)
  HumongousRegionSet _humongous_proxy_set;  // proxy for freed humongous regions

  // Frees a dead humongous object's start region into the local free list
  // and prepares the region for compaction.
  void free_humongous_region(HeapRegion* hr) {
    // Capture end() before freeing: the card-clear below must cover the
    // region as it was sized at entry.
    HeapWord* end = hr->end();
    assert(hr->startsHumongous(),
           "Only the start of a humongous region should be freed.");
    _g1h->free_humongous_region(hr, &_pre_used, &_free_list,
                                &_humongous_proxy_set, false /* par */);
    // Do we also need to do this for the continues humongous regions
    // we just collapsed?
    hr->prepare_for_compaction(&_cp);
    // Also clear the part of the card table that will be unused after
    // compaction.
    _mrbs->clear(MemRegion(hr->compaction_top(), end));
  }

public:
  // cs - the first compaction space; objects are slid toward it.
  G1PrepareCompactClosure(CompactibleSpace* cs)
  : _g1h(G1CollectedHeap::heap()),
    _mrbs(G1CollectedHeap::heap()->mr_bs()),
    _cp(NULL, cs, cs->initialize_threshold()),
    _pre_used(0),
    _free_list("Local Free List for G1MarkSweep"),
    _humongous_proxy_set("G1MarkSweep Humongous Proxy Set") { }

  // Publishes the local humongous proxy set back to the heap and drops the
  // local free list. Call once after the region iteration completes.
  void update_sets() {
    // We'll recalculate total used bytes and recreate the free list
    // at the end of the GC, so no point in updating those values here.
    _g1h->update_sets_after_freeing_regions(0, /* pre_used */
                                            NULL, /* free_list */
                                            &_humongous_proxy_set,
                                            false /* par */);
    _free_list.remove_all();
  }

  // Visits one region; returns false so iteration continues over the heap.
  bool doHeapRegion(HeapRegion* hr) {
    if (hr->isHumongous()) {
      if (hr->startsHumongous()) {
        oop obj = oop(hr->bottom());
        if (obj->is_gc_marked()) {
          // Live humongous objects are not moved: forward to self.
          obj->forward_to(obj);
        } else {
          free_humongous_region(hr);
        }
      } else {
        assert(hr->continuesHumongous(), "Invalid humongous.");
      }
    } else {
      hr->prepare_for_compaction(&_cp);
      // Also clear the part of the card table that will be unused after
      // compaction.
      _mrbs->clear(MemRegion(hr->compaction_top(), hr->end()));
    }
    return false;
  }
};
677 | 244 |
245 // Finds the first HeapRegion. | |
342 | 246 class FindFirstRegionClosure: public HeapRegionClosure { |
247 HeapRegion* _a_region; | |
248 public: | |
677 | 249 FindFirstRegionClosure() : _a_region(NULL) {} |
342 | 250 bool doHeapRegion(HeapRegion* r) { |
677 | 251 _a_region = r; |
252 return true; | |
342 | 253 } |
254 HeapRegion* result() { return _a_region; } | |
255 }; | |
256 | |
// Phase 2: now that all live objects are marked, compute each object's new
// (post-compaction) address and install it as a forwarding pointer.
void G1MarkSweep::mark_sweep_phase2() {
  // Now all live objects are marked, compute the new object addresses.

  // It is imperative that we traverse perm_gen LAST. If dead space is
  // allowed a range of dead object may get overwritten by a dead int
  // array. If perm_gen is not traversed last a klassOop may get
  // overwritten. This is fine since it is dead, but if the class has dead
  // instances we have to skip them, and in order to find their size we
  // need the klassOop!
  //
  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  Generation* pg = g1h->perm_gen();

  EventMark m("2 compute new addresses");
  TraceTime tm("phase 2", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace("2");

  // Pick the first compaction target space: the heap's first region, or —
  // if that region holds a live humongous object (which won't move) — the
  // next compaction space after it.
  FindFirstRegionClosure cl;
  g1h->heap_region_iterate(&cl);
  HeapRegion *r = cl.result();
  CompactibleSpace* sp = r;
  if (r->isHumongous() && oop(r->bottom())->is_gc_marked()) {
    sp = r->next_compaction_space();
  }

  G1PrepareCompactClosure blk(sp);
  g1h->heap_region_iterate(&blk);
  // Publish the closure's region-set bookkeeping back to the heap.
  blk.update_sets();

  // Perm gen is compacted within itself (traversed last; see above).
  CompactPoint perm_cp(pg, NULL, NULL);
  pg->prepare_for_compaction(&perm_cp);
}
293 | |
294 class G1AdjustPointersClosure: public HeapRegionClosure { | |
295 public: | |
296 bool doHeapRegion(HeapRegion* r) { | |
297 if (r->isHumongous()) { | |
298 if (r->startsHumongous()) { | |
299 // We must adjust the pointers on the single H object. | |
300 oop obj = oop(r->bottom()); | |
301 debug_only(GenMarkSweep::track_interior_pointers(obj)); | |
302 // point all the oops to the new location | |
303 obj->adjust_pointers(); | |
304 debug_only(GenMarkSweep::check_interior_pointers()); | |
305 } | |
306 } else { | |
307 // This really ought to be "as_CompactibleSpace"... | |
308 r->adjust_pointers(); | |
309 } | |
310 return false; | |
311 } | |
312 }; | |
313 | |
// Phase 3: adjust all pointers — in strong roots, weak roots, and every
// heap region — to point at the new (forwarded) object locations computed
// in phase 2. No objects move yet.
void G1MarkSweep::mark_sweep_phase3() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  Generation* pg = g1h->perm_gen();

  // Adjust the pointers to reflect the new locations
  EventMark m("3 adjust pointers");
  TraceTime tm("phase 3", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace("3");

  SharedHeap* sh = SharedHeap::heap();

  sh->process_strong_roots(true,  // activate StrongRootsScope
                           true,  // Collecting permanent generation.
                           SharedHeap::SO_AllClasses,
                           &GenMarkSweep::adjust_root_pointer_closure,
                           NULL,  // do not touch code cache here
                           &GenMarkSweep::adjust_pointer_closure);

  g1h->ref_processor()->weak_oops_do(&GenMarkSweep::adjust_root_pointer_closure);

  // Now adjust pointers in remaining weak roots.  (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  g1h->g1_process_weak_roots(&GenMarkSweep::adjust_root_pointer_closure,
                             &GenMarkSweep::adjust_pointer_closure);

  // Restore the header words that were displaced into the preserved-marks
  // bookkeeping, pointing them at forwarded locations.
  GenMarkSweep::adjust_marks();

  G1AdjustPointersClosure blk;
  g1h->heap_region_iterate(&blk);
  pg->adjust_pointers();
}
345 | |
346 class G1SpaceCompactClosure: public HeapRegionClosure { | |
347 public: | |
348 G1SpaceCompactClosure() {} | |
349 | |
350 bool doHeapRegion(HeapRegion* hr) { | |
351 if (hr->isHumongous()) { | |
352 if (hr->startsHumongous()) { | |
353 oop obj = oop(hr->bottom()); | |
354 if (obj->is_gc_marked()) { | |
355 obj->init_mark(); | |
356 } else { | |
357 assert(hr->is_empty(), "Should have been cleared in phase 2."); | |
358 } | |
359 hr->reset_during_compaction(); | |
360 } | |
361 } else { | |
362 hr->compact(); | |
363 } | |
364 return false; | |
365 } | |
366 }; | |
367 | |
// Phase 4: with all pointers adjusted, physically move (compact) the
// objects to their forwarded locations.
void G1MarkSweep::mark_sweep_phase4() {
  // All pointers are now adjusted, move objects accordingly

  // It is imperative that we traverse perm_gen first in phase4. All
  // classes must be allocated earlier than their instances, and traversing
  // perm_gen first makes sure that all klassOops have moved to their new
  // location before any instance does a dispatch through its klass!

  // The ValidateMarkSweep live oops tracking expects us to traverse spaces
  // in the same order in phase2, phase3 and phase4. We don't quite do that
  // here (perm_gen first rather than last), so we tell the validate code
  // to use a higher index (saved from phase2) when verifying perm_gen.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  Generation* pg = g1h->perm_gen();

  EventMark m("4 compact heap");
  TraceTime tm("phase 4", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace("4");

  // Perm gen first (see ordering comment above), then every G1 region.
  pg->compact();

  G1SpaceCompactClosure blk;
  g1h->heap_region_iterate(&blk);

}
393 | |
394 // Local Variables: *** | |
395 // c-indentation-style: gnu *** | |
396 // End: *** |