Mercurial > hg > graal-jvmci-8
comparison src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp @ 0:a61af66fc99e jdk7-b24
Initial load
author | duke |
---|---|
date | Sat, 01 Dec 2007 00:00:00 +0000 |
parents | |
children | ba764ed4b6f2 |
comparison
equal
deleted
inserted
replaced
-1:000000000000 | 0:a61af66fc99e |
---|---|
1 /* | |
2 * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. | |
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, | |
20 * CA 95054 USA or visit www.sun.com if you need additional information or | |
21 * have any questions. | |
22 * | |
23 */ | |
24 | |
25 # include "incls/_precompiled.incl" | |
26 # include "incls/_psMarkSweepDecorator.cpp.incl" | |
27 | |
// The decorator currently receiving compacted objects; shared by all
// decorators and advanced via advance_destination_decorator().
PSMarkSweepDecorator* PSMarkSweepDecorator::_destination_decorator = NULL;
29 | |
30 | |
31 void PSMarkSweepDecorator::set_destination_decorator_tenured() { | |
32 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); | |
33 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); | |
34 | |
35 _destination_decorator = heap->old_gen()->object_mark_sweep(); | |
36 } | |
37 | |
38 void PSMarkSweepDecorator::set_destination_decorator_perm_gen() { | |
39 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); | |
40 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); | |
41 | |
42 _destination_decorator = heap->perm_gen()->object_mark_sweep(); | |
43 } | |
44 | |
45 void PSMarkSweepDecorator::advance_destination_decorator() { | |
46 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); | |
47 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); | |
48 | |
49 assert(_destination_decorator != NULL, "Sanity"); | |
50 guarantee(_destination_decorator != heap->perm_gen()->object_mark_sweep(), "Cannot advance perm gen decorator"); | |
51 | |
52 PSMarkSweepDecorator* first = heap->old_gen()->object_mark_sweep(); | |
53 PSMarkSweepDecorator* second = heap->young_gen()->eden_mark_sweep(); | |
54 PSMarkSweepDecorator* third = heap->young_gen()->from_mark_sweep(); | |
55 PSMarkSweepDecorator* fourth = heap->young_gen()->to_mark_sweep(); | |
56 | |
57 if ( _destination_decorator == first ) { | |
58 _destination_decorator = second; | |
59 } else if ( _destination_decorator == second ) { | |
60 _destination_decorator = third; | |
61 } else if ( _destination_decorator == third ) { | |
62 _destination_decorator = fourth; | |
63 } else { | |
64 fatal("PSMarkSweep attempting to advance past last compaction area"); | |
65 } | |
66 } | |
67 | |
68 PSMarkSweepDecorator* PSMarkSweepDecorator::destination_decorator() { | |
69 assert(_destination_decorator != NULL, "Sanity"); | |
70 | |
71 return _destination_decorator; | |
72 } | |
73 | |
// FIX ME FIX ME FIX ME FIX ME!!!!!!!!!
// The object forwarding code is duplicated. Factor this out!!!!!
//
// This method "precompacts" objects inside its space to dest. It places forwarding
// pointers into markOops for use by adjust_pointers. If "dest" should overflow, we
// finish by compacting into our own space.

void PSMarkSweepDecorator::precompact() {
  // Reset our own compact top.
  set_compaction_top(space()->bottom());

  /* We allow some amount of garbage towards the bottom of the space, so
   * we don't start compacting before there is a significant gain to be made.
   * Occasionally, we want to ensure a full compaction, which is determined
   * by the MarkSweepAlwaysCompactCount parameter. This is a significant
   * performance improvement!
   */
  bool skip_dead = ((PSMarkSweep::total_invocations() % MarkSweepAlwaysCompactCount) != 0);

  // Deadspace budget in heap words: dead runs charged against this budget
  // are left in place as filler objects instead of being compacted away.
  ssize_t allowed_deadspace = 0;
  if (skip_dead) {
    int ratio = allowed_dead_ratio();
    allowed_deadspace = (space()->capacity_in_bytes() * ratio / 100) / HeapWordSize;
  }

  // Fetch the current destination decorator
  PSMarkSweepDecorator* dest = destination_decorator();
  ObjectStartArray* start_array = dest->start_array();

  HeapWord* compact_top = dest->compaction_top();
  HeapWord* compact_end = dest->space()->end();

  HeapWord* q = space()->bottom();
  HeapWord* t = space()->top();

  HeapWord* end_of_live= q;    /* One byte beyond the last byte of the last
                                  live object. */
  HeapWord* first_dead = space()->end(); /* The first dead object. */
  LiveRange* liveRange = NULL; /* The current live range, recorded in the
                                  first header of preceding free area. */
  // Publish the initial value; refined after the scan below.
  _first_dead = first_dead;

  const intx interval = PrefetchScanIntervalInBytes;

  // Single forward scan over [bottom, top): live objects get forwarding
  // pointers installed in their mark words; dead runs are either turned
  // into deadspace fillers or recorded as LiveRanges for later skipping.
  while (q < t) {
    assert(oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() ||
           oop(q)->mark()->has_bias_pattern(),
           "these are the only valid states during a mark sweep");
    if (oop(q)->is_gc_marked()) {
      /* prefetch beyond q */
      Prefetch::write(q, interval);
      size_t size = oop(q)->size();

      size_t compaction_max_size = pointer_delta(compact_end, compact_top);

      // This should only happen if a space in the young gen overflows the
      // old gen. If that should happen, we null out the start_array, because
      // the young spaces are not covered by one.
      while(size > compaction_max_size) {
        // First record the last compact_top
        dest->set_compaction_top(compact_top);

        // Advance to the next compaction decorator
        advance_destination_decorator();
        dest = destination_decorator();

        // Update compaction info
        start_array = dest->start_array();
        compact_top = dest->compaction_top();
        compact_end = dest->space()->end();
        assert(compact_top == dest->space()->bottom(), "Advanced to space already in use");
        assert(compact_end > compact_top, "Must always be space remaining");
        compaction_max_size =
          pointer_delta(compact_end, compact_top);
      }

      // store the forwarding pointer into the mark word
      if (q != compact_top) {
        oop(q)->forward_to(oop(compact_top));
        assert(oop(q)->is_gc_marked(), "encoding the pointer should preserve the mark");
      } else {
        // Don't clear the mark since it confuses parallel old
        // verification.
        if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) {
          // if the object isn't moving we can just set the mark to the default
          // mark and handle it specially later on.
          oop(q)->init_mark();
        }
        assert(oop(q)->forwardee() == NULL, "should be forwarded to NULL");
      }

      // Update object start array
      if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) {
        if (start_array)
          start_array->allocate_block(compact_top);
      }

      debug_only(MarkSweep::register_live_oop(oop(q), size));
      compact_top += size;
      assert(compact_top <= dest->space()->end(),
        "Exceeding space in destination");

      q += size;
      end_of_live = q;
    } else {
      /* run over all the contiguous dead objects */
      HeapWord* end = q;
      do {
        /* prefetch beyond end */
        Prefetch::write(end, interval);
        end += oop(end)->size();
      } while (end < t && (!oop(end)->is_gc_marked()));

      /* see if we might want to pretend this object is alive so that
       * we don't have to compact quite as often.
       */
      if (allowed_deadspace > 0 && q == compact_top) {
        size_t sz = pointer_delta(end, q);
        if (insert_deadspace(allowed_deadspace, q, sz)) {
          // The dead run is now a fake live object; forward it exactly as
          // the live-object path above does (duplicated code — see FIX ME).
          size_t compaction_max_size = pointer_delta(compact_end, compact_top);

          // This should only happen if a space in the young gen overflows the
          // old gen. If that should happen, we null out the start_array, because
          // the young spaces are not covered by one.
          while (sz > compaction_max_size) {
            // First record the last compact_top
            dest->set_compaction_top(compact_top);

            // Advance to the next compaction decorator
            advance_destination_decorator();
            dest = destination_decorator();

            // Update compaction info
            start_array = dest->start_array();
            compact_top = dest->compaction_top();
            compact_end = dest->space()->end();
            assert(compact_top == dest->space()->bottom(), "Advanced to space already in use");
            assert(compact_end > compact_top, "Must always be space remaining");
            compaction_max_size =
              pointer_delta(compact_end, compact_top);
          }

          // store the forwarding pointer into the mark word
          if (q != compact_top) {
            oop(q)->forward_to(oop(compact_top));
            assert(oop(q)->is_gc_marked(), "encoding the pointer should preserve the mark");
          } else {
            // Don't clear the mark since it confuses parallel old
            // verification.
            if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) {
              // if the object isn't moving we can just set the mark to the default
              // mark and handle it specially later on.
              oop(q)->init_mark();
            }
            assert(oop(q)->forwardee() == NULL, "should be forwarded to NULL");
          }

          if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) {
            // Update object start array
            if (start_array)
              start_array->allocate_block(compact_top);
          }

          debug_only(MarkSweep::register_live_oop(oop(q), sz));
          compact_top += sz;
          assert(compact_top <= dest->space()->end(),
            "Exceeding space in destination");

          q = end;
          end_of_live = end;
          continue;
        }
      }

      /* for the previous LiveRange, record the end of the live objects. */
      if (liveRange) {
        liveRange->set_end(q);
      }

      /* record the current LiveRange object.
       * liveRange->start() is overlaid on the mark word.
       */
      liveRange = (LiveRange*)q;
      liveRange->set_start(end);
      liveRange->set_end(end);

      /* see if this is the first dead region. */
      if (q < first_dead) {
        first_dead = q;
      }

      /* move on to the next object */
      q = end;
    }
  }

  assert(q == t, "just checking");
  if (liveRange != NULL) {
    liveRange->set_end(q);
  }
  // Record the live/dead boundaries for adjust_pointers() and compact().
  _end_of_live = end_of_live;
  if (end_of_live < first_dead) {
    first_dead = end_of_live;
  }
  _first_dead = first_dead;

  // Update compaction top
  dest->set_compaction_top(compact_top);
}
283 | |
284 bool PSMarkSweepDecorator::insert_deadspace(ssize_t& allowed_deadspace_words, | |
285 HeapWord* q, size_t deadlength) { | |
286 allowed_deadspace_words -= deadlength; | |
287 if (allowed_deadspace_words >= 0) { | |
288 oop(q)->set_mark(markOopDesc::prototype()->set_marked()); | |
289 const size_t aligned_min_int_array_size = | |
290 align_object_size(typeArrayOopDesc::header_size(T_INT)); | |
291 if (deadlength >= aligned_min_int_array_size) { | |
292 oop(q)->set_klass(Universe::intArrayKlassObj()); | |
293 assert(((deadlength - aligned_min_int_array_size) * (HeapWordSize/sizeof(jint))) < (size_t)max_jint, | |
294 "deadspace too big for Arrayoop"); | |
295 typeArrayOop(q)->set_length((int)((deadlength - aligned_min_int_array_size) | |
296 * (HeapWordSize/sizeof(jint)))); | |
297 } else { | |
298 assert((int) deadlength == instanceOopDesc::header_size(), | |
299 "size for smallest fake dead object doesn't match"); | |
300 oop(q)->set_klass(SystemDictionary::object_klass()); | |
301 } | |
302 assert((int) deadlength == oop(q)->size(), | |
303 "make sure size for fake dead object match"); | |
304 // Recall that we required "q == compaction_top". | |
305 return true; | |
306 } else { | |
307 allowed_deadspace_words = 0; | |
308 return false; | |
309 } | |
310 } | |
311 | |
// Walk the live objects in this space and update every interior oop to the
// forwarded location recorded by precompact(). Dead gaps are skipped via the
// LiveRange pointers that precompact() stored in the gaps' first mark words.
void PSMarkSweepDecorator::adjust_pointers() {
  // adjust all the interior pointers to point at the new locations of objects
  // Used by MarkSweep::mark_sweep_phase3()

  HeapWord* q = space()->bottom();
  HeapWord* t = _end_of_live;  // Established by "prepare_for_compaction".

  assert(_first_dead <= _end_of_live, "Stands to reason, no?");

  if (q < t && _first_dead > q &&
      !oop(q)->is_gc_marked()) {
    // we have a chunk of the space which hasn't moved and we've
    // reinitialized the mark word during the previous pass, so we can't
    // use is_gc_marked for the traversal. Every object in the dense prefix
    // [bottom, _first_dead) is live, so walk it by object size instead.
    HeapWord* end = _first_dead;

    while (q < end) {
      debug_only(MarkSweep::track_interior_pointers(oop(q)));

      // point all the oops to the new location
      size_t size = oop(q)->adjust_pointers();

      debug_only(MarkSweep::check_interior_pointers());

      debug_only(MarkSweep::validate_live_oop(oop(q), size));

      q += size;
    }

    if (_first_dead == t) {
      q = t;
    } else {
      // $$$ This is funky. Using this to read the previously written
      // LiveRange. See also use below.
      q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer();
    }
  }
  const intx interval = PrefetchScanIntervalInBytes;

  debug_only(HeapWord* prev_q = NULL);
  // Past the dense prefix, alternate between live objects (adjust and step
  // by size) and dead gaps (whose mark word points at the next live object).
  while (q < t) {
    // prefetch beyond q
    Prefetch::write(q, interval);
    if (oop(q)->is_gc_marked()) {
      // q is alive
      debug_only(MarkSweep::track_interior_pointers(oop(q)));
      // point all the oops to the new location
      size_t size = oop(q)->adjust_pointers();
      debug_only(MarkSweep::check_interior_pointers());
      debug_only(MarkSweep::validate_live_oop(oop(q), size));
      debug_only(prev_q = q);
      q += size;
    } else {
      // q is not a live object, so its mark should point at the next
      // live object
      debug_only(prev_q = q);
      q = (HeapWord*) oop(q)->mark()->decode_pointer();
      assert(q > prev_q, "we should be moving forward through memory");
    }
  }

  assert(q == t, "just checking");
}
375 | |
// Slide every live object in this space to its forwarded destination and
// reinitialize its mark word, then set the space's top to the final
// compaction top. Dead gaps are skipped via the LiveRange pointers left by
// precompact(). If mangle_free_space is set, the freed tail is mangled.
void PSMarkSweepDecorator::compact(bool mangle_free_space ) {
  // Copy all live objects to their new location
  // Used by MarkSweep::mark_sweep_phase4()

  HeapWord* q = space()->bottom();
  HeapWord* const t = _end_of_live;
  debug_only(HeapWord* prev_q = NULL);

  if (q < t && _first_dead > q &&
      !oop(q)->is_gc_marked()) {
#ifdef ASSERT
    // we have a chunk of the space which hasn't moved and we've reinitialized the
    // mark word during the previous pass, so we can't use is_gc_marked for the
    // traversal. The dense prefix does not move; only verify it in debug builds.
    HeapWord* const end = _first_dead;

    while (q < end) {
      size_t size = oop(q)->size();
      assert(!oop(q)->is_gc_marked(), "should be unmarked (special dense prefix handling)");
      debug_only(MarkSweep::live_oop_moved_to(q, size, q));
      debug_only(prev_q = q);
      q += size;
    }
#endif

    if (_first_dead == t) {
      q = t;
    } else {
      // $$$ Funky: read the LiveRange that precompact() overlaid on the
      // first dead object's mark word to find the next live object.
      q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer();
    }
  }

  const intx scan_interval = PrefetchScanIntervalInBytes;
  const intx copy_interval = PrefetchCopyIntervalInBytes;

  while (q < t) {
    if (!oop(q)->is_gc_marked()) {
      // mark is pointer to next marked oop
      debug_only(prev_q = q);
      q = (HeapWord*) oop(q)->mark()->decode_pointer();
      assert(q > prev_q, "we should be moving forward through memory");
    } else {
      // prefetch beyond q
      Prefetch::read(q, scan_interval);

      // size and destination
      size_t size = oop(q)->size();
      HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee();

      // prefetch beyond compaction_top
      Prefetch::write(compaction_top, copy_interval);

      // copy object and reinit its mark
      debug_only(MarkSweep::live_oop_moved_to(q, size, compaction_top));
      assert(q != compaction_top, "everything in this pass should be moving");
      Copy::aligned_conjoint_words(q, compaction_top, size);
      oop(compaction_top)->init_mark();
      assert(oop(compaction_top)->klass() != NULL, "should have a class");

      debug_only(prev_q = q);
      q += size;
    }
  }

  assert(compaction_top() >= space()->bottom() && compaction_top() <= space()->end(),
         "should point inside space");
  space()->set_top(compaction_top());

  if (mangle_free_space) space()->mangle_unused_area();
}